From noreply at buildbot.pypy.org Tue Jul 1 00:13:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Jul 2014 00:13:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Small optimization for one case Message-ID: <20140630221343.90A4B1C33F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72301:e6db9b63d6e6 Date: 2014-06-30 22:00 +0200 http://bitbucket.org/pypy/pypy/changeset/e6db9b63d6e6/ Log: Small optimization for one case diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2353,8 +2353,12 @@ assert isinstance(loc_index, ImmedLoc) cardindex = loc_index.value >> card_bits if isinstance(loc_base, RegLoc): - mc.MOV_ri(r11.value, cardindex << 4) # 32/64bit - mc.ADD_rr(r11.value, loc_base.value) + if rx86.fits_in_32bits(write_locks_base + cardindex): + write_locks_base += cardindex + mc.MOV_rr(r11.value, loc_base.value) + else: + mc.MOV_ri(r11.value, cardindex << 4) # 32/64bit + mc.ADD_rr(r11.value, loc_base.value) mc.SHR_ri(r11.value, 4) else: mc.MOV_ri(r11.value, cardindex + (loc_base.value >> 4)) From noreply at buildbot.pypy.org Tue Jul 1 06:11:50 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 1 Jul 2014 06:11:50 +0200 (CEST) Subject: [pypy-commit] pypy default: don't import stuuf from conftest for no bloody reason Message-ID: <20140701041150.12F051C33F0@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r72302:13859c071d0f Date: 2014-07-01 05:11 +0100 http://bitbucket.org/pypy/pypy/changeset/13859c071d0f/ Log: don't import stuuf from conftest for no bloody reason diff --git a/rpython/conftest.py b/rpython/conftest.py --- a/rpython/conftest.py +++ b/rpython/conftest.py @@ -1,10 +1,8 @@ -from os.path import * import py, pytest from rpython.tool import leakfinder pytest_plugins = 'rpython.tool.pytest.expecttest' -cdir = realpath(join(dirname(__file__), 'translator', 'c')) 
option = None def braindead_deindent(self): diff --git a/rpython/rlib/_rffi_stacklet.py b/rpython/rlib/_rffi_stacklet.py --- a/rpython/rlib/_rffi_stacklet.py +++ b/rpython/rlib/_rffi_stacklet.py @@ -3,7 +3,7 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.tool import rffi_platform from rpython.rlib.rarithmetic import is_emulated_long -from rpython.conftest import cdir +from rpython.translator import cdir cdir = py.path.local(cdir) diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py --- a/rpython/rlib/clibffi.py +++ b/rpython/rlib/clibffi.py @@ -15,7 +15,7 @@ from rpython.rlib.objectmodel import specialize from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform -from rpython.conftest import cdir +from rpython.translator import cdir from platform import machine import py import os diff --git a/rpython/rlib/rdtoa.py b/rpython/rlib/rdtoa.py --- a/rpython/rlib/rdtoa.py +++ b/rpython/rlib/rdtoa.py @@ -1,7 +1,7 @@ from __future__ import with_statement from rpython.rlib import rfloat from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder diff --git a/rpython/rlib/rgil.py b/rpython/rlib/rgil.py --- a/rpython/rlib/rgil.py +++ b/rpython/rlib/rgil.py @@ -1,5 +1,5 @@ import py -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.lltypesystem import lltype, llmemory, rffi diff --git a/rpython/rlib/rsignal.py b/rpython/rlib/rsignal.py --- a/rpython/rlib/rsignal.py +++ b/rpython/rlib/rsignal.py @@ -1,7 +1,7 @@ import signal as cpy_signal import sys import py -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.rtyper.tool 
import rffi_platform from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/rpython/rlib/rstack.py b/rpython/rlib/rstack.py --- a/rpython/rlib/rstack.py +++ b/rpython/rlib/rstack.py @@ -10,7 +10,7 @@ from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo # ____________________________________________________________ diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -1,6 +1,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype, llmemory from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir +from rpython.translator import cdir import py from rpython.rlib import jit, rgc from rpython.rlib.debug import ll_assert @@ -59,7 +59,7 @@ c_thread_acquirelock = llexternal('RPyThreadAcquireLock', [TLOCKP, rffi.INT], rffi.INT, releasegil=True) # release the GIL -c_thread_acquirelock_timed = llexternal('RPyThreadAcquireLockTimed', +c_thread_acquirelock_timed = llexternal('RPyThreadAcquireLockTimed', [TLOCKP, rffi.LONGLONG, rffi.INT], rffi.INT, releasegil=True) # release the GIL diff --git a/rpython/rtyper/lltypesystem/module/ll_math.py b/rpython/rtyper/lltypesystem/module/ll_math.py --- a/rpython/rtyper/lltypesystem/module/ll_math.py +++ b/rpython/rtyper/lltypesystem/module/ll_math.py @@ -3,7 +3,7 @@ import py import sys -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.rlib import jit, rposix from rpython.rlib.rfloat import INFINITY, NAN, isfinite, isinf, isnan from rpython.rtyper.lltypesystem import lltype, rffi diff --git a/rpython/translator/__init__.py b/rpython/translator/__init__.py --- a/rpython/translator/__init__.py 
+++ b/rpython/translator/__init__.py @@ -0,0 +1,3 @@ +from os.path import realpath, join, dirname +cdir = realpath(join(dirname(__file__), 'c')) +del realpath, join, dirname diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -13,7 +13,7 @@ from rpython.translator.c.genc import CStandaloneBuilder, ExternalCompilationInfo from rpython.annotator.listdef import s_list_of_strings from rpython.tool.udir import udir -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.conftest import option def setup_module(module): @@ -382,7 +382,7 @@ if str(path).find(':')>=0: # bad choice of udir, there is a ':' in it which messes up the test pass - else: + else: out, err = cbuilder.cmdexec("", err=True, env={'PYPYLOG': str(path)}) size = os.stat(str(path)).st_size assert out.strip() == 'got:a.' + str(size) + '.' From noreply at buildbot.pypy.org Tue Jul 1 10:39:15 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 1 Jul 2014 10:39:15 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: handle everything in collect_cardrefs_to_nursery Message-ID: <20140701083915.C12F21C024A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1265:1f04257b1db7 Date: 2014-07-01 10:40 +0200 http://bitbucket.org/pypy/stmgc/changeset/1f04257b1db7/ Log: handle everything in collect_cardrefs_to_nursery diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -67,6 +67,7 @@ /* Card marking. Don't remove GCFLAG_WRITE_BARRIER because we need to come back to _stm_write_slowpath_card() for every card to mark. Add GCFLAG_CARDS_SET. 
*/ + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); obj->stm_flags |= GCFLAG_CARDS_SET; assert(STM_PSEGMENT->old_objects_with_cards); LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -328,6 +328,7 @@ static inline void _collect_now(object_t *obj, bool was_definitely_young) { assert(!_is_young(obj)); + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); dprintf(("_collect_now: %p\n", obj)); @@ -339,22 +340,6 @@ stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); obj->stm_flags |= GCFLAG_WRITE_BARRIER; - if (obj->stm_flags & GCFLAG_CARDS_SET) { - /* all objects that had WB cleared need to be fully synchronised - on commit, so we have to mark all their cards */ - struct stm_priv_segment_info_s *pseg = get_priv_segment( - STM_SEGMENT->segment_num); - - /* stm_wb-slowpath should never have triggered for young objs */ - assert(!was_definitely_young); - - if (!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) { - _reset_object_cards(pseg, obj, CARD_MARKED_OLD, true); /* mark all */ - } else { - /* simply clear overflow */ - _reset_object_cards(pseg, obj, CARD_CLEAR, false); - } - } } /* else traced in collect_cardrefs_to_nursery if necessary */ } @@ -371,12 +356,11 @@ assert(!_is_young(obj)); if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { - /* handled in _collect_now() */ + /* sometimes we remove the CARDS_SET in the WB slowpath, see core.c */ continue; } - /* traces cards, clears marked cards or marks them old if - necessary */ + /* traces cards, clears marked cards or marks them old if necessary */ _trace_card_object(obj); assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); @@ -551,6 +535,7 @@ if (!commit && STM_PSEGMENT->large_overflow_objects == NULL) STM_PSEGMENT->large_overflow_objects = list_create(); + /* All the objects we move out of the nursery become "overflow" objects. We use the list 'objects_pointing_to_nursery' to hold the ones we didn't trace so far. 
*/ @@ -558,6 +543,11 @@ if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { STM_PSEGMENT->objects_pointing_to_nursery = list_create(); + /* collect objs with cards, adds to objects_pointing_to_nursery + and makes sure there are no objs with cards left in + modified_old_objs */ + collect_cardrefs_to_nursery(); + /* See the doc of 'objects_pointing_to_nursery': if it is NULL, then it is implicitly understood to be equal to 'modified_old_objects'. We could copy modified_old_objects @@ -567,6 +557,7 @@ num_old = 0; } else { + collect_cardrefs_to_nursery(); num_old = STM_PSEGMENT->modified_old_objects_markers_num_old; } @@ -574,7 +565,6 @@ collect_roots_in_nursery(); - collect_cardrefs_to_nursery(); collect_oldrefs_to_nursery(); assert(list_is_empty(STM_PSEGMENT->old_objects_with_cards)); From noreply at buildbot.pypy.org Tue Jul 1 10:45:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Jul 2014 10:45:33 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: Another interface needed for the PyPy JIT Message-ID: <20140701084533.295531C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: card-marking Changeset: r1266:2f4d07820293 Date: 2014-06-30 17:48 +0200 http://bitbucket.org/pypy/stmgc/changeset/2f4d07820293/ Log: Another interface needed for the PyPy JIT diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -244,6 +244,14 @@ return mark_card; } +char *_stm_write_slowpath_card_extra_base(void) +{ + /* for the PyPy JIT: _stm_write_slowpath_card_extra_base[obj >> 4] + is the byte that must be set to CARD_MARKED. The logic below + does the same, but more explicitly. */ + return (char *)write_locks - WRITELOCK_START + 1; +} + void _stm_write_slowpath_card(object_t *obj, uintptr_t index) { /* If CARDS_SET is not set so far, issue a normal write barrier. 
diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -109,6 +109,7 @@ void _stm_write_slowpath(object_t *); void _stm_write_slowpath_card(object_t *, uintptr_t); char _stm_write_slowpath_card_extra(object_t *); +char *_stm_write_slowpath_card_extra_base(void); object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); From noreply at buildbot.pypy.org Tue Jul 1 10:45:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Jul 2014 10:45:34 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: Expose this value 100 too Message-ID: <20140701084534.4EC211C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: card-marking Changeset: r1267:e1df81263680 Date: 2014-06-30 17:57 +0200 http://bitbucket.org/pypy/stmgc/changeset/e1df81263680/ Log: Expose this value 100 too diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -225,10 +225,10 @@ static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; enum /* card values for write_locks */ { - CARD_CLEAR = 0, /* card not used at all */ - CARD_MARKED = 100, /* card marked for tracing in the next gc */ - CARD_MARKED_OLD = 101, /* card was marked before, but cleared - in a GC */ + CARD_CLEAR = 0, /* card not used at all */ + CARD_MARKED = _STM_CARD_MARKED, /* card marked for tracing in the next gc */ + CARD_MARKED_OLD = 101, /* card was marked before, but cleared + in a GC */ }; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -110,6 +110,7 @@ void _stm_write_slowpath_card(object_t *, uintptr_t); char _stm_write_slowpath_card_extra(object_t *); char *_stm_write_slowpath_card_extra_base(void); +#define _STM_CARD_MARKED 100 object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); From noreply at buildbot.pypy.org Tue Jul 1 10:45:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Jul 
2014 10:45:35 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: merge heads Message-ID: <20140701084535.786651C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: card-marking Changeset: r1268:b9101a55e80d Date: 2014-07-01 10:45 +0200 http://bitbucket.org/pypy/stmgc/changeset/b9101a55e80d/ Log: merge heads diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -67,6 +67,7 @@ /* Card marking. Don't remove GCFLAG_WRITE_BARRIER because we need to come back to _stm_write_slowpath_card() for every card to mark. Add GCFLAG_CARDS_SET. */ + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); obj->stm_flags |= GCFLAG_CARDS_SET; assert(STM_PSEGMENT->old_objects_with_cards); LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -328,6 +328,7 @@ static inline void _collect_now(object_t *obj, bool was_definitely_young) { assert(!_is_young(obj)); + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); dprintf(("_collect_now: %p\n", obj)); @@ -339,22 +340,6 @@ stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); obj->stm_flags |= GCFLAG_WRITE_BARRIER; - if (obj->stm_flags & GCFLAG_CARDS_SET) { - /* all objects that had WB cleared need to be fully synchronised - on commit, so we have to mark all their cards */ - struct stm_priv_segment_info_s *pseg = get_priv_segment( - STM_SEGMENT->segment_num); - - /* stm_wb-slowpath should never have triggered for young objs */ - assert(!was_definitely_young); - - if (!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) { - _reset_object_cards(pseg, obj, CARD_MARKED_OLD, true); /* mark all */ - } else { - /* simply clear overflow */ - _reset_object_cards(pseg, obj, CARD_CLEAR, false); - } - } } /* else traced in collect_cardrefs_to_nursery if necessary */ } @@ -371,12 +356,11 @@ assert(!_is_young(obj)); if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { - /* handled in _collect_now() */ + /* sometimes we remove the CARDS_SET 
in the WB slowpath, see core.c */ continue; } - /* traces cards, clears marked cards or marks them old if - necessary */ + /* traces cards, clears marked cards or marks them old if necessary */ _trace_card_object(obj); assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); @@ -551,6 +535,7 @@ if (!commit && STM_PSEGMENT->large_overflow_objects == NULL) STM_PSEGMENT->large_overflow_objects = list_create(); + /* All the objects we move out of the nursery become "overflow" objects. We use the list 'objects_pointing_to_nursery' to hold the ones we didn't trace so far. */ @@ -558,6 +543,11 @@ if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { STM_PSEGMENT->objects_pointing_to_nursery = list_create(); + /* collect objs with cards, adds to objects_pointing_to_nursery + and makes sure there are no objs with cards left in + modified_old_objs */ + collect_cardrefs_to_nursery(); + /* See the doc of 'objects_pointing_to_nursery': if it is NULL, then it is implicitly understood to be equal to 'modified_old_objects'. 
We could copy modified_old_objects @@ -567,6 +557,7 @@ num_old = 0; } else { + collect_cardrefs_to_nursery(); num_old = STM_PSEGMENT->modified_old_objects_markers_num_old; } @@ -574,7 +565,6 @@ collect_roots_in_nursery(); - collect_cardrefs_to_nursery(); collect_oldrefs_to_nursery(); assert(list_is_empty(STM_PSEGMENT->old_objects_with_cards)); From noreply at buildbot.pypy.org Tue Jul 1 11:09:55 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 1 Jul 2014 11:09:55 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: reset cards on overflow objs only needed when aborting (otherwise they are already cleared by normal minor collections) Message-ID: <20140701090955.2E3C11C31F4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: card-marking Changeset: r1269:664aca4f69ca Date: 2014-07-01 11:10 +0200 http://bitbucket.org/pypy/stmgc/changeset/664aca4f69ca/ Log: reset cards on overflow objs only needed when aborting (otherwise they are already cleared by normal minor collections) diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -946,6 +946,24 @@ /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); + /* modified_old_objects' cards get cleared in + reset_modified_from_other_segments. Objs in old_objs_with_cards but not + in modified_old_objs are overflow objects and handled here: */ + if (pseg->large_overflow_objects != NULL) { + /* some overflow objects may have cards when aborting, clear them too */ + LIST_FOREACH_R(pseg->large_overflow_objects, object_t * /*item*/, + { + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, item); + + if (realobj->stm_flags & GCFLAG_CARDS_SET) { + /* CARDS_SET is enough since other HAS_CARDS objs + are already cleared */ + _reset_object_cards(pseg, item, CARD_CLEAR, false); + } + }); + } + /* reset all the modified objects (incl. 
re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_segments(segment_num); _verify_cards_cleared_in_all_lists(pseg); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -325,7 +325,7 @@ -static inline void _collect_now(object_t *obj, bool was_definitely_young) +static inline void _collect_now(object_t *obj) { assert(!_is_young(obj)); assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); @@ -376,8 +376,7 @@ uintptr_t obj_sync_now = list_pop_item(lst); object_t *obj = (object_t *)(obj_sync_now & ~FLAG_SYNC_LARGE); - bool was_definitely_young = (obj_sync_now & FLAG_SYNC_LARGE); - _collect_now(obj, was_definitely_young); + _collect_now(obj); assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); if (obj_sync_now & FLAG_SYNC_LARGE) { @@ -407,7 +406,7 @@ dprintf(("collect_modified_old_objects\n")); LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, - _collect_now(item, false)); + _collect_now(item)); } static void collect_roots_from_markers(uintptr_t num_old) @@ -475,25 +474,6 @@ tree_clear(pseg->nursery_objects_shadows); - - /* modified_old_objects' cards get cleared in push_modified_to_other_segments - or reset_modified_from_other_segments. 
Objs in old_objs_with_cards but not - in modified_old_objs are overflow objects and handled here: */ - if (pseg->large_overflow_objects != NULL) { - /* some overflow objects may have cards when aborting, clear them too */ - LIST_FOREACH_R(pseg->large_overflow_objects, object_t * /*item*/, - { - struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(pseg->pub.segment_base, item); - - if (realobj->stm_flags & GCFLAG_CARDS_SET) { - /* CARDS_SET is enough since other HAS_CARDS objs - are already cleared */ - _reset_object_cards(pseg, item, CARD_CLEAR, false); - } - }); - } - return nursery_used; #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") From noreply at buildbot.pypy.org Tue Jul 1 11:39:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Jul 2014 11:39:39 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: Workaround for what seems like a clang bug (I'm sure people would argue Message-ID: <20140701093939.131831C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: card-marking Changeset: r1270:6d6832a447c3 Date: 2014-07-01 11:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/6d6832a447c3/ Log: Workaround for what seems like a clang bug (I'm sure people would argue otherwise, but I don't care: it needs a workaround). diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -245,12 +245,13 @@ return mark_card; } -char *_stm_write_slowpath_card_extra_base(void) +long _stm_write_slowpath_card_extra_base(void) { /* for the PyPy JIT: _stm_write_slowpath_card_extra_base[obj >> 4] is the byte that must be set to CARD_MARKED. The logic below does the same, but more explicitly. 
*/ - return (char *)write_locks - WRITELOCK_START + 1; + return (((long)write_locks) - WRITELOCK_START + 1) + + 0x4000000000000000L; // <- workaround for a clang bug :-( } void _stm_write_slowpath_card(object_t *obj, uintptr_t index) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -109,7 +109,7 @@ void _stm_write_slowpath(object_t *); void _stm_write_slowpath_card(object_t *, uintptr_t); char _stm_write_slowpath_card_extra(object_t *); -char *_stm_write_slowpath_card_extra_base(void); +long _stm_write_slowpath_card_extra_base(void); #define _STM_CARD_MARKED 100 object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); From noreply at buildbot.pypy.org Tue Jul 1 11:42:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Jul 2014 11:42:45 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/6d6832a447c3 (branch card-marking) and fix the call to _stm_write_slowpath_card_extra_base() Message-ID: <20140701094245.1DF611C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72303:46d55a933c65 Date: 2014-07-01 11:42 +0200 http://bitbucket.org/pypy/pypy/changeset/46d55a933c65/ Log: import stmgc/6d6832a447c3 (branch card-marking) and fix the call to _stm_write_slowpath_card_extra_base() diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -28,7 +28,7 @@ adr_write_slowpath_card_extra = ( CFlexSymbolic('((long)&_stm_write_slowpath_card_extra)')) adr__stm_write_slowpath_card_extra_base = ( - CFlexSymbolic('((long)_stm_write_slowpath_card_extra_base())')) + CFlexSymbolic('(_stm_write_slowpath_card_extra_base()-0x4000000000000000L)')) CARD_MARKED = CFlexSymbolic('_STM_CARD_MARKED') CARD_SIZE = CFlexSymbolic('_STM_CARD_SIZE') diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 
+1,1 @@ -e1df81263680 +6d6832a447c3 diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -68,6 +68,7 @@ /* Card marking. Don't remove GCFLAG_WRITE_BARRIER because we need to come back to _stm_write_slowpath_card() for every card to mark. Add GCFLAG_CARDS_SET. */ + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); obj->stm_flags |= GCFLAG_CARDS_SET; assert(STM_PSEGMENT->old_objects_with_cards); LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); @@ -245,12 +246,13 @@ return mark_card; } -char *_stm_write_slowpath_card_extra_base(void) +long _stm_write_slowpath_card_extra_base(void) { /* for the PyPy JIT: _stm_write_slowpath_card_extra_base[obj >> 4] is the byte that must be set to CARD_MARKED. The logic below does the same, but more explicitly. */ - return (char *)write_locks - WRITELOCK_START + 1; + return (((long)write_locks) - WRITELOCK_START + 1) + + 0x4000000000000000L; // <- workaround for a clang bug :-( } void _stm_write_slowpath_card(object_t *obj, uintptr_t index) @@ -946,6 +948,24 @@ /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); + /* modified_old_objects' cards get cleared in + reset_modified_from_other_segments. Objs in old_objs_with_cards but not + in modified_old_objs are overflow objects and handled here: */ + if (pseg->large_overflow_objects != NULL) { + /* some overflow objects may have cards when aborting, clear them too */ + LIST_FOREACH_R(pseg->large_overflow_objects, object_t * /*item*/, + { + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, item); + + if (realobj->stm_flags & GCFLAG_CARDS_SET) { + /* CARDS_SET is enough since other HAS_CARDS objs + are already cleared */ + _reset_object_cards(pseg, item, CARD_CLEAR, false); + } + }); + } + /* reset all the modified objects (incl. 
re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_segments(segment_num); _verify_cards_cleared_in_all_lists(pseg); diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -326,9 +326,10 @@ -static inline void _collect_now(object_t *obj, bool was_definitely_young) +static inline void _collect_now(object_t *obj) { assert(!_is_young(obj)); + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); dprintf(("_collect_now: %p\n", obj)); @@ -340,22 +341,6 @@ stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); obj->stm_flags |= GCFLAG_WRITE_BARRIER; - if (obj->stm_flags & GCFLAG_CARDS_SET) { - /* all objects that had WB cleared need to be fully synchronised - on commit, so we have to mark all their cards */ - struct stm_priv_segment_info_s *pseg = get_priv_segment( - STM_SEGMENT->segment_num); - - /* stm_wb-slowpath should never have triggered for young objs */ - assert(!was_definitely_young); - - if (!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) { - _reset_object_cards(pseg, obj, CARD_MARKED_OLD, true); /* mark all */ - } else { - /* simply clear overflow */ - _reset_object_cards(pseg, obj, CARD_CLEAR, false); - } - } } /* else traced in collect_cardrefs_to_nursery if necessary */ } @@ -372,12 +357,11 @@ assert(!_is_young(obj)); if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { - /* handled in _collect_now() */ + /* sometimes we remove the CARDS_SET in the WB slowpath, see core.c */ continue; } - /* traces cards, clears marked cards or marks them old if - necessary */ + /* traces cards, clears marked cards or marks them old if necessary */ _trace_card_object(obj); assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); @@ -393,8 +377,7 @@ uintptr_t obj_sync_now = list_pop_item(lst); object_t *obj = (object_t *)(obj_sync_now & ~FLAG_SYNC_LARGE); - bool was_definitely_young = (obj_sync_now & FLAG_SYNC_LARGE); - _collect_now(obj, 
was_definitely_young); + _collect_now(obj); assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); if (obj_sync_now & FLAG_SYNC_LARGE) { @@ -424,7 +407,7 @@ dprintf(("collect_modified_old_objects\n")); LIST_FOREACH_R( STM_PSEGMENT->modified_old_objects, object_t * /*item*/, - _collect_now(item, false)); + _collect_now(item)); } static void collect_roots_from_markers(uintptr_t num_old) @@ -492,25 +475,6 @@ tree_clear(pseg->nursery_objects_shadows); - - /* modified_old_objects' cards get cleared in push_modified_to_other_segments - or reset_modified_from_other_segments. Objs in old_objs_with_cards but not - in modified_old_objs are overflow objects and handled here: */ - if (pseg->large_overflow_objects != NULL) { - /* some overflow objects may have cards when aborting, clear them too */ - LIST_FOREACH_R(pseg->large_overflow_objects, object_t * /*item*/, - { - struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(pseg->pub.segment_base, item); - - if (realobj->stm_flags & GCFLAG_CARDS_SET) { - /* CARDS_SET is enough since other HAS_CARDS objs - are already cleared */ - _reset_object_cards(pseg, item, CARD_CLEAR, false); - } - }); - } - return nursery_used; #pragma pop_macro("STM_SEGMENT") #pragma pop_macro("STM_PSEGMENT") @@ -552,6 +516,7 @@ if (!commit && STM_PSEGMENT->large_overflow_objects == NULL) STM_PSEGMENT->large_overflow_objects = list_create(); + /* All the objects we move out of the nursery become "overflow" objects. We use the list 'objects_pointing_to_nursery' to hold the ones we didn't trace so far. */ @@ -559,6 +524,11 @@ if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { STM_PSEGMENT->objects_pointing_to_nursery = list_create(); + /* collect objs with cards, adds to objects_pointing_to_nursery + and makes sure there are no objs with cards left in + modified_old_objs */ + collect_cardrefs_to_nursery(); + /* See the doc of 'objects_pointing_to_nursery': if it is NULL, then it is implicitly understood to be equal to 'modified_old_objects'. 
We could copy modified_old_objects @@ -568,6 +538,7 @@ num_old = 0; } else { + collect_cardrefs_to_nursery(); num_old = STM_PSEGMENT->modified_old_objects_markers_num_old; } @@ -575,7 +546,6 @@ collect_roots_in_nursery(); - collect_cardrefs_to_nursery(); collect_oldrefs_to_nursery(); assert(list_is_empty(STM_PSEGMENT->old_objects_with_cards)); diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -110,7 +110,7 @@ void _stm_write_slowpath(object_t *); void _stm_write_slowpath_card(object_t *, uintptr_t); char _stm_write_slowpath_card_extra(object_t *); -char *_stm_write_slowpath_card_extra_base(void); +long _stm_write_slowpath_card_extra_base(void); #define _STM_CARD_MARKED 100 object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); From noreply at buildbot.pypy.org Tue Jul 1 12:06:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Jul 2014 12:06:41 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: Increase the usable memory to 24GB (from 1.5GB). I'd like to increase Message-ID: <20140701100641.BD7081C3331@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: card-marking Changeset: r1271:f90f884585dd Date: 2014-07-01 12:06 +0200 http://bitbucket.org/pypy/stmgc/changeset/f90f884585dd/ Log: Increase the usable memory to 24GB (from 1.5GB). I'd like to increase it more but I'm getting again clang linking errors... 
diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -303,7 +303,7 @@ unregister_thread_local(); - stm_teardown(); + //stm_teardown(); return 0; } diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -14,7 +14,7 @@ #endif -#define NB_PAGES (1500*256) // 1500MB +#define NB_PAGES (24000*256) // 24GB #define NB_SEGMENTS STM_NB_SEGMENTS #define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) From noreply at buildbot.pypy.org Tue Jul 1 12:12:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Jul 2014 12:12:38 +0200 (CEST) Subject: [pypy-commit] pypy default: Clarify to the compiler that this path is not a fall-through Message-ID: <20140701101238.BD1EC1C31F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72304:ef7d6396c2e4 Date: 2014-07-01 12:12 +0200 http://bitbucket.org/pypy/pypy/changeset/ef7d6396c2e4/ Log: Clarify to the compiler that this path is not a fall-through diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -267,7 +267,7 @@ # Emit default case yield 'default:' if defaultlink is None: - yield '\tassert(!"bad switch!!");' + yield '\tassert(!"bad switch!!"); abort();' else: for op in self.gen_link(defaultlink): yield '\t' + op From noreply at buildbot.pypy.org Tue Jul 1 13:07:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Jul 2014 13:07:03 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: Reduce the limit to 2.5GB again. We're getting relocation errors on Message-ID: <20140701110703.097861C31F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: card-marking Changeset: r1272:f18bff5ab704 Date: 2014-07-01 13:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/f18bff5ab704/ Log: Reduce the limit to 2.5GB again. 
We're getting relocation errors on pypy, and fork() takes forever... diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -14,7 +14,7 @@ #endif -#define NB_PAGES (24000*256) // 24GB +#define NB_PAGES (2500*256) // 2500MB #define NB_SEGMENTS STM_NB_SEGMENTS #define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) From noreply at buildbot.pypy.org Tue Jul 1 13:08:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Jul 2014 13:08:11 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/f18bff5ab704 (branch card-marking) Message-ID: <20140701110811.292141C31F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72305:2903b643cad6 Date: 2014-07-01 13:07 +0200 http://bitbucket.org/pypy/pypy/changeset/2903b643cad6/ Log: import stmgc/f18bff5ab704 (branch card-marking) diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -6d6832a447c3 +f18bff5ab704 diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -15,7 +15,7 @@ #endif -#define NB_PAGES (1500*256) // 1500MB +#define NB_PAGES (2500*256) // 2500MB #define NB_SEGMENTS STM_NB_SEGMENTS #define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) From noreply at buildbot.pypy.org Tue Jul 1 16:14:40 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 1 Jul 2014 16:14:40 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: be sure to initialize the transaction_length when starting a thread Message-ID: <20140701141440.ADA391C024A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 
Changeset: r72306:f1bc1a8a5ae1 Date: 2014-07-01 16:14 +0200 http://bitbucket.org/pypy/pypy/changeset/f1bc1a8a5ae1/ Log: be sure to initialize the transaction_length when starting a thread diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -62,7 +62,7 @@ c_thread_acquirelock = llexternal('RPyThreadAcquireLock', [TLOCKP, rffi.INT], rffi.INT, releasegil=True) # release the GIL -c_thread_acquirelock_timed = llexternal('RPyThreadAcquireLockTimed', +c_thread_acquirelock_timed = llexternal('RPyThreadAcquireLockTimed', [TLOCKP, rffi.LONGLONG, rffi.INT], rffi.INT, releasegil=True) # release the GIL @@ -97,9 +97,11 @@ @specialize.arg(0) def ll_start_new_thread(func): - if rgc.stm_is_enabled: - from rpython.rlib.rstm import register_invoke_around_extcall + if rgc.stm_is_enabled(): + from rpython.rlib.rstm import (register_invoke_around_extcall, + set_transaction_length) register_invoke_around_extcall() + set_transaction_length(1.0) ident = c_thread_start(func) if ident == -1: raise error("can't start new thread") From noreply at buildbot.pypy.org Tue Jul 1 16:57:52 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 1 Jul 2014 16:57:52 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: adapt some tests and make hint_commit_soon do something again even if transaction_length is set to unlimited. It previously didn't do anything in that case, so now we will get more breaks again since we call stmcb_commit_soon in minor collections. Message-ID: <20140701145752.E05731C0083@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r72307:3e144ed1d5b7 Date: 2014-07-01 16:58 +0200 http://bitbucket.org/pypy/pypy/changeset/3e144ed1d5b7/ Log: adapt some tests and make hint_commit_soon do something again even if transaction_length is set to unlimited. It previously didn't do anything in that case, so now we will get more breaks again since we call stmcb_commit_soon in minor collections. 
diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -42,7 +42,8 @@ if (((long)pypy_stm_nursery_low_fill_mark_saved) > 0) { pypy_stm_nursery_low_fill_mark_saved = 0; } - } else if (((long)pypy_stm_nursery_low_fill_mark) > 0) { + } else { + /* if (((long)pypy_stm_nursery_low_fill_mark) > 0) */ /* if not set to unlimited by pypy_stm_setup() (s.b.) */ pypy_stm_nursery_low_fill_mark = 0; } diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -90,6 +90,7 @@ def test_should_break_transaction(self): def entry_point(argv): + rstm.hint_commit_soon() print '<', int(rstm.should_break_transaction()), '>' return 0 t, cbuilder = self.compile(entry_point) @@ -213,7 +214,7 @@ S = lltype.GcStruct('S', ('got_exception', OBJECTPTR)) PS = lltype.Ptr(S) perform_transaction = rstm.make_perform_transaction(check, PS) - + from rpython.rtyper.lltypesystem import lltype R = lltype.GcStruct('R', ('x', lltype.Signed)) S1 = lltype.Struct('S1', ('r', lltype.Ptr(R))) @@ -281,6 +282,11 @@ Parent().xy = 0 globf.xy = -2 globf.yx = 'hi there %d' % len(argv) + + # make sure perform_transaction breaks the transaction: + rstm.hint_commit_soon() + assert rstm.should_break_transaction() + perform_transaction(lltype.nullptr(PS.TO)) return 0 t, cbuilder = self.compile(main) @@ -378,6 +384,9 @@ perform_transaction = rstm.make_perform_transaction(check, PS) def main(argv): + # make sure perform_transaction breaks the transaction: + rstm.hint_commit_soon() + assert rstm.should_break_transaction() perform_transaction(lltype.nullptr(PS.TO)) return 0 @@ -589,8 +598,8 @@ 'File "/tmp/foobaz.py", line 73, in bar\n' 'stopping bar\n') in data assert ('starting 
some_extremely_longish_and_boring_function_name\n' - 'File "...bla/br/project/foobaz.py", line 81,' - ' in some_extremely_longish_a...\n') in data + 'File "\n') in data def test_pypy_marker_2(self): import time @@ -619,6 +628,11 @@ llop.stm_setup_expand_marker_for_pypy( lltype.Void, pycode1, "co_filename", "co_name", "co_firstlineno", "co_lnotab") + + # make sure perform_transaction breaks the transaction: + rstm.hint_commit_soon() + assert rstm.should_break_transaction() + perform_transaction(lltype.malloc(S)) return 0 # From noreply at buildbot.pypy.org Tue Jul 1 20:47:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 1 Jul 2014 20:47:02 +0200 (CEST) Subject: [pypy-commit] cffi default: Add malloc.h, needed for alloca() in this file. Message-ID: <20140701184702.84FC21C0083@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1533:003b8ea084ef Date: 2014-07-01 20:47 +0200 http://bitbucket.org/cffi/cffi/changeset/003b8ea084ef/ Log: Add malloc.h, needed for alloca() in this file. 
diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -1,3 +1,4 @@ +#include /* for alloca() */ /************************************************************/ /* errno and GetLastError support */ From noreply at buildbot.pypy.org Wed Jul 2 00:17:59 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 2 Jul 2014 00:17:59 +0200 (CEST) Subject: [pypy-commit] pypy default: fix imports Message-ID: <20140701221800.0AB091C3331@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r72308:ec8a85b373b9 Date: 2014-07-01 23:17 +0100 http://bitbucket.org/pypy/pypy/changeset/ec8a85b373b9/ Log: fix imports diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -11,7 +11,7 @@ from rpython.rlib.rtimer import read_timestamp, _is_64_bit from rpython.rtyper.lltypesystem import rffi, lltype from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.rlib.rarithmetic import r_longlong import time, sys diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -10,7 +10,7 @@ from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.objectmodel import we_are_translated -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.gensupp import NameManager from rpython.tool.udir import udir From noreply at buildbot.pypy.org Wed Jul 2 08:24:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 08:24:49 +0200 (CEST) Subject: [pypy-commit] pypy default: Detect the x32 mode. 
(Note that there are compilation issues too Message-ID: <20140702062449.069F61C3225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72309:049f1e96b1de Date: 2014-07-02 08:24 +0200 http://bitbucket.org/pypy/pypy/changeset/049f1e96b1de/ Log: Detect the x32 mode. (Note that there are compilation issues too which may prevent a translate.py from ever reaching this point, but well, if we fix these, then we'll hit this barrier rather than compile a buggy executable.) diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -73,11 +73,14 @@ result = MODEL_X86_64 else: assert sys.maxint == 2**31-1 - from rpython.jit.backend.x86.detect_sse2 import detect_sse2 - if detect_sse2(): + from rpython.jit.backend.x86 import detect_sse2 + if detect_sse2.detect_sse2(): result = MODEL_X86 else: result = MODEL_X86_NO_SSE2 + if detect_sse2.detect_x32_mode(): + raise ProcessorAutodetectError( + 'JITting in x32 mode is not implemented') # if result.startswith('arm'): from rpython.jit.backend.arm.detect import detect_float diff --git a/rpython/jit/backend/x86/detect_sse2.py b/rpython/jit/backend/x86/detect_sse2.py --- a/rpython/jit/backend/x86/detect_sse2.py +++ b/rpython/jit/backend/x86/detect_sse2.py @@ -1,3 +1,4 @@ +import sys from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rmmap import alloc, free @@ -18,9 +19,26 @@ free(data, 4096) return bool(code & (1<<25)) and bool(code & (1<<26)) +def detect_x32_mode(): + data = alloc(4096) + pos = 0 # 32-bit 64-bit / x32 + for c in ("\x48" # DEC EAX + "\xB8\xC8\x00\x00\x00"# MOV EAX, 200 MOV RAX, 0x40404040000000C8 + "\x40\x40\x40\x40" # 4x INC EAX + "\xC3"): # RET RET + data[pos] = c + pos += 1 + fnptr = rffi.cast(lltype.Ptr(lltype.FuncType([], lltype.Signed)), data) + code = fnptr() + free(data, 4096) + assert code in (200, 204, 0x40404040000000C8) + return code == 200 + if __name__ == 
'__main__': if detect_sse2(): print 'Processor supports sse2.' else: print 'Missing processor support for sse2.' + if detect_x32_mode(): + print 'Process is running in "x32" mode.' From noreply at buildbot.pypy.org Wed Jul 2 08:49:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 08:49:38 +0200 (CEST) Subject: [pypy-commit] pypy default: test: doing a large number of ping-pongs between two threads, using locks, Message-ID: <20140702064938.43C4C1C3225@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72310:1a8c4f5e30da Date: 2014-07-02 08:49 +0200 http://bitbucket.org/pypy/pypy/changeset/1a8c4f5e30da/ Log: test: doing a large number of ping-pongs between two threads, using locks, should complete in a reasonable time on a translated pypy with -A. diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -44,6 +44,7 @@ spaceconfig = dict(usemodules=('thread', 'rctime', 'signal')) def setup_class(cls): + cls.w_runappdirect = cls.space.wrap(cls.runappdirect) if cls.runappdirect: def plain_waitfor(self, condition, delay=1): adaptivedelay = 0.04 diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py --- a/pypy/module/thread/test/test_lock.py +++ b/pypy/module/thread/test/test_lock.py @@ -57,8 +57,34 @@ assert lock.acquire() is True assert lock.acquire(False) is False raises(TypeError, lock.acquire, True, timeout=.1) - lock._py3k_acquire(True, timeout=.01) - lock._py3k_acquire(True, .01) + if hasattr(lock, '_py3k_acquire'): + lock._py3k_acquire(True, timeout=.01) + lock._py3k_acquire(True, .01) + else: + assert self.runappdirect, "missing lock._py3k_acquire()" + + def test_ping_pong(self): + # The purpose of this test is that doing a large number of ping-pongs + # between two threads, using locks, should complete in a reasonable + # time on a translated pypy with -A. 
If the GIL logic causes too + # much sleeping, then it will fail. + import thread, time + COUNT = 100000 if self.runappdirect else 50 + lock1 = thread.allocate_lock() + lock2 = thread.allocate_lock() + def fn(): + for i in range(COUNT): + lock1.acquire() + lock2.release() + lock2.acquire() + print "STARTING" + start = time.time() + thread.start_new_thread(fn, ()) + for i in range(COUNT): + lock2.acquire() + lock1.release() + stop = time.time() + assert stop - start < 30.0 # ~0.6 sec on pypy-c-jit def test_compile_lock(): From noreply at buildbot.pypy.org Wed Jul 2 10:12:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 10:12:29 +0200 (CEST) Subject: [pypy-commit] pypy default: Optimize array.extend() and make it support directly lists of ints or floats. Message-ID: <20140702081229.9D4391C0083@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72311:6b30b8d83c24 Date: 2014-07-02 10:11 +0200 http://bitbucket.org/pypy/pypy/changeset/6b30b8d83c24/ Log: Optimize array.extend() and make it support directly lists of ints or floats. 
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -674,6 +674,10 @@ return rffi.cast(mytype.itemtype, item) # # "regular" case: it fits in an rpython integer (lltype.Signed) + # or it is a float + return self.item_from_int_or_float(item) + + def item_from_int_or_float(self, item): result = rffi.cast(mytype.itemtype, item) if mytype.canoverflow: if rffi.cast(lltype.Signed, result) != item: @@ -686,8 +690,8 @@ % mytype.bytes) if not mytype.signed: msg = 'un' + msg # 'signed' => 'unsigned' - raise OperationError(space.w_OverflowError, - space.wrap(msg)) + raise OperationError(self.space.w_OverflowError, + self.space.wrap(msg)) return result def __del__(self): @@ -734,27 +738,32 @@ def fromsequence(self, w_seq): space = self.space oldlen = self.len + newlen = oldlen try: - new = space.len_w(w_seq) - self.setlen(self.len + new) - except OperationError: - pass - - i = 0 - try: - if mytype.typecode == 'u': - myiter = space.unpackiterable + # optimized case for arrays of integers or floats + if mytype.unwrap == 'int_w': + lst = space.listview_int(w_seq) + elif mytype.unwrap == 'float_w': + lst = space.listview_float(w_seq) else: - myiter = space.listview - for w_i in myiter(w_seq): - if oldlen + i >= self.len: - self.setlen(oldlen + i + 1) - self.buffer[oldlen + i] = self.item_w(w_i) - i += 1 - except OperationError: - self.setlen(oldlen + i) - raise - self.setlen(oldlen + i) + lst = None + if lst is not None: + self.setlen(oldlen + len(lst)) + buf = self.buffer + for num in lst: + buf[newlen] = self.item_from_int_or_float(num) + newlen += 1 + return + # + # this is the general case + lst_w = space.listview(w_seq) + self.setlen(oldlen + len(lst_w)) + for w_num in lst_w: + self.buffer[newlen] = self.item_w(w_num) + newlen += 1 + finally: + if self.len != newlen: + self.setlen(newlen) def extend(self, w_iterable, accept_different_array=False): space = 
self.space From noreply at buildbot.pypy.org Wed Jul 2 11:18:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 11:18:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #1783 Message-ID: <20140702091848.9B2C61C0083@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72312:93a8f5aeb3bc Date: 2014-07-02 11:18 +0200 http://bitbucket.org/pypy/pypy/changeset/93a8f5aeb3bc/ Log: Issue #1783 Improve array.extend(x) by not requiring an intermediate list in case there isn't one, e.g. if x is a generator or iterator. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -967,6 +967,13 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_no_unpack(self, w_iterable): + """ Same as listview() if cheap. If 'w_iterable' is something like + a generator, for example, then return None instead. + May return None anyway. + """ + return None + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. 
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -15,6 +15,7 @@ interp2app, interpindirect2app, unwrap_spec) from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, make_weakref_descr) +from pypy.interpreter.generator import GeneratorIterator from pypy.module._file.interp_file import W_File from pypy.objspace.std.floatobject import W_FloatObject @@ -630,6 +631,10 @@ def make_array(mytype): W_ArrayBase = globals()['W_ArrayBase'] + unpack_driver = jit.JitDriver(name='unpack_array', + greens=['tp'], + reds=['self', 'w_iterator']) + class W_Array(W_ArrayBase): itemsize = mytype.bytes typecode = mytype.typecode @@ -739,31 +744,64 @@ space = self.space oldlen = self.len newlen = oldlen - try: - # optimized case for arrays of integers or floats - if mytype.unwrap == 'int_w': - lst = space.listview_int(w_seq) - elif mytype.unwrap == 'float_w': - lst = space.listview_float(w_seq) - else: - lst = None - if lst is not None: - self.setlen(oldlen + len(lst)) + + # optimized case for arrays of integers or floats + if mytype.unwrap == 'int_w': + lst = space.listview_int(w_seq) + elif mytype.unwrap == 'float_w': + lst = space.listview_float(w_seq) + else: + lst = None + if lst is not None: + self.setlen(oldlen + len(lst)) + try: buf = self.buffer for num in lst: buf[newlen] = self.item_from_int_or_float(num) newlen += 1 - return - # - # this is the general case - lst_w = space.listview(w_seq) + except OperationError: + self.setlen(newlen) + raise + return + + # this is the common case: w_seq is a list or a tuple + lst_w = space.listview_no_unpack(w_seq) + if lst_w is not None: self.setlen(oldlen + len(lst_w)) - for w_num in lst_w: - self.buffer[newlen] = self.item_w(w_num) - newlen += 1 - finally: - if self.len != newlen: - self.setlen(newlen) + buf = self.buffer + try: + for w_num in lst_w: + # note: self.item_w() might invoke arbitrary code. 
+ # In case it resizes the same array, then strange + # things may happen, but as we don't reload 'buf' + # we know that one is big enough for all items + # (so at least we avoid crashes) + buf[newlen] = self.item_w(w_num) + newlen += 1 + except OperationError: + if buf == self.buffer: + self.setlen(newlen) + raise + return + + self._fromiterable(w_seq) + + def _fromiterable(self, w_seq): + # a more careful case if w_seq happens to be a very large + # iterable: don't copy the items into some intermediate list + w_iterator = self.space.iter(w_seq) + tp = self.space.type(w_iterator) + while True: + unpack_driver.jit_merge_point(tp=tp, self=self, + w_iterator=w_iterator) + space = self.space + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + self.descr_append(space, w_item) def extend(self, w_iterable, accept_different_array=False): space = self.space @@ -806,8 +844,9 @@ def descr_append(self, space, w_x): x = self.item_w(w_x) - self.setlen(self.len + 1) - self.buffer[self.len - 1] = x + index = self.len + self.setlen(index + 1) + self.buffer[index] = x # List interface def descr_count(self, space, w_val): diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -421,14 +421,19 @@ assert expected_length >= 0 return self.fixedview(w_obj, expected_length, unroll=True) + def listview_no_unpack(self, w_obj): + if type(w_obj) is W_ListObject: + return w_obj.getitems() + elif isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): + return w_obj.getitems_copy() + elif isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): + return w_obj.getitems() + else: + return None + def listview(self, w_obj, expected_length=-1): - if type(w_obj) is W_ListObject: - t = w_obj.getitems() - elif isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): - t = 
w_obj.getitems_copy() - elif isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): - t = w_obj.getitems() - else: + t = self.listview_no_unpack(w_obj) + if t is None: return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 and len(t) != expected_length: raise self._wrap_expected_length(expected_length, len(t)) From noreply at buildbot.pypy.org Wed Jul 2 15:51:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 15:51:07 +0200 (CEST) Subject: [pypy-commit] pypy default: Add some logic in an attempt to fix issue #1782. There are cases where it slows things down; see comments. Message-ID: <20140702135107.3B54F1D3522@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72313:640d0a2fedc6 Date: 2014-07-02 14:33 +0200 http://bitbucket.org/pypy/pypy/changeset/640d0a2fedc6/ Log: Add some logic in an attempt to fix issue #1782. There are cases where it slows things down; see comments. diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -61,6 +61,13 @@ return self.send_ex(w_arg) def send_ex(self, w_arg, operr=None): + pycode = self.pycode + if jit.we_are_jitted() and should_not_inline(pycode): + generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg, + operr=operr, pycode=pycode) + return self._send_ex(w_arg, operr) + + def _send_ex(self, w_arg, operr): space = self.space if self.running: raise OperationError(space.w_ValueError, @@ -72,8 +79,7 @@ if operr is None: operr = OperationError(space.w_StopIteration, space.w_None) raise operr - # XXX it's not clear that last_instr should be promoted at all - # but as long as it is necessary for call_assembler, let's do it early + last_instr = jit.promote(frame.last_instr) if last_instr == -1: if w_arg and not space.is_w(w_arg, space.w_None): @@ -214,3 +220,38 @@ "interrupting generator of ") break block = block.previous + + + +def 
get_printable_location_genentry(bytecode): + return '%s ' % (bytecode.get_repr(),) +generatorentry_driver = jit.JitDriver(greens=['pycode'], + reds=['gen', 'w_arg', 'operr'], + get_printable_location = + get_printable_location_genentry, + name='generatorentry') + +from pypy.tool.stdlib_opcode import HAVE_ARGUMENT, opmap +YIELD_VALUE = opmap['YIELD_VALUE'] + + at jit.elidable_promote() +def should_not_inline(pycode): + # Should not inline generators with more than one "yield", + # as an approximative fix (see issue #1782). There are cases + # where it slows things down; for example calls to a simple + # generator that just produces a few simple values with a few + # consecutive "yield" statements. It fixes the near-infinite + # slow-down in issue #1782, though... + count_yields = 0 + code = pycode.co_code + n = len(code) + i = 0 + while i < n: + c = code[i] + op = ord(c) + if op == YIELD_VALUE: + count_yields += 1 + i += 1 + if op >= HAVE_ARGUMENT: + i += 2 + return count_yields >= 2 diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -278,4 +278,21 @@ def f(): yield 1 raise StopIteration - assert tuple(f()) == (1,) \ No newline at end of file + assert tuple(f()) == (1,) + + +def test_should_not_inline(space): + from pypy.interpreter.generator import should_not_inline + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + return g.func_code + ''') + assert should_not_inline(w_co) == False + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + yield x + 6 + return g.func_code + ''') + assert should_not_inline(w_co) == True From noreply at buildbot.pypy.org Wed Jul 2 16:26:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 16:26:16 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #1779: PyList_GetItem() took a time proportional to the length of Message-ID: 
<20140702142616.AABEA1C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72314:07de89e151e9 Date: 2014-07-02 16:24 +0200 http://bitbucket.org/pypy/pypy/changeset/07de89e151e9/ Log: Issue #1779: PyList_GetItem() took a time proportional to the length of the list in case the list's strategy is not the default one. diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -46,11 +46,11 @@ IndexError exception.""" if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) - wrappeditems = w_list.getitems() - if index < 0 or index >= len(wrappeditems): + if index < 0 or index >= w_list.length(): raise OperationError(space.w_IndexError, space.wrap( "list index out of range")) - return borrow_from(w_list, wrappeditems[index]) + w_item = w_list.getitem(index) + return borrow_from(w_list, w_item) @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) From noreply at buildbot.pypy.org Wed Jul 2 17:07:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 17:07:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Tweaks to timeit.py: Message-ID: <20140702150717.6B46B1D350D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72315:bbabcc9974eb Date: 2014-07-02 17:06 +0200 http://bitbucket.org/pypy/pypy/changeset/bbabcc9974eb/ Log: Tweaks to timeit.py: * don't use itertools.repeat(), just plainly do "while n > 0: n -= 1". * recompile the source code each time before calling inner(). There are situations like Issue #1776 where PyPy tries to reuse the JIT code from before, but that's not going to work: the first thing the function does is the "-s" statement, which may declare new classes (here a namedtuple). We end up with bridges from the inner loop; more and more of them every time we call inner(). 
diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -55,11 +55,6 @@ import gc import sys import time -try: - import itertools -except ImportError: - # Must be an older Python version (see timeit() below) - itertools = None __all__ = ["Timer"] @@ -81,7 +76,8 @@ def inner(_it, _timer): %(setup)s _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 %(stmt)s _t1 = _timer() return _t1 - _t0 @@ -96,7 +92,8 @@ def inner(_it, _timer, _func=func): setup() _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 _func() _t1 = _timer() return _t1 - _t0 @@ -133,9 +130,11 @@ else: raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec code in globals(), ns - self.inner = ns["inner"] + def make_inner(): + code = compile(src, dummy_src_name, "exec") + exec code in globals(), ns + return ns["inner"] + self.make_inner = make_inner elif hasattr(stmt, '__call__'): self.src = None if isinstance(setup, basestring): @@ -144,7 +143,8 @@ exec _setup in globals(), ns elif not hasattr(setup, '__call__'): raise ValueError("setup is neither a string nor callable") - self.inner = _template_func(setup, stmt) + inner = _template_func(setup, stmt) + self.make_inner = lambda: inner else: raise ValueError("stmt is neither a string nor callable") @@ -185,15 +185,12 @@ to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. 
""" - if itertools: - it = itertools.repeat(None, number) - else: - it = [None] * number + inner = self.make_inner() gcold = gc.isenabled() if '__pypy__' not in sys.builtin_module_names: gc.disable() # only do that on CPython try: - timing = self.inner(it, self.timer) + timing = inner(number, self.timer) finally: if gcold: gc.enable() From noreply at buildbot.pypy.org Wed Jul 2 17:44:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 17:44:14 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #1790: implement numpy.empty() differently than numpy.zeros(). Message-ID: <20140702154414.A1AB81D34FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72316:4d1d1c2d78ae Date: 2014-07-02 17:37 +0200 http://bitbucket.org/pypy/pypy/changeset/4d1d1c2d78ae/ Log: Issue #1790: implement numpy.empty() differently than numpy.zeros(). diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -12,7 +12,7 @@ 'scalar' : 'ctors.build_scalar', 'array': 'ctors.array', 'zeros': 'ctors.zeros', - 'empty': 'ctors.zeros', + 'empty': 'ctors.empty', 'empty_like': 'ctors.empty_like', 'fromstring': 'ctors.fromstring', 'frombuffer': 'ctors.frombuffer', diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -28,12 +28,12 @@ self.implementation = implementation @staticmethod - def from_shape(space, shape, dtype, order='C', w_instance=None): + def from_shape(space, shape, dtype, order='C', w_instance=None, zero=True): from pypy.module.micronumpy import concrete from pypy.module.micronumpy.strides import calc_strides strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, - backstrides) + backstrides, zero=zero) if w_instance: return wrap_impl(space, space.type(w_instance), 
w_instance, impl) return W_NDimArray(impl) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -369,9 +369,11 @@ class ConcreteArray(ConcreteArrayNotOwning): - def __init__(self, shape, dtype, order, strides, backstrides, storage=lltype.nullptr(RAW_STORAGE)): + def __init__(self, shape, dtype, order, strides, backstrides, + storage=lltype.nullptr(RAW_STORAGE), zero=True): if storage == lltype.nullptr(RAW_STORAGE): - storage = dtype.itemtype.malloc(support.product(shape) * dtype.elsize) + storage = dtype.itemtype.malloc(support.product(shape) * + dtype.elsize, zero=zero) ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides, storage) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -91,13 +91,19 @@ return w_arr -def zeros(space, w_shape, w_dtype=None, w_order=None): +def _zeros_or_empty(space, w_shape, w_dtype, w_order, zero): dtype = space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = descriptor.variable_dtype(space, dtype.char + '1') shape = shape_converter(space, w_shape, dtype) - return W_NDimArray.from_shape(space, shape, dtype=dtype) + return W_NDimArray.from_shape(space, shape, dtype=dtype, zero=zero) + +def empty(space, w_shape, w_dtype=None, w_order=None): + return _zeros_or_empty(space, w_shape, w_dtype, w_order, zero=False) + +def zeros(space, w_shape, w_dtype=None, w_order=None): + return _zeros_or_empty(space, w_shape, w_dtype, w_order, zero=True) @unwrap_spec(subok=bool) @@ -111,7 +117,8 @@ if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = descriptor.variable_dtype(space, dtype.char + '1') return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, - w_instance=w_a if subok else 
None) + w_instance=w_a if subok else None, + zero=False) def _fromstring_text(space, s, count, sep, length, dtype): diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -2,6 +2,20 @@ class AppTestNumSupport(BaseNumpyAppTest): + def test_zeros(self): + from numpypy import zeros, empty + a = zeros(3) + assert len(a) == 3 + assert a[0] == a[1] == a[2] == 0 + a = empty(1000) + assert len(a) == 1000 + for i in range(1000): + if a[i] != 0: + break + else: + raise AssertionError( + "empty() returned a zeroed out array of length 1000 (unlikely)") + def test_where(self): from numpypy import where, ones, zeros, array a = [1, 2, 3, 0, -3] diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -11,7 +11,7 @@ class MockDtype(object): class itemtype(object): @staticmethod - def malloc(size): + def malloc(size, zero=True): return None def __init__(self): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -117,8 +117,11 @@ def __repr__(self): return self.__class__.__name__ - def malloc(self, size): - return alloc_raw_storage(size, track_allocation=False, zero=True) + def malloc(self, size, zero=True): + if zero: + return alloc_raw_storage(size, track_allocation=False, zero=True) + else: + return alloc_raw_storage(size, track_allocation=False, zero=False) class Primitive(object): _mixin_ = True From noreply at buildbot.pypy.org Wed Jul 2 17:52:50 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Wed, 2 Jul 2014 17:52:50 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk 64bit-c2: Float asString produced wrong results because image instances were read wrongly 
Message-ID: <20140702155250.9CEDF1D34FF@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: 64bit-c2 Changeset: r847:54b5ca0cfbd4 Date: 2014-06-12 20:14 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/54b5ca0cfbd4/ Log: Float asString produced wrong results because image instances were read wrongly diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -561,8 +561,9 @@ return bytes[:stop] # omit odd bytes def get_ruints(self, required_len=-1): - from rpython.rlib.rarithmetic import r_uint - words = [r_uint(x) for x in self.chunk.data] + from rpython.rlib.rarithmetic import r_uint32 + # XXX: Fix for 64bit image support + words = [r_uint32(x) for x in self.chunk.data] if required_len != -1 and len(words) != required_len: raise CorruptImageError("Expected %d words, got %d" % (required_len, len(words))) return words From noreply at buildbot.pypy.org Wed Jul 2 17:52:51 2014 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 2 Jul 2014 17:52:51 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk vref: commit first translating version where all senders are vrefs Message-ID: <20140702155251.DF91D1D34FF@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: vref Changeset: r848:a0a057d2e444 Date: 2014-07-02 13:05 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a0a057d2e444/ Log: commit first translating version where all senders are vrefs diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -86,6 +86,7 @@ s_new_context = self.c_loop(s_new_context) except StackOverflow, e: s_new_context = e.s_context + s_new_context.unvirtualize_sender() except Return, nlr: s_new_context = s_sender while s_new_context is not nlr.s_target_context: @@ -98,9 +99,11 @@ except ProcessSwitch, p: if self.trace: print "====== Switch from: %s to: %s ======" % (s_new_context.short_str(), p.s_new_context.short_str()) + 
s_new_context.unvirtualize_sender() s_new_context = p.s_new_context def c_loop(self, s_context, may_context_switch=True): + assert isinstance(s_context, ContextPartShadow) old_pc = 0 if not jit.we_are_jitted() and may_context_switch: self.quick_check_for_interrupt(s_context) diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -203,7 +203,7 @@ return r_uint(val) - @jit.elidable + # @jit.elidable def as_repr_string(self): return "W_SmallInteger(%d)" % self.value @@ -457,7 +457,7 @@ name = self.s_class.name return "a %s" % (name or '?',) - @jit.elidable + # @jit.elidable def as_repr_string(self): return self.as_embellished_string("W_O /w Class", "") @@ -491,7 +491,7 @@ class W_AbstractPointersObject(W_AbstractObjectWithClassReference): """Common object.""" _attrs_ = ['shadow'] - + def changed(self): # This is invoked when an instance-variable is changed. # Kept here in case it might be usefull in the future. @@ -550,7 +550,7 @@ def _get_shadow(self): return self.shadow - + @objectmodel.specialize.arg(2) def attach_shadow_of_class(self, space, TheClass): shadow = TheClass(space, self) @@ -632,7 +632,7 @@ w_other.changed() return True - @jit.elidable + # @jit.elidable def as_repr_string(self): return W_AbstractObjectWithClassReference.as_embellished_string(self, className='W_PointersObject', @@ -651,11 +651,11 @@ self.fieldtypes = fieldtypes_of_length(self.s_class, size) for i in range(size): # do it by hand for the JIT's sake vars[i] = w_nil - + def set_vars(self, new_vars): self._vars = new_vars make_sure_not_resized(self._vars) - + def fillin(self, space, g_self): W_AbstractPointersObject.fillin(self, space, g_self) from spyvm.fieldtypes import fieldtypes_of @@ -1013,7 +1013,7 @@ _immutable_fields_ = ['_realsize', 'display', '_depth', '_real_depth_buffer'] pixelbuffer = None - + @staticmethod def create(space, w_class, size, depth, display): if depth < 8: diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- 
a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1315,7 +1315,7 @@ # Set some fields s_block_ctx.store_pc(s_block_ctx.initialip()) try: - s_block_ctx.store_s_sender(s_frame) + s_block_ctx.store_s_sender(virtual=jit.virtual_ref(s_frame)) except SenderChainManipulation, e: assert e.s_context == s_block_ctx return s_block_ctx diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -34,7 +34,7 @@ import_from_mixin(version.VersionMixin) version = None - + def __init__(self, space, w_self): AbstractShadow.__init__(self, space, w_self) self.changed() @@ -78,7 +78,7 @@ _attrs_ = ["name", "_instance_size", "instance_varsized", "instance_kind", "_s_methoddict", "_s_superclass", "subclass_s"] - + def __init__(self, space, w_self): # fields added here should also be in objspace.py:56ff, 300ff self.name = '' @@ -449,17 +449,19 @@ class ContextPartShadow(AbstractRedirectingShadow): __metaclass__ = extendabletype - _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', + _attrs_ = ['_direct_s_sender', '_virtual_s_sender', '_pc', '_temps_and_stack', '_stack_ptr', 'instances_w'] _virtualizable_ = [ - "_s_sender", "_pc", + "_virtual_s_sender", "_direct_s_sender", + "_pc", "_temps_and_stack[*]", "_stack_ptr", "_w_self", "_w_self_size" ] def __init__(self, space, w_self): - self._s_sender = None + self._virtual_s_sender = jit.vref_None + self._direct_s_sender = None AbstractRedirectingShadow.__init__(self, space, w_self) self.instances_w = {} @@ -541,25 +543,35 @@ " Return self of the method, or the method that contains the block " return self.s_home().w_receiver() - def store_s_sender(self, s_sender): - assert s_sender is None or isinstance(s_sender, ContextPartShadow) - self._s_sender = s_sender - raise error.SenderChainManipulation(self) + def store_s_sender(self, direct=None, virtual=jit.vref_None, raiseError=True): + assert direct is None or virtual is jit.vref_None # can only set one or the other + if self._virtual_s_sender is not 
jit.vref_None and virtual is jit.vref_None: + # if we have a vref but we're removing it... + sender = self._virtual_s_sender() + jit.virtual_ref_finish(self._virtual_s_sender, sender) + self._virtual_s_sender = virtual + self._direct_s_sender = direct + if raiseError: + raise error.SenderChainManipulation(self) def store_w_sender(self, w_sender): assert isinstance(w_sender, model.W_PointersObject) if w_sender.is_same_object(self.space.w_nil): - self._s_sender = None + self.store_s_sender(raiseError=False) else: - self.store_s_sender(w_sender.as_context_get_shadow(self.space)) + self.store_s_sender(direct=w_sender.as_context_get_shadow(self.space)) def w_sender(self): - if self._s_sender is None: + sender = self.s_sender() + if sender is None: return self.space.w_nil - return self._s_sender.w_self() + return sender.w_self() def s_sender(self): - return self._s_sender + if self._direct_s_sender: + return self._direct_s_sender + else: + return self._virtual_s_sender() def store_unwrap_pc(self, w_pc): if w_pc.is_same_object(self.space.w_nil): @@ -592,10 +604,16 @@ def mark_returned(self): self.store_pc(-1) try: - self.store_s_sender(None) + self.store_s_sender() except error.SenderChainManipulation, e: assert self == e.s_context + def unvirtualize_sender(self): + sender = self.s_sender() + self.store_s_sender(direct=sender, raiseError=False) + if sender: + sender.unvirtualize_sender() + def is_returned(self): return self.pc() == -1 and self.w_sender is self.space.w_nil @@ -879,7 +897,7 @@ s_new_context.store_w_method(s_method.w_self()) if s_sender: try: - s_new_context.store_s_sender(s_sender) + s_new_context.store_s_sender(virtual=jit.virtual_ref(s_sender)) except error.SenderChainManipulation, e: assert s_new_context == e.s_context s_new_context.store_w_receiver(w_receiver) From noreply at buildbot.pypy.org Wed Jul 2 17:52:53 2014 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 2 Jul 2014 17:52:53 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk vref: 
virtualize sender only around c_loop Message-ID: <20140702155253.00D1E1D34FF@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: vref Changeset: r849:2f3aaab262c5 Date: 2014-07-02 17:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2f3aaab262c5/ Log: virtualize sender only around c_loop diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -86,7 +86,6 @@ s_new_context = self.c_loop(s_new_context) except StackOverflow, e: s_new_context = e.s_context - s_new_context.unvirtualize_sender() except Return, nlr: s_new_context = s_sender while s_new_context is not nlr.s_target_context: @@ -99,15 +98,28 @@ except ProcessSwitch, p: if self.trace: print "====== Switch from: %s to: %s ======" % (s_new_context.short_str(), p.s_new_context.short_str()) - s_new_context.unvirtualize_sender() s_new_context = p.s_new_context def c_loop(self, s_context, may_context_switch=True): + s_sender = s_context.s_sender() + s_sender_ref = jit.vref_None + if s_sender: + s_sender_ref = jit.virtual_ref(s_sender) + s_context.store_s_sender(virtual=s_sender_ref, raiseError=False) + try: + self._c_loop_virtual(s_context, may_context_switch=may_context_switch) + finally: + if s_sender: + jit.virtual_ref_finish(s_sender_ref, s_sender) + s_context.restore_s_sender(s_sender) + + def _c_loop_virtual(self, s_context, may_context_switch=True): assert isinstance(s_context, ContextPartShadow) old_pc = 0 if not jit.we_are_jitted() and may_context_switch: self.quick_check_for_interrupt(s_context) method = s_context.s_method() + while True: pc = s_context.pc() if pc < old_pc: diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1315,7 +1315,7 @@ # Set some fields s_block_ctx.store_pc(s_block_ctx.initialip()) try: - s_block_ctx.store_s_sender(virtual=jit.virtual_ref(s_frame)) + s_block_ctx.store_s_sender(direct=s_frame) except SenderChainManipulation, e: assert e.s_context == 
s_block_ctx return s_block_ctx diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -543,12 +543,14 @@ " Return self of the method, or the method that contains the block " return self.s_home().w_receiver() + def restore_s_sender(self, s_direct): + if self._virtual_s_sender is not jit.vref_None: + # virtual sender wasn't already cleared by e.g. mark_returned + self._virtual_s_sender = jit.vref_None + self._direct_s_sender = s_direct + def store_s_sender(self, direct=None, virtual=jit.vref_None, raiseError=True): assert direct is None or virtual is jit.vref_None # can only set one or the other - if self._virtual_s_sender is not jit.vref_None and virtual is jit.vref_None: - # if we have a vref but we're removing it... - sender = self._virtual_s_sender() - jit.virtual_ref_finish(self._virtual_s_sender, sender) self._virtual_s_sender = virtual self._direct_s_sender = direct if raiseError: @@ -608,12 +610,6 @@ except error.SenderChainManipulation, e: assert self == e.s_context - def unvirtualize_sender(self): - sender = self.s_sender() - self.store_s_sender(direct=sender, raiseError=False) - if sender: - sender.unvirtualize_sender() - def is_returned(self): return self.pc() == -1 and self.w_sender is self.space.w_nil @@ -897,7 +893,7 @@ s_new_context.store_w_method(s_method.w_self()) if s_sender: try: - s_new_context.store_s_sender(virtual=jit.virtual_ref(s_sender)) + s_new_context.store_s_sender(direct=s_sender) except error.SenderChainManipulation, e: assert s_new_context == e.s_context s_new_context.store_w_receiver(w_receiver) From noreply at buildbot.pypy.org Wed Jul 2 17:53:08 2014 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 2 Jul 2014 17:53:08 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk vref: stub bitblt primitives so they don't error Message-ID: <20140702155308.5310A1D34FF@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: vref Changeset: r850:f5a42c0946bf Date: 2014-07-02 17:49 +0200 
http://bitbucket.org/pypy/lang-smalltalk/changeset/f5a42c0946bf/ Log: stub bitblt primitives so they don't error diff too long, truncating to 2000 out of 351216 lines diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes old mode 100644 new mode 100755 --- a/images/Squeak4.5-noBitBlt.changes +++ b/images/Squeak4.5-noBitBlt.changes @@ -36,4 +36,144 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! !Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! !ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. 
Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! !LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. The nearest even integer is chosen in this case. Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. An even mantissa is rounded upper only if the fraction part is not a tie." 
"Algorihm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." | mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." ((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. [(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." ^nil! ! 
!LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anythiEng answer the global, class, or pool association with that nameE, if any, else add it to Undeclared a answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. 
key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. ^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notiEfying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nilE->Form class "! ! !Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. "If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. 
"Check for gradual underflow, in which case the mantissa will loose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. "Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. ^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version cares of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. 
]. self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! I encode names and literals into tree nodes with byte codes for the compiler. Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! 
Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. 
It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! bindingOf: varName ifAbsent: aBlock ^self associationAt: varName ifAbsent: aBlock! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! init: sourceStream cue: aCue failBlock: aBlock self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. 
The argument noPattern is a Boolean that is true if the the sourceStream does not contain a method header (i.e., for DoIts)." | methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. [repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! 
!CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! 
getClass ^ class! ! !CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the contxt. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. 
self source: cue source. ^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! setCue: aCue cue := aCue. "Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! 
! !ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | aString newTranslations assoc currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | newTranslations currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [| aString assoc | Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'! scanFrom: aByteStream environment: anEnvironment "This should probably be reimplemented using an environment for compilation. 
For now, don't change anything" ^ self scanFrom: aByteStream! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'! bindingOf: varName ifAbsent: aBlock "SystemDictionary includes Symbols only" ^super bindingOf: varName asSymbol ifAbsent: aBlock! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'! undeclared ^ self at: #Undeclared! ! "System"! !ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955! expectedFailures ^ #().! ! "Tests"! ReleaseBuilder subclass: #ReleaseBuilderFor4dot5 instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'ReleaseBuilder'! !ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0! The release builder for Squeak 4.5! !ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'! transferCurrentPackagesAsUser: username password: password "Copy the packages currently loaded in the image from the trunk repository to my releaseRepository." | trunkRep releaseRep | trunkRep := self trunkRepository. releaseRep := self releaseRepository user: username; password: password; yourself. MCWorkingCopy allManagers do: [ : eachWorkingCopy | eachWorkingCopy ancestors do: [ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse: [ (trunkRep versionWithInfo: eachVersionInfo) ifNil: [ Warning signal: eachVersionInfo name , ' not found in ', trunkRep ] ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! openWelcomeWorkspaces TheWorldMainDockingBar instance showWelcomeText: #squeakUserInterface label: 'Squeak User Interface' in: (40 @ 40 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #workingWithSqueak label: 'Working With Squeak' in: (80 @ 80 extent: 500 @ 300). 
TheWorldMainDockingBar instance showWelcomeText: #licenseInformation label: 'License Information' in: (120 @ 120 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeFutureDirections label: 'Future Directions' in: (160 @ 160 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeToSqueak label: 'Welcome to Squeak 4.5' in: (200 @ 200 extent: 500 @ 300)! ! !ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'! prepareNewBuild super prepareNewBuild. MCMockPackageInfo initialize.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'! releaseRepository "At release time, change 'trunk' to 'squeak45'." ^ MCHttpRepository location: 'http://source.squeak.org/trunk' user: 'squeak' password: 'squeak'! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'! setDisplayExtent: extent "Uncomment next line when the primitives become available in the Squeak VM." " DisplayScreen hostWindowSize: extent." Display extent = extent ifFalse: [ Warning signal: 'Display extent not set to ', extent ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! setPreferences Preferences installBrightWindowColors ; setPreference: #scrollBarsWithoutMenuButton toValue: true ; setPreference: #swapMouseButtons toValue: true ; setPreference: #annotationPanes toValue: true ; setPreference: #showSplitterHandles toValue: false ; setPreference: #showBoundsInHalo toValue: true ; setPreference: #alternateHandlesLook toValue: false ; setPreference: #roundedMenuCorners toValue: false ; setPreference: #roundedWindowCorners toValue: false. PluggableButtonMorph roundedButtonCorners: false. FillInTheBlankMorph roundedDialogCorners: false. Workspace shouldStyle: false. NetNameResolver enableIPv6: true.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! 
switchToNewRepository | old44Repository | MCMcmUpdater defaultUpdateURL: self releaseRepository description. old44Repository := MCRepositoryGroup default repositories detect: [:each | each description includesSubString: 'squeak44'] ifNone: [nil]. old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository]. MCRepositoryGroup default addRepository: self releaseRepository! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! versionString ^ 'Squeak4.5'.! ! ReleaseBuilder class removeSelector: #transferCurrentPackages! "ReleaseBuilder"! !Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114! initialize self install! ! "Environments"! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878! initPattern: aString notifying: req return: aBlock | result | self init: (ReadStream on: aString asString) cue: (CompilationCue source: aString requestor: req) failBlock: [^nil]. encoder := self. result := aBlock value: (self pattern: false inContext: nil). encoder := failBlock := nil. "break cycles" ^result! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471! parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock | c | c := CompilationCue source: sourceStream context: aContext class: class category: aCategory requestor: req. ^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963! evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). 
If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | theClass | theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class). self setCue: (CompilationCue source: textOrStream context: aContext receiver: receiver class: theClass environment: theClass environment category: nil requestor: aRequestor). ^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488! from: textOrStream class: aClass classified: aCategory context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass category: aCategory requestor: req)! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309! from: textOrStream class: aClass context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass requestor: req) ! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506! init: aClass context: aContext notifying: anObject | c | c := CompilationCue context: aContext class: aClass requestor: nil. self init: c notifying: anObject! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698! temps: tempVars literals: lits class: cl "Initialize this encoder for decompilation." self setCue: (CompilationCue class: cl). supered := false. nTemps := tempVars size. tempVars do: [:node | scopeTable at: node name put: node]. literalStream := WriteStream on: (Array new: lits size). literalStream nextPutAll: lits. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32.! ! "Compiler"! 
!Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010! addClassVarName: aString "Add the argument, aString, as a class variable of the receiver. Signal an error if the first character of aString is not capitalized, or if it is already a variable named in the class." | symbol oldState | oldState := self copy. aString first canBeGlobalVarInitial ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.']. symbol := aString asSymbol. self withAllSubclasses do: [:subclass | (self canFindWithoutEnvironment: symbol) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: aString; signal: aString, ' is already defined']]. classPool == nil ifTrue: [classPool := Dictionary new]. (classPool includesKey: symbol) ifFalse: ["Pick up any refs in Undeclared" classPool declare: symbol from: environment undeclared. SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024! bindingOf: varName ^ self bindingOf: varName environment: self environment! ! !Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804! category "Answer the system organization category for the receiver. First check whether the category name stored in the ivar is still correct and only if this fails look it up (latter is much more expensive)" category ifNotNil: [ :symbol | ((self environment organization listAtCategoryNamed: symbol) includes: self name) ifTrue: [ ^symbol ] ]. category := self environment organization categoryOfElement: self name. ^category! ! !Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730! declare: varString "Declare class variables common to all instances. Answer whether recompilation is advisable." | newVars conflicts | newVars := (Scanner new scanFieldNames: varString) collect: [:x | x asSymbol]. 
newVars do: [:var | var first canBeGlobalVarInitial ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']]. conflicts := false. classPool == nil ifFalse: [(classPool keys reject: [:x | newVars includes: x]) do: [:var | self removeClassVarName: var]]. (newVars reject: [:var | self classPool includesKey: var]) do: [:var | "adding" "check if new vars defined elsewhere" (self canFindWithoutEnvironment: var) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: var; signal: var, ' is already defined'. conflicts := true]]. newVars size > 0 ifTrue: [classPool := self classPool. "in case it was nil" newVars do: [:var | classPool declare: var from: environment undeclared]]. ^conflicts! ! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475! removeClassVarName: aString "Remove the class variable whose name is the argument, aString, from the names defined in the receiver, a class. Create an error notification if aString is not a class variable or if it is still being used in the code of the class." | aSymbol | aSymbol := aString asSymbol. (classPool includesKey: aSymbol) ifFalse: [^self error: aString, ' is not a class variable']. self withAllSubclasses do:[:subclass | (Array with: subclass with: subclass class) do:[:classOrMeta | (classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty ifFalse: [ InMidstOfFileinNotification signal ifTrue: [ Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '. ^ environment undeclared declare: aSymbol from: classPool]. (self confirm: (aString,' is still used in code of class ', classOrMeta name, '.\Is it okay to move it to Undeclared?') withCRs) ifTrue:[^Undeclared declare: aSymbol from: classPool] ifFalse:[^self]]]]. classPool removeKey: aSymbol. classPool isEmpty ifTrue: [classPool := nil]. ! ! !Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206! 
rename: aString "The new name of the receiver is the argument, aString." | oldName newName | (newName := aString asSymbol) = (oldName := self name) ifTrue: [^ self]. (self environment includesKey: newName) ifTrue: [^ self error: newName , ' already exists']. (environment undeclared includesKey: newName) ifTrue: [self inform: 'There are references to, ' , aString printString , ' from Undeclared. Check them after this change.']. name := newName. self environment renameClass: self from: oldName! ! !ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430! name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe "Define a new class in the given environment. If unsafe is true do not run any validation checks. This facility is provided to implement important system changes." | oldClass instVars classVars copyOfOldClass newClass | environ := env. instVars := Scanner new scanFieldNames: instVarString. classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol]. "Validate the proposed name" unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]]. oldClass := env at: className ifAbsent:[nil]. oldClass isBehavior ifFalse: [oldClass := nil] "Already checked in #validateClassName:" ifTrue: [ copyOfOldClass := oldClass copy. copyOfOldClass superclass addSubclass: copyOfOldClass]. [ | newCategory needNew force organization oldCategory | unsafe ifFalse:[ "Run validation checks so we know that we have a good chance for recompilation" (self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil]. (self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. 
(self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]]. "See if we need a new subclass" needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. needNew == nil ifTrue:[^nil]. "some error" (needNew and:[unsafe not]) ifTrue:[ "Make sure we don't redefine any dangerous classes" (self tooDangerousClasses includes: oldClass name) ifTrue:[ self error: oldClass name, ' cannot be changed'. ]. "Check if the receiver should not be redefined" (oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[ self notify: oldClass name asText allBold, ' should not be redefined. \Proceed to store over it.' withCRs]]. needNew ifTrue:[ "Create the new class" newClass := self newSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. newClass == nil ifTrue:[^nil]. "Some error" newClass setName: className. newClass environment: environ. ] ifFalse:[ "Reuse the old class" newClass := oldClass. ]. "Install the class variables and pool dictionaries... " force := (newClass declare: classVarString) | (newClass sharing: poolString). "... classify ..." newCategory := category asSymbol. organization := environ ifNotNil:[environ organization]. oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol]. organization classify: newClass name under: newCategory suppressIfDefault: true. "... recompile ..." newClass := self recompile: force from: oldClass to: newClass mutate: false. "... export if not yet done ..." (environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[ [environ at: newClass name put: newClass] on: AttemptToWriteReadOnlyGlobal do:[:ex| ex resume: true]. environ flushClassNameCache. ]. newClass doneCompiling. "... notify interested clients ..." oldClass isNil ifTrue: [ SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory. ^ newClass]. 
newCategory ~= oldCategory ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category] ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.]. ] ensure: [copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass]. Behavior flushObsoleteSubclasses. ]. ^newClass! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019! superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class." | env | env := EnvironmentRequest signal ifNil: [newSuper environment]. ^self name: t inEnvironment: env subclassOf: newSuper type: newSuper typeOfClass instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912! superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable byte-sized nonpointer variables." | oldClassOrNil actualType env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields']. (aClass isVariable and: [aClass isWords]) ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields']. oldClassOrNil := aClass environment at: t ifAbsent:[nil]. actualType := (oldClassOrNil notNil and: [oldClassOrNil typeOfClass == #compiledMethod]) ifTrue: [#compiledMethod] ifFalse: [#bytes]. env := EnvironmentRequest signal ifNil: [aClass environment]. 
^self name: t inEnvironment: env subclassOf: aClass type: actualType instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442! superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #variable instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098! superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable word-sized nonpointer variables." | env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a word subclass of a class with named fields']. (aClass isVariable and: [aClass isBytes]) ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #words instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028! 
superclass: aClass weakSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class (the receiver) in which the subclass is to have weak indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #weak instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! "Kernel"! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029! ambiguousSelector: aString inRange: anInterval | correctedSelector userSelection offset intervalWithOffset | self interactive ifFalse: [ "In non interactive mode, compile with backward comapatibility: $- is part of literal argument" Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'. ^super ambiguousSelector: aString inRange: anInterval]. "handle the text selection" userSelection := cue requestor selectionInterval. intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset. cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last. cue requestor select. "Build the menu with alternatives" correctedSelector := AmbiguousSelector signalName: aString inRange: intervalWithOffset. correctedSelector ifNil: [^self fail]. "Execute the selected action" offset := self substituteWord: correctedSelector wordInterval: intervalWithOffset offset: 0. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset. token := (correctedSelector readStream upTo: Character space) asSymbol! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136! 
collectTemporaryDeclarationsFrom: methodNode | tempsMarks str | tempsMarks := OrderedCollection new. str := cue requestor text asString. methodNode accept: (ParseNodeEnumerator ofBlock: [ :aNode | | mark | (aNode class canUnderstand: #tempsMark) ifTrue: [mark := aNode tempsMark. (mark notNil and: [ mark between: 1 and: str size ] and: [ (str at: mark) = $| ]) ifTrue: [ tempsMarks addLast: aNode ]]]). (tempsMark notNil and: [ tempsMark between: 1 and: str size ] and: [ (str at: tempsMark) = $| ]) ifTrue: [ tempsMarks addLast: self ]. ^ tempsMarks sorted: [ :a :b | a tempsMark > b tempsMark ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606! correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction "Correct the proposedKeyword to some selector symbol, correcting the original text if such action is indicated. abortAction is invoked if the proposedKeyword couldn't be converted into a valid selector. Spots is an ordered collection of intervals within the test stream of the for each of the keyword parts." | correctSelector userSelection | "If we can't ask the user, assume that the keyword will be defined later" self interactive ifFalse: [^proposedKeyword asSymbol]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spots first first to: spots last last. cue requestor select. correctSelector := UnknownSelector name: proposedKeyword. correctSelector ifNil: [^abortAction value]. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. self substituteSelector: correctSelector keywords wordIntervals: spots. ^(proposedKeyword last ~~ $: and: [correctSelector last == $:]) ifTrue: [abortAction value] ifFalse: [correctSelector]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242! 
correctVariable: proposedVariable interval: spot "Correct the proposedVariable to a known variable, or declare it as a new variable if such action is requested. We support declaring lowercase variables as temps or inst-vars, and uppercase variables as Globals or ClassVars, depending on whether the context is nil (class=UndefinedObject). Spot is the interval within the test stream of the variable. rr 3/4/2004 10:26 : adds the option to define a new class. " "Check if this is an i-var, that has been corrected already (ugly)" "Display the pop-up menu" | binding userSelection action | (encoder classEncoding instVarNames includes: proposedVariable) ifTrue: [^InstanceVariableNode new name: proposedVariable index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)]. "If we can't ask the user for correction, make it undeclared" self interactive ifFalse: [^encoder undeclared: proposedVariable]. "First check to see if the requestor knows anything about the variable" (binding := cue requestor bindingOf: proposedVariable) ifNotNil: [^encoder global: binding name: proposedVariable]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spot first to: spot last. cue requestor select. "Build the menu with alternatives" action := UndeclaredVariable signalFor: self name: proposedVariable inRange: spot. action ifNil: [^self fail]. "Execute the selected action" cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. ^action value! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921! declareUndeclaredTemps: methodNode "Declare any undeclared temps, declaring them at the smallest enclosing scope." | undeclared userSelection blocksToVars | (undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self]. userSelection := cue requestor selectionInterval. blocksToVars := IdentityDictionary new. 
undeclared do: [:var| (blocksToVars at: (var tag == #method ifTrue: [methodNode block] ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)]) ifAbsentPut: [SortedCollection new]) add: var name]. (blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil: [:rootVars| rootVars do: [:varName| self pasteTempAtMethodLevel: varName]]. (blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do: [:block| | decl | decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b]. block temporaries isEmpty ifTrue: [self substituteWord: ' | ', decl, ' |' wordInterval: (block tempsMark + 1 to: block tempsMark) offset: requestorOffset] ifFalse: [self substituteWord: decl, ' ' wordInterval: (block tempsMark to: block tempsMark - 1) offset: requestorOffset]]. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset. ReparseAfterSourceEditing signal! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770! defineClass: className "prompts the user to define a new class, asks for it's category, and lets the users edit further the definition" | sym cat def d2 | sym := className asSymbol. cat := UIManager default request: 'Enter class category : ' initialAnswer: self encoder classEncoding theNonMetaClass category. cat ifEmpty: [cat := 'Unknown']. def := 'Object subclass: #' , sym , ' instanceVariableNames: '''' classVariableNames: '''' poolDictionaries: '''' category: ''' , cat , ''''. d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def. d2 ifEmpty: [d2 := def]. Compiler evaluate: d2. ^ encoder global: (cue environment bindingOf: sym) name: sym! ! !Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567! externalFunctionDeclaration "Parse the function declaration for a call to an external library." 
| descriptorClass callType modifier retType externalName args argType module fn | descriptorClass := cue environment valueOf: #ExternalFunction ifAbsent: [^ false]. callType := descriptorClass callingConventionFor: here. callType == nil ifTrue:[^false]. [modifier := descriptorClass callingConventionModifierFor: token. modifier notNil] whileTrue: [self advance. callType := callType bitOr: modifier]. "Parse return type" self advance. retType := self externalType: descriptorClass. retType == nil ifTrue:[^self expected:'return type']. "Parse function name or index" externalName := here. (self match: #string) ifTrue:[externalName := externalName asSymbol] ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']]. (self matchToken: #'(') ifFalse:[^self expected:'argument list']. args := WriteStream on: Array new. [here == #')'] whileFalse:[ argType := self externalType: descriptorClass. argType == nil ifTrue:[^self expected:'argument']. argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType]. ]. (self matchToken: #')') ifFalse:[^self expected:')']. (self matchToken: 'module:') ifTrue:[ module := here. (self match: #string) ifFalse:[^self expected: 'String']. module := module asSymbol]. Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn| fn := xfn name: externalName module: module callType: callType returnType: retType argumentTypes: args contents. self allocateLiteral: fn. ]. (self matchToken: 'error:') ifTrue: [| errorCodeVariable | errorCodeVariable := here. (hereType == #string or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)']. self advance. self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)). fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]] ifFalse: [self addPragma: (Pragma keyword: #primitive: arguments: #(120))]. ^true ! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169! 
interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223! notify: string at: location cue requestor isNil ifTrue: [(encoder == self or: [encoder isNil]) ifTrue: [^ self fail "failure setting up syntax error"]. SyntaxErrorNotification inClass: encoder classEncoding category: cue category withCode: (source contents asText copyReplaceFrom: location to: location - 1 with: ((string , ' ->') asText allBold addAttribute: TextColor red; yourself)) doitFlag: doitFlag errorMessage: string location: location] ifFalse: [cue requestor notify: string , ' ->' at: location in: source]. ^self fail! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108! pasteTempAtMethodLevel: name | insertion delta theTextString characterBeforeMark | theTextString := cue requestor text string. characterBeforeMark := theTextString at: tempsMark-1 ifAbsent: [$ ]. (theTextString at: tempsMark) = $| ifTrue: [ "Paste it before the second vertical bar" insertion := name, ' '. characterBeforeMark isSeparator ifFalse: [ insertion := ' ', insertion]. delta := 0. ] ifFalse: [ "No bars - insert some with CR, tab" insertion := '| ' , name , ' |',String cr. delta := 2. "the bar and CR" characterBeforeMark = Character tab ifTrue: [ insertion := insertion , String tab. delta := delta + 1. "the tab" ]. ]. tempsMark := tempsMark + (self substituteWord: insertion wordInterval: (tempsMark to: tempsMark-1) offset: 0) - delta! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305! queryUndefined | varStart varName | varName := parseNode key. varStart := self endOfLastToken + requestorOffset - varName size + 1. cue requestor selectFrom: varStart to: varStart + varName size - 1; select. (UndefinedVariable name: varName) ifFalse: [^ self fail]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341! 
removeEmptyTempDeclarationsFrom: methodNode | sourceCode madeChanges tempsMarkHolder | sourceCode := cue requestor text asString. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. madeChanges := false. tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start | tempsMarkChar0 := (sourceCode at: currentBlock tempsMark). tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1). tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2). tempsMarkChar0 = $| & tempsMarkChar1 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 1]. tempsMarkChar0 = $| & tempsMarkChar1 = $ & tempsMarkChar2 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 2]. start notNil & end notNil ifTrue: [ | lineStart lineEnd | lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]). lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ]. ((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ]) ifTrue: [ start := lineStart. end := lineEnd ]. cue requestor correctFrom: start to: end with: ''. madeChanges := true. currentBlock tempsMark: nil ] ]. madeChanges ifTrue: [ReparseAfterSourceEditing signal]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281! removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks | start end | end := currentBlock tempsMark - 1. ["Beginning at right temp marker..." start := end - temp size + 1. end < temp size or: [ (str at: start) = $| ] or: [ temp = (str copyFrom: start to: end) and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator) & ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]] whileFalse: [ "Search left for the unused temp" end := cue requestor nextTokenFrom: end direction: -1 ]. 
(end < temp size or: [ (str at: start) = $| ]) ifFalse: [(str at: start - 1) = $ ifTrue: [ start := start - 1 ]. cue requestor correctFrom: start to: end with: ''. someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)]. ^true ]. ^false! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194! removeUnusedTemps: methodNode "Scan for unused temp names, and prompt the user about the prospect of removing each one found" | madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange | madeChanges := false. tempMarkHoldersToChange := OrderedCollection new. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. unusedTempNames := encoder unusedTempNames select: [ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]]. tempsMarkHolder do: [ :currentBlock | tempMarkHoldersToChange add: currentBlock. unusedTempNames do: [ :temp | (self removeUnusedTemporaryNamed: temp from: cue requestor text asString lookingAt: currentBlock movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]]. madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode. ReparseAfterSourceEditing signal ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326! substituteWord: correctWord wordInterval: spot offset: o "Substitute the correctSelector into the (presumed interactive) receiver. Update requestorOffset based on the delta size and answer the updated offset." cue requestor correctFrom: spot first + o to: spot last + o with: correctWord. requestorOffset := requestorOffset + correctWord size - spot size. ^o + correctWord size - spot size! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807! 
temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863! compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). 
If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted." | methodNode method theClass | theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class. self from: textOrStream class: theClass context: aContext notifying: aRequestor. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [ methodNode generateWithTempNames ] ifFalse: [methodNode generate]. logFlag ifTrue: [SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext]. ^method! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593! format: aStream noPattern: noPattern ifFail: failBlock ^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387! notify: aString at: location "Refer to the comment in Object|notify:." ^ cue requestor == nil ifTrue: [SyntaxErrorNotification inClass: cue getClass category: cue category withCode: (sourceStream contents copyReplaceFrom: location to: location - 1 with: aString) doitFlag: false errorMessage: aString location: location] ifFalse: [cue requestor notify: aString at: location in: sourceStream]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201! parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." 
self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! !Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! classEncoding "This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view." ^ cue getClass! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819! 
encodeLiteral: object ^self name: object key: (cue literalScannedAs: object notifying: self) class: LiteralNode type: LdLitType set: litSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010! encodeSelector: aSelector ^self name: aSelector key: aSelector class: SelectorNode type: SendType set: selectorSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123! environment "Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)" ^cue environment! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497! lookupInPools: varName ifFound: assocBlock ^Symbol hasInterned: varName ifTrue: [:sym| (cue bindingOf: sym) ifNil: [^false] ifNotNil: [:assoc| assocBlock value: assoc]]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306! possibleNamesFor: proposedName | results | results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil. ^ proposedName correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012! possibleVariablesFor: proposedVariable | results | results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil. proposedVariable first canBeGlobalVarInitial ifTrue: [ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ]. ^ proposedVariable correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830! undeclared: name | sym | requestor interactive ifTrue: [requestor requestor == #error: ifTrue: [requestor error: 'Undeclared']. ^self notify: 'Undeclared']. "Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. 
To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. ***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. "This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. 
newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. "was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. "Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." 
newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). "53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). 
"Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. "Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. 
ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. 
From noreply at buildbot.pypy.org Wed Jul 2 19:57:08 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 2 Jul 2014 19:57:08 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Fix frame_depth calculated size Message-ID: <20140702175708.E8E251C0ECA@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r72317:034413cecc37 Date: 2014-07-02 17:56 +0000 http://bitbucket.org/pypy/pypy/changeset/034413cecc37/ Log: Fix frame_depth calculated size diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -9,7 +9,8 @@ FPR_SAVE_AREA, NONVOLATILES_FLOAT, FLOAT_INT_CONVERSION, FORCE_INDEX, SIZE_LOAD_IMM_PATCH_SP, - FORCE_INDEX_OFS, LR_BC_OFFSET) + FORCE_INDEX_OFS, LR_BC_OFFSET, + JITFRAME_FIXED_SIZE) from rpython.jit.backend.ppc.helper.assembler import Saved_Volatiles from rpython.jit.backend.ppc.helper.regalloc import _check_imm_arg import rpython.jit.backend.ppc.register as r @@ -783,6 +784,10 @@ operations = newoperations return operations + def update_frame_depth(self, frame_depth): + baseofs = self.cpu.get_baseofs_of_frame_field() + self.current_clt.frame_info.update_frame_depth(baseofs, frame_depth) + def assemble_loop(self, loopname, inputargs, operations, looptoken, log): clt = CompiledLoopToken(self.cpu, looptoken.number) clt.allgcrefs = [] @@ -813,11 +818,11 @@ frame_info = self.datablockwrapper.malloc_aligned(jitframe.JITFRAMEINFO_SIZE, alignment=WORD) clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) - clt.allgcreafs = [] - clt.frame_info.clear() direct_bootstrap_code = self.mc.currpos() frame_depth = self.compute_frame_depth(spilling_area, param_depth) + frame_depth += JITFRAME_FIXED_SIZE + self.update_frame_depth(frame_depth) self.gen_bootstrap_code(start_pos, frame_depth) self.write_pending_failure_recoveries() From noreply at buildbot.pypy.org Wed Jul 2 
20:59:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 20:59:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Prescale the dictionary in ll_dict_update(). Message-ID: <20140702185911.9C3FE1D34FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72318:f63fbf006d6f Date: 2014-07-02 19:53 +0200 http://bitbucket.org/pypy/pypy/changeset/f63fbf006d6f/ Log: Prescale the dictionary in ll_dict_update(). diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py --- a/rpython/rtyper/lltypesystem/rdict.py +++ b/rpython/rtyper/lltypesystem/rdict.py @@ -540,18 +540,21 @@ # avoid extra branches. def ll_dict_resize(d): - old_entries = d.entries - old_size = len(old_entries) # make a 'new_size' estimate and shrink it if there are many # deleted entry markers. See CPython for why it is a good idea to # quadruple the dictionary size as long as it's not too big. num_items = d.num_items + 1 if num_items > 50000: new_estimate = num_items * 2 else: new_estimate = num_items * 4 + _ll_dict_resize_to(d, new_estimate) +ll_dict_resize.oopspec = 'dict.resize(d)' + +def _ll_dict_resize_to(d, new_estimate): new_size = DICT_INITSIZE while new_size <= new_estimate: new_size *= 2 - # + old_entries = d.entries + old_size = len(d.entries) d.entries = lltype.typeOf(old_entries).TO.allocate(new_size) d.num_items = 0 d.resize_counter = new_size * 2 @@ -563,7 +566,6 @@ ll_dict_insertclean(d, entry.key, entry.value, hash) i += 1 old_entries.delete() -ll_dict_resize.oopspec = 'dict.resize(d)' # ------- a port of CPython's dictobject.c's lookdict implementation ------- PERTURB_SHIFT = 5 @@ -816,6 +818,16 @@ ll_clear.oopspec = 'dict.clear(d)' def ll_update(dic1, dic2): + # Prescale 'dic1', assuming that most items don't collide. + # If this assumption is false, 'dic1' becomes at most two times too large. 
+ # * dic2.num_items = upper bound on the number of items added + # * (dic1.resize_counter - 1) // 3 = room left in dic1 + # so, if dic2 has 1 item, we need dic1.resize_counter > 3 + # if dic2 has 2 items we need dic1.resize_counter > 6 etc. + if not (dic1.resize_counter > dic2.num_items * 3): + new_estimate = (dic1.num_items + dic2.num_items) * 2 + _ll_dict_resize_to(dic1, new_estimate) + # entries = dic2.entries d2len = len(entries) i = 0 From noreply at buildbot.pypy.org Wed Jul 2 20:59:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 20:59:44 +0200 (CEST) Subject: [pypy-commit] pypy default: Update: now stm is 'only' 2x slower rather than 10x on translate.py. Message-ID: <20140702185944.579E31D3500@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72319:787e204c5c92 Date: 2014-07-02 20:10 +0200 http://bitbucket.org/pypy/pypy/changeset/787e204c5c92/ Log: Update: now stm is 'only' 2x slower rather than 10x on translate.py. diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -92,9 +92,9 @@ We're busy fixing them as we find them; feel free to `report bugs`_. * It runs with an overhead as low as 20% on examples like "richards". - There are also other examples with higher overheads --up to 10x for - "translate.py"-- which we are still trying to understand. One suspect - is our partial GC implementation, see below. + There are also other examples with higher overheads --currently up to + 2x for "translate.py"-- which we are still trying to understand. + One suspect is our partial GC implementation, see below. * Currently limited to 1.5 GB of RAM (this is just a parameter in `core.h`__). Memory overflows are not correctly handled; they cause @@ -111,9 +111,8 @@ * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. 
Programs allocating large - numbers of small objects that don't immediately die, as well as - programs that modify large lists or dicts, suffer from these missing - optimizations. + numbers of small objects that don't immediately die (surely a common + situation) suffer from these missing optimizations. * The GC has no support for destructors: the ``__del__`` method is never called (including on file objects, which won't be closed for you). From noreply at buildbot.pypy.org Wed Jul 2 20:59:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 20:59:51 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: hg merge default Message-ID: <20140702185951.C77D11D3500@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72320:dd8e2f69fe96 Date: 2014-07-02 20:44 +0200 http://bitbucket.org/pypy/pypy/changeset/dd8e2f69fe96/ Log: hg merge default diff too long, truncating to 2000 out of 35820 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,11 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +0000000000000000000000000000000000000000 release-2.3.0 +394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +0000000000000000000000000000000000000000 release-2.2=3.1 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -44,31 +44,33 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer Hakan Ardo Benjamin Peterson - Matti Picus - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns Eric van Riet Paap + Wim Lavrijsen + Ronan 
Lamy Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn @@ -80,52 +82,62 @@ Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera + Laurence Tratt Wanja Saatkamp + Ivan Sichmann Freitas Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. 
Muller + Jeremy Thurgood + Gregor Wegberg + Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -135,18 +147,16 @@ Dusty Phillips Lukas Renggli Guenter Jantzen - Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -159,18 +169,19 @@ Karl Bartel Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -182,19 +193,18 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -203,8 +213,11 @@ Alejandro J. Cura Jacob Oscarson Travis Francis Athougies + Ryan Gonzalez Kristjan Valur Jonsson + Sebastian Pawluś Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -218,13 +231,14 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -234,28 +248,39 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + rafalgalczynski at gmail.com Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo + w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + Asmo Soinio + Stefan Marr + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -263,12 +288,13 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -56,6 +56,9 @@ for line in longrepr.splitlines(): py.builtin.print_(" %s" % line, file=self.logfile) for key, text in sections: + # py.io.StdCaptureFD may send in unicode + if isinstance(text, unicode): + text = text.encode('utf-8') py.builtin.print_(" ", file=self.logfile) py.builtin.print_(" -------------------- %s --------------------" % key.rstrip(), file=self.logfile) diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -389,12 +389,13 @@ func.__name__ = name_or_ordinal return func -class PyDLL(CDLL): - """This class represents the Python library itself. It allows to - access Python API functions. 
The GIL is not released, and - Python exceptions are handled correctly. - """ - _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI +# Not in PyPy +#class PyDLL(CDLL): +# """This class represents the Python library itself. It allows to +# access Python API functions. The GIL is not released, and +# Python exceptions are handled correctly. +# """ +# _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI if _os.name in ("nt", "ce"): @@ -447,15 +448,8 @@ return self._dlltype(name) cdll = LibraryLoader(CDLL) -pydll = LibraryLoader(PyDLL) - -if _os.name in ("nt", "ce"): - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - +# not on PyPy +#pydll = LibraryLoader(PyDLL) if _os.name in ("nt", "ce"): windll = LibraryLoader(WinDLL) diff --git a/lib-python/2.7/ctypes/test/test_values.py b/lib-python/2.7/ctypes/test/test_values.py --- a/lib-python/2.7/ctypes/test/test_values.py +++ b/lib-python/2.7/ctypes/test/test_values.py @@ -4,6 +4,7 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test @@ -23,7 +24,8 @@ class Win_ValuesTestCase(unittest.TestCase): """This test only works when python itself is a dll/shared library""" - + + @xfail def test_optimizeflag(self): # This test accesses the Py_OptimizeFlag intger, which is # exported by the Python dll. @@ -40,6 +42,7 @@ else: self.assertEqual(opt, 2) + @xfail def test_frozentable(self): # Python exports a PyImport_FrozenModules symbol. This is a # pointer to an array of struct _frozen entries. 
The end of the @@ -75,6 +78,7 @@ from ctypes import _pointer_type_cache del _pointer_type_cache[struct_frozen] + @xfail def test_undefined(self): self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol") diff --git a/lib-python/2.7/imputil.py b/lib-python/2.7/imputil.py --- a/lib-python/2.7/imputil.py +++ b/lib-python/2.7/imputil.py @@ -422,7 +422,8 @@ saved back to the filesystem for future imports. The source file's modification timestamp must be provided as a Long value. """ - codestring = open(pathname, 'rU').read() + with open(pathname, 'rU') as fp: + codestring = fp.read() if codestring and codestring[-1] != '\n': codestring = codestring + '\n' code = __builtin__.compile(codestring, pathname, 'exec') @@ -603,8 +604,8 @@ self.desc = desc def import_file(self, filename, finfo, fqname): - fp = open(filename, self.desc[1]) - module = imp.load_module(fqname, fp, filename, self.desc) + with open(filename, self.desc[1]) as fp: + module = imp.load_module(fqname, fp, filename, self.desc) module.__file__ = filename return 0, module, { } diff --git a/lib-python/2.7/modulefinder.py b/lib-python/2.7/modulefinder.py --- a/lib-python/2.7/modulefinder.py +++ b/lib-python/2.7/modulefinder.py @@ -109,16 +109,16 @@ def run_script(self, pathname): self.msg(2, "run_script", pathname) - fp = open(pathname, READ_MODE) - stuff = ("", "r", imp.PY_SOURCE) - self.load_module('__main__', fp, pathname, stuff) + with open(pathname, READ_MODE) as fp: + stuff = ("", "r", imp.PY_SOURCE) + self.load_module('__main__', fp, pathname, stuff) def load_file(self, pathname): dir, name = os.path.split(pathname) name, ext = os.path.splitext(name) - fp = open(pathname, READ_MODE) - stuff = (ext, "r", imp.PY_SOURCE) - self.load_module(name, fp, pathname, stuff) + with open(pathname, READ_MODE) as fp: + stuff = (ext, "r", imp.PY_SOURCE) + self.load_module(name, fp, pathname, stuff) def import_hook(self, name, caller=None, fromlist=None, level=-1): self.msg(3, "import_hook", name, caller, 
fromlist, level) @@ -461,6 +461,8 @@ fp, buf, stuff = self.find_module("__init__", m.__path__) self.load_module(fqname, fp, buf, stuff) self.msgout(2, "load_package ->", m) + if fp: + fp.close() return m def add_module(self, fqname): diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -48,6 +48,9 @@ def tearDown(self): os.chdir(self.old_dir) + import gc + # Force a collection which should close FileType() options + gc.collect() for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -74,6 +74,29 @@ size2 = os.path.getsize(filename) self.assertTrue(size1 > size2 >= size0) + def test_sync(self): + # check if sync works at all, not sure how to check it + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.sync() + + def test_get_key(self): + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g['x'], 'x' * 10000) + + def test_key_with_null_bytes(self): + key = 'a\x00b' + value = 'c\x00d' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) def test_main(): run_unittest(TestGdbm) diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -55,11 +55,6 @@ import gc import sys import time -try: - import itertools -except ImportError: - # Must be an older Python version (see timeit() below) - itertools = None __all__ = ["Timer"] @@ -81,7 +76,8 @@ def inner(_it, 
_timer): %(setup)s _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 %(stmt)s _t1 = _timer() return _t1 - _t0 @@ -96,7 +92,8 @@ def inner(_it, _timer, _func=func): setup() _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 _func() _t1 = _timer() return _t1 - _t0 @@ -133,9 +130,11 @@ else: raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec code in globals(), ns - self.inner = ns["inner"] + def make_inner(): + code = compile(src, dummy_src_name, "exec") + exec code in globals(), ns + return ns["inner"] + self.make_inner = make_inner elif hasattr(stmt, '__call__'): self.src = None if isinstance(setup, basestring): @@ -144,7 +143,8 @@ exec _setup in globals(), ns elif not hasattr(setup, '__call__'): raise ValueError("setup is neither a string nor callable") - self.inner = _template_func(setup, stmt) + inner = _template_func(setup, stmt) + self.make_inner = lambda: inner else: raise ValueError("stmt is neither a string nor callable") @@ -185,15 +185,12 @@ to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. 
""" - if itertools: - it = itertools.repeat(None, number) - else: - it = [None] * number + inner = self.make_inner() gcold = gc.isenabled() if '__pypy__' not in sys.builtin_module_names: gc.disable() # only do that on CPython try: - timing = self.inner(it, self.timer) + timing = inner(number, self.timer) finally: if gcold: gc.enable() diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -219,6 +219,8 @@ if restype is None: import ctypes restype = ctypes.c_int + if self._argtypes_ is None: + self._argtypes_ = [] self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -3,6 +3,8 @@ import sys import os +irc_header = "And now for something completely different" + def interactive_console(mainmodule=None, quiet=False): # set sys.{ps1,ps2} just before invoking the interactive interpreter. 
This @@ -15,8 +17,7 @@ if not quiet: try: from _pypy_irc_topic import some_topic - text = "And now for something completely different: ``%s''" % ( - some_topic(),) + text = "%s: ``%s''" % ( irc_header, some_topic()) while len(text) >= 80: i = text[:80].rfind(' ') print(text[:i]) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -13,7 +13,15 @@ k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') - output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = os.getuid() + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s_%s%s' % ( + username, k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) return output_dir diff --git a/lib_pypy/_tkinter/license.terms b/lib_pypy/_tkinter/license.terms new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/license.terms @@ -0,0 +1,39 @@ +This software is copyrighted by the Regents of the University of +California, Sun Microsystems, Inc., and other parties. The following +terms apply to all files associated with the software unless explicitly +disclaimed in individual files. + +The authors hereby grant permission to use, copy, modify, distribute, +and license this software and its documentation for any purpose, provided +that existing copyright notices are retained in all copies and that this +notice is included verbatim in any distributions. No written agreement, +license, or royalty fee is required for any of the authorized uses. +Modifications to this software may be copyrighted by their authors +and need not follow the licensing terms described here, provided that +the new terms are clearly indicated on the first page of each file where +they apply. 
+ +IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +MODIFICATIONS. + +GOVERNMENT USE: If you are acquiring this software on behalf of the +U.S. government, the Government shall have only "Restricted Rights" +in the software and related documentation as defined in the Federal +Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you +are acquiring the software on behalf of the Department of Defense, the +software shall be classified as "Commercial Computer Software" and the +Government shall have only "Restricted Rights" as defined in Clause +252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the +authors grant the U.S. Government and others acting in its behalf +permission to use and distribute the software in accordance with the +terms specified in this license. 
diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -121,6 +121,10 @@ incdirs = [] linklibs = ['tcl85', 'tk85'] libdirs = [] +elif sys.platform == 'darwin': + incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/Current/Headers/'] + linklibs = ['tcl', 'tk'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -443,6 +443,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + for key, val in ffi._parser._int_constants.items(): + if key not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -24,6 +24,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -99,6 +100,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -128,9 +130,10 @@ finally: if lock is not None: lock.release() - return ast, macros + # csource will be used to find buggy source text + return ast, macros, csource - def convert_pycparser_error(self, e, csource): + def _convert_pycparser_error(self, e, csource): # xxx look for ":NUM:" at the start of str(e) and try to interpret # it as a line number line = None @@ -142,6 +145,12 @@ csourcelines = csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): 
+ line = self._convert_pycparser_error(e, csource) + + msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: @@ -160,14 +169,9 @@ self._packed = prev_packed def _internal_parse(self, csource): - ast, macros = self._parse(csource) + ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -175,27 +179,61 @@ if decl.name == '__dotdotdot__': break # - for decl in iterator: - if isinstance(decl, pycparser.c_ast.Decl): - self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise api.CDefError("typedef does not declare any name", - decl) - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_type(decl.name) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + try: + for decl in iterator: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) + and decl.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_type(decl.name) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + 
pycparser.c_ast.IdentifierType) and + decl.type.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_ptr_type(decl.name) + else: + realtype = self._get_type(decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) + self._add_constants(key, pyvalue) + elif value == '...': + self._declare('macro ' + key, value) else: - raise api.CDefError("unrecognized construct", decl) + raise api.CDefError('only supports the syntax "#define ' + '%s ..." 
(literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) def _parse_decl(self, decl): node = decl.type @@ -227,7 +265,7 @@ self._declare('variable ' + decl.name, tp) def parse_type(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): @@ -306,7 +344,8 @@ if ident == 'void': return model.void_type if ident == '__dotdotdot__': - raise api.FFIError('bad usage of "..."') + raise api.FFIError(':%d: bad usage of "..."' % + typenode.coord.line) return resolve_common_type(ident) # if isinstance(type, pycparser.c_ast.Struct): @@ -333,7 +372,8 @@ return self._get_struct_union_enum_type('union', typenode, name, nested=True) # - raise api.FFIError("bad or unsupported type declaration") + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) @@ -499,6 +539,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -506,8 +550,8 @@ self._partial_length = True return '...' 
# - raise api.FFIError("unsupported expression: expected a " - "simple numeric constant") + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) def _build_enum_type(self, explicit_name, decls): if decls is not None: @@ -522,6 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -535,3 +580,5 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + self._add_constants(k, v) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -38,6 +38,7 @@ import distutils.errors # dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() options = dist.get_option_dict('build_ext') options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -89,43 +89,54 @@ # by generate_cpy_function_method(). prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") - prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},') - prnt(' {NULL, NULL} /* Sentinel */') + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # # standard init. 
modname = self.verifier.get_module_name() - if sys.version_info >= (3,): - prnt('static struct PyModuleDef _cffi_module_def = {') - prnt(' PyModuleDef_HEAD_INIT,') - prnt(' "%s",' % modname) - prnt(' NULL,') - prnt(' -1,') - prnt(' _cffi_methods,') - prnt(' NULL, NULL, NULL, NULL') - prnt('};') - prnt() - initname = 'PyInit_%s' % modname - createmod = 'PyModule_Create(&_cffi_module_def)' - errorcase = 'return NULL' - finalreturn = 'return lib' - else: - initname = 'init%s' % modname - createmod = 'Py_InitModule("%s", _cffi_methods)' % modname - errorcase = 'return' - finalreturn = 'return' + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() prnt('PyMODINIT_FUNC') - prnt('%s(void)' % initname) + prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') - prnt(' lib = %s;' % createmod) - prnt(' if (lib == NULL || %s < 0)' % ( - self._chained_list_constants[False],)) - prnt(' %s;' % errorcase) - prnt(' _cffi_init();') - prnt(' %s;' % finalreturn) + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') def load_library(self): # XXX review all usages of 'self' here! 
@@ -394,7 +405,7 @@ meth = 'METH_O' else: meth = 'METH_VARARGS' - self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth)) + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop @@ -481,8 +492,8 @@ if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname, - layoutfuncname)) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: @@ -589,13 +600,7 @@ 'variable type'),)) assert delayed else: - prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name)) - prnt(' o = PyInt_FromLong((long)(%s));' % (name,)) - prnt(' else if ((%s) <= 0)' % (name,)) - prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,)) - prnt(' else') - prnt(' o = PyLong_FromUnsignedLongLong(' - '(unsigned long long)(%s));' % (name,)) + prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: @@ -632,13 +637,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int %s(PyObject *lib)' % funcname) prnt('{') @@ -760,17 +770,30 @@ #include #include -#ifdef MS_WIN32 -#include /* for alloca() */ -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; 
-typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif #if PY_MAJOR_VERSION < 3 @@ -795,6 +818,15 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ @@ -804,14 +836,14 @@ PyLong_FromLongLong(x))) #define _cffi_to_c_int(o, type) \ - (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ - : _cffi_to_c_i8(o)) : \ - sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ - : _cffi_to_c_i16(o)) : \ - sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ - : _cffi_to_c_i32(o)) : \ - sizeof(type) == 8 ? (((type)-1) > 0 ? _cffi_to_c_u64(o) \ - : _cffi_to_c_i64(o)) : \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? 
(type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -885,25 +917,32 @@ return PyBool_FromLong(was_alive); } -static void _cffi_init(void) +static int _cffi_init(void) { - PyObject *module = PyImport_ImportModule("_cffi_backend"); - PyObject *c_api_object; + PyObject *module, *c_api_object = NULL; + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) - return; + goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) - return; + goto failure; if (!PyCapsule_CheckExact(c_api_object)) { - Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); - return; + goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -249,10 +249,10 @@ prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') self.export_symbols.append(layoutfuncname) - prnt('ssize_t %s(ssize_t i)' % (layoutfuncname,)) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) - prnt(' static ssize_t nums[] = {') + prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize in tp.enumfields(): @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] + BFunc = 
self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -410,13 +410,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) @@ -453,7 +458,7 @@ else: BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: @@ -547,20 +552,29 @@ #include #include /* XXX for ssize_t on some platforms */ -#ifdef _WIN32 -# include -# define snprintf _snprintf -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef SSIZE_T ssize_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char 
_Bool; +# endif #else -# include +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif ''' diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py new file mode 100644 --- /dev/null +++ b/lib_pypy/gdbm.py @@ -0,0 +1,174 @@ +import cffi, os + +ffi = cffi.FFI() +ffi.cdef(''' +#define GDBM_READER ... +#define GDBM_WRITER ... +#define GDBM_WRCREAT ... +#define GDBM_NEWDB ... +#define GDBM_FAST ... +#define GDBM_SYNC ... +#define GDBM_NOLOCK ... +#define GDBM_REPLACE ... + +void* gdbm_open(char *, int, int, int, void (*)()); +void gdbm_close(void*); + +typedef struct { + char *dptr; + int dsize; +} datum; + +datum gdbm_fetch(void*, datum); +int gdbm_delete(void*, datum); +int gdbm_store(void*, datum, datum, int); +int gdbm_exists(void*, datum); + +int gdbm_reorganize(void*); + +datum gdbm_firstkey(void*); +datum gdbm_nextkey(void*, datum); +void gdbm_sync(void*); + +char* gdbm_strerror(int); +int gdbm_errno; + +void free(void*); +''') + +try: + lib = ffi.verify(''' + #include "gdbm.h" + ''', libraries=['gdbm']) +except cffi.VerificationError as e: + # distutils does not preserve the actual message, + # but the verification is simple enough that the + # failure must be due to missing gdbm dev libs + raise ImportError('%s: %s' %(e.__class__.__name__, e)) + +class error(Exception): + pass + +def _fromstr(key): + if not isinstance(key, str): + raise TypeError("gdbm mappings have string indices only") + return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} + +class gdbm(object): + ll_dbm = None + + def __init__(self, filename, iflags, mode): + res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) + self.size = -1 + if not res: + self._raise_from_errno() + self.ll_dbm = res + + def close(self): + if self.ll_dbm: + lib.gdbm_close(self.ll_dbm) + self.ll_dbm = None + + def _raise_from_errno(self): + if ffi.errno: + raise error(os.strerror(ffi.errno)) + raise error(lib.gdbm_strerror(lib.gdbm_errno)) + + def __len__(self): + if 
self.size < 0: + self.size = len(self.keys()) + return self.size + + def __setitem__(self, key, value): + self._check_closed() + self._size = -1 + r = lib.gdbm_store(self.ll_dbm, _fromstr(key), _fromstr(value), + lib.GDBM_REPLACE) + if r < 0: + self._raise_from_errno() + + def __delitem__(self, key): + self._check_closed() + res = lib.gdbm_delete(self.ll_dbm, _fromstr(key)) + if res < 0: + raise KeyError(key) + + def __contains__(self, key): + self._check_closed() + return lib.gdbm_exists(self.ll_dbm, _fromstr(key)) + has_key = __contains__ + + def __getitem__(self, key): + self._check_closed() + drec = lib.gdbm_fetch(self.ll_dbm, _fromstr(key)) + if not drec.dptr: + raise KeyError(key) + res = str(ffi.buffer(drec.dptr, drec.dsize)) + lib.free(drec.dptr) + return res + + def keys(self): + self._check_closed() + l = [] + key = lib.gdbm_firstkey(self.ll_dbm) + while key.dptr: + l.append(str(ffi.buffer(key.dptr, key.dsize))) + nextkey = lib.gdbm_nextkey(self.ll_dbm, key) + lib.free(key.dptr) + key = nextkey + return l + + def firstkey(self): + self._check_closed() + key = lib.gdbm_firstkey(self.ll_dbm) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res + + def nextkey(self, key): + self._check_closed() + key = lib.gdbm_nextkey(self.ll_dbm, _fromstr(key)) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res + + def reorganize(self): + self._check_closed() + if lib.gdbm_reorganize(self.ll_dbm) < 0: + self._raise_from_errno() + + def _check_closed(self): + if not self.ll_dbm: + raise error("GDBM object has already been closed") + + __del__ = close + + def sync(self): + self._check_closed() + lib.gdbm_sync(self.ll_dbm) + +def open(filename, flags='r', mode=0666): + if flags[0] == 'r': + iflags = lib.GDBM_READER + elif flags[0] == 'w': + iflags = lib.GDBM_WRITER + elif flags[0] == 'c': + iflags = lib.GDBM_WRCREAT + elif flags[0] == 'n': + iflags = lib.GDBM_NEWDB + else: + raise error("First 
flag must be one of 'r', 'w', 'c' or 'n'") + for flag in flags[1:]: + if flag == 'f': + iflags |= lib.GDBM_FAST + elif flag == 's': + iflags |= lib.GDBM_SYNC + elif flag == 'u': + iflags |= lib.GDBM_NOLOCK + else: + raise error("Flag '%s' not supported" % flag) + return gdbm(filename, iflags, mode) + +open_flags = "rwcnfsu" diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -113,7 +113,7 @@ try: for name in modlist: __import__(name) - except (ImportError, CompilationError, py.test.skip.Exception), e: + except (ImportError, CompilationError, py.test.skip.Exception) as e: errcls = e.__class__.__name__ raise Exception( "The module %r is disabled\n" % (modname,) + diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile --- a/pypy/doc/Makefile +++ b/pypy/doc/Makefile @@ -7,63 +7,80 @@ PAPER = BUILDDIR = _build +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex man changes linkcheck doctest +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " man to make manual pages" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make 
pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: - -rm -rf $(BUILDDIR)/* + rm -rf $(BUILDDIR)/* html: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + pickle: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ @@ -72,35 +89,89 @@ @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc" +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." 
+ @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/PyPy" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyPy" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + latex: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." man: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man" + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." 
+ +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." + +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -105,7 +105,7 @@ while True: try: w_key = space.next(w_iter) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_StopIteration): raise # re-raise other app-level exceptions break @@ -348,8 +348,12 @@ **objects** - Normal rules apply. Special methods are not honoured, except ``__init__``, - ``__del__`` and ``__iter__``. + Normal rules apply. 
The only special methods that are honoured are + ``__init__``, ``__del__``, ``__len__``, ``__getitem__``, ``__setitem__``, + ``__getslice__``, ``__setslice__``, and ``__iter__``. To handle slicing, + ``__getslice__`` and ``__setslice__`` must be used; using ``__getitem__`` and + ``__setitem__`` for slicing isn't supported. Additionally, using negative + indices for slicing is still not support, even when using ``__getslice__``. This layout makes the number of types to take care about quite limited. @@ -567,7 +571,7 @@ try: ... - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_XxxError): raise ... diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -18,11 +18,31 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('.')) + +# -- Read The Docs theme config ------------------------------------------------ + +# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org +on_rtd = os.environ.get('READTHEDOCS', None) == 'True' + +if not on_rtd: # only import and set the theme if we're building docs locally + try: + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + except ImportError: + print('sphinx_rtd_theme is not installed') + html_theme = 'default' + +# otherwise, readthedocs.org uses their theme by default, so no need to specify it + + # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'pypyconfig'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', + 'pypyconfig'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -45,9 +65,9 @@ # built documents. # # The short X.Y version. -version = '2.2' +version = '2.3' # The full version, including alpha/beta/rc tags. -release = '2.2.1' +release = '2.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -91,7 +111,7 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' +#html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the diff --git a/pypy/doc/config/translation.log.txt b/pypy/doc/config/translation.log.txt --- a/pypy/doc/config/translation.log.txt +++ b/pypy/doc/config/translation.log.txt @@ -2,4 +2,4 @@ These must be enabled by setting the PYPYLOG environment variable. The exact set of features supported by PYPYLOG is described in -pypy/translation/c/src/debug_print.h. +rpython/translator/c/src/debug_print.h. diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -99,6 +99,7 @@ Stian Andreassen Laurence Tratt Wanja Saatkamp + Ivan Sichmann Freitas Gerald Klix Mike Blume Oscar Nierstrasz @@ -183,7 +184,9 @@ Alejandro J. 
Cura Jacob Oscarson Travis Francis Athougies + Ryan Gonzalez Kristjan Valur Jonsson + Sebastian Pawluś Neil Blakey-Milner anatoly techtonik Lutz Paelike @@ -216,6 +219,7 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + rafalgalczynski at gmail.com Floris Bruynooghe Laurens Van Houtven Akira Li @@ -245,6 +249,8 @@ Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + Asmo Soinio + Stefan Marr jiaaro opassembler.py Antony Lee diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -348,4 +348,9 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). +* PyPy prints a random line from past #pypy IRC topics at startup in + interactive mode. In a released version, this behaviour is supressed, but + setting the environment variable PYPY_IRC_TOPIC will bring it back. Note that + downstream package providers have been known to totally disable this feature. + .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,13 +72,11 @@ Here is a list of the limitations and missing features of the current implementation: -* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer - of PyPy, at your own risks and without doing anything sensible about - the GIL. Since PyPy 2.3, these functions are also named with an extra - "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, - but it might more or less work in simple cases if you do. (Obviously, - assuming the PyObject pointers you get have any particular fields in - any particular order is just going to crash.) +* ``ctypes.pythonapi`` is missing. 
In previous versions, it was present + and redirected to the `cpyext` C API emulation layer, but our + implementation did not do anything sensible about the GIL and the + functions were named with an extra "Py", for example + ``PyPyInt_FromLong()``. It was removed for being unhelpful. * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -8,6 +8,9 @@ *Articles about PyPy published so far, most recent first:* (bibtex_ file) +* `A Way Forward in Parallelising Dynamic Languages`_, + R. Meier, A. Rigo + * `Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`_, C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo @@ -71,6 +74,7 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib +.. _`A Way Forward in Parallelising Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2014/position-paper.pdf .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf .. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf .. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf @@ -93,6 +97,11 @@ Talks and Presentations ---------------------------------- +*This part is no longer updated.* The complete list is here__ (in +alphabetical order). + +.. __: https://bitbucket.org/pypy/extradoc/src/extradoc/talk/ + Talks in 2010 +++++++++++++ diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -465,9 +465,13 @@ This is documented (here__ and here__). It needs 4 GB of RAM to run "rpython targetpypystandalone" on top of PyPy, a bit more when running -on CPython. 
If you have less than 4 GB it will just swap forever (or -fail if you don't have enough swap). On 32-bit, divide the numbers by -two. +on top of CPython. If you have less than 4 GB free, it will just swap +forever (or fail if you don't have enough swap). And we mean *free:* +if the machine has 4 GB *in total,* then it will swap. + +On 32-bit, divide the numbers by two. (We didn't try recently, but in +the past it was possible to compile a 32-bit version on a 2 GB Linux +machine with nothing else running: no Gnome/KDE, for example.) .. __: http://pypy.org/download.html#building-from-source .. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,11 +28,6 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* merge PYPY_IRC_TOPIC environment variable handling from previous release - in pypy/doc/getting-started-dev.rst, pypy/doc/man/pypy.1.rst, and - pypy/interpreter/app_main.py so release versions will not print a random - IRC topic by default. -* change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py * wait for builds to complete, make sure there are no failures diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -1,19 +1,43 @@ Historical release notes ------------------------- +======================== + +Cpython 2.7 compatible versions +=============================== .. 
toctree:: + release-2.3.1.rst + release-2.3.0.rst + release-2.2.1.rst + release-2.2.0.rst + release-2.1.0.rst + release-2.1.0-beta2.rst + release-2.1.0-beta1.rst + release-2.1.0.rst + release-2.0.2.rst + release-2.0.1.rst + release-2.0.0.rst + release-2.0.0-beta2.rst + release-2.0.0-beta1.rst + release-1.9.0.rst + release-1.8.0.rst + release-1.7.0.rst + release-1.6.0.rst + release-1.5.0.rst + release-1.4.1.rst + release-1.4.0beta.rst + release-1.4.0.rst + release-1.3.0.rst + release-1.2.0.rst + release-1.1.0.rst + release-1.0.0.rst + release-0.99.0.rst + release-0.9.0.rst + release-0.8.0.rst + release-0.7.0.rst release-0.6 - release-0.7.0.rst - release-0.8.0.rst - release-0.9.0.rst - release-0.99.0.rst - release-1.0.0.rst - release-1.1.0.rst - release-1.2.0.rst - release-1.3.0.rst - release-1.4.0.rst - release-1.4.0beta.rst - release-1.4.1.rst - release-1.5.0.rst - release-1.6.0.rst + +Cpython 3.2 compatible versions +=============================== +.. toctree:: + release-pypy3-2.1.0-beta1.rst diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.2.1`_: the latest official release +* `Release 2.3.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.1`: http://pypy.org/download.html +.. _`Release 2.3.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. 
_`potential project ideas`: project-ideas.html diff --git a/pypy/doc/make.bat b/pypy/doc/make.bat --- a/pypy/doc/make.bat +++ b/pypy/doc/make.bat @@ -2,11 +2,15 @@ REM Command file for Sphinx documentation -set SPHINXBUILD=sphinx-build +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help @@ -14,16 +18,25 @@ if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files From noreply at buildbot.pypy.org Wed Jul 2 20:59:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 20:59:53 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Fix Message-ID: <20140702185953.1CF021D3500@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72321:5d850e01c23b Date: 2014-07-02 20:53 +0200 http://bitbucket.org/pypy/pypy/changeset/5d850e01c23b/ Log: Fix diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -344,29 +344,6 @@ def robjmodel_keepalive_until_here(*args_s): return immutablevalue(None) - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr) -def llmemory_cast_ptr_to_adr(s): - from rpython.rtyper.llannotation import SomeInteriorPtr - assert not isinstance(s, SomeInteriorPtr) - return SomeAddress() - - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_adr_to_ptr) -def llmemory_cast_adr_to_ptr(s, s_type): - assert s_type.is_constant() - return SomePtr(s_type.const) - - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_adr_to_int) -def llmemory_cast_adr_to_int(s, s_mode=None): - return SomeInteger() # xxx - - at 
analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_adr_to_uint_symbolic) -def llmemory_cast_adr_to_uint_symbolic(s): - return SomeInteger(unsigned=True) - - at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_int_to_adr) -def llmemory_cast_int_to_adr(s): - return SomeAddress() - try: import unicodedata except ImportError: diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -769,12 +769,16 @@ res = cast(lltype.Signed, res) return res + at analyzer_for(cast_adr_to_int) +def ann_cast_adr_to_int(s, s_mode=None): + return SomeInteger() # xxx + def cast_adr_to_uint_symbolic(adr): return adr._cast_to_uint() - at analyzer_for(cast_adr_to_int) -def ann_cast_adr_to_int(s, s_mode=None): - return SomeInteger() # xxx + at analyzer_for(cast_adr_to_uint_symbolic) +def ann_cast_adr_to_uint_symbolic(s): + return SomeInteger(unsigned=True) _NONGCREF = lltype.Ptr(lltype.OpaqueType('NONGCREF')) From noreply at buildbot.pypy.org Wed Jul 2 21:28:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 21:28:46 +0200 (CEST) Subject: [pypy-commit] pypy default: Details from the stm branch Message-ID: <20140702192846.3F8221C0ECA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72322:659b814b797a Date: 2014-07-02 21:28 +0200 http://bitbucket.org/pypy/pypy/changeset/659b814b797a/ Log: Details from the stm branch diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -132,7 +132,6 @@ self.mc.ADD(ebp, imm(1)) # ebp any more; and ignore 'fastgil' def move_real_result_and_call_reacqgil_addr(self, fastgil): - from rpython.jit.backend.x86.assembler import heap from rpython.jit.backend.x86 import rx86 # # check if we need to call the reacqgil() function or not diff --git 
a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -499,7 +499,7 @@ 'SETARRAYITEM_GC/3d', 'SETARRAYITEM_RAW/3d', 'SETINTERIORFIELD_GC/3d', - 'SETINTERIORFIELD_RAW/3d', # only used by llsupport/rewrite.py + 'SETINTERIORFIELD_RAW/3d', # right now, only used by tests 'RAW_STORE/3d', 'SETFIELD_GC/2d', 'SETFIELD_RAW/2d', diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -328,6 +328,11 @@ #Since there is no GetErrorMode, do a double Set old_mode = SetErrorMode(flags) SetErrorMode(old_mode | flags) + if env is None: + envrepr = '' + else: + envrepr = ' [env=%r]' % (env,) + log.cmdexec('%s %s%s' % (self.executable_name, args, envrepr)) res = self.translator.platform.execute(self.executable_name, args, env=env) if sys.platform == 'win32': diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -117,7 +117,9 @@ #define OP_BOEHM_DISAPPEARING_LINK(link, obj, r) /* nothing */ #define OP_GC__DISABLE_FINALIZERS(r) /* nothing */ #define OP_GC__ENABLE_FINALIZERS(r) /* nothing */ -#define GC_REGISTER_FINALIZER(a,b,c,d,e) /* nothing */ +#define GC_REGISTER_FINALIZER(a, b, c, d, e) /* nothing */ +#define GC_gcollect() /* nothing */ +#define GC_set_max_heap_size(a) /* nothing */ #endif /************************************************************/ From noreply at buildbot.pypy.org Wed Jul 2 21:29:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 2 Jul 2014 21:29:52 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: fixes Message-ID: <20140702192952.E19D31C0ECA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72323:84e3bd9b16ad Date: 2014-07-02 21:28 +0200 http://bitbucket.org/pypy/pypy/changeset/84e3bd9b16ad/ Log: fixes diff --git 
a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -180,7 +180,7 @@ STACK_CHECK_SLOWPATH = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) def insert_stack_check(): - assert not self.cpu.gc_ll_descr.stm + assert not self.gc_ll_descr.stm endaddr = rstack._stack_get_end_adr() lengthaddr = rstack._stack_get_length_adr() f = llhelper(STACK_CHECK_SLOWPATH, rstack.stack_check_slowpath) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2743,9 +2743,7 @@ addr0 = stmtlocal.threadlocal_base() addr = addr1 - addr0 assert rx86.fits_in_32bits(addr) - mc = self.mc - mc.writechar(stmtlocal.SEGMENT_TL) # prefix: %fs or %gs - mc.MOV_rj(resloc.value, addr) # memory read + self.mc.MOV_rj(resloc.value, (stmtlocal.SEGMENT_TL, addr)) def get_set_errno(self, op, loc, issue_a_write): # this function is only called on Linux @@ -2753,23 +2751,23 @@ addr = stmtlocal.get_errno_tl() assert rx86.fits_in_32bits(addr) mc = self.mc - mc.writechar(stmtlocal.SEGMENT_TL) # prefix: %fs or %gs - # !!important: the *next* instruction must be the one using 'addr'!! 
+ SEGTL = stmtlocal.SEGMENT_TL if issue_a_write: if isinstance(loc, RegLoc): - mc.MOV32_jr(addr, loc.value) # memory write from reg + mc.MOV32_jr((SEGTL, addr), loc.value) # memory write from reg else: assert isinstance(loc, ImmedLoc) newvalue = loc.value newvalue = rffi.cast(rffi.INT, newvalue) newvalue = rffi.cast(lltype.Signed, newvalue) - mc.MOV32_ji(addr, newvalue) # memory write immediate + mc.MOV32_ji((SEGTL, addr), newvalue) # memory write immediate else: assert isinstance(loc, RegLoc) if IS_X86_32: - mc.MOV_rj(loc.value, addr) # memory read + mc.MOV_rj(loc.value, (SEGTL, addr)) # memory read elif IS_X86_64: - mc.MOVSX32_rj(loc.value, addr) # memory read, sign-extend + mc.MOVSX32_rj(loc.value, + (SEGTL, addr)) # memory read, sign-extend genop_discard_list = [Assembler386.not_implemented_op_discard] * rop._LAST diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -126,13 +126,12 @@ self.asm.set_extra_stack_depth(self.mc, -delta * WORD) css_value = eax # - self.mc.MOV(heap(fastgil), css_value) + self.mc.MOV(heap(self.asm.SEGMENT_NO, fastgil), css_value) # if not we_are_translated(): # for testing: we should not access self.mc.ADD(ebp, imm(1)) # ebp any more; and ignore 'fastgil' def move_real_result_and_call_reacqgil_addr(self, fastgil): - from rpython.jit.backend.x86.assembler import heap from rpython.jit.backend.x86 import rx86 # # check if we need to call the reacqgil() function or not @@ -161,10 +160,11 @@ # mc.MOV(old_value, imm(1)) if rx86.fits_in_32bits(fastgil): - mc.XCHG_rj(old_value.value, fastgil) + mc.XCHG_rj(old_value.value, (self.asm.SEGMENT_NO, fastgil)) else: mc.MOV_ri(X86_64_SCRATCH_REG.value, fastgil) - mc.XCHG_rm(old_value.value, (X86_64_SCRATCH_REG.value, 0)) + mc.XCHG_rm(old_value.value, + (self.asm.SEGMENT_NO, X86_64_SCRATCH_REG.value, 0)) mc.CMP(old_value, css_value) mc.J_il8(rx86.Conditions['E'], 0) 
je_location = mc.get_relative_pos() diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -357,6 +357,7 @@ rop.DEBUG_MERGE_POINT, rop.JIT_DEBUG, rop.SETARRAYITEM_RAW, + rop.SETINTERIORFIELD_RAW, rop.CALL_PURE, rop.CALL_RELEASE_GIL, rop.QUASIIMMUT_FIELD, diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -267,6 +267,7 @@ opnum == rop.SETFIELD_RAW or # no effect on GC struct/array opnum == rop.SETARRAYITEM_GC or # handled specially opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct + opnum == rop.SETINTERIORFIELD_RAW or # no effect on GC struct opnum == rop.RAW_STORE or # no effect on GC struct opnum == rop.STRSETITEM or # no effect on GC struct/array opnum == rop.UNICODESETITEM or # no effect on GC struct/array diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -505,6 +505,7 @@ 'SETARRAYITEM_GC/3d', 'SETARRAYITEM_RAW/3d', 'SETINTERIORFIELD_GC/3d', + 'SETINTERIORFIELD_RAW/3d', # right now, only used by tests 'RAW_STORE/3d', 'SETFIELD_GC/2d', 'SETFIELD_RAW/2d', From noreply at buildbot.pypy.org Thu Jul 3 00:28:52 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 3 Jul 2014 00:28:52 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Twisted with SSL works these days Message-ID: <20140702222852.7B2D51C0ECA@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r514:836e9b3f5226 Date: 2014-07-02 15:28 -0700 http://bitbucket.org/pypy/pypy.org/changeset/836e9b3f5226/ Log: Twisted with SSL works these days diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -74,7 +74,7 @@
  • django
  • sqlalchemy
  • flask
  • -
  • twisted (without ssl support)
  • +
  • twisted
  • pylons
  • divmod's nevow
  • pyglet
  • diff --git a/source/compat.txt b/source/compat.txt --- a/source/compat.txt +++ b/source/compat.txt @@ -42,7 +42,7 @@ * flask -* twisted (without ssl support) +* twisted * pylons From noreply at buildbot.pypy.org Thu Jul 3 05:26:32 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 3 Jul 2014 05:26:32 +0200 (CEST) Subject: [pypy-commit] pypy default: allow uint ops as ok_ops since INVALID_SOCKET is uint on MSVC Message-ID: <20140703032632.38A491C0ECA@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72324:eb50bceda86c Date: 2014-07-03 13:25 +1000 http://bitbucket.org/pypy/pypy/changeset/eb50bceda86c/ Log: allow uint ops as ok_ops since INVALID_SOCKET is uint on MSVC diff --git a/rpython/translator/backendopt/finalizer.py b/rpython/translator/backendopt/finalizer.py --- a/rpython/translator/backendopt/finalizer.py +++ b/rpython/translator/backendopt/finalizer.py @@ -31,7 +31,7 @@ if op.opname in self.ok_operations: return self.bottom_result() if (op.opname.startswith('int_') or op.opname.startswith('float_') - or op.opname.startswith('cast_')): + or op.opname.startswith('uint_') or op.opname.startswith('cast_')): return self.bottom_result() if op.opname == 'setfield' or op.opname == 'bare_setfield': TP = op.args[2].concretetype From noreply at buildbot.pypy.org Thu Jul 3 11:40:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Jul 2014 11:40:46 +0200 (CEST) Subject: [pypy-commit] pypy default: Improve the test, which fails on "pypy -A": try repeatedly to get a Message-ID: <20140703094046.9221B1C0257@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72325:28a1ebabc3e4 Date: 2014-07-03 11:40 +0200 http://bitbucket.org/pypy/pypy/changeset/28a1ebabc3e4/ Log: Improve the test, which fails on "pypy -A": try repeatedly to get a small array, fill it with garbage, and then free it. It's likely we end up at the same location with still the garbage. 
diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -3,18 +3,27 @@ class AppTestNumSupport(BaseNumpyAppTest): def test_zeros(self): - from numpypy import zeros, empty + from numpypy import zeros a = zeros(3) assert len(a) == 3 assert a[0] == a[1] == a[2] == 0 - a = empty(1000) - assert len(a) == 1000 + + def test_empty(self): + from numpypy import empty + import gc for i in range(1000): - if a[i] != 0: - break + a = empty(3) + assert len(a) == 3 + if not (a[0] == a[1] == a[2] == 0): + break # done + a[0] = 1.23 + a[1] = 4.56 + a[2] = 7.89 + del a + gc.collect() else: raise AssertionError( - "empty() returned a zeroed out array of length 1000 (unlikely)") + "empty() returned a zeroed out array every time") def test_where(self): from numpypy import where, ones, zeros, array From noreply at buildbot.pypy.org Thu Jul 3 15:31:30 2014 From: noreply at buildbot.pypy.org (Patrick Rein) Date: Thu, 3 Jul 2014 15:31:30 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: Added OSLock class to access rthread.lock and corresponding primitives. Message-ID: <20140703133130.3099A1D353B@cobra.cs.uni-duesseldorf.de> Author: Patrick Rein Branch: stmgc-c7 Changeset: r851:3f97fdcac997 Date: 2014-07-03 15:26 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3f97fdcac997/ Log: Added OSLock class to access rthread.lock and corresponding primitives. diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -758,4 +758,4 @@ self fieldNew: swapField. ]. - ^ self field! ! ----QUIT----{22 May 2014 . 3:33:07 pm} Squeak4.5-12568.image priorSource: 93437! ----STARTUP----{22 May 2014 . 3:33:13 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:33'! 
gameLifeOfLife STMSimulation benchmark.! ! ----QUIT----{22 May 2014 . 3:34:03 pm} Squeak4.5-12568.image priorSource: 110218! ----STARTUP----{22 May 2014 . 3:34:57 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:35'! gameOfLife STMSimulation benchmark.! ! Integer removeSelector: #gameLifeOfLife! ----QUIT----{22 May 2014 . 3:35:14 pm} Squeak4.5-12568.image priorSource: 110526! ----STARTUP----{22 May 2014 . 3:36:22 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:36' prior: 33665224! gameOfLife STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:36:45 pm} Squeak4.5-12568.image priorSource: 110873! ----STARTUP----{22 May 2014 . 3:36:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:36:53 pm} Squeak4.5-12568.image priorSource: 111195! ----STARTUP----{22 May 2014 . 3:36:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:37' prior: 33665587! gameOfLife SPyVM print: STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:37:32 pm} Squeak4.5-12568.image priorSource: 111392! ----STARTUP----{22 May 2014 . 3:38:15 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:38:35 pm} Squeak4.5-12568.image priorSource: 111727! ----STARTUP----{2 June 2014 . 12:57:18 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 12:58'! benchStmParallelWarmed 10 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 12:58:21 pm} Squeak4.5-12568.image priorSource: 111924! \ No newline at end of file + ^ self field! ! ----QUIT----{22 May 2014 . 3:33:07 pm} Squeak4.5-12568.image priorSource: 93437! 
----STARTUP----{22 May 2014 . 3:33:13 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:33'! gameLifeOfLife STMSimulation benchmark.! ! ----QUIT----{22 May 2014 . 3:34:03 pm} Squeak4.5-12568.image priorSource: 110218! ----STARTUP----{22 May 2014 . 3:34:57 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:35'! gameOfLife STMSimulation benchmark.! ! Integer removeSelector: #gameLifeOfLife! ----QUIT----{22 May 2014 . 3:35:14 pm} Squeak4.5-12568.image priorSource: 110526! ----STARTUP----{22 May 2014 . 3:36:22 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:36' prior: 33665224! gameOfLife STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:36:45 pm} Squeak4.5-12568.image priorSource: 110873! ----STARTUP----{22 May 2014 . 3:36:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:36:53 pm} Squeak4.5-12568.image priorSource: 111195! ----STARTUP----{22 May 2014 . 3:36:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:37' prior: 33665587! gameOfLife SPyVM print: STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:37:32 pm} Squeak4.5-12568.image priorSource: 111392! ----STARTUP----{22 May 2014 . 3:38:15 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:38:35 pm} Squeak4.5-12568.image priorSource: 111727! ----STARTUP----{2 June 2014 . 12:57:18 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 12:58'! benchStmParallelWarmed 10 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 
12:58:21 pm} Squeak4.5-12568.image priorSource: 111924! ----STARTUP----{26 June 2014 . 2:47:09 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:46:54 pm'! Object subclass: #OSLock instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #OSLock instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 14:25'! lock ! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 14:26'! release ! ! ----End fileIn of a stream----! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:46:45 pm'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:48' prior: 33647508! parallelForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p wait! ! ----End fileIn of D:\code\python\spy-vm\lang-smalltalk\images\Integer-parallelForkTest.st----! ----QUIT----{26 June 2014 . 2:47:49 pm} Squeak4.5-12568.image priorSource: 112268! ----STARTUP----{26 June 2014 . 2:49:11 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:46:45 pm'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:48' prior: 33667646! parallelForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p wait! ! ----End fileIn of D:\code\python\spy-vm\lang-smalltalk\images\Integer-parallelForkTest.st----! ----SNAPSHOT----{26 June 2014 . 2:49:57 pm} Squeak4.5-12568.image priorSource: 113431! ----QUIT----{26 June 2014 . 2:50 pm} Squeak4.5-12568.image priorSource: 114022! ----STARTUP----{26 June 2014 . 2:52:02 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! 
'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:51:52 pm'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 14:43'! osLockTest | lock process1 process2 process2lock | lock := OSLock new. lock lock. process2lock := OSLock new. process2lock lock. process1 := [ SPyVM print: 'First process start'. lock lock. process2lock release. SPyVM print: 'First process after lock'. lock release. ] parallelFork . process2 := [ SPyVM print: 'Second process start'. process2lock lock. lock lock. SPyVM print: 'Second process after lock'. process2lock release. lock release. ] parallelFork . SPyVM print: 'Processes initialized.'. lock release. process1 wait. process2 wait.! ! ----End fileIn of a stream----! ----QUIT----{26 June 2014 . 2:53:18 pm} Squeak4.5-12568.image priorSource: 114110! ----STARTUP----{26 June 2014 . 3:02:03 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:02'! osLockEasyTest | lock | lock := OSLock new. lock lock. lock release. SPyVM print: 'Survived lock.'! ! ----QUIT----{26 June 2014 . 3:02:52 pm} Squeak4.5-12568.image priorSource: 115096! ----STARTUP----{26 June 2014 . 3:03:40 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33667310! lock SPyVM print: '* STM Process did not fork *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33667409! release SPyVM print: '* STM Process did not fork *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33670408! release SPyVM print: '* OS Lock could not be released *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33670196! 
lock SPyVM print: '* OS Lock could not be locked *' , Character cr. self primitiveFailed. self resume! ! ----QUIT----{26 June 2014 . 3:04:45 pm} Squeak4.5-12568.image priorSource: 115476! ----STARTUP----{26 June 2014 . 3:08:07 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:08' prior: 33669797! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived lock.'! ! ----QUIT----{26 June 2014 . 3:08:24 pm} Squeak4.5-12568.image priorSource: 116537! ----STARTUP----{26 June 2014 . 3:09:05 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:09' prior: 33671254! osLockEasyTest | lock | lock := OSLock new. lock lock. lock release. SPyVM print: 'Survived lock.'! ! ----QUIT----{26 June 2014 . 3:09:17 pm} Squeak4.5-12568.image priorSource: 116916! ----STARTUP----{26 June 2014 . 3:57:38 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:57' prior: 33671633! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived lock.' lock release. SPyVM print: 'Survived lock.'! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:58' prior: 33672027! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived lock.'. lock release. SPyVM print: 'Survived lock.'. ^ self! ! ----QUIT----{26 June 2014 . 3:58:58 pm} Squeak4.5-12568.image priorSource: 117310! ----STARTUP----{26 June 2014 . 3:59:21 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:59' prior: 33672253! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived locking.'. lock release. SPyVM print: 'Survived releasing.'. ^ self! ! 
!Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:59' prior: 33672690! osLockEasyTest | lock1 | lock1 := OSLock new. lock1 lock. SPyVM print: 'Survived locking.'. lock1 release. SPyVM print: 'Survived releasing.'. ^ self! ! ----QUIT----{26 June 2014 . 4:00 pm} Squeak4.5-12568.image priorSource: 117973! ----STARTUP----{26 June 2014 . 4:07:56 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09' prior: 33670843! lock self internalLock ! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09'! internalLock SPyVM print: '* OS Lock could not be locked *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09'! internalRelease SPyVM print: '* OS Lock could not be released *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09' prior: 33670623! release self internalRelease! ! ----QUIT----{26 June 2014 . 4:09:34 pm} Squeak4.5-12568.image priorSource: 118668! 
\ No newline at end of file diff --git a/images/Squeak4.5-12568.image b/images/Squeak4.5-12568.image index 0d24c0b0506552d59e3dd4db7be72f7e565a8d7c..6628e9cf95b4860c2c865166b07278b10aec9b80 GIT binary patch [cut] diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1335,6 +1335,10 @@ STM_ATOMIC_ENTER = 1302 # 790 STM_ATOMIC_LEAVE = 1303 # 791 +# OS Lock Primitives +OS_LOCK_LOCK = 1304 # 792 +OS_LOCK_RELEASE = 1305 # 793 + @expose_primitive(BLOCK_COPY, unwrap_spec=[object, int]) def func(interp, s_frame, w_context, argcnt): @@ -1542,6 +1546,26 @@ rstm.decrement_atomic() + at expose_primitive(OS_LOCK_LOCK, unwrap_spec=[object], no_result=True) +def func(interp, s_frame, w_rcvr): + from rpython.rlib import rthread + + if not isinstance(w_rcvr, model.W_PointersObject): + raise PrimitiveFailedError("OS_LOCK_LOCK primitive was not called on an OSLock Object") + + lock_shadow = w_rcvr.as_special_get_shadow(interp.space, shadow.OSLockShadow) + lock_shadow.os_lock() + + at expose_primitive(OS_LOCK_RELEASE, unwrap_spec=[object], no_result=True) +def func(interp, s_frame, w_rcvr): + from rpython.rlib import rthread + + if not isinstance(w_rcvr, model.W_PointersObject): + raise PrimitiveFailedError("OS_LOCK_LOCK primitive was not called on an OSLock Object") + + lock_shadow = w_rcvr.as_special_get_shadow(interp.space, shadow.OSLockShadow) + lock_shadow.os_release() + # ___________________________________________________________________________ # BlockClosure Primitives diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1142,4 +1142,17 @@ return lock_result def signal(self): - self.lock.release() \ No newline at end of file + self.lock.release() + +class OSLockShadow(AbstractShadow): + + def __init__(self, space, w_self): + AbstractShadow.__init__(self, space, w_self) + self.intern_lock = rthread.allocate_lock() + + def os_lock(self): + self.intern_lock.acquire(True) + + def 
os_release(self): + self.intern_lock.release() + From noreply at buildbot.pypy.org Thu Jul 3 16:32:24 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 3 Jul 2014 16:32:24 +0200 (CEST) Subject: [pypy-commit] pypy scalar-operations: avoid converting scalars to arrays when calling unary ufuncs Message-ID: <20140703143224.AB2851D2D3D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: scalar-operations Changeset: r72326:e32c6b0eff6c Date: 2014-07-03 15:31 +0100 http://bitbucket.org/pypy/pypy/changeset/e32c6b0eff6c/ Log: avoid converting scalars to arrays when calling unary ufuncs diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -312,8 +312,8 @@ out = args_w[1] if space.is_w(out, space.w_None): out = None - w_obj = convert_to_array(space, w_obj) - dtype = w_obj.get_dtype() + w_obj = numpify(space, w_obj) + dtype = _get_dtype(space, w_obj) if dtype.is_flexible(): raise OperationError(space.w_TypeError, space.wrap('Not implemented for this type')) @@ -323,7 +323,7 @@ raise oefmt(space.w_TypeError, "ufunc %s not supported for the input type", self.name) calc_dtype = find_unaryop_result_dtype(space, - w_obj.get_dtype(), + dtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) if out is not None: @@ -353,6 +353,7 @@ else: out.fill(space, w_val) return out + assert isinstance(w_obj, W_NDimArray) shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) return loop.call1(space, shape, self.func, calc_dtype, res_dtype, From noreply at buildbot.pypy.org Thu Jul 3 16:42:02 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 3 Jul 2014 16:42:02 +0200 (CEST) Subject: [pypy-commit] pypy scalar-operations: hg merge default Message-ID: <20140703144202.AF14E1D2D3D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: scalar-operations Changeset: r72327:58201d7159b3 Date: 2014-07-03 15:41 +0100 
http://bitbucket.org/pypy/pypy/changeset/58201d7159b3/ Log: hg merge default diff too long, truncating to 2000 out of 2386 lines diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -389,12 +389,13 @@ func.__name__ = name_or_ordinal return func -class PyDLL(CDLL): - """This class represents the Python library itself. It allows to - access Python API functions. The GIL is not released, and - Python exceptions are handled correctly. - """ - _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI +# Not in PyPy +#class PyDLL(CDLL): +# """This class represents the Python library itself. It allows to +# access Python API functions. The GIL is not released, and +# Python exceptions are handled correctly. +# """ +# _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI if _os.name in ("nt", "ce"): @@ -447,15 +448,8 @@ return self._dlltype(name) cdll = LibraryLoader(CDLL) -pydll = LibraryLoader(PyDLL) - -if _os.name in ("nt", "ce"): - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - +# not on PyPy +#pydll = LibraryLoader(PyDLL) if _os.name in ("nt", "ce"): windll = LibraryLoader(WinDLL) diff --git a/lib-python/2.7/ctypes/test/test_values.py b/lib-python/2.7/ctypes/test/test_values.py --- a/lib-python/2.7/ctypes/test/test_values.py +++ b/lib-python/2.7/ctypes/test/test_values.py @@ -4,6 +4,7 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test @@ -23,7 +24,8 @@ class Win_ValuesTestCase(unittest.TestCase): """This test only works when python itself is a dll/shared library""" - + + @xfail def test_optimizeflag(self): # This test accesses the Py_OptimizeFlag intger, which is # exported by the Python dll. 
@@ -40,6 +42,7 @@ else: self.assertEqual(opt, 2) + @xfail def test_frozentable(self): # Python exports a PyImport_FrozenModules symbol. This is a # pointer to an array of struct _frozen entries. The end of the @@ -75,6 +78,7 @@ from ctypes import _pointer_type_cache del _pointer_type_cache[struct_frozen] + @xfail def test_undefined(self): self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol") diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -55,11 +55,6 @@ import gc import sys import time -try: - import itertools -except ImportError: - # Must be an older Python version (see timeit() below) - itertools = None __all__ = ["Timer"] @@ -81,7 +76,8 @@ def inner(_it, _timer): %(setup)s _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 %(stmt)s _t1 = _timer() return _t1 - _t0 @@ -96,7 +92,8 @@ def inner(_it, _timer, _func=func): setup() _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 _func() _t1 = _timer() return _t1 - _t0 @@ -133,9 +130,11 @@ else: raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec code in globals(), ns - self.inner = ns["inner"] + def make_inner(): + code = compile(src, dummy_src_name, "exec") + exec code in globals(), ns + return ns["inner"] + self.make_inner = make_inner elif hasattr(stmt, '__call__'): self.src = None if isinstance(setup, basestring): @@ -144,7 +143,8 @@ exec _setup in globals(), ns elif not hasattr(setup, '__call__'): raise ValueError("setup is neither a string nor callable") - self.inner = _template_func(setup, stmt) + inner = _template_func(setup, stmt) + self.make_inner = lambda: inner else: raise ValueError("stmt is neither a string nor callable") @@ -185,15 +185,12 @@ to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. 
""" - if itertools: - it = itertools.repeat(None, number) - else: - it = [None] * number + inner = self.make_inner() gcold = gc.isenabled() if '__pypy__' not in sys.builtin_module_names: gc.disable() # only do that on CPython try: - timing = self.inner(it, self.timer) + timing = inner(number, self.timer) finally: if gcold: gc.enable() diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,13 +72,11 @@ Here is a list of the limitations and missing features of the current implementation: -* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer - of PyPy, at your own risks and without doing anything sensible about - the GIL. Since PyPy 2.3, these functions are also named with an extra - "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, - but it might more or less work in simple cases if you do. (Obviously, - assuming the PyObject pointers you get have any particular fields in - any particular order is just going to crash.) +* ``ctypes.pythonapi`` is missing. In previous versions, it was present + and redirected to the `cpyext` C API emulation layer, but our + implementation did not do anything sensible about the GIL and the + functions were named with an extra "Py", for example + ``PyPyInt_FromLong()``. It was removed for being unhelpful. * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -465,9 +465,13 @@ This is documented (here__ and here__). It needs 4 GB of RAM to run "rpython targetpypystandalone" on top of PyPy, a bit more when running -on CPython. If you have less than 4 GB it will just swap forever (or -fail if you don't have enough swap). On 32-bit, divide the numbers by -two. +on top of CPython. 
If you have less than 4 GB free, it will just swap +forever (or fail if you don't have enough swap). And we mean *free:* +if the machine has 4 GB *in total,* then it will swap. + +On 32-bit, divide the numbers by two. (We didn't try recently, but in +the past it was possible to compile a 32-bit version on a 2 GB Linux +machine with nothing else running: no Gnome/KDE, for example.) .. __: http://pypy.org/download.html#building-from-source .. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -92,9 +92,9 @@ We're busy fixing them as we find them; feel free to `report bugs`_. * It runs with an overhead as low as 20% on examples like "richards". - There are also other examples with higher overheads --up to 10x for - "translate.py"-- which we are still trying to understand. One suspect - is our partial GC implementation, see below. + There are also other examples with higher overheads --currently up to + 2x for "translate.py"-- which we are still trying to understand. + One suspect is our partial GC implementation, see below. * Currently limited to 1.5 GB of RAM (this is just a parameter in `core.h`__). Memory overflows are not correctly handled; they cause @@ -111,9 +111,8 @@ * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. Programs allocating large - numbers of small objects that don't immediately die, as well as - programs that modify large lists or dicts, suffer from these missing - optimizations. + numbers of small objects that don't immediately die (surely a common + situation) suffer from these missing optimizations. * The GC has no support for destructors: the ``__del__`` method is never called (including on file objects, which won't be closed for you). 
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -43,3 +43,7 @@ .. branch: jit-get-errno Optimize the errno handling in the JIT, notably around external function calls. Linux-only. + +.. branch: disable_pythonapi +Remove non-functioning ctypes.pyhonapi and ctypes.PyDLL, document this +incompatibility with cpython. Recast sys.dllhandle to an int. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -967,6 +967,13 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_no_unpack(self, w_iterable): + """ Same as listview() if cheap. If 'w_iterable' is something like + a generator, for example, then return None instead. + May return None anyway. + """ + return None + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -496,6 +496,13 @@ """ +class UserDelCallback(object): + def __init__(self, w_obj, callback, descrname): + self.w_obj = w_obj + self.callback = callback + self.descrname = descrname + self.next = None + class UserDelAction(AsyncAction): """An action that invokes all pending app-level __del__() method. 
This is done as an action instead of immediately when the @@ -506,12 +513,18 @@ def __init__(self, space): AsyncAction.__init__(self, space) - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None self.finalizers_lock_count = 0 self.enabled_at_app_level = True def register_callback(self, w_obj, callback, descrname): - self.dying_objects.append((w_obj, callback, descrname)) + cb = UserDelCallback(w_obj, callback, descrname) + if self.dying_objects_last is None: + self.dying_objects = cb + else: + self.dying_objects_last.next = cb + self.dying_objects_last = cb self.fire() def perform(self, executioncontext, frame): @@ -525,13 +538,33 @@ # avoid too deep recursions of the kind of __del__ being called # while in the middle of another __del__ call. pending = self.dying_objects - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None space = self.space - for i in range(len(pending)): - w_obj, callback, descrname = pending[i] - pending[i] = (None, None, None) + while pending is not None: try: - callback(w_obj) + pending.callback(pending.w_obj) except OperationError, e: - e.write_unraisable(space, descrname, w_obj) + e.write_unraisable(space, pending.descrname, pending.w_obj) e.clear(space) # break up reference cycles + pending = pending.next + # + # Note: 'dying_objects' used to be just a regular list instead + # of a chained list. This was the cause of "leaks" if we have a + # program that constantly creates new objects with finalizers. + # Here is why: say 'dying_objects' is a long list, and there + # are n instances in it. Then we spend some time in this + # function, possibly triggering more GCs, but keeping the list + # of length n alive. Then the list is suddenly freed at the + # end, and we return to the user program. At this point the + # GC limit is still very high, because just before, there was + # a list of length n alive. 
Assume that the program continues + # to allocate a lot of instances with finalizers. The high GC + # limit means that it could allocate a lot of instances before + # reaching it --- possibly more than n. So the whole procedure + # repeats with higher and higher values of n. + # + # This does not occur in the current implementation because + # there is no list of length n: if n is large, then the GC + # will run several times while walking the list, but it will + # see lower and lower memory usage, with no lower bound of n. diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -61,6 +61,13 @@ return self.send_ex(w_arg) def send_ex(self, w_arg, operr=None): + pycode = self.pycode + if jit.we_are_jitted() and should_not_inline(pycode): + generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg, + operr=operr, pycode=pycode) + return self._send_ex(w_arg, operr) + + def _send_ex(self, w_arg, operr): space = self.space if self.running: raise OperationError(space.w_ValueError, @@ -72,8 +79,7 @@ if operr is None: operr = OperationError(space.w_StopIteration, space.w_None) raise operr - # XXX it's not clear that last_instr should be promoted at all - # but as long as it is necessary for call_assembler, let's do it early + last_instr = jit.promote(frame.last_instr) if last_instr == -1: if w_arg and not space.is_w(w_arg, space.w_None): @@ -214,3 +220,38 @@ "interrupting generator of ") break block = block.previous + + + +def get_printable_location_genentry(bytecode): + return '%s ' % (bytecode.get_repr(),) +generatorentry_driver = jit.JitDriver(greens=['pycode'], + reds=['gen', 'w_arg', 'operr'], + get_printable_location = + get_printable_location_genentry, + name='generatorentry') + +from pypy.tool.stdlib_opcode import HAVE_ARGUMENT, opmap +YIELD_VALUE = opmap['YIELD_VALUE'] + + at jit.elidable_promote() +def should_not_inline(pycode): + # Should not inline generators with 
more than one "yield", + # as an approximative fix (see issue #1782). There are cases + # where it slows things down; for example calls to a simple + # generator that just produces a few simple values with a few + # consecutive "yield" statements. It fixes the near-infinite + # slow-down in issue #1782, though... + count_yields = 0 + code = pycode.co_code + n = len(code) + i = 0 + while i < n: + c = code[i] + op = ord(c) + if op == YIELD_VALUE: + count_yields += 1 + i += 1 + if op >= HAVE_ARGUMENT: + i += 2 + return count_yields >= 2 diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -278,4 +278,21 @@ def f(): yield 1 raise StopIteration - assert tuple(f()) == (1,) \ No newline at end of file + assert tuple(f()) == (1,) + + +def test_should_not_inline(space): + from pypy.interpreter.generator import should_not_inline + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + return g.func_code + ''') + assert should_not_inline(w_co) == False + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + yield x + 6 + return g.func_code + ''') + assert should_not_inline(w_co) == True diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -4,7 +4,7 @@ import sys -from rpython.rlib import jit, clibffi, jit_libffi +from rpython.rlib import jit, clibffi, jit_libffi, rgc from rpython.rlib.jit_libffi import (CIF_DESCRIPTION, CIF_DESCRIPTION_P, FFI_TYPE, FFI_TYPE_P, FFI_TYPE_PP, SIZE_OF_FFI_ARG) from rpython.rlib.objectmodel import we_are_translated, instantiate @@ -63,6 +63,7 @@ CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) return ctypefunc + @rgc.must_be_light_finalizer def __del__(self): if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') diff --git 
a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -458,6 +458,10 @@ self._check_init(space) return space.call_method(self.w_buffer, "seekable") + def isatty_w(self, space): + self._check_init(space) + return space.call_method(self.w_buffer, "isatty") + def fileno_w(self, space): self._check_init(space) return space.call_method(self.w_buffer, "fileno") @@ -1035,6 +1039,7 @@ readable = interp2app(W_TextIOWrapper.readable_w), writable = interp2app(W_TextIOWrapper.writable_w), seekable = interp2app(W_TextIOWrapper.seekable_w), + isatty = interp2app(W_TextIOWrapper.isatty_w), fileno = interp2app(W_TextIOWrapper.fileno_w), name = GetSetProperty(W_TextIOWrapper.name_get_w), buffer = interp_attrproperty_w("w_buffer", cls=W_TextIOWrapper), diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -25,6 +25,12 @@ t = _io.TextIOWrapper(b) assert t.readable() assert t.seekable() + # + class CustomFile(object): + def isatty(self): return 'YES' + readable = writable = seekable = lambda self: False + t = _io.TextIOWrapper(CustomFile()) + assert t.isatty() == 'YES' def test_default_implementations(self): import _io diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -11,7 +11,7 @@ from rpython.rlib.rtimer import read_timestamp, _is_64_bit from rpython.rtyper.lltypesystem import rffi, lltype from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.rlib.rarithmetic import r_longlong import time, sys diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py 
@@ -6,8 +6,8 @@ } interpleveldefs = { - 'SocketType': 'interp_socket.W_RSocket', - 'socket' : 'interp_socket.W_RSocket', + 'SocketType': 'interp_socket.W_Socket', + 'socket' : 'interp_socket.W_Socket', 'error' : 'interp_socket.get_error(space, "error")', 'herror' : 'interp_socket.get_error(space, "herror")', 'gaierror' : 'interp_socket.get_error(space, "gaierror")', diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,8 +1,12 @@ -from pypy.interpreter.gateway import unwrap_spec, WrappedDefault -from pypy.module._socket.interp_socket import converted_error, W_RSocket, addr_as_object, ipaddr_from_object from rpython.rlib import rsocket from rpython.rlib.rsocket import SocketError, INVALID_SOCKET + from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import unwrap_spec, WrappedDefault +from pypy.module._socket.interp_socket import ( + converted_error, W_Socket, addr_as_object, ipaddr_from_object +) + def gethostname(space): """gethostname() -> string @@ -136,10 +140,10 @@ The remaining arguments are the same as for socket(). """ try: - sock = rsocket.fromfd(fd, family, type, proto, W_RSocket) + sock = rsocket.fromfd(fd, family, type, proto) except SocketError, e: raise converted_error(space, e) - return space.wrap(sock) + return space.wrap(W_Socket(sock)) @unwrap_spec(family=int, type=int, proto=int) def socketpair(space, family=rsocket.socketpair_default_family, @@ -153,10 +157,13 @@ AF_UNIX if defined on the platform; otherwise, the default is AF_INET. 
""" try: - sock1, sock2 = rsocket.socketpair(family, type, proto, W_RSocket) + sock1, sock2 = rsocket.socketpair(family, type, proto) except SocketError, e: raise converted_error(space, e) - return space.newtuple([space.wrap(sock1), space.wrap(sock2)]) + return space.newtuple([ + space.wrap(W_Socket(sock1)), + space.wrap(W_Socket(sock2)) + ]) # The following 4 functions refuse all negative numbers, like CPython 2.6. # They could also check that the argument is not too large, but CPython 2.6 diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -1,14 +1,18 @@ +from rpython.rlib import rsocket +from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rsocket import ( + RSocket, AF_INET, SOCK_STREAM, SocketError, SocketErrorWithErrno, + RSocketError +) +from rpython.rtyper.lltypesystem import lltype, rffi + +from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef, make_weakref_descr,\ - interp_attrproperty +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from rpython.rlib.rarithmetic import intmask -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib import rsocket -from rpython.rlib.rsocket import RSocket, AF_INET, SOCK_STREAM -from rpython.rlib.rsocket import SocketError, SocketErrorWithErrno, RSocketError -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter import gateway +from pypy.interpreter.typedef import ( + GetSetProperty, TypeDef, make_weakref_descr +) # XXX Hack to seperate rpython and pypy @@ -124,10 +128,18 @@ return addr -class W_RSocket(W_Root, RSocket): - def __del__(self): - self.clear_all_weakrefs() - RSocket.__del__(self) +class W_Socket(W_Root): + def __init__(self, sock): + self.sock = sock + + def 
get_type_w(self, space): + return space.wrap(self.sock.type) + + def get_proto_w(self, space): + return space.wrap(self.sock.proto) + + def get_family_w(self, space): + return space.wrap(self.sock.family) def accept_w(self, space): """accept() -> (socket object, address info) @@ -137,22 +149,22 @@ info is a pair (hostaddr, port). """ try: - fd, addr = self.accept() + fd, addr = self.sock.accept() sock = rsocket.make_socket( - fd, self.family, self.type, self.proto, W_RSocket) - return space.newtuple([space.wrap(sock), + fd, self.sock.family, self.sock.type, self.sock.proto) + return space.newtuple([space.wrap(W_Socket(sock)), addr_as_object(addr, sock.fd, space)]) - except SocketError, e: + except SocketError as e: raise converted_error(space, e) # convert an Address into an app-level object def addr_as_object(self, space, address): - return addr_as_object(address, self.fd, space) + return addr_as_object(address, self.sock.fd, space) # convert an app-level object into an Address # based on the current socket's family def addr_from_object(self, space, w_address): - return addr_from_object(self.family, space, w_address) + return addr_from_object(self.sock.family, space, w_address) def bind_w(self, space, w_addr): """bind(address) @@ -162,8 +174,8 @@ sockets the address is a tuple (ifname, proto [,pkttype [,hatype]]) """ try: - self.bind(self.addr_from_object(space, w_addr)) - except SocketError, e: + self.sock.bind(self.addr_from_object(space, w_addr)) + except SocketError as e: raise converted_error(space, e) def close_w(self, space): @@ -172,7 +184,7 @@ Close the socket. It cannot be used after this call. """ try: - self.close() + self.sock.close() except SocketError: # cpython doesn't return any errors on close pass @@ -184,8 +196,8 @@ is a pair (host, port). 
""" try: - self.connect(self.addr_from_object(space, w_addr)) - except SocketError, e: + self.sock.connect(self.addr_from_object(space, w_addr)) + except SocketError as e: raise converted_error(space, e) def connect_ex_w(self, space, w_addr): @@ -196,15 +208,16 @@ """ try: addr = self.addr_from_object(space, w_addr) - except SocketError, e: + except SocketError as e: raise converted_error(space, e) - error = self.connect_ex(addr) + error = self.sock.connect_ex(addr) return space.wrap(error) def dup_w(self, space): try: - return self.dup(W_RSocket) - except SocketError, e: + sock = self.sock.dup() + return W_Socket(sock) + except SocketError as e: raise converted_error(space, e) def fileno_w(self, space): @@ -212,7 +225,7 @@ Return the integer file descriptor of the socket. """ - return space.wrap(intmask(self.fd)) + return space.wrap(intmask(self.sock.fd)) def getpeername_w(self, space): """getpeername() -> address info @@ -221,9 +234,9 @@ info is a pair (hostaddr, port). """ try: - addr = self.getpeername() - return addr_as_object(addr, self.fd, space) - except SocketError, e: + addr = self.sock.getpeername() + return addr_as_object(addr, self.sock.fd, space) + except SocketError as e: raise converted_error(space, e) def getsockname_w(self, space): @@ -233,9 +246,9 @@ info is a pair (hostaddr, port). 
""" try: - addr = self.getsockname() - return addr_as_object(addr, self.fd, space) - except SocketError, e: + addr = self.sock.getsockname() + return addr_as_object(addr, self.sock.fd, space) + except SocketError as e: raise converted_error(space, e) @unwrap_spec(level=int, optname=int) @@ -248,11 +261,11 @@ """ if w_buflen is None: try: - return space.wrap(self.getsockopt_int(level, optname)) - except SocketError, e: + return space.wrap(self.sock.getsockopt_int(level, optname)) + except SocketError as e: raise converted_error(space, e) buflen = space.int_w(w_buflen) - return space.wrap(self.getsockopt(level, optname, buflen)) + return space.wrap(self.sock.getsockopt(level, optname, buflen)) def gettimeout_w(self, space): """gettimeout() -> timeout @@ -260,7 +273,7 @@ Returns the timeout in floating seconds associated with socket operations. A timeout of None indicates that timeouts on socket """ - timeout = self.gettimeout() + timeout = self.sock.gettimeout() if timeout < 0.0: return space.w_None return space.wrap(timeout) @@ -274,8 +287,8 @@ will allow before refusing new connections. """ try: - self.listen(backlog) - except SocketError, e: + self.sock.listen(backlog) + except SocketError as e: raise converted_error(space, e) @unwrap_spec(w_mode = WrappedDefault("r"), @@ -298,8 +311,8 @@ the remote end is closed and all data is read, return the empty string. """ try: - data = self.recv(buffersize, flags) - except SocketError, e: + data = self.sock.recv(buffersize, flags) + except SocketError as e: raise converted_error(space, e) return space.wrap(data) @@ -310,13 +323,13 @@ Like recv(buffersize, flags) but also return the sender's address info. 
""" try: - data, addr = self.recvfrom(buffersize, flags) + data, addr = self.sock.recvfrom(buffersize, flags) if addr: - w_addr = addr_as_object(addr, self.fd, space) + w_addr = addr_as_object(addr, self.sock.fd, space) else: w_addr = space.w_None return space.newtuple([space.wrap(data), w_addr]) - except SocketError, e: + except SocketError as e: raise converted_error(space, e) @unwrap_spec(data='bufferstr', flags=int) @@ -328,8 +341,8 @@ sent; this may be less than len(data) if the network is busy. """ try: - count = self.send(data, flags) - except SocketError, e: + count = self.sock.send(data, flags) + except SocketError as e: raise converted_error(space, e) return space.wrap(count) @@ -343,8 +356,9 @@ to tell how much data has been sent. """ try: - self.sendall(data, flags, space.getexecutioncontext().checksignals) - except SocketError, e: + self.sock.sendall( + data, flags, space.getexecutioncontext().checksignals) + except SocketError as e: raise converted_error(space, e) @unwrap_spec(data='bufferstr') @@ -364,8 +378,8 @@ w_addr = w_param3 try: addr = self.addr_from_object(space, w_addr) - count = self.sendto(data, flags, addr) - except SocketError, e: + count = self.sock.sendto(data, flags, addr) + except SocketError as e: raise converted_error(space, e) return space.wrap(count) @@ -377,7 +391,7 @@ setblocking(True) is equivalent to settimeout(None); setblocking(False) is equivalent to settimeout(0.0). 
""" - self.setblocking(flag) + self.sock.setblocking(flag) @unwrap_spec(level=int, optname=int) def setsockopt_w(self, space, level, optname, w_optval): @@ -391,13 +405,13 @@ except: optval = space.str_w(w_optval) try: - self.setsockopt(level, optname, optval) - except SocketError, e: + self.sock.setsockopt(level, optname, optval) + except SocketError as e: raise converted_error(space, e) return try: - self.setsockopt_int(level, optname, optval) - except SocketError, e: + self.sock.setsockopt_int(level, optname, optval) + except SocketError as e: raise converted_error(space, e) def settimeout_w(self, space, w_timeout): @@ -415,7 +429,7 @@ if timeout < 0.0: raise OperationError(space.w_ValueError, space.wrap('Timeout value out of range')) - self.settimeout(timeout) + self.sock.settimeout(timeout) @unwrap_spec(nbytes=int, flags=int) def recv_into_w(self, space, w_buffer, nbytes=0, flags=0): @@ -424,8 +438,8 @@ if nbytes == 0 or nbytes > lgt: nbytes = lgt try: - return space.wrap(self.recvinto(rwbuffer, nbytes, flags)) - except SocketError, e: + return space.wrap(self.sock.recvinto(rwbuffer, nbytes, flags)) + except SocketError as e: raise converted_error(space, e) @unwrap_spec(nbytes=int, flags=int) @@ -435,13 +449,13 @@ if nbytes == 0 or nbytes > lgt: nbytes = lgt try: - readlgt, addr = self.recvfrom_into(rwbuffer, nbytes, flags) + readlgt, addr = self.sock.recvfrom_into(rwbuffer, nbytes, flags) if addr: - w_addr = addr_as_object(addr, self.fd, space) + w_addr = addr_as_object(addr, self.sock.fd, space) else: w_addr = space.w_None return space.newtuple([space.wrap(readlgt), w_addr]) - except SocketError, e: + except SocketError as e: raise converted_error(space, e) @unwrap_spec(cmd=int) @@ -473,7 +487,7 @@ option_ptr.c_keepaliveinterval = space.uint_w(w_interval) res = _c.WSAIoctl( - self.fd, cmd, value_ptr, value_size, + self.sock.fd, cmd, value_ptr, value_size, rffi.NULL, 0, recv_ptr, rffi.NULL, rffi.NULL) if res < 0: raise converted_error(space, 
rsocket.last_error()) @@ -494,8 +508,8 @@ (flag == SHUT_RDWR). """ try: - self.shutdown(how) - except SocketError, e: + self.sock.shutdown(how) + except SocketError as e: raise converted_error(space, e) #------------------------------------------------------------ @@ -536,12 +550,13 @@ @unwrap_spec(family=int, type=int, proto=int) def newsocket(space, w_subtype, family=AF_INET, type=SOCK_STREAM, proto=0): - sock = space.allocate_instance(W_RSocket, w_subtype) + self = space.allocate_instance(W_Socket, w_subtype) try: - W_RSocket.__init__(sock, family, type, proto) - except SocketError, e: + sock = RSocket(family, type, proto) + except SocketError as e: raise converted_error(space, e) - return space.wrap(sock) + W_Socket.__init__(self, sock) + return space.wrap(self) descr_socket_new = interp2app(newsocket) # ____________________________________________________________ @@ -597,10 +612,10 @@ socketmethods = {} for methodname in socketmethodnames: - method = getattr(W_RSocket, methodname + '_w') + method = getattr(W_Socket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("_socket.socket", +W_Socket.typedef = TypeDef("_socket.socket", __doc__ = """\ socket([family[, type[, proto]]]) -> socket object @@ -639,9 +654,9 @@ [*] not available on all platforms!""", __new__ = descr_socket_new, - __weakref__ = make_weakref_descr(W_RSocket), - type = interp_attrproperty('type', W_RSocket), - proto = interp_attrproperty('proto', W_RSocket), - family = interp_attrproperty('family', W_RSocket), + __weakref__ = make_weakref_descr(W_Socket), + type = GetSetProperty(W_Socket.get_type_w), + proto = GetSetProperty(W_Socket.get_proto_w), + family = GetSetProperty(W_Socket.get_family_w), ** socketmethods ) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -15,6 +15,7 @@ interp2app, interpindirect2app, unwrap_spec) from 
pypy.interpreter.typedef import ( GetSetProperty, TypeDef, make_weakref_descr) +from pypy.interpreter.generator import GeneratorIterator from pypy.module._file.interp_file import W_File from pypy.objspace.std.floatobject import W_FloatObject @@ -630,6 +631,10 @@ def make_array(mytype): W_ArrayBase = globals()['W_ArrayBase'] + unpack_driver = jit.JitDriver(name='unpack_array', + greens=['tp'], + reds=['self', 'w_iterator']) + class W_Array(W_ArrayBase): itemsize = mytype.bytes typecode = mytype.typecode @@ -674,6 +679,10 @@ return rffi.cast(mytype.itemtype, item) # # "regular" case: it fits in an rpython integer (lltype.Signed) + # or it is a float + return self.item_from_int_or_float(item) + + def item_from_int_or_float(self, item): result = rffi.cast(mytype.itemtype, item) if mytype.canoverflow: if rffi.cast(lltype.Signed, result) != item: @@ -686,8 +695,8 @@ % mytype.bytes) if not mytype.signed: msg = 'un' + msg # 'signed' => 'unsigned' - raise OperationError(space.w_OverflowError, - space.wrap(msg)) + raise OperationError(self.space.w_OverflowError, + self.space.wrap(msg)) return result def __del__(self): @@ -734,27 +743,65 @@ def fromsequence(self, w_seq): space = self.space oldlen = self.len - try: - new = space.len_w(w_seq) - self.setlen(self.len + new) - except OperationError: - pass + newlen = oldlen - i = 0 - try: - if mytype.typecode == 'u': - myiter = space.unpackiterable - else: - myiter = space.listview - for w_i in myiter(w_seq): - if oldlen + i >= self.len: - self.setlen(oldlen + i + 1) - self.buffer[oldlen + i] = self.item_w(w_i) - i += 1 - except OperationError: - self.setlen(oldlen + i) - raise - self.setlen(oldlen + i) + # optimized case for arrays of integers or floats + if mytype.unwrap == 'int_w': + lst = space.listview_int(w_seq) + elif mytype.unwrap == 'float_w': + lst = space.listview_float(w_seq) + else: + lst = None + if lst is not None: + self.setlen(oldlen + len(lst)) + try: + buf = self.buffer + for num in lst: + buf[newlen] = 
self.item_from_int_or_float(num) + newlen += 1 + except OperationError: + self.setlen(newlen) + raise + return + + # this is the common case: w_seq is a list or a tuple + lst_w = space.listview_no_unpack(w_seq) + if lst_w is not None: + self.setlen(oldlen + len(lst_w)) + buf = self.buffer + try: + for w_num in lst_w: + # note: self.item_w() might invoke arbitrary code. + # In case it resizes the same array, then strange + # things may happen, but as we don't reload 'buf' + # we know that one is big enough for all items + # (so at least we avoid crashes) + buf[newlen] = self.item_w(w_num) + newlen += 1 + except OperationError: + if buf == self.buffer: + self.setlen(newlen) + raise + return + + self._fromiterable(w_seq) + + def _fromiterable(self, w_seq): + # a more careful case if w_seq happens to be a very large + # iterable: don't copy the items into some intermediate list + w_iterator = self.space.iter(w_seq) + tp = self.space.type(w_iterator) + while True: + unpack_driver.jit_merge_point(tp=tp, self=self, + w_iterator=w_iterator) + space = self.space + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + self.descr_append(space, w_item) def extend(self, w_iterable, accept_different_array=False): space = self.space @@ -797,8 +844,9 @@ def descr_append(self, space, w_x): x = self.item_w(w_x) - self.setlen(self.len + 1) - self.buffer[self.len - 1] = x + index = self.len + self.setlen(index + 1) + self.buffer[index] = x # List interface def descr_count(self, space, w_val): diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -10,7 +10,7 @@ from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.objectmodel import we_are_translated -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.translator.tool.cbuild 
import ExternalCompilationInfo from rpython.translator.gensupp import NameManager from rpython.tool.udir import udir diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -46,11 +46,11 @@ IndexError exception.""" if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) - wrappeditems = w_list.getitems() - if index < 0 or index >= len(wrappeditems): + if index < 0 or index >= w_list.length(): raise OperationError(space.w_IndexError, space.wrap( "list index out of range")) - return borrow_from(w_list, wrappeditems[index]) + w_item = w_list.getitem(index) + return borrow_from(w_list, w_item) @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -205,12 +205,7 @@ import sys if sys.platform != "win32" or sys.version_info < (2, 6): skip("Windows Python >= 2.6 only") - assert sys.dllhandle - assert sys.dllhandle.getaddressindll('PyPyErr_NewException') - import ctypes # slow - PyUnicode_GetDefaultEncoding = ctypes.pythonapi.PyPyUnicode_GetDefaultEncoding - PyUnicode_GetDefaultEncoding.restype = ctypes.c_char_p - assert PyUnicode_GetDefaultEncoding() == 'ascii' + assert isinstance(sys.dllhandle, int) class AppTestCpythonExtensionBase(LeakCheckingTest): diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -12,7 +12,7 @@ 'scalar' : 'ctors.build_scalar', 'array': 'ctors.array', 'zeros': 'ctors.zeros', - 'empty': 'ctors.zeros', + 'empty': 'ctors.empty', 'empty_like': 'ctors.empty_like', 'fromstring': 'ctors.fromstring', 'frombuffer': 'ctors.frombuffer', diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- 
a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -33,12 +33,12 @@ self.implementation = implementation @staticmethod - def from_shape(space, shape, dtype, order='C', w_instance=None): + def from_shape(space, shape, dtype, order='C', w_instance=None, zero=True): from pypy.module.micronumpy import concrete from pypy.module.micronumpy.strides import calc_strides strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, - backstrides) + backstrides, zero=zero) if w_instance: return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -369,9 +369,11 @@ class ConcreteArray(ConcreteArrayNotOwning): - def __init__(self, shape, dtype, order, strides, backstrides, storage=lltype.nullptr(RAW_STORAGE)): + def __init__(self, shape, dtype, order, strides, backstrides, + storage=lltype.nullptr(RAW_STORAGE), zero=True): if storage == lltype.nullptr(RAW_STORAGE): - storage = dtype.itemtype.malloc(support.product(shape) * dtype.elsize) + storage = dtype.itemtype.malloc(support.product(shape) * + dtype.elsize, zero=zero) ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides, storage) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -128,13 +128,19 @@ return w_arr -def zeros(space, w_shape, w_dtype=None, w_order=None): +def _zeros_or_empty(space, w_shape, w_dtype, w_order, zero): dtype = space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = descriptor.variable_dtype(space, dtype.char + '1') shape = shape_converter(space, w_shape, dtype) - return 
W_NDimArray.from_shape(space, shape, dtype=dtype) + return W_NDimArray.from_shape(space, shape, dtype=dtype, zero=zero) + +def empty(space, w_shape, w_dtype=None, w_order=None): + return _zeros_or_empty(space, w_shape, w_dtype, w_order, zero=False) + +def zeros(space, w_shape, w_dtype=None, w_order=None): + return _zeros_or_empty(space, w_shape, w_dtype, w_order, zero=True) @unwrap_spec(subok=bool) @@ -148,7 +154,8 @@ if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = descriptor.variable_dtype(space, dtype.char + '1') return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, - w_instance=w_a if subok else None) + w_instance=w_a if subok else None, + zero=False) def _fromstring_text(space, s, count, sep, length, dtype): diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -2,6 +2,29 @@ class AppTestNumSupport(BaseNumpyAppTest): + def test_zeros(self): + from numpypy import zeros + a = zeros(3) + assert len(a) == 3 + assert a[0] == a[1] == a[2] == 0 + + def test_empty(self): + from numpypy import empty + import gc + for i in range(1000): + a = empty(3) + assert len(a) == 3 + if not (a[0] == a[1] == a[2] == 0): + break # done + a[0] = 1.23 + a[1] = 4.56 + a[2] = 7.89 + del a + gc.collect() + else: + raise AssertionError( + "empty() returned a zeroed out array every time") + def test_where(self): from numpypy import where, ones, zeros, array a = [1, 2, 3, 0, -3] diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -11,7 +11,7 @@ class MockDtype(object): class itemtype(object): @staticmethod - def malloc(size): + def malloc(size, zero=True): return None def __init__(self): diff --git a/pypy/module/micronumpy/types.py 
b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -117,8 +117,11 @@ def __repr__(self): return self.__class__.__name__ - def malloc(self, size): - return alloc_raw_storage(size, track_allocation=False, zero=True) + def malloc(self, size, zero=True): + if zero: + return alloc_raw_storage(size, track_allocation=False, zero=True) + else: + return alloc_raw_storage(size, track_allocation=False, zero=False) class Primitive(object): _mixin_ = True diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -578,13 +578,15 @@ except OperationError, e: # fall back to the original byte string result_w[i] = w_bytes + return space.newlist(result_w) else: dirname = space.str0_w(w_dirname) result = rposix.listdir(dirname) - result_w = [space.wrap(s) for s in result] + # The list comprehension is a workaround for an obscure translation + # bug. + return space.newlist_bytes([x for x in result]) except OSError, e: raise wrap_oserror2(space, e, w_dirname) - return space.newlist(result_w) def pipe(space): "Create a pipe. Returns (read_end, write_end)." 
diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -391,7 +391,8 @@ import sys if hasattr(sys, "getwindowsversion"): v = sys.getwindowsversion() - assert isinstance(v, tuple) + if '__pypy__' in sys.builtin_module_names: + assert isinstance(v, tuple) assert len(v) == 5 assert isinstance(v[0], int) assert isinstance(v[1], int) @@ -419,6 +420,10 @@ if hasattr(sys, "winver"): assert sys.winver == sys.version[:3] + def test_dllhandle(self): + import sys + assert hasattr(sys, 'dllhandle') == (sys.platform == 'win32') + def test_dlopenflags(self): import sys if hasattr(sys, "setdlopenflags"): @@ -486,7 +491,8 @@ assert isinstance(sys.version, basestring) assert isinstance(sys.warnoptions, list) vi = sys.version_info - assert isinstance(vi, tuple) + if '__pypy__' in sys.builtin_module_names: + assert isinstance(vi, tuple) assert len(vi) == 5 assert isinstance(vi[0], int) assert isinstance(vi[1], int) @@ -512,6 +518,8 @@ def test_pypy_attributes(self): import sys + if '__pypy__' not in sys.builtin_module_names: + skip("only on PyPy") assert isinstance(sys.pypy_objspaceclass, str) vi = sys.pypy_version_info assert isinstance(vi, tuple) @@ -528,10 +536,14 @@ def test_subversion(self): import sys + if '__pypy__' not in sys.builtin_module_names: + skip("only on PyPy") assert sys.subversion == ('PyPy', '', '') def test__mercurial(self): import sys, re + if '__pypy__' not in sys.builtin_module_names: + skip("only on PyPy") project, hgtag, hgid = sys._mercurial assert project == 'PyPy' # the tag or branch may be anything, including the empty string diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -233,8 +233,6 @@ def get_dllhandle(space): if not space.config.objspace.usemodules.cpyext: return space.wrap(0) - if not space.config.objspace.usemodules._rawffi: - return space.wrap(0) 
return _get_dllhandle(space) @@ -243,11 +241,14 @@ from pypy.module.cpyext.api import State handle = space.fromcache(State).get_pythonapi_handle() - # Make a dll object with it - from pypy.module._rawffi.interp_rawffi import W_CDLL - from rpython.rlib.clibffi import RawCDLL - cdll = RawCDLL(handle) - return space.wrap(W_CDLL(space, "python api", cdll)) + # It used to be a CDLL + # from pypy.module._rawffi.interp_rawffi import W_CDLL + # from rpython.rlib.clibffi import RawCDLL + # cdll = RawCDLL(handle) + # return space.wrap(W_CDLL(space, "python api", cdll)) + # Provide a cpython-compatible int + from rpython.rtyper.lltypesystem import lltype, rffi + return space.wrap(rffi.cast(lltype.Signed, handle)) def getsizeof(space, w_object, w_default=None): """Not implemented on PyPy.""" diff --git a/pypy/module/thread/test/support.py b/pypy/module/thread/test/support.py --- a/pypy/module/thread/test/support.py +++ b/pypy/module/thread/test/support.py @@ -44,6 +44,7 @@ spaceconfig = dict(usemodules=('thread', 'rctime', 'signal')) def setup_class(cls): + cls.w_runappdirect = cls.space.wrap(cls.runappdirect) if cls.runappdirect: def plain_waitfor(self, condition, delay=1): adaptivedelay = 0.04 diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py --- a/pypy/module/thread/test/test_lock.py +++ b/pypy/module/thread/test/test_lock.py @@ -57,8 +57,34 @@ assert lock.acquire() is True assert lock.acquire(False) is False raises(TypeError, lock.acquire, True, timeout=.1) - lock._py3k_acquire(True, timeout=.01) - lock._py3k_acquire(True, .01) + if hasattr(lock, '_py3k_acquire'): + lock._py3k_acquire(True, timeout=.01) + lock._py3k_acquire(True, .01) + else: + assert self.runappdirect, "missing lock._py3k_acquire()" + + def test_ping_pong(self): + # The purpose of this test is that doing a large number of ping-pongs + # between two threads, using locks, should complete in a reasonable + # time on a translated pypy with -A. 
If the GIL logic causes too + # much sleeping, then it will fail. + import thread, time + COUNT = 100000 if self.runappdirect else 50 + lock1 = thread.allocate_lock() + lock2 = thread.allocate_lock() + def fn(): + for i in range(COUNT): + lock1.acquire() + lock2.release() + lock2.acquire() + print "STARTING" + start = time.time() + thread.start_new_thread(fn, ()) + for i in range(COUNT): + lock2.acquire() + lock1.release() + stop = time.time() + assert stop - start < 30.0 # ~0.6 sec on pypy-c-jit def test_compile_lock(): diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -421,14 +421,19 @@ assert expected_length >= 0 return self.fixedview(w_obj, expected_length, unroll=True) + def listview_no_unpack(self, w_obj): + if type(w_obj) is W_ListObject: + return w_obj.getitems() + elif isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): + return w_obj.getitems_copy() + elif isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): + return w_obj.getitems() + else: + return None + def listview(self, w_obj, expected_length=-1): - if type(w_obj) is W_ListObject: - t = w_obj.getitems() - elif isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): - t = w_obj.getitems_copy() - elif isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): - t = w_obj.getitems() - else: + t = self.listview_no_unpack(w_obj) + if t is None: return ObjSpace.unpackiterable(self, w_obj, expected_length) if expected_length != -1 and len(t) != expected_length: raise self._wrap_expected_length(expected_length, len(t)) diff --git a/pypy/tool/gcdump.py b/pypy/tool/gcdump.py --- a/pypy/tool/gcdump.py +++ b/pypy/tool/gcdump.py @@ -43,7 +43,7 @@ def print_summary(self): items = self.summary.items() - items.sort(key=lambda(typenum, stat): stat[1]) # sort by totalsize + items.sort(key=lambda (typenum, stat): stat[1]) # sort by totalsize totalsize = 0 for 
typenum, stat in items: totalsize += stat[1] diff --git a/rpython/conftest.py b/rpython/conftest.py --- a/rpython/conftest.py +++ b/rpython/conftest.py @@ -1,10 +1,8 @@ -from os.path import * import py, pytest from rpython.tool import leakfinder pytest_plugins = 'rpython.tool.pytest.expecttest' -cdir = realpath(join(dirname(__file__), 'translator', 'c')) option = None def braindead_deindent(self): diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -73,11 +73,14 @@ result = MODEL_X86_64 else: assert sys.maxint == 2**31-1 - from rpython.jit.backend.x86.detect_sse2 import detect_sse2 - if detect_sse2(): + from rpython.jit.backend.x86 import detect_sse2 + if detect_sse2.detect_sse2(): result = MODEL_X86 else: result = MODEL_X86_NO_SSE2 + if detect_sse2.detect_x32_mode(): + raise ProcessorAutodetectError( + 'JITting in x32 mode is not implemented') # if result.startswith('arm'): from rpython.jit.backend.arm.detect import detect_float diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -396,16 +396,6 @@ #op = op.copy_and_change(rop.SETFIELD_RAW) self.newops.append(op) - def handle_write_barrier_setinteriorfield(self, op): - val = op.getarg(0) - if val not in self.write_barrier_applied: - v = op.getarg(2) - if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and - bool(v.value)): # store a non-NULL - self.gen_write_barrier(val) - #op = op.copy_and_change(rop.SETINTERIORFIELD_RAW) - self.newops.append(op) - def handle_write_barrier_setarrayitem(self, op): val = op.getarg(0) if val not in self.write_barrier_applied: @@ -413,9 +403,11 @@ if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and bool(v.value)): # store a non-NULL self.gen_write_barrier_array(val, op.getarg(1)) - #op = 
op.copy_and_change(rop.SETARRAYITEM_RAW) + #op = op.copy_and_change(rop.SET{ARRAYITEM,INTERIORFIELD}_RAW) self.newops.append(op) + handle_write_barrier_setinteriorfield = handle_write_barrier_setarrayitem + def gen_write_barrier(self, v_base): write_barrier_descr = self.gc_ll_descr.write_barrier_descr args = [v_base] diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -669,7 +669,7 @@ jump(p1, p2) """, """ [p1, p2] - cond_call_gc_wb(p1, descr=wbdescr) + cond_call_gc_wb_array(p1, 0, descr=wbdescr) setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr) jump(p1, p2) """, interiorzdescr=interiorzdescr) diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -10,6 +10,9 @@ from rpython.jit.backend.test.support import CCompiledMixin from rpython.jit.codewriter.policy import StopAtXPolicy from rpython.config.config import ConfigError +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.lltypesystem import lltype, rffi + class TranslationTest(CCompiledMixin): CPUClass = getcpuclass() @@ -25,6 +28,7 @@ # - floats neg and abs # - threadlocalref_get # - get_errno, set_errno + # - llexternal with macro=True class Frame(object): _virtualizable_ = ['i'] @@ -36,9 +40,15 @@ pass t = ThreadLocalReference(Foo) - @dont_look_inside - def myabs(x): - return abs(x) + eci = ExternalCompilationInfo(post_include_bits=[''' +#define pypy_my_fabs(x) fabs(x) +''']) + myabs1 = rffi.llexternal('pypy_my_fabs', [lltype.Float], + lltype.Float, macro=True, releasegil=False, + compilation_info=eci) + myabs2 = rffi.llexternal('pypy_my_fabs', [lltype.Float], + lltype.Float, macro=True, 
releasegil=True, + compilation_info=eci) jitdriver = JitDriver(greens = [], reds = ['total', 'frame', 'j'], @@ -61,7 +71,7 @@ frame.i -= 1 j *= -0.712 if j + (-j): raise ValueError - k = myabs(j) + k = myabs1(myabs2(j)) if k - abs(j): raise ValueError if k - abs(-j): raise ValueError if t.get().nine != 9: raise ValueError @@ -69,7 +79,6 @@ if rposix.get_errno() != total: raise ValueError return chr(total % 253) # - from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.libffi import types, CDLL, ArgChain from rpython.rlib.test.test_clibffi import get_libm_name libm_name = get_libm_name(sys.platform) diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -132,7 +132,6 @@ self.mc.ADD(ebp, imm(1)) # ebp any more; and ignore 'fastgil' def move_real_result_and_call_reacqgil_addr(self, fastgil): - from rpython.jit.backend.x86.assembler import heap from rpython.jit.backend.x86 import rx86 # # check if we need to call the reacqgil() function or not diff --git a/rpython/jit/backend/x86/detect_sse2.py b/rpython/jit/backend/x86/detect_sse2.py --- a/rpython/jit/backend/x86/detect_sse2.py +++ b/rpython/jit/backend/x86/detect_sse2.py @@ -1,3 +1,4 @@ +import sys from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rmmap import alloc, free @@ -18,9 +19,26 @@ free(data, 4096) return bool(code & (1<<25)) and bool(code & (1<<26)) +def detect_x32_mode(): + data = alloc(4096) + pos = 0 # 32-bit 64-bit / x32 + for c in ("\x48" # DEC EAX + "\xB8\xC8\x00\x00\x00"# MOV EAX, 200 MOV RAX, 0x40404040000000C8 + "\x40\x40\x40\x40" # 4x INC EAX + "\xC3"): # RET RET + data[pos] = c + pos += 1 + fnptr = rffi.cast(lltype.Ptr(lltype.FuncType([], lltype.Signed)), data) + code = fnptr() + free(data, 4096) + assert code in (200, 204, 0x40404040000000C8) + return code == 200 + if __name__ == '__main__': if detect_sse2(): print 'Processor 
supports sse2.' else: print 'Missing processor support for sse2.' + if detect_x32_mode(): + print 'Process is running in "x32" mode.' diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -499,7 +499,7 @@ 'SETARRAYITEM_GC/3d', 'SETARRAYITEM_RAW/3d', 'SETINTERIORFIELD_GC/3d', - 'SETINTERIORFIELD_RAW/3d', # only used by llsupport/rewrite.py + 'SETINTERIORFIELD_RAW/3d', # right now, only used by tests 'RAW_STORE/3d', 'SETFIELD_GC/2d', 'SETFIELD_RAW/2d', diff --git a/rpython/memory/test/snippet.py b/rpython/memory/test/snippet.py --- a/rpython/memory/test/snippet.py +++ b/rpython/memory/test/snippet.py @@ -47,22 +47,33 @@ class State: pass state = State() + def age_of(c): + return state.age[ord(c) - ord('a')] + def set_age_of(c, newvalue): + # NB. this used to be a dictionary, but setting into a dict + # consumes memory. This has the effect that this test's + # __del__ methods can consume more memory and potentially + # cause another collection. This would result in objects + # being unexpectedly destroyed at the same 'state.time'. 
+ state.age[ord(c) - ord('a')] = newvalue + class A: def __init__(self, key): self.key = key self.refs = [] def __del__(self): - assert state.age[self.key] == -1 - state.age[self.key] = state.time + from rpython.rlib.debug import debug_print + debug_print("DEL:", self.key) + assert age_of(self.key) == -1 + set_age_of(self.key, state.time) state.progress = True def build_example(input): state.time = 0 - state.age = {} + state.age = [-1] * len(letters) vertices = {} for c in letters: vertices[c] = A(c) - state.age[c] = -1 for c, d in input: vertices[c].refs.append(vertices[d]) @@ -72,6 +83,8 @@ input, components, strict = examples[i] build_example(input) while state.time < len(letters): + from rpython.rlib.debug import debug_print + debug_print("STATE.TIME:", state.time) state.progress = False llop.gc__collect(lltype.Void) if not state.progress: @@ -80,16 +93,16 @@ # summarize the finalization order lst = [] for c in letters: - lst.append('%s:%d' % (c, state.age[c])) + lst.append('%s:%d' % (c, age_of(c))) summary = ', '.join(lst) # check that all instances have been finalized - if -1 in state.age.values(): + if -1 in state.age: return error(i, summary, "not all instances finalized") # check that if a -> b and a and b are not in the same # strong component, then a is finalized strictly before b for c, d in strict: - if state.age[c] >= state.age[d]: + if age_of(c) >= age_of(d): return error(i, summary, "%s should be finalized before %s" % (c, d)) @@ -98,7 +111,7 @@ for component in components: seen = {} for c in component: - age = state.age[c] + age = age_of(c) if age in seen: d = seen[age] return error(i, summary, diff --git a/rpython/rlib/_rffi_stacklet.py b/rpython/rlib/_rffi_stacklet.py --- a/rpython/rlib/_rffi_stacklet.py +++ b/rpython/rlib/_rffi_stacklet.py @@ -3,7 +3,7 @@ from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.tool import rffi_platform from rpython.rlib.rarithmetic import is_emulated_long -from rpython.conftest 
import cdir +from rpython.translator import cdir cdir = py.path.local(cdir) diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -493,10 +493,16 @@ getnameinfo = external('getnameinfo', [sockaddr_ptr, socklen_t, CCHARP, size_t, CCHARP, size_t, rffi.INT], rffi.INT) -htonl = external('htonl', [rffi.UINT], rffi.UINT, releasegil=False) -htons = external('htons', [rffi.USHORT], rffi.USHORT, releasegil=False) -ntohl = external('ntohl', [rffi.UINT], rffi.UINT, releasegil=False) -ntohs = external('ntohs', [rffi.USHORT], rffi.USHORT, releasegil=False) +if sys.platform.startswith("openbsd"): + htonl = external('htonl', [rffi.UINT], rffi.UINT, releasegil=False, macro=True) + htons = external('htons', [rffi.USHORT], rffi.USHORT, releasegil=False, macro=True) + ntohl = external('ntohl', [rffi.UINT], rffi.UINT, releasegil=False, macro=True) + ntohs = external('ntohs', [rffi.USHORT], rffi.USHORT, releasegil=False, macro=True) +else: + htonl = external('htonl', [rffi.UINT], rffi.UINT, releasegil=False) + htons = external('htons', [rffi.USHORT], rffi.USHORT, releasegil=False) + ntohl = external('ntohl', [rffi.UINT], rffi.UINT, releasegil=False) + ntohs = external('ntohs', [rffi.USHORT], rffi.USHORT, releasegil=False) if _POSIX: inet_aton = external('inet_aton', [CCHARP, lltype.Ptr(in_addr)], diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py --- a/rpython/rlib/clibffi.py +++ b/rpython/rlib/clibffi.py @@ -15,7 +15,7 @@ from rpython.rlib.objectmodel import specialize from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform -from rpython.conftest import cdir +from rpython.translator import cdir from platform import machine import py import os diff --git a/rpython/rlib/rdtoa.py b/rpython/rlib/rdtoa.py --- a/rpython/rlib/rdtoa.py +++ b/rpython/rlib/rdtoa.py @@ -1,7 +1,7 @@ from __future__ import with_statement from 
rpython.rlib import rfloat from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder diff --git a/rpython/rlib/rgil.py b/rpython/rlib/rgil.py --- a/rpython/rlib/rgil.py +++ b/rpython/rlib/rgil.py @@ -1,5 +1,5 @@ import py -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rtyper.lltypesystem import lltype, llmemory, rffi diff --git a/rpython/rlib/rsignal.py b/rpython/rlib/rsignal.py --- a/rpython/rlib/rsignal.py +++ b/rpython/rlib/rsignal.py @@ -1,7 +1,7 @@ import signal as cpy_signal import sys import py -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -15,17 +15,18 @@ # It's unclear if makefile() and SSL support belong here or only as # app-level code for PyPy. +from rpython.rlib import _rsocket_rffi as _c, jit, rgc from rpython.rlib.objectmodel import instantiate, keepalive_until_here -from rpython.rlib import _rsocket_rffi as _c from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.rthread import dummy_lock from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rffi import sizeof, offsetof -INVALID_SOCKET = _c.INVALID_SOCKET -from rpython.rlib import jit + + # Usage of @jit.dont_look_inside in this file is possibly temporary # and only because some lltypes declared in _rsocket_rffi choke the # JIT's codewriter right now (notably, FixedSizeArray). 
+INVALID_SOCKET = _c.INVALID_SOCKET def mallocbuf(buffersize): @@ -86,6 +87,7 @@ self.addr_p = addr self.addrlen = addrlen + @rgc.must_be_light_finalizer def __del__(self): if self.addr_p: lltype.free(self.addr_p, flavor='raw', track_allocation=False) @@ -493,8 +495,8 @@ class RSocket(object): """RPython-level socket object. """ - _mixin_ = True # for interp_socket.py fd = _c.INVALID_SOCKET + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fd=_c.INVALID_SOCKET): """Create a new socket.""" @@ -509,6 +511,7 @@ self.proto = proto self.timeout = defaults.timeout + @rgc.must_be_light_finalizer def __del__(self): fd = self.fd if fd != _c.INVALID_SOCKET: diff --git a/rpython/rlib/rstack.py b/rpython/rlib/rstack.py --- a/rpython/rlib/rstack.py +++ b/rpython/rlib/rstack.py @@ -10,7 +10,7 @@ from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo # ____________________________________________________________ diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -1,6 +1,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype, llmemory from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir +from rpython.translator import cdir import py from rpython.rlib import jit, rgc from rpython.rlib.debug import ll_assert @@ -59,7 +59,7 @@ c_thread_acquirelock = llexternal('RPyThreadAcquireLock', [TLOCKP, rffi.INT], rffi.INT, releasegil=True) # release the GIL -c_thread_acquirelock_timed = llexternal('RPyThreadAcquireLockTimed', +c_thread_acquirelock_timed = llexternal('RPyThreadAcquireLockTimed', [TLOCKP, rffi.LONGLONG, rffi.INT], rffi.INT, releasegil=True) # release the GIL diff --git a/rpython/rtyper/lltypesystem/module/ll_math.py 
b/rpython/rtyper/lltypesystem/module/ll_math.py --- a/rpython/rtyper/lltypesystem/module/ll_math.py +++ b/rpython/rtyper/lltypesystem/module/ll_math.py @@ -3,7 +3,7 @@ import py import sys -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.rlib import jit, rposix from rpython.rlib.rfloat import INFINITY, NAN, isfinite, isinf, isnan from rpython.rtyper.lltypesystem import lltype, rffi diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py --- a/rpython/rtyper/lltypesystem/rdict.py +++ b/rpython/rtyper/lltypesystem/rdict.py @@ -540,18 +540,21 @@ # avoid extra branches. def ll_dict_resize(d): - old_entries = d.entries - old_size = len(old_entries) # make a 'new_size' estimate and shrink it if there are many # deleted entry markers. See CPython for why it is a good idea to # quadruple the dictionary size as long as it's not too big. num_items = d.num_items + 1 if num_items > 50000: new_estimate = num_items * 2 else: new_estimate = num_items * 4 + _ll_dict_resize_to(d, new_estimate) +ll_dict_resize.oopspec = 'dict.resize(d)' + +def _ll_dict_resize_to(d, new_estimate): new_size = DICT_INITSIZE while new_size <= new_estimate: new_size *= 2 - # + old_entries = d.entries + old_size = len(d.entries) d.entries = lltype.typeOf(old_entries).TO.allocate(new_size) d.num_items = 0 d.resize_counter = new_size * 2 @@ -563,7 +566,6 @@ ll_dict_insertclean(d, entry.key, entry.value, hash) i += 1 old_entries.delete() -ll_dict_resize.oopspec = 'dict.resize(d)' # ------- a port of CPython's dictobject.c's lookdict implementation ------- PERTURB_SHIFT = 5 @@ -816,6 +818,16 @@ ll_clear.oopspec = 'dict.clear(d)' def ll_update(dic1, dic2): + # Prescale 'dic1', assuming that most items don't collide. + # If this assumption is false, 'dic1' becomes at most two times too large. 
+ # * dic2.num_items = upper bound on the number of items added + # * (dic1.resize_counter - 1) // 3 = room left in dic1 + # so, if dic2 has 1 item, we need dic1.resize_counter > 3 + # if dic2 has 2 items we need dic1.resize_counter > 6 etc. + if not (dic1.resize_counter > dic2.num_items * 3): + new_estimate = (dic1.num_items + dic2.num_items) * 2 + _ll_dict_resize_to(dic1, new_estimate) + # entries = dic2.entries d2len = len(entries) i = 0 diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -95,6 +95,8 @@ name, macro, ext_type, compilation_info) else: _callable = ll2ctypes.LL2CtypesCallable(ext_type, calling_conv) + else: From noreply at buildbot.pypy.org Thu Jul 3 16:52:12 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 3 Jul 2014 16:52:12 +0200 (CEST) Subject: [pypy-commit] pypy scalar-operations: Re-enable important test Message-ID: <20140703145212.D93B51D3493@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: scalar-operations Changeset: r72328:210fb8550c7c Date: 2014-07-03 15:51 +0100 http://bitbucket.org/pypy/pypy/changeset/210fb8550c7c/ Log: Re-enable important test diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -30,6 +30,7 @@ """) def test_array_getitem_accumulate(self): + """Check that operations/ufuncs on array items are jitted correctly""" def main(): import _numpypy.multiarray as np arr = np.zeros((300, 300)) @@ -43,7 +44,6 @@ log = self.run(main, []) assert log.result == 0 loop, = log.loops_by_filename(self.filepath) - skip('used to pass on 69421-f3e717c94913') assert loop.match(""" i81 = int_lt(i76, 300) guard_true(i81, descr=...) 
From noreply at buildbot.pypy.org Thu Jul 3 20:03:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Jul 2014 20:03:38 +0200 (CEST) Subject: [pypy-commit] pypy default: Tweak to dict.update(). Message-ID: <20140703180338.646DF1D34B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72329:b5a2e5eb59dc Date: 2014-07-03 12:36 +0200 http://bitbucket.org/pypy/pypy/changeset/b5a2e5eb59dc/ Log: Tweak to dict.update(). diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -492,6 +492,23 @@ def view_as_kwargs(self, w_dict): return (None, None) + def getiterkeys(self, w_dict): + raise NotImplementedError + + def getitervalues(self, w_dict): + raise NotImplementedError + + def getiteritems(self, w_dict): + raise NotImplementedError + + def rev_update1_dict_dict(self, w_dict, w_updatedict): + iteritems = self.iteritems(w_dict) + while True: + w_key, w_value = iteritems.next_item() + if w_key is None: + break + w_updatedict.setitem(w_key, w_value) + class EmptyDictStrategy(DictStrategy): erase, unerase = rerased.new_erasing_pair("empty") @@ -593,11 +610,13 @@ # ---------- iterator interface ---------------- def getiterkeys(self, w_dict): - return iter([None]) - getitervalues = getiterkeys + return iter([]) + + def getitervalues(self, w_dict): + return iter([]) def getiteritems(self, w_dict): - return iter([(None, None)]) + return iter([]) # Iterator Implementation base classes @@ -725,9 +744,29 @@ def iteritems(self, w_dict): return IterClassItems(self.space, self, w_dict) + + @jit.look_inside_iff(lambda self, w_dict, w_updatedict: + w_dict_unrolling_heuristic(w_dict)) + def rev_update1_dict_dict(self, w_dict, w_updatedict): + if override_next_item is not None: + # this is very similar to the general version, but the difference + # is that it is specialized to call a specific next_item() + iteritems = 
IterClassItems(self.space, self, w_dict) + while True: + w_key, w_value = iteritems.next_item() + if w_key is None: + break + w_updatedict.setitem(w_key, w_value) + else: + for key, value in self.getiteritems(w_dict): + w_key = wrapkey(self.space, key) + w_value = wrapvalue(self.space, value) + w_updatedict.setitem(w_key, w_value) + dictimpl.iterkeys = iterkeys dictimpl.itervalues = itervalues dictimpl.iteritems = iteritems + dictimpl.rev_update1_dict_dict = rev_update1_dict_dict create_iterator_classes(EmptyDictStrategy) @@ -1063,15 +1102,8 @@ update1_keys(space, w_dict, w_data, data_w) - at jit.look_inside_iff(lambda space, w_dict, w_data: - w_dict_unrolling_heuristic(w_data)) def update1_dict_dict(space, w_dict, w_data): - iterator = w_data.iteritems() - while True: - w_key, w_value = iterator.next_item() - if w_key is None: - break - w_dict.setitem(w_key, w_value) + w_data.strategy.rev_update1_dict_dict(w_data, w_dict) def update1_pairs(space, w_dict, data_w): From noreply at buildbot.pypy.org Thu Jul 3 20:03:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Jul 2014 20:03:40 +0200 (CEST) Subject: [pypy-commit] pypy default: For dict.update(), pre-scale the dictionary from RPython code in Message-ID: <20140703180340.9C7701D34B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72330:0a347de43469 Date: 2014-07-03 17:52 +0200 http://bitbucket.org/pypy/pypy/changeset/0a347de43469/ Log: For dict.update(), pre-scale the dictionary from RPython code in dictmultiobject.py. 
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1,8 +1,9 @@ """The builtin dict implementation""" -from rpython.rlib import jit, rerased +from rpython.rlib import jit, rerased, objectmodel from rpython.rlib.debug import mark_dict_non_null from rpython.rlib.objectmodel import newlist_hint, r_dict, specialize +from rpython.rlib.unroll import SpecTag from rpython.tool.sourcetools import func_renamer, func_with_new_name from pypy.interpreter.baseobjspace import W_Root @@ -509,6 +510,9 @@ break w_updatedict.setitem(w_key, w_value) + def prepare_update(self, w_dict, num_extra): + pass + class EmptyDictStrategy(DictStrategy): erase, unerase = rerased.new_erasing_pair("empty") @@ -748,20 +752,32 @@ @jit.look_inside_iff(lambda self, w_dict, w_updatedict: w_dict_unrolling_heuristic(w_dict)) def rev_update1_dict_dict(self, w_dict, w_updatedict): + # the logic is to call prepare_dict_update() after the first setitem(): + # it gives the w_updatedict a chance to switch its strategy. 
if override_next_item is not None: # this is very similar to the general version, but the difference # is that it is specialized to call a specific next_item() iteritems = IterClassItems(self.space, self, w_dict) + spec = _SPEC1 while True: w_key, w_value = iteritems.next_item() if w_key is None: break w_updatedict.setitem(w_key, w_value) + if spec is _SPEC1: + spec = _SPEC2 + w_updatedict.strategy.prepare_update(w_updatedict, + w_dict.length() - 1) else: + spec = _SPEC1 for key, value in self.getiteritems(w_dict): w_key = wrapkey(self.space, key) w_value = wrapvalue(self.space, value) w_updatedict.setitem(w_key, w_value) + if spec is _SPEC1: + spec = _SPEC2 + w_updatedict.strategy.prepare_update(w_updatedict, + w_dict.length() - 1) dictimpl.iterkeys = iterkeys dictimpl.itervalues = itervalues @@ -770,6 +786,9 @@ create_iterator_classes(EmptyDictStrategy) +_SPEC1 = SpecTag() +_SPEC2 = SpecTag() + # concrete subclasses of the above @@ -884,6 +903,10 @@ def getiteritems(self, w_dict): return self.unerase(w_dict.dstorage).iteritems() + def prepare_update(self, w_dict, num_extra): + objectmodel.prepare_dict_update(self.unerase(w_dict.dstorage), + num_extra) + class ObjectDictStrategy(AbstractTypedStrategy, DictStrategy): erase, unerase = rerased.new_erasing_pair("object") diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -388,6 +388,9 @@ return SomeImpossibleValue() dct1.dictdef.union(dct2.dictdef) + def method__prepare_dict_update(dct, num): + pass + def method_keys(self): return getbookkeeper().newlist(self.dictdef.read_key()) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -740,6 +740,14 @@ return repr(self.key) +def prepare_dict_update(dict, n_elements): + """RPython hint that the given dict (or r_dict) will soon be + enlarged by n_elements.""" + if we_are_translated(): + 
dict._prepare_dict_update(n_elements) + # ^^ call an extra method that doesn't exist before translation + + # ____________________________________________________________ def import_from_mixin(M, special_methods=['__init__', '__del__']): diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -321,6 +321,14 @@ res = self.interpret(g, [3]) assert res == 77 + def test_prepare_dict_update(self): + def g(n): + d = {} + prepare_dict_update(d, n) + return 42 + res = self.interpret(g, [3]) + assert res == 42 # "did not crash" + def test_compute_hash(self): class Foo(object): pass diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py --- a/rpython/rtyper/lltypesystem/rdict.py +++ b/rpython/rtyper/lltypesystem/rdict.py @@ -286,6 +286,11 @@ hop.exception_cannot_occur() return hop.gendirectcall(ll_update, v_dic1, v_dic2) + def rtype_method__prepare_dict_update(self, hop): + v_dict, v_num = hop.inputargs(self, lltype.Signed) + hop.exception_cannot_occur() + hop.gendirectcall(ll_prepare_dict_update, v_dict, v_num) + def _rtype_method_kvi(self, hop, ll_func): v_dic, = hop.inputargs(self) r_list = hop.r_result @@ -543,13 +548,14 @@ # make a 'new_size' estimate and shrink it if there are many # deleted entry markers. See CPython for why it is a good idea to # quadruple the dictionary size as long as it's not too big. - num_items = d.num_items + 1 - if num_items > 50000: new_estimate = num_items * 2 - else: new_estimate = num_items * 4 - _ll_dict_resize_to(d, new_estimate) + # (Quadrupling comes from '(d.num_items + d.num_items + 1) * 2' + # as long as num_items is not too large.) 
+ num_extra = min(d.num_items + 1, 30000) + _ll_dict_resize_to(d, num_extra) ll_dict_resize.oopspec = 'dict.resize(d)' -def _ll_dict_resize_to(d, new_estimate): +def _ll_dict_resize_to(d, num_extra): + new_estimate = (d.num_items + num_extra) * 2 new_size = DICT_INITSIZE while new_size <= new_estimate: new_size *= 2 @@ -818,16 +824,7 @@ ll_clear.oopspec = 'dict.clear(d)' def ll_update(dic1, dic2): - # Prescale 'dic1', assuming that most items don't collide. - # If this assumption is false, 'dic1' becomes at most two times too large. - # * dic2.num_items = upper bound on the number of items added - # * (dic1.resize_counter - 1) // 3 = room left in dic1 - # so, if dic2 has 1 item, we need dic1.resize_counter > 3 - # if dic2 has 2 items we need dic1.resize_counter > 6 etc. - if not (dic1.resize_counter > dic2.num_items * 3): - new_estimate = (dic1.num_items + dic2.num_items) * 2 - _ll_dict_resize_to(dic1, new_estimate) - # + ll_prepare_dict_update(dic1, dic2.num_items) entries = dic2.entries d2len = len(entries) i = 0 @@ -842,6 +839,16 @@ i += 1 ll_update.oopspec = 'dict.update(dic1, dic2)' +def ll_prepare_dict_update(d, num_extra): + # Prescale 'd' for 'num_extra' items, assuming that most items don't + # collide. If this assumption is false, 'd' becomes too large by at + # most 'num_extra'. The logic is based on: + # (d.resize_counter - 1) // 3 = room left in d + # so, if num_extra == 1, we need d.resize_counter > 3 + # if num_extra == 2, we need d.resize_counter > 6 etc. + jit.conditional_call(d.resize_counter <= num_extra * 3, + _ll_dict_resize_to, d, num_extra) + # this is an implementation of keys(), values() and items() # in a single function. # note that by specialization on func, three different From noreply at buildbot.pypy.org Thu Jul 3 20:03:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Jul 2014 20:03:42 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Abandon the merge: needs more work to fix e.g. 
the threadlocalref Message-ID: <20140703180342.3BED21D34B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72331:2aef0e942480 Date: 2014-07-03 18:16 +0200 http://bitbucket.org/pypy/pypy/changeset/2aef0e942480/ Log: Abandon the merge: needs more work to fix e.g. the threadlocalref and I don't want to do it right now :-/ From noreply at buildbot.pypy.org Thu Jul 3 20:03:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Jul 2014 20:03:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Import stm.rst from trunk again. (This checkin is also here to Message-ID: <20140703180343.DEA7D1D34B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72332:c65fdedbe74a Date: 2014-07-03 18:17 +0200 http://bitbucket.org/pypy/pypy/changeset/c65fdedbe74a/ Log: Import stm.rst from trunk again. (This checkin is also here to mark the current head of the stmgc-c7 branch.) diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -30,7 +30,8 @@ ``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ listed below, it should be in theory within 20%-50% slower than a -regular PyPy, comparing the JIT version in both cases. It is called +regular PyPy, comparing the JIT version in both cases (but see below!). +It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). @@ -90,6 +91,11 @@ * So far, small examples work fine, but there are still a few bugs. We're busy fixing them as we find them; feel free to `report bugs`_. +* It runs with an overhead as low as 20% on examples like "richards". + There are also other examples with higher overheads --currently up to + 2x for "translate.py"-- which we are still trying to understand. + One suspect is our partial GC implementation, see below. + * Currently limited to 1.5 GB of RAM (this is just a parameter in `core.h`__). 
Memory overflows are not correctly handled; they cause segfaults. @@ -105,9 +111,8 @@ * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. Programs allocating large - numbers of small objects that don't immediately die, as well as - programs that modify large lists or dicts, suffer from these missing - optimizations. + numbers of small objects that don't immediately die (surely a common + situation) suffer from these missing optimizations. * The GC has no support for destructors: the ``__del__`` method is never called (including on file objects, which won't be closed for you). From noreply at buildbot.pypy.org Thu Jul 3 20:03:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Jul 2014 20:03:46 +0200 (CEST) Subject: [pypy-commit] pypy default: Translation fixes Message-ID: <20140703180346.A7AEA1D34B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72333:bc07df429f7b Date: 2014-07-03 18:20 +0200 http://bitbucket.org/pypy/pypy/changeset/bc07df429f7b/ Log: Translation fixes diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -13,19 +13,21 @@ cache = space.fromcache(Cache) if cache.in_recursion: return - if space.is_true(cache.w_abort_hook): + w_abort_hook = cache.w_abort_hook + assert w_abort_hook is not None + if space.is_true(w_abort_hook): cache.in_recursion = True oplist_w = wrap_oplist(space, logops, operations) try: try: - space.call_function(cache.w_abort_hook, + space.call_function(w_abort_hook, space.wrap(jitdriver.name), wrap_greenkey(space, jitdriver, greenkey, greenkey_repr), space.wrap(Counters.counter_names[reason]), space.newlist(oplist_w) ) except OperationError, e: - e.write_unraisable(space, "jit hook ", cache.w_abort_hook) + e.write_unraisable(space, "jit hook ", w_abort_hook) finally: cache.in_recursion = False diff --git a/rpython/rlib/objectmodel.py 
b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -740,6 +740,7 @@ return repr(self.key) + at specialize.call_location() def prepare_dict_update(dict, n_elements): """RPython hint that the given dict (or r_dict) will soon be enlarged by n_elements.""" From noreply at buildbot.pypy.org Thu Jul 3 20:03:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Jul 2014 20:03:52 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: dummy merge, abandon 2aef0e942480 Message-ID: <20140703180352.9E71F1D34B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72334:d0483f8d8fcd Date: 2014-07-03 18:23 +0200 http://bitbucket.org/pypy/pypy/changeset/d0483f8d8fcd/ Log: dummy merge, abandon 2aef0e942480 From noreply at buildbot.pypy.org Thu Jul 3 20:03:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 3 Jul 2014 20:03:54 +0200 (CEST) Subject: [pypy-commit] pypy default: More attempts at translation fixes Message-ID: <20140703180354.64AAD1D34B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72335:d4f1606fbfe7 Date: 2014-07-03 20:02 +0200 http://bitbucket.org/pypy/pypy/changeset/d4f1606fbfe7/ Log: More attempts at translation fixes diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -56,6 +56,7 @@ jit hook won't be called for that. """ cache = space.fromcache(Cache) + assert w_hook is not None cache.w_compile_hook = w_hook cache.in_recursion = NonConstant(False) @@ -90,6 +91,7 @@ as attributes on JitLoopInfo object. 
""" cache = space.fromcache(Cache) + assert w_hook is not None cache.w_abort_hook = w_hook cache.in_recursion = NonConstant(False) diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -13,21 +13,19 @@ cache = space.fromcache(Cache) if cache.in_recursion: return - w_abort_hook = cache.w_abort_hook - assert w_abort_hook is not None - if space.is_true(w_abort_hook): + if space.is_true(cache.w_abort_hook): cache.in_recursion = True oplist_w = wrap_oplist(space, logops, operations) try: try: - space.call_function(w_abort_hook, + space.call_function(cache.w_abort_hook, space.wrap(jitdriver.name), wrap_greenkey(space, jitdriver, greenkey, greenkey_repr), space.wrap(Counters.counter_names[reason]), space.newlist(oplist_w) ) except OperationError, e: - e.write_unraisable(space, "jit hook ", w_abort_hook) + e.write_unraisable(space, "jit hook ", cache.w_abort_hook) finally: cache.in_recursion = False From noreply at buildbot.pypy.org Thu Jul 3 20:36:04 2014 From: noreply at buildbot.pypy.org (Corbin Simpson) Date: Thu, 3 Jul 2014 20:36:04 +0200 (CEST) Subject: [pypy-commit] pypy promote-unicode: Introduce promote_unicode(). Message-ID: <20140703183604.7309F1D34C3@cobra.cs.uni-duesseldorf.de> Author: Corbin Simpson Branch: promote-unicode Changeset: r72336:3489a054a745 Date: 2014-07-03 11:35 -0700 http://bitbucket.org/pypy/pypy/changeset/3489a054a745/ Log: Introduce promote_unicode(). This is a companion to promote_string() which promotes Unicode strings by value. To use, simply ``from rpython.rlib.jit import promote_unicode`` and then ``promote_unicode(any_unicode_string)``. A few tests are included, and a couple bits of testing mocks were improved to permit tests to not fail. 
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -571,6 +571,23 @@ op1 = SpaceOperation('str_guard_value', [op.args[0], c, descr], op.result) return [SpaceOperation('-live-', [], None), op1, None] + if (hints.get('promote_unicode') and + op.args[0].concretetype is not lltype.Void): + U = lltype.Ptr(rstr.UNICODE) + assert op.args[0].concretetype == U + self._register_extra_helper(EffectInfo.OS_UNIEQ_NONNULL, + "str.eq_nonnull", + [U, U], + lltype.Signed, + EffectInfo.EF_ELIDABLE_CANNOT_RAISE) + descr, p = self.callcontrol.callinfocollection.callinfo_for_oopspec( + EffectInfo.OS_UNIEQ_NONNULL) + # XXX this is fairly ugly way of creating a constant, + # however, callinfocollection has no better interface + c = Constant(p.adr.ptr, lltype.typeOf(p.adr.ptr)) + op1 = SpaceOperation('str_guard_value', [op.args[0], c, descr], + op.result) + return [SpaceOperation('-live-', [], None), op1, None] if hints.get('force_virtualizable'): return SpaceOperation('hint_force_virtualizable', [op.args[0]], None) if hints.get('force_no_const'): # for tests only diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -107,7 +107,7 @@ return True return False def callinfo_for_oopspec(self, oopspecindex): - assert oopspecindex == effectinfo.EffectInfo.OS_STREQ_NONNULL + # assert oopspecindex == effectinfo.EffectInfo.OS_STREQ_NONNULL class c: class adr: ptr = 1 @@ -1059,6 +1059,21 @@ assert op1.result == v2 assert op0.opname == '-live-' +def test_unicode_promote(): + PUNICODE = lltype.Ptr(rstr.UNICODE) + v1 = varoftype(PUNICODE) + v2 = varoftype(PUNICODE) + op = SpaceOperation('hint', + [v1, Constant({'promote_unicode': True}, lltype.Void)], + v2) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + 
op0, op1, _ = tr.rewrite_operation(op) + assert op1.opname == 'str_guard_value' + assert op1.args[0] == v1 + assert op1.args[2] == 'calldescr' + assert op1.result == v2 + assert op0.opname == '-live-' + def test_double_promote_str(): PSTR = lltype.Ptr(rstr.STR) v1 = varoftype(PSTR) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -60,6 +60,7 @@ * promote - promote the argument from a variable into a constant * promote_string - same, but promote string by *value* + * promote_unicode - same, but promote unicode string by *value* * access_directly - directly access a virtualizable, as a structure and don't treat it as a virtualizable * fresh_virtualizable - means that virtualizable was just allocated. @@ -79,6 +80,9 @@ def promote_string(x): return hint(x, promote_string=True) +def promote_unicode(x): + return hint(x, promote_unicode=True) + def dont_look_inside(func): """ Make sure the JIT does not trace inside decorated function (it becomes a call instead) From noreply at buildbot.pypy.org Thu Jul 3 23:06:38 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Thu, 3 Jul 2014 23:06:38 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: simplify the malloc method logic Message-ID: <20140703210638.0C00D1D3493@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72337:6dd33f47d74f Date: 2014-07-03 12:48 +0000 http://bitbucket.org/pypy/pypy/changeset/6dd33f47d74f/ Log: simplify the malloc method logic diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -151,14 +151,14 @@ assert not needs_finalizer itemsize = self.varsize_item_sizes(typeid) offset_to_length = self.varsize_offset_to_length(typeid) - if not hasattr(self, 'malloc_varsize'): + if self.malloc_zero_filled: malloc_varsize = self.malloc_varsize_clear else: malloc_varsize = self.malloc_varsize ref = 
malloc_varsize(typeid, length, size, itemsize, offset_to_length) else: - if not hasattr(self, 'malloc_fixedsize'): + if self.malloc_zero_filled: malloc_fixedsize = self.malloc_fixedsize_clear else: malloc_fixedsize = self.malloc_fixedsize diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -273,7 +273,11 @@ s_gcref = SomePtr(llmemory.GCREF) gcdata = self.gcdata translator = self.translator - if hasattr(GCClass, 'malloc_fixedsize_clear'): + #use the GC flag to find which malloc method to use + #malloc_zero_filled == Ture -> malloc_fixedsize/varsize_clear + #malloc_zero_filled == Flase -> malloc_fixedsize/varsize + malloc_fixedsize_meth = None + if GCClass.malloc_zero_filled: malloc_fixedsize_clear_meth = GCClass.malloc_fixedsize_clear.im_func self.malloc_fixedsize_clear_ptr = getfn( malloc_fixedsize_clear_meth, @@ -283,8 +287,13 @@ annmodel.SomeBool(), annmodel.SomeBool()], s_gcref, inline = False) + self.malloc_fixedsize_ptr = self.malloc_fixedsize_clear_ptr + self.malloc_varsize_ptr = getfn( + GCClass.malloc_varsize_clear.im_func, + [s_gc, s_typeid16] + + [annmodel.SomeInteger(nonneg=True) for i in range(4)], s_gcref) - if hasattr(GCClass, 'malloc_fixedsize'): + else: malloc_fixedsize_meth = GCClass.malloc_fixedsize.im_func self.malloc_fixedsize_ptr = getfn( malloc_fixedsize_meth, @@ -294,19 +303,11 @@ annmodel.SomeBool(), annmodel.SomeBool()], s_gcref, inline = False) - else: - malloc_fixedsize_meth = None - self.malloc_fixedsize_ptr = self.malloc_fixedsize_clear_ptr - if hasattr(GCClass, 'malloc_varsize'): - self.malloc_varsize_ptr = getfn( + self.malloc_varsize_ptr = getfn( GCClass.malloc_varsize.im_func, [s_gc, s_typeid16] + [annmodel.SomeInteger(nonneg=True) for i in range(4)], s_gcref) - else: - self.malloc_varsize_ptr = getfn( - GCClass.malloc_varsize_clear.im_func, - [s_gc, s_typeid16] - + 
[annmodel.SomeInteger(nonneg=True) for i in range(4)], s_gcref) + self.collect_ptr = getfn(GCClass.collect.im_func, [s_gc, annmodel.SomeInteger()], annmodel.s_None) self.can_move_ptr = getfn(GCClass.can_move.im_func, From noreply at buildbot.pypy.org Thu Jul 3 23:06:39 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Thu, 3 Jul 2014 23:06:39 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: passed the backend test Message-ID: <20140703210639.7398D1D3493@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72338:d3b83138e0d7 Date: 2014-07-03 17:03 +0000 http://bitbucket.org/pypy/pypy/changeset/d3b83138e0d7/ Log: passed the backend test diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -59,7 +59,7 @@ x += self.gcheaderbuilder.size_gc_header return x, tid - def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size, + def do_malloc_fixedsize(self, RESTYPE, type_id, size, has_finalizer, has_light_finalizer, contains_weakptr): assert not contains_weakptr @@ -70,7 +70,7 @@ self.record.append(("fixedsize", repr(size), tid, p)) return p - def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size, + def do_malloc_varsize(self, RESTYPE, type_id, length, size, itemsize, offset_to_length): p, tid = self._malloc(type_id, size + itemsize * length) (p + offset_to_length).signed[0] = length diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -279,7 +279,7 @@ malloc_fixedsize_meth = None if GCClass.malloc_zero_filled: malloc_fixedsize_clear_meth = GCClass.malloc_fixedsize_clear.im_func - self.malloc_fixedsize_clear_ptr = getfn( + self.malloc_fixedsize_ptr = getfn( malloc_fixedsize_clear_meth, [s_gc, s_typeid16, 
annmodel.SomeInteger(nonneg=True), @@ -287,7 +287,6 @@ annmodel.SomeBool(), annmodel.SomeBool()], s_gcref, inline = False) - self.malloc_fixedsize_ptr = self.malloc_fixedsize_clear_ptr self.malloc_varsize_ptr = getfn( GCClass.malloc_varsize_clear.im_func, [s_gc, s_typeid16] From noreply at buildbot.pypy.org Thu Jul 3 23:58:09 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Jul 2014 23:58:09 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Implement "yield from" opcode Message-ID: <20140703215809.5853A1D293B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72339:02025d9b1f7d Date: 2014-06-22 19:08 +0200 http://bitbucket.org/pypy/pypy/changeset/02025d9b1f7d/ Log: Implement "yield from" opcode diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -871,11 +871,12 @@ self.load_const(self.space.w_None) self.emit_op(ops.YIELD_VALUE) - def visit_YieldFrom(self, yie): - # XXX not correctly implemented. 
- self.update_position(yie.lineno) - yie.value.walkabout(self) - self.emit_op(ops.YIELD_VALUE) + def visit_YieldFrom(self, yfr): + self.update_position(yfr.lineno) + yfr.value.walkabout(self) + self.emit_op(ops.GET_ITER) + self.load_const(self.space.w_None) + self.emit_op(ops.YIELD_FROM) def visit_Num(self, num): self.update_position(num.lineno) diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -431,6 +431,10 @@ self.scope.note_yield(yie) ast.GenericASTVisitor.visit_Yield(self, yie) + def visit_YieldFrom(self, yfr): + self.scope.note_yield(yfr) + ast.GenericASTVisitor.visit_YieldFrom(self, yfr) + def visit_Global(self, glob): for name in glob.names: old_role = self.scope.lookup_role(name) diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -928,7 +928,6 @@ expr = self.get_first_expr("yield") assert isinstance(expr, ast.Yield) assert expr.value is None - assert expr.is_from == 0 expr = self.get_first_expr("yield x") assert isinstance(expr.value, ast.Name) assign = self.get_first_stmt("x = yield x") @@ -937,8 +936,8 @@ def test_yield_from(self): expr = self.get_first_expr("yield from x") + assert isinstance(expr, ast.YieldFrom) assert isinstance(expr.value, ast.Name) - assert expr.is_from == 1 def test_unaryop(self): unary_ops = ( diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -965,6 +965,15 @@ yield self.st, 'x = list(d for d in [1] or [])', 'x', [1] yield self.st, 'y = [d for d in [1] or []]', 'y', [1] + def test_yield_from(self): + test = """if 1: + def 
f(): + yield from range(3) + def g(): + return list(f()) + """ + yield self.st, test, "g()", range(3) + class AppTestCompiler: diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -401,6 +401,8 @@ self.WITH_CLEANUP(oparg, next_instr) elif opcode == opcodedesc.YIELD_VALUE.index: self.YIELD_VALUE(oparg, next_instr) + elif opcode == opcodedesc.YIELD_FROM.index: + self.YIELD_FROM(oparg, next_instr) else: self.MISSING_OPCODE(oparg, next_instr) @@ -1000,6 +1002,34 @@ def YIELD_VALUE(self, oparg, next_instr): raise Yield + def YIELD_FROM(self, oparg, next_instr): + space = self.space + w_value = self.popvalue() + w_gen = self.peekvalue() + try: + if space.is_none(w_value): + w_retval = space.next(w_gen) + else: + w_retval = space.call_method(w_gen, "send", w_value) + except OperationError as e: + if not e.match(self.space, self.space.w_StopIteration): + raise + self.popvalue() # Remove iter from stack + try: + w_value = space.getattr(e.get_w_value(space), space.wrap("value")) + except OperationError as e: + if not e.match(self.space, self.space.w_AttributeError): + raise + w_value = space.w_None + self.pushvalue(w_value) + return next_instr + else: + # iter remains on stack, w_retval is value to be yielded. + self.pushvalue(w_retval) + # and repeat... 
+ self.last_instr = self.last_instr - 1 + raise Yield + def jump_absolute(self, jumpto, ec): # this function is overridden by pypy.module.pypyjit.interp_jit check_nonneg(jumpto) From noreply at buildbot.pypy.org Thu Jul 3 23:58:10 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Jul 2014 23:58:10 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Make memoryview objects weakrefable Message-ID: <20140703215810.A72341D293B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72340:b82a4b6b39d3 Date: 2014-06-22 20:18 +0200 http://bitbucket.org/pypy/pypy/changeset/b82a4b6b39d3/ Log: Make memoryview objects weakrefable diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -7,7 +7,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app -from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr def _buffer_setitem(space, buf, w_index, w_obj): @@ -173,6 +173,7 @@ __repr__ = interp2app(W_MemoryView.descr_repr), __enter__ = interp2app(W_MemoryView.descr_enter), __exit__ = interp2app(W_MemoryView.descr_exit), + __weakref__ = make_weakref_descr(W_MemoryView), tobytes = interp2app(W_MemoryView.descr_tobytes), tolist = interp2app(W_MemoryView.descr_tolist), release = interp2app(W_MemoryView.descr_release), diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py --- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -87,6 +87,11 @@ def test_hash(self): raises(TypeError, "hash(memoryview(b'hello'))") + def test_weakref(self): + import weakref + m = memoryview(b'hello') + weakref.ref(m) + def test_getitem_only_ints(self): class MyInt(object): def 
__init__(self, x): From noreply at buildbot.pypy.org Thu Jul 3 23:58:12 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Jul 2014 23:58:12 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Memoryview objects are now hashable. Message-ID: <20140703215812.0EA731D293B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72341:b8bb27a1905f Date: 2014-06-22 20:34 +0200 http://bitbucket.org/pypy/pypy/changeset/b8bb27a1905f/ Log: Memoryview objects are now hashable. diff --git a/pypy/objspace/std/memoryobject.py b/pypy/objspace/std/memoryobject.py --- a/pypy/objspace/std/memoryobject.py +++ b/pypy/objspace/std/memoryobject.py @@ -4,6 +4,7 @@ import operator from rpython.rlib.buffer import Buffer, SubBuffer +from rpython.rlib.objectmodel import compute_hash from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app @@ -34,6 +35,7 @@ def __init__(self, buf): assert isinstance(buf, Buffer) self.buf = buf + self._hash = -1 def buffer_w(self, space, flags): self._check_released(space) @@ -142,6 +144,15 @@ else: return self.getrepr(space, u'memory') + def descr_hash(self, space): + if self._hash == -1: + self._check_released(space) + if not self.buf.readonly: + raise OperationError(space.w_ValueError, space.wrap( + "cannot hash writable memoryview object")) + self._hash = compute_hash(self.buf.as_str()) + return space.wrap(self._hash) + def descr_release(self, space): self.buf = None @@ -171,6 +182,7 @@ __ne__ = interp2app(W_MemoryView.descr_ne), __setitem__ = interp2app(W_MemoryView.descr_setitem), __repr__ = interp2app(W_MemoryView.descr_repr), + __hash__ = interp2app(W_MemoryView.descr_hash), __enter__ = interp2app(W_MemoryView.descr_enter), __exit__ = interp2app(W_MemoryView.descr_exit), __weakref__ = make_weakref_descr(W_MemoryView), diff --git a/pypy/objspace/std/test/test_memoryobject.py b/pypy/objspace/std/test/test_memoryobject.py 
--- a/pypy/objspace/std/test/test_memoryobject.py +++ b/pypy/objspace/std/test/test_memoryobject.py @@ -85,7 +85,7 @@ assert repr(memoryview(b'hello')).startswith(' Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72342:4deadf46db89 Date: 2014-06-22 20:41 +0200 http://bitbucket.org/pypy/pypy/changeset/4deadf46db89/ Log: mmap.error is OSError diff --git a/pypy/module/mmap/__init__.py b/pypy/module/mmap/__init__.py --- a/pypy/module/mmap/__init__.py +++ b/pypy/module/mmap/__init__.py @@ -8,7 +8,7 @@ 'ACCESS_WRITE': 'space.wrap(interp_mmap.ACCESS_WRITE)', 'ACCESS_COPY' : 'space.wrap(interp_mmap.ACCESS_COPY)', 'mmap': 'interp_mmap.W_MMap', - 'error': 'space.fromcache(interp_mmap.Cache).w_error', + 'error': 'space.w_OSError', } appleveldefs = { diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -283,10 +283,6 @@ ACCESS_WRITE = rmmap.ACCESS_WRITE ACCESS_COPY = rmmap.ACCESS_COPY -class Cache: - def __init__(self, space): - self.w_error = space.new_exception_class("mmap.error", - space.w_EnvironmentError) def mmap_error(space, e): if isinstance(e, RValueError): @@ -296,8 +292,7 @@ return OperationError(space.w_TypeError, space.wrap(e.message)) elif isinstance(e, OSError): - w_error = space.fromcache(Cache).w_error - return wrap_oserror(space, e, w_exception_class=w_error) + return wrap_oserror(space, e) else: # bogus 'e'? 
return OperationError(space.w_SystemError, space.wrap('%s' % e)) diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -31,9 +31,7 @@ assert isinstance(mmap.PROT_READ, int) assert isinstance(mmap.PROT_WRITE, int) - assert 'mmap.error' in str(mmap.error) - assert mmap.error is not EnvironmentError - assert issubclass(mmap.error, EnvironmentError) + assert mmap.error is OSError def test_args(self): from mmap import mmap From noreply at buildbot.pypy.org Thu Jul 3 23:58:14 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Jul 2014 23:58:14 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: BlockingIOError is now in the exceptions module. Message-ID: <20140703215814.AA51E1D293B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72343:0e4c3da62a4b Date: 2014-06-23 10:03 +0200 http://bitbucket.org/pypy/pypy/changeset/0e4c3da62a4b/ Log: BlockingIOError is now in the exceptions module. 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1807,6 +1807,7 @@ 'BaseException', 'BufferError', 'BytesWarning', + 'BlockingIOError', 'DeprecationWarning', 'EOFError', 'EnvironmentError', diff --git a/pypy/module/_io/__init__.py b/pypy/module/_io/__init__.py --- a/pypy/module/_io/__init__.py +++ b/pypy/module/_io/__init__.py @@ -7,7 +7,7 @@ interpleveldefs = { 'DEFAULT_BUFFER_SIZE': 'space.wrap(interp_iobase.DEFAULT_BUFFER_SIZE)', - 'BlockingIOError': 'interp_io.W_BlockingIOError', + 'BlockingIOError': 'space.w_BlockingIOError', 'UnsupportedOperation': 'space.fromcache(interp_io.Cache).w_unsupportedoperation', '_IOBase': 'interp_iobase.W_IOBase', diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -12,20 +12,18 @@ from pypy.module._io.interp_iobase import ( W_IOBase, DEFAULT_BUFFER_SIZE, convert_size, trap_eintr, check_readable_w, check_writable_w, check_seekable_w) -from pypy.module._io.interp_io import W_BlockingIOError from rpython.rlib import rthread STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) def make_write_blocking_error(space, written): - w_type = space.gettypeobject(W_BlockingIOError.typedef) w_value = space.call_function( - w_type, + space.w_BlockingIOError, space.wrap(rposix.get_errno()), space.wrap("write could not complete without blocking"), space.wrap(written)) - return OperationError(w_type, w_value) + return OperationError(space.w_BlockingIOError, w_value) class TryLock(object): @@ -734,11 +732,8 @@ try: self._writer_flush_unlocked(space) except OperationError, e: - if not e.match(space, space.gettypeobject( - W_BlockingIOError.typedef)): + if not e.match(space, space.w_BlockingIOError): raise - w_exc = e.get_w_value(space) - assert isinstance(w_exc, W_BlockingIOError) if self.readable: 
self._reader_reset_buf() # Make some place by shifting the buffer diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -16,25 +16,6 @@ "io.UnsupportedOperation", space.newtuple([space.w_ValueError, space.w_IOError])) -class W_BlockingIOError(W_IOError): - def __init__(self, space): - W_IOError.__init__(self, space) - self.written = 0 - - @unwrap_spec(written=int) - def descr_init(self, space, w_errno, w_strerror, written=0): - W_IOError.descr_init(self, space, [w_errno, w_strerror]) - self.written = written - -W_BlockingIOError.typedef = TypeDef( - 'BlockingIOError', W_IOError.typedef, - __doc__ = ("Exception raised when I/O would block on a non-blocking " - "I/O stream"), - __new__ = generic_new_descr(W_BlockingIOError), - __init__ = interp2app(W_BlockingIOError.descr_init), - characters_written = interp_attrproperty('written', W_BlockingIOError), - ) - DEFAULT_BUFFER_SIZE = 8 * 1024 @unwrap_spec(mode=str, buffering=int, diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py --- a/pypy/module/_io/test/test_io.py +++ b/pypy/module/_io/test/test_io.py @@ -56,7 +56,7 @@ import _io try: raise _io.BlockingIOError(42, "test blocking", 123) - except IOError as e: + except OSError as e: assert isinstance(e, _io.BlockingIOError) assert e.errno == 42 assert e.strerror == "test blocking" diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -73,9 +73,10 @@ """ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import (TypeDef, GetSetProperty, descr_get_dict, - descr_set_dict, descr_del_dict) -from pypy.interpreter.gateway import interp2app +from pypy.interpreter.typedef import ( + TypeDef, GetSetProperty, interp_attrproperty, + descr_get_dict, descr_set_dict, descr_del_dict) +from 
pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError, setup_context from pypy.interpreter.pytraceback import PyTraceback, check_traceback from rpython.rlib import rwin32 @@ -552,8 +553,45 @@ ) # Various OSError subclasses added in Python 3.3 -W_BlockingIOError = _new_exception( - "BlockingIOError", W_OSError, "I/O operation would block.") +class W_BlockingIOError(W_OSError): + "I/O operation would block." + + def __init__(self, space): + W_OSError.__init__(self, space) + self.written = -1 + + def descr_init(self, space, args_w): + W_OSError.descr_init(self, space, args_w) + # BlockingIOError's 3rd argument can be the number of + # characters written. + if len(args_w) >= 3: + try: + written = space.int_w(args_w[2]) + except OperationError: + pass + else: + self.written = written + + def descr_get_written(self, space): + if self.written == -1: + raise OperationError(space.w_AttributeError, + space.wrap("characters_written")) + return space.wrap(self.written) + + def descr_set_written(self, space, w_written): + self.written = space.int_w(w_written) + + +W_BlockingIOError.typedef = TypeDef( + 'BlockingIOError', W_OSError.typedef, + __doc__ = ("Exception raised when I/O would block on a non-blocking " + "I/O stream"), + __new__ = _new(W_BlockingIOError), + __init__ = interp2app(W_BlockingIOError.descr_init), + characters_written = GetSetProperty(W_BlockingIOError.descr_get_written, + W_BlockingIOError.descr_set_written), + ) + W_ConnectionError = _new_exception( "ConnectionError", W_OSError, "Connection error.") W_ChildProcessError = _new_exception( diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -292,3 +292,14 @@ assert ImportError("message", path="y").path == "y" raises(TypeError, ImportError, invalid="z") + def test_blockingioerror(self): + args = ("a", "b", "c", "d", "e") + 
for n in range(6): + e = BlockingIOError(*args[:n]) + raises(AttributeError, getattr, e, 'characters_written') + e = BlockingIOError("a", "b", 3) + assert e.characters_written == 3 + e.characters_written = 5 + assert e.characters_written == 5 + + From noreply at buildbot.pypy.org Thu Jul 3 23:58:16 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Jul 2014 23:58:16 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: PEP3151: OSError is IOError is EnvironmentError is socket.error is select.error! Message-ID: <20140703215816.1540F1D293B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72344:38117d8ea60f Date: 2014-06-23 09:47 +0200 http://bitbucket.org/pypy/pypy/changeset/38117d8ea60f/ Log: PEP3151: OSError is IOError is EnvironmentError is socket.error is select.error! diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -565,8 +565,7 @@ class SocketAPI: def __init__(self, space): - self.w_error = space.new_exception_class( - "_socket.error", space.w_IOError) + self.w_error = space.w_OSError self.w_herror = space.new_exception_class( "_socket.herror", self.w_error) self.w_gaierror = space.new_exception_class( diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -33,11 +33,11 @@ +-- AssertionError +-- AttributeError +-- BufferError - +-- EnvironmentError - | +-- IOError - | +-- OSError - | +-- WindowsError (Windows) - | +-- VMSError (VMS) + +-- OSError + | = EnvironmentError + | = IOError + | = WindowsError (Windows) + | = VMSError (VMS) +-- EOFError +-- ImportError +-- LookupError @@ -439,8 +439,8 @@ W_Warning, """Base class for warnings about features which will be deprecated in the future.""") -class W_EnvironmentError(W_Exception): - """Base 
class for I/O related errors.""" +class W_OSError(W_Exception): + """OS system call failed.""" def __init__(self, space): self.w_errno = space.w_None @@ -484,21 +484,21 @@ )) return W_BaseException.descr_str(self, space) -W_EnvironmentError.typedef = TypeDef( - 'EnvironmentError', +W_OSError.typedef = TypeDef( + 'OSError', W_Exception.typedef, - __doc__ = W_EnvironmentError.__doc__, - __new__ = _new(W_EnvironmentError), - __reduce__ = interp2app(W_EnvironmentError.descr_reduce), - __init__ = interp2app(W_EnvironmentError.descr_init), - __str__ = interp2app(W_EnvironmentError.descr_str), - errno = readwrite_attrproperty_w('w_errno', W_EnvironmentError), - strerror = readwrite_attrproperty_w('w_strerror', W_EnvironmentError), - filename = readwrite_attrproperty_w('w_filename', W_EnvironmentError), + __doc__ = W_OSError.__doc__, + __new__ = _new(W_OSError), + __reduce__ = interp2app(W_OSError.descr_reduce), + __init__ = interp2app(W_OSError.descr_init), + __str__ = interp2app(W_OSError.descr_str), + errno = readwrite_attrproperty_w('w_errno', W_OSError), + strerror = readwrite_attrproperty_w('w_strerror', W_OSError), + filename = readwrite_attrproperty_w('w_filename', W_OSError), ) -W_OSError = _new_exception('OSError', W_EnvironmentError, - """OS system call failed.""") +W_EnvironmentError = W_OSError +W_IOError = W_OSError class W_WindowsError(W_OSError): """MS-Windows OS system call failed.""" @@ -643,9 +643,6 @@ W_NameError = _new_exception('NameError', W_Exception, """Name not found globally.""") -W_IOError = _new_exception('IOError', W_EnvironmentError, - """I/O operation failed.""") - class W_SyntaxError(W_Exception): """Invalid syntax.""" diff --git a/pypy/module/select/__init__.py b/pypy/module/select/__init__.py --- a/pypy/module/select/__init__.py +++ b/pypy/module/select/__init__.py @@ -11,7 +11,7 @@ interpleveldefs = { 'select': 'interp_select.select', - 'error' : 'space.fromcache(interp_select.Cache).w_error' + 'error' : 'space.w_OSError', } if os.name 
=='posix': diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -7,10 +7,6 @@ defaultevents = rpoll.POLLIN | rpoll.POLLOUT | rpoll.POLLPRI -class Cache: - def __init__(self, space): - self.w_error = space.new_exception_class("select.error") - def poll(space): """Returns a polling object, which supports registering and unregistering file descriptors, and then polling them for I/O events.""" @@ -63,9 +59,8 @@ try: retval = rpoll.poll(self.fddict, timeout) except rpoll.PollError, e: - w_errortype = space.fromcache(Cache).w_error message = e.get_msg() - raise OperationError(w_errortype, + raise OperationError(space.w_OSError, space.newtuple([space.wrap(e.errno), space.wrap(message)])) finally: @@ -125,8 +120,7 @@ if res < 0: errno = _c.geterrno() msg = _c.socket_strerror_str(errno) - w_errortype = space.fromcache(Cache).w_error - raise OperationError(w_errortype, space.newtuple([ + raise OperationError(space.w_OSError, space.newtuple([ space.wrap(errno), space.wrap(msg)])) resin_w = [] From noreply at buildbot.pypy.org Thu Jul 3 23:58:17 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Jul 2014 23:58:17 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fix some cpyext tests. Message-ID: <20140703215817.580741D293B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72345:a53f63e78743 Date: 2014-06-23 14:11 +0200 http://bitbucket.org/pypy/pypy/changeset/a53f63e78743/ Log: Fix some cpyext tests. 
diff --git a/pypy/module/_frozen_importlib/__init__.py b/pypy/module/_frozen_importlib/__init__.py --- a/pypy/module/_frozen_importlib/__init__.py +++ b/pypy/module/_frozen_importlib/__init__.py @@ -24,8 +24,9 @@ source = fp.read() pathname = "" code_w = ec.compiler.compile(source, pathname, 'exec', 0) - w_dict = space.newdict() - space.setitem(w_dict, space.wrap('__name__'), self.w_name) + space.setitem(self.w_dict, space.wrap('__name__'), self.w_name) + space.setitem(self.w_dict, space.wrap('__builtins__'), + space.wrap(space.builtin)) code_w.exec_code(space, self.w_dict, self.w_dict) def startup(self, space): diff --git a/pypy/module/cpyext/test/date.c b/pypy/module/cpyext/test/date.c --- a/pypy/module/cpyext/test/date.c +++ b/pypy/module/cpyext/test/date.c @@ -17,6 +17,8 @@ PyObject *module, *othermodule; module = PyModule_Create(&moduledef); othermodule = PyImport_ImportModule("apple.banana"); + if (!othermodule) + return NULL; Py_DECREF(othermodule); return module; } diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -217,10 +217,12 @@ class AppTestCpythonExtensionBase(LeakCheckingTest): def setup_class(cls): - cls.space.getbuiltinmodule("cpyext") - from pypy.module.imp.importing import importhook - importhook(cls.space, "os") # warm up reference counts - state = cls.space.fromcache(RefcountState) + space = cls.space + space.getbuiltinmodule("cpyext") + # 'import os' to warm up reference counts + w_import = space.builtin.getdictvalue(space, '__import__') + space.call_function(w_import, space.wrap("os")) + state = space.fromcache(RefcountState) state.non_heaptypes_w[:] = [] def setup_method(self, func): diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -79,10 +79,9 @@ source = fp.read() pathname = "" % modulename code_w = 
ec.compiler.compile(source, pathname, 'exec', 0) - w_dict = space.newdict() w_mod = add_module(space, space.wrap(modulename)) space.setitem(space.sys.get('modules'), w_mod.w_name, w_mod) - space.setitem(w_dict, space.wrap('__name__'), w_mod.w_name) + space.setitem(w_mod.w_dict, space.wrap('__name__'), w_mod.w_name) code_w.exec_code(space, w_mod.w_dict, w_mod.w_dict) assert check_sys_modules_w(space, modulename) return w_mod From noreply at buildbot.pypy.org Thu Jul 3 23:58:18 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Jul 2014 23:58:18 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Another fix Message-ID: <20140703215818.946001D293B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72346:ca2e17af825d Date: 2014-06-23 17:48 +0200 http://bitbucket.org/pypy/pypy/changeset/ca2e17af825d/ Log: Another fix diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -18,7 +18,7 @@ results = [] def some_thread(): res = module.get_thread_ident() - results.append((res, threading._get_ident())) + results.append((res, threading.get_ident())) some_thread() assert results[0][0] == results[0][1] From noreply at buildbot.pypy.org Thu Jul 3 23:58:19 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Jul 2014 23:58:19 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Apply fix for Cpython Issue14857 Message-ID: <20140703215819.D8D7B1D293B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72347:8f89bba8441c Date: 2014-07-03 23:23 +0200 http://bitbucket.org/pypy/pypy/changeset/8f89bba8441c/ Log: Apply fix for Cpython Issue14857 diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1354,10 +1354,10 @@ # compile the body proper 
self._handle_body(cls.body) # return the (empty) __class__ cell - scope = self.scope.lookup("@__class__") + scope = self.scope.lookup("__class__") if scope == symtable.SCOPE_CELL: # Return the cell where to store __class__ - self.emit_op_arg(ops.LOAD_CLOSURE, self.cell_vars["@__class__"]) + self.emit_op_arg(ops.LOAD_CLOSURE, self.cell_vars["__class__"]) else: # This happens when nobody references the cell self.load_const(self.space.w_None) diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -240,7 +240,7 @@ def note_symbol(self, identifier, role): # Special-case super: it counts as a use of __class__ if role == SYM_USED and identifier == 'super': - self.note_symbol('@__class__', SYM_USED) + self.note_symbol('__class__', SYM_USED) return Scope.note_symbol(self, identifier, role) def note_yield(self, yield_node): @@ -298,12 +298,12 @@ return misc.mangle(name, self.name) def _pass_special_names(self, local, new_bound): - assert '@__class__' in local - new_bound['@__class__'] = None + assert '__class__' in local + new_bound['__class__'] = None def _finalize_cells(self, free): for name, role in self.symbols.iteritems(): - if role == SCOPE_LOCAL and name in free and name == '@__class__': + if role == SCOPE_LOCAL and name in free and name == '__class__': self.symbols[name] = SCOPE_CELL del free[name] @@ -392,7 +392,7 @@ clsdef.kwargs.walkabout(self) self.visit_sequence(clsdef.decorator_list) self.push_scope(ClassScope(clsdef), clsdef) - self.note_symbol('@__class__', SYM_ASSIGNED) + self.note_symbol('__class__', SYM_ASSIGNED) self.note_symbol('__locals__', SYM_PARAM) self.visit_sequence(clsdef.body) self.pop_scope() diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -67,7 +67,7 @@ "super(): arg[0] 
deleted")) index = 0 for name in code.co_freevars: - if name == "@__class__": + if name == "__class__": break index += 1 else: diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -400,3 +400,21 @@ assert x.y == 42 del x.x assert x.z == 42 + + def test___class___variable(self): + class X: + def f(self): + return __class__ + assert X().f() is X + + class X: + @classmethod + def f(cls): + return __class__ + assert X.f() is X + + class X: + @staticmethod + def f(): + return __class__ + assert X.f() is X From noreply at buildbot.pypy.org Thu Jul 3 23:58:21 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Jul 2014 23:58:21 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Apply fix for Cpython Issue15839 Message-ID: <20140703215821.3C8CF1D293B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72348:7562de248abb Date: 2014-07-03 23:35 +0200 http://bitbucket.org/pypy/pypy/changeset/7562de248abb/ Log: Apply fix for Cpython Issue15839 diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -56,14 +56,14 @@ frame = ec.gettopframe() code = frame.pycode if not code: - raise OperationError(space.w_SystemError, space.wrap( + raise OperationError(space.w_RuntimeError, space.wrap( "super(): no code object")) if code.co_argcount == 0: - raise OperationError(space.w_SystemError, space.wrap( + raise OperationError(space.w_RuntimeError, space.wrap( "super(): no arguments")) w_obj = frame.locals_stack_w[0] if not w_obj: - raise OperationError(space.w_SystemError, space.wrap( + raise OperationError(space.w_RuntimeError, space.wrap( "super(): arg[0] deleted")) index = 0 for name in code.co_freevars: @@ -71,11 +71,15 @@ break index += 1 else: - raise 
OperationError(space.w_SystemError, space.wrap( + raise OperationError(space.w_RuntimeError, space.wrap( "super(): __class__ cell not found")) # a kind of LOAD_DEREF cell = frame.cells[len(code.co_cellvars) + index] - w_starttype = cell.get() + try: + w_starttype = cell.get() + except ValueError: + raise OperationError(space.w_RuntimeError, space.wrap( + "super(): empty __class__ cell")) w_obj_or_type = w_obj if space.is_none(w_obj_or_type): diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -418,3 +418,20 @@ def f(): return __class__ assert X.f() is X + + def test_obscure_super_errors(self): + """ + def f(): + super() + raises(RuntimeError, f) + def f(x): + del x + super() + raises(RuntimeError, f, None) + class X: + def f(x): + nonlocal __class__ + del __class__ + super() + raises(RuntimeError, X().f) + """ From noreply at buildbot.pypy.org Thu Jul 3 23:58:22 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Jul 2014 23:58:22 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fix for cpython Issue #17983 Message-ID: <20140703215822.7DDE71D293B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72349:37095df3c8ad Date: 2014-07-03 23:51 +0200 http://bitbucket.org/pypy/pypy/changeset/37095df3c8ad/ Log: Fix for cpython Issue #17983 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -437,6 +437,9 @@ def visit_Global(self, glob): for name in glob.names: + if isinstance(self.scope, ClassScope) and name == '__class__': + raise SyntaxError("cannot make __class__ global", + glob.lineno, glob.col_offset) old_role = self.scope.lookup_role(name) if old_role & (SYM_USED | SYM_ASSIGNED): if old_role & SYM_ASSIGNED: diff --git 
a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -974,6 +974,15 @@ """ yield self.st, test, "g()", range(3) + def test__class__global(self): + source = """if 1: + class X: + global __class__ + def f(self): + super() + """ + py.test.raises(SyntaxError, self.simple_test, source, None, None) + class AppTestCompiler: From noreply at buildbot.pypy.org Thu Jul 3 23:58:23 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 3 Jul 2014 23:58:23 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Python3.3 slightly changed the error message Message-ID: <20140703215823.AF4181D293B@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72350:b0c62d972ccf Date: 2014-07-03 23:54 +0200 http://bitbucket.org/pypy/pypy/changeset/b0c62d972ccf/ Log: Python3.3 slightly changed the error message diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -347,7 +347,7 @@ A().m() except ImportError as e: msg = str(e) - ''', "msg", "No module named __foo__") + ''', "msg", "No module named '__foo__'") def test_if_stmts(self): yield self.st, "a = 42\nif a > 10: a += 2", "a", 44 From noreply at buildbot.pypy.org Fri Jul 4 00:13:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Jul 2014 00:13:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Copy the checkin message as comment to the newly introduced function. Message-ID: <20140703221302.CD3681D2D6D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72351:2d41724c01ad Date: 2014-07-04 00:12 +0200 http://bitbucket.org/pypy/pypy/changeset/2d41724c01ad/ Log: Copy the checkin message as comment to the newly introduced function. 
diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -131,6 +131,14 @@ raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display def make_inner(): + # PyPy tweak: recompile the source code each time before + # calling inner(). There are situations like Issue #1776 + # where PyPy tries to reuse the JIT code from before, + # but that's not going to work: the first thing the + # function does is the "-s" statement, which may declare + # new classes (here a namedtuple). We end up with + # bridges from the inner loop; more and more of them + # every time we call inner(). code = compile(src, dummy_src_name, "exec") exec code in globals(), ns return ns["inner"] From noreply at buildbot.pypy.org Fri Jul 4 10:04:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Jul 2014 10:04:53 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Use a regular lock as the fall-back "atomic" object here, as it also Message-ID: <20140704080453.237CB1C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72352:617dab8c0da1 Date: 2014-07-04 10:04 +0200 http://bitbucket.org/pypy/pypy/changeset/617dab8c0da1/ Log: Use a regular lock as the fall-back "atomic" object here, as it also supports "with" directly. diff --git a/lib_pypy/transaction.py b/lib_pypy/transaction.py --- a/lib_pypy/transaction.py +++ b/lib_pypy/transaction.py @@ -17,17 +17,11 @@ try: from __pypy__.thread import atomic except ImportError: - # Not a STM-enabled PyPy. We can still provide a version of 'atomic' - # that is good enough for our purposes. With this limited version, + # Not a STM-enabled PyPy. We can use a regular lock for 'atomic', + # which is good enough for our purposes. With this limited version, # an atomic block in thread X will not prevent running thread Y, if # thread Y is not within an atomic block at all. 
- _atomic_global_lock = thread.allocate_lock() - class _Atomic(object): - def __enter__(self): - _atomic_global_lock.acquire() - def __exit__(self, *args): - _atomic_global_lock.release() - atomic = _Atomic() + atomic = thread.allocate_lock() try: from __pypy__.thread import signals_enabled From noreply at buildbot.pypy.org Fri Jul 4 13:58:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Jul 2014 13:58:58 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a warning to the tproxy docs Message-ID: <20140704115858.1F5DA1D3545@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72353:261cce2fec2d Date: 2014-07-04 13:58 +0200 http://bitbucket.org/pypy/pypy/changeset/261cce2fec2d/ Log: Add a warning to the tproxy docs diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -26,6 +26,16 @@ Transparent Proxies ================================ +.. warning:: + + This is a feature that was tried experimentally long ago, and we + found no really good use cases. The basic functionality is still + there, but we don't recommend using it. Some of the examples below + might not work any more (e.g. you can't tproxy a list object any + more). The rest can be done by hacking in standard Python. If + anyone is interested in working on tproxy again, he is welcome, but + we don't regard this as an interesting extension. + PyPy's Transparent Proxies allow routing of operations on objects to a callable. Application level code can customize objects without interfering with the type system - ``type(proxied_list) is list`` holds true From noreply at buildbot.pypy.org Fri Jul 4 14:21:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Jul 2014 14:21:06 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Cancel 3e144ed1d5b7: it makes translate.py take 25% longer... 
Message-ID: <20140704122106.F20A51D35D7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72354:3676ac825f07 Date: 2014-07-04 14:20 +0200 http://bitbucket.org/pypy/pypy/changeset/3676ac825f07/ Log: Cancel 3e144ed1d5b7: it makes translate.py take 25% longer... diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -42,8 +42,7 @@ if (((long)pypy_stm_nursery_low_fill_mark_saved) > 0) { pypy_stm_nursery_low_fill_mark_saved = 0; } - } else { - /* if (((long)pypy_stm_nursery_low_fill_mark) > 0) */ + } else if (((long)pypy_stm_nursery_low_fill_mark) > 0) { /* if not set to unlimited by pypy_stm_setup() (s.b.) */ pypy_stm_nursery_low_fill_mark = 0; } From noreply at buildbot.pypy.org Fri Jul 4 14:23:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Jul 2014 14:23:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Improve the speed of some non-jitted parts of the code, by disabling the assert() there when Message-ID: <20140704122348.073E91D35E1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72355:7c2871e9cb26 Date: 2014-07-04 14:21 +0200 http://bitbucket.org/pypy/pypy/changeset/7c2871e9cb26/ Log: Improve the speed of some non-jitted parts of the code, by disabling the assert() there when compiled in non-debug mode. 
diff --git a/rpython/translator/tool/cbuild.py b/rpython/translator/tool/cbuild.py --- a/rpython/translator/tool/cbuild.py +++ b/rpython/translator/tool/cbuild.py @@ -361,4 +361,8 @@ typedef unsigned long Unsigned; # define SIGNED_MIN LONG_MIN #endif + +#if !defined(RPY_ASSERT) && !defined(RPY_LL_ASSERT) +# define NDEBUG +#endif ''' From noreply at buildbot.pypy.org Fri Jul 4 14:23:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Jul 2014 14:23:49 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Improve the speed of some non-jitted parts of the code, by disabling the assert() there when Message-ID: <20140704122349.8F04F1D35E1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72356:4f5e243c027d Date: 2014-07-04 14:21 +0200 http://bitbucket.org/pypy/pypy/changeset/4f5e243c027d/ Log: Improve the speed of some non-jitted parts of the code, by disabling the assert() there when compiled in non-debug mode. diff --git a/rpython/translator/tool/cbuild.py b/rpython/translator/tool/cbuild.py --- a/rpython/translator/tool/cbuild.py +++ b/rpython/translator/tool/cbuild.py @@ -366,4 +366,8 @@ #else typedef unsigned char bool_t; #endif + +#if !defined(RPY_ASSERT) && !defined(RPY_LL_ASSERT) +# define NDEBUG +#endif ''' From noreply at buildbot.pypy.org Fri Jul 4 14:23:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Jul 2014 14:23:50 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140704122350.DB14C1D35E1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72357:b668c9626bce Date: 2014-07-04 14:23 +0200 http://bitbucket.org/pypy/pypy/changeset/b668c9626bce/ Log: merge heads diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -26,6 +26,16 @@ Transparent Proxies ================================ +.. 
warning:: + + This is a feature that was tried experimentally long ago, and we + found no really good use cases. The basic functionality is still + there, but we don't recommend using it. Some of the examples below + might not work any more (e.g. you can't tproxy a list object any + more). The rest can be done by hacking in standard Python. If + anyone is interested in working on tproxy again, he is welcome, but + we don't regard this as an interesting extension. + PyPy's Transparent Proxies allow routing of operations on objects to a callable. Application level code can customize objects without interfering with the type system - ``type(proxied_list) is list`` holds true From noreply at buildbot.pypy.org Fri Jul 4 16:40:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Jul 2014 16:40:03 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix a big leak in our '_ssl' module. Argh. Thanks bob_grigoryan on irc. Message-ID: <20140704144003.1BA6B1C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72358:0726938dad41 Date: 2014-07-04 16:39 +0200 http://bitbucket.org/pypy/pypy/changeset/0726938dad41/ Log: Fix a big leak in our '_ssl' module. Argh. Thanks bob_grigoryan on irc. 
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -233,40 +233,40 @@ return self.space.wrap('') raise ssl_error(self.space, "Socket closed without SSL shutdown handshake") - raw_buf, gc_buf = rffi.alloc_buffer(num_bytes) - while True: - err = 0 + with rffi.scoped_alloc_buffer(num_bytes) as buf: + while True: + err = 0 - count = libssl_SSL_read(self.ssl, raw_buf, num_bytes) - err = libssl_SSL_get_error(self.ssl, count) + count = libssl_SSL_read(self.ssl, buf.raw, num_bytes) + err = libssl_SSL_get_error(self.ssl, count) - if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, False) - elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, True) - elif (err == SSL_ERROR_ZERO_RETURN and - libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): - return self.space.wrap("") - else: - sockstate = SOCKET_OPERATION_OK + if err == SSL_ERROR_WANT_READ: + sockstate = check_socket_and_wait_for_timeout(self.space, + self.w_socket, False) + elif err == SSL_ERROR_WANT_WRITE: + sockstate = check_socket_and_wait_for_timeout(self.space, + self.w_socket, True) + elif (err == SSL_ERROR_ZERO_RETURN and + libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): + return self.space.wrap("") + else: + sockstate = SOCKET_OPERATION_OK - if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The read operation timed out") - elif sockstate == SOCKET_IS_NONBLOCKING: - break + if sockstate == SOCKET_HAS_TIMED_OUT: + raise ssl_error(self.space, "The read operation timed out") + elif sockstate == SOCKET_IS_NONBLOCKING: + break - if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE: - continue - else: - break + if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE: + continue + else: + break - if count <= 0: - raise _ssl_seterror(self.space, self, 
count) + if count <= 0: + raise _ssl_seterror(self.space, self, count) - result = rffi.str_from_buffer(raw_buf, gc_buf, num_bytes, count) - rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) + result = buf.str(count) + return self.space.wrap(result) def _refresh_nonblocking(self, space): From noreply at buildbot.pypy.org Fri Jul 4 16:45:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Jul 2014 16:45:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Replace more usages of rffi.alloc_buffer() with scoped_alloc_buffer(). Message-ID: <20140704144512.631A81C0EEA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72359:8bc897775808 Date: 2014-07-04 16:44 +0200 http://bitbucket.org/pypy/pypy/changeset/8bc897775808/ Log: Replace more usages of rffi.alloc_buffer() with scoped_alloc_buffer(). diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -442,8 +442,8 @@ def test_copy(self, space, api): w_x = space.wrap(u"abcd\u0660") - target_chunk, _ = rffi.alloc_unicodebuffer(space.int_w(space.len(w_x))) - #lltype.malloc(Py_UNICODE, space.int_w(space.len(w_x)), flavor='raw') + count1 = space.int_w(space.len(w_x)) + target_chunk = lltype.malloc(rffi.CWCHARP.TO, count1, flavor='raw') x_chunk = api.PyUnicode_AS_UNICODE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, 4) diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -188,16 +188,13 @@ finally: lltype.free(buf, flavor='raw') else: - raw_buf, gc_buf = rffi.alloc_buffer(size) - try: - returned_size = c_fread(raw_buf, 1, size, ll_file) + with rffi.scoped_alloc_buffer(size) as buf: + returned_size = c_fread(buf.raw, 1, size, ll_file) returned_size = intmask(returned_size) # is between 0 and size if returned_size == 0: if not c_feof(ll_file): raise _error(ll_file) - s = 
rffi.str_from_buffer(raw_buf, gc_buf, size, returned_size) - finally: - rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) + s = buf.str(returned_size) return s def seek(self, pos, whence=0): @@ -270,25 +267,21 @@ def readline(self): if self.ll_file: - raw_buf, gc_buf = rffi.alloc_buffer(BASE_LINE_SIZE) - try: - c = self._readline1(raw_buf) + with rffi.scoped_alloc_buffer(BASE_LINE_SIZE) as buf: + c = self._readline1(buf.raw) if c >= 0: - return rffi.str_from_buffer(raw_buf, gc_buf, - BASE_LINE_SIZE, c) + return buf.str(c) # # this is the rare case: the line is longer than BASE_LINE_SIZE s = StringBuilder() while True: - s.append_charpsize(raw_buf, BASE_LINE_SIZE - 1) - c = self._readline1(raw_buf) + s.append_charpsize(buf.raw, BASE_LINE_SIZE - 1) + c = self._readline1(buf.raw) if c >= 0: break # - s.append_charpsize(raw_buf, c) + s.append_charpsize(buf.raw, c) return s.build() - finally: - rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) raise ValueError("I/O operation on closed file") diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py --- a/rpython/rlib/rsocket.py +++ b/rpython/rlib/rsocket.py @@ -827,15 +827,12 @@ if timeout == 1: raise SocketTimeout elif timeout == 0: - raw_buf, gc_buf = rffi.alloc_buffer(buffersize) - try: + with rffi.scoped_alloc_buffer(buffersize) as buf: read_bytes = _c.socketrecv(self.fd, - rffi.cast(rffi.VOIDP, raw_buf), + rffi.cast(rffi.VOIDP, buf.raw), buffersize, flags) if read_bytes >= 0: - return rffi.str_from_buffer(raw_buf, gc_buf, buffersize, read_bytes) - finally: - rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) + return buf.str(read_bytes) raise self.error_handler() def recvinto(self, rwbuffer, nbytes, flags=0): @@ -852,11 +849,10 @@ if timeout == 1: raise SocketTimeout elif timeout == 0: - raw_buf, gc_buf = rffi.alloc_buffer(buffersize) - try: + with rffi.scoped_alloc_buffer(buffersize) as buf: address, addr_p, addrlen_p = self._addrbuf() try: - read_bytes = _c.recvfrom(self.fd, raw_buf, buffersize, flags, + 
read_bytes = _c.recvfrom(self.fd, buf.raw, buffersize, flags, addr_p, addrlen_p) addrlen = rffi.cast(lltype.Signed, addrlen_p[0]) finally: @@ -867,10 +863,8 @@ address.addrlen = addrlen else: address = None - data = rffi.str_from_buffer(raw_buf, gc_buf, buffersize, read_bytes) + data = buf.str(read_bytes) return (data, address) - finally: - rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) raise self.error_handler() def recvfrom_into(self, rwbuffer, nbytes, flags=0): diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1006,15 +1006,12 @@ if count < 0: raise OSError(errno.EINVAL, None) rposix.validate_fd(fd) - raw_buf, gc_buf = rffi.alloc_buffer(count) - try: - void_buf = rffi.cast(rffi.VOIDP, raw_buf) + with rffi.scoped_alloc_buffer(count) as buf: + void_buf = rffi.cast(rffi.VOIDP, buf.raw) got = rffi.cast(lltype.Signed, os_read(fd, void_buf, count)) if got < 0: raise OSError(rposix.get_errno(), "os_read failed") - return rffi.str_from_buffer(raw_buf, gc_buf, count, got) - finally: - rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) + return buf.str(got) return extdef([int, int], SomeString(can_be_None=True), "ll_os.ll_os_read", llimpl=os_read_llimpl) From noreply at buildbot.pypy.org Fri Jul 4 18:22:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 4 Jul 2014 18:22:09 +0200 (CEST) Subject: [pypy-commit] benchmarks default: Add a mini benchmark. Message-ID: <20140704162209.37A8E1C1347@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r264:620a3e907582 Date: 2014-07-04 18:21 +0200 http://bitbucket.org/pypy/benchmarks/changeset/620a3e907582/ Log: Add a mini benchmark. 
diff --git a/multithread/minibench1.py b/multithread/minibench1.py new file mode 100644 --- /dev/null +++ b/multithread/minibench1.py @@ -0,0 +1,28 @@ +import thread, sys + +def f(n, lock): + total = 0 + lst1 = ["foo"] + for i in xrange(n): + lst1.append(i) + total += lst1.pop() + sys.stdout.write('%d\n' % total) + lock.release() + + +T = 4 # number of threads +N = 100000000 # number of iterations in each thread +if len(sys.argv) >= 2: + T = int(sys.argv[1]) + if len(sys.argv) >= 3: + N = int(sys.argv[2]) + +locks = [] +for i in range(T): + lock = thread.allocate_lock() + lock.acquire() + locks.append(lock) + thread.start_new_thread(f, (N, lock)) + +for lock in locks: + lock.acquire() From noreply at buildbot.pypy.org Sat Jul 5 10:48:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 10:48:43 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Mention that the Ubuntu 12.04 binary works well on 14.04 too Message-ID: <20140705084843.1E52A1C3288@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r515:8e87d836c8d9 Date: 2014-07-05 10:48 +0200 http://bitbucket.org/pypy/pypy.org/changeset/8e87d836c8d9/ Log: Mention that the Ubuntu 12.04 binary works well on 14.04 too diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -90,7 +90,7 @@ portable Linux binaries.

    • Linux x86 binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS) (see [1] below)
    • -
    • Linux x86 binary (64bit, tar.bz2 built on Ubuntu 12.04.2 LTS) (see [1] below)
    • +
    • Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04) (see [1] below)
    • ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Raspbian) (see [1] below)
    • ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring) (see [1] below)
    • ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise) (see [1] below)
    • @@ -111,8 +111,8 @@ them unless you're ready to hack your system by adding symlinks to the libraries it tries to open.

        -
      • Linux binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS) (see [1] below)
      • -
      • Linux binary (64bit, tar.bz2 built on Ubuntu 12.04.2 LTS) (see [1] below)
      • +
      • Linux x86 binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS) (see [1] below)
      • +
      • Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04) (see [1] below)
      • ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Raspbian) (see [1] below)
      • ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring) (see [1] below)
      • ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise) (see [1] below)
      • diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -73,7 +73,7 @@ .. _`portable Linux binaries`: https://github.com/squeaky-pl/portable-pypy * `Linux x86 binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS)`__ (see ``[1]`` below) -* `Linux x86 binary (64bit, tar.bz2 built on Ubuntu 12.04.2 LTS)`__ (see ``[1]`` below) +* `Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04)`__ (see ``[1]`` below) * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Raspbian)`__ (see ``[1]`` below) * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring)`__ (see ``[1]`` below) * `ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise)`__ (see ``[1]`` below) @@ -107,8 +107,8 @@ them** unless you're ready to hack your system by adding symlinks to the libraries it tries to open. -* `Linux binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS)`__ (see ``[1]`` below) -* `Linux binary (64bit, tar.bz2 built on Ubuntu 12.04.2 LTS)`__ (see ``[1]`` below) +* `Linux x86 binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS)`__ (see ``[1]`` below) +* `Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04)`__ (see ``[1]`` below) * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Raspbian)`__ (see ``[1]`` below) * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring)`__ (see ``[1]`` below) * `ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise)`__ (see ``[1]`` below) From noreply at buildbot.pypy.org Sat Jul 5 15:56:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 15:56:43 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Print "PyPy-STM" instead of "PyPy" in the banner, to distinguish the two Message-ID: <20140705135643.6ED3A1C3334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72360:b2f768401d32 Date: 2014-07-05 15:56 +0200 http://bitbucket.org/pypy/pypy/changeset/b2f768401d32/ Log: Print 
"PyPy-STM" instead of "PyPy" in the banner, to distinguish the two more easily. diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -61,13 +61,17 @@ ver = "%d.%d.%d" % (PYPY_VERSION[0], PYPY_VERSION[1], PYPY_VERSION[2]) if PYPY_VERSION[3] != "final": ver = ver + "-%s%d" %(PYPY_VERSION[3], PYPY_VERSION[4]) - return space.wrap("%d.%d.%d (%s, %s, %s)\n[PyPy %s%s]" % ( + extra = '' + if space.config.translation.stm: + extra = '-STM' + return space.wrap("%d.%d.%d (%s, %s, %s)\n[PyPy%s %s%s]" % ( CPYTHON_VERSION[0], CPYTHON_VERSION[1], CPYTHON_VERSION[2], get_repo_version_info(root=pypyroot)[1], date, time, + extra, ver, compiler_version())) From noreply at buildbot.pypy.org Sat Jul 5 17:05:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 17:05:35 +0200 (CEST) Subject: [pypy-commit] cffi default: prepare for 0.8.3 Message-ID: <20140705150535.775401C34C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1534:057515cfe543 Date: 2014-07-05 16:47 +0200 http://bitbucket.org/cffi/cffi/changeset/057515cfe543/ Log: prepare for 0.8.3 diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5504,7 +5504,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("0.8.2"); + v = PyText_FromString("0.8.3"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3199,4 +3199,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8.2" + assert __version__ == "0.8.3" diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.2" -__version_info__ = (0, 8, 2) +__version__ = 
"0.8.3" +__version_info__ = (0, 8, 3) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8.2' +release = '0.8.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,7 +88,7 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.2.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.3.tar.gz - Or grab the most current version by following the instructions below. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -113,7 +113,7 @@ `Mailing list `_ """, - version='0.8.2', + version='0.8.3', packages=['cffi'], zip_safe=False, From noreply at buildbot.pypy.org Sat Jul 5 17:05:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 17:05:37 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: hg merge default Message-ID: <20140705150537.1D4AF1C34C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1535:1f0fc199c139 Date: 2014-07-05 16:47 +0200 http://bitbucket.org/cffi/cffi/changeset/1f0fc199c139/ Log: hg merge default diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5,7 +5,6 @@ #ifdef MS_WIN32 #include #include "misc_win32.h" -#include /* for alloca() */ #else #include #include @@ -13,9 +12,32 @@ #include #include #include -#if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +#endif + +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + 
typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) # include -#endif +# endif #endif #include "malloc_closure.h" @@ -5482,7 +5504,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("0.8.2"); + v = PyText_FromString("0.8.3"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/check__thread.c b/c/check__thread.c deleted file mode 100644 --- a/c/check__thread.c +++ /dev/null @@ -1,1 +0,0 @@ -__thread int some_threadlocal_variable_42; diff --git a/c/minibuffer.h b/c/minibuffer.h --- a/c/minibuffer.h +++ b/c/minibuffer.h @@ -105,8 +105,9 @@ static int mb_getbuf(MiniBufferObj *self, Py_buffer *view, int flags) { - return PyBuffer_FillInfo(view, NULL, self->mb_data, self->mb_size, - /*readonly=*/0, PyBUF_CONTIG | PyBUF_FORMAT); + return PyBuffer_FillInfo(view, (PyObject *)self, + self->mb_data, self->mb_size, + /*readonly=*/0, flags); } static PySequenceMethods mb_as_sequence = { diff --git a/c/misc_win32.h b/c/misc_win32.h --- a/c/misc_win32.h +++ b/c/misc_win32.h @@ -1,3 +1,4 @@ +#include /* for alloca() */ /************************************************************/ /* errno and GetLastError support */ @@ -192,7 +193,27 @@ static void *dlsym(void *handle, const char *symbol) { - return GetProcAddress((HMODULE)handle, symbol); + void *address = GetProcAddress((HMODULE)handle, symbol); +#ifndef MS_WIN64 + if (!address) { + /* If 'symbol' is not found, then try '_symbol at N' for N in + (0, 4, 8, 12, ..., 124). Unlike ctypes, we try to do that + for any symbol, although in theory it should only be done + for __stdcall functions. 
+ */ + int i; + char *mangled_name = alloca(1 + strlen(symbol) + 1 + 3 + 1); + if (!mangled_name) + return NULL; + for (i = 0; i < 32; i++) { + sprintf(mangled_name, "_%s@%d", symbol, i * 4); + address = GetProcAddress((HMODULE)handle, mangled_name); + if (address) + break; + } + } +#endif + return address; } static void dlclose(void *handle) @@ -210,21 +231,6 @@ return buf; } - -/************************************************************/ -/* types */ - -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef unsigned char _Bool; - - /************************************************************/ /* obscure */ diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1102,7 +1102,7 @@ def test_read_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1112,7 +1112,7 @@ def test_read_variable_as_unknown_length_array(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1124,7 +1124,7 @@ def test_write_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or 
sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -3199,4 +3199,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8.2" + assert __version__ == "0.8.3" diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.2" -__version_info__ = (0, 8, 2) +__version__ = "0.8.3" +__version_info__ = (0, 8, 3) diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -443,6 +443,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + for key, val in ffi._parser._int_constants.items(): + if key not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -24,6 +24,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -99,6 +100,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -128,9 +130,10 @@ finally: if lock is not None: lock.release() - return ast, macros + # csource will be used to find buggy source text + return ast, macros, csource - def convert_pycparser_error(self, e, csource): + def _convert_pycparser_error(self, e, csource): # xxx look for ":NUM:" at the start of str(e) and try to interpret # it as a line number line = None @@ -142,6 +145,12 @@ csourcelines = 
csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: @@ -160,14 +169,9 @@ self._packed = prev_packed def _internal_parse(self, csource): - ast, macros = self._parse(csource) + ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -175,27 +179,61 @@ if decl.name == '__dotdotdot__': break # - for decl in iterator: - if isinstance(decl, pycparser.c_ast.Decl): - self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise api.CDefError("typedef does not declare any name", - decl) - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_type(decl.name) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + try: + for decl in iterator: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) + and decl.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_type(decl.name) + 
elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_ptr_type(decl.name) + else: + realtype = self._get_type(decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) + self._add_constants(key, pyvalue) + elif value == '...': + self._declare('macro ' + key, value) else: - raise api.CDefError("unrecognized construct", decl) + raise api.CDefError('only supports the syntax "#define ' + '%s ..." 
(literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) def _parse_decl(self, decl): node = decl.type @@ -227,7 +265,7 @@ self._declare('variable ' + decl.name, tp) def parse_type(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): @@ -306,7 +344,8 @@ if ident == 'void': return model.void_type if ident == '__dotdotdot__': - raise api.FFIError('bad usage of "..."') + raise api.FFIError(':%d: bad usage of "..."' % + typenode.coord.line) return resolve_common_type(ident) # if isinstance(type, pycparser.c_ast.Struct): @@ -333,7 +372,8 @@ return self._get_struct_union_enum_type('union', typenode, name, nested=True) # - raise api.FFIError("bad or unsupported type declaration") + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) @@ -499,6 +539,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -506,8 +550,8 @@ self._partial_length = True return '...' 
# - raise api.FFIError("unsupported expression: expected a " - "simple numeric constant") + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) def _build_enum_type(self, explicit_name, decls): if decls is not None: @@ -522,6 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -535,3 +580,5 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + self._add_constants(k, v) diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -38,6 +38,7 @@ import distutils.errors # dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() options = dist.get_option_dict('build_ext') options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -89,43 +89,54 @@ # by generate_cpy_function_method(). prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") - prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},') - prnt(' {NULL, NULL} /* Sentinel */') + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # # standard init. 
modname = self.verifier.get_module_name() - if sys.version_info >= (3,): - prnt('static struct PyModuleDef _cffi_module_def = {') - prnt(' PyModuleDef_HEAD_INIT,') - prnt(' "%s",' % modname) - prnt(' NULL,') - prnt(' -1,') - prnt(' _cffi_methods,') - prnt(' NULL, NULL, NULL, NULL') - prnt('};') - prnt() - initname = 'PyInit_%s' % modname - createmod = 'PyModule_Create(&_cffi_module_def)' - errorcase = 'return NULL' - finalreturn = 'return lib' - else: - initname = 'init%s' % modname - createmod = 'Py_InitModule("%s", _cffi_methods)' % modname - errorcase = 'return' - finalreturn = 'return' + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() prnt('PyMODINIT_FUNC') - prnt('%s(void)' % initname) + prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') - prnt(' lib = %s;' % createmod) - prnt(' if (lib == NULL || %s < 0)' % ( - self._chained_list_constants[False],)) - prnt(' %s;' % errorcase) - prnt(' _cffi_init();') - prnt(' %s;' % finalreturn) + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') def load_library(self): # XXX review all usages of 'self' here! 
@@ -394,7 +405,7 @@ meth = 'METH_O' else: meth = 'METH_VARARGS' - self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth)) + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop @@ -481,8 +492,8 @@ if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname, - layoutfuncname)) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: @@ -589,13 +600,7 @@ 'variable type'),)) assert delayed else: - prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name)) - prnt(' o = PyInt_FromLong((long)(%s));' % (name,)) - prnt(' else if ((%s) <= 0)' % (name,)) - prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,)) - prnt(' else') - prnt(' o = PyLong_FromUnsignedLongLong(' - '(unsigned long long)(%s));' % (name,)) + prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: @@ -632,13 +637,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int %s(PyObject *lib)' % funcname) prnt('{') @@ -760,17 +770,30 @@ #include #include -#ifdef MS_WIN32 -#include /* for alloca() */ -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; 
-typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif #if PY_MAJOR_VERSION < 3 @@ -795,6 +818,15 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ @@ -804,14 +836,14 @@ PyLong_FromLongLong(x))) #define _cffi_to_c_int(o, type) \ - (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ - : _cffi_to_c_i8(o)) : \ - sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ - : _cffi_to_c_i16(o)) : \ - sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ - : _cffi_to_c_i32(o)) : \ - sizeof(type) == 8 ? (((type)-1) > 0 ? _cffi_to_c_u64(o) \ - : _cffi_to_c_i64(o)) : \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? 
(type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -885,25 +917,32 @@ return PyBool_FromLong(was_alive); } -static void _cffi_init(void) +static int _cffi_init(void) { - PyObject *module = PyImport_ImportModule("_cffi_backend"); - PyObject *c_api_object; + PyObject *module, *c_api_object = NULL; + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) - return; + goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) - return; + goto failure; if (!PyCapsule_CheckExact(c_api_object)) { - Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); - return; + goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -249,10 +249,10 @@ prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') self.export_symbols.append(layoutfuncname) - prnt('ssize_t %s(ssize_t i)' % (layoutfuncname,)) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) - prnt(' static ssize_t nums[] = {') + prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize in tp.enumfields(): @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] + BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] 
function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -410,13 +410,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) @@ -453,7 +458,7 @@ else: BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: @@ -547,20 +552,29 @@ #include #include /* XXX for ssize_t on some platforms */ -#ifdef _WIN32 -# include -# define snprintf _snprintf -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef SSIZE_T ssize_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif #else -# include +# include +# if 
(defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif ''' diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8.2' +release = '0.8.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,31 +1,34 @@ CFFI documentation ================================ -Foreign Function Interface for Python calling C code. The aim of this project -is to provide a convenient and reliable way of calling C code from Python. -The interface is based on `LuaJIT's FFI`_ and follows a few principles: +C Foreign Function Interface for Python. The goal is to provide a +convenient and reliable way to call compiled C code from Python using +interface declarations written in C. -* The goal is to call C code from Python. You should be able to do so - without learning a 3rd language: every alternative requires you to learn - their own language (Cython_, SWIG_) or API (ctypes_). So we tried to - assume that you know Python and C and minimize the extra bits of API that - you need to learn. +The interface is based on `LuaJIT's FFI`_, and follows a few principles: + +* The goal is to call C code from Python without learning a 3rd language: + existing alternatives require users to learn domain specific language + (Cython_, SWIG_) or API (ctypes_). The CFFI design requires users to know + only C and Python, minimizing the extra bits of API that need to be learned. * Keep all the Python-related logic in Python so that you don't need to write much C code (unlike `CPython native C extensions`_). -* Work either at the level of the ABI (Application Binary Interface) - or the API (Application Programming Interface). 
Usually, C - libraries have a specified C API but often not an ABI (e.g. they may +* The preferred way is to work at the level of the API (Application + Programming Interface): the C compiler is called from the declarations + you write to validate and link to the C language constructs. + Alternatively, it is also possible to work at the ABI level + (Application Binary Interface), the way ctypes_ work. + However, on non-Windows platforms, C libraries typically + have a specified C API but not an ABI (e.g. they may document a "struct" as having at least these fields, but maybe more). - (ctypes_ works at the ABI level, whereas Cython_ and `native C extensions`_ - work at the API level.) -* We try to be complete. For now some C99 constructs are not supported, +* Try to be complete. For now some C99 constructs are not supported, but all C89 should be, including macros (and including macro "abuses", which you can `manually wrap`_ in saner-looking C functions). -* We attempt to support both PyPy and CPython, with a reasonable path +* Attempt to support both PyPy and CPython, with a reasonable path for other Python implementations like IronPython and Jython. * Note that this project is **not** about embedding executable C code in @@ -38,7 +41,7 @@ .. _`CPython native C extensions`: http://docs.python.org/extending/extending.html .. _`native C extensions`: http://docs.python.org/extending/extending.html .. _`ctypes`: http://docs.python.org/library/ctypes.html -.. _`Weave`: http://www.scipy.org/Weave +.. _`Weave`: http://wiki.scipy.org/Weave .. _`manually wrap`: `The verification step`_ @@ -85,13 +88,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.2.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.3.tar.gz - Or grab the most current version by following the instructions below. - - MD5: 37fc88c62f40d04e8a18192433f951ec + - MD5: ... - - SHA: 75a6c433664a7a38d4d03cecbdc72cef4c3cceac + - SHA: ... 
* Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` @@ -851,7 +854,7 @@ ``ffi`` normally caches the string ``"int[]"`` to not need to re-parse it all the time. -.. versionadded:: 0.9 +.. versionadded:: 0.8.2 The ``ffi.cdef()`` call takes an optional argument ``packed``: if True, then all structs declared within this cdef are "packed". This has a meaning similar to ``__attribute__((packed))`` in GCC. It @@ -1195,13 +1198,14 @@ owned memory will not be freed as long as the buffer is alive. Moreover buffer objects now support weakrefs to them. -.. versionchanged:: 0.9 - Before version 0.9, ``bytes(buf)`` was supported in Python 3 to get +.. versionchanged:: 0.8.2 + Before version 0.8.2, ``bytes(buf)`` was supported in Python 3 to get the content of the buffer, but on Python 2 it would return the repr ``<_cffi_backend.buffer object>``. This has been fixed. But you should avoid using ``str(buf)``: it now gives inconsistent results between Python 2 and Python 3 (this is similar to how ``str()`` - gives inconsistent results on regular byte strings). + gives inconsistent results on regular byte strings). Use ``buf[:]`` + instead. 
``ffi.typeof("C type" or cdata object)``: return an object of type diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -42,25 +42,14 @@ resultlist[:] = res def ask_supports_thread(): - if sys.platform == "darwin": - sys.stderr.write("OS/X: confusion between 'cc' versus 'gcc'") - sys.stderr.write(" (see issue 123)\n") - sys.stderr.write("will not use '__thread' in the C code\n") - return - import distutils.errors - from distutils.ccompiler import new_compiler - compiler = new_compiler(force=1) - try: - compiler.compile(['c/check__thread.c']) - except distutils.errors.CompileError: - sys.stderr.write("the above error message can be safely ignored;\n") - sys.stderr.write("will not use '__thread' in the C code\n") + from distutils.core import Distribution + config = Distribution().get_command_obj('config') + ok = config.try_compile('__thread int some_threadlocal_variable_42;') + if ok: + define_macros.append(('USE__THREAD', None)) else: - define_macros.append(('USE__THREAD', None)) - try: - os.unlink('c/check__thread.o') - except OSError: - pass + sys.stderr.write("Note: will not use '__thread' in the C code\n") + sys.stderr.write("The above error message can be safely ignored\n") def use_pkg_config(): _ask_pkg_config(include_dirs, '--cflags-only-I', '-I', sysroot=True) @@ -124,7 +113,7 @@ `Mailing list `_ """, - version='0.8.2', + version='0.8.3', packages=['cffi'], zip_safe=False, diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -865,25 +865,25 @@ def test_enum(self): ffi = FFI(backend=self.Backend()) - ffi.cdef("enum foo { A, B, CC, D };") - assert ffi.string(ffi.cast("enum foo", 0)) == "A" - assert ffi.string(ffi.cast("enum foo", 2)) == "CC" - assert ffi.string(ffi.cast("enum foo", 3)) == "D" + ffi.cdef("enum foo { A0, B0, CC0, D0 };") + assert ffi.string(ffi.cast("enum foo", 0)) == "A0" + assert ffi.string(ffi.cast("enum foo", 2)) == "CC0" + assert 
ffi.string(ffi.cast("enum foo", 3)) == "D0" assert ffi.string(ffi.cast("enum foo", 4)) == "4" - ffi.cdef("enum bar { A, B=-2, CC, D, E };") - assert ffi.string(ffi.cast("enum bar", 0)) == "A" - assert ffi.string(ffi.cast("enum bar", -2)) == "B" - assert ffi.string(ffi.cast("enum bar", -1)) == "CC" - assert ffi.string(ffi.cast("enum bar", 1)) == "E" + ffi.cdef("enum bar { A1, B1=-2, CC1, D1, E1 };") + assert ffi.string(ffi.cast("enum bar", 0)) == "A1" + assert ffi.string(ffi.cast("enum bar", -2)) == "B1" + assert ffi.string(ffi.cast("enum bar", -1)) == "CC1" + assert ffi.string(ffi.cast("enum bar", 1)) == "E1" assert ffi.cast("enum bar", -2) != ffi.cast("enum bar", -2) assert ffi.cast("enum foo", 0) != ffi.cast("enum bar", 0) assert ffi.cast("enum bar", 0) != ffi.cast("int", 0) - assert repr(ffi.cast("enum bar", -1)) == "" + assert repr(ffi.cast("enum bar", -1)) == "" assert repr(ffi.cast("enum foo", -1)) == ( # enums are unsigned, if "") # they contain no neg value - ffi.cdef("enum baz { A=0x1000, B=0x2000 };") - assert ffi.string(ffi.cast("enum baz", 0x1000)) == "A" - assert ffi.string(ffi.cast("enum baz", 0x2000)) == "B" + ffi.cdef("enum baz { A2=0x1000, B2=0x2000 };") + assert ffi.string(ffi.cast("enum baz", 0x1000)) == "A2" + assert ffi.string(ffi.cast("enum baz", 0x2000)) == "B2" def test_enum_in_struct(self): ffi = FFI(backend=self.Backend()) @@ -1322,6 +1322,16 @@ e = ffi.cast("enum e", 0) assert ffi.string(e) == "AA" # pick the first one arbitrarily + def test_enum_refer_previous_enum_value(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("enum e { AA, BB=2, CC=4, DD=BB, EE, FF=CC, GG=FF };") + assert ffi.string(ffi.cast("enum e", 2)) == "BB" + assert ffi.string(ffi.cast("enum e", 3)) == "EE" + assert ffi.sizeof("char[DD]") == 2 + assert ffi.sizeof("char[EE]") == 3 + assert ffi.sizeof("char[FF]") == 4 + assert ffi.sizeof("char[GG]") == 4 + def test_nested_anonymous_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -1543,6 +1553,7 @@ 
ffi2.include(ffi1) p = ffi2.cast("enum foo", 1) assert ffi2.string(p) == "FB" + assert ffi2.sizeof("char[FC]") == 2 def test_include_typedef_2(self): backend = self.Backend() @@ -1570,3 +1581,25 @@ assert s[0].a == b'X' assert s[1].b == -4892220 assert s[1].a == b'Y' + + def test_define_integer_constant(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + #define DOT_0 0 + #define DOT 100 + #define DOT_OCT 0100l + #define DOT_HEX 0x100u + #define DOT_HEX2 0X10 + #define DOT_UL 1000UL + enum foo {AA, BB=DOT, CC}; + """) + lib = ffi.dlopen(None) + assert ffi.string(ffi.cast("enum foo", 100)) == "BB" + assert lib.DOT_0 == 0 + assert lib.DOT == 100 + assert lib.DOT_OCT == 0o100 + assert lib.DOT_HEX == 0x100 + assert lib.DOT_HEX2 == 0x10 + assert lib.DOT_UL == 1000 + + diff --git a/testing/test_function.py b/testing/test_function.py --- a/testing/test_function.py +++ b/testing/test_function.py @@ -402,3 +402,18 @@ if wr() is not None: import gc; gc.collect() assert wr() is None # 'data' does not leak + + def test_windows_stdcall(self): + if sys.platform != 'win32': + py.test.skip("Windows-only test") + if self.Backend is CTypesBackend: + py.test.skip("not with the ctypes backend") + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + p_freq = ffi.new("LONGLONG *") + res = m.QueryPerformanceFrequency(p_freq) + assert res != 0 + assert p_freq[0] != 0 diff --git a/testing/test_parsing.py b/testing/test_parsing.py --- a/testing/test_parsing.py +++ b/testing/test_parsing.py @@ -161,9 +161,10 @@ def test_define_not_supported_for_now(): ffi = FFI(backend=FakeBackend()) - e = py.test.raises(CDefError, ffi.cdef, "#define FOO 42") - assert str(e.value) == \ - 'only supports the syntax "#define FOO ..." for now (literally)' + e = py.test.raises(CDefError, ffi.cdef, '#define FOO "blah"') + assert str(e.value) == ( + 'only supports the syntax "#define FOO ..." 
(literally)' + ' or "#define FOO 0x1FF" for now') def test_unnamed_struct(): ffi = FFI(backend=FakeBackend()) diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1,4 +1,4 @@ -import py +import py, re import sys, os, math, weakref from cffi import FFI, VerificationError, VerificationMissing, model from testing.support import * @@ -29,6 +29,24 @@ def setup_module(): import cffi.verifier cffi.verifier.cleanup_tmpdir() + # + # check that no $ sign is produced in the C file; it used to be the + # case that anonymous enums would produce '$enum_$1', which was + # used as part of a function name. GCC accepts such names, but it's + # apparently non-standard. + _r_comment = re.compile(r"/\*.*?\*/|//.*?$", re.DOTALL | re.MULTILINE) + _r_string = re.compile(r'\".*?\"') + def _write_source_and_check(self, file=None): + base_write_source(self, file) + if file is None: + f = open(self.sourcefilename) + data = f.read() + f.close() + data = _r_comment.sub(' ', data) + data = _r_string.sub('"skipped"', data) + assert '$' not in data + base_write_source = cffi.verifier.Verifier._write_source + cffi.verifier.Verifier._write_source = _write_source_and_check def test_module_type(): @@ -153,6 +171,9 @@ all_primitive_types = model.PrimitiveType.ALL_PRIMITIVE_TYPES +if sys.platform == 'win32': + all_primitive_types = all_primitive_types.copy() + del all_primitive_types['ssize_t'] all_integer_types = sorted(tp for tp in all_primitive_types if all_primitive_types[tp] == 'i') all_float_types = sorted(tp for tp in all_primitive_types @@ -1452,8 +1473,8 @@ assert func() == 42 def test_FILE_stored_in_stdout(): - if sys.platform == 'win32': - py.test.skip("MSVC: cannot assign to stdout") + if not sys.platform.startswith('linux'): + py.test.skip("likely, we cannot assign to stdout") ffi = FFI() ffi.cdef("int printf(const char *, ...); FILE *setstdout(FILE *);") lib = ffi.verify(""" @@ -1636,8 +1657,8 @@ ffi = FFI() ffi.cdef(""" 
int (*python_callback)(int how_many, int *values); - void *const c_callback; /* pass this ptr to C routines */ - int some_c_function(void *cb); + int (*const c_callback)(int,...); /* pass this ptr to C routines */ + int some_c_function(int(*cb)(int,...)); """) lib = ffi.verify(""" #include @@ -1884,3 +1905,60 @@ p = lib.f2(42) x = lib.f1(p) assert x == 42 + +def _run_in_multiple_threads(test1): + test1() + import sys + try: + import thread + except ImportError: + import _thread as thread + errors = [] + def wrapper(lock): + try: + test1() + except: + errors.append(sys.exc_info()) + lock.release() + locks = [] + for i in range(10): + _lock = thread.allocate_lock() + _lock.acquire() + thread.start_new_thread(wrapper, (_lock,)) + locks.append(_lock) + for _lock in locks: + _lock.acquire() + if errors: + raise errors[0][1] + +def test_errno_working_even_with_pypys_jit(): + ffi = FFI() + ffi.cdef("int f(int);") + lib = ffi.verify(""" + #include + int f(int x) { return (errno = errno + x); } + """) + @_run_in_multiple_threads + def test1(): + ffi.errno = 0 + for i in range(10000): + e = lib.f(1) + assert e == i + 1 + assert ffi.errno == e + for i in range(10000): + ffi.errno = i + e = lib.f(42) + assert e == i + 42 + +def test_getlasterror_working_even_with_pypys_jit(): + if sys.platform != 'win32': + py.test.skip("win32-only test") + ffi = FFI() + ffi.cdef("void SetLastError(DWORD);") + lib = ffi.dlopen("Kernel32.dll") + @_run_in_multiple_threads + def test1(): + for i in range(10000): + n = (1 << 29) + i + lib.SetLastError(n) + assert ffi.getwinerror()[0] == n From noreply at buildbot.pypy.org Sat Jul 5 17:05:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 17:05:38 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: Update MD5/SHA Message-ID: <20140705150538.453F21C34C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1536:91e72d4a59d9 Date: 2014-07-05 17:05 +0200 
http://bitbucket.org/cffi/cffi/changeset/91e72d4a59d9/ Log: Update MD5/SHA diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -92,9 +92,9 @@ - Or grab the most current version by following the instructions below. - - MD5: ... + - MD5: 57e140a7d475f58bada8f2ada3f5749e - - SHA: ... + - SHA: 4fd222f3044b9210476255d753c0bb22b8050f99 * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From noreply at buildbot.pypy.org Sat Jul 5 17:32:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 17:32:01 +0200 (CEST) Subject: [pypy-commit] pypy default: One more ignored instruction Message-ID: <20140705153201.14AE61C3334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72361:85672cabac67 Date: 2014-07-05 17:31 +0200 http://bitbucket.org/pypy/pypy/changeset/85672cabac67/ Log: One more ignored instruction diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -523,6 +523,8 @@ 'movnt', 'mfence', 'lfence', 'sfence', # bit manipulations 'andn', 'bextr', 'blsi', 'blsmask', 'blsr', 'tzcnt', 'lzcnt', + # uh, this can occur with a 'call' on the following line... 
+ 'rex64', ]) # a partial list is hopefully good enough for now; it's all to support From noreply at buildbot.pypy.org Sat Jul 5 18:40:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 18:40:46 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: A workaround by Alex Gaynor for a bug in distutils that shows up on OS/X Message-ID: <20140705164046.AD0001C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1537:c63026a567f1 Date: 2014-07-05 18:40 +0200 http://bitbucket.org/cffi/cffi/changeset/c63026a567f1/ Log: A workaround by Alex Gaynor for a bug in distutils that shows up on OS/X diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -43,6 +43,8 @@ def ask_supports_thread(): from distutils.core import Distribution + from distutils.sysconfig import get_config_vars + get_config_vars() # workaround for a bug of distutils, e.g. on OS/X config = Distribution().get_command_obj('config') ok = config.try_compile('__thread int some_threadlocal_variable_42;') if ok: From noreply at buildbot.pypy.org Sat Jul 5 18:53:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 18:53:24 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: Update to 0.8.4 Message-ID: <20140705165324.D6A1E1C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1538:39abae73cbd4 Date: 2014-07-05 18:53 +0200 http://bitbucket.org/cffi/cffi/changeset/39abae73cbd4/ Log: Update to 0.8.4 diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.3" -__version_info__ = (0, 8, 3) +__version__ = "0.8.4" +__version_info__ = (0, 8, 4) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. 
version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8.3' +release = '0.8.4' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,13 +88,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.3.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.4.tar.gz - Or grab the most current version by following the instructions below. - - MD5: 57e140a7d475f58bada8f2ada3f5749e + - MD5: ... - - SHA: 4fd222f3044b9210476255d753c0bb22b8050f99 + - SHA: ... * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -115,7 +115,7 @@ `Mailing list `_ """, - version='0.8.3', + version='0.8.4', packages=['cffi'], zip_safe=False, diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -10,6 +10,7 @@ '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change '0.8.1': '0.8', # did not change (essentially) + '0.8.4': '0.8.3', # did not change } def test_version(): From noreply at buildbot.pypy.org Sat Jul 5 18:55:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 18:55:19 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: Add the MD5/SHA Message-ID: <20140705165519.E2ADA1C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1539:19a9c5b072f3 Date: 2014-07-05 18:55 +0200 http://bitbucket.org/cffi/cffi/changeset/19a9c5b072f3/ Log: Add the MD5/SHA diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -92,9 +92,9 @@ - Or grab the most current version by following the instructions below. - - MD5: ... + - MD5: 148894125d3fa696b418dc6559818f7a - - SHA: ... 
+ - SHA: 754ad62d0868bd48f34b2a5818575493e15b5514 * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From noreply at buildbot.pypy.org Sat Jul 5 18:55:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 18:55:21 +0200 (CEST) Subject: [pypy-commit] cffi default: hg merge release-0.8 Message-ID: <20140705165521.1DFE21C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1540:41a3446cfe40 Date: 2014-07-05 18:55 +0200 http://bitbucket.org/cffi/cffi/changeset/41a3446cfe40/ Log: hg merge release-0.8 diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.3" -__version_info__ = (0, 8, 3) +__version__ = "0.8.4" +__version_info__ = (0, 8, 4) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8.3' +release = '0.8.4' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,13 +88,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.3.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.4.tar.gz - Or grab the most current version by following the instructions below. - - MD5: ... + - MD5: 148894125d3fa696b418dc6559818f7a - - SHA: ... 
+ - SHA: 754ad62d0868bd48f34b2a5818575493e15b5514 * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -43,6 +43,8 @@ def ask_supports_thread(): from distutils.core import Distribution + from distutils.sysconfig import get_config_vars + get_config_vars() # workaround for a bug of distutils, e.g. on OS/X config = Distribution().get_command_obj('config') ok = config.try_compile('__thread int some_threadlocal_variable_42;') if ok: @@ -113,7 +115,7 @@ `Mailing list `_ """, - version='0.8.3', + version='0.8.4', packages=['cffi'], zip_safe=False, diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -10,6 +10,7 @@ '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change '0.8.1': '0.8', # did not change (essentially) + '0.8.4': '0.8.3', # did not change } def test_version(): From noreply at buildbot.pypy.org Sat Jul 5 19:12:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 19:12:24 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: Argh. The version 0.8.4 works with the backend "0.8.4" or "0.8", but Message-ID: <20140705171224.1C7CB1C0ECA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1541:9b86d5f7007f Date: 2014-07-05 19:12 +0200 http://bitbucket.org/cffi/cffi/changeset/9b86d5f7007f/ Log: Argh. The version 0.8.4 works with the backend "0.8.4" or "0.8", but not "0.8.3". As a result, the release 0.8.4 is completely unusable. Get rid of the possibility to have some different version numbers in the backend, and prepare for 0.8.5... 
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5504,7 +5504,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("0.8.3"); + v = PyText_FromString("0.8.5"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3199,4 +3199,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8.3" + assert __version__ == "0.8.5" diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.4" -__version_info__ = (0, 8, 4) +__version__ = "0.8.5" +__version_info__ = (0, 8, 5) diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -55,8 +55,7 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert (backend.__version__ == __version__ or - backend.__version__ == __version__[:3]) + assert backend.__version__ == __version__ # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8.4' +release = '0.8.5' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,13 +88,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.4.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.5.tar.gz - Or grab the most current version by following the instructions below. - - MD5: 148894125d3fa696b418dc6559818f7a + - MD5: ... - - SHA: 754ad62d0868bd48f34b2a5818575493e15b5514 + - SHA: ... * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -115,7 +115,7 @@ `Mailing list `_ """, - version='0.8.4', + version='0.8.5', packages=['cffi'], zip_safe=False, diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -5,19 +5,20 @@ if '_cffi_backend' in sys.builtin_module_names: py.test.skip("this is embedded version") -BACKEND_VERSIONS = { - '0.4.2': '0.4', # did not change - '0.7.1': '0.7', # did not change - '0.7.2': '0.7', # did not change - '0.8.1': '0.8', # did not change (essentially) - '0.8.4': '0.8.3', # did not change - } +#BACKEND_VERSIONS = { +# '0.4.2': '0.4', # did not change +# '0.7.1': '0.7', # did not change +# '0.7.2': '0.7', # did not change +# '0.8.1': '0.8', # did not change (essentially) +# '0.8.4': '0.8.3', # did not change +# } def test_version(): v = cffi.__version__ version_info = '.'.join(str(i) for i in cffi.__version_info__) assert v == version_info - assert BACKEND_VERSIONS.get(v, v) == _cffi_backend.__version__ + #v = BACKEND_VERSIONS.get(v, v) + assert v == _cffi_backend.__version__ def test_doc_version(): parent = os.path.dirname(os.path.dirname(__file__)) @@ -48,5 +49,5 @@ v = cffi.__version__ p = os.path.join(parent, 'c', 'test_c.py') content = open(p).read() - assert (('assert __version__ == "%s"' % BACKEND_VERSIONS.get(v, v)) - in content) + #v = BACKEND_VERSIONS.get(v, v) + assert 
(('assert __version__ == "%s"' % v) in content) From noreply at buildbot.pypy.org Sat Jul 5 19:16:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 19:16:27 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: MD5/SHA Message-ID: <20140705171627.8C4871C0ECA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1542:9c34ce4ba70e Date: 2014-07-05 19:16 +0200 http://bitbucket.org/cffi/cffi/changeset/9c34ce4ba70e/ Log: MD5/SHA diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -92,9 +92,9 @@ - Or grab the most current version by following the instructions below. - - MD5: ... + - MD5: 964981f3fada08abbe9a6f8948f3a4c3 - - SHA: ... + - SHA: f921b0ad5360c58a87c927b63d5a177ac3e8847d * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From noreply at buildbot.pypy.org Sat Jul 5 19:16:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 19:16:28 +0200 (CEST) Subject: [pypy-commit] cffi default: hg merge release-0.8 Message-ID: <20140705171628.D5BEC1C0ECA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1543:76f18e78b377 Date: 2014-07-05 19:16 +0200 http://bitbucket.org/cffi/cffi/changeset/76f18e78b377/ Log: hg merge release-0.8 diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5504,7 +5504,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("0.8.3"); + v = PyText_FromString("0.8.5"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3199,4 +3199,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8.3" + assert __version__ == "0.8.5" diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, 
CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.4" -__version_info__ = (0, 8, 4) +__version__ = "0.8.5" +__version_info__ = (0, 8, 5) diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -55,8 +55,7 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert (backend.__version__ == __version__ or - backend.__version__ == __version__[:3]) + assert backend.__version__ == __version__ # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8.4' +release = '0.8.5' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,13 +88,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.4.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.5.tar.gz - Or grab the most current version by following the instructions below. 
- - MD5: 148894125d3fa696b418dc6559818f7a + - MD5: 964981f3fada08abbe9a6f8948f3a4c3 - - SHA: 754ad62d0868bd48f34b2a5818575493e15b5514 + - SHA: f921b0ad5360c58a87c927b63d5a177ac3e8847d * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -115,7 +115,7 @@ `Mailing list `_ """, - version='0.8.4', + version='0.8.5', packages=['cffi'], zip_safe=False, diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -5,19 +5,20 @@ if '_cffi_backend' in sys.builtin_module_names: py.test.skip("this is embedded version") -BACKEND_VERSIONS = { - '0.4.2': '0.4', # did not change - '0.7.1': '0.7', # did not change - '0.7.2': '0.7', # did not change - '0.8.1': '0.8', # did not change (essentially) - '0.8.4': '0.8.3', # did not change - } +#BACKEND_VERSIONS = { +# '0.4.2': '0.4', # did not change +# '0.7.1': '0.7', # did not change +# '0.7.2': '0.7', # did not change +# '0.8.1': '0.8', # did not change (essentially) +# '0.8.4': '0.8.3', # did not change +# } def test_version(): v = cffi.__version__ version_info = '.'.join(str(i) for i in cffi.__version_info__) assert v == version_info - assert BACKEND_VERSIONS.get(v, v) == _cffi_backend.__version__ + #v = BACKEND_VERSIONS.get(v, v) + assert v == _cffi_backend.__version__ def test_doc_version(): parent = os.path.dirname(os.path.dirname(__file__)) @@ -48,5 +49,5 @@ v = cffi.__version__ p = os.path.join(parent, 'c', 'test_c.py') content = open(p).read() - assert (('assert __version__ == "%s"' % BACKEND_VERSIONS.get(v, v)) - in content) + #v = BACKEND_VERSIONS.get(v, v) + assert (('assert __version__ == "%s"' % v) in content) From noreply at buildbot.pypy.org Sat Jul 5 19:41:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 19:41:46 +0200 (CEST) Subject: [pypy-commit] cffi default: Fix for Windows, which doesn't have a reasonable snprintf() Message-ID: 
<20140705174146.3191E1C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1544:079492211215 Date: 2014-07-05 19:41 +0200 http://bitbucket.org/cffi/cffi/changeset/079492211215/ Log: Fix for Windows, which doesn't have a reasonable snprintf() diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -435,14 +435,14 @@ enumerator, enumerator, enumvalue)) prnt(' char buf[64];') prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % enumerator) - prnt(' snprintf(out_error, 255,' + prnt(' sprintf(out_error,' ' "%s has the real value %s, not %s",') prnt(' "%s", buf, "%d");' % ( - enumerator, enumvalue)) + enumerator[:100], enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') From noreply at buildbot.pypy.org Sat Jul 5 20:08:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 20:08:10 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: hg merge default Message-ID: <20140705180810.3584F1C34C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1545:4573982bcf68 Date: 2014-07-05 19:49 +0200 http://bitbucket.org/cffi/cffi/changeset/4573982bcf68/ Log: hg merge default diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -435,14 +435,14 @@ enumerator, enumerator, enumvalue)) prnt(' char buf[64];') prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % enumerator) - prnt(' snprintf(out_error, 255,' + prnt(' sprintf(out_error,' ' "%s has 
the real value %s, not %s",') prnt(' "%s", buf, "%d");' % ( - enumerator, enumvalue)) + enumerator[:100], enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') From noreply at buildbot.pypy.org Sat Jul 5 20:08:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 20:08:11 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: Prepare release 0.8.6 Message-ID: <20140705180811.94FDE1C34C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1546:452a1c1a5005 Date: 2014-07-05 20:06 +0200 http://bitbucket.org/cffi/cffi/changeset/452a1c1a5005/ Log: Prepare release 0.8.6 diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5504,7 +5504,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("0.8.5"); + v = PyText_FromString("0.8.6"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3199,4 +3199,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8.5" + assert __version__ == "0.8.6" diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.5" -__version_info__ = (0, 8, 5) +__version__ = "0.8.6" +__version_info__ = (0, 8, 6) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8.5' +release = '0.8.6' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,13 +88,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.5.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.6.tar.gz - Or grab the most current version by following the instructions below. - - MD5: 964981f3fada08abbe9a6f8948f3a4c3 + - MD5: ... - - SHA: f921b0ad5360c58a87c927b63d5a177ac3e8847d + - SHA: ... * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -115,7 +115,7 @@ `Mailing list `_ """, - version='0.8.5', + version='0.8.6', packages=['cffi'], zip_safe=False, From noreply at buildbot.pypy.org Sat Jul 5 20:08:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 20:08:12 +0200 (CEST) Subject: [pypy-commit] cffi release-0.8: update MD5/SHA Message-ID: <20140705180812.EC1A21C34C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-0.8 Changeset: r1547:ca52363ff6ac Date: 2014-07-05 20:08 +0200 http://bitbucket.org/cffi/cffi/changeset/ca52363ff6ac/ Log: update MD5/SHA diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -92,9 +92,9 @@ - Or grab the most current version by following the instructions below. - - MD5: ... + - MD5: 474b5a68299a6f05009171de1dc91be6 - - SHA: ... 
+ - SHA: 4e82390201e6f30e9df8a91cd176df19b8f2d547 * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From noreply at buildbot.pypy.org Sat Jul 5 20:08:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 5 Jul 2014 20:08:14 +0200 (CEST) Subject: [pypy-commit] cffi default: hg merge release-0.8 Message-ID: <20140705180814.5B9111C34C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1548:59fd1de71875 Date: 2014-07-05 20:08 +0200 http://bitbucket.org/cffi/cffi/changeset/59fd1de71875/ Log: hg merge release-0.8 diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5504,7 +5504,7 @@ if (v == NULL || PyModule_AddObject(m, "_C_API", v) < 0) INITERROR; - v = PyText_FromString("0.8.5"); + v = PyText_FromString("0.8.6"); if (v == NULL || PyModule_AddObject(m, "__version__", v) < 0) INITERROR; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3199,4 +3199,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8.5" + assert __version__ == "0.8.6" diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.5" -__version_info__ = (0, 8, 5) +__version__ = "0.8.6" +__version_info__ = (0, 8, 6) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8.5' +release = '0.8.6' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -88,13 +88,13 @@ Download and Installation: -* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.5.tar.gz +* http://pypi.python.org/packages/source/c/cffi/cffi-0.8.6.tar.gz - Or grab the most current version by following the instructions below. - - MD5: 964981f3fada08abbe9a6f8948f3a4c3 + - MD5: 474b5a68299a6f05009171de1dc91be6 - - SHA: f921b0ad5360c58a87c927b63d5a177ac3e8847d + - SHA: 4e82390201e6f30e9df8a91cd176df19b8f2d547 * Or get it from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -115,7 +115,7 @@ `Mailing list `_ """, - version='0.8.5', + version='0.8.6', packages=['cffi'], zip_safe=False, From noreply at buildbot.pypy.org Sat Jul 5 22:45:51 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 5 Jul 2014 22:45:51 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Rename gc_header -> needs_gc_header. Message-ID: <20140705204551.96AC91C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r72362:5a07adb52ffb Date: 2014-07-05 22:38 +0200 http://bitbucket.org/pypy/pypy/changeset/5a07adb52ffb/ Log: Rename gc_header -> needs_gc_header. 
diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -41,7 +41,7 @@ class Type(object): varsize = False - gc_header = False + needs_gc_header = False def repr_type(self, extra_len=None): return self.typestr @@ -415,11 +415,11 @@ class StructType(Type): - def setup(self, name, fields, gc_header): + def setup(self, name, fields, needs_gc_header): self.name = name - self.gc_header = gc_header + self.needs_gc_header = needs_gc_header fields = list(fields) - if gc_header: + if needs_gc_header: fields = database.genllvm.gcpolicy.get_gc_fields() + fields elif all(t is LLVMVoid for t, f in fields): fields.append((LLVMSigned, '_fill')) @@ -437,8 +437,8 @@ return fields = ((db.get_type(type._flds[f]), f) for f in type._names) is_gc = type._gckind == 'gc' - gc_header = is_gc and type._first_struct() == (None, None) - self.setup('%' + type._name, fields, gc_header) + needs_gc_header = is_gc and type._first_struct() == (None, None) + self.setup('%' + type._name, fields, needs_gc_header) def repr_type(self, extra_len=None): if extra_len not in self.size_variants: @@ -463,7 +463,7 @@ return self.name[1:] def is_zero(self, value): - if self.gc_header: + if self.needs_gc_header: return False elif self.fldnames_wo_voids == ['_fill']: return True @@ -476,7 +476,7 @@ def repr_value(self, value, extra_len=None): if self.is_zero(value): return 'zeroinitializer' - if self.gc_header: + if self.needs_gc_header: data = database.genllvm.gcpolicy.get_gc_field_values(value) data.extend(getattr(value, fn) for _, fn in self.fields[1:]) else: @@ -576,7 +576,7 @@ varsize = True def setup(self, of, is_gc): - self.gc_header = is_gc + self.needs_gc_header = is_gc self.bare_array_type = BareArrayType() self.bare_array_type.setup(of, None) self.struct_type = StructType() @@ -605,7 +605,7 @@ return self.struct_type.repr_type_and_value(ArrayHelper(value)) def add_indices(self, gep, index): 
- if self.gc_header: + if self.needs_gc_header: gep.add_field_index(2) else: gep.add_field_index(1) @@ -749,7 +749,7 @@ class_ = _LL_TO_LLVM[type.__class__] self.types[type] = ret = class_() ret.setup_from_lltype(self, type) - if ret.gc_header: + if ret.needs_gc_header: _llvm_needs_header[type] = database.genllvm.gcpolicy \ .get_gc_fields_lltype() # hint for ll2ctypes return ret @@ -1227,7 +1227,7 @@ self.w('{result.V} = add {result.T} 0, {type.length}' .format(**locals())) else: - if type.gc_header: + if type.needs_gc_header: gep.add_field_index(1) else: gep.add_field_index(0) From noreply at buildbot.pypy.org Sat Jul 5 22:45:52 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 5 Jul 2014 22:45:52 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Close head (the parent of this commit was pushed by accident). Message-ID: <20140705204552.D93AA1C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r72363:640b6e023555 Date: 2014-07-05 22:42 +0200 http://bitbucket.org/pypy/pypy/changeset/640b6e023555/ Log: Close head (the parent of this commit was pushed by accident). 
From noreply at buildbot.pypy.org Sat Jul 5 22:45:54 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 5 Jul 2014 22:45:54 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 32435d62aa33 on branch gc-pinning Message-ID: <20140705204554.4689E1C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: r72364:a9e0f15c33e3 Date: 2014-07-05 22:44 +0200 http://bitbucket.org/pypy/pypy/changeset/a9e0f15c33e3/ Log: Merge closed head 32435d62aa33 on branch gc-pinning From noreply at buildbot.pypy.org Sat Jul 5 22:45:55 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 5 Jul 2014 22:45:55 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head d79aec73fa3c on branch gc-two-end-nursery Message-ID: <20140705204555.668D81C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: r72365:52d662f367bc Date: 2014-07-05 22:44 +0200 http://bitbucket.org/pypy/pypy/changeset/52d662f367bc/ Log: Merge closed head d79aec73fa3c on branch gc-two-end-nursery From noreply at buildbot.pypy.org Sat Jul 5 22:45:56 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 5 Jul 2014 22:45:56 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 640b6e023555 on branch llvm-translation-backend Message-ID: <20140705204556.9AB321C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: r72366:b8457b91c09a Date: 2014-07-05 22:44 +0200 http://bitbucket.org/pypy/pypy/changeset/b8457b91c09a/ Log: Merge closed head 640b6e023555 on branch llvm-translation-backend From noreply at buildbot.pypy.org Sat Jul 5 22:45:57 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 5 Jul 2014 22:45:57 +0200 (CEST) Subject: [pypy-commit] pypy closed-branches: re-close this branch Message-ID: <20140705204557.BD7E11C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: closed-branches Changeset: 
r72367:613891c5da29 Date: 2014-07-05 22:44 +0200 http://bitbucket.org/pypy/pypy/changeset/613891c5da29/ Log: re-close this branch From noreply at buildbot.pypy.org Sun Jul 6 14:36:08 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 6 Jul 2014 14:36:08 +0200 (CEST) Subject: [pypy-commit] pypy default: improve windows build instructions Message-ID: <20140706123608.082771C0250@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72368:979f8b8abacb Date: 2014-07-06 22:35 +1000 http://bitbucket.org/pypy/pypy/changeset/979f8b8abacb/ Log: improve windows build instructions diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -132,19 +132,23 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download http://www.gzip.org/zlib/zlib-1.2.3.tar.gz and extract it in -the base directory. Then compile:: +the base directory. Then compile as a static library:: cd zlib-1.2.3 nmake -f win32\Makefile.msc - copy zlib1.dll \zlib.dll + copy zlib1.lib + copy zlib.h zconf.h The bz2 compression library ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Get the same version of bz2 used by python and compile as a static library:: svn export http://svn.python.org/projects/external/bzip2-1.0.6 cd bzip2-1.0.6 nmake -f makefile.msc - copy bzip.dll \bzip.dll + copy libbz2.lib + copy bzlib.h + The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -166,7 +170,8 @@ is actually enough for pypy). Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in -your PATH. +your PATH, ``win32\bin\release\libexpat.lib`` somewhere in LIB, and +both ``lib\expat.h`` and ``lib\expat_external.h`` somewhere in INCLUDE. 
The OpenSSL library ~~~~~~~~~~~~~~~~~~~ From noreply at buildbot.pypy.org Sun Jul 6 15:41:11 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 6 Jul 2014 15:41:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in scalar-operations (pull request #243) Message-ID: <20140706134111.323781C0906@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r72369:f1bd7e48eb65 Date: 2014-07-06 14:40 +0100 http://bitbucket.org/pypy/pypy/changeset/f1bd7e48eb65/ Log: Merged in scalar-operations (pull request #243) Fix performance regression on ufunc(, ) in numpy diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -18,7 +18,12 @@ pass -class W_NDimArray(W_Root): +class W_NumpyObject(W_Root): + """Base class for ndarrays and scalars (aka boxes).""" + _attrs_ = [] + + +class W_NDimArray(W_NumpyObject): __metaclass__ = extendabletype def __init__(self, implementation): @@ -85,6 +90,14 @@ w_val = dtype.coerce(space, space.wrap(0)) return convert_to_array(space, w_val) + @staticmethod + def from_scalar(space, w_scalar): + """Convert a scalar into a 0-dim array""" + dtype = w_scalar.get_dtype(space) + w_arr = W_NDimArray.from_shape(space, [], dtype) + w_arr.set_scalar_value(w_scalar) + return w_arr + def convert_to_array(space, w_obj): from pypy.module.micronumpy.ctors import array diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -1,4 +1,3 @@ -from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.mixedmodule import MixedModule @@ -14,7 +13,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy import constants as NPY -from 
pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.base import W_NDimArray, W_NumpyObject from pypy.module.micronumpy.concrete import VoidBoxStorage from pypy.module.micronumpy.flagsobj import W_FlagsObject @@ -126,7 +125,7 @@ return ret -class W_GenericBox(W_Root): +class W_GenericBox(W_NumpyObject): _attrs_ = ['w_flags'] def descr__new__(space, w_subtype, __args__): @@ -136,6 +135,12 @@ def get_dtype(self, space): return self._get_dtype(space) + def is_scalar(self): + return True + + def get_scalar_value(self): + return self + def item(self, space): return self.get_dtype(space).itemtype.to_builtin_type(space, self) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -4,7 +4,8 @@ from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import descriptor, loop -from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.base import ( + W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter @@ -24,24 +25,44 @@ return box +def try_array_method(space, w_object, w_dtype=None): + w___array__ = space.lookup(w_object, "__array__") + if w___array__ is None: + return None + if w_dtype is None: + w_dtype = space.w_None + w_array = space.get_and_call_function(w___array__, w_object, w_dtype) + if isinstance(w_array, W_NDimArray): + return w_array + else: + raise oefmt(space.w_ValueError, + "object __array__ method not producing an array") + + @unwrap_spec(ndmin=int, copy=bool, subok=bool) def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, ndmin=0): + w_res = _array(space, w_object, w_dtype, copy, w_order, subok) + shape = w_res.get_shape() + if len(shape) < ndmin: + shape = [1] * (ndmin - len(shape)) + shape + impl = w_res.implementation.set_shape(space, w_res, 
shape) + if w_res is w_object: + return W_NDimArray(impl) + else: + w_res.implementation = impl + return w_res + +def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): from pypy.module.micronumpy import strides # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): - w___array__ = space.lookup(w_object, "__array__") - if w___array__ is not None: - if space.is_none(w_dtype): - w_dtype = space.w_None - w_array = space.get_and_call_function(w___array__, w_object, w_dtype) - if isinstance(w_array, W_NDimArray): - # feed w_array back into array() for other properties - return array(space, w_array, w_dtype, False, w_order, subok, ndmin) - else: - raise oefmt(space.w_ValueError, - "object __array__ method not producing an array") + w_array = try_array_method(space, w_object, w_dtype) + if w_array is not None: + # continue with w_array, but do further operations in place + w_object = w_array + copy = False dtype = descriptor.decode_w_dtype(space, w_dtype) @@ -57,19 +78,10 @@ # arrays with correct dtype if isinstance(w_object, W_NDimArray) and \ (space.is_none(w_dtype) or w_object.get_dtype() is dtype): - shape = w_object.get_shape() if copy: - w_ret = w_object.descr_copy(space) + return w_object.descr_copy(space) else: - if ndmin <= len(shape): - return w_object - new_impl = w_object.implementation.set_shape(space, w_object, shape) - w_ret = W_NDimArray(new_impl) - if ndmin > len(shape): - shape = [1] * (ndmin - len(shape)) + shape - w_ret.implementation = w_ret.implementation.set_shape(space, - w_ret, shape) - return w_ret + return w_object # not an array or incorrect dtype shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) @@ -81,8 +93,6 @@ # promote S0 -> S1, U0 -> U1 dtype = descriptor.variable_dtype(space, dtype.char + '1') - if ndmin > len(shape): - shape = [1] * (ndmin - len(shape)) + shape w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) if 
len(elems_w) == 1: w_arr.set_scalar_value(dtype.coerce(space, elems_w[0])) @@ -91,6 +101,33 @@ return w_arr +def numpify(space, w_object): + """Convert the object to a W_NumpyObject""" + # XXX: code duplication with _array() + from pypy.module.micronumpy import strides + if isinstance(w_object, W_NumpyObject): + return w_object + # for anything that isn't already an array, try __array__ method first + w_array = try_array_method(space, w_object) + if w_array is not None: + return w_array + + shape, elems_w = strides.find_shape_and_elems(space, w_object, None) + dtype = strides.find_dtype_for_seq(space, elems_w, None) + if dtype is None: + dtype = descriptor.get_dtype_cache(space).w_float64dtype + elif dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') + + if len(elems_w) == 1: + return dtype.coerce(space, elems_w[0]) + else: + w_arr = W_NDimArray.from_shape(space, shape, dtype) + loop.assign(space, w_arr, elems_w) + return w_arr + + def _zeros_or_empty(space, w_shape, w_dtype, w_order, zero): dtype = space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -7,6 +7,7 @@ from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy import boxes, descriptor, loop, constants as NPY from pypy.module.micronumpy.base import convert_to_array, W_NDimArray +from pypy.module.micronumpy.ctors import numpify from pypy.module.micronumpy.strides import shape_agreement @@ -17,6 +18,13 @@ def done_if_false(dtype, val): return not dtype.itemtype.bool(val) +def _get_dtype(space, w_npyobj): + if isinstance(w_npyobj, boxes.W_GenericBox): + return w_npyobj.get_dtype(space) + else: + assert isinstance(w_npyobj, W_NDimArray) + return w_npyobj.get_dtype() + class 
W_Ufunc(W_Root): _immutable_fields_ = [ @@ -304,8 +312,8 @@ out = args_w[1] if space.is_w(out, space.w_None): out = None - w_obj = convert_to_array(space, w_obj) - dtype = w_obj.get_dtype() + w_obj = numpify(space, w_obj) + dtype = _get_dtype(space, w_obj) if dtype.is_flexible(): raise OperationError(space.w_TypeError, space.wrap('Not implemented for this type')) @@ -315,7 +323,7 @@ raise oefmt(space.w_TypeError, "ufunc %s not supported for the input type", self.name) calc_dtype = find_unaryop_result_dtype(space, - w_obj.get_dtype(), + dtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) if out is not None: @@ -345,6 +353,7 @@ else: out.fill(space, w_val) return out + assert isinstance(w_obj, W_NDimArray) shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) return loop.call1(space, shape, self.func, calc_dtype, res_dtype, @@ -385,10 +394,10 @@ else: [w_lhs, w_rhs] = args_w w_out = None - w_lhs = convert_to_array(space, w_lhs) - w_rhs = convert_to_array(space, w_rhs) - w_ldtype = w_lhs.get_dtype() - w_rdtype = w_rhs.get_dtype() + w_lhs = numpify(space, w_lhs) + w_rhs = numpify(space, w_rhs) + w_ldtype = _get_dtype(space, w_lhs) + w_rdtype = _get_dtype(space, w_rhs) if w_ldtype.is_str() and w_rdtype.is_str() and \ self.comparison_func: pass @@ -451,6 +460,12 @@ else: out = arr return out + if isinstance(w_lhs, boxes.W_GenericBox): + w_lhs = W_NDimArray.from_scalar(space, w_lhs) + assert isinstance(w_lhs, W_NDimArray) + if isinstance(w_rhs, boxes.W_GenericBox): + w_rhs = W_NDimArray.from_scalar(space, w_rhs) + assert isinstance(w_rhs, W_NDimArray) new_shape = shape_agreement(space, w_lhs.get_shape(), w_rhs) new_shape = shape_agreement(space, new_shape, out, broadcast_down=False) return loop.call2(space, new_shape, self.func, calc_dtype, diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ 
b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -30,6 +30,7 @@ """) def test_array_getitem_accumulate(self): + """Check that operations/ufuncs on array items are jitted correctly""" def main(): import _numpypy.multiarray as np arr = np.zeros((300, 300)) @@ -43,7 +44,6 @@ log = self.run(main, []) assert log.result == 0 loop, = log.loops_by_filename(self.filepath) - skip('used to pass on 69421-f3e717c94913') assert loop.match(""" i81 = int_lt(i76, 300) guard_true(i81, descr=...) From noreply at buildbot.pypy.org Sun Jul 6 15:41:23 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 6 Jul 2014 15:41:23 +0200 (CEST) Subject: [pypy-commit] pypy scalar-operations: Close branch scalar-operations Message-ID: <20140706134123.628B41C0906@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: scalar-operations Changeset: r72370:82fa12110c8f Date: 2014-07-06 14:40 +0100 http://bitbucket.org/pypy/pypy/changeset/82fa12110c8f/ Log: Close branch scalar-operations From noreply at buildbot.pypy.org Sun Jul 6 15:45:38 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 6 Jul 2014 15:45:38 +0200 (CEST) Subject: [pypy-commit] pypy default: update whatsnew-head.rst Message-ID: <20140706134538.7180A1C0906@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r72371:fbd0dadee790 Date: 2014-07-06 14:45 +0100 http://bitbucket.org/pypy/pypy/changeset/fbd0dadee790/ Log: update whatsnew-head.rst diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -47,3 +47,6 @@ .. branch: disable_pythonapi Remove non-functioning ctypes.pyhonapi and ctypes.PyDLL, document this incompatibility with cpython. Recast sys.dllhandle to an int. + +.. branch: scalar-operations +Fix performance regression on ufunc(<scalar>, <scalar>) in numpy. 
From noreply at buildbot.pypy.org Sun Jul 6 19:29:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 6 Jul 2014 19:29:13 +0200 (CEST) Subject: [pypy-commit] cffi default: Oops. Using memcpy() here can be bogus because the addresses can overlap. Message-ID: <20140706172913.3BEE11D2960@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1549:133eb25752ff Date: 2014-07-06 19:29 +0200 http://bitbucket.org/cffi/cffi/changeset/133eb25752ff/ Log: Oops. Using memcpy() here can be bogus because the addresses can overlap. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -1959,7 +1959,7 @@ if ((ctv->ct_flags & CT_ARRAY) && (ctv->ct_itemdescr == ct) && (get_array_length((CDataObject *)v) == length)) { /* fast path: copying from exactly the correct type */ - memcpy(cdata, ((CDataObject *)v)->c_data, itemsize * length); + memmove(cdata, ((CDataObject *)v)->c_data, itemsize * length); return 0; } } From noreply at buildbot.pypy.org Mon Jul 7 07:38:32 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 7 Jul 2014 07:38:32 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Explicitly pass -fno-rtti when compiling PyPyGC.cpp. Message-ID: <20140707053832.CE06C1D3528@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r72372:ca9f8a00d634 Date: 2014-07-06 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/ca9f8a00d634/ Log: Explicitly pass -fno-rtti when compiling PyPyGC.cpp. 
diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1839,7 +1839,7 @@ gc_cpp = this_file.new(basename='PyPyGC.cpp') gc_lib = this_file.new(purebasename='PyPyGC', ext=self.translator.platform.so_ext) - cflags = cmdexec('llvm-config --cxxflags').strip() + cflags = cmdexec('llvm-config --cxxflags').strip() + ' -fno-rtti' cmdexec('clang {} -shared {} -o {}'.format(cflags, gc_cpp, gc_lib)) return gc_lib From noreply at buildbot.pypy.org Mon Jul 7 07:38:34 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 7 Jul 2014 07:38:34 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: minor style fix Message-ID: <20140707053834.130671D3528@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r72373:405a6af63224 Date: 2014-07-06 12:16 +0200 http://bitbucket.org/pypy/pypy/changeset/405a6af63224/ Log: minor style fix diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -55,7 +55,7 @@ def get_extra_len(self, value): raise NotImplementedError("Override in subclass.") - def repr_value(self, obj): + def repr_value(self, value): raise NotImplementedError("Override in subclass.") def repr_type_and_value(self, value): @@ -714,14 +714,16 @@ ptr_type.refs[obj] = 'null' -_LL_TO_LLVM = {lltype.Ptr: PtrType, - lltype.Struct: StructType, lltype.GcStruct: StructType, - lltype.Array: ArrayType, lltype.GcArray: ArrayType, - lltype.FixedSizeArray: BareArrayType, - lltype.FuncType: FuncType, - lltype.OpaqueType: OpaqueType, lltype.GcOpaqueType: OpaqueType, - llgroup.GroupType: GroupType, - llmemory._WeakRefType: OpaqueType} +_LL_TO_LLVM = { + lltype.Ptr: PtrType, + lltype.Struct: StructType, lltype.GcStruct: StructType, + lltype.Array: ArrayType, lltype.GcArray: ArrayType, + lltype.FixedSizeArray: 
BareArrayType, + lltype.FuncType: FuncType, + lltype.OpaqueType: OpaqueType, lltype.GcOpaqueType: OpaqueType, + llgroup.GroupType: GroupType, + llmemory._WeakRefType: OpaqueType, +} class Database(object): identifier_regex = re.compile('^[%@][a-zA-Z$._][a-zA-Z$._0-9]*$') From noreply at buildbot.pypy.org Mon Jul 7 07:38:35 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 7 Jul 2014 07:38:35 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Rename PtrType.to() -> PtrType.tmp() to avoid ambiguity with PtrType's `to` attribute. Message-ID: <20140707053835.5D4BA1D3528@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r72374:e752ad534f8f Date: 2014-07-07 07:16 +0200 http://bitbucket.org/pypy/pypy/changeset/e752ad534f8f/ Log: Rename PtrType.to() -> PtrType.tmp() to avoid ambiguity with PtrType's `to` attribute. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -407,7 +407,7 @@ to = parent_type.add_indices(gep, child) self.refs[obj] = ( 'bitcast({} getelementptr inbounds({}, {}) to {})' - .format(PtrType.to(to).repr_type(), parent_ref, + .format(PtrType.tmp(to).repr_type(), parent_ref, ', '.join(gep.indices), self.repr_type())) else: self.to.repr_ref(self, obj) @@ -1185,7 +1185,7 @@ def _get_element(self, result, var, *fields): if result.type is not LLVMVoid: - t = self._tmp(PtrType.to(result.type)) + t = self._tmp(PtrType.tmp(result.type)) self._get_element_ptr(var, fields, t) self.w('{result.V} = load {t.TV}'.format(**locals())) op_getfield = op_bare_getfield = _get_element @@ -1196,7 +1196,7 @@ fields = rest[:-1] value = rest[-1] if value.type is not LLVMVoid: - t = self._tmp(PtrType.to(value.type)) + t = self._tmp(PtrType.tmp(value.type)) self._get_element_ptr(var, fields, t) self.w('store {value.TV}, {t.TV}'.format(**locals())) op_setfield = op_bare_setfield = _set_element 
@@ -1204,17 +1204,17 @@ op_setarrayitem = op_bare_setarrayitem = _set_element def op_direct_fieldptr(self, result, ptr, field): - t = self._tmp(PtrType.to(result.type.to.of)) + t = self._tmp(PtrType.tmp(result.type.to.of)) self._get_element_ptr(ptr, [field], t) self.w('{result.V} = bitcast {t.TV} to {result.T}'.format(**locals())) def op_direct_arrayitems(self, result, ptr): - t = self._tmp(PtrType.to(result.type.to.of)) + t = self._tmp(PtrType.tmp(result.type.to.of)) self._get_element_ptr(ptr, [ConstantRepr(LLVMSigned, 0)], t) self.w('{result.V} = bitcast {t.TV} to {result.T}'.format(**locals())) def op_direct_ptradd(self, result, var, val): - t = self._tmp(PtrType.to(result.type.to.of)) + t = self._tmp(PtrType.tmp(result.type.to.of)) self.w('{t.V} = getelementptr inbounds {var.TV}, i64 0, {val.TV}' .format(**locals())) self.w('{result.V} = bitcast {t.TV} to {result.T}'.format(**locals())) @@ -1233,7 +1233,7 @@ gep.add_field_index(1) else: gep.add_field_index(0) - t = self._tmp(PtrType.to(LLVMSigned)) + t = self._tmp(PtrType.tmp(LLVMSigned)) gep.assign(t) self.w('{result.V} = load {t.TV}'.format(**locals())) op_getinteriorarraysize = op_getarraysize @@ -1333,9 +1333,9 @@ self.op_direct_call(result, get_repr(raw_free), ptr) def _get_addr(self, ptr_to, addr, offset): - t1 = self._tmp(PtrType.to(LLVMChar)) - t2 = self._tmp(PtrType.to(LLVMChar)) - t3 = self._tmp(PtrType.to(ptr_to)) + t1 = self._tmp(PtrType.tmp(LLVMChar)) + t2 = self._tmp(PtrType.tmp(LLVMChar)) + t3 = self._tmp(PtrType.tmp(ptr_to)) self._cast(t1, addr) self.w('{t2.V} = getelementptr inbounds {t1.TV}, {offset.TV}' .format(**locals())) From noreply at buildbot.pypy.org Mon Jul 7 07:38:36 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 7 Jul 2014 07:38:36 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: oops Message-ID: <20140707053836.A6F3D1D3528@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r72375:f878b24a4a5e Date: 
2014-07-07 07:19 +0200 http://bitbucket.org/pypy/pypy/changeset/f878b24a4a5e/ Log: oops diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -365,7 +365,7 @@ self.refs = {None: 'null'} @classmethod - def to(cls, to): + def tmp(cls, to): # call __new__ to prevent __init__ from being called self = cls.__new__(cls) self.to = to From noreply at buildbot.pypy.org Mon Jul 7 07:38:37 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 7 Jul 2014 07:38:37 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Move weakref and RTTI functionality out of OpaqueType. Message-ID: <20140707053837.EAEDF1D3528@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r72376:afe803c5e077 Date: 2014-07-07 07:30 +0200 http://bitbucket.org/pypy/pypy/changeset/afe803c5e077/ Log: Move weakref and RTTI functionality out of OpaqueType. 
diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -13,6 +13,7 @@ from rpython.memory.gctransform.refcounting import RefcountingGCTransformer from rpython.memory.gctransform.shadowstack import ( ShadowStackFrameworkGCTransformer) +from rpython.memory.gctypelayout import WEAKREF, convert_weakref_to from rpython.rlib import exports from rpython.rlib.jit import _we_are_jitted from rpython.rlib.objectmodel import (Symbolic, ComputedIntSymbolic, @@ -43,6 +44,9 @@ varsize = False needs_gc_header = False + def setup_from_lltype(self, db, type): + pass + def repr_type(self, extra_len=None): return self.typestr @@ -94,7 +98,8 @@ else: ptr_type.refs[obj] = name hash_ = database.genllvm.gcpolicy.get_prebuilt_hash(obj) - if (obj._TYPE._hints.get('immutable', False) and + if (hasattr(obj._TYPE, '_hints') and + obj._TYPE._hints.get('immutable', False) and obj._TYPE._gckind != 'gc'): global_attrs += 'constant' else: @@ -346,7 +351,7 @@ lltype.Float: FloatType('double', 64), lltype.SingleFloat: FloatType('float', 32), lltype.LongFloat: FloatType('x86_fp80', 80), - llmemory.Address: LLVMAddress + llmemory.Address: LLVMAddress, } for type in rffi.NUMBER_TYPES + [lltype.Char, lltype.UniChar]: @@ -692,26 +697,39 @@ class OpaqueType(Type): - typestr = '{}' - - def setup_from_lltype(self, db, type): - pass + typestr = 'i8' def repr_of_type(self): return 'opaque' def is_zero(self, value): - return True + raise ValueError("value is opaque") def repr_ref(self, ptr_type, obj): if hasattr(obj, 'container'): - ptr_type.refs[obj] = 'bitcast({} to {{}}*)'.format( - get_repr(obj.container._as_ptr()).TV) - elif isinstance(obj, llmemory._wref): - ptr_type.refs[obj] = 'bitcast({} to {{}}*)'.format( - get_repr(obj._converted_weakref).TV) + realvalue = get_repr(lltype.cast_opaque_ptr( + lltype.Ptr(lltype.typeOf(obj.container)), obj._as_ptr())) + ptr_type.refs[obj] = 'bitcast({.TV} to 
i8*)'.format(realvalue) else: - ptr_type.refs[obj] = 'null' + raise ValueError("value is opaque") + + +class WeakRefType(Type): + def setup_from_lltype(self, db, type): + self.struct_type = StructType() + self.struct_type.setup_from_lltype(db, WEAKREF) + + def repr_type(self, extra_len=None): + return self.struct_type.repr_type(extra_len) + + def repr_of_type(self): + return 'weakref' + + def is_zero(self, value): + return self.struct_type.is_zero(value._converted_weakref) + + def repr_value(self, value): + return self.struct_type.repr_value(value._converted_weakref) _LL_TO_LLVM = { @@ -722,7 +740,7 @@ lltype.FuncType: FuncType, lltype.OpaqueType: OpaqueType, lltype.GcOpaqueType: OpaqueType, llgroup.GroupType: GroupType, - llmemory._WeakRefType: OpaqueType, + llmemory._WeakRefType: WeakRefType, } class Database(object): @@ -1520,10 +1538,9 @@ elif type is llmemory.GCREF.TO and hasattr(value, 'container'): self._consider_constant(value.ORIGTYPE.TO, value.container) elif type is llmemory.WeakRef: - from rpython.memory.gctypelayout import convert_weakref_to wrapper = convert_weakref_to(value._dereference()) self._consider_constant(wrapper._TYPE, wrapper) - value._converted_weakref = wrapper + value._converted_weakref = wrapper._obj self.gctransformer.consider_constant(type, value) p, c = lltype.parentlink(value) @@ -1560,7 +1577,14 @@ class FrameworkGCPolicy(GCPolicy): - RttiType = OpaqueType + class RttiType(Type): + typestr = '{}' + + def is_zero(self, value): + return True + + def repr_ref(self, ptr_type, obj): + ptr_type.refs[obj] = 'null' def __init__(self, genllvm): GCPolicy.__init__(self, genllvm) From noreply at buildbot.pypy.org Mon Jul 7 07:38:39 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 7 Jul 2014 07:38:39 +0200 (CEST) Subject: [pypy-commit] pypy llvm-translation-backend: Minor refactoring: slightly change the way struct names are handled. 
Message-ID: <20140707053839.4453C1D3528@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r72377:e3796537464c Date: 2014-07-07 07:37 +0200 http://bitbucket.org/pypy/pypy/changeset/e3796537464c/ Log: Minor refactoring: slightly change the way struct names are handled. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -51,7 +51,7 @@ return self.typestr def repr_of_type(self): - return self.repr_type() + return self.typestr def is_zero(self, value): raise NotImplementedError("Override in subclass.") @@ -438,21 +438,21 @@ def setup_from_lltype(self, db, type): if (type._hints.get('typeptr', False) and db.genllvm.translator.config.translation.gcremovetypeptr): - self.setup('%' + type._name, [], True) + self.setup(type._name, [], True) return fields = ((db.get_type(type._flds[f]), f) for f in type._names) is_gc = type._gckind == 'gc' needs_gc_header = is_gc and type._first_struct() == (None, None) - self.setup('%' + type._name, fields, needs_gc_header) + self.setup(type._name, fields, needs_gc_header) def repr_type(self, extra_len=None): if extra_len not in self.size_variants: if extra_len is not None: - name = self.name + '_plus_{}'.format(extra_len) + name = '%{}_plus_{}'.format(self.name, extra_len) elif self.varsize: - name = self.name + '_varsize' + name = '%{}_varsize'.format(self.name) else: - name = self.name + name = '%{}'.format(self.name) self.size_variants[extra_len] = name = database.unique_name(name) lastname = self.fldnames_wo_voids[-1] tmp = (' {semicolon}{fldtype}{comma} ; {fldname}\n'.format( @@ -465,7 +465,7 @@ return self.size_variants[extra_len] def repr_of_type(self): - return self.name[1:] + return self.name def is_zero(self, value): if self.needs_gc_header: @@ -586,7 +586,7 @@ self.bare_array_type.setup(of, None) self.struct_type = StructType() fields = [(LLVMSigned, 'len'), 
(self.bare_array_type, 'items')] - self.struct_type.setup('%array_of_' + of.repr_of_type(), fields, is_gc) + self.struct_type.setup('array_of_' + of.repr_of_type(), fields, is_gc) def setup_from_lltype(self, db, type): self.setup(db.get_type(type.OF), type._gckind == 'gc') @@ -648,7 +648,7 @@ 'getelementptr inbounds({}* {}, i64 0, i32 {})' .format(self.typestr, groupname, i)) struct_type = StructType() - struct_type.setup(self.typestr, fields, False) + struct_type.setup('group_' + obj.name, fields, False) database.f.write('{} = global {}\n'.format( groupname, struct_type.repr_type_and_value(group))) From noreply at buildbot.pypy.org Mon Jul 7 12:57:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 7 Jul 2014 12:57:40 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Workaround: some Linux systems start processes with a non-null %gs Message-ID: <20140707105740.BE99C1D2DDD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72378:dd3c06b77a11 Date: 2014-07-07 12:57 +0200 http://bitbucket.org/pypy/pypy/changeset/dd3c06b77a11/ Log: Workaround: some Linux systems start processes with a non-null %gs content. It seems that forcing %gs to be 0 here solves problems in case we have some 'late_initializations'. 
diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -795,6 +795,10 @@ print >> f, 'char *RPython_StartupCode(void) {' print >> f, '\tchar *error = NULL;' + if database.with_stm: + print >> f, '\t/* XXX temporary workaround for late_initializations */' + print >> f, '\tsyscall(SYS_arch_prctl, ARCH_SET_GS, (uint64_t)0);' + # put float infinities in global constants, we should not have so many of them for now to make # a table+loop preferable for dest, value in database.late_initializations: @@ -896,6 +900,14 @@ filename = targetdir.join(modulename + '.c') f = filename.open('w') + if database.with_stm: + print >> f, '/* XXX temporary, for SYS_arch_prctl below */' + print >> f, '#define _GNU_SOURCE' + print >> f, '#include ' + print >> f, '#include ' + print >> f, '#include ' + print >> f, '#include ' + print >> f incfilename = targetdir.join('common_header.h') fi = incfilename.open('w') fi.write('#ifndef _PY_COMMON_HEADER_H\n#define _PY_COMMON_HEADER_H\n') From noreply at buildbot.pypy.org Mon Jul 7 13:16:18 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:18 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Renamed variable/parameter. Message-ID: <20140707111618.7218B1C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r852:35d8fe62d2aa Date: 2014-05-28 15:08 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/35d8fe62d2aa/ Log: Renamed variable/parameter. 
diff --git a/spyvm/storage_statistics.py b/spyvm/storage_statistics.py --- a/spyvm/storage_statistics.py +++ b/spyvm/storage_statistics.py @@ -49,7 +49,7 @@ class StatisticsModule(object): uses_classname = False - def storage_operation(self, operation_key, storage_size, element_classname): + def storage_operation(self, operation_key, storage_size, container_classname): raise NotImplementedError("Abstract class") def print_results(self): raise NotImplementedError("Abstract class") @@ -61,12 +61,12 @@ class StatisticsLogger(StatisticsModule): uses_classname = True - def storage_operation(self, operation_key, storage_size, element_classname): - print self.log_string(operation_key, storage_size, element_classname) + def storage_operation(self, operation_key, storage_size, container_classname): + print self.log_string(operation_key, storage_size, container_classname) - def log_string(self, operation_key, storage_size, element_classname): - if element_classname: - return "%s of %s size %d" % (self.key_string(operation_key), element_classname, storage_size) + def log_string(self, operation_key, storage_size, container_classname): + if container_classname: + return "%s of %s size %d" % (self.key_string(operation_key), container_classname, storage_size) else: return "%s size %d" % (self.key_string(operation_key), storage_size) @@ -79,7 +79,7 @@ def __init__(self): self.stats = {} - def storage_operation(self, operation_key, storage_size, element_classname): + def storage_operation(self, operation_key, storage_size, container_classname): if not operation_key in self.stats: self.stats[operation_key] = self.initial_value() self.increment_value(self.stats[operation_key], storage_size) @@ -112,8 +112,8 @@ self.outgoing_operations = {} self.outgoing_elements = {} - def storage_operation(self, key, storage_size, element_classname): - StatisticsCollector.storage_operation(self, key, storage_size, element_classname) + def storage_operation(self, key, storage_size, 
container_classname): + StatisticsCollector.storage_operation(self, key, storage_size, container_classname) source_type = key[1] target_type = key[2] self.fill_maps(self.incoming_operations, self.incoming_elements, target_type, storage_size) From noreply at buildbot.pypy.org Mon Jul 7 13:16:19 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:19 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added histogram output for storage statistics. Message-ID: <20140707111619.B87461C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r853:689aa666070e Date: 2014-06-23 19:14 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/689aa666070e/ Log: Added histogram output for storage statistics. diff --git a/spyvm/storage_statistics.py b/spyvm/storage_statistics.py --- a/spyvm/storage_statistics.py +++ b/spyvm/storage_statistics.py @@ -32,20 +32,18 @@ old_storage = None size = w_obj.size() - key = self.make_key(operation, old_storage, new_storage) if self.using_classname and log_classname: classname = w_obj.guess_classname() else: classname = None for module in self.modules: + key = module.make_key(operation, old_storage, new_storage) module.storage_operation(key, size, classname) - def make_key(self, operation, old_storage, new_storage): - return (operation, old_storage, new_storage) - def print_results(self): for module in self.modules: module.print_results() + print "\n\n" class StatisticsModule(object): uses_classname = False @@ -53,6 +51,8 @@ raise NotImplementedError("Abstract class") def print_results(self): raise NotImplementedError("Abstract class") + def make_key(self, operation, old_storage, new_storage): + return (operation, old_storage, new_storage) def key_string(self, key): if key[1]: return "%s (%s -> %s)" % (key[0], key[1], key[2]) @@ -82,17 +82,46 @@ def storage_operation(self, operation_key, storage_size, container_classname): if not operation_key in self.stats: 
self.stats[operation_key] = self.initial_value() - self.increment_value(self.stats[operation_key], storage_size) + self.increment_value(self.stats[operation_key], storage_size, container_classname) def sorted_keys(self): keys = [ x for x in self.stats ] StatsSorter(keys).sort() return keys +class HistogramStatisticsCollector(AbstractStatisticsCollector): + # Stores classnames with sizes + # Value: map + + uses_classname = True + def initial_value(self): return {} + def increment_value(self, value_object, storage_size, container_classname): + if not container_classname in value_object: + value_object[container_classname] = [0, 0] + m = value_object[container_classname] + m[0] = m[0] + storage_size + m[1] = m[1] + 1 + + def make_key(self, operation, old_storage, new_storage): + return (new_storage) + + def print_results(self): + print "## Histogram statistics:" + for key in self.sorted_keys(): + print "##" + print "# %s" % key + print "Data Objects Elements" + classes = self.stats[key] + for cls in classes: + tuple = classes[cls] + sum = tuple[0] + num = tuple[1] + print "%s\t%d\t%d" % (cls, num, sum) + class StatisticsCollector(AbstractStatisticsCollector): # Value: [total_size, num_operations] def initial_value(self): return [0, 0] - def increment_value(self, value_object, storage_size): + def increment_value(self, value_object, storage_size, container_classname): value_object[0] = value_object[0] + storage_size value_object[1] = value_object[1] + 1 def print_results(self): @@ -106,7 +135,7 @@ class DotStatisticsCollector(StatisticsCollector): def __init__(self): - AbstractStatisticsCollector.__init__(self) + StatisticsCollector.__init__(self) self.incoming_operations = {} self.incoming_elements = {} self.outgoing_operations = {} @@ -181,7 +210,7 @@ class DetailedStatisticsCollector(AbstractStatisticsCollector): # Value: list of numbers (sizes) def initial_value(self): return [] - def increment_value(self, value_object, storage_size): + def increment_value(self, 
value_object, storage_size, container_classname): value_object.append(storage_size) def print_results(self): print "Detailed Storage Statistics:" @@ -195,8 +224,9 @@ _collector = StatisticsCollector() _detailedcollector = DetailedStatisticsCollector() _dotcollector = DotStatisticsCollector() +_histogramcollector = HistogramStatisticsCollector() -def activate_statistics(log=False, statistics=False, detailed_statistics=False, dot=False): +def activate_statistics(log=False, statistics=False, detailed_statistics=False, dot=False, histogram=False): if log: _stats.add_module(_logger) if statistics: @@ -207,6 +237,8 @@ _stats.add_module(_dotcollector) # Start a comment in order to make the entire output valid dot code. Hack. print "/*" + if histogram: + _stats.add_module(_histogramcollector) def print_statistics(): _stats.print_results() diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -134,6 +134,7 @@ --strategy-stats --strategy-stats-dot --strategy-stats-details + --strategy-stats-histogram [image path, default: Squeak.image] """ % (argv[0], constants.MAX_LOOP_DEPTH) @@ -204,6 +205,8 @@ storage_statistics.activate_statistics(statistics=True) elif arg == "--strategy-stats-dot": storage_statistics.activate_statistics(dot=True) + elif arg == "--strategy-stats-histogram": + storage_statistics.activate_statistics(histogram=True) elif arg == "--strategy-stats-details": storage_statistics.activate_statistics(statistics=True, detailed_statistics=True) elif path is None: From noreply at buildbot.pypy.org Mon Jul 7 13:16:20 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:20 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed histogram statistics output Message-ID: <20140707111620.DCA081C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r854:e6b70019cd99 Date: 2014-06-27 13:41 +0200 
http://bitbucket.org/pypy/lang-smalltalk/changeset/e6b70019cd99/ Log: Fixed histogram statistics output diff --git a/spyvm/storage_statistics.py b/spyvm/storage_statistics.py --- a/spyvm/storage_statistics.py +++ b/spyvm/storage_statistics.py @@ -1,5 +1,6 @@ from rpython.rlib.listsort import TimSort +from rpython.rlib.objectmodel import import_from_mixin class StatsSorter(TimSort): """Sort a tuple of 3 strings""" @@ -74,7 +75,7 @@ # Nothing to do, this is just for logging during runtime. pass -class AbstractStatisticsCollector(StatisticsModule): +class StatisticsCollectorMixin(StatisticsModule): def __init__(self): self.stats = {} @@ -89,9 +90,10 @@ StatsSorter(keys).sort() return keys -class HistogramStatisticsCollector(AbstractStatisticsCollector): +class HistogramStatisticsCollector(StatisticsModule): # Stores classnames with sizes # Value: map + import_from_mixin(StatisticsCollectorMixin) uses_classname = True def initial_value(self): return {} @@ -103,23 +105,27 @@ m[1] = m[1] + 1 def make_key(self, operation, old_storage, new_storage): - return (new_storage) + return (new_storage, "", "") def print_results(self): print "## Histogram statistics:" - for key in self.sorted_keys(): - print "##" - print "# %s" % key + for key_tuple in self.sorted_keys(): + key = key_tuple[0] + if not "Storage" in key: + continue + print "\n# %s" % key print "Data Objects Elements" - classes = self.stats[key] + classes = self.stats[key_tuple] for cls in classes: tuple = classes[cls] sum = tuple[0] num = tuple[1] - print "%s\t%d\t%d" % (cls, num, sum) + print "%d slots in %d objects: %s" % (sum, num, cls) -class StatisticsCollector(AbstractStatisticsCollector): +class StatisticsCollector(StatisticsModule): # Value: [total_size, num_operations] + import_from_mixin(StatisticsCollectorMixin) + def initial_value(self): return [0, 0] def increment_value(self, value_object, storage_size, container_classname): value_object[0] = value_object[0] + storage_size @@ -207,8 +213,10 @@ result += 
"}" return result -class DetailedStatisticsCollector(AbstractStatisticsCollector): +class DetailedStatisticsCollector(StatisticsModule): # Value: list of numbers (sizes) + import_from_mixin(StatisticsCollectorMixin) + def initial_value(self): return [] def increment_value(self, value_object, storage_size, container_classname): value_object.append(storage_size) From noreply at buildbot.pypy.org Mon Jul 7 13:16:22 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:22 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Removed storage_statistics module, replaced with very simple storage_logger module. Message-ID: <20140707111622.29E3B1C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r855:9468db2cf599 Date: 2014-06-30 17:57 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/9468db2cf599/ Log: Removed storage_statistics module, replaced with very simple storage_logger module. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -15,7 +15,7 @@ that create W_PointersObjects of correct size with attached shadows. """ import sys, weakref -from spyvm import constants, error, version, storage_statistics +from spyvm import constants, error, version, storage_logger from spyvm.version import elidable_for_version, constant_for_version, constant_for_version_arg from rpython.rlib import rrandom, objectmodel, jit, signature @@ -566,7 +566,7 @@ _attrs_ = ['shadow'] shadow = None repr_classname = "W_PointersObject" - log_storage = storage_statistics.log + log_storage = storage_logger.log @jit.unroll_safe def __init__(self, space, w_class, size, weak=False): diff --git a/spyvm/storage_logger.py b/spyvm/storage_logger.py new file mode 100644 --- /dev/null +++ b/spyvm/storage_logger.py @@ -0,0 +1,27 @@ + +# Put flag in a list to make it modifyable after compile time. 
+_active = [False] + +def activate(): + _active[0] = True + +def log(w_obj, operation, old_storage_object=None, log_classname=True): + if not _active[0]: + return + + # Gather information to be logged + new_storage = w_obj.shadow.repr_classname + if old_storage_object: + old_storage = old_storage_object.repr_classname + else: + old_storage = None + size = w_obj.size() + if log_classname: + classname = w_obj.guess_classname() + else: + classname = None + + # Construct and print the logstring + old_storage_string = "%s -> " % old_storage if old_storage else "" + classname_string = " of %s" % classname if classname else "" + print "%s (%s%s)%s size %d" % (operation, old_storage_string, new_storage, classname_string, size) diff --git a/spyvm/storage_statistics.py b/spyvm/storage_statistics.py deleted file mode 100644 --- a/spyvm/storage_statistics.py +++ /dev/null @@ -1,255 +0,0 @@ - -from rpython.rlib.listsort import TimSort -from rpython.rlib.objectmodel import import_from_mixin - -class StatsSorter(TimSort): - """Sort a tuple of 3 strings""" - def lt(self, a, b): - if a[0] == b[0]: - if a[1] == b[1]: - return a[2] < b[2] - else: - return a[1] < b[1] - else: - return a[0] < b[0] - -class StorageStatistics(object): - - def __init__(self): - self.modules = [] - self.using_classname = False - - def add_module(self, module): - if module not in self.modules: - self.modules.append(module) - self.using_classname = self.using_classname or module.uses_classname - - def log(self, w_obj, operation, old_storage_object, log_classname): - if len(self.modules) > 0: - new_storage = w_obj.shadow.repr_classname - if old_storage_object: - old_storage = old_storage_object.repr_classname - else: - old_storage = None - size = w_obj.size() - - if self.using_classname and log_classname: - classname = w_obj.guess_classname() - else: - classname = None - for module in self.modules: - key = module.make_key(operation, old_storage, new_storage) - module.storage_operation(key, size, classname) - 
- def print_results(self): - for module in self.modules: - module.print_results() - print "\n\n" - -class StatisticsModule(object): - uses_classname = False - def storage_operation(self, operation_key, storage_size, container_classname): - raise NotImplementedError("Abstract class") - def print_results(self): - raise NotImplementedError("Abstract class") - def make_key(self, operation, old_storage, new_storage): - return (operation, old_storage, new_storage) - def key_string(self, key): - if key[1]: - return "%s (%s -> %s)" % (key[0], key[1], key[2]) - else: - return "%s (%s)" % (key[0], key[2]) - -class StatisticsLogger(StatisticsModule): - uses_classname = True - def storage_operation(self, operation_key, storage_size, container_classname): - print self.log_string(operation_key, storage_size, container_classname) - - def log_string(self, operation_key, storage_size, container_classname): - if container_classname: - return "%s of %s size %d" % (self.key_string(operation_key), container_classname, storage_size) - else: - return "%s size %d" % (self.key_string(operation_key), storage_size) - - def print_results(self): - # Nothing to do, this is just for logging during runtime. 
- pass - -class StatisticsCollectorMixin(StatisticsModule): - - def __init__(self): - self.stats = {} - - def storage_operation(self, operation_key, storage_size, container_classname): - if not operation_key in self.stats: - self.stats[operation_key] = self.initial_value() - self.increment_value(self.stats[operation_key], storage_size, container_classname) - - def sorted_keys(self): - keys = [ x for x in self.stats ] - StatsSorter(keys).sort() - return keys - -class HistogramStatisticsCollector(StatisticsModule): - # Stores classnames with sizes - # Value: map - import_from_mixin(StatisticsCollectorMixin) - - uses_classname = True - def initial_value(self): return {} - def increment_value(self, value_object, storage_size, container_classname): - if not container_classname in value_object: - value_object[container_classname] = [0, 0] - m = value_object[container_classname] - m[0] = m[0] + storage_size - m[1] = m[1] + 1 - - def make_key(self, operation, old_storage, new_storage): - return (new_storage, "", "") - - def print_results(self): - print "## Histogram statistics:" - for key_tuple in self.sorted_keys(): - key = key_tuple[0] - if not "Storage" in key: - continue - print "\n# %s" % key - print "Data Objects Elements" - classes = self.stats[key_tuple] - for cls in classes: - tuple = classes[cls] - sum = tuple[0] - num = tuple[1] - print "%d slots in %d objects: %s" % (sum, num, cls) - -class StatisticsCollector(StatisticsModule): - # Value: [total_size, num_operations] - import_from_mixin(StatisticsCollectorMixin) - - def initial_value(self): return [0, 0] - def increment_value(self, value_object, storage_size, container_classname): - value_object[0] = value_object[0] + storage_size - value_object[1] = value_object[1] + 1 - def print_results(self): - print "Storage Statistics:" - for key in self.sorted_keys(): - tuple = self.stats[key] - sum = tuple[0] - num = tuple[1] - print "\t%s: %d times, avg size: %f" % (self.key_string(key), num, float(sum)/num) - -class 
DotStatisticsCollector(StatisticsCollector): - - def __init__(self): - StatisticsCollector.__init__(self) - self.incoming_operations = {} - self.incoming_elements = {} - self.outgoing_operations = {} - self.outgoing_elements = {} - - def storage_operation(self, key, storage_size, container_classname): - StatisticsCollector.storage_operation(self, key, storage_size, container_classname) - source_type = key[1] - target_type = key[2] - self.fill_maps(self.incoming_operations, self.incoming_elements, target_type, storage_size) - if source_type: - self.fill_maps(self.outgoing_operations, self.outgoing_elements, source_type, storage_size) - - def fill_maps(self, operations_map, elements_map, key_type, size): - if key_type not in operations_map: - operations_map[key_type] = 0 - elements_map[key_type] = 0 - operations_map[key_type] = operations_map[key_type] + 1 - elements_map[key_type] = elements_map[key_type] + size - - def print_results(self): - print "Storage Statistics (dot format):" - print "================================" - print "*/" # End the commend started in activate_statistics() - print self.dot_string() - - def dot_string(self): - # Return a string that is valid dot code and can be parsed by the graphviz dot utility. - # Unfortunately, this is pretty complicated and messy... Sorry. 
- - result = "digraph G {" - result += "loading_image [label=\"Image Loading\",shape=box];" - result += "created_object [label=\"Object Creation\",shape=box];" - for key in self.stats: - operation_type = key[0] - target_node = key[2] - elements = self.stats[key][0] - operations = self.stats[key][1] - label_suffix = "" - if operation_type == "Switched": - source_node = key[1] - percent_ops = float(operations) / float(self.incoming_operations[source_node]) * 100 - percent_elements = float(elements) / float(self.incoming_elements[source_node]) * 100 - label_suffix = "\n%d%% objects\n%d%% elements" % (int(percent_ops), int(percent_elements)) - elif operation_type == "Initialized": - source_node = "created_object" - elif operation_type == "Filledin": - source_node = "loading_image" - else: - print "Could not handle storage operation %s" % operation_type - continue - result += "%s -> %s [label=\"%d objects\n%d elements per object%s\"];" % (source_node, target_node, operations, elements/operations, label_suffix) - for type in self.incoming_operations: - incoming_ops = self.incoming_operations[type] - incoming_els = self.incoming_elements[type] - label = "\nIncoming objects: %d" % incoming_ops - label += "\nIncoming elements: %d" % incoming_els - if type in self.outgoing_operations: - remaining_ops = incoming_ops - self.outgoing_operations[type] - remaining_els = incoming_els - self.outgoing_elements[type] - else: - remaining_ops = incoming_ops - remaining_els = incoming_els - percent_remaining_ops = float(remaining_ops) / incoming_ops * 100 - percent_remaining_els = float(remaining_els) / incoming_els * 100 - label += "\nRemaining objects: %d (%d%%)" % (remaining_ops, int(percent_remaining_ops)) - label += "\nRemaining elements: %d (%d%%)" % (remaining_els, int(percent_remaining_els)) - result += "%s [label=\"%s%s\"];" % (type, type, label) - result += "}" - return result - -class DetailedStatisticsCollector(StatisticsModule): - # Value: list of numbers (sizes) - 
import_from_mixin(StatisticsCollectorMixin) - - def initial_value(self): return [] - def increment_value(self, value_object, storage_size, container_classname): - value_object.append(storage_size) - def print_results(self): - print "Detailed Storage Statistics:" - for key in self.sorted_keys(): - print "\t%s: s" % (self.key_string(key), self.stats[key]) - -# Static & global access to a StorageStatistics instance. - -_stats = StorageStatistics() -_logger = StatisticsLogger() -_collector = StatisticsCollector() -_detailedcollector = DetailedStatisticsCollector() -_dotcollector = DotStatisticsCollector() -_histogramcollector = HistogramStatisticsCollector() - -def activate_statistics(log=False, statistics=False, detailed_statistics=False, dot=False, histogram=False): - if log: - _stats.add_module(_logger) - if statistics: - _stats.add_module(_collector) - if detailed_statistics: - _stats.add_module(_detailedcollector) - if dot: - _stats.add_module(_dotcollector) - # Start a comment in order to make the entire output valid dot code. Hack. 
- print "/*" - if histogram: - _stats.add_module(_histogramcollector) - -def print_statistics(): - _stats.print_results() - -def log(w_obj, operation, old_storage=None, log_classname=True): - _stats.log(w_obj, operation, old_storage, log_classname) diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -1,5 +1,5 @@ import py -from spyvm import wrapper, model, interpreter, shadow, storage_statistics +from spyvm import wrapper, model, interpreter, shadow from spyvm.error import WrapperException, FatalError from .util import read_image, copy_to_module, cleanup_module @@ -175,50 +175,3 @@ a.store(space, 1, space.wrap_int(2)) assert isinstance(a.shadow, shadow.ListStorageShadow) check_arr(a, [1.2, 2, w_nil, w_nil, w_nil]) - -def test_statistics_stats(): - col = storage_statistics.DetailedStatisticsCollector() - stats = storage_statistics.StorageStatistics() - col.storage_operation(stats.make_key("B", "old", "new"), 3, None) - col.storage_operation(stats.make_key("B", "old", "new"), 4, None) - col.storage_operation(stats.make_key("B", "old2", "new2"), 20, None) - col.storage_operation(stats.make_key("B", "old", "new"), 5, None) - col.storage_operation(stats.make_key("A", "old", "new"), 1, None) - col.storage_operation(stats.make_key("A", "old", "new"), 2, None) - col.storage_operation(stats.make_key("C", "old", "new"), 10, None) - col.storage_operation(stats.make_key("C", "old", "new"), 11, None) - keys = col.sorted_keys() - assert keys == [ ("A", "old", "new"), ("B", "old", "new"), ("B", "old2", "new2"), ("C", "old", "new") ] - assert col.stats[keys[0]] == [1, 2] - assert col.stats[keys[1]] == [3, 4, 5] - assert col.stats[keys[2]] == [20] - assert col.stats[keys[3]] == [10, 11] - -def test_statistics_log(): - stats = storage_statistics.StorageStatistics() - log = storage_statistics.StatisticsLogger() - s = log.log_string(stats.make_key("Operation", "old_storage", 
"new_storage"), 22, "classname") - assert s == "Operation (old_storage -> new_storage) of classname size 22" - s = log.log_string(stats.make_key("InitialOperation", None, "some_new_storage"), 40, "a_classname") - assert s == "InitialOperation (some_new_storage) of a_classname size 40" - -def test_statistics_stats_dot(): - col = storage_statistics.DotStatisticsCollector() - stats = storage_statistics.StorageStatistics() - - col.storage_operation(stats.make_key("Switched", "old", "new"), 10, None) - col.storage_operation(stats.make_key("Switched", "old", "new"), 10, None) - col.storage_operation(stats.make_key("Switched", "new", "new2"), 10, None) - col.storage_operation(stats.make_key("Switched", "old2", "new"), 5, None) - col.storage_operation(stats.make_key("Initialized", None, "old"), 13, None) - col.storage_operation(stats.make_key("Initialized", None, "old"), 10, None) - col.storage_operation(stats.make_key("Initialized", None, "old"), 10, None) - col.storage_operation(stats.make_key("Initialized", None, "old2"), 15, None) - col.storage_operation(stats.make_key("Filledin", None, "old2"), 20, None) - col.storage_operation(stats.make_key("Filledin", None, "new"), 10, None) - col.storage_operation(stats.make_key("Filledin", None, "new"), 11, None) - - # The dot-code is correct, I checked ;) - assert col.dot_string() == \ - 'digraph G {loading_image [label="Image Loading",shape=box];created_object [label="Object Creation",shape=box];created_object -> old2 [label="1 objects\n15 elements per object"];loading_image -> new [label="2 objects\n10 elements per object"];old -> new [label="2 objects\n10 elements per object\n66% objects\n60% elements"];loading_image -> old2 [label="1 objects\n20 elements per object"];created_object -> old [label="3 objects\n11 elements per object"];old2 -> new [label="1 objects\n5 elements per object\n50% objects\n14% elements"];new -> new2 [label="1 objects\n10 elements per object\n20% objects\n21% elements"];new2 [label="new2\nIncoming 
objects: 1\nIncoming elements: 10\nRemaining objects: 1 (100%)\nRemaining elements: 10 (100%)"];new [label="new\nIncoming objects: 5\nIncoming elements: 46\nRemaining objects: 4 (80%)\nRemaining elements: 36 (78%)"];old2 [label="old2\nIncoming objects: 2\nIncoming elements: 35\nRemaining objects: 1 (50%)\nRemaining elements: 30 (85%)"];old [label="old\nIncoming objects: 3\nIncoming elements: 33\nRemaining objects: 1 (33%)\nRemaining elements: 13 (39%)"];}' - \ No newline at end of file diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -6,7 +6,7 @@ from rpython.rlib import jit, rpath from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ - error, shadow, storage_statistics, constants + error, shadow, storage_logger, constants from spyvm.tool.analyseimage import create_image from spyvm.interpreter_proxy import VirtualMachine @@ -130,11 +130,7 @@ -p|--poll_events -ni|--no-interrupts -d|--max-stack-depth [number, default %d, <= 0 disables stack protection] - --strategy-log - --strategy-stats - --strategy-stats-dot - --strategy-stats-details - --strategy-stats-histogram + -l|--storage-log [image path, default: Squeak.image] """ % (argv[0], constants.MAX_LOOP_DEPTH) @@ -199,16 +195,8 @@ _arg_missing(argv, idx, arg) max_stack_depth = int(argv[idx + 1]) idx += 1 - elif arg == "--strategy-log": - storage_statistics.activate_statistics(log=True) - elif arg == "--strategy-stats": - storage_statistics.activate_statistics(statistics=True) - elif arg == "--strategy-stats-dot": - storage_statistics.activate_statistics(dot=True) - elif arg == "--strategy-stats-histogram": - storage_statistics.activate_statistics(histogram=True) - elif arg == "--strategy-stats-details": - storage_statistics.activate_statistics(statistics=True, detailed_statistics=True) + elif arg in ["-l", "--storage-log"]: + storage_logger.activate() elif path is None: path = argv[idx] else: @@ 
-245,7 +233,6 @@ else: _run_image(interp) result = 0 - storage_statistics.print_statistics() return result From noreply at buildbot.pypy.org Mon Jul 7 13:16:23 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:23 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added script to parse storage-log and convert to summary or dot-string/graph. Message-ID: <20140707111623.5E8FB1C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r856:f4ae062456ae Date: 2014-07-01 12:12 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f4ae062456ae/ Log: Added script to parse storage-log and convert to summary or dot- string/graph. diff --git a/spyvm/tool/storagelog_parser.py b/spyvm/tool/storagelog_parser.py new file mode 100644 --- /dev/null +++ b/spyvm/tool/storagelog_parser.py @@ -0,0 +1,529 @@ + +import re, sys, operator + +OPERATIONS = ["Filledin", "Initialized", "Switched"] + +# ==================================================================== +# ======== Basic functions +# ==================================================================== + +def parse(filename, flags): + entries = [] + with open(filename, 'r', 1) as file: + while True: + line = file.readline() + if len(line) == 0: + break + entry = parse_line(line, flags) + if entry: + entries.append(entry) + return entries + +line_pattern = re.compile("^(?P\w+) \(((?P\w+) -> )?(?P\w+)\)( of (?P.+))? 
size (?P[0-9]+)$") + +def parse_line(line, flags): + result = line_pattern.match(line) + if result is None: + if flags.verbose: + print "Could not parse line: %s" % line[:-1] + return None + operation = result.group('operation') + old_storage = result.group('old') + new_storage = result.group('new') + classname = result.group('classname') + size = result.group('size') + if old_storage is None: + if operation == "Filledin": + old_storage = " Image Loading Storage" # Space to be sorted to the beginning + elif operation == "Initialized": + old_storage = " Object Creation Storage" + else: + assert False, "old_storage has to be available in a Switched operation" + entry = LogEntry(operation, old_storage, new_storage, classname, size) + #entry.is_special = + return entry + +class LogEntry(object): + + def __init__(self, operation, old_storage, new_storage, classname, size): + self.operation = str(operation) + self.old_storage = str(old_storage) + self.new_storage = str(new_storage) + self.classname = str(classname) + self.size = float(size) + + def full_key(self): + return (self.operation, self.old_storage, self.new_storage) + + def __str__(self): + old_storage_string = "%s -> " % self.old_storage if self.old_storage else "" + classname_string = " of %s" % self.classname if self.classname else "" + return "%s (%s%s)%s size %d" % (self.operation, old_storage_string, self.new_storage, classname_string, self.size) + +# ==================================================================== +# ======== Graph parsing +# ==================================================================== + +class Operations(object): + + def __init__(self, objects=0, slots=0): + self.objects = objects + self.slots = slots + + def __str__(self, total=None): + if self.objects == 0: + avg_slots = 0 + else: + avg_slots = float(self.slots) / self.objects + if total is not None and total.slots != 0: + percent_slots = " (%.1f%%)" % (float(self.slots)*100 / total.slots) + else: + percent_slots = "" + if 
total is not None and total.objects != 0: + percent_objects = " (%.1f%%)" % (float(self.objects)*100 / total.objects) + else: + percent_objects = "" + return "%d%s slots in %d%s objects (avg size: %.1f)" % (self.slots, percent_slots, self.objects, percent_objects, avg_slots) + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def add_log_entry(self, entry): + self.slots = self.slots + entry.size + self.objects = self.objects + 1 + + def __sub__(self, other): + return Operations(self.objects - other.objects, self.slots - other.slots) + + def __add__(self, other): + return Operations(self.objects + other.objects, self.slots + other.slots) + + def __lt__(self, other): + return self.slots < other.slots + + def empty(self): + return self.objects == 0 and self.slots == 0 + + def prefixprint(self, key="", total=None): + if not self.empty(): + print "%s%s" % (key, self.__str__(total)) + +class ClassOperations(object): + + def __init__(self): + self.classes = {} + + def cls(self, name): + if name not in self.classes: + self.classes[name] = Operations() + return self.classes[name] + + def total(self): + return reduce(operator.add, self.classes.values(), Operations()) + + def __str__(self): + return "ClassOperations(%s)" % self.classes + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def __add__(self, other): + result = ClassOperations() + result.classes = dict(self.classes) + for classname, other_class in other.classes.items(): + result.cls(classname) # Make sure exists. + result.classes[classname] += other_class + return result + + def __sub__(self, other): + result = ClassOperations() + result.classes = dict(self.classes) + for classname, other_class in other.classes.items(): + result.cls(classname) # Make sure exists. 
+ result.classes[classname] -= other_class + return result + +class StorageEdge(object): + + def __init__(self, operation="None", origin=None, target=None): + assert operation == "None" or operation in OPERATIONS, "Unknown operation %s" % operation + self.operation = operation + self.classes = ClassOperations() + self.origin = origin + self.target = target + + def full_key(self): + return (self.operation, self.origin.name, self.target.name) + + def cls(self, classname): + return self.classes.cls(classname) + + def total(self): + return self.classes.total() + + def notify_nodes(self): + self.origin.note_outgoing(self) + self.target.note_incoming(self) + + def add_log_entry(self, entry): + self.cls(entry.classname).add_log_entry(entry) + + def __str__(self): + return "[%s %s -> %s]" % (self.operation, self.origin, self.target) + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def __add__(self, other): + origin = self.origin if self.origin is not None else other.origin + target = self.target if self.target is not None else other.target + result = StorageEdge(self.operation, origin, target) + result.classes += self.classes + other.classes + return result + + def __sub__(self, other): + origin = self.origin if self.origin is not None else other.origin + target = self.target if self.target is not None else other.target + result = StorageEdge(self.operation, origin, target) + result.classes += self.classes - other.classes + return result + +class StorageNode(object): + + def __init__(self, name): + self.name = name + self.incoming = set() + self.outgoing = set() + + def note_incoming(self, edge): + assert edge.target is self + if edge not in self.incoming: + self.incoming.add(edge) + + def note_outgoing(self, edge): + assert edge.origin is self + if edge not in self.outgoing: + self.outgoing.add(edge) + + def incoming_edges(self, operation): + return filter(lambda x: x.operation == operation, self.incoming) + + def 
outgoing_edges(self, operation): + return filter(lambda x: x.operation == operation, self.outgoing) + + def sum_incoming(self, operation): + return reduce(operator.add, self.incoming_edges(operation), StorageEdge(operation)) + + def sum_outgoing(self, operation): + return reduce(operator.add, self.outgoing_edges(operation), StorageEdge(operation)) + + def sum_all_incoming(self): + return reduce(operator.add, self.incoming, StorageEdge()) + + def sum_all_outgoing(self): + return reduce(operator.add, self.outgoing, StorageEdge()) + + def __str__(self): + return self.name + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def merge_edge_sets(self, set1, set2, key_slot): + getter = lambda edge: edge.__dict__[key_slot] + set_dict = dict([(getter(edge), edge) for edge in set1]) + for edge in set2: + key = getter(edge) + if key not in set_dict: + set_dict[key] = edge + else: + set_dict[key] += edge + return set(set_dict.values()) + + def __add__(self, other): + result = StorageNode("%s_%s" % (self.name, other.name)) + result.incoming = self.merge_edge_sets(self.incoming, other.incoming, "origin") + # TODO bullshit code + for edge in result.incoming: + edge.target = result + result.outgoing = self.merge_edge_sets(self.outgoing, other.outgoing, "target") + for edge in result.outgoing: + edge.origin = result + return result + + def __lt__(self, other): + return self.name < other.name + +class StorageGraph(object): + + def __init__(self): + self.nodes = {} + self.edges = {} + + def node(self, name): + if name not in self.nodes: + self.nodes[name] = StorageNode(name) + return self.nodes[name] + + def assert_sanity(self): + visited_edges = set() + for node in self.nodes.values(): + for edge in node.incoming: + assert edge in self.edges.values(), "Edge not in graph's edges: %s" % edge + visited_edges.add(edge) + if not edge.target is node: + print "Wrong edge target: %s\nIncoming edge: %s\nIn node: %s" % (edge.target, edge, node) + assert 
False + if not edge in edge.origin.outgoing: + print "Edge not in origin's outgoing: %s\nIncoming edge: %s\nIn node: %s" % (edge.origin.outgoing, edge, node) + assert False + for edge in node.outgoing: + assert edge in self.edges.values(), "Edge not in graph's edges: %s" % edge + visited_edges.add(edge) + if not edge.origin is node: + print "Wrong edge origin: %s\nOutgoing edge: %s\nIn node: %s" % (edge.origin, edge, node) + assert False + if not edge in edge.target.incoming: + print "Edge not in origin's incoming: %s\nOutgoing edge: %s\nIn node: %s" % (edge.target.incoming, edge, node) + assert False + assert len(visited_edges) == len(self.edges.values()), "Not all of graph's edges visited." + + def add_log_entry(self, log_entry): + key = log_entry.full_key() + if key not in self.edges: + edge = StorageEdge(log_entry.operation, self.node(log_entry.old_storage), self.node(log_entry.new_storage)) + self.edges[key] = edge + edge.notify_nodes() + self.edges[key].add_log_entry(log_entry) + + def collapse_nodes(self, collapsed_nodes, new_name=None): + for node in collapsed_nodes: + del self.nodes[node.name] + for edge in node.incoming: + del self.edges[edge.full_key()] + for edge in node.outgoing: + del self.edges[edge.full_key()] + new_node = reduce(operator.add, collapsed_nodes) + if new_name is not None: + new_node.name = new_name + self.nodes[new_node.name] = new_node + # TODO bullshit code + for node in collapsed_nodes: + for edge in node.incoming: + edge.origin.outgoing.remove(edge) + new_edges = filter(lambda filtered: filtered.origin == edge.origin, new_node.incoming) + assert len(new_edges) == 1 + edge.origin.outgoing.add(new_edges[0]) + for edge in node.outgoing: + edge.target.incoming.remove(edge) + new_edges = filter(lambda filtered: filtered.target == edge.target, new_node.outgoing) + assert len(new_edges) == 1 + edge.target.incoming.add(new_edges[0]) + for edge in new_node.incoming: + self.edges[edge.full_key()] = edge + for edge in new_node.outgoing: + 
self.edges[edge.full_key()] = edge + self.assert_sanity() + + def split_nodes(self, new_name=None): + nodes = filter(lambda node: "Storage" not in node.name, self.nodes.values()) + self.collapse_nodes(nodes, new_name) + + def sorted_nodes(self): + nodes = self.nodes.values() + nodes.sort() + return nodes + +def make_graph(entries): + graph = StorageGraph() + for e in entries: + graph.add_log_entry(e) + graph.assert_sanity() + return graph + +# ==================================================================== +# ======== Command - Summarize log content +# ==================================================================== + +def command_summarize(entries, flags): + print_summary(entries, flags) + +def print_summary(entries, flags): + graph = make_graph(entries) + if not flags.allstorage: + graph.split_nodes() + for node in graph.sorted_nodes(): + node.print_summary(flags) + +def StorageNode_print_summary(self, flags): + print "\n%s:" % self.name + sum = StorageEdge() + total_incoming = self.sum_all_incoming().total() if flags.percent else None + + print "\tIncoming:" + for operation in OPERATIONS: + if flags.detailed: + edges = [ (edge.origin.name, edge) for edge in self.incoming_edges(operation) ] + else: + edges = [ (operation, self.sum_incoming(operation)) ] + for edgename, edge in edges: + edge.print_with_name("\t\t\t", edgename, total_incoming, flags) + sum += edge + + print "\tOutgoing:" + for operation in OPERATIONS: + if flags.detailed: + edges = [ (edge.target.name, edge) for edge in self.outgoing_edges(operation) ] + else: + edges = [ (operation, self.sum_outgoing(operation)) ] + for edgename, edge in edges: + edge.print_with_name("\t\t\t", edgename, total_incoming, flags) + sum -= edge + + sum.print_with_name("\t", "Remaining", total_incoming, flags) + +StorageNode.print_summary = StorageNode_print_summary + +def StorageEdge_print_with_name(self, prefix, edgename, total_reference, flags): + if flags.classes: + print "%s%s:" % (prefix, edgename) + 
prefix += "\t\t" + operations = self.classes.classes.items() + operations.sort(reverse=True, key=operator.itemgetter(1)) + else: + operations = [ (edgename, self.total()) ] + for classname, classops in operations: + classops.prefixprint("%s%s: " % (prefix, classname), total_reference) + +StorageEdge.print_with_name = StorageEdge_print_with_name + +# ==================================================================== +# ======== Command - DOT output +# ==================================================================== + +# Output is valid dot code and can be parsed by the graphviz dot utility. +def command_print_dot(entries, flags): + graph = make_graph(entries) + print "/*" + print "Storage Statistics (dot format):" + print "================================" + print "*/" + print dot_string(graph, flags) + +def command_dot(entries, flags): + import subprocess + dot = dot_string(make_graph(entries), flags) + command = ["dot", "-Tjpg", "-o%s.jpg" % flags.logfile] + print "Running:\n%s" % " ".join(command) + p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + output = p.communicate(input=dot)[0] + print output + +def dot_string(graph, flags): + result = "digraph G {" + incoming_cache = {} + if not flags.allstorage: + graph.split_nodes("Other") + + for node in graph.nodes.values(): + incoming = node.sum_all_incoming().total() + outgoing = node.sum_all_outgoing().total() + remaining = incoming - outgoing + if remaining.objects < 0: + # TODO This is a special node. Hacky way to find out. 
+ incoming_cache[node.name] = outgoing + shape = ",shape=box" + label = "\nObjects: %d" % outgoing.objects + label += "\nSlots: %d" % outgoing.slots + else: + incoming_cache[node.name] = incoming + shape = "" + label = "\nIncoming objects: %d" % incoming.objects + label += "\nIncoming elements: %d" % incoming.slots + if flags.percent and incoming.objects != 0: + percent_remaining_objects = " (%.1f%%)" % (remaining.objects * 100 / incoming.objects) + percent_remaining_slots = " (%.1f%%)" % (remaining.slots * 100 / incoming.slots) + else: + percent_remaining_objects = percent_remaining_slots = "" + label += "\nRemaining objects: %d%s" % (remaining.objects, percent_remaining_objects) + label += "\nRemaining elements: %d%s" % (remaining.slots, percent_remaining_slots) + result += "%s [label=\"%s%s\"%s];" % (node.name.replace(" ", "_"), node.name, label, shape) + + for edge in graph.edges.values(): + total = edge.total() + str_objects = "%d objects" % total.objects + str_slots = "%d slots" % total.slots + incoming = incoming_cache[edge.origin.name] + if flags.percent and incoming.objects != 0: + str_objects += " (%.1f%%)" % (float(total.objects) * 100 / incoming.objects) + str_slots += " (%.1f%%)" % (float(total.slots) * 100 / incoming.slots) + + target_node = edge.target.name.replace(" ", "_") + source_node = edge.origin.name.replace(" ", "_") + result += "%s -> %s [label=\"%s\n%s\n%d slots per object\"];" % (source_node, target_node, str_objects, str_slots, total.slots / total.objects) + + result += "}" + return result + +# ==================================================================== +# ======== Main +# ==================================================================== + +def command_print_entries(entries): + for e in entries: + print e + +class Flags(object): + + def __init__(self, flags): + self.flags = {} + for name, short in flags: + self.__dict__[name] = False + self.flags[short] = name + + def handle(self, arg): + if arg in self.flags: + 
self.__dict__[self.flags[arg]] = True + return True + else: + return False + + def __str__(self): + descriptions = [ ("%s (%s)" % description) for description in self.flags.items() ] + return "[%s]" % " | ".join(descriptions) + +def usage(flags, commands): + print "Arguments: logfile command %s" % flags + print "Available commands: %s" % commands + exit(1) + +def main(argv): + flags = Flags([ + ('verbose', '-v'), + ('percent', '-p'), + ('allstorage', '-a'), + ('detailed', '-d'), + ('classes', '-c'), + ]) + + command_prefix = "command_" + module = sys.modules[__name__].__dict__ + commands = [ a[len(command_prefix):] for a in module.keys() if a.startswith(command_prefix) ] + + if len(argv) < 2: + usage(flags, commands) + logfile = argv[0] + flags.logfile = logfile + command = argv[1] + for flag in argv[2:]: + if not flags.handle(flag): + usage(flags, commands) + if command not in commands: + usage(flags, commands) + + func = module[command_prefix + command] + entries = parse(logfile, flags) + func(entries, flags) + +if __name__ == "__main__": + main(sys.argv[1:]) From noreply at buildbot.pypy.org Mon Jul 7 13:16:24 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:24 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed storage_logger.py Message-ID: <20140707111624.889F41C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r857:48cb3f9eda6e Date: 2014-07-01 12:58 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/48cb3f9eda6e/ Log: Fixed storage_logger.py diff --git a/spyvm/storage_logger.py b/spyvm/storage_logger.py --- a/spyvm/storage_logger.py +++ b/spyvm/storage_logger.py @@ -1,12 +1,16 @@ -# Put flag in a list to make it modifyable after compile time. -_active = [False] +# Put flag in an object to make it modifyable after compile time. 
+class LoggerActive(object): + def __init__(self): + self.active = False + +_active = LoggerActive() def activate(): - _active[0] = True + _active.active = True def log(w_obj, operation, old_storage_object=None, log_classname=True): - if not _active[0]: + if not _active.active: return # Gather information to be logged diff --git a/spyvm/tool/storagelog_parser.py b/spyvm/tool/storagelog_parser.py --- a/spyvm/tool/storagelog_parser.py +++ b/spyvm/tool/storagelog_parser.py @@ -297,6 +297,8 @@ self.edges[key].add_log_entry(log_entry) def collapse_nodes(self, collapsed_nodes, new_name=None): + if len(collapsed_nodes) == 0: + return for node in collapsed_nodes: del self.nodes[node.name] for edge in node.incoming: From noreply at buildbot.pypy.org Mon Jul 7 13:16:25 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:25 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added binary storage-log output & parsing to shrink the logfile. Message-ID: <20140707111625.BE7E21C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r858:1f520c5d82db Date: 2014-07-01 16:23 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/1f520c5d82db/ Log: Added binary storage-log output & parsing to shrink the logfile. diff --git a/spyvm/storage_logger.py b/spyvm/storage_logger.py --- a/spyvm/storage_logger.py +++ b/spyvm/storage_logger.py @@ -1,16 +1,20 @@ -# Put flag in an object to make it modifyable after compile time. -class LoggerActive(object): +import sys + +# Put flags in an object to make it modifyable after compile time. 
+class LoggerOptions(object): def __init__(self): self.active = False + self.binary = False -_active = LoggerActive() +_options = LoggerOptions() -def activate(): - _active.active = True +def activate(binary = False): + _options.active = True + _options.binary = binary def log(w_obj, operation, old_storage_object=None, log_classname=True): - if not _active.active: + if not _options.active: return # Gather information to be logged @@ -25,7 +29,61 @@ else: classname = None - # Construct and print the logstring + if _options.binary: + binary_output(operation, old_storage, new_storage, classname, size) + else: + output(operation, old_storage, new_storage, classname, size) + +def output(operation, old_storage, new_storage, classname, size): + # Construct and print a simple logstring old_storage_string = "%s -> " % old_storage if old_storage else "" classname_string = " of %s" % classname if classname else "" print "%s (%s%s)%s size %d" % (operation, old_storage_string, new_storage, classname_string, size) + +operation_map = { + "Filledin": 1, + "Initialized": 2, + "Switched": 3, +} + +storage_map = { + "AllNilStorageShadow": 1, + "SmallIntegerOrNilStorageShadow": 2, + "FloatOrNilStorageShadow": 3, + "ListStorageShadow": 4, + "WeakListStorageShadow": 5, + "ClassShadow": 6, + "MethodDictionaryShadow": 7, + "BlockContextShadow": 8, + "MethodContextShadow": 9, + "CachedObjectShadow": 10, + "ObserveeShadow": 11, + None: 12, +} + +def binary_output(operation, old_storage, new_storage, classname, size): + # Output a byte-coded log entry + bytes = bytearray() + + # First 3 bytes: operation, old_storage, new_storage + assert operation in operation_map, "Cannot handle operation %s" % operation + bytes.append(operation_map[operation]) + assert old_storage in storage_map, "Cannot handle old-storage type %s" % old_storage + bytes.append(storage_map[old_storage]) + assert new_storage in storage_map, "Cannot handle new-storage type %s" % new_storage + 
bytes.append(storage_map[new_storage]) + + # Next: 2 bytes encoding object size (big endian) + assert size < 2**16, "Object of type %s too large (size %d)" % (classname, size) + mask = (1<<8)-1 + bytes.append(size & mask) + mask = mask<<8 + bytes.append((size & mask) >> 8) + + # Next: classname string plus terminating null-character + if classname: + for c in classname: + bytes.append(c) + bytes.append(0) + + sys.stdout.write(bytes) diff --git a/spyvm/tool/storagelog_parser.py b/spyvm/tool/storagelog_parser.py --- a/spyvm/tool/storagelog_parser.py +++ b/spyvm/tool/storagelog_parser.py @@ -1,24 +1,77 @@ import re, sys, operator +import spyvm.storage_logger OPERATIONS = ["Filledin", "Initialized", "Switched"] +# Reverse the two maps used to encode the byte encoded log-output +storage_map = {v:k for k, v in spyvm.storage_logger.storage_map.items()} +operation_map = {v:k for k, v in spyvm.storage_logger.operation_map.items()} + # ==================================================================== # ======== Basic functions # ==================================================================== +def filesize(file): + import os + return os.path.getsize(file.name) + def parse(filename, flags): entries = [] with open(filename, 'r', 1) as file: - while True: - line = file.readline() - if len(line) == 0: - break - entry = parse_line(line, flags) - if entry: - entries.append(entry) + if flags.binary: + while True: + try: + entry = parse_binary(file) + if entry == None: + if flags.verbose: + tell = file.tell() + format = (tell, len(entries), filesize(file) - tell) + print "Stopped parsing after %d bytes (%d entries). Ignoring leftover %d bytes." 
% format + break + else: + entries.append(entry) + except: + print "Exception while parsing file, after %d bytes (%d entries)" % (file.tell(), len(entries)) + raise + else: + while True: + line = file.readline() + if len(line) == 0: + break + entry = parse_line(line, flags) + if entry: + entries.append(entry) return entries +def parse_binary(file): + # First 3 bytes: operation, old storage, new storage + header = file.read(3) + operation_byte = ord(header[0]) + old_storage_byte = ord(header[1]) + new_storage_byte = ord(header[2]) + # This is the only way to check if we are reading a correct log entry + if operation_byte not in operation_map or old_storage_byte not in storage_map or new_storage_byte not in storage_map: + return None + operation = operation_map[operation_byte] + old_storage = storage_map[old_storage_byte] + new_storage = storage_map[new_storage_byte] + + # Next 2 bytes: object size (big endian) + size_bytes = file.read(2) + size = int(ord(size_bytes[0]) + (ord(size_bytes[1])<<8)) + + # Last: classname, nul-terminated + classname = "" + while True: + byte = file.read(1) + if byte == chr(0): + break + classname += byte + if len(classname) == 0: + classname = None + return LogEntry(operation, old_storage, new_storage, classname, size) + line_pattern = re.compile("^(?P<operation>\w+) \(((?P<old>\w+) -> )?(?P<new>\w+)\)( of (?P<classname>.+))? 
size (?P<size>[0-9]+)$") def parse_line(line, flags): @@ -32,25 +85,24 @@ new_storage = result.group('new') classname = result.group('classname') size = result.group('size') - if old_storage is None: - if operation == "Filledin": - old_storage = " Image Loading Storage" # Space to be sorted to the beginning - elif operation == "Initialized": - old_storage = " Object Creation Storage" - else: - assert False, "old_storage has to be available in a Switched operation" - entry = LogEntry(operation, old_storage, new_storage, classname, size) - #entry.is_special = - return entry + return LogEntry(operation, old_storage, new_storage, classname, size) class LogEntry(object): def __init__(self, operation, old_storage, new_storage, classname, size): self.operation = str(operation) - self.old_storage = str(old_storage) self.new_storage = str(new_storage) self.classname = str(classname) self.size = float(size) + + if old_storage is None: + if operation == "Filledin": + old_storage = " Image Loading Storage" # Space to be sorted to the beginning + elif operation == "Initialized": + old_storage = " Object Creation Storage" + else: + assert False, "old_storage has to be available in a Switched operation" + self.old_storage = str(old_storage) def full_key(self): return (self.operation, self.old_storage, self.new_storage) @@ -471,7 +523,7 @@ # ======== Main # ==================================================================== -def command_print_entries(entries): +def command_print_entries(entries, flags): for e in entries: print e @@ -506,6 +558,7 @@ ('allstorage', '-a'), ('detailed', '-d'), ('classes', '-c'), + ('binary', '-b'), ]) command_prefix = "command_" diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -131,6 +131,7 @@ -ni|--no-interrupts -d|--max-stack-depth [number, default %d, <= 0 disables stack protection] -l|--storage-log + -lb|--storage-log-binary (output should be 
redirected to file) [image path, default: Squeak.image] """ % (argv[0], constants.MAX_LOOP_DEPTH) @@ -197,6 +198,8 @@ idx += 1 elif arg in ["-l", "--storage-log"]: storage_logger.activate() + elif arg in ["-lb", "--storage-log-binary"]: + storage_logger.activate(binary=True) elif path is None: path = argv[idx] else: From noreply at buildbot.pypy.org Mon Jul 7 13:16:26 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:26 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed storage_logger.py for RPython, slightly improved dot-graph. Message-ID: <20140707111626.E38A51C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r859:519def1169d7 Date: 2014-07-01 16:51 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/519def1169d7/ Log: Fixed storage_logger.py for RPython, slightly improved dot-graph. diff --git a/spyvm/storage_logger.py b/spyvm/storage_logger.py --- a/spyvm/storage_logger.py +++ b/spyvm/storage_logger.py @@ -1,5 +1,3 @@ - -import sys # Put flags in an object to make it modifyable after compile time. class LoggerOptions(object): @@ -63,7 +61,7 @@ def binary_output(operation, old_storage, new_storage, classname, size): # Output a byte-coded log entry - bytes = bytearray() + bytes = [] # bytearray() # First 3 bytes: operation, old_storage, new_storage assert operation in operation_map, "Cannot handle operation %s" % operation @@ -81,9 +79,14 @@ bytes.append((size & mask) >> 8) # Next: classname string plus terminating null-character + i = 5 if classname: for c in classname: - bytes.append(c) + bytes.append(ord(c)) + i += 1 bytes.append(0) - sys.stdout.write(bytes) + # No simpler way for RPython's sake. 
+ import os + for b in bytes: + os.write(1, chr(b)) diff --git a/spyvm/tool/storagelog_parser.py b/spyvm/tool/storagelog_parser.py --- a/spyvm/tool/storagelog_parser.py +++ b/spyvm/tool/storagelog_parser.py @@ -293,7 +293,7 @@ return set(set_dict.values()) def __add__(self, other): - result = StorageNode("%s_%s" % (self.name, other.name)) + result = StorageNode("%s %s" % (self.name, other.name)) result.incoming = self.merge_edge_sets(self.incoming, other.incoming, "origin") # TODO bullshit code for edge in result.incoming: @@ -493,14 +493,17 @@ incoming_cache[node.name] = incoming shape = "" label = "\nIncoming objects: %d" % incoming.objects - label += "\nIncoming elements: %d" % incoming.slots - if flags.percent and incoming.objects != 0: - percent_remaining_objects = " (%.1f%%)" % (remaining.objects * 100 / incoming.objects) - percent_remaining_slots = " (%.1f%%)" % (remaining.slots * 100 / incoming.slots) + label += "\nIncoming slots: %d" % incoming.slots + if remaining.objects == incoming.objects: + label += "\n(All remaining)" else: - percent_remaining_objects = percent_remaining_slots = "" - label += "\nRemaining objects: %d%s" % (remaining.objects, percent_remaining_objects) - label += "\nRemaining elements: %d%s" % (remaining.slots, percent_remaining_slots) + if flags.percent and incoming.objects != 0: + percent_remaining_objects = " (%.1f%%)" % (remaining.objects * 100 / incoming.objects) + percent_remaining_slots = " (%.1f%%)" % (remaining.slots * 100 / incoming.slots) + else: + percent_remaining_objects = percent_remaining_slots = "" + label += "\nRemaining objects: %d%s" % (remaining.objects, percent_remaining_objects) + label += "\nRemaining slots: %d%s" % (remaining.slots, percent_remaining_slots) result += "%s [label=\"%s%s\"%s];" % (node.name.replace(" ", "_"), node.name, label, shape) for edge in graph.edges.values(): @@ -514,7 +517,7 @@ target_node = edge.target.name.replace(" ", "_") source_node = edge.origin.name.replace(" ", "_") - result += 
"%s -> %s [label=\"%s\n%s\n%d slots per object\"];" % (source_node, target_node, str_objects, str_slots, total.slots / total.objects) + result += "%s -> %s [label=\"%s\n%s\n%.1f slots per object\"];" % (source_node, target_node, str_objects, str_slots, total.slots / total.objects) result += "}" return result From noreply at buildbot.pypy.org Mon Jul 7 13:16:28 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:28 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Using 4 bytes to encode object size due to large objects in Squeak image. Message-ID: <20140707111628.213A41C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r860:a5b744a0ec04 Date: 2014-07-01 17:21 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a5b744a0ec04/ Log: Using 4 bytes to encode object size due to large objects in Squeak image. diff --git a/spyvm/storage_logger.py b/spyvm/storage_logger.py --- a/spyvm/storage_logger.py +++ b/spyvm/storage_logger.py @@ -71,19 +71,26 @@ assert new_storage in storage_map, "Cannot handle new-storage type %s" % new_storage bytes.append(storage_map[new_storage]) - # Next: 2 bytes encoding object size (big endian) - assert size < 2**16, "Object of type %s too large (size %d)" % (classname, size) + # Next: 4 bytes encoding object size (big endian) + # Assert not compiling in RPython + # assert size < 2**32, "Object of type %s too large (size %d)" % (classname, size) mask = (1<<8)-1 - bytes.append(size & mask) + shift = 0 + bytes.append((size & mask) >> shift) mask = mask<<8 - bytes.append((size & mask) >> 8) + shift += 8 + bytes.append((size & mask) >> shift) + mask = mask<<8 + shift += 8 + bytes.append((size & mask) >> shift) + mask = mask<<8 + shift += 8 + bytes.append((size & mask) >> shift) # Next: classname string plus terminating null-character - i = 5 if classname: for c in classname: bytes.append(ord(c)) - i += 1 bytes.append(0) # No simpler way for RPython's sake. 
diff --git a/spyvm/tool/storagelog_parser.py b/spyvm/tool/storagelog_parser.py --- a/spyvm/tool/storagelog_parser.py +++ b/spyvm/tool/storagelog_parser.py @@ -57,9 +57,9 @@ old_storage = storage_map[old_storage_byte] new_storage = storage_map[new_storage_byte] - # Next 2 bytes: object size (big endian) - size_bytes = file.read(2) - size = int(ord(size_bytes[0]) + (ord(size_bytes[1])<<8)) + # Next 4 bytes: object size (big endian) + size_bytes = file.read(4) + size = int(ord(size_bytes[0]) + (ord(size_bytes[1])<<8) + (ord(size_bytes[2])<<16) + (ord(size_bytes[3])<<24)) # Last: classname, nul-terminated classname = "" From noreply at buildbot.pypy.org Mon Jul 7 13:16:29 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:29 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added "-" as special filename meaning that stdin will be parsed. Message-ID: <20140707111629.353C61C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r861:d8200654841f Date: 2014-07-02 12:41 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d8200654841f/ Log: Added "-" as special filename meaning that stdin will be parsed. Aggregating parsed lines directly, instead of collecting them. Added thousand-separators to output. 
diff --git a/spyvm/tool/storagelog_parser.py b/spyvm/tool/storagelog_parser.py --- a/spyvm/tool/storagelog_parser.py +++ b/spyvm/tool/storagelog_parser.py @@ -1,5 +1,5 @@ -import re, sys, operator +import re, os, sys, operator import spyvm.storage_logger OPERATIONS = ["Filledin", "Initialized", "Switched"] @@ -9,30 +9,35 @@ operation_map = {v:k for k, v in spyvm.storage_logger.operation_map.items()} # ==================================================================== -# ======== Basic functions +# ======== Logfile parsing # ==================================================================== -def filesize(file): - import os - return os.path.getsize(file.name) - -def parse(filename, flags): - entries = [] - with open(filename, 'r', 1) as file: +def parse(filename, flags, callback): + parsed_entries = 0 + if filename == "-": + opener = lambda: sys.stdin + else: + opener = lambda: open(filename, 'r', 1) + with opener() as file: if flags.binary: while True: try: entry = parse_binary(file) if entry == None: if flags.verbose: - tell = file.tell() - format = (tell, len(entries), filesize(file) - tell) - print "Stopped parsing after %d bytes (%d entries). Ignoring leftover %d bytes." % format + if file is sys.stdin: + print "Stopped after parsing %d entries." % parsed_entries + else: + tell = file.tell() + format = (tell, parsed_entries, os.path.getsize(file.name) - tell) + print "Stopped parsing after %d bytes (%d entries). Ignoring leftover %d bytes." 
% format break else: - entries.append(entry) + parsed_entries += 1 + callback(entry) except: - print "Exception while parsing file, after %d bytes (%d entries)" % (file.tell(), len(entries)) + tell = 0 if file is sys.stdin else file.tell() + print "Exception while parsing file, after %d bytes (%d entries)" % (tell, len(entries)) raise else: while True: @@ -41,30 +46,48 @@ break entry = parse_line(line, flags) if entry: - entries.append(entry) - return entries + parsed_entries += 1 + callback(entry) + return parsed_entries + +def safe_read(file, size): + result = file.read(size) + retries = 20 + # Try to work around stdin's unpredictability + while len(result) < size: + result += file.read(size - len(result)) + retries -= 1 + if retries < 0: + return None + import time + time.sleep(0.001) + return result def parse_binary(file): # First 3 bytes: operation, old storage, new storage - header = file.read(3) + header = safe_read(file, 3) + if header is None: return None operation_byte = ord(header[0]) old_storage_byte = ord(header[1]) new_storage_byte = ord(header[2]) # This is the only way to check if we are reading a correct log entry if operation_byte not in operation_map or old_storage_byte not in storage_map or new_storage_byte not in storage_map: + print "Wrong 3 bytes: %d %d %d" % header return None operation = operation_map[operation_byte] old_storage = storage_map[old_storage_byte] new_storage = storage_map[new_storage_byte] # Next 4 bytes: object size (big endian) - size_bytes = file.read(4) + size_bytes = safe_read(file, 4) + if size_bytes is None: return None size = int(ord(size_bytes[0]) + (ord(size_bytes[1])<<8) + (ord(size_bytes[2])<<16) + (ord(size_bytes[3])<<24)) # Last: classname, nul-terminated classname = "" while True: - byte = file.read(1) + byte = safe_read(file, 1) + if byte is None: return None if byte == chr(0): break classname += byte @@ -135,7 +158,9 @@ percent_objects = " (%.1f%%)" % (float(self.objects)*100 / total.objects) else: 
percent_objects = "" - return "%d%s slots in %d%s objects (avg size: %.1f)" % (self.slots, percent_slots, self.objects, percent_objects, avg_slots) + slots = format(self.slots, ",.0f") + objects = format(self.objects, ",.0f") + return "%s%s slots in %s%s objects (avg size: %.1f)" % (slots, percent_slots, objects, percent_objects, avg_slots) def __repr__(self): return "%s(%s)" % (self.__str__(), object.__repr__(self)) @@ -388,10 +413,11 @@ nodes.sort() return nodes -def make_graph(entries): +def make_graph(logfile, flags): graph = StorageGraph() - for e in entries: - graph.add_log_entry(e) + def callback(entry): + graph.add_log_entry(entry) + parse(logfile, flags, callback) graph.assert_sanity() return graph @@ -399,11 +425,8 @@ # ======== Command - Summarize log content # ==================================================================== -def command_summarize(entries, flags): - print_summary(entries, flags) - -def print_summary(entries, flags): - graph = make_graph(entries) +def command_summarize(logfile, flags): + graph = make_graph(logfile, flags) if not flags.allstorage: graph.split_nodes() for node in graph.sorted_nodes(): @@ -456,17 +479,17 @@ # ==================================================================== # Output is valid dot code and can be parsed by the graphviz dot utility. 
-def command_print_dot(entries, flags): - graph = make_graph(entries) +def command_print_dot(logfile, flags): + graph = make_graph(logfile, flags) print "/*" print "Storage Statistics (dot format):" print "================================" print "*/" print dot_string(graph, flags) -def command_dot(entries, flags): +def command_dot(logfile, flags): import subprocess - dot = dot_string(make_graph(entries), flags) + dot = dot_string(make_graph(logfile, flags), flags) command = ["dot", "-Tjpg", "-o%s.jpg" % flags.logfile] print "Running:\n%s" % " ".join(command) p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) @@ -487,33 +510,31 @@ # TODO This is a special node. Hacky way to find out. incoming_cache[node.name] = outgoing shape = ",shape=box" - label = "\nObjects: %d" % outgoing.objects - label += "\nSlots: %d" % outgoing.slots + label = "\nObjects: %s" % format(outgoing.objects, ",.0f") + label += "\nSlots: %s" % format(outgoing.slots, ",.0f") else: incoming_cache[node.name] = incoming shape = "" - label = "\nIncoming objects: %d" % incoming.objects - label += "\nIncoming slots: %d" % incoming.slots - if remaining.objects == incoming.objects: - label += "\n(All remaining)" - else: + label = "\nIncoming objects: %s" % format(incoming.objects, ",.0f") + label += "\nIncoming slots: %s" % format(incoming.slots, ",.0f") + if remaining.objects != incoming.objects: if flags.percent and incoming.objects != 0: - percent_remaining_objects = " (%.1f%%)" % (remaining.objects * 100 / incoming.objects) - percent_remaining_slots = " (%.1f%%)" % (remaining.slots * 100 / incoming.slots) + percent_remaining_objects = " (%.1f%%)" % (float(remaining.objects)*100 / incoming.objects) + percent_remaining_slots = " (%.1f%%)" % (float(remaining.slots)*100 / incoming.slots) else: percent_remaining_objects = percent_remaining_slots = "" - label += "\nRemaining objects: %d%s" % (remaining.objects, percent_remaining_objects) - label += 
"\nRemaining slots: %d%s" % (remaining.slots, percent_remaining_slots) + label += "\nRemaining objects: %s%s" % (format(remaining.objects, ",.0f"), percent_remaining_objects) + label += "\nRemaining slots: %s%s" % (format(remaining.slots, ",.0f"), percent_remaining_slots) result += "%s [label=\"%s%s\"%s];" % (node.name.replace(" ", "_"), node.name, label, shape) for edge in graph.edges.values(): total = edge.total() - str_objects = "%d objects" % total.objects - str_slots = "%d slots" % total.slots + str_objects = "%s objects" % format(total.objects, ",.0f") + str_slots = "%s slots" % format(total.slots, ",.0f") incoming = incoming_cache[edge.origin.name] if flags.percent and incoming.objects != 0: - str_objects += " (%.1f%%)" % (float(total.objects) * 100 / incoming.objects) - str_slots += " (%.1f%%)" % (float(total.slots) * 100 / incoming.slots) + str_objects += " (%.1f%%)" % (float(total.objects)*100 / incoming.objects) + str_slots += " (%.1f%%)" % (float(total.slots)*100 / incoming.slots) target_node = edge.target.name.replace(" ", "_") source_node = edge.origin.name.replace(" ", "_") @@ -526,9 +547,10 @@ # ======== Main # ==================================================================== -def command_print_entries(entries, flags): - for e in entries: - print e +def command_print_entries(logfile, flags): + def callback(entry): + print entry + parse(logfile, flags, callback) class Flags(object): @@ -580,8 +602,7 @@ usage(flags, commands) func = module[command_prefix + command] - entries = parse(logfile, flags) - func(entries, flags) + func(logfile, flags) if __name__ == "__main__": main(sys.argv[1:]) From noreply at buildbot.pypy.org Mon Jul 7 13:16:30 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:30 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Removed binary output for logs, did not help. Added aggregated output, which does the trick. 
Message-ID: <20140707111630.595651C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r862:cc227d08b393 Date: 2014-07-02 17:53 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/cc227d08b393/ Log: Removed binary output for logs, did not help. Added aggregated output, which does the trick. diff --git a/spyvm/storage_logger.py b/spyvm/storage_logger.py --- a/spyvm/storage_logger.py +++ b/spyvm/storage_logger.py @@ -1,18 +1,46 @@ -# Put flags in an object to make it modifyable after compile time. -class LoggerOptions(object): +class Logger(object): def __init__(self): self.active = False - self.binary = False + self.aggregate = False + self.logs = {} + + def log(self, operation, old_storage, new_storage, classname, size): + if self.aggregate: + key = (operation, old_storage, new_storage, classname) + if key not in self.logs: + self.logs[key] = [0, 0] + tuple = self.logs[key] + tuple[0] += size + tuple[1] += 1 + else: + self.output(operation, old_storage, new_storage, classname, size, 1) + + def print_aggregated_log(self): + if not self.aggregate: + return + for key, tuple in self.logs.items(): + operation, old_storage, new_storage, classname = key + slots, objects = tuple + self.output(operation, old_storage, new_storage, classname, slots, objects) + + def output(self, operation, old_storage, new_storage, classname, slots, objects): + old_storage_string = "%s -> " % old_storage if old_storage else "" + classname_string = " of %s" % classname if classname else "" + format = (operation, old_storage_string, new_storage, classname_string, slots, objects) + print "%s (%s%s)%s size %d objects %d" % format -_options = LoggerOptions() +_logger = Logger() -def activate(binary = False): - _options.active = True - _options.binary = binary +def activate(aggregate=False): + _logger.active = True + _logger.aggregate = aggregate + +def print_aggregated_log(): + _logger.print_aggregated_log() def log(w_obj, operation, old_storage_object=None, 
log_classname=True): - if not _options.active: + if not _logger.active: return # Gather information to be logged @@ -27,73 +55,5 @@ else: classname = None - if _options.binary: - binary_output(operation, old_storage, new_storage, classname, size) - else: - output(operation, old_storage, new_storage, classname, size) - -def output(operation, old_storage, new_storage, classname, size): - # Construct and print a simple logstring - old_storage_string = "%s -> " % old_storage if old_storage else "" - classname_string = " of %s" % classname if classname else "" - print "%s (%s%s)%s size %d" % (operation, old_storage_string, new_storage, classname_string, size) - -operation_map = { - "Filledin": 1, - "Initialized": 2, - "Switched": 3, -} - -storage_map = { - "AllNilStorageShadow": 1, - "SmallIntegerOrNilStorageShadow": 2, - "FloatOrNilStorageShadow": 3, - "ListStorageShadow": 4, - "WeakListStorageShadow": 5, - "ClassShadow": 6, - "MethodDictionaryShadow": 7, - "BlockContextShadow": 8, - "MethodContextShadow": 9, - "CachedObjectShadow": 10, - "ObserveeShadow": 11, - None: 12, -} - -def binary_output(operation, old_storage, new_storage, classname, size): - # Output a byte-coded log entry - bytes = [] # bytearray() - - # First 3 bytes: operation, old_storage, new_storage - assert operation in operation_map, "Cannot handle operation %s" % operation - bytes.append(operation_map[operation]) - assert old_storage in storage_map, "Cannot handle old-storage type %s" % old_storage - bytes.append(storage_map[old_storage]) - assert new_storage in storage_map, "Cannot handle new-storage type %s" % new_storage - bytes.append(storage_map[new_storage]) - - # Next: 4 bytes encoding object size (big endian) - # Assert not compiling in RPython - # assert size < 2**32, "Object of type %s too large (size %d)" % (classname, size) - mask = (1<<8)-1 - shift = 0 - bytes.append((size & mask) >> shift) - mask = mask<<8 - shift += 8 - bytes.append((size & mask) >> shift) - mask = mask<<8 - shift += 8 
- bytes.append((size & mask) >> shift) - mask = mask<<8 - shift += 8 - bytes.append((size & mask) >> shift) - - # Next: classname string plus terminating null-character - if classname: - for c in classname: - bytes.append(ord(c)) - bytes.append(0) - - # No simpler way for RPython's sake. - import os - for b in bytes: - os.write(1, chr(b)) + _logger.log(operation, old_storage, new_storage, classname, size) + \ No newline at end of file diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -131,7 +131,7 @@ -ni|--no-interrupts -d|--max-stack-depth [number, default %d, <= 0 disables stack protection] -l|--storage-log - -lb|--storage-log-binary (output should be redirected to file) + -L|--storage-log-aggregate [image path, default: Squeak.image] """ % (argv[0], constants.MAX_LOOP_DEPTH) @@ -198,8 +198,8 @@ idx += 1 elif arg in ["-l", "--storage-log"]: storage_logger.activate() - elif arg in ["-lb", "--storage-log-binary"]: - storage_logger.activate(binary=True) + elif arg in ["-L", "--storage-log-aggregate"]: + storage_logger.activate(aggregate=True) elif path is None: path = argv[idx] else: @@ -236,6 +236,7 @@ else: _run_image(interp) result = 0 + storage_logger.print_aggregated_log() return result From noreply at buildbot.pypy.org Mon Jul 7 13:16:31 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:31 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Removed parsing of binary logs. Slightly improved outputs. Added command to aggregate a logfile. Message-ID: <20140707111631.7ECE11C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r863:0fbe2a4a6a3b Date: 2014-07-02 17:54 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/0fbe2a4a6a3b/ Log: Removed parsing of binary logs. Slightly improved outputs. Added command to aggregate a logfile. 
diff --git a/spyvm/tool/storagelog_parser.py b/spyvm/tool/storagelog_parser.py --- a/spyvm/tool/storagelog_parser.py +++ b/spyvm/tool/storagelog_parser.py @@ -1,12 +1,10 @@ import re, os, sys, operator -import spyvm.storage_logger OPERATIONS = ["Filledin", "Initialized", "Switched"] -# Reverse the two maps used to encode the byte encoded log-output -storage_map = {v:k for k, v in spyvm.storage_logger.storage_map.items()} -operation_map = {v:k for k, v in spyvm.storage_logger.operation_map.items()} +IMAGE_LOADING_STORAGE = " Image Loading Storage" # Space to be sorted to the beginning +OBJECT_CREATION_STORAGE = " Object Creation Storage" # ==================================================================== # ======== Logfile parsing @@ -19,83 +17,17 @@ else: opener = lambda: open(filename, 'r', 1) with opener() as file: - if flags.binary: - while True: - try: - entry = parse_binary(file) - if entry == None: - if flags.verbose: - if file is sys.stdin: - print "Stopped after parsing %d entries." % parsed_entries - else: - tell = file.tell() - format = (tell, parsed_entries, os.path.getsize(file.name) - tell) - print "Stopped parsing after %d bytes (%d entries). Ignoring leftover %d bytes." 
% format - break - else: - parsed_entries += 1 - callback(entry) - except: - tell = 0 if file is sys.stdin else file.tell() - print "Exception while parsing file, after %d bytes (%d entries)" % (tell, len(entries)) - raise - else: - while True: - line = file.readline() - if len(line) == 0: - break - entry = parse_line(line, flags) - if entry: - parsed_entries += 1 - callback(entry) + while True: + line = file.readline() + if len(line) == 0: + break + entry = parse_line(line, flags) + if entry: + parsed_entries += 1 + callback(entry) return parsed_entries -def safe_read(file, size): - result = file.read(size) - retries = 20 - # Try to work around stdin's unpredictability - while len(result) < size: - result += file.read(size - len(result)) - retries -= 1 - if retries < 0: - return None - import time - time.sleep(0.001) - return result - -def parse_binary(file): - # First 3 bytes: operation, old storage, new storage - header = safe_read(file, 3) - if header is None: return None - operation_byte = ord(header[0]) - old_storage_byte = ord(header[1]) - new_storage_byte = ord(header[2]) - # This is the only way to check if we are reading a correct log entry - if operation_byte not in operation_map or old_storage_byte not in storage_map or new_storage_byte not in storage_map: - print "Wrong 3 bytes: %d %d %d" % header - return None - operation = operation_map[operation_byte] - old_storage = storage_map[old_storage_byte] - new_storage = storage_map[new_storage_byte] - - # Next 4 bytes: object size (big endian) - size_bytes = safe_read(file, 4) - if size_bytes is None: return None - size = int(ord(size_bytes[0]) + (ord(size_bytes[1])<<8) + (ord(size_bytes[2])<<16) + (ord(size_bytes[3])<<24)) - - # Last: classname, nul-terminated - classname = "" - while True: - byte = safe_read(file, 1) - if byte is None: return None - if byte == chr(0): - break - classname += byte - if len(classname) == 0: - classname = None - return LogEntry(operation, old_storage, new_storage, classname, 
size) - -line_pattern = re.compile("^(?P\w+) \(((?P\w+) -> )?(?P\w+)\)( of (?P.+))? size (?P[0-9]+)$") +line_pattern = re.compile("^(?P\w+) \(((?P\w+) -> )?(?P\w+)\)( of (?P.+))? size (?P[0-9]+)( objects (?P[0-9]+))?$") def parse_line(line, flags): result = line_pattern.match(line) @@ -108,32 +40,42 @@ new_storage = result.group('new') classname = result.group('classname') size = result.group('size') - return LogEntry(operation, old_storage, new_storage, classname, size) + objects = result.group('objects') + return LogEntry(operation, old_storage, new_storage, classname, size, objects) class LogEntry(object): - def __init__(self, operation, old_storage, new_storage, classname, size): + def __init__(self, operation, old_storage, new_storage, classname, size, objects): self.operation = str(operation) self.new_storage = str(new_storage) self.classname = str(classname) - self.size = float(size) + self.size = int(size) + self.objects = int(objects) if objects else 1 if old_storage is None: if operation == "Filledin": - old_storage = " Image Loading Storage" # Space to be sorted to the beginning + old_storage = IMAGE_LOADING_STORAGE elif operation == "Initialized": - old_storage = " Object Creation Storage" + old_storage = OBJECT_CREATION_STORAGE else: assert False, "old_storage has to be available in a Switched operation" self.old_storage = str(old_storage) + def clear_old_storage(self): + if self.old_storage in (IMAGE_LOADING_STORAGE, OBJECT_CREATION_STORAGE): + self.old_storage = None + def full_key(self): return (self.operation, self.old_storage, self.new_storage) + def __lt__(self, other): + return self.classname < other.classname + def __str__(self): old_storage_string = "%s -> " % self.old_storage if self.old_storage else "" classname_string = " of %s" % self.classname if self.classname else "" - return "%s (%s%s)%s size %d" % (self.operation, old_storage_string, self.new_storage, classname_string, self.size) + objects_string = " objects %d" % self.objects if 
self.objects > 1 else "" + return "%s (%s%s)%s size %d%s" % (self.operation, old_storage_string, self.new_storage, classname_string, self.size, objects_string) # ==================================================================== # ======== Graph parsing @@ -158,8 +100,8 @@ percent_objects = " (%.1f%%)" % (float(self.objects)*100 / total.objects) else: percent_objects = "" - slots = format(self.slots, ",.0f") - objects = format(self.objects, ",.0f") + slots = format(self.slots, ",d") + objects = format(self.objects, ",d") return "%s%s slots in %s%s objects (avg size: %.1f)" % (slots, percent_slots, objects, percent_objects, avg_slots) def __repr__(self): @@ -167,7 +109,7 @@ def add_log_entry(self, entry): self.slots = self.slots + entry.size - self.objects = self.objects + 1 + self.objects = self.objects + entry.objects def __sub__(self, other): return Operations(self.objects - other.objects, self.slots - other.slots) @@ -245,6 +187,17 @@ def add_log_entry(self, entry): self.cls(entry.classname).add_log_entry(entry) + def as_log_entries(self): + entries = [] + for classname, ops in self.classes.classes.items(): + entry = LogEntry(self.operation, self.origin.name, self.target.name, classname, ops.slots, ops.objects) + entry.clear_old_storage() + entries.append(entry) + return entries + + def __lt__(self, other): + return self.full_key() < other.full_key() + def __str__(self): return "[%s %s -> %s]" % (self.operation, self.origin, self.target) @@ -544,14 +497,28 @@ return result # ==================================================================== -# ======== Main +# ======== Other commands # ==================================================================== +def command_aggregate(logfile, flags): + graph = make_graph(logfile, flags) + edges = graph.edges.values() + edges.sort() + for edge in edges: + logentries = edge.as_log_entries() + logentries.sort() + for entry in logentries: + print entry + def command_print_entries(logfile, flags): def callback(entry): 
print entry parse(logfile, flags, callback) +# ==================================================================== +# ======== Main +# ==================================================================== + class Flags(object): def __init__(self, flags): @@ -583,7 +550,6 @@ ('allstorage', '-a'), ('detailed', '-d'), ('classes', '-c'), - ('binary', '-b'), ]) command_prefix = "command_" From noreply at buildbot.pypy.org Mon Jul 7 13:16:52 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:52 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added new benchmark - matrix multiplication using arrays full of SmallIntegers or Floats. Message-ID: <20140707111652.1AC661C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r864:bc84bc8ed49c Date: 2014-07-03 15:36 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/bc84bc8ed49c/ Log: Added new benchmark - matrix multiplication using arrays full of SmallIntegers or Floats. diff too long, truncating to 2000 out of 308107 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -36,4 +36,4 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! !Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! 
scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! !ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! !LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. 
The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. The nearest even integer is chosen in this case. Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. An even mantissa is rounded upper only if the fraction part is not a tie." "Algorihm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." | mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." 
((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. [(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." ^nil! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! 
classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anythiEng answer the global, class, or pool association with that nameE, if any, else add it to Undeclared a answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. ^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notiEfying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nilE->Form class "! ! !Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. 
b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. "If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. "Check for gradual underflow, in which case the mantissa will loose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. "Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. ^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version cares of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! 
compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. ]. self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! 
ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! I encode names and literals into tree nodes with byte codes for the compiler. Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. 
If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! 
bindingOf: varName ifAbsent: aBlock ^self associationAt: varName ifAbsent: aBlock! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! init: sourceStream cue: aCue failBlock: aBlock self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. The argument noPattern is a Boolean that is true if the the sourceStream does not contain a method header (i.e., for DoIts)." | methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. [repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! 
class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! 
!CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! getClass ^ class! ! !CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! 
!Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the contxt. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. self source: cue source. ^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! 
setCue: aCue cue := aCue. "Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | aString newTranslations assoc currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. 
Make sure current locale corresponds my locale id" | newTranslations currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [| aString assoc | Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'! scanFrom: aByteStream environment: anEnvironment "This should probably be reimplemented using an environment for compilation. For now, don't change anything" ^ self scanFrom: aByteStream! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'! bindingOf: varName ifAbsent: aBlock "SystemDictionary includes Symbols only" ^super bindingOf: varName asSymbol ifAbsent: aBlock! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'! undeclared ^ self at: #Undeclared! ! "System"! !ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955! expectedFailures ^ #().! ! "Tests"! ReleaseBuilder subclass: #ReleaseBuilderFor4dot5 instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'ReleaseBuilder'! !ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0! The release builder for Squeak 4.5! !ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'! transferCurrentPackagesAsUser: username password: password "Copy the packages currently loaded in the image from the trunk repository to my releaseRepository." | trunkRep releaseRep | trunkRep := self trunkRepository. releaseRep := self releaseRepository user: username; password: password; yourself. 
MCWorkingCopy allManagers do: [ : eachWorkingCopy | eachWorkingCopy ancestors do: [ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse: [ (trunkRep versionWithInfo: eachVersionInfo) ifNil: [ Warning signal: eachVersionInfo name , ' not found in ', trunkRep ] ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! openWelcomeWorkspaces TheWorldMainDockingBar instance showWelcomeText: #squeakUserInterface label: 'Squeak User Interface' in: (40 @ 40 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #workingWithSqueak label: 'Working With Squeak' in: (80 @ 80 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #licenseInformation label: 'License Information' in: (120 @ 120 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeFutureDirections label: 'Future Directions' in: (160 @ 160 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeToSqueak label: 'Welcome to Squeak 4.5' in: (200 @ 200 extent: 500 @ 300)! ! !ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'! prepareNewBuild super prepareNewBuild. MCMockPackageInfo initialize.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'! releaseRepository "At release time, change 'trunk' to 'squeak45'." ^ MCHttpRepository location: 'http://source.squeak.org/trunk' user: 'squeak' password: 'squeak'! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'! setDisplayExtent: extent "Uncomment next line when the primitives become available in the Squeak VM." " DisplayScreen hostWindowSize: extent." Display extent = extent ifFalse: [ Warning signal: 'Display extent not set to ', extent ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! 
setPreferences Preferences installBrightWindowColors ; setPreference: #scrollBarsWithoutMenuButton toValue: true ; setPreference: #swapMouseButtons toValue: true ; setPreference: #annotationPanes toValue: true ; setPreference: #showSplitterHandles toValue: false ; setPreference: #showBoundsInHalo toValue: true ; setPreference: #alternateHandlesLook toValue: false ; setPreference: #roundedMenuCorners toValue: false ; setPreference: #roundedWindowCorners toValue: false. PluggableButtonMorph roundedButtonCorners: false. FillInTheBlankMorph roundedDialogCorners: false. Workspace shouldStyle: false. NetNameResolver enableIPv6: true.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! switchToNewRepository | old44Repository | MCMcmUpdater defaultUpdateURL: self releaseRepository description. old44Repository := MCRepositoryGroup default repositories detect: [:each | each description includesSubString: 'squeak44'] ifNone: [nil]. old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository]. MCRepositoryGroup default addRepository: self releaseRepository! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! versionString ^ 'Squeak4.5'.! ! ReleaseBuilder class removeSelector: #transferCurrentPackages! "ReleaseBuilder"! !Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114! initialize self install! ! "Environments"! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878! initPattern: aString notifying: req return: aBlock | result | self init: (ReadStream on: aString asString) cue: (CompilationCue source: aString requestor: req) failBlock: [^nil]. encoder := self. result := aBlock value: (self pattern: false inContext: nil). encoder := failBlock := nil. "break cycles" ^result! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471! 
parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock | c | c := CompilationCue source: sourceStream context: aContext class: class category: aCategory requestor: req. ^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963! evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | theClass | theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class). self setCue: (CompilationCue source: textOrStream context: aContext receiver: receiver class: theClass environment: theClass environment category: nil requestor: aRequestor). ^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488! from: textOrStream class: aClass classified: aCategory context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass category: aCategory requestor: req)! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309! from: textOrStream class: aClass context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass requestor: req) ! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506! 
init: aClass context: aContext notifying: anObject | c | c := CompilationCue context: aContext class: aClass requestor: nil. self init: c notifying: anObject! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698! temps: tempVars literals: lits class: cl "Initialize this encoder for decompilation." self setCue: (CompilationCue class: cl). supered := false. nTemps := tempVars size. tempVars do: [:node | scopeTable at: node name put: node]. literalStream := WriteStream on: (Array new: lits size). literalStream nextPutAll: lits. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32.! ! "Compiler"! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010! addClassVarName: aString "Add the argument, aString, as a class variable of the receiver. Signal an error if the first character of aString is not capitalized, or if it is already a variable named in the class." | symbol oldState | oldState := self copy. aString first canBeGlobalVarInitial ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.']. symbol := aString asSymbol. self withAllSubclasses do: [:subclass | (self canFindWithoutEnvironment: symbol) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: aString; signal: aString, ' is already defined']]. classPool == nil ifTrue: [classPool := Dictionary new]. (classPool includesKey: symbol) ifFalse: ["Pick up any refs in Undeclared" classPool declare: symbol from: environment undeclared. SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024! bindingOf: varName ^ self bindingOf: varName environment: self environment! ! !Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804! category "Answer the system organization category for the receiver. 
First check whether the category name stored in the ivar is still correct and only if this fails look it up (latter is much more expensive)" category ifNotNil: [ :symbol | ((self environment organization listAtCategoryNamed: symbol) includes: self name) ifTrue: [ ^symbol ] ]. category := self environment organization categoryOfElement: self name. ^category! ! !Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730! declare: varString "Declare class variables common to all instances. Answer whether recompilation is advisable." | newVars conflicts | newVars := (Scanner new scanFieldNames: varString) collect: [:x | x asSymbol]. newVars do: [:var | var first canBeGlobalVarInitial ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']]. conflicts := false. classPool == nil ifFalse: [(classPool keys reject: [:x | newVars includes: x]) do: [:var | self removeClassVarName: var]]. (newVars reject: [:var | self classPool includesKey: var]) do: [:var | "adding" "check if new vars defined elsewhere" (self canFindWithoutEnvironment: var) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: var; signal: var, ' is already defined'. conflicts := true]]. newVars size > 0 ifTrue: [classPool := self classPool. "in case it was nil" newVars do: [:var | classPool declare: var from: environment undeclared]]. ^conflicts! ! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475! removeClassVarName: aString "Remove the class variable whose name is the argument, aString, from the names defined in the receiver, a class. Create an error notification if aString is not a class variable or if it is still being used in the code of the class." | aSymbol | aSymbol := aString asSymbol. (classPool includesKey: aSymbol) ifFalse: [^self error: aString, ' is not a class variable']. 
self withAllSubclasses do:[:subclass | (Array with: subclass with: subclass class) do:[:classOrMeta | (classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty ifFalse: [ InMidstOfFileinNotification signal ifTrue: [ Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '. ^ environment undeclared declare: aSymbol from: classPool]. (self confirm: (aString,' is still used in code of class ', classOrMeta name, '.\Is it okay to move it to Undeclared?') withCRs) ifTrue:[^Undeclared declare: aSymbol from: classPool] ifFalse:[^self]]]]. classPool removeKey: aSymbol. classPool isEmpty ifTrue: [classPool := nil]. ! ! !Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206! rename: aString "The new name of the receiver is the argument, aString." | oldName newName | (newName := aString asSymbol) = (oldName := self name) ifTrue: [^ self]. (self environment includesKey: newName) ifTrue: [^ self error: newName , ' already exists']. (environment undeclared includesKey: newName) ifTrue: [self inform: 'There are references to, ' , aString printString , ' from Undeclared. Check them after this change.']. name := newName. self environment renameClass: self from: oldName! ! !ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430! name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe "Define a new class in the given environment. If unsafe is true do not run any validation checks. This facility is provided to implement important system changes." | oldClass instVars classVars copyOfOldClass newClass | environ := env. instVars := Scanner new scanFieldNames: instVarString. classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol]. "Validate the proposed name" unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]]. 
oldClass := env at: className ifAbsent:[nil]. oldClass isBehavior ifFalse: [oldClass := nil] "Already checked in #validateClassName:" ifTrue: [ copyOfOldClass := oldClass copy. copyOfOldClass superclass addSubclass: copyOfOldClass]. [ | newCategory needNew force organization oldCategory | unsafe ifFalse:[ "Run validation checks so we know that we have a good chance for recompilation" (self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil]. (self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]]. "See if we need a new subclass" needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. needNew == nil ifTrue:[^nil]. "some error" (needNew and:[unsafe not]) ifTrue:[ "Make sure we don't redefine any dangerous classes" (self tooDangerousClasses includes: oldClass name) ifTrue:[ self error: oldClass name, ' cannot be changed'. ]. "Check if the receiver should not be redefined" (oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[ self notify: oldClass name asText allBold, ' should not be redefined. \Proceed to store over it.' withCRs]]. needNew ifTrue:[ "Create the new class" newClass := self newSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. newClass == nil ifTrue:[^nil]. "Some error" newClass setName: className. newClass environment: environ. ] ifFalse:[ "Reuse the old class" newClass := oldClass. ]. "Install the class variables and pool dictionaries... " force := (newClass declare: classVarString) | (newClass sharing: poolString). "... classify ..." newCategory := category asSymbol. organization := environ ifNotNil:[environ organization]. oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol]. 
organization classify: newClass name under: newCategory suppressIfDefault: true. "... recompile ..." newClass := self recompile: force from: oldClass to: newClass mutate: false. "... export if not yet done ..." (environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[ [environ at: newClass name put: newClass] on: AttemptToWriteReadOnlyGlobal do:[:ex| ex resume: true]. environ flushClassNameCache. ]. newClass doneCompiling. "... notify interested clients ..." oldClass isNil ifTrue: [ SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory. ^ newClass]. newCategory ~= oldCategory ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category] ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.]. ] ensure: [copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass]. Behavior flushObsoleteSubclasses. ]. ^newClass! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019! superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class." | env | env := EnvironmentRequest signal ifNil: [newSuper environment]. ^self name: t inEnvironment: env subclassOf: newSuper type: newSuper typeOfClass instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912! superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable byte-sized nonpointer variables." 
| oldClassOrNil actualType env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields']. (aClass isVariable and: [aClass isWords]) ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields']. oldClassOrNil := aClass environment at: t ifAbsent:[nil]. actualType := (oldClassOrNil notNil and: [oldClassOrNil typeOfClass == #compiledMethod]) ifTrue: [#compiledMethod] ifFalse: [#bytes]. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: actualType instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442! superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #variable instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098! superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable word-sized nonpointer variables." | env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a word subclass of a class with named fields']. 
(aClass isVariable and: [aClass isBytes]) ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #words instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028! superclass: aClass weakSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class (the receiver) in which the subclass is to have weak indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #weak instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! "Kernel"! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029! ambiguousSelector: aString inRange: anInterval | correctedSelector userSelection offset intervalWithOffset | self interactive ifFalse: [ "In non interactive mode, compile with backward comapatibility: $- is part of literal argument" Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'. ^super ambiguousSelector: aString inRange: anInterval]. "handle the text selection" userSelection := cue requestor selectionInterval. intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset. cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last. cue requestor select. 
"Build the menu with alternatives" correctedSelector := AmbiguousSelector signalName: aString inRange: intervalWithOffset. correctedSelector ifNil: [^self fail]. "Execute the selected action" offset := self substituteWord: correctedSelector wordInterval: intervalWithOffset offset: 0. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset. token := (correctedSelector readStream upTo: Character space) asSymbol! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136! collectTemporaryDeclarationsFrom: methodNode | tempsMarks str | tempsMarks := OrderedCollection new. str := cue requestor text asString. methodNode accept: (ParseNodeEnumerator ofBlock: [ :aNode | | mark | (aNode class canUnderstand: #tempsMark) ifTrue: [mark := aNode tempsMark. (mark notNil and: [ mark between: 1 and: str size ] and: [ (str at: mark) = $| ]) ifTrue: [ tempsMarks addLast: aNode ]]]). (tempsMark notNil and: [ tempsMark between: 1 and: str size ] and: [ (str at: tempsMark) = $| ]) ifTrue: [ tempsMarks addLast: self ]. ^ tempsMarks sorted: [ :a :b | a tempsMark > b tempsMark ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606! correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction "Correct the proposedKeyword to some selector symbol, correcting the original text if such action is indicated. abortAction is invoked if the proposedKeyword couldn't be converted into a valid selector. Spots is an ordered collection of intervals within the test stream of the for each of the keyword parts." | correctSelector userSelection | "If we can't ask the user, assume that the keyword will be defined later" self interactive ifFalse: [^proposedKeyword asSymbol]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spots first first to: spots last last. cue requestor select. 
correctSelector := UnknownSelector name: proposedKeyword. correctSelector ifNil: [^abortAction value]. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. self substituteSelector: correctSelector keywords wordIntervals: spots. ^(proposedKeyword last ~~ $: and: [correctSelector last == $:]) ifTrue: [abortAction value] ifFalse: [correctSelector]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242! correctVariable: proposedVariable interval: spot "Correct the proposedVariable to a known variable, or declare it as a new variable if such action is requested. We support declaring lowercase variables as temps or inst-vars, and uppercase variables as Globals or ClassVars, depending on whether the context is nil (class=UndefinedObject). Spot is the interval within the test stream of the variable. rr 3/4/2004 10:26 : adds the option to define a new class. " "Check if this is an i-var, that has been corrected already (ugly)" "Display the pop-up menu" | binding userSelection action | (encoder classEncoding instVarNames includes: proposedVariable) ifTrue: [^InstanceVariableNode new name: proposedVariable index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)]. "If we can't ask the user for correction, make it undeclared" self interactive ifFalse: [^encoder undeclared: proposedVariable]. "First check to see if the requestor knows anything about the variable" (binding := cue requestor bindingOf: proposedVariable) ifNotNil: [^encoder global: binding name: proposedVariable]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spot first to: spot last. cue requestor select. "Build the menu with alternatives" action := UndeclaredVariable signalFor: self name: proposedVariable inRange: spot. action ifNil: [^self fail]. "Execute the selected action" cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. 
^action value! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921! declareUndeclaredTemps: methodNode "Declare any undeclared temps, declaring them at the smallest enclosing scope." | undeclared userSelection blocksToVars | (undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self]. userSelection := cue requestor selectionInterval. blocksToVars := IdentityDictionary new. undeclared do: [:var| (blocksToVars at: (var tag == #method ifTrue: [methodNode block] ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)]) ifAbsentPut: [SortedCollection new]) add: var name]. (blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil: [:rootVars| rootVars do: [:varName| self pasteTempAtMethodLevel: varName]]. (blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do: [:block| | decl | decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b]. block temporaries isEmpty ifTrue: [self substituteWord: ' | ', decl, ' |' wordInterval: (block tempsMark + 1 to: block tempsMark) offset: requestorOffset] ifFalse: [self substituteWord: decl, ' ' wordInterval: (block tempsMark to: block tempsMark - 1) offset: requestorOffset]]. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset. ReparseAfterSourceEditing signal! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770! defineClass: className "prompts the user to define a new class, asks for it's category, and lets the users edit further the definition" | sym cat def d2 | sym := className asSymbol. cat := UIManager default request: 'Enter class category : ' initialAnswer: self encoder classEncoding theNonMetaClass category. cat ifEmpty: [cat := 'Unknown']. def := 'Object subclass: #' , sym , ' instanceVariableNames: '''' classVariableNames: '''' poolDictionaries: '''' category: ''' , cat , ''''. d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def. 
d2 ifEmpty: [d2 := def]. Compiler evaluate: d2. ^ encoder global: (cue environment bindingOf: sym) name: sym! ! !Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567! externalFunctionDeclaration "Parse the function declaration for a call to an external library." | descriptorClass callType modifier retType externalName args argType module fn | descriptorClass := cue environment valueOf: #ExternalFunction ifAbsent: [^ false]. callType := descriptorClass callingConventionFor: here. callType == nil ifTrue:[^false]. [modifier := descriptorClass callingConventionModifierFor: token. modifier notNil] whileTrue: [self advance. callType := callType bitOr: modifier]. "Parse return type" self advance. retType := self externalType: descriptorClass. retType == nil ifTrue:[^self expected:'return type']. "Parse function name or index" externalName := here. (self match: #string) ifTrue:[externalName := externalName asSymbol] ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']]. (self matchToken: #'(') ifFalse:[^self expected:'argument list']. args := WriteStream on: Array new. [here == #')'] whileFalse:[ argType := self externalType: descriptorClass. argType == nil ifTrue:[^self expected:'argument']. argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType]. ]. (self matchToken: #')') ifFalse:[^self expected:')']. (self matchToken: 'module:') ifTrue:[ module := here. (self match: #string) ifFalse:[^self expected: 'String']. module := module asSymbol]. Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn| fn := xfn name: externalName module: module callType: callType returnType: retType argumentTypes: args contents. self allocateLiteral: fn. ]. (self matchToken: 'error:') ifTrue: [| errorCodeVariable | errorCodeVariable := here. (hereType == #string or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)']. self advance. 
self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)). fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]] ifFalse: [self addPragma: (Pragma keyword: #primitive: arguments: #(120))]. ^true ! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223! notify: string at: location cue requestor isNil ifTrue: [(encoder == self or: [encoder isNil]) ifTrue: [^ self fail "failure setting up syntax error"]. SyntaxErrorNotification inClass: encoder classEncoding category: cue category withCode: (source contents asText copyReplaceFrom: location to: location - 1 with: ((string , ' ->') asText allBold addAttribute: TextColor red; yourself)) doitFlag: doitFlag errorMessage: string location: location] ifFalse: [cue requestor notify: string , ' ->' at: location in: source]. ^self fail! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108! pasteTempAtMethodLevel: name | insertion delta theTextString characterBeforeMark | theTextString := cue requestor text string. characterBeforeMark := theTextString at: tempsMark-1 ifAbsent: [$ ]. (theTextString at: tempsMark) = $| ifTrue: [ "Paste it before the second vertical bar" insertion := name, ' '. characterBeforeMark isSeparator ifFalse: [ insertion := ' ', insertion]. delta := 0. ] ifFalse: [ "No bars - insert some with CR, tab" insertion := '| ' , name , ' |',String cr. delta := 2. "the bar and CR" characterBeforeMark = Character tab ifTrue: [ insertion := insertion , String tab. delta := delta + 1. "the tab" ]. ]. tempsMark := tempsMark + (self substituteWord: insertion wordInterval: (tempsMark to: tempsMark-1) offset: 0) - delta! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305! 
queryUndefined | varStart varName | varName := parseNode key. varStart := self endOfLastToken + requestorOffset - varName size + 1. cue requestor selectFrom: varStart to: varStart + varName size - 1; select. (UndefinedVariable name: varName) ifFalse: [^ self fail]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341! removeEmptyTempDeclarationsFrom: methodNode | sourceCode madeChanges tempsMarkHolder | sourceCode := cue requestor text asString. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. madeChanges := false. tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start | tempsMarkChar0 := (sourceCode at: currentBlock tempsMark). tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1). tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2). tempsMarkChar0 = $| & tempsMarkChar1 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 1]. tempsMarkChar0 = $| & tempsMarkChar1 = $ & tempsMarkChar2 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 2]. start notNil & end notNil ifTrue: [ | lineStart lineEnd | lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]). lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ]. ((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ]) ifTrue: [ start := lineStart. end := lineEnd ]. cue requestor correctFrom: start to: end with: ''. madeChanges := true. currentBlock tempsMark: nil ] ]. madeChanges ifTrue: [ReparseAfterSourceEditing signal]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281! removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks | start end | end := currentBlock tempsMark - 1. ["Beginning at right temp marker..." 
start := end - temp size + 1. end < temp size or: [ (str at: start) = $| ] or: [ temp = (str copyFrom: start to: end) and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator) & ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]] whileFalse: [ "Search left for the unused temp" end := cue requestor nextTokenFrom: end direction: -1 ]. (end < temp size or: [ (str at: start) = $| ]) ifFalse: [(str at: start - 1) = $ ifTrue: [ start := start - 1 ]. cue requestor correctFrom: start to: end with: ''. someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)]. ^true ]. ^false! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194! removeUnusedTemps: methodNode "Scan for unused temp names, and prompt the user about the prospect of removing each one found" | madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange | madeChanges := false. tempMarkHoldersToChange := OrderedCollection new. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. unusedTempNames := encoder unusedTempNames select: [ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]]. tempsMarkHolder do: [ :currentBlock | tempMarkHoldersToChange add: currentBlock. unusedTempNames do: [ :temp | (self removeUnusedTemporaryNamed: temp from: cue requestor text asString lookingAt: currentBlock movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]]. madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode. ReparseAfterSourceEditing signal ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326! substituteWord: correctWord wordInterval: spot offset: o "Substitute the correctSelector into the (presumed interactive) receiver. Update requestorOffset based on the delta size and answer the updated offset." 
cue requestor correctFrom: spot first + o to: spot last + o with: correctWord. requestorOffset := requestorOffset + correctWord size - spot size. ^o + correctWord size - spot size! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807! temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863! 
compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted." | methodNode method theClass | theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class. self from: textOrStream class: theClass context: aContext notifying: aRequestor. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [ methodNode generateWithTempNames ] ifFalse: [methodNode generate]. logFlag ifTrue: [SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext]. ^method! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593! format: aStream noPattern: noPattern ifFail: failBlock ^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387! notify: aString at: location "Refer to the comment in Object|notify:." ^ cue requestor == nil ifTrue: [SyntaxErrorNotification inClass: cue getClass category: cue category withCode: (sourceStream contents copyReplaceFrom: location to: location - 1 with: aString) doitFlag: false errorMessage: aString location: location] ifFalse: [cue requestor notify: aString at: location in: sourceStream]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201! 
parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! !Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! 
classEncoding "This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view." ^ cue getClass! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819! encodeLiteral: object ^self name: object key: (cue literalScannedAs: object notifying: self) class: LiteralNode type: LdLitType set: litSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010! encodeSelector: aSelector ^self name: aSelector key: aSelector class: SelectorNode type: SendType set: selectorSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123! environment "Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)" ^cue environment! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497! lookupInPools: varName ifFound: assocBlock ^Symbol hasInterned: varName ifTrue: [:sym| (cue bindingOf: sym) ifNil: [^false] ifNotNil: [:assoc| assocBlock value: assoc]]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306! possibleNamesFor: proposedName | results | results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil. ^ proposedName correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012! possibleVariablesFor: proposedVariable | results | results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil. proposedVariable first canBeGlobalVarInitial ifTrue: [ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ]. ^ proposedVariable correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830! undeclared: name | sym | requestor interactive ifTrue: [requestor requestor == #error: ifTrue: [requestor error: 'Undeclared']. ^self notify: 'Undeclared']. 
"Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. ***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. 
"This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. 
"was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. "Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). 
"53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). "Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. "Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. 
SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! 
I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. systemOrganizer := environment organization! ! !Browser methodsFor: 'system category list' stamp: 'cwp 6/24/2012 23:06' prior: 36467357! From noreply at buildbot.pypy.org Mon Jul 7 13:16:53 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:53 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added an option to also aggregate and log the classes of elements that cause an object to switch to another storage strategy. Message-ID: <20140707111653.45F2D1C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r865:13350a81184e Date: 2014-07-03 15:37 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/13350a81184e/ Log: Added an option to also aggregate and log the classes of elements that cause an object to switch to another storage strategy. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -612,16 +612,16 @@ assert shadow, "The shadow has not been initialized yet!" 
return shadow - def switch_shadow(self, new_shadow): + def switch_shadow(self, new_shadow, w_element=None): old_shadow = self.assert_shadow() new_shadow.copy_from(old_shadow) self.store_shadow(new_shadow) new_shadow.attach_shadow() - self.log_storage("Switched", old_shadow) + self.log_storage("Switched", old_shadow, w_element=w_element) def store_with_new_storage(self, new_storage, n0, w_val): space = self.space() - self.switch_shadow(new_storage(space, self, self.size())) + self.switch_shadow(new_storage(space, self, self.size()), w_element=w_val) self.store(space, n0, w_val) def space(self): diff --git a/spyvm/storage_logger.py b/spyvm/storage_logger.py --- a/spyvm/storage_logger.py +++ b/spyvm/storage_logger.py @@ -1,45 +1,65 @@ + +class LogEntry(object): + def __init__(self): + self.slots = 0 + self.objects = 0 + self.element_classnames = {} + + def add(self, size, element_classname): + self.slots += size + self.objects += 1 + if element_classname: + self.element_classnames[element_classname] = None + + def classnames(self): + if len(self.element_classnames) > 0: + return self.element_classnames.keys() + return None class Logger(object): def __init__(self): self.active = False self.aggregate = False + self.elements = False self.logs = {} - def log(self, operation, old_storage, new_storage, classname, size): + def log(self, operation, old_storage, new_storage, classname, size, element_classname): if self.aggregate: key = (operation, old_storage, new_storage, classname) if key not in self.logs: - self.logs[key] = [0, 0] - tuple = self.logs[key] - tuple[0] += size - tuple[1] += 1 + self.logs[key] = LogEntry() + entry = self.logs[key] + entry.add(size, element_classname) else: - self.output(operation, old_storage, new_storage, classname, size, 1) + element_classnames = [ element_classname ] if element_classname else None + self.output(operation, old_storage, new_storage, classname, size, 1, element_classnames) def print_aggregated_log(self): if not self.aggregate: 
return - for key, tuple in self.logs.items(): + for key, entry in self.logs.items(): operation, old_storage, new_storage, classname = key - slots, objects = tuple - self.output(operation, old_storage, new_storage, classname, slots, objects) + slots, objects, element_classnames = entry.slots, entry.objects, entry.classnames() + self.output(operation, old_storage, new_storage, classname, slots, objects, element_classnames) - def output(self, operation, old_storage, new_storage, classname, slots, objects): + def output(self, operation, old_storage, new_storage, classname, slots, objects, element_classnames): old_storage_string = "%s -> " % old_storage if old_storage else "" classname_string = " of %s" % classname if classname else "" - format = (operation, old_storage_string, new_storage, classname_string, slots, objects) - print "%s (%s%s)%s size %d objects %d" % format + element_string = (" elements: " + " ".join(element_classnames)) if element_classnames else "" + format = (operation, old_storage_string, new_storage, classname_string, slots, objects, element_string) + print "%s (%s%s)%s size %d objects %d%s" % format _logger = Logger() -def activate(aggregate=False): +def activate(aggregate=False, elements=False): _logger.active = True - _logger.aggregate = aggregate + _logger.aggregate = _logger.aggregate or aggregate + _logger.elements = _logger.elements or elements def print_aggregated_log(): _logger.print_aggregated_log() -def log(w_obj, operation, old_storage_object=None, log_classname=True): +def log(w_obj, operation, old_storage_object=None, log_classname=True, w_element=None): if not _logger.active: return @@ -54,6 +74,10 @@ classname = w_obj.guess_classname() else: classname = None + if _logger.elements and w_element and log_classname: + element_classname = w_element.guess_classname() + else: + element_classname = None - _logger.log(operation, old_storage, new_storage, classname, size) + _logger.log(operation, old_storage, new_storage, classname, size, 
element_classname) \ No newline at end of file diff --git a/spyvm/tool/storagelog_parser.py b/spyvm/tool/storagelog_parser.py --- a/spyvm/tool/storagelog_parser.py +++ b/spyvm/tool/storagelog_parser.py @@ -27,7 +27,7 @@ callback(entry) return parsed_entries -line_pattern = re.compile("^(?P<operation>\w+) \(((?P<old>\w+) -> )?(?P<new>\w+)\)( of (?P<classname>.+))? size (?P<size>[0-9]+)( objects (?P<objects>[0-9]+))?$") +line_pattern = re.compile("^(?P<operation>\w+) \(((?P<old>\w+) -> )?(?P<new>\w+)\)( of (?P<classname>.+))? size (?P<size>[0-9]+)( objects (?P<objects>[0-9]+))?( elements: (?P<classnames>.+( .+)+))?$") def parse_line(line, flags): result = line_pattern.match(line) @@ -41,16 +41,20 @@ classname = result.group('classname') size = result.group('size') objects = result.group('objects') - return LogEntry(operation, old_storage, new_storage, classname, size, objects) + classnames = result.group('classnames') + if classnames is not None: + classnames = classnames.split(' ') + return LogEntry(operation, old_storage, new_storage, classname, size, objects, classnames) class LogEntry(object): - def __init__(self, operation, old_storage, new_storage, classname, size, objects): + def __init__(self, operation, old_storage, new_storage, classname, size, objects, classnames): self.operation = str(operation) self.new_storage = str(new_storage) self.classname = str(classname) self.size = int(size) self.objects = int(objects) if objects else 1 + self.classnames = set(classnames) if classnames else set() if old_storage is None: if operation == "Filledin": @@ -83,9 +87,10 @@ class Operations(object): - def __init__(self, objects=0, slots=0): + def __init__(self, objects=0, slots=0, element_classnames=[]): self.objects = objects self.slots = slots + self.element_classnames = set(element_classnames) def __str__(self, total=None): if self.objects == 0: @@ -102,7 +107,9 @@ percent_objects = "" slots = format(self.slots, ",d") objects = format(self.objects, ",d") - return "%s%s slots in %s%s objects (avg size: %.1f)" % (slots, percent_slots, objects, percent_objects, avg_slots) + 
classnames = (" [ elements: %s ]" % ' '.join([str(x) for x in self.element_classnames])) \ + if len(self.element_classnames) else "" + return "%s%s slots in %s%s objects (avg size: %.1f)%s" % (slots, percent_slots, objects, percent_objects, avg_slots, classnames) def __repr__(self): return "%s(%s)" % (self.__str__(), object.__repr__(self)) @@ -110,6 +117,7 @@ def add_log_entry(self, entry): self.slots = self.slots + entry.size self.objects = self.objects + entry.objects + self.element_classnames |= entry.classnames def __sub__(self, other): return Operations(self.objects - other.objects, self.slots - other.slots) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -132,6 +132,7 @@ -d|--max-stack-depth [number, default %d, <= 0 disables stack protection] -l|--storage-log -L|--storage-log-aggregate + -E|--storage-log-elements [image path, default: Squeak.image] """ % (argv[0], constants.MAX_LOOP_DEPTH) @@ -200,6 +201,8 @@ storage_logger.activate() elif arg in ["-L", "--storage-log-aggregate"]: storage_logger.activate(aggregate=True) + elif arg in ["-E", "--storage-log-elements"]: + storage_logger.activate(elements=True) elif path is None: path = argv[idx] else: From noreply at buildbot.pypy.org Mon Jul 7 13:16:54 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:16:54 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed log parsing regex Message-ID: <20140707111654.5DD4E1C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r866:d39d1b72d99e Date: 2014-07-03 21:12 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d39d1b72d99e/ Log: Fixed log parsing regex diff --git a/spyvm/tool/storagelog_parser.py b/spyvm/tool/storagelog_parser.py --- a/spyvm/tool/storagelog_parser.py +++ b/spyvm/tool/storagelog_parser.py @@ -27,7 +27,7 @@ callback(entry) return parsed_entries -line_pattern = 
re.compile("^(?P<operation>\w+) \(((?P<old>\w+) -> )?(?P<new>\w+)\)( of (?P<classname>.+))? size (?P<size>[0-9]+)( objects (?P<objects>[0-9]+))?( elements: (?P<classnames>.+( .+)+))?$") +line_pattern = re.compile("^(?P<operation>\w+) \(((?P<old>\w+) -> )?(?P<new>\w+)\)( of (?P<classname>.+))? size (?P<size>[0-9]+)( objects (?P<objects>[0-9]+))?( elements: (?P<classnames>.+( .+)*))?$")

def parse_line(line, flags): result = line_pattern.match(line) From noreply at buildbot.pypy.org Mon Jul 7 13:17:08 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:17:08 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added some code to the Matrix benchmark. Message-ID: <20140707111708.BF55B1C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r867:d1dfa8569637 Date: 2014-07-04 10:14 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d1dfa8569637/ Log: Added some code to the Matrix benchmark. diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes --- a/images/Squeak4.5-noBitBlt.changes +++ b/images/Squeak4.5-noBitBlt.changes @@ -12208,4 +12208,4 @@ ]. "self footer." - ^ self! ! ----QUIT----{2 April 2014 . 11:59:41 am} Squeak4.5-noBitBlt.image priorSource: 15812182! ----STARTUP----{3 July 2014 . 11:14:14 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! SystemOrganization addCategory: #Anton! Object subclass: #AntonMatrix instanceVariableNames: 'fields columns rows' classVariableNames: '' poolDictionaries: '' category: 'Anton'! Object subclass: #AntonMatrix instanceVariableNames: 'fields columns rows' classVariableNames: '' poolDictionaries: '' category: 'Anton'! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! at: point ^ self x: point x y: point y! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:16'! at: point put: number ^ self x: point x y: point y put: number! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:12'! columns ^ columns! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:55'!
fieldsDo: block (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | block value: row value: column ] ].! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:56'! fill: block self fieldsDo: [ :x :y | self x: x y: y put: (block value: x value: y) ].! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:12'! rows ^ rows! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! x: x y: y ^ fields at: (self offsetX: x y: y)! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! x: x y: y put: number fields at: (self offsetX: x y: y) put: number! ! !AntonMatrix methodsFor: 'private' stamp: 'ag 7/3/2014 10:44'! offsetX: x y: y ^ (y-1) * columns + x! ! !AntonMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 10:43'! initializeFields: f rows: r rows := r. (f size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. columns := f size / r. fields := f.! ! !AntonMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 10:18'! initializeRows: r columns: c rows := r. columns := c. fields := Array new: rows * columns.! ! !AntonMatrix methodsFor: 'math' stamp: 'ag 7/3/2014 10:30'! * other | result | (self columns = other rows and: [ self rows = other columns ]) ifFalse: [ ^ self error: 'Cannot multiply, wrong dimensions.' ]. result := AntonMatrix rows: self rows columns: other columns. (1 to: self rows) do: [ :row | (1 to: other columns) do: [ :column | | value | value := 0. (1 to: self columns) do: [ :i | value := value + ((self x: i y: row) * (other x: column y: i)) ]. result x: column y: row put: value ] ]. ^ result! ! !AntonMatrix methodsFor: 'printing' stamp: 'ag 7/3/2014 10:47'! printOn: s (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | s nextPutAll: (self x: column y: row) asString. s nextPutAll: ' ' ]. s nextPutAll: String cr ].! ! !AntonMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:01'! fillRandomFloats: generator | max | max := SmallInteger maxVal sqrt asInteger. 
self fill: [ :x :y | max atRandom: generator ].! ! !AntonMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:02'! fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." self fill: [ :x :y | generator next * 100 ].! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! AntonMatrix class instanceVariableNames: ''! !AntonMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:13'! benchFloats: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := AntonMatrix rows: r columns: c. b := AntonMatrix rows: r columns: c. a fillRandomFloats: generator. b fillRandomFloats: generator. mults timesRepeat: [ a * b ] ].! ! !AntonMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:03'! benchInts: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := AntonMatrix rows: r columns: c. b := AntonMatrix rows: r columns: c. a fillRandomInts: generator. b fillRandomInts: generator. mults timesRepeat: [ a * b ] ].! ! !AntonMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 10:35'! fields: fields rows: r ^ self basicNew initializeFields: fields rows: r! ! !AntonMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 10:19'! rows: r columns: c ^ self basicNew initializeRows: r columns: c; yourself! ! Object subclass: #AntonMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Anton'! Object subclass: #AntonMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Anton'! !AntonMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:19'! benchFloats AntonMatrix benchFloats: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! 
!AntonMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:18'! benchInts AntonMatrix benchInts: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! AntonMatrixBenchmark class instanceVariableNames: ''! !AntonMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:24'! config: spec | tokens nextInt | tokens := spec findTokens: ' '. nextInt := [ :default | (tokens ifEmpty: [ nil ] ifNotEmptyDo: #removeFirst) asInteger ifNil: [ default ] ]. NumOfRuns := nextInt value: 10. Mults := nextInt value: 100. Rows := nextInt value: 100. Cols := nextInt value: 100.! ! !AntonMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:20'! initialize super initialize. NumOfRuns := 10. Mults := 100. Cols := 100. Rows := 100.! ! AntonMatrixBenchmark initialize! ----End fileIn of C:\Dev\lang-smalltalk\Anton.st----! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:27'! benchMatrixInt: spec AntonMatrixBenchmark config: spec. ! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:28' prior: 49374034! benchMatrixInt: spec AntonMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'AntonMatrix' iterations: self benchmarkIterations! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:28'! benchMatrix: spec AntonMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'AntonMatrix' iterations: self benchmarkIterations! ! SystemOrganization renameCategory: #Anton toBe: #'Matrix-Benchmarks'! Smalltalk renameClassNamed: #AntonMatrix as: #BenchMatrix! Object subclass: #SimpleMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Matrix-Benchmarks'! Smalltalk removeClassNamed: #SimpleMatrixBenchmark! Smalltalk renameClassNamed: #AntonMatrixBenchmark as: #SimpleMatrixBenchmark! SmallInteger removeSelector: #benchMatrixInt:! 
!SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:30' prior: 49374406! benchMatrix: spec SimpleMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'SimpleMatrixBenchmark' iterations: self benchmarkIterations! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/3/2014 11:31' prior: 49367383! allBenchmarks ^ { CPBAStarBenchmark. CPBBinaryTreeBenchmark. CPBBlowfishSuite. CPBChameneosBenchmark. CPBDeltaBlueBenchmark. CPBMandelbrotBenchmarkSuite. CPBNBodyBenchmark. "CPBPolymorphyBenchmark." "Commented out because it compiled code in setup." CPBRichardsBenchmark. CPBSplayTreeBenchmark. SimpleMatrixBenchmark. }! ! ----QUIT----{3 July 2014 . 11:32:10 am} Squeak4.5-noBitBlt.image priorSource: 15813551! ----STARTUP----{3 July 2014 . 11:34:49 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! SMarkSuite subclass: #SimpleMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Matrix-Benchmarks'! !SimpleMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:37' prior: 49372902! benchFloats BenchMatrix benchFloats: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! !SimpleMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:37' prior: 49373080! benchInts BenchMatrix benchInts: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! SimpleMatrixBenchmark config: '5 5 5 5'! Benchmarks runMatching: 'SimpleMatrixBenchmark' iterations: 1! !SimpleMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:38' prior: 49373773! initialize super initialize. NumOfRuns := 10. Mults := 10. Cols := 10. Rows := 10.! ! self initialize! !SimpleMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:39' prior: 49376651! initialize "self initialize" super initialize. NumOfRuns := 10. Mults := 10. Cols := 10. Rows := 10.! ! ----QUIT----{3 July 2014 . 11:39:08 am} Squeak4.5-noBitBlt.image priorSource: 15821257! 
----STARTUP----{3 July 2014 . 11:48:06 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !BenchMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:49' prior: 49371447! benchFloats: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := BenchMatrix rows: r columns: c. b := BenchMatrix rows: c columns: r. a fillRandomFloats: generator. b fillRandomFloats: generator. mults timesRepeat: [ a * b ] ].! ! !BenchMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:49' prior: 49371861! benchInts: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := BenchMatrix rows: r columns: c. b := BenchMatrix rows: c columns: r. a fillRandomInts: generator. b fillRandomInts: generator. mults timesRepeat: [ a * b ] ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 11:51' prior: 49368902! fieldsDo: block (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | block value: column value: row ] ].! ! 1 benchMatrix: '1 10 100 10'! 1 benchMatrix: '1 10 100 10'! 1 benchMatrix: '1 10 10 100'! 1 benchMatrix: '1 10 10 1000'! ----QUIT----{3 July 2014 . 11:51:44 am} Squeak4.5-noBitBlt.image priorSource: 15822543! ----STARTUP----{3 July 2014 . 12:30:20 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! Array variableSubclass: #BenchMatrix instanceVariableNames: 'columns rows' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! BenchMatrix removeSelector: #at:! BenchMatrix removeSelector: #at:put:! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:31' prior: 49368813! columns ^ self size / rows! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:31' prior: 49379008! 
columns ^ self size / rows! ! 11/2! 11//2! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:33' prior: 49378103! fieldsDo: block (1 to: self size) do: [ :i | block value: i \\ rows value: i // rows ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49379251! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows value: i // rows ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49369340! x: x y: y ^ self at: (self offsetX: x y: y)! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49369457! x: x y: y put: number self at: (self offsetX: x y: y) put: number! ! a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2! b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3! a! a! a rows! a columns! BenchMatrix removeSelector: #initializeRows:columns:! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:36'! initializeRows: r rows := r.! ! BenchMatrix removeSelector: #initializeFields:rows:! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:37' prior: 49372274! fields: fields rows: r | columns f rows | rows := r. (f size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. columns := f size / r. " fields := f." ^ self basicNew initializeFields: fields rows: r! ! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:37'! rows: r rows := r.! ! BenchMatrix removeSelector: #initializeRows:! Array withAll: #(1 2 3)! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:39' prior: 49380248! fields: fields rows: r (fields size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. ^ (self withAll: fields) rows: r; yourself! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:39' prior: 49379122! columns ^ self size // rows! ! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:40' prior: 49372433! rows: r columns: c ^ (self new: r * c) rows: r; fillZeros; yourself! ! 
!BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:40'! fillZeros self fill: [ :x :y | 0 ].! ! i! i \\ rows! i //rows! rows! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:42' prior: 49379428! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:43' prior: 49381404! fieldsDo: block 0 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! x := BenchMatrix rows: 4 columns: 3.! x! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:44' prior: 49381705! fieldsDo: block 0 to: self size + 1 do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! o size! x size! o size! o asSet size! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:46' prior: 49382006! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! o size! o ! 1 \\ 4! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:46' prior: 49382353! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows value: i // rows + 1 ].! ! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o size! o ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:50' prior: 49382634! fieldsDo: block | columns | columns := self columns. 1 to: self size do: [ :i | block value: i \\ columns value: i // columns + 1 ].! ! Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows columns' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! 
Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows columns' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:51' prior: 49382898! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ columns value: i // columns + 1 ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:51' prior: 49380969! columns ^ columns! ! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:51' prior: 49380543! rows: r rows := r. columns := self size // r.! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new. x fieldsDo: [ :x :y | o add: x -> y ]. ! ox! o! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:52' prior: 49381247! fillZeros self atAllPut: 0.! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:53' prior: 49383432! fieldsDo: block 0 to: self size - 1 do: [ :i | block value: i \\ columns + 1 value: i // columns + 1 ].! ! o := OrderedCollection new. x fieldsDo: [ :x :y | o add: x -> y ].! o size! o! a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2! b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3! !BenchMatrix methodsFor: 'math' stamp: 'ag 7/3/2014 12:55' prior: 49370092! * other | result | (self columns = other rows and: [ self rows = other columns ]) ifFalse: [ ^ self error: 'Cannot multiply, wrong dimensions.' ]. result := BenchMatrix rows: self rows columns: other columns. (1 to: self rows) do: [ :row | (1 to: other columns) do: [ :column | | value | value := 0. (1 to: self columns) do: [ :i | value := value + ((self x: i y: row) * (other x: column y: i)) ]. result x: column y: row put: value ] ]. ^ result! ! a * b! self assert: (Array withAll: (a * b)) = #(7 8 9 2)! BenchMatrix class organization addCategory: #test! !BenchMatrix class methodsFor: 'test' stamp: 'ag 7/3/2014 12:57'! tinyTest "self tinyTest" | a b | a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2. 
b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3. self assert: (Array withAll: (a * b)) = #(7 8 9 2).! ! self tinyTest! 1 benchMatrix: '1 3 5 5'! 1 benchMatrix: '1 10 5 5'! ----QUIT----{3 July 2014 . 12:58:52 pm} Squeak4.5-noBitBlt.image priorSource: 15823926! ----STARTUP----{3 July 2014 . 1:05:04 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator.! (a collect: #class) asSet! (b collect: #class) asSet! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator.! (b collect: #class) asSet! (a collect: #class) asSet! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:08' prior: 49371100! fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." | max | max := SmallInteger maxVal sqrt asInteger. self fill: [ :x :y | max atRandom: generator ]. ! ! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:08' prior: 49370897! fillRandomFloats: generator self fill: [ :x :y | generator next * 100 ].! ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator. (a collect: #class) asSet ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator. (b collect: #class) asSet ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator. ! c := a * b! (c collect: #class) asSet! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:09' prior: 49386143! 
fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." | max | max := 1000. self fill: [ :x :y | max atRandom: generator ]. ! ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator. c := a * b. (c collect: #class) asSet! ----QUIT----{3 July 2014 . 1:09:37 pm} Squeak4.5-noBitBlt.image priorSource: 15830973! \ No newline at end of file + ^ self! ! ----QUIT----{2 April 2014 . 11:59:41 am} Squeak4.5-noBitBlt.image priorSource: 15812182! ----STARTUP----{3 July 2014 . 11:14:14 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! SystemOrganization addCategory: #Anton! Object subclass: #AntonMatrix instanceVariableNames: 'fields columns rows' classVariableNames: '' poolDictionaries: '' category: 'Anton'! Object subclass: #AntonMatrix instanceVariableNames: 'fields columns rows' classVariableNames: '' poolDictionaries: '' category: 'Anton'! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! at: point ^ self x: point x y: point y! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:16'! at: point put: number ^ self x: point x y: point y put: number! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:12'! columns ^ columns! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:55'! fieldsDo: block (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | block value: row value: column ] ].! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:56'! fill: block self fieldsDo: [ :x :y | self x: x y: y put: (block value: x value: y) ].! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:12'! rows ^ rows! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! x: x y: y ^ fields at: (self offsetX: x y: y)! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! 
x: x y: y put: number fields at: (self offsetX: x y: y) put: number! ! !AntonMatrix methodsFor: 'private' stamp: 'ag 7/3/2014 10:44'! offsetX: x y: y ^ (y-1) * columns + x! ! !AntonMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 10:43'! initializeFields: f rows: r rows := r. (f size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. columns := f size / r. fields := f.! ! !AntonMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 10:18'! initializeRows: r columns: c rows := r. columns := c. fields := Array new: rows * columns.! ! !AntonMatrix methodsFor: 'math' stamp: 'ag 7/3/2014 10:30'! * other | result | (self columns = other rows and: [ self rows = other columns ]) ifFalse: [ ^ self error: 'Cannot multiply, wrong dimensions.' ]. result := AntonMatrix rows: self rows columns: other columns. (1 to: self rows) do: [ :row | (1 to: other columns) do: [ :column | | value | value := 0. (1 to: self columns) do: [ :i | value := value + ((self x: i y: row) * (other x: column y: i)) ]. result x: column y: row put: value ] ]. ^ result! ! !AntonMatrix methodsFor: 'printing' stamp: 'ag 7/3/2014 10:47'! printOn: s (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | s nextPutAll: (self x: column y: row) asString. s nextPutAll: ' ' ]. s nextPutAll: String cr ].! ! !AntonMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:01'! fillRandomFloats: generator | max | max := SmallInteger maxVal sqrt asInteger. self fill: [ :x :y | max atRandom: generator ].! ! !AntonMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:02'! fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." self fill: [ :x :y | generator next * 100 ].! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! AntonMatrix class instanceVariableNames: ''! !AntonMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:13'! 
benchFloats: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := AntonMatrix rows: r columns: c. b := AntonMatrix rows: r columns: c. a fillRandomFloats: generator. b fillRandomFloats: generator. mults timesRepeat: [ a * b ] ].! ! !AntonMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:03'! benchInts: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := AntonMatrix rows: r columns: c. b := AntonMatrix rows: r columns: c. a fillRandomInts: generator. b fillRandomInts: generator. mults timesRepeat: [ a * b ] ].! ! !AntonMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 10:35'! fields: fields rows: r ^ self basicNew initializeFields: fields rows: r! ! !AntonMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 10:19'! rows: r columns: c ^ self basicNew initializeRows: r columns: c; yourself! ! Object subclass: #AntonMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Anton'! Object subclass: #AntonMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Anton'! !AntonMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:19'! benchFloats AntonMatrix benchFloats: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! !AntonMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:18'! benchInts AntonMatrix benchInts: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! AntonMatrixBenchmark class instanceVariableNames: ''! !AntonMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:24'! config: spec | tokens nextInt | tokens := spec findTokens: ' '. 
nextInt := [ :default | (tokens ifEmpty: [ nil ] ifNotEmptyDo: #removeFirst) asInteger ifNil: [ default ] ]. NumOfRuns := nextInt value: 10. Mults := nextInt value: 100. Rows := nextInt value: 100. Cols := nextInt value: 100.! ! !AntonMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:20'! initialize super initialize. NumOfRuns := 10. Mults := 100. Cols := 100. Rows := 100.! ! AntonMatrixBenchmark initialize! ----End fileIn of C:\Dev\lang-smalltalk\Anton.st----! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:27'! benchMatrixInt: spec AntonMatrixBenchmark config: spec. ! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:28' prior: 49374034! benchMatrixInt: spec AntonMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'AntonMatrix' iterations: self benchmarkIterations! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:28'! benchMatrix: spec AntonMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'AntonMatrix' iterations: self benchmarkIterations! ! SystemOrganization renameCategory: #Anton toBe: #'Matrix-Benchmarks'! Smalltalk renameClassNamed: #AntonMatrix as: #BenchMatrix! Object subclass: #SimpleMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Matrix-Benchmarks'! Smalltalk removeClassNamed: #SimpleMatrixBenchmark! Smalltalk renameClassNamed: #AntonMatrixBenchmark as: #SimpleMatrixBenchmark! SmallInteger removeSelector: #benchMatrixInt:! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:30' prior: 49374406! benchMatrix: spec SimpleMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'SimpleMatrixBenchmark' iterations: self benchmarkIterations! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/3/2014 11:31' prior: 49367383! allBenchmarks ^ { CPBAStarBenchmark. CPBBinaryTreeBenchmark. CPBBlowfishSuite. CPBChameneosBenchmark. CPBDeltaBlueBenchmark. 
CPBMandelbrotBenchmarkSuite. CPBNBodyBenchmark. "CPBPolymorphyBenchmark." "Commented out because it compiled code in setup." CPBRichardsBenchmark. CPBSplayTreeBenchmark. SimpleMatrixBenchmark. }! ! ----QUIT----{3 July 2014 . 11:32:10 am} Squeak4.5-noBitBlt.image priorSource: 15813551! ----STARTUP----{3 July 2014 . 11:34:49 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! SMarkSuite subclass: #SimpleMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Matrix-Benchmarks'! !SimpleMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:37' prior: 49372902! benchFloats BenchMatrix benchFloats: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! !SimpleMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:37' prior: 49373080! benchInts BenchMatrix benchInts: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! SimpleMatrixBenchmark config: '5 5 5 5'! Benchmarks runMatching: 'SimpleMatrixBenchmark' iterations: 1! !SimpleMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:38' prior: 49373773! initialize super initialize. NumOfRuns := 10. Mults := 10. Cols := 10. Rows := 10.! ! self initialize! !SimpleMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:39' prior: 49376651! initialize "self initialize" super initialize. NumOfRuns := 10. Mults := 10. Cols := 10. Rows := 10.! ! ----QUIT----{3 July 2014 . 11:39:08 am} Squeak4.5-noBitBlt.image priorSource: 15821257! ----STARTUP----{3 July 2014 . 11:48:06 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !BenchMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:49' prior: 49371447! benchFloats: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := BenchMatrix rows: r columns: c. b := BenchMatrix rows: c columns: r. a fillRandomFloats: generator. 
b fillRandomFloats: generator. mults timesRepeat: [ a * b ] ].! ! !BenchMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:49' prior: 49371861! benchInts: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := BenchMatrix rows: r columns: c. b := BenchMatrix rows: c columns: r. a fillRandomInts: generator. b fillRandomInts: generator. mults timesRepeat: [ a * b ] ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 11:51' prior: 49368902! fieldsDo: block (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | block value: column value: row ] ].! ! 1 benchMatrix: '1 10 100 10'! 1 benchMatrix: '1 10 100 10'! 1 benchMatrix: '1 10 10 100'! 1 benchMatrix: '1 10 10 1000'! ----QUIT----{3 July 2014 . 11:51:44 am} Squeak4.5-noBitBlt.image priorSource: 15822543! ----STARTUP----{3 July 2014 . 12:30:20 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! Array variableSubclass: #BenchMatrix instanceVariableNames: 'columns rows' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! BenchMatrix removeSelector: #at:! BenchMatrix removeSelector: #at:put:! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:31' prior: 49368813! columns ^ self size / rows! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:31' prior: 49379008! columns ^ self size / rows! ! 11/2! 11//2! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:33' prior: 49378103! fieldsDo: block (1 to: self size) do: [ :i | block value: i \\ rows value: i // rows ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49379251! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows value: i // rows ].! ! 
!BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49369340! x: x y: y ^ self at: (self offsetX: x y: y)! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49369457! x: x y: y put: number self at: (self offsetX: x y: y) put: number! ! a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2! b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3! a! a! a rows! a columns! BenchMatrix removeSelector: #initializeRows:columns:! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:36'! initializeRows: r rows := r.! ! BenchMatrix removeSelector: #initializeFields:rows:! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:37' prior: 49372274! fields: fields rows: r | columns f rows | rows := r. (f size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. columns := f size / r. " fields := f." ^ self basicNew initializeFields: fields rows: r! ! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:37'! rows: r rows := r.! ! BenchMatrix removeSelector: #initializeRows:! Array withAll: #(1 2 3)! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:39' prior: 49380248! fields: fields rows: r (fields size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. ^ (self withAll: fields) rows: r; yourself! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:39' prior: 49379122! columns ^ self size // rows! ! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:40' prior: 49372433! rows: r columns: c ^ (self new: r * c) rows: r; fillZeros; yourself! ! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:40'! fillZeros self fill: [ :x :y | 0 ].! ! i! i \\ rows! i //rows! rows! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:42' prior: 49379428! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new.! 
x fieldsDo: [ :x :y | o add: x -> y ].! o! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:43' prior: 49381404! fieldsDo: block 0 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! x := BenchMatrix rows: 4 columns: 3.! x! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:44' prior: 49381705! fieldsDo: block 0 to: self size + 1 do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! o size! x size! o size! o asSet size! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:46' prior: 49382006! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! o size! o ! 1 \\ 4! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:46' prior: 49382353! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows value: i // rows + 1 ].! ! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o size! o ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:50' prior: 49382634! fieldsDo: block | columns | columns := self columns. 1 to: self size do: [ :i | block value: i \\ columns value: i // columns + 1 ].! ! Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows columns' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows columns' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:51' prior: 49382898! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ columns value: i // columns + 1 ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:51' prior: 49380969! columns ^ columns! ! 
!BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:51' prior: 49380543! rows: r rows := r. columns := self size // r.! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new. x fieldsDo: [ :x :y | o add: x -> y ]. ! ox! o! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:52' prior: 49381247! fillZeros self atAllPut: 0.! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:53' prior: 49383432! fieldsDo: block 0 to: self size - 1 do: [ :i | block value: i \\ columns + 1 value: i // columns + 1 ].! ! o := OrderedCollection new. x fieldsDo: [ :x :y | o add: x -> y ].! o size! o! a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2! b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3! !BenchMatrix methodsFor: 'math' stamp: 'ag 7/3/2014 12:55' prior: 49370092! * other | result | (self columns = other rows and: [ self rows = other columns ]) ifFalse: [ ^ self error: 'Cannot multiply, wrong dimensions.' ]. result := BenchMatrix rows: self rows columns: other columns. (1 to: self rows) do: [ :row | (1 to: other columns) do: [ :column | | value | value := 0. (1 to: self columns) do: [ :i | value := value + ((self x: i y: row) * (other x: column y: i)) ]. result x: column y: row put: value ] ]. ^ result! ! a * b! self assert: (Array withAll: (a * b)) = #(7 8 9 2)! BenchMatrix class organization addCategory: #test! !BenchMatrix class methodsFor: 'test' stamp: 'ag 7/3/2014 12:57'! tinyTest "self tinyTest" | a b | a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2. b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3. self assert: (Array withAll: (a * b)) = #(7 8 9 2).! ! self tinyTest! 1 benchMatrix: '1 3 5 5'! 1 benchMatrix: '1 10 5 5'! ----QUIT----{3 July 2014 . 12:58:52 pm} Squeak4.5-noBitBlt.image priorSource: 15823926! ----STARTUP----{3 July 2014 . 1:05:04 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. 
a fillRandomInts: generator. b fillRandomInts: generator.! (a collect: #class) asSet! (b collect: #class) asSet! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator.! (b collect: #class) asSet! (a collect: #class) asSet! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:08' prior: 49371100! fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." | max | max := SmallInteger maxVal sqrt asInteger. self fill: [ :x :y | max atRandom: generator ]. ! ! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:08' prior: 49370897! fillRandomFloats: generator self fill: [ :x :y | generator next * 100 ].! ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator. (a collect: #class) asSet ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator. (b collect: #class) asSet ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator. ! c := a * b! (c collect: #class) asSet! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:09' prior: 49386143! fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." | max | max := 1000. self fill: [ :x :y | max atRandom: generator ]. ! ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator. c := a * b. (c collect: #class) asSet! ----QUIT----{3 July 2014 . 1:09:37 pm} Squeak4.5-noBitBlt.image priorSource: 15830973! 
----STARTUP----{3 July 2014 . 8:26:43 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator.! c := a * b.! (c collect: #class) asSet! c! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 20:27'! testMatrix ! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 20:28' prior: 49388134! testMatrix | a b c generator | a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator. c := a * b. ^ (c collect: #class) asSet asString! ! 5 testMatrix! ----SNAPSHOT----{3 July 2014 . 8:28:40 pm} Squeak4.5-noBitBlt.1.image priorSource: 15833215! ----QUIT----{3 July 2014 . 8:28:49 pm} Squeak4.5-noBitBlt.1.image priorSource: 15834093! ----STARTUP----{3 July 2014 . 9:02:43 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 21:03' prior: 49383727! rows: r rows := r asFloat. columns := (self size // r) asFloat.! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 21:03' prior: 49369257! rows ^ rows asInteger! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 21:03' prior: 49383617! columns ^ columns asInteger! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 21:04' prior: 49384103! fieldsDo: block 0 to: self size - 1 do: [ :i | block value: i \\ self columns + 1 value: i // self columns + 1 ].! ! ----QUIT----{3 July 2014 . 9:04:33 pm} Squeak4.5-noBitBlt.image priorSource: 15834187! 
\ No newline at end of file diff --git a/images/Squeak4.5-noBitBlt.image b/images/Squeak4.5-noBitBlt.image index 901620a8a8d4d194528f72a369392610813925a4..46e8d064f5b9b3ded5cfa05ee2ff1651286c82e7 GIT binary patch [cut] From noreply at buildbot.pypy.org Mon Jul 7 13:17:09 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 7 Jul 2014 13:17:09 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: WORK IN PROGRESS. This compiles, but segfaults. Message-ID: <20140707111709.EB7791C024A@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r868:3fe7264ab317 Date: 2014-07-07 13:15 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3fe7264ab317/ Log: WORK IN PROGRESS. This compiles, but segfaults. - Refactoring: maintaining 2 backreferences to the sender in ContextPartObjects (direct_sender and virtual_sender). As many contexts as possible should use virtual_sender; when it becomes necessary, direct_sender will be set instead (should break performance). Context objects are now always created WITHOUT a sender reference; it is set and deleted by the interpreter. Required small changes in lots of places. - Made ProcessWrapper more consistent and removed 2 duplicated methods. - Fixed tests. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -68,7 +68,9 @@ while True: assert self.current_stack_depth == 0 # Need to save s_sender, loop_bytecodes will nil this on return - s_sender = s_new_context.s_sender() + # Virtual references are not allowed here, and neither are "fresh" contexts (except for the toplevel one). 
+ assert s_new_context.virtual_sender is jit.vref_None + s_sender = s_new_context.direct_sender try: self.loop_bytecodes(s_new_context) raise Exception("loop_bytecodes left without raising...") @@ -79,7 +81,7 @@ except Return, nlr: s_new_context = s_sender while s_new_context is not nlr.s_target_context: - s_sender = s_new_context.s_sender() + s_sender = s_new_context.direct_sender s_new_context._activate_unwind_context(self) s_new_context = s_sender s_new_context.push(nlr.value) @@ -88,7 +90,7 @@ print "====== Switched process from: %s" % s_new_context.short_str() print "====== to: %s " % p.s_new_context.short_str() s_new_context = p.s_new_context - + def loop_bytecodes(self, s_context, may_context_switch=True): old_pc = 0 if not jit.we_are_jitted() and may_context_switch: @@ -98,6 +100,7 @@ pc = s_context.pc() if pc < old_pc: if jit.we_are_jitted(): + # Do the interrupt-check at the end of a loop, don't interrupt loops midway. self.jitted_check_for_interrupt(s_context) self.jit_driver.can_enter_jit( pc=pc, self=self, method=method, @@ -115,17 +118,30 @@ else: s_context.push(nlr.value) - # This is just a wrapper around loop_bytecodes that handles the stack overflow protection mechanism - def stack_frame(self, s_new_frame, may_context_switch=True): - if self.max_stack_depth > 0: - if self.current_stack_depth >= self.max_stack_depth: - raise StackOverflow(s_new_frame) - - self.current_stack_depth += 1 + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame + # and handles the stack overflow protection mechanism. + def stack_frame(self, s_frame, s_sender, may_context_switch=True): + assert s_frame.virtual_sender is jit.vref_None try: - self.loop_bytecodes(s_new_frame, may_context_switch) + # Enter the context - store a virtual reference back to the sender + # Non-fresh contexts can happen, e.g. when activating a stored BlockContext. + # The same frame object must not pass through here recursively! 
+ if s_frame.is_fresh(): + s_frame.virtual_sender = jit.virtual_ref(s_sender) + + self.current_stack_depth += 1 + if self.max_stack_depth > 0: + if self.current_stack_depth >= self.max_stack_depth: + raise StackOverflow(s_frame) + + # Now (continue to) execute the context bytecodes + self.loop_bytecodes(s_frame, may_context_switch) finally: self.current_stack_depth -= 1 + # Cleanly leave the context. This will finish the virtual sender-reference, if + # it is still there, which can happen in case of ProcessSwitch or StackOverflow; + # in case of a Return, this will already be handled while unwinding the stack. + s_frame.finish_virtual_sender() def step(self, context): bytecode = context.fetch_next_bytecode() @@ -177,7 +193,7 @@ self.next_wakeup_tick = 0 semaphore = self.space.objtable["w_timerSemaphore"] if not semaphore.is_nil(self.space): - wrapper.SemaphoreWrapper(self.space, semaphore).signal(s_frame.w_self()) + wrapper.SemaphoreWrapper(self.space, semaphore).signal(s_frame) # We have no finalization process, so far. # We do not support external semaphores. # In cog, the method to add such a semaphore is only called in GC. 
@@ -195,7 +211,12 @@ except ReturnFromTopLevel, e: return e.object - def perform(self, w_receiver, selector, *arguments_w): + def perform(self, w_receiver, selector, *w_arguments): + s_frame = self.create_toplevel_context(w_receiver, selector, *w_arguments) + self.interrupt_check_counter = self.interrupt_counter_size + return self.interpret_toplevel(s_frame.w_self()) + + def create_toplevel_context(self, w_receiver, selector, *w_arguments): if isinstance(selector, str): if selector == "asSymbol": w_selector = self.image.w_asSymbol @@ -207,15 +228,13 @@ w_method = model.W_CompiledMethod(self.space, header=512) w_method.literalatput0(self.space, 1, w_selector) - assert len(arguments_w) <= 7 - w_method.setbytes([chr(131), chr(len(arguments_w) << 5 + 0), chr(124)]) #returnTopFromMethodBytecode + assert len(w_arguments) <= 7 + w_method.setbytes([chr(131), chr(len(w_arguments) << 5 + 0), chr(124)]) #returnTopFromMethodBytecode w_method.set_lookup_class_and_name(w_receiver.getclass(self.space), "Interpreter.perform") - s_frame = MethodContextShadow(self.space, None, w_method, w_receiver, []) + s_frame = MethodContextShadow(self.space, w_method=w_method, w_receiver=w_receiver) s_frame.push(w_receiver) - s_frame.push_all(list(arguments_w)) - - self.interrupt_check_counter = self.interrupt_counter_size - return self.interpret_toplevel(s_frame.w_self()) + s_frame.push_all(list(w_arguments)) + return s_frame def padding(self, symbol=' '): return symbol * self.current_stack_depth @@ -233,8 +252,7 @@ class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave - the current context. 
The current pc is required in order to update - the context object that we are leaving.""" + the current context.""" _attrs_ = ["s_new_context"] def __init__(self, s_new_context): self.s_new_context = s_new_context @@ -528,7 +546,7 @@ s_compiledin.s_superclass()) def _sendSelector(self, w_selector, argcount, interp, - receiver, receiverclassshadow): + receiver, receiverclassshadow, w_arguments=None): assert argcount >= 0 try: w_method = receiverclassshadow.lookup(w_selector) @@ -537,19 +555,22 @@ code = w_method.primitive() if code: + if w_arguments: + self.push_all(w_arguments) try: return self._call_primitive(code, interp, argcount, w_method, w_selector) except primitives.PrimitiveFailedError: pass # ignore this error and fall back to the Smalltalk version - arguments = self.pop_and_return_n(argcount) - s_frame = w_method.create_frame(interp.space, receiver, arguments, self) + if not w_arguments: + w_arguments = self.pop_and_return_n(argcount) + s_frame = w_method.create_frame(interp.space, receiver, w_arguments) self.pop() # receiver # ###################################################################### if interp.trace: print interp.padding() + s_frame.short_str() - return interp.stack_frame(s_frame) + return interp.stack_frame(s_frame, self) @objectmodel.specialize.arg(1) def _sendSelfSelectorSpecial(self, selector, numargs, interp): @@ -560,7 +581,7 @@ w_special_selector = self.space.objtable["w_" + special_selector] s_class = receiver.class_shadow(self.space) w_method = s_class.lookup(w_special_selector) - s_frame = w_method.create_frame(interp.space, receiver, w_args, self) + s_frame = w_method.create_frame(interp.space, receiver, w_args) # ###################################################################### if interp.trace: @@ -568,7 +589,7 @@ if not objectmodel.we_are_translated(): import pdb; pdb.set_trace() - return interp.stack_frame(s_frame) + return interp.stack_frame(s_frame, self) def _doesNotUnderstand(self, w_selector, argcount, interp, 
receiver): arguments = self.pop_and_return_n(argcount) diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1411,15 +1411,15 @@ def is_array_object(self): return True - - def create_frame(self, space, receiver, arguments, sender = None): + + def create_frame(self, space, receiver, arguments=[]): from spyvm.shadow import MethodContextShadow assert len(arguments) == self.argsize - return MethodContextShadow(space, None, self, receiver, arguments, sender) + return MethodContextShadow(space, w_method=self, w_receiver=receiver, arguments=arguments) # === Printing === - def guess_classname (self): + def guess_classname(self): return "CompiledMethod" def str_content(self): diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -197,11 +197,6 @@ elif isinstance(w_v, model.W_SmallInteger): return float(w_v.value) raise UnwrappingError() - def unwrap_pointersobject(self, w_v): - if not isinstance(w_v, model.W_PointersObject): - raise UnwrappingError() - return w_v - @jit.look_inside_iff(lambda self, w_array: jit.isconstant(w_array.size())) def unwrap_array(self, w_array): # Check that our argument has pointers format and the class: diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -10,6 +10,10 @@ from rpython.rlib import rarithmetic, rfloat, unroll, jit +def assert_class(interp, w_obj, w_class): + if not w_obj.getclass(interp.space).is_same_object(w_class): + raise PrimitiveFailedError() + def assert_bounds(n0, minimum, maximum): if not minimum <= n0 < maximum: raise PrimitiveFailedError() @@ -100,17 +104,17 @@ if unwrap_spec is None: def wrapped(interp, s_frame, argument_count_m1, w_method=None): if compiled_method: - w_result = func(interp, s_frame, argument_count_m1, w_method) + result = func(interp, s_frame, argument_count_m1, w_method) else: - w_result = func(interp, s_frame, argument_count_m1) + result = func(interp, s_frame, 
argument_count_m1) if result_is_new_frame: - return interp.stack_frame(w_result, may_context_switch) + return interp.stack_frame(result, s_frame, may_context_switch) if not no_result: - assert w_result is not None - s_frame.push(w_result) + assert result is not None + s_frame.push(result) else: len_unwrap_spec = len(unwrap_spec) - assert (len_unwrap_spec + 2 == len(inspect.getargspec(func)[0])), "wrong number of arguments" + assert len_unwrap_spec + 2 == len(inspect.getargspec(func)[0]), "wrong number of arguments" unrolling_unwrap_spec = unrolling_iterable(enumerate(unwrap_spec)) def wrapped(interp, s_frame, argument_count_m1, w_method=None): argument_count = argument_count_m1 + 1 # to account for the rcvr @@ -153,7 +157,7 @@ if clean_stack: # happens only if no exception occurs! s_frame.pop_n(len_unwrap_spec) - return interp.stack_frame(s_new_frame, may_context_switch) + return interp.stack_frame(s_new_frame, s_frame, may_context_switch) else: w_result = func(interp, s_frame, *args) # After calling primitive, reload context-shadow in case it @@ -379,7 +383,6 @@ print ("%s" % s_frame.peek(1)).replace('\r', '\n') if isinstance(w_message, model.W_PointersObject): print ('%s' % w_message.fetch_all(s_frame.space)).replace('\r', '\n') - # raise Exit('Probably Debugger called...') raise PrimitiveFailedError() # ___________________________________________________________________________ @@ -593,8 +596,7 @@ @expose_primitive(NEW_METHOD, unwrap_spec=[object, int, int]) def func(interp, s_frame, w_class, bytecount, header): # We ignore w_class because W_CompiledMethod is special - w_method = model.W_CompiledMethod(s_frame.space, bytecount, header) - return w_method + return model.W_CompiledMethod(interp.space, bytecount, header) # ___________________________________________________________________________ # I/O Primitives @@ -965,15 +967,14 @@ raise PrimitiveFailedError @expose_primitive(LOW_SPACE_SEMAPHORE, unwrap_spec=[object, object]) -def func(interp, s_frame, 
w_reciver, i): +def func(interp, s_frame, w_receiver, i): # dont know when the space runs out - return w_reciver - + return w_receiver @expose_primitive(SIGNAL_AT_BYTES_LEFT, unwrap_spec=[object, int]) -def func(interp, s_frame, w_reciver, i): +def func(interp, s_frame, w_receiver, i): # dont know when the space runs out - return w_reciver + return w_receiver @expose_primitive(DEFER_UPDATES, unwrap_spec=[object, bool]) def func(interp, s_frame, w_receiver, flag): @@ -1287,19 +1288,8 @@ # The block bytecodes are stored inline: so we skip past the # byteodes to invoke this primitive to find them (hence +2) initialip = s_frame.pc() + 2 - s_new_context = shadow.BlockContextShadow( - interp.space, None, w_method_context, argcnt, initialip) + s_new_context = shadow.BlockContextShadow(interp.space, None, w_method_context, argcnt, initialip) return s_new_context.w_self() - -def finalize_block_ctx(interp, s_block_ctx, s_frame): - from spyvm.error import SenderChainManipulation - # Set some fields - s_block_ctx.store_pc(s_block_ctx.initialip()) - try: - s_block_ctx.store_s_sender(s_frame) - except SenderChainManipulation, e: - assert e.s_context == s_block_ctx - return s_block_ctx @expose_primitive(VALUE, result_is_new_frame=True) def func(interp, s_frame, argument_count): @@ -1333,7 +1323,8 @@ s_block_ctx.push_all(block_args) s_frame.pop() - return finalize_block_ctx(interp, s_block_ctx, s_frame) + s_block_ctx.reset_pc() + return s_block_ctx @expose_primitive(VALUE_WITH_ARGS, unwrap_spec=[object, list], result_is_new_frame=True) @@ -1352,7 +1343,8 @@ # XXX Check original logic. 
Image does not test this anyway # because falls back to value + internal implementation - return finalize_block_ctx(interp, s_block_ctx, s_frame) + s_block_ctx.reset_pc() + return s_block_ctx @expose_primitive(PERFORM) def func(interp, s_frame, argcount): @@ -1361,72 +1353,49 @@ @expose_primitive(PERFORM_WITH_ARGS, unwrap_spec=[object, object, list], no_result=True, clean_stack=False) -def func(interp, s_frame, w_rcvr, w_selector, args_w): +def func(interp, s_frame, w_rcvr, w_selector, w_arguments): from spyvm.shadow import MethodNotFound - argcount = len(args_w) s_frame.pop_n(2) # removing our arguments + + return s_frame._sendSelector(w_selector, len(w_arguments), interp, w_rcvr, + w_rcvr.class_shadow(interp.space), w_arguments=w_arguments) - try: - w_method = w_rcvr.class_shadow(interp.space).lookup(w_selector) - except MethodNotFound: - return s_frame._doesNotUnderstand(w_selector, argcount, interp, w_rcvr) - - code = w_method.primitive() - if code: - s_frame.push_all(args_w) - try: - return s_frame._call_primitive(code, interp, argcount, w_method, w_selector) - except PrimitiveFailedError: - pass # ignore this error and fall back to the Smalltalk version - s_new_frame = w_method.create_frame(interp.space, w_rcvr, args_w, s_frame) - s_frame.pop() - return interp.stack_frame(s_new_frame) - - at expose_primitive(WITH_ARGS_EXECUTE_METHOD, unwrap_spec=[object, list, object], no_result=True) + at expose_primitive(WITH_ARGS_EXECUTE_METHOD, + result_is_new_frame=True, unwrap_spec=[object, list, object]) def func(interp, s_frame, w_rcvr, args_w, w_cm): if not isinstance(w_cm, model.W_CompiledMethod): raise PrimitiveFailedError() code = w_cm.primitive() if code: raise PrimitiveFailedError("withArgs:executeMethod: not support with primitive method") - s_new_frame = w_cm.create_frame(interp.space, w_rcvr, args_w, s_frame) - return interp.stack_frame(s_new_frame) + return w_cm.create_frame(interp.space, w_rcvr, args_w) + + +# XXX we might want to disable the assert_class 
checks in the 4 primitives below @expose_primitive(SIGNAL, unwrap_spec=[object], clean_stack=False, no_result=True) def func(interp, s_frame, w_rcvr): - # XXX we might want to disable this check - if not w_rcvr.getclass(interp.space).is_same_object( - interp.space.w_Semaphore): - raise PrimitiveFailedError() - wrapper.SemaphoreWrapper(interp.space, w_rcvr).signal(s_frame.w_self()) + assert_class(interp, w_rcvr, interp.space.w_Semaphore) + wrapper.SemaphoreWrapper(interp.space, w_rcvr).signal(s_frame) @expose_primitive(WAIT, unwrap_spec=[object], clean_stack=False, no_result=True) def func(interp, s_frame, w_rcvr): - # XXX we might want to disable this check - if not w_rcvr.getclass(interp.space).is_same_object( - interp.space.w_Semaphore): - raise PrimitiveFailedError() - wrapper.SemaphoreWrapper(interp.space, w_rcvr).wait(s_frame.w_self()) + assert_class(interp, w_rcvr, interp.space.w_Semaphore) + wrapper.SemaphoreWrapper(interp.space, w_rcvr).wait(s_frame) - at expose_primitive(RESUME, unwrap_spec=[object], result_is_new_frame=True, clean_stack=False) + at expose_primitive(RESUME, unwrap_spec=[object], no_result=True, clean_stack=False) def func(interp, s_frame, w_rcvr): - # XXX we might want to disable this check - if not w_rcvr.getclass(interp.space).is_same_object( - interp.space.w_Process): - raise PrimitiveFailedError() - w_frame = wrapper.ProcessWrapper(interp.space, w_rcvr).resume(s_frame.w_self()) - w_frame = interp.space.unwrap_pointersobject(w_frame) - return w_frame.as_context_get_shadow(interp.space) + import pdb; pdb.set_trace() + assert_class(interp, w_rcvr, interp.space.w_Process) + wrapper.ProcessWrapper(interp.space, w_rcvr).resume(s_frame) - at expose_primitive(SUSPEND, unwrap_spec=[object], result_is_new_frame=True, clean_stack=False) + at expose_primitive(SUSPEND, unwrap_spec=[object], no_result=True, clean_stack=False) def func(interp, s_frame, w_rcvr): - # XXX we might want to disable this check - if not 
w_rcvr.getclass(interp.space).is_same_object( - interp.space.w_Process): - raise PrimitiveFailedError() - w_frame = wrapper.ProcessWrapper(interp.space, w_rcvr).suspend(s_frame.w_self()) - w_frame = interp.space.unwrap_pointersobject(w_frame) - return w_frame.as_context_get_shadow(interp.space) + import pdb; pdb.set_trace() + assert_class(interp, w_rcvr, interp.space.w_Process) + wrapper.ProcessWrapper(interp.space, w_rcvr).suspend(s_frame) + + @expose_primitive(FLUSH_CACHE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): @@ -1455,11 +1424,9 @@ return w_context -def activateClosure(interp, s_frame, w_block, args_w): +def activateClosure(interp, w_block, args_w): space = interp.space - if not w_block.getclass(space).is_same_object( - space.w_BlockClosure): - raise PrimitiveFailedError() + assert_class(interp, w_block, space.w_BlockClosure) block = wrapper.BlockClosureWrapper(space, w_block) if not block.numArgs() == len(args_w): raise PrimitiveFailedError() @@ -1470,7 +1437,7 @@ # additionally to the smalltalk implementation, this also pushes # args and copiedValues - s_new_frame = block.asContextWithSender(s_frame.w_self(), args_w) + s_new_frame = block.create_frame(args_w) w_closureMethod = s_new_frame.w_method() assert isinstance(w_closureMethod, model.W_CompiledMethod) @@ -1481,35 +1448,35 @@ @expose_primitive(CLOSURE_VALUE, unwrap_spec=[object], result_is_new_frame=True) def func(interp, s_frame, w_block_closure): - return activateClosure(interp, s_frame, w_block_closure, []) + return activateClosure(interp, w_block_closure, []) @expose_primitive(CLOSURE_VALUE_, unwrap_spec=[object, object], result_is_new_frame=True) def func(interp, s_frame, w_block_closure, w_a0): - return activateClosure(interp, s_frame, w_block_closure, [w_a0]) + return activateClosure(interp, w_block_closure, [w_a0]) @expose_primitive(CLOSURE_VALUE_VALUE, unwrap_spec=[object, object, object], result_is_new_frame=True) def func(interp, s_frame, w_block_closure, w_a0, w_a1): - 
return activateClosure(interp, s_frame, w_block_closure, [w_a0, w_a1]) + return activateClosure(interp, w_block_closure, [w_a0, w_a1]) @expose_primitive(CLOSURE_VALUE_VALUE_VALUE, unwrap_spec=[object, object, object, object], result_is_new_frame=True) def func(interp, s_frame, w_block_closure, w_a0, w_a1, w_a2): - return activateClosure(interp, s_frame, w_block_closure, [w_a0, w_a1, w_a2]) + return activateClosure(interp, w_block_closure, [w_a0, w_a1, w_a2]) @expose_primitive(CLOSURE_VALUE_VALUE_VALUE_VALUE, unwrap_spec=[object, object, object, object, object], result_is_new_frame=True) def func(interp, s_frame, w_block_closure, w_a0, w_a1, w_a2, w_a3): - return activateClosure(interp, s_frame, w_block_closure, [w_a0, w_a1, w_a2, w_a3]) + return activateClosure(interp, w_block_closure, [w_a0, w_a1, w_a2, w_a3]) @expose_primitive(CLOSURE_VALUE_WITH_ARGS, unwrap_spec=[object, list], result_is_new_frame=True) def func(interp, s_frame, w_block_closure, args_w): - return activateClosure(interp, s_frame, w_block_closure, args_w) + return activateClosure(interp, w_block_closure, args_w) @expose_primitive(CLOSURE_VALUE_NO_CONTEXT_SWITCH, unwrap_spec=[object], result_is_new_frame=True, may_context_switch=False) def func(interp, s_frame, w_block_closure): - return activateClosure(interp, s_frame, w_block_closure, []) + return activateClosure(interp, w_block_closure, []) @expose_primitive(CLOSURE_VALUE_NO_CONTEXT_SWITCH_, unwrap_spec=[object, object], result_is_new_frame=True, may_context_switch=False) def func(interp, s_frame, w_block_closure, w_a0): - return activateClosure(interp, s_frame, w_block_closure, [w_a0]) + return activateClosure(interp, w_block_closure, [w_a0]) # ___________________________________________________________________________ # Override the default primitive to give latitude to the VM in context management. 
diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -607,13 +607,14 @@ class ContextPartShadow(AbstractRedirectingShadow): __metaclass__ = extendabletype - _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', + _attrs_ = ['direct_sender', 'virtual_sender', + '_pc', '_temps_and_stack', '_stack_ptr', 'instances_w'] repr_classname = "ContextPartShadow" _virtualizable_ = [ - "_s_sender", "_pc", - "_temps_and_stack[*]", "_stack_ptr", + 'direct_sender', 'virtual_sender', + "_pc", "_temps_and_stack[*]", "_stack_ptr", "_w_self", "_w_self_size" ] @@ -621,7 +622,8 @@ # Initialization def __init__(self, space, w_self): - self._s_sender = None + self.direct_sender = None + self.virtual_sender = jit.vref_None AbstractRedirectingShadow.__init__(self, space, w_self) self.instances_w = {} @@ -671,7 +673,7 @@ if n0 == constants.CTXPART_SENDER_INDEX: assert isinstance(w_value, model.W_PointersObject) if w_value.is_nil(self.space): - self._s_sender = None + self.store_s_sender(None, raise_error=False) else: self.store_s_sender(w_value.as_context_get_shadow(self.space)) return @@ -690,19 +692,40 @@ raise error.WrapperException("Index in context out of bounds") # === Sender === + # There are two fields for the sender (virtual and direct). Only one of them is can be set at a time. + # As long as the frame object is virtualized, using the virtual reference should increase performance. + # As soon as a frame object is forced to the heap, the direct reference must be used. 
- def store_s_sender(self, s_sender): - assert s_sender is None or isinstance(s_sender, ContextPartShadow) - self._s_sender = s_sender - raise error.SenderChainManipulation(self) + def is_fresh(self): + return self.direct_sender is None and self.virtual_sender is jit.vref_None + + def finish_virtual_sender(self, save_direct_sender=True): + if self.virtual_sender is not jit.vref_None: + sender = self.virtual_sender() + jit.virtual_ref_finish(self.virtual_sender, sender) + self.virtual_sender = jit.vref_None + if save_direct_sender: + self.direct_sender = sender + + def store_s_sender(self, s_sender, raise_error=True): + # If we have a virtual back reference, we must finish it before storing the direct reference. + self.finish_virtual_sender(save_direct_sender=False) + self.direct_sender = s_sender + if raise_error: + raise error.SenderChainManipulation(self) def w_sender(self): - if self._s_sender is None: + sender = self.s_sender() + if sender is None: return self.space.w_nil - return self._s_sender.w_self() + return sender.w_self() def s_sender(self): - return self._s_sender + if self.direct_sender: + return self.direct_sender + else: + result = self.virtual_sender() + return result # === Stack Pointer === @@ -779,10 +802,7 @@ def mark_returned(self): self.store_pc(-1) - try: - self.store_s_sender(None) - except error.SenderChainManipulation, e: - assert self == e.s_context + self.store_s_sender(None, raise_error=False) def is_returned(self): return self.pc() == -1 and self.w_sender().is_nil(self.space) @@ -1042,7 +1062,10 @@ initialip = self.initialip() initialip += 1 + self.w_method().literalsize return self.space.wrap_int(initialip) - + + def reset_pc(self): + self.store_pc(self.initialip()) + def initialip(self): return self._initialip @@ -1079,7 +1102,7 @@ @jit.unroll_safe def __init__(self, space, w_self=None, w_method=None, w_receiver=None, - arguments=None, s_sender=None, closure=None, pc=0): + arguments=[], closure=None, pc=0): self = jit.hint(self, 
access_directly=True, fresh_virtualizable=True) ContextPartShadow.__init__(self, space, w_self) self.store_w_receiver(w_receiver) @@ -1095,18 +1118,9 @@ else: self._w_method = None - if s_sender: - try: - self.store_s_sender(s_sender) - except error.SenderChainManipulation, e: - assert self == e.s_context - - if arguments: - argc = len(arguments) - for i0 in range(argc): - self.settemp(i0, arguments[i0]) - else: - argc = 0 + argc = len(arguments) + for i0 in range(argc): + self.settemp(i0, arguments[i0]) if closure: for i0 in range(closure.size()): diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -49,7 +49,7 @@ w_method.literals = literals w_method.setbytes(bytes) w_receiver = stack[0] - s_frame = shadow.MethodContextShadow(space, None, w_method, w_receiver, []) + s_frame = shadow.MethodContextShadow(space, w_method=w_method, w_receiver=w_receiver) w_frame = s_frame.w_self() def interp_execute_frame(): return interp.interpret_toplevel(w_frame) diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1013,12 +1013,12 @@ assert False class StackTestInterpreter(TestInterpreter): - def stack_frame(self, w_frame, may_interrupt=True): + def stack_frame(self, s_frame, s_sender, may_interrupt=True): stack_depth = self.current_stack_depth for i in range(stack_depth + 1): assert sys._getframe(5 + i * 7).f_code.co_name == 'loop_bytecodes' assert sys._getframe(6 + stack_depth * 7).f_code.co_name == 'loop' - return interpreter.Interpreter.stack_frame(self, w_frame) + return interpreter.Interpreter.stack_frame(self, s_frame, s_sender, may_interrupt) def test_actual_stackdepth(): # | testBlock | diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -281,9 +281,7 @@ def test_methodcontext_s_home(): w_context = methodcontext() s_context = 
w_context.as_methodcontext_get_shadow(space) - w_middle_context = methodcontext(w_sender=w_context) - s_middle_context = w_middle_context.as_methodcontext_get_shadow(space) w_closure = space.newClosure(w_context, 3, 0, []) - s_closure_context = wrapper.BlockClosureWrapper(space, w_closure).asContextWithSender(w_middle_context, []) + s_closure_context = wrapper.BlockClosureWrapper(space, w_closure).create_frame() assert s_closure_context.s_home() is s_context diff --git a/spyvm/test/test_wrapper.py b/spyvm/test/test_wrapper.py --- a/spyvm/test/test_wrapper.py +++ b/spyvm/test/test_wrapper.py @@ -12,7 +12,7 @@ cleanup_module(__name__) def new_frame(): - return _new_frame(space, "")[0] + return _new_frame(space, "")[0].as_context_get_shadow(space) def test_simpleread(): w_o = model.W_PointersObject(space, None, 2) @@ -152,7 +152,7 @@ def test_suspend_asleep(self): process, old_process = self.make_processes(4, 2, space.w_false) - w_frame = process.suspend(space.w_true) + process.suspend(space.w_true) process_list = wrapper.scheduler(space).get_process_list(process.priority()) assert process_list.first_link() is process_list.last_link() assert process_list.first_link().is_nil(space) @@ -168,7 +168,7 @@ assert process_list.first_link() is process_list.last_link() assert process_list.first_link().is_nil(space) assert old_process.my_list().is_nil(space) - assert old_process.suspended_context() is current_context + assert old_process.suspended_context() is current_context.w_self() assert wrapper.scheduler(space).active_process() is process._w_self def new_process_consistency(self, process, old_process, w_active_context): @@ -181,15 +181,16 @@ assert priority_list.first_link() is process._w_self def old_process_consistency(self, old_process, old_process_context): - assert old_process.suspended_context() is old_process_context + assert old_process.suspended_context() is old_process_context.w_self() priority_list = 
wrapper.scheduler(space).get_process_list(old_process.priority()) assert priority_list.first_link() is old_process._w_self def make_processes(self, sleepingpriority, runningpriority, sleepingcontext): + if not isinstance(sleepingcontext, model.W_Object): + sleepingcontext = sleepingcontext.w_self() scheduler = wrapper.scheduler(space) - sleeping = new_process(priority=sleepingpriority, - w_suspended_context=sleepingcontext) + sleeping = new_process(priority=sleepingpriority, w_suspended_context=sleepingcontext) sleeping.put_to_sleep() running = new_process(priority=runningpriority) scheduler.store_active_process(running._w_self) diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -42,10 +42,11 @@ # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0), []) - w_frame = w_method.create_frame(space, w(0), [], sender=s_initial_frame).w_self() - + s_frame = w_method.create_frame(space, w(0)) + s_frame.store_s_sender(s_initial_frame, raise_error=False) + try: - interp.loop(w_frame) + interp.loop(s_frame.w_self()) except interpreter.ReturnFromTopLevel, e: assert e.object.as_string() == 'b2' except interpreter.StackOverflow, e: @@ -67,11 +68,12 @@ w('ensure'), space.w_BlockClosure]) # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) - s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0), []) - w_frame = w_method.create_frame(space, w(0), [], sender=s_initial_frame).w_self() - + s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0)) + s_frame = w_method.create_frame(space, w(0)) + s_frame.store_s_sender(s_initial_frame, raise_error=False) + try: - interp.loop(w_frame) + interp.loop(s_frame.w_self()) except 
interpreter.ReturnFromTopLevel, e: assert e.object.as_string() == 'b1' except interpreter.StackOverflow, e: diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -81,11 +81,13 @@ self._loop = True return interpreter.Interpreter.loop(self, w_active_context) - def stack_frame(self, s_new_frame, may_context_switch=True): + def stack_frame(self, s_new_frame, s_sender, may_context_switch=True): if not self._loop: - return s_new_frame # this test is done to not loop in test, - # but rather step just once where wanted - return interpreter.Interpreter.stack_frame(self, s_new_frame, may_context_switch) + # this test is done to not loop in test, but rather step just once where wanted + # Unfortunately, we have to mimick some of the original behaviour. + s_new_frame.store_s_sender(s_sender, raise_error=False) + return s_new_frame + return interpreter.Interpreter.stack_frame(self, s_new_frame, s_sender, may_context_switch) class BootstrappedObjSpace(objspace.ObjSpace): diff --git a/spyvm/tool/analyseimage.py b/spyvm/tool/analyseimage.py --- a/spyvm/tool/analyseimage.py +++ b/spyvm/tool/analyseimage.py @@ -56,7 +56,7 @@ w_method = s_class.lookup("tinyBenchmarks") assert w_method - w_frame = w_method.create_frame(interp.space, w_object, []) + w_frame = w_method.create_frame(interp.space, w_object) interp.store_w_active_context(w_frame) from spyvm.interpreter import BYTECODE_TABLE diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -82,37 +82,36 @@ assert isinstance(w_frame, model.W_PointersObject) raise ProcessSwitch(w_frame.as_context_get_shadow(self.space)) - def deactivate(self, w_current_frame): - self.put_to_sleep() - self.store_suspended_context(w_current_frame) + def deactivate(self, s_current_frame, put_to_sleep=True): + if put_to_sleep: + self.put_to_sleep() + self.store_suspended_context(s_current_frame.w_self()) - def resume(self, w_current_frame): + def resume(self, 
s_current_frame): sched = scheduler(self.space) active_process = ProcessWrapper(self.space, sched.active_process()) active_priority = active_process.priority() priority = self.priority() if priority > active_priority: - active_process.deactivate(w_current_frame) - return self.activate() + active_process.deactivate(s_current_frame) + self.activate() else: self.put_to_sleep() - return w_current_frame def is_active_process(self): return self._w_self.is_same_object(scheduler(self.space).active_process()) - def suspend(self, w_current_frame): + def suspend(self, s_current_frame): if self.is_active_process(): assert self.my_list().is_nil(self.space) w_process = scheduler(self.space).pop_highest_priority_process() - self.store_suspended_context(w_current_frame) - return ProcessWrapper(self.space, w_process).activate() + self.deactivate(s_current_frame, put_to_sleep=False) + ProcessWrapper(self.space, w_process).activate() else: if not self.my_list().is_nil(self.space): process_list = ProcessListWrapper(self.space, self.my_list()) process_list.remove(self._w_self) self.store_my_list(self.space.w_nil) - return w_current_frame class LinkedListWrapper(Wrapper): first_link, store_first_link = make_getter_setter(0) @@ -212,24 +211,22 @@ excess_signals, store_excess_signals = make_int_getter_setter(2) - def signal(self, w_current_frame): + def signal(self, s_current_frame): if self.is_empty_list(): value = self.excess_signals() self.store_excess_signals(value + 1) - return w_current_frame else: process = self.remove_first_link_of_list() - return ProcessWrapper(self.space, process).resume(w_current_frame) + ProcessWrapper(self.space, process).resume(s_current_frame) - def wait(self, w_current_frame): + def wait(self, s_current_frame): excess = self.excess_signals() w_process = scheduler(self.space).active_process() if excess > 0: self.store_excess_signals(excess - 1) - return w_current_frame else: self.add_last_link(w_process) - return ProcessWrapper(self.space, 
w_process).suspend(w_current_frame) + ProcessWrapper(self.space, w_process).suspend(s_current_frame) class PointWrapper(Wrapper): x, store_x = make_int_getter_setter(0) @@ -241,7 +238,7 @@ startpc, store_startpc = make_int_getter_setter(constants.BLKCLSR_STARTPC) numArgs, store_numArgs = make_int_getter_setter(constants.BLKCLSR_NUMARGS) - def asContextWithSender(self, w_context, arguments): + def create_frame(self, arguments=[]): from spyvm import shadow w_outerContext = self.outerContext() if not isinstance(w_outerContext, model.W_PointersObject): @@ -250,10 +247,8 @@ w_method = s_outerContext.w_method() w_receiver = s_outerContext.w_receiver() pc = self.startpc() - w_method.bytecodeoffset() - 1 - s_new_frame = shadow.MethodContextShadow(self.space, None, w_method, w_receiver, - arguments, s_sender=w_context.get_shadow(self.space), - closure=self, pc=pc) - return s_new_frame + return shadow.MethodContextShadow(self.space, w_method=w_method, w_receiver=w_receiver, + arguments=arguments, closure=self, pc=pc) def tempsize(self): # We ignore the number of temps a block has, because the first diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -104,19 +104,11 @@ return _run_benchmark(interp, 0, selector, "") def context_for(interp, number, benchmark, stringarg): - # XXX: Copied from interpreter >> perform - space = interp.space - argcount = 0 if stringarg == "" else 1 - w_receiver = space.wrap_int(number) - w_selector = interp.perform(space.wrap_string(benchmark), "asSymbol") - w_method = model.W_CompiledMethod(space, header=512) - w_method.literalatput0(space, 1, w_selector) - w_method.setbytes([chr(131), chr(argcount << 5), chr(124)]) #returnTopFromMethodBytecodeBytecode - s_frame = shadow.MethodContextShadow(space, None, w_method, w_receiver, []) - s_frame.push(w_receiver) - if not stringarg == "": - s_frame.push(space.wrap_string(stringarg)) - return s_frame + 
w_receiver = interp.space.wrap_int(number) + if stringarg: + return interp.create_toplevel_context(w_receiver, benchmark, interp.space.wrap_string(stringarg)) + else: + return interp.create_toplevel_context(w_receiver, benchmark) def _usage(argv): print """ diff --git a/targettinybenchsmalltalk.py b/targettinybenchsmalltalk.py --- a/targettinybenchsmalltalk.py +++ b/targettinybenchsmalltalk.py @@ -25,7 +25,7 @@ w_object = model.W_SmallInteger(0) s_class = w_object.class_shadow(space) w_method = s_class.lookup(w_selector) - s_frame = w_method.create_frame(space, w_object, []) + s_frame = w_method.create_frame(space, w_object) return interp, s_frame interp, s_frame = setup() From noreply at buildbot.pypy.org Mon Jul 7 22:13:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 7 Jul 2014 22:13:29 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: update #13 (forgot to commit this before) Message-ID: <20140707201329.A11B01C024A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: extradoc Changeset: r5353:36da5bb08996 Date: 2014-07-07 13:13 -0700 http://bitbucket.org/pypy/extradoc/changeset/36da5bb08996/ Log: update #13 (forgot to commit this before) diff --git a/blog/draft/py3k-status-update-13.rst b/blog/draft/py3k-status-update-13.rst new file mode 100644 --- /dev/null +++ b/blog/draft/py3k-status-update-13.rst @@ -0,0 +1,51 @@ +Py3k status update #13 +---------------------- + +This is the 13th status update about our work on the `py3k branch`_, which we +can work on thanks to all of the people who donated_ to the `py3k proposal`_. + +We're just finishing up a cleanup of int/long types. This work helps the py3k +branch unify these types into the Python 3 int and restore `JIT compilation of +machine sized integers`_. + +This cleanup also removes `multimethods`_ from these types. PyPy has +historically used a clever implementation of multimethod dispatch for declaring +methods of the __builtin__ types in RPython. 
+ +This multimethod scheme provides some convenient features for doing this, +however we've come to the conclusion that it may be more trouble than it's +worth. A major problem of multimethods is that they generate a large amount of +stub methods which burden the already lengthy and memory hungry RPython +translation process. Also, their implementation and behavior can be somewhat +complicated/obscure. + +The alternative to multimethods involves doing the work of the type checking +and dispatching rules in a more verbose, manual way. It's a little more work in +the end but less magical. + +Recently, Manuel Jacob finished a large cleanup effort of the +unicode/string/bytearray types that also removed their multimethods. This work +also benefits the py3k branch: it'll help with future `PEP 393`_ (or `PEP 393 +alternative`_) work. This effort was partly sponsored by Google's Summer of +Code: thanks Manuel and Google! + +Now there's only a couple major pieces left in the multimethod removal (the +float/complex types and special marshaling code) and a few minor pieces that +should be relatively easy. + +In conclusion, there's been some good progress made on py3k and multimethod +removal this winter, albeit a bit slower than we would have liked. + +cheers, +Phil + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`py3k branch`: https://bitbucket.org/pypy/pypy/commits/all/tip/branch%28%22py3k%22%29 + +.. _`JIT compilation of machine sized integers`: + http://morepypy.blogspot.com/2013/11/py3k-status-update-12.html +.. _`multimethods`: http://doc.pypy.org/en/latest/objspace.html#multimethods + +.. _`PEP 393`: http://www.python.org/dev/peps/pep-0393/ +.. 
_`PEP 393 alternative`: http://lucumr.pocoo.org/2014/1/9/ucs-vs-utf8/ From noreply at buildbot.pypy.org Tue Jul 8 01:26:34 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 8 Jul 2014 01:26:34 +0200 (CEST) Subject: [pypy-commit] pypy default: fix unsafe FormatMessage call (windows) Message-ID: <20140707232634.B41191D353E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72379:f3d274416c97 Date: 2014-07-08 09:22 +1000 http://bitbucket.org/pypy/pypy/changeset/f3d274416c97/ Log: fix unsafe FormatMessage call (windows) diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -79,7 +79,7 @@ "MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT)") defines = """FORMAT_MESSAGE_ALLOCATE_BUFFER FORMAT_MESSAGE_FROM_SYSTEM - MAX_PATH _MAX_ENV + MAX_PATH _MAX_ENV FORMAT_MESSAGE_IGNORE_INSERTS WAIT_OBJECT_0 WAIT_TIMEOUT INFINITE ERROR_INVALID_HANDLE DELETE READ_CONTROL SYNCHRONIZE WRITE_DAC @@ -226,7 +226,8 @@ buf[0] = lltype.nullptr(rffi.CCHARP.TO) try: msglen = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | - FORMAT_MESSAGE_FROM_SYSTEM, + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, None, rffi.cast(DWORD, code), DEFAULT_LANGUAGE, diff --git a/rpython/rlib/test/test_rwin32.py b/rpython/rlib/test/test_rwin32.py --- a/rpython/rlib/test/test_rwin32.py +++ b/rpython/rlib/test/test_rwin32.py @@ -58,3 +58,9 @@ for key, value in env.iteritems(): assert type(key) is unicode assert type(value) is unicode + +def test_formaterror(): + # choose one with formatting characters and newlines + msg = rwin32.FormatError(34) + assert '%2' in msg + From noreply at buildbot.pypy.org Tue Jul 8 05:16:42 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 8 Jul 2014 05:16:42 +0200 (CEST) Subject: [pypy-commit] pypy default: merge dtype record hash, based on pr#242 (yuyichao) Message-ID: <20140708031642.918591C024A@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72380:f20ac16753b6 Date: 
2014-07-08 13:15 +1000 http://bitbucket.org/pypy/pypy/changeset/f20ac16753b6/ Log: merge dtype record hash, based on pr#242 (yuyichao) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -6,7 +6,7 @@ from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) from rpython.rlib import jit -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, compute_hash from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from pypy.module.micronumpy import types, boxes, base, support, constants as NPY from pypy.module.micronumpy.appbridge import get_appbridge_cache @@ -254,8 +254,38 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) + def _compute_hash(self, space, x): + from rpython.rlib.rarithmetic import intmask + if self.fields is None and self.subdtype is None: + endian = self.byteorder + if endian == NPY.NATIVE: + endian = NPY.NATBYTE + flags = 0 + y = 0x345678 + y = intmask((1000003 * y) ^ ord(self.kind[0])) + y = intmask((1000003 * y) ^ ord(endian[0])) + y = intmask((1000003 * y) ^ flags) + y = intmask((1000003 * y) ^ self.elsize) + if self.is_flexible(): + y = intmask((1000003 * y) ^ self.alignment) + return intmask((1000003 * x) ^ y) + if self.fields is not None: + for name, (offset, subdtype) in self.fields.iteritems(): + assert isinstance(subdtype, W_Dtype) + y = intmask(1000003 * (0x345678 ^ compute_hash(name))) + y = intmask(1000003 * (y ^ compute_hash(offset))) + y = intmask(1000003 * (y ^ subdtype._compute_hash(space, + 0x345678))) + x = intmask(x ^ y) + if self.subdtype is not None: + for s in self.shape: + x = intmask((1000003 * x) ^ compute_hash(s)) + x = self.base._compute_hash(space, x) + return x + def descr_hash(self, space): - return space.hash(self.descr_reduce(space)) + return 
space.wrap(self._compute_hash(space, 0x345678)) + def descr_str(self, space): if self.fields: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -368,15 +368,30 @@ d5 = numpy.dtype([('f0', 'i4'), ('f1', d2)]) d6 = numpy.dtype([('f0', 'i4'), ('f1', d3)]) import sys - if '__pypy__' not in sys.builtin_module_names: - assert hash(d1) == hash(d2) - assert hash(d1) != hash(d3) - assert hash(d4) == hash(d5) - assert hash(d4) != hash(d6) - else: - for d in [d1, d2, d3, d4, d5, d6]: - raises(TypeError, hash, d) + assert hash(d1) == hash(d2) + assert hash(d1) != hash(d3) + assert hash(d4) == hash(d5) + assert hash(d4) != hash(d6) + def test_record_hash(self): + from numpy import dtype + # make sure the fields hash return different value + # for different order of field in a structure + + # swap names + t1 = dtype([('x', ' Author: Yichao Yu Branch: Changeset: r72381:2aabeb712f61 Date: 2014-07-04 22:11 +0800 http://bitbucket.org/pypy/pypy/changeset/2aabeb712f61/ Log: make numpy scalar non-iterable diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -153,6 +153,11 @@ raise OperationError(space.w_IndexError, space.wrap( "invalid index to scalar variable")) + def descr_iter(self, space): + # Making numpy scalar non-iterable with a valid __getitem__ method + raise oefmt(space.w_TypeError, + "'%T' object is not iterable", self) + def descr_str(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) @@ -555,6 +560,7 @@ __new__ = interp2app(W_GenericBox.descr__new__.im_func), __getitem__ = interp2app(W_GenericBox.descr_getitem), + __iter__ = interp2app(W_GenericBox.descr_iter), __str__ = interp2app(W_GenericBox.descr_str), __repr__ = interp2app(W_GenericBox.descr_str), __format__ = 
interp2app(W_GenericBox.descr_format), diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py @@ -249,4 +249,12 @@ assert d.dtype == dtype('int32') assert (d == [[1, 0, 0], [0, 1, 0], [0, 0, 1]]).all() - + def test_scalar_iter(self): + from numpypy import int8, int16, int32, int64, float32, float64 + for t in int8, int16, int32, int64, float32, float64: + try: + iter(t(17)) + except TypeError: + pass + else: + assert False, "%s object should not be iterable." % t From noreply at buildbot.pypy.org Tue Jul 8 05:53:43 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 8 Jul 2014 05:53:43 +0200 (CEST) Subject: [pypy-commit] pypy default: move test to untranslated Message-ID: <20140708035343.A5D751C0906@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72382:30e15663c576 Date: 2014-07-08 13:46 +1000 http://bitbucket.org/pypy/pypy/changeset/30e15663c576/ Log: move test to untranslated diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -290,3 +290,13 @@ assert np.isnan(b/a) b = t(0.) assert np.isnan(b/a) + + def test_scalar_iter(self): + from numpypy import int8, int16, int32, int64, float32, float64 + for t in int8, int16, int32, int64, float32, float64: + try: + iter(t(17)) + except TypeError: + pass + else: + assert False, "%s object should not be iterable." 
% t diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py --- a/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py +++ b/pypy/module/test_lib_pypy/numpypy/core/test_numeric.py @@ -248,13 +248,3 @@ assert d.shape == (3, 3) assert d.dtype == dtype('int32') assert (d == [[1, 0, 0], [0, 1, 0], [0, 0, 1]]).all() - - def test_scalar_iter(self): - from numpypy import int8, int16, int32, int64, float32, float64 - for t in int8, int16, int32, int64, float32, float64: - try: - iter(t(17)) - except TypeError: - pass - else: - assert False, "%s object should not be iterable." % t From noreply at buildbot.pypy.org Tue Jul 8 09:43:29 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 8 Jul 2014 09:43:29 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: pobjspace and interpreter tests now pass Message-ID: <20140708074329.B74EA1C1068@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72383:104602bd7dd9 Date: 2014-07-08 02:37 -0500 http://bitbucket.org/pypy/pypy/changeset/104602bd7dd9/ Log: pobjspace and interpreter tests now pass diff too long, truncating to 2000 out of 2118 lines diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -8,6 +8,7 @@ from pypy.interpreter.pyparser.error import SyntaxError from pypy.interpreter.astcompiler.astbuilder import ast_from_node from pypy.interpreter.astcompiler import ast, consts +from pypy.interpreter.utf8 import Utf8Str class TestAstBuilder: @@ -1103,7 +1104,7 @@ assert info.encoding == "utf-7" s = ast_from_node(space, tree, info).body[0].value assert isinstance(s, ast.Str) - assert space.eq_w(s.s, space.wrap(sentence)) + assert space.eq_w(s.s, space.wrap(Utf8Str.from_unicode(sentence))) def test_string_bug(self): space = self.space diff 
--git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -919,11 +919,7 @@ import sys d = {} exec '# -*- coding: utf-8 -*-\n\nu = u"\xf0\x9f\x92\x8b"' in d - if sys.maxunicode > 65535 and self.maxunicode > 65535: - expected_length = 1 - else: - expected_length = 2 - assert len(d['u']) == expected_length + assert len(d['u']) == 1 class TestOptimizations: diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -153,6 +153,10 @@ const = code_hook(space, const, hidden_applevel, code_hook) if isinstance(const, unicode): const = Utf8Str.from_unicode(const) + if isinstance(const, tuple): + const = tuple(x if not isinstance(x, unicode) + else Utf8Str.from_unicode(x) + for x in const) newconsts_w[num] = space.wrap(const) num += 1 # stick the underlying CPython magic value, if the code object diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -102,7 +102,4 @@ def test_decode_unicode_utf8(self): buf = parsestring.decode_unicode_utf8(self.space, 'u"\xf0\x9f\x92\x8b"', 2, 6) - if sys.maxunicode == 65535: - assert buf == r"\U0000d83d\U0000dc8b" - else: - assert buf == r"\U0001f48b" + assert buf == r"\U0001f48b" diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -4,6 +4,7 @@ from pypy.interpreter import gateway, argument from pypy.interpreter.gateway import ObjSpace, W_Root, WrappedDefault from pypy.interpreter.signature import Signature +from pypy.interpreter.utf8 import Utf8Str import py import sys @@ -519,7 +520,7 @@ 
unicode]) w_app_g3_u = space.wrap(app_g3_u) assert self.space.eq_w( - space.call_function(w_app_g3_u, w(u"foo")), + space.call_function(w_app_g3_u, w(Utf8Str("foo"))), w(3)) assert self.space.eq_w( space.call_function(w_app_g3_u, w("baz")), diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -2,6 +2,7 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.function import Function from pypy.interpreter.pycode import PyCode +from pypy.interpreter.utf8 import Utf8Str from rpython.rlib.rarithmetic import r_longlong, r_ulonglong import sys @@ -217,8 +218,9 @@ w = space.wrap assert space.str0_w(w("123")) == "123" exc = space.raises_w(space.w_TypeError, space.str0_w, w("123\x004")) - assert space.unicode0_w(w(u"123")) == u"123" - exc = space.raises_w(space.w_TypeError, space.unicode0_w, w(u"123\x004")) + assert space.unicode0_w(w(Utf8Str("123"))) == u"123" + exc = space.raises_w(space.w_TypeError, space.unicode0_w, + w(Utf8Str.from_unicode(u"123\x004"))) def test_getindex_w(self): w_instance1 = self.space.appexec([], """(): diff --git a/pypy/interpreter/test/test_utf8.py b/pypy/interpreter/test/test_utf8.py --- a/pypy/interpreter/test/test_utf8.py +++ b/pypy/interpreter/test/test_utf8.py @@ -35,13 +35,15 @@ iter.move(i) if i != 4: assert iter.peek_next() == [0x41, 0x10F, 0x20AC, 0x1F63D][i] - assert list(iter) == [0x41, 0x10F, 0x20AC, 0x1F63D][i:] + l = list(iter) + assert l == [0x41, 0x10F, 0x20AC, 0x1F63D][i:] for i in range(1, 5): iter = s.codepoint_iter() list(iter) # move the iterator to the end iter.move(-i) - assert list(iter) == [0x41, 0x10F, 0x20AC, 0x1F63D][4-i:] + l = list(iter) + assert l == [0x41, 0x10F, 0x20AC, 0x1F63D][4-i:] iter = s.char_iter() l = [s.bytes.decode('utf8') for s in list(iter)] @@ -50,6 +52,27 @@ else: assert l == [u'A', u'\u010F', u'\u20AC', u'\U00001F63D'] +def test_reverse_iterator(): + 
s = build_utf8str() + iter = s.reverse_codepoint_iter() + assert iter.peek_next() == 0x1F63D + assert list(iter) == [0x1F63D, 0x20AC, 0x10F, 0x41] + + for i in range(1, 5): + iter = s.reverse_codepoint_iter() + iter.move(i) + if i != 4: + assert iter.peek_next() == [0x1F63D, 0x20AC, 0x10F, 0x41][i] + l = list(iter) + assert l == [0x1F63D, 0x20AC, 0x10F, 0x41][i:] + + for i in range(1, 5): + iter = s.reverse_codepoint_iter() + list(iter) # move the iterator to the end + iter.move(-i) + l = list(iter) + assert l == [0x1F63D, 0x20AC, 0x10F, 0x41][4-i:] + def test_builder_append_slice(): builder = Utf8Builder() builder.append_slice(Utf8Str.from_unicode(u"0ê0"), 1, 2) @@ -57,6 +80,10 @@ assert builder.build() == u"êes" +def test_eq(): + assert Utf8Str('test') == Utf8Str('test') + assert Utf8Str('test') != Utf8Str('test1') + def test_unicode_literal_comparison(): builder = Utf8Builder() builder.append(0x10F) @@ -152,5 +179,17 @@ assert s.split() == u.split() assert s.split(' ') == u.split(' ') - assert s.split(maxsplit=1) == u.split(None, 1) + assert s.split(maxsplit=2) == u.split(None, 2) + assert s.split(' ', 2) == u.split(' ', 2) assert s.split('\n') == [s] + +def test_rsplit(): + # U+00A0 is a non-breaking space + u = u"one two three\xA0four" + s = Utf8Str.from_unicode(u) + + assert s.rsplit() == u.rsplit() + assert s.rsplit(' ') == u.rsplit(' ') + assert s.rsplit(maxsplit=2) == u.rsplit(None, 2) + assert s.rsplit(' ', 2) == u.rsplit(' ', 2) + assert s.rsplit('\n') == [s] diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -104,6 +104,9 @@ return Utf8Str('') # TODO: If start > _len or stop >= _len, then raise exception + if stop > len(self): + stop = len(self) + if self._is_ascii: return Utf8Str(self.bytes[start:stop], True) @@ -124,6 +127,12 @@ return Utf8Str(self.bytes[start_byte:stop_byte], is_ascii, stop - start) + def byte_slice(self, start, end): + return 
Utf8Str(self.bytes[start:end], self._is_ascii) + + def __repr__(self): + return "" % unicode(self) + def __add__(self, other): return Utf8Str(self.bytes + other.bytes, self._is_ascii and other._is_ascii) @@ -134,6 +143,9 @@ def __len__(self): return self._len + def __hash__(self): + return hash(self.bytes) + def __eq__(self, other): """NOT_RPYTHON""" if isinstance(other, Utf8Str): @@ -143,6 +155,27 @@ return False + def __ne__(self, other): + """NOT_RPYTHON""" + if isinstance(other, Utf8Str): + return self.bytes != other.bytes + if isinstance(other, unicode): + return unicode(self.bytes, 'utf8') != other + + return True + + def __lt__(self, other): + return self.bytes < other.bytes + + def __le__(self, other): + return self.bytes <= other.bytes + + def __gt__(self, other): + return self.bytes > other.bytes + + def __ge__(self, other): + return self.bytes >= other.bytes + @specialize.argtype(1) def __contains__(self, other): if isinstance(other, Utf8Str): @@ -158,11 +191,20 @@ def __iter__(self): return self.char_iter() + def __unicode__(self): + return unicode(self.bytes, 'utf8') + def char_iter(self): - return Utf8StrCharIterator(self) + return Utf8CharacterIter(self) + + def reverse_char_iter(self): + return Utf8ReverseCharacterIter(self) def codepoint_iter(self): - return Utf8StrCodePointIterator(self) + return Utf8CodePointIter(self) + + def reverse_codepoint_iter(self): + return Utf8ReverseCodePointIter(self) @specialize.argtype(1, 2) def _bound_check(self, start, end): @@ -270,12 +312,11 @@ else: break - iter.prev_count(1) start_byte = iter.byte_pos - iter.next_count(1) if maxsplit == 0: - res.append(Utf8Str(self.bytes[start_byte:len(self.bytes)])) + res.append(Utf8Str(self.bytes[start_byte:len(self.bytes)], + self._is_ascii)) break for cd in iter: @@ -283,12 +324,12 @@ break else: # Hit the end of the string - res.append(Utf8Str(self.bytes[start_byte:len(self.bytes)])) + res.append(Utf8Str(self.bytes[start_byte:len(self.bytes)], + self._is_ascii)) break - 
iter.prev_count(1) - res.append(Utf8Str(self.bytes[start_byte:iter.byte_pos])) - iter.next_count(1) + res.append(Utf8Str(self.bytes[start_byte:iter.byte_pos], + self._is_ascii)) maxsplit -= 1 return res @@ -302,15 +343,54 @@ other_bytes = other.bytes return [Utf8Str(s) for s in self.bytes.rsplit(other_bytes, maxsplit)] - # TODO: I need to make a reverse_codepoint_iter first + res = [] + iter = self.reverse_codepoint_iter() + while True: + # Find the start of the next word + for cd in iter: + if not unicodedb.isspace(cd): + break + else: + break + start_byte = self.next_char(iter.byte_pos) + + if maxsplit == 0: + res.append(Utf8Str(self.bytes[0:start_byte], self._is_ascii)) + break + + # Find the end of the word + for cd in iter: + if unicodedb.isspace(cd): + break + else: + # We hit the end of the string + res.append(Utf8Str(self.bytes[0:start_byte], self._is_ascii)) + break + + end_byte = self.next_char(iter.byte_pos) + res.append(Utf8Str(self.bytes[end_byte:start_byte], + self._is_ascii)) + maxsplit -= 1 + + res.reverse() + return res + + @specialize.argtype(1) def join(self, other): if len(other) == 0: return Utf8Str('') - assert isinstance(other[0], Utf8Str) - return Utf8Str(self.bytes.join([s.bytes for s in other]), - self._is_ascii and all(s._is_ascii for s in other)) + if isinstance(other[0], Utf8Str): + return Utf8Str( + self.bytes.join([s.bytes for s in other]), + self._is_ascii and all(s._is_ascii for s in other) + ) + else: + return Utf8Str( + self.bytes.join([s for s in other]), + self._is_ascii and all(s._is_ascii for s in other) + ) def as_unicode(self): """NOT_RPYTHON""" @@ -321,83 +401,18 @@ """NOT_RPYTHON""" return Utf8Str(u.encode('utf-8')) -class Utf8StrCodePointIterator(object): - def __init__(self, ustr): - self.ustr = ustr - self.pos = 0 - self.byte_pos = 0 + def next_char(self, byte_pos): + return byte_pos + utf8_code_length[ord(self.bytes[byte_pos])] - if len(ustr) != 0: - self.current = utf8ord_bytes(ustr.bytes, 0) - else: - self.current = 
-1 + def prev_char(self, byte_pos): + if byte_pos == 0: + return -1 + byte_pos -= 1 + while utf8_code_length[ord(self.bytes[byte_pos])] == 0: + byte_pos -= 1 + return byte_pos - def __iter__(self): - return self - def next(self): - if self.pos == len(self.ustr): - raise StopIteration() - self.current = utf8ord_bytes(self.ustr.bytes, self.byte_pos) - - self.byte_pos += utf8_code_length[ord(self.ustr.bytes[self.byte_pos])] - self.pos += 1 - - return self.current - - def next_count(self, count=1): - self.pos += count - while count > 1: - self.byte_pos += utf8_code_length[ord(self.ustr.bytes[self.byte_pos])] - count -= 1 - self.current = utf8ord_bytes(self.ustr.bytes, self.byte_pos) - self.byte_pos += utf8_code_length[ord(self.ustr.bytes[self.byte_pos])] - - def prev_count(self, count=1): - self.pos -= count - while count > 0: - self.byte_pos -= 1 - while utf8_code_length[ord(self.ustr.bytes[self.byte_pos])] == 0: - self.byte_pos -= 1 - count -= 1 - - self.current = utf8ord_bytes(self.ustr.bytes, self.byte_pos) - - def move(self, count): - if count > 0: - self.next_count(count) - elif count < 0: - self.prev_count(-count) - - def peek_next(self): - return utf8ord_bytes(self.ustr.bytes, self.byte_pos) - -class Utf8StrCharIterator(object): - def __init__(self, ustr): - self.ustr = ustr - self.byte_pos = 0 - self.current = self._get_current() - - def __iter__(self): - return self - - def _get_current(self): - if self.byte_pos == len(self.ustr.bytes): - return None - length = utf8_code_length[ord(self.ustr.bytes[self.byte_pos])] - return Utf8Str(''.join([self.ustr.bytes[i] - for i in range(self.byte_pos, self.byte_pos + length)]), - length == 1) - - def next(self): - #import pdb; pdb.set_trace() - ret = self.current - if ret is None: - raise StopIteration() - - self.byte_pos += utf8_code_length[ord(self.ustr.bytes[self.byte_pos])] - self.current = self._get_current() - return ret class Utf8Builder(object): @specialize.argtype(1) @@ -452,9 +467,168 @@ raise 
TypeError("Invalid type '%s' for Utf8Str.append_slice" % type(s)) + @specialize.argtype(1) def append_multiple_char(self, c, count): - self._builder.append_multiple_char(c, count) + # TODO: What do I do when I have an int? Is it fine to just loop over + # .append(c) then? Should (can) I force a resize first? + if isinstance(c, int): + self._builder.append_multiple_char(chr(c), count) + return + + if len(c) > 1: + import pdb; pdb.set_trace() + if isinstance(c, str): + self._builder.append_multiple_char(c, count) + else: + self._builder.append_multiple_char(c.bytes, count) def build(self): return Utf8Str(self._builder.build(), self._is_ascii) +# _______________________________________________ + +# iter.current is the current (ie the last returned) element +# iter.pos isthe position of the current element +# iter.byte_pos isthe byte position of the current element +# In the before-the-start state, for foward iterators iter.pos and +# iter.byte_pos are -1. For reverse iterators, they are len(ustr) and +# len(ustr.bytes) respectively. 
+ +class ForwardIterBase(object): + def __init__(self, ustr): + self.ustr = ustr + self.pos = -1 + + self._byte_pos = 0 + self.byte_pos = -1 + self.current = self._default + + def __iter__(self): + return self + + def next(self): + if self.pos + 1 == len(self.ustr): + raise StopIteration() + + self.pos += 1 + self.byte_pos = self._byte_pos + + self.current = self._value(self.byte_pos) + + self._byte_pos = self.ustr.next_char(self._byte_pos) + return self.current + + def peek_next(self): + return self._value(self._byte_pos) + + def peek_prev(self): + return self._value(self._move_backward(self.byte_pos)) + + def move(self, count): + if count > 0: + self.pos += count + + while count != 1: + self._byte_pos = self.ustr.next_char(self._byte_pos) + count -= 1 + self.byte_pos = self._byte_pos + self._byte_pos = self.ustr.next_char(self._byte_pos) + self.current = self._value(self.byte_pos) + + elif count < 0: + self.pos += count + while count < -1: + self.byte_pos = self.ustr.prev_char(self.byte_pos) + count += 1 + self._byte_pos = self.byte_pos + self.byte_pos = self.ustr.prev_char(self.byte_pos) + self.current = self._value(self.byte_pos) + + def copy(self): + iter = self.__class__(self.ustr) + iter.pos = self.pos + iter.byte_pos = self.byte_pos + iter._byte_pos = self._byte_pos + iter.current = self.current + return iter + +class ReverseIterBase(object): + def __init__(self, ustr): + self.ustr = ustr + self.pos = len(ustr) + self.byte_pos = len(ustr.bytes) + self.current = self._default + + def __iter__(self): + return self + + def next(self): + if self.pos == 0: + raise StopIteration() + + self.pos -= 1 + self.byte_pos = self.ustr.prev_char(self.byte_pos) + self.current = self._value(self.byte_pos) + return self.current + + def peek_next(self): + return self._value(self.ustr.prev_char(self.byte_pos)) + + def peek_prev(self): + return self._value(self.ustr.next_char(self.byte_pos)) + + def move(self, count): + if count > 0: + self.pos -= count + while count != 0: + 
self.byte_pos = self.ustr.prev_char(self.byte_pos) + count -= 1 + self.current = self._value(self.byte_pos) + elif count < 0: + self.pos -= count + while count != 0: + self.byte_pos = self.ustr.next_char(self.byte_pos) + count += 1 + self.current = self._value(self.byte_pos) + + def copy(self): + iter = self.__class__(self.ustr) + iter.pos = self.pos + iter.byte_pos = self.byte_pos + iter.current = self.current + return iter + +def make_iterator(name, base, calc_value, default): + class C(base): + _default = default + _value = calc_value + C.__name__ = name + return C + +def codepoint_calc_value(self, byte_pos): + if byte_pos == -1 or byte_pos == len(self.ustr.bytes): + return -1 + return utf8ord_bytes(self.ustr.bytes, byte_pos) + +def character_calc_value(self, byte_pos): + if byte_pos == -1 or byte_pos == len(self.ustr.bytes): + return None + length = utf8_code_length[ord(self.ustr.bytes[self.byte_pos])] + return Utf8Str(''.join([self.ustr.bytes[i] + for i in range(self.byte_pos, self.byte_pos + length)]), + length == 1) + +Utf8CodePointIter = make_iterator("Utf8CodePointIter", ForwardIterBase, + codepoint_calc_value, -1) +Utf8CharacterIter = make_iterator("Utf8CharacterIter", ForwardIterBase, + character_calc_value, None) +Utf8ReverseCodePointIter = make_iterator( + "Utf8ReverseCodePointIter", ReverseIterBase, codepoint_calc_value, -1) +Utf8ReverseCharacterIter = make_iterator( + "Utf8ReverseCharacterIter", ReverseIterBase, character_calc_value, None) + +del make_iterator +del codepoint_calc_value +del character_calc_value +del ForwardIterBase +del ReverseIterBase diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -208,7 +208,6 @@ pos = 0 while pos < size: - #oc = ORD(s, pos) oc = utf8ord(s, pos) # Escape quotes @@ -460,10 +459,10 @@ else: return s.bytes - iter.move(-1) result = Utf8Builder(len(s.bytes)) result.append_slice(s.bytes, 0, iter.byte_pos) + 
iter.move(-1) for oc in iter: if oc >= 0xD800 and oc <= 0xDFFF: # Check the next character to see if this is a surrogate pair @@ -741,7 +740,6 @@ result = Utf8Builder(size // 2) - #XXX I think the errors are not correctly handled here while pos < size: # remaining bytes at the end? (size should be even) if len(s) - pos < 2: @@ -869,7 +867,8 @@ def str_decode_utf_32_helper(s, size, errors, final=True, errorhandler=None, - byteorder="native"): + byteorder="native", + encodingname='utf32'): if errorhandler is None: errorhandler = default_unicode_error_decode bo = 0 @@ -924,7 +923,7 @@ if len(s) - pos < 4: if not final: break - r, pos = errorhandler(errors, 'utf32', "truncated data", + r, pos = errorhandler(errors, encodingname, "truncated data", s, pos, len(s)) result.append(r) if len(s) - pos < 4: @@ -933,7 +932,8 @@ ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) | (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]])) if ch >= 0x110000: - r, pos = errorhandler(errors, 'utf32', "codepoint not in range(0x110000)", + r, pos = errorhandler(errors, encodingname, + "codepoint not in range(0x110000)", s, pos, len(s)) result.append(r) continue @@ -1097,7 +1097,7 @@ if errorhandler is None: errorhandler = default_unicode_error_decode if size == 0: - return u'', 0 + return Utf8Str(''), 0 inShift = False base64bits = 0 @@ -1345,9 +1345,12 @@ def str_decode_unicode_internal(s, size, errors, final=False, errorhandler=None): if BYTEORDER == 'little': - return str_decode_utf_32_le(s, size, errors, errorhandler) + result, length, byteorder = str_decode_utf_32_helper( + s, size, errors, final, errorhandler, "little", "unicode_internal") else: - return str_decode_utf_32_be(s, size, errors, errorhandler) + result, length, byteorder = str_decode_utf_32_helper( + s, size, errors, final, errorhandler, "internal", "unicode_internal") + return result, length def unicode_encode_unicode_internal(s, size, errors, errorhandler=None): if BYTEORDER == 'little': @@ 
-1561,6 +1564,7 @@ def default_unicode_error_decode(errors, encoding, msg, s, startingpos, endingpos): + """NOT_RPYTHON""" if errors == 'replace': return _unicode_error_replacement, endingpos if errors == 'ignore': @@ -1570,9 +1574,10 @@ def default_unicode_error_encode(errors, encoding, msg, u, startingpos, endingpos): + """NOT_RPYTHON""" if errors == 'replace': return '?', None, endingpos if errors == 'ignore': return '', None, endingpos - raise UnicodeEncodeError(encoding, u, startingpos, endingpos, msg) + raise UnicodeEncodeError(encoding, unicode(u), startingpos, endingpos, msg) diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -9,6 +9,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.signature import Signature +from pypy.interpreter.utf8_codecs import str_decode_latin_1 from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods, _get_buffer @@ -154,9 +155,11 @@ w_dict = self.getdict(space) if w_dict is None: w_dict = space.w_None + ustr = str_decode_latin_1(''.join(self.data), len(self.data), + 'strict')[0] return space.newtuple([ space.type(self), space.newtuple([ - space.wrap(''.join(self.data).decode('latin-1')), + space.wrap(ustr), space.wrap('latin-1')]), w_dict]) diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -4,7 +4,9 @@ import string from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.utf8 import Utf8Str, Utf8Builder, ORD +from pypy.interpreter.utf8 import Utf8Str, Utf8Builder, ORD, utf8chr +from pypy.interpreter.utf8_codecs import ( + unicode_encode_latin_1, 
unicode_encode_ascii, str_decode_ascii) from rpython.rlib import rstring, runicode, rlocale, rfloat, jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rfloat import copysign, formatd @@ -20,7 +22,7 @@ result = 0 i = start while i < end: - digit = ord(s[i]) - ord('0') + digit = ORD(s, i) - ord('0') if 0 <= digit <= 9: if result > (sys.maxint - digit) / 10: raise oefmt(space.w_ValueError, @@ -63,22 +65,24 @@ out = Utf8Builder() else: out = rstring.StringBuilder() + if not level: raise OperationError(space.w_ValueError, space.wrap("Recursion depth exceeded")) level -= 1 - s = self.template - return self._do_build_string(start, end, level, out, s) + return self._do_build_string(start, end, level, out, self.template) @jit.look_inside_iff(lambda self, start, end, level, out, s: jit.isconstant(s)) def _do_build_string(self, start, end, level, out, s): space = self.space last_literal = i = start + while i < end: c = ORD(s, i) i += 1 if c == ord("{") or c == ord("}"): at_end = i == end + # Find escaped "{" and "}" markup_follows = True if c == ord("}"): @@ -87,6 +91,7 @@ space.wrap("Single '}'")) i += 1 markup_follows = False + if c == ord("{"): if at_end: raise OperationError(space.w_ValueError, @@ -94,6 +99,7 @@ if ORD(s, i) == ord("{"): i += 1 markup_follows = False + # Attach literal data, ending with { or } out.append_slice(s, last_literal, i - 1) if not markup_follows: @@ -101,6 +107,7 @@ end_literal = i - 1 assert end_literal > last_literal literal = self.template[last_literal:end_literal] + w_entry = space.newtuple([ space.wrap(literal), space.w_None, space.w_None, space.w_None]) @@ -108,6 +115,7 @@ self.last_end = i last_literal = i continue + nested = 1 field_start = i recursive = False @@ -121,6 +129,7 @@ if not nested: break i += 1 + if nested: raise OperationError(space.w_ValueError, space.wrap("Unmatched '{'")) @@ -139,41 +148,43 @@ # Find ":" or "!" 
i = start while i < end: - c = s[i] + c = ORD(s, i) if c == ord(":") or c == ord("!"): end_name = i + if c == ord("!"): i += 1 if i == end: w_msg = self.space.wrap("expected conversion") raise OperationError(self.space.w_ValueError, w_msg) - conversion = s[i] + + conversion = ORD(s, i) i += 1 if i < end: - if s[i] != ':': + if ORD(s, i) != ord(':'): w_msg = self.space.wrap("expected ':' after" " format specifier") raise OperationError(self.space.w_ValueError, w_msg) i += 1 else: - conversion = None + conversion = -1 i += 1 return s[start:end_name], conversion, i i += 1 - return s[start:end], None, end + return s[start:end], -1, end @jit.unroll_safe def _get_argument(self, name): # First, find the argument. space = self.space i = 0 - end = len(name) - while i < end: - c = name[i] + while i < len(name): + c = ORD(name, i) if c == ord("[") or c == ord("."): break i += 1 + empty = not i if empty: index = -1 @@ -181,12 +192,14 @@ index, stop = _parse_int(self.space, name, 0, i) if stop != i: index = -1 + use_numeric = empty or index != -1 if self.auto_numbering_state == ANS_INIT and use_numeric: if empty: self.auto_numbering_state = ANS_AUTO else: self.auto_numbering_state = ANS_MANUAL + if use_numeric: if self.auto_numbering_state == ANS_MANUAL: if empty: @@ -204,7 +217,8 @@ kwarg = name[:i] if self.is_unicode: try: - arg_key = kwarg.encode("latin-1") + arg_key = unicode_encode_latin_1(kwarg, len(kwarg), + 'strict') except UnicodeEncodeError: # Not going to be found in a dict of strings. 
raise OperationError(space.w_KeyError, space.wrap(kwarg)) @@ -220,7 +234,7 @@ except IndexError: w_msg = space.wrap("index out of range") raise OperationError(space.w_IndexError, w_msg) - return self._resolve_lookups(w_arg, name, i, end) + return self._resolve_lookups(w_arg, name, i, len(name)) @jit.unroll_safe def _resolve_lookups(self, w_obj, name, start, end): @@ -228,15 +242,16 @@ space = self.space i = start while i < end: - c = name[i] + c = ORD(name, i) if c == ord("."): i += 1 start = i while i < end: - c = name[i] + c = ORD(name, i) if c == ord("[") or c == ord("."): break i += 1 + if start == i: w_msg = space.wrap("Empty attribute in format string") raise OperationError(space.w_ValueError, w_msg) @@ -247,18 +262,17 @@ self.parser_list_w.append(space.newtuple([ space.w_True, w_attr])) elif c == ord("["): - got_bracket = False i += 1 start = i while i < end: - c = name[i] + c = ORD(name, i) if c == ord("]"): - got_bracket = True break i += 1 - if not got_bracket: + else: raise OperationError(space.w_ValueError, space.wrap("Missing ']'")) + index, reached = _parse_int(self.space, name, start, i) if index != -1 and reached == i: w_item = space.wrap(index) @@ -285,29 +299,30 @@ if c == ord("[") or c == ord("."): break i += 1 + if i == 0: index = -1 else: index, stop = _parse_int(self.space, name, 0, i) if stop != i: index = -1 + if index >= 0: w_first = space.wrap(index) else: w_first = space.wrap(name[:i]) - # + self.parser_list_w = [] self._resolve_lookups(None, name, i, end) - # + return space.newtuple([w_first, space.iter(space.newlist(self.parser_list_w))]) def _convert(self, w_obj, conversion): space = self.space - conv = ORD(conversion, 0) - if conv == ord("r"): + if conversion == ord("r"): return space.repr(w_obj) - elif conv == ord("s"): + elif conversion == ord("s"): if self.is_unicode: return space.call_function(space.w_unicode, w_obj) return space.str(w_obj) @@ -318,7 +333,7 @@ def _render_field(self, start, end, recursive, level): name, 
conversion, spec_start = self._parse_field(start, end) spec = self.template[spec_start:end] - # + if self.parser_list_w is not None: # used from formatter_parser() if level == 1: # ignore recursive calls @@ -333,12 +348,13 @@ self.parser_list_w.append(w_entry) self.last_end = end + 1 return self.empty - # + w_obj = self._get_argument(name) - if conversion is not None: + if conversion != -1: w_obj = self._convert(w_obj, conversion) if recursive: spec = self._build_string(spec_start, end, level) + w_rendered = self.space.format(w_obj, self.space.wrap(spec)) unwrapper = "unicode_w" if self.is_unicode else "str_w" to_interp = getattr(self.space, unwrapper) @@ -348,7 +364,7 @@ self.parser_list_w = [] self.last_end = 0 self._build_string(0, len(self.template), 2) - # + space = self.space if self.last_end < len(self.template): w_lastentry = space.newtuple([ @@ -413,7 +429,7 @@ def __init__(self, space, is_unicode, spec): self.space = space self.is_unicode = is_unicode - self.empty = u"" if is_unicode else "" + self.empty = Utf8Str("") if is_unicode else "" self.spec = spec def _is_alignment(self, c): @@ -429,78 +445,76 @@ def _parse_spec(self, default_type, default_align): space = self.space - self._fill_char = self._lit("\0")[0] - self._align = default_align + self._fill_char = ord("\0") + + self._align = ord(default_align) self._alternate = False - self._sign = "\0" + self._sign = ord("\0") self._thousands_sep = False self._precision = -1 - the_type = default_type + spec = self.spec if not spec: return True + length = len(spec) i = 0 got_align = True - if length - i >= 2 and self._is_alignment(spec[i + 1]): - self._align = spec[i + 1] - self._fill_char = spec[i] + + if length - i >= 2 and self._is_alignment(ORD(spec, i + 1)): + self._align = ORD(spec, i + 1) + self._fill_char = ORD(spec, i) i += 2 - elif length - i >= 1 and self._is_alignment(spec[i]): - self._align = spec[i] + elif length - i >= 1 and self._is_alignment(ORD(spec, i)): + self._align = ORD(spec, i) i += 
1 else: got_align = False - if length - i >= 1 and self._is_sign(spec[i]): - self._sign = spec[i] + + if length - i >= 1 and self._is_sign(ORD(spec, i)): + self._sign = ORD(spec, i) i += 1 - if length - i >= 1 and spec[i] == "#": + if length - i >= 1 and ORD(spec, i) == ord("#"): self._alternate = True i += 1 - if self._fill_char == "\0" and length - i >= 1 and spec[i] == "0": - self._fill_char = self._lit("0")[0] + + if (self._fill_char == ord("\0") and length - i >= 1 and + ORD(spec, i) == ord("0")): + self._fill_char = ord("0") if not got_align: - self._align = "=" + self._align = ord("=") i += 1 + self._width, i = _parse_int(self.space, spec, i, length) - if length != i and spec[i] == ",": + if length != i and ORD(spec, i) == ord(","): self._thousands_sep = True i += 1 - if length != i and spec[i] == ".": + if length != i and ORD(spec, i) == ord("."): i += 1 self._precision, i = _parse_int(self.space, spec, i, length) if self._precision == -1: raise OperationError(space.w_ValueError, space.wrap("no precision given")) + if length - i > 1: raise OperationError(space.w_ValueError, space.wrap("invalid format spec")) if length - i == 1: - presentation_type = spec[i] if self.is_unicode: try: - the_type = spec[i].encode("ascii")[0] + self._type = unicode_encode_ascii(spec[i], 1, 'strict')[0] except UnicodeEncodeError: raise OperationError(space.w_ValueError, space.wrap("invalid presentation type")) else: - the_type = presentation_type + self._type = spec[i] i += 1 - self._type = the_type + else: + self._type = default_type + if self._thousands_sep: - tp = self._type - if (tp == "d" or - tp == "e" or - tp == "f" or - tp == "g" or - tp == "E" or - tp == "G" or - tp == "%" or - tp == "F" or - tp == "\0"): - # ok - pass - else: + if self._type not in ('d', 'e', 'f', 'g', 'E', 'G', '%', 'F', + '\0'): raise OperationError(space.w_ValueError, space.wrap("invalid type with ','")) return False @@ -511,12 +525,13 @@ total = self._width else: total = length + align = self._align 
- if align == ">": + if align == ord(">"): left = total - length - elif align == "^": + elif align == ord("^"): left = (total - length) / 2 - elif align == "<" or align == "=": + elif align == ord("<") or align == ord("="): left = 0 else: raise AssertionError("shouldn't be here") @@ -525,22 +540,16 @@ self._right_pad = right return total - def _lit(self, s): - if self.is_unicode: - return s.decode("ascii") - else: - return s - def _pad(self, string): builder = self._builder() - builder.append_multiple_char(self._fill_char, self._left_pad) + builder.append_multiple_char(chr(self._fill_char), self._left_pad) builder.append(string) - builder.append_multiple_char(self._fill_char, self._right_pad) + builder.append_multiple_char(chr(self._fill_char), self._right_pad) return builder.build() def _builder(self): if self.is_unicode: - return rstring.UnicodeBuilder() + return Utf8Builder() else: return rstring.StringBuilder() @@ -555,23 +564,25 @@ return space.wrap(string) if self._type != "s": self._unknown_presentation("string") - if self._sign != "\0": + if self._sign != ord("\0"): msg = "Sign not allowed in string format specifier" raise OperationError(space.w_ValueError, space.wrap(msg)) if self._alternate: msg = "Alternate form not allowed in string format specifier" raise OperationError(space.w_ValueError, space.wrap(msg)) - if self._align == "=": + if self._align == ord("="): msg = "'=' alignment not allowed in string format specifier" raise OperationError(space.w_ValueError, space.wrap(msg)) + length = len(string) precision = self._precision if precision != -1 and length >= precision: assert precision >= 0 length = precision string = string[:precision] - if self._fill_char == "\0": - self._fill_char = self._lit(" ")[0] + + if self._fill_char == ord("\0"): + self._fill_char = ord(" ") self._calc_padding(string, length) return space.wrap(self._pad(string)) @@ -586,9 +597,11 @@ dec = "." 
thousands = "" grouping = "\256" + if self.is_unicode: - self._loc_dec = dec.decode("ascii") - self._loc_thousands = thousands.decode("ascii") + self._loc_dec = str_decode_ascii(dec, len(dec), 'strict')[0] + self._loc_thousands = str_decode_ascii( + thousands, len(thousands), 'strict')[0] else: self._loc_dec = dec self._loc_thousands = thousands @@ -617,41 +630,45 @@ spec.n_rpadding = 0 spec.n_min_width = 0 spec.n_total = 0 - spec.sign = "\0" + spec.sign = ord("\0") spec.n_sign = 0 + sign = self._sign - if sign == "+": + if sign == ord("+"): spec.n_sign = 1 - spec.sign = "-" if sign_char == "-" else "+" - elif sign == " ": + spec.sign = ord("-") if sign_char == "-" else ord("+") + elif sign == ord(" "): spec.n_sign = 1 - spec.sign = "-" if sign_char == "-" else " " + spec.sign = ord("-") if sign_char == "-" else ord(" ") elif sign_char == "-": spec.n_sign = 1 - spec.sign = "-" + spec.sign = ord("-") extra_length = (spec.n_sign + spec.n_prefix + spec.n_decimal + spec.n_remainder) # Not padding or digits - if self._fill_char == "0" and self._align == "=": + + if self._fill_char == ord("0") and self._align == ord("="): spec.n_min_width = self._width - extra_length if self._loc_thousands: self._group_digits(spec, digits[to_number:]) n_grouped_digits = len(self._grouped_digits) else: n_grouped_digits = spec.n_digits + n_padding = self._width - (extra_length + n_grouped_digits) if n_padding > 0: align = self._align - if align == "<": + if align == ord("<"): spec.n_rpadding = n_padding - elif align == ">": + elif align == ord(">"): spec.n_lpadding = n_padding - elif align == "^": + elif align == ord("^"): spec.n_lpadding = n_padding // 2 spec.n_rpadding = n_padding - spec.n_lpadding - elif align == "=": + elif align == ord("="): spec.n_spadding = n_padding else: raise AssertionError("shouldn't reach") + spec.n_total = spec.n_lpadding + spec.n_sign + spec.n_prefix + \ spec.n_spadding + n_grouped_digits + \ spec.n_decimal + spec.n_remainder + spec.n_rpadding @@ -720,21 
+737,28 @@ def _fill_number(self, spec, num, to_digits, to_prefix, fill_char, to_remainder, upper, grouped_digits=None): out = self._builder() + if spec.n_lpadding: - out.append_multiple_char(fill_char[0], spec.n_lpadding) + out.append_multiple_char(chr(fill_char), spec.n_lpadding) + if spec.n_sign: if self.is_unicode: - sign = spec.sign.decode("ascii") + # TODO: A better way to do this might be to check if + # spec.sign < 127 ... + sign = str_decode_ascii(chr(spec.sign), 1, 'strict')[0] else: - sign = spec.sign + sign = chr(spec.sign) out.append(sign) + if spec.n_prefix: pref = num[to_prefix:to_prefix + spec.n_prefix] if upper: pref = self._upcase_string(pref) out.append(pref) + if spec.n_spadding: - out.append_multiple_char(fill_char[0], spec.n_spadding) + out.append_multiple_char(chr(fill_char), spec.n_spadding) + if spec.n_digits != 0: if self._loc_thousands: if grouped_digits is not None: @@ -749,12 +773,13 @@ if upper: digits = self._upcase_string(digits) out.append(digits) + if spec.n_decimal: - out.append(self._lit(".")[0]) + out.append(".") if spec.n_remainder: out.append(num[to_remainder:]) if spec.n_rpadding: - out.append_multiple_char(fill_char[0], spec.n_rpadding) + out.append_multiple_char(chr(fill_char), spec.n_rpadding) #if complex, need to call twice - just retun the buffer return out.build() @@ -764,14 +789,14 @@ msg = "precision not allowed in integer type" raise OperationError(space.w_ValueError, space.wrap(msg)) sign_char = "\0" - tp = self._type - if tp == "c": - if self._sign != "\0": + + if self._type == "c": + if self._sign != ord("\0"): msg = "sign not allowed with 'c' presentation type" raise OperationError(space.w_ValueError, space.wrap(msg)) value = space.int_w(w_num) if self.is_unicode: - result = runicode.UNICHR(value) + result = utf8chr(value) else: result = chr(value) n_digits = 1 @@ -781,16 +806,16 @@ to_prefix = 0 to_numeric = 0 else: - if tp == "b": + if self._type == "b": base = 2 skip_leading = 2 - elif tp == "o": + elif 
self._type == "o": base = 8 skip_leading = 2 - elif tp == "x" or tp == "X": + elif self._type == "x" or self._type == "X": base = 16 skip_leading = 2 - elif tp == "n" or tp == "d": + elif self._type == "n" or self._type == "d": base = 10 skip_leading = 0 else: @@ -801,7 +826,7 @@ result = self._long_to_base(base, space.bigint_w(w_num)) n_prefix = skip_leading if self._alternate else 0 to_prefix = 0 - if result[0] == "-": + if ORD(result, 0) == ord("-"): sign_char = "-" skip_leading += 1 to_prefix += 1 @@ -809,10 +834,10 @@ n_remainder = 0 to_remainder = 0 to_numeric = skip_leading - self._get_locale(tp) + self._get_locale(self._type) spec = self._calc_num_width(n_prefix, sign_char, to_numeric, n_digits, n_remainder, False, result) - fill = self._lit(" ") if self._fill_char == "\0" else self._fill_char + fill = ord(" ") if self._fill_char == ord("\0") else self._fill_char upper = self._type == "X" return self.space.wrap(self._fill_number(spec, result, to_numeric, to_prefix, fill, to_remainder, upper)) @@ -827,14 +852,14 @@ prefix = "0x" as_str = value.format(LONG_DIGITS[:base], prefix) if self.is_unicode: - return as_str.decode("ascii") + return str_decode_ascii(as_str, len(as_str), 'strict')[0] return as_str def _int_to_base(self, base, value): if base == 10: s = str(value) if self.is_unicode: - return s.decode("ascii") + return str_decode_ascii(s, len(s), 'strict')[0] return s # This part is slow. 
negative = value < 0 @@ -879,22 +904,10 @@ if self.is_unicode: return space.call_function(space.w_unicode, w_num) return self.space.str(w_num) - tp = self._type - if (tp == "b" or - tp == "c" or - tp == "d" or - tp == "o" or - tp == "x" or - tp == "X" or - tp == "n"): + + if self._type in ("b", "c", "d", "o", "x", "X", "n"): return self._format_int_or_long(w_num, kind) - elif (tp == "e" or - tp == "E" or - tp == "f" or - tp == "F" or - tp == "g" or - tp == "G" or - tp == "%"): + elif self._type in ("e", "E", "f", "F", "g", "G", "%"): w_float = space.float(w_num) return self._format_float(w_float) else: @@ -921,6 +934,7 @@ if self._alternate: msg = "alternate form not allowed in float formats" raise OperationError(space.w_ValueError, space.wrap(msg)) + tp = self._type self._get_locale(tp) if tp == "\0": @@ -929,6 +943,7 @@ flags |= rfloat.DTSF_ADD_DOT_0 elif tp == "n": tp = "g" + value = space.float_w(w_float) if tp == "%": tp = "f" @@ -936,6 +951,7 @@ add_pct = True else: add_pct = False + if self._precision == -1: self._precision = default_precision result, special = rfloat.double_to_string(value, tp, @@ -943,22 +959,26 @@ if add_pct: result += "%" n_digits = len(result) - if result[0] == "-": + + if ORD(result, 0) == ord("-"): sign = "-" to_number = 1 n_digits -= 1 else: sign = "\0" to_number = 0 + have_dec_point, to_remainder = self._parse_number(result, to_number) n_remainder = len(result) - to_remainder + if self.is_unicode: - digits = result.decode("ascii") + digits = str_decode_ascii(result , len(result), 'strict')[0] else: digits = result + spec = self._calc_num_width(0, sign, to_number, n_digits, n_remainder, have_dec_point, digits) - fill = self._lit(" ") if self._fill_char == "\0" else self._fill_char + fill = ord(" ") if self._fill_char == ord("\0") else self._fill_char return self.space.wrap(self._fill_number(spec, digits, to_number, 0, fill, to_remainder, False)) @@ -968,30 +988,23 @@ if self.is_unicode: return space.call_function(space.w_unicode, 
w_float) return space.str(w_float) - tp = self._type - if (tp == "\0" or - tp == "e" or - tp == "E" or - tp == "f" or - tp == "F" or - tp == "g" or - tp == "G" or - tp == "n" or - tp == "%"): + + if self._type in ("\0", "e", "E", "f", "F", "g", "G", "n", "%"): return self._format_float(w_float) self._unknown_presentation("float") def _format_complex(self, w_complex): space = self.space + tp = self._type self._get_locale(tp) default_precision = 6 - if self._align == "=": + if self._align == ord("="): # '=' alignment is invalid msg = ("'=' alignment flag is not allowed in" " complex format specifier") raise OperationError(space.w_ValueError, space.wrap(msg)) - if self._fill_char == "0": + if self._fill_char == ord("0"): #zero padding is invalid msg = "Zero padding is not allowed in complex format specifier" raise OperationError(space.w_ValueError, space.wrap(msg)) @@ -1047,7 +1060,7 @@ tmp_fill_char = self._fill_char tmp_align = self._align tmp_width = self._width - self._fill_char = "\0" + self._fill_char = ord("\0") self._align = "<" self._width = -1 @@ -1058,8 +1071,8 @@ to_imag_number) if self.is_unicode: - re_num = re_num.decode("ascii") - im_num = im_num.decode("ascii") + re_num = str_decode_ascii(re_num, len(re_num), 'strict')[0] + im_num = str_decode_ascii(im_num, len(im_num), 'strict')[0] #set remainder, in CPython _parse_number sets this #using n_re_digits causes tests to fail @@ -1073,7 +1086,7 @@ #self._grouped_digits will get overwritten in imaginary calc_num_width re_grouped_digits = self._grouped_digits if not skip_re: - self._sign = "+" + self._sign = ord("+") im_spec = self._calc_num_width(0, im_sign, to_imag_number, n_im_digits, im_n_remainder, im_have_dec, im_num) @@ -1093,14 +1106,14 @@ out = self._builder() fill = self._fill_char - if fill == "\0": - fill = self._lit(" ")[0] + if fill == ord("\0"): + fill = ord(" ") #compose the string #add left padding - out.append_multiple_char(fill, self._left_pad) + out.append_multiple_char(chr(fill), 
self._left_pad) if add_parens: - out.append(self._lit('(')[0]) + out.append('(') #if the no. has a real component, add it if not skip_re: @@ -1114,13 +1127,13 @@ im_grouped_digits)) #add 'j' character - out.append(self._lit('j')[0]) + out.append('j') if add_parens: - out.append(self._lit(')')[0]) + out.append(')') #add right padding - out.append_multiple_char(fill, self._right_pad) + out.append_multiple_char(chr(fill), self._right_pad) return self.space.wrap(out.build()) @@ -1131,15 +1144,8 @@ #parse format specification, set associated variables if self._parse_spec("\0", ">"): return space.str(w_complex) - tp = self._type - if (tp == "\0" or - tp == "e" or - tp == "E" or - tp == "f" or - tp == "F" or - tp == "g" or - tp == "G" or - tp == "n"): + + if self._type in ('\0', 'e', 'E', 'f', 'F', 'g', 'G', 'n'): return self._format_complex(w_complex) self._unknown_presentation("complex") return Formatter diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -162,9 +162,6 @@ if isinstance(x, Utf8Str): return wrapunicode(self, x) - if isinstance(x, unicode): - import pdb; pdb.set_trace() - if isinstance(x, float): return W_FloatObject(x) if isinstance(x, W_Root): diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -585,13 +585,13 @@ by = self._op_val(space, w_sep) if len(by) == 0: raise oefmt(space.w_ValueError, "empty separator") - res = self._split(value, by, maxsplit) + res = self._rsplit(value, by, maxsplit) return self._newlist_unwrapped(space, res) @staticmethod def _rsplit(value, sep=None, maxsplit=-1): - return value.split(sep, maxsplit) + return rsplit(value, sep, maxsplit) @unwrap_spec(keepends=bool) def descr_splitlines(self, space, keepends=False): @@ -606,7 +606,8 @@ eol = pos pos += 1 # read CRLF as one line break - if pos < length and value[eol] 
== '\r' and value[pos] == '\n': + if (pos < length and ORD(value, eol) == ord('\r') and + ORD(value, pos) == ord('\n')): pos += 1 if keepends: eol = pos diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -3,6 +3,7 @@ from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, BytesDictStrategy, ObjectDictStrategy) +from pypy.interpreter.utf8 import Utf8Str class TestW_DictObject(object): @@ -142,8 +143,9 @@ def test_listview_unicode_dict(self): w = self.space.wrap + w_u = lambda x: w(Utf8Str.from_unicode(x)) w_d = self.space.newdict() - w_d.initialize_content([(w(u"a"), w(1)), (w(u"b"), w(2))]) + w_d.initialize_content([(w_u(u"a"), w(1)), (w_u(u"b"), w(2))]) assert self.space.listview_unicode(w_d) == [u"a", u"b"] def test_listview_int_dict(self): @@ -154,7 +156,8 @@ def test_keys_on_string_unicode_int_dict(self, monkeypatch): w = self.space.wrap - + w_u = lambda x: w(Utf8Str.from_unicode(x)) + w_d = self.space.newdict() w_d.initialize_content([(w(1), w("a")), (w(2), w("b"))]) w_l = self.space.call_method(w_d, "keys") @@ -174,7 +177,7 @@ # but we need space.newlist_unicode for it monkeypatch.undo() w_d = self.space.newdict() - w_d.initialize_content([(w(u"a"), w(1)), (w(u"b"), w(6))]) + w_d.initialize_content([(w_u(u"a"), w(1)), (w_u(u"b"), w(6))]) w_l = self.space.call_method(w_d, "keys") assert sorted(self.space.listview_unicode(w_l)) == [u"a", u"b"] diff --git a/pypy/objspace/std/test/test_index.py b/pypy/objspace/std/test/test_index.py --- a/pypy/objspace/std/test/test_index.py +++ b/pypy/objspace/std/test/test_index.py @@ -1,5 +1,7 @@ from py.test import raises +from pypy.interpreter.utf8 import Utf8Str + class AppTest_IndexProtocol: def setup_class(self): w_oldstyle = self.space.appexec([], """(): @@ -263,7 +265,7 @@ class AppTest_UnicodeTestCase(SeqTestCase, StringTestCase): def 
setup_method(self, method): SeqTestCase.setup_method(self, method) - self.w_seq = self.space.wrap(u"this is a test") + self.w_seq = self.space.wrap(Utf8Str.from_unicode(u"this is a test")) self.w_const = self.space.appexec([], """(): return unicode""") diff --git a/pypy/objspace/std/test/test_lengthhint.py b/pypy/objspace/std/test/test_lengthhint.py --- a/pypy/objspace/std/test/test_lengthhint.py +++ b/pypy/objspace/std/test/test_lengthhint.py @@ -1,3 +1,4 @@ +from pypy.interpreter.utf8 import Utf8Str from pypy.module._collections.interp_deque import W_Deque from pypy.module.itertools.interp_itertools import W_Repeat @@ -71,7 +72,7 @@ self._test_length_hint(self.space.wrap('P' * self.SIZE)) def test_unicode(self): - self._test_length_hint(self.space.wrap(u'Y' * self.SIZE)) + self._test_length_hint(self.space.wrap(Utf8Str('Y' * self.SIZE))) def test_tuple(self): self._test_length_hint(self.space.wrap(tuple(self.ITEMS))) diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,4 +1,5 @@ import sys +from pypy.interpreter.utf8 import Utf8Str from pypy.objspace.std.listobject import ( W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, BytesListStrategy, RangeListStrategy, @@ -11,20 +12,22 @@ def test_check_strategy(self): space = self.space w = space.wrap + w_u = lambda x: w(Utf8Str(x)) assert isinstance(W_ListObject(space, []).strategy, EmptyListStrategy) assert isinstance(W_ListObject(space, [w(1),w('a')]).strategy, ObjectListStrategy) assert isinstance(W_ListObject(space, [w(1),w(2),w(3)]).strategy, IntegerListStrategy) assert isinstance(W_ListObject(space, [w('a'), w('b')]).strategy, BytesListStrategy) - assert isinstance(W_ListObject(space, [w(u'a'), w(u'b')]).strategy, + assert isinstance(W_ListObject(space, [w_u('a'), w_u('b')]).strategy, UnicodeListStrategy) - 
assert isinstance(W_ListObject(space, [w(u'a'), w('b')]).strategy, + assert isinstance(W_ListObject(space, [w_u('a'), w('b')]).strategy, ObjectListStrategy) # mixed unicode and bytes def test_empty_to_any(self): space = self.space w = space.wrap + w_u = lambda x: w(Utf8Str(x)) l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.append(w((1,3))) @@ -42,7 +45,7 @@ l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) - l.append(w(u'a')) + l.append(w_u('a')) assert isinstance(l.strategy, UnicodeListStrategy) l = W_ListObject(space, []) @@ -70,9 +73,10 @@ def test_unicode_to_any(self): space = self.space - l = W_ListObject(space, [space.wrap(u'a'), space.wrap(u'b'), space.wrap(u'c')]) + w_u = lambda x: space.wrap(Utf8Str(x)) + l = W_ListObject(space, [w_u('a'), w_u('b'), w_u('c')]) assert isinstance(l.strategy, UnicodeListStrategy) - l.append(space.wrap(u'd')) + l.append(w_u('d')) assert isinstance(l.strategy, UnicodeListStrategy) l.append(space.wrap(3)) assert isinstance(l.strategy, ObjectListStrategy) @@ -89,6 +93,7 @@ def test_setitem(self): space = self.space w = space.wrap + w_u = lambda x: w(Utf8Str(x)) # This should work if test_listobject.py passes l = W_ListObject(space, [w('a'),w('b'),w('c')]) assert space.eq_w(l.getitem(0), w('a')) @@ -110,7 +115,7 @@ assert isinstance(l.strategy, ObjectListStrategy) # UnicodeStrategy to ObjectStrategy - l = W_ListObject(space, [w(u'a'),w(u'b'),w(u'c')]) + l = W_ListObject(space, [w_u('a'),w_u('b'),w_u('c')]) assert isinstance(l.strategy, UnicodeListStrategy) l.setitem(0, w(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -124,6 +129,7 @@ def test_insert(self): space = self.space w = space.wrap + w_u = lambda x: w(Utf8Str(x)) # no change l = W_ListObject(space, [w(1),w(2),w(3)]) assert isinstance(l.strategy, IntegerListStrategy) @@ -137,7 +143,7 @@ assert isinstance(l.strategy, ObjectListStrategy) # UnicodeStrategy - l = W_ListObject(space, [w(u'a'),w(u'b'),w(u'c')]) 
+ l = W_ListObject(space, [w_u('a'),w_u('b'),w_u('c')]) assert isinstance(l.strategy, UnicodeListStrategy) l.insert(3, w(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -186,6 +192,7 @@ def test_setslice(self): space = self.space w = space.wrap + w_u = lambda x: w(Utf8Str(x)) l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -217,7 +224,7 @@ assert isinstance(l.strategy, ObjectListStrategy) # UnicodeStrategy to ObjectStrategy - l = W_ListObject(space, [w(u'a'), w(u'b'), w(u'c')]) + l = W_ListObject(space, [w_u('a'), w_u('b'), w_u('c')]) assert isinstance(l.strategy, UnicodeListStrategy) l.setslice(0, 1, 2, W_ListObject(space, [w(1), w(2), w(3)])) assert isinstance(l.strategy, ObjectListStrategy) @@ -242,6 +249,8 @@ def wrapitems(items): items_w = [] for i in items: + if isinstance(i, unicode): + i = Utf8Str.from_unicode(i) items_w.append(space.wrap(i)) return items_w @@ -323,6 +332,7 @@ def test_empty_extend_with_any(self): space = self.space w = space.wrap + w_u = lambda x: w(Utf8Str(x)) empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) @@ -336,7 +346,7 @@ empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) - empty.extend(W_ListObject(space, [w(u"a"), w(u"b"), w(u"c")])) + empty.extend(W_ListObject(space, [w_u("a"), w_u("b"), w_u("c")])) assert isinstance(empty.strategy, UnicodeListStrategy) empty = W_ListObject(space, []) @@ -588,11 +598,12 @@ assert self.space.eq_w(l3, W_ListObject(self.space, [self.space.wrap(1), self.space.wrap(2), self.space.wrap(3), self.space.wrap(4), self.space.wrap(5)])) def test_unicode(self): + wrap_u = lambda x: self.space.wrap(Utf8Str(x)) l1 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap("zwei")]) assert isinstance(l1.strategy, BytesListStrategy) - l2 = W_ListObject(self.space, [self.space.wrap(u"eins"), self.space.wrap(u"zwei")]) + l2 = W_ListObject(self.space, [wrap_u("eins"), wrap_u("zwei")]) assert 
isinstance(l2.strategy, UnicodeListStrategy) - l3 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap(u"zwei")]) + l3 = W_ListObject(self.space, [self.space.wrap("eins"), wrap_u(u"zwei")]) assert isinstance(l3.strategy, ObjectListStrategy) def test_listview_bytes(self): @@ -603,20 +614,22 @@ def test_listview_unicode(self): space = self.space + wrap_u = lambda x: self.space.wrap(Utf8Str(x)) assert space.listview_unicode(space.wrap(1)) == None - w_l = self.space.newlist([self.space.wrap(u'a'), self.space.wrap(u'b')]) + w_l = self.space.newlist([wrap_u('a'), wrap_u('b')]) assert space.listview_unicode(w_l) == [u"a", u"b"] def test_string_join_uses_listview_bytes(self): space = self.space + wrap_u = lambda x: self.space.wrap(Utf8Str(x)) w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) w_l.getitems = None assert space.str_w(space.call_method(space.wrap("c"), "join", w_l)) == "acb" # # the same for unicode - w_l = self.space.newlist([self.space.wrap(u'a'), self.space.wrap(u'b')]) + w_l = self.space.newlist([wrap_u('a'), wrap_u('b')]) w_l.getitems = None - assert space.unicode_w(space.call_method(space.wrap(u"c"), "join", w_l)) == u"acb" + assert space.unicode_w(space.call_method(wrap_u("c"), "join", w_l)) == u"acb" def test_string_join_returns_same_instance(self): space = self.space @@ -626,10 +639,11 @@ assert space.is_w(space.call_method(space.wrap(" -- "), "join", w_l), w_text) # # the same for unicode - w_text = space.wrap(u"text") + w_base = space.wrap(Utf8Str(" -- ")) + w_text = space.wrap(Utf8Str("text")) w_l = self.space.newlist([w_text]) w_l.getitems = None - assert space.is_w(space.call_method(space.wrap(u" -- "), "join", w_l), w_text) + assert space.is_w(space.call_method(w_base, "join", w_l), w_text) def test_newlist_bytes(self): space = self.space @@ -656,7 +670,7 @@ def test_unicode_uses_newlist_unicode(self): space = self.space - w_u = space.wrap(u"a b c") + w_u = space.wrap(Utf8Str("a b c")) space.newlist = None 
try: w_l = space.call_method(w_u, "split") @@ -711,7 +725,8 @@ def test_listview_unicode_list(self): space = self.space - w_l = W_ListObject(space, [space.wrap(u"a"), space.wrap(u"b")]) + wrap_u = lambda x: self.space.wrap(Utf8Str(x)) + w_l = W_ListObject(space, [wrap_u("a"), wrap_u("b")]) assert self.space.listview_unicode(w_l) == [u"a", u"b"] def test_listview_int_list(self): diff --git a/pypy/objspace/std/test/test_newformat.py b/pypy/objspace/std/test/test_newformat.py --- a/pypy/objspace/std/test/test_newformat.py +++ b/pypy/objspace/std/test/test_newformat.py @@ -382,30 +382,30 @@ assert l == [('abcd', None, None, None)] # l = list('ab{0}cd'._formatter_parser()) - assert l == [('ab', '0', '', None), ('cd', None, None, None)] + assert l == [('ab', '0', '', -1), ('cd', None, None, None)] # l = list('{0}cd'._formatter_parser()) - assert l == [('', '0', '', None), ('cd', None, None, None)] + assert l == [('', '0', '', -1), ('cd', None, None, None)] # l = list('ab{0}'._formatter_parser()) - assert l == [('ab', '0', '', None)] + assert l == [('ab', '0', '', -1)] # l = list(''._formatter_parser()) assert l == [] # l = list('{0:123}'._formatter_parser()) - assert l == [('', '0', '123', None)] + assert l == [('', '0', '123', -1)] # l = list('{0!x:123}'._formatter_parser()) - assert l == [('', '0', '123', 'x')] + assert l == [('', '0', '123', ord('x'))] # l = list('{0!x:12{sdd}3}'._formatter_parser()) - assert l == [('', '0', '12{sdd}3', 'x')] + assert l == [('', '0', '12{sdd}3', ord('x'))] def test_u_formatter_parser(self): l = list(u'{0!x:12{sdd}3}'._formatter_parser()) - assert l == [(u'', u'0', u'12{sdd}3', u'x')] - for x in l[0]: + assert l == [(u'', u'0', u'12{sdd}3', ord(u'x'))] + for x in l[0][:-1]: assert isinstance(x, unicode) def test_formatter_parser_escape(self): From noreply at buildbot.pypy.org Tue Jul 8 09:49:01 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 8 Jul 2014 09:49:01 +0200 (CEST) Subject: [pypy-commit] pypy default: accept unicode 
keys Message-ID: <20140708074901.D73BE1C0F1D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r72384:c8d30edc0498 Date: 2014-07-08 09:48 +0200 http://bitbucket.org/pypy/pypy/changeset/c8d30edc0498/ Log: accept unicode keys diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -98,6 +98,17 @@ self.assertTrue(key in self.g) self.assertTrue(self.g.has_key(key)) + def test_unicode_key(self): + key = u'ab' + value = u'cd' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) + def test_main(): run_unittest(TestGdbm) diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -50,6 +50,8 @@ pass def _fromstr(key): + if isinstance(key, unicode): + key = key.encode("ascii") if not isinstance(key, str): raise TypeError("gdbm mappings have string indices only") return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} From noreply at buildbot.pypy.org Tue Jul 8 09:59:40 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 8 Jul 2014 09:59:40 +0200 (CEST) Subject: [pypy-commit] pypy default: pass the errno as positional arg too Message-ID: <20140708075940.D3D741D35AE@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r72385:df9c15f76446 Date: 2014-07-08 09:59 +0200 http://bitbucket.org/pypy/pypy/changeset/df9c15f76446/ Log: pass the errno as positional arg too diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -73,8 +73,8 @@ def _raise_from_errno(self): if ffi.errno: - raise error(os.strerror(ffi.errno)) - raise error(lib.gdbm_strerror(lib.gdbm_errno)) + raise error(ffi.errno, os.strerror(ffi.errno)) + raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno)) 
def __len__(self): if self.size < 0: @@ -143,7 +143,7 @@ def _check_closed(self): if not self.ll_dbm: - raise error("GDBM object has already been closed") + raise error(0, "GDBM object has already been closed") __del__ = close @@ -161,7 +161,7 @@ elif flags[0] == 'n': iflags = lib.GDBM_NEWDB else: - raise error("First flag must be one of 'r', 'w', 'c' or 'n'") + raise error(0, "First flag must be one of 'r', 'w', 'c' or 'n'") for flag in flags[1:]: if flag == 'f': iflags |= lib.GDBM_FAST @@ -170,7 +170,7 @@ elif flag == 'u': iflags |= lib.GDBM_NOLOCK else: - raise error("Flag '%s' not supported" % flag) + raise error(0, "Flag '%s' not supported" % flag) return gdbm(filename, iflags, mode) open_flags = "rwcnfsu" From noreply at buildbot.pypy.org Tue Jul 8 15:26:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Jul 2014 15:26:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Avoid unnecessary wrapping and unwrapping of the keys in update(). This is not JITted code. Message-ID: <20140708132636.20BA91C3288@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72386:844c2e1c2673 Date: 2014-07-08 13:13 +0200 http://bitbucket.org/pypy/pypy/changeset/844c2e1c2673/ Log: Avoid unnecessary wrapping and unwrapping of the keys in update(). This is not JITted code. 
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -699,9 +699,10 @@ else: wrapkey = dictimpl.wrapkey.im_func if not hasattr(dictimpl, 'wrapvalue'): - wrapvalue = lambda space, key: key + wrapvalue = lambda space, value: value else: wrapvalue = dictimpl.wrapvalue.im_func + setitem_untyped = getattr(dictimpl, 'setitem_untyped', None) class IterClassKeys(BaseKeyIterator): def __init__(self, space, strategy, impl): @@ -770,10 +771,19 @@ w_dict.length() - 1) else: spec = _SPEC1 - for key, value in self.getiteritems(w_dict): - w_key = wrapkey(self.space, key) - w_value = wrapvalue(self.space, value) - w_updatedict.setitem(w_key, w_value) + iteritems = self.getiteritems(w_dict) + for key, value in iteritems: + if spec is not _SPEC3: + if (setitem_untyped is not None and + self is w_updatedict.strategy): + dstorage = w_updatedict.dstorage + spec = _SPEC3 + else: + w_key = wrapkey(self.space, key) + w_value = wrapvalue(self.space, value) + w_updatedict.setitem(w_key, w_value) + if spec is _SPEC3: + setitem_untyped(self, dstorage, key, value) if spec is _SPEC1: spec = _SPEC2 w_updatedict.strategy.prepare_update(w_updatedict, @@ -786,8 +796,9 @@ create_iterator_classes(EmptyDictStrategy) -_SPEC1 = SpecTag() -_SPEC2 = SpecTag() +_SPEC1 = SpecTag() # first iteration +_SPEC2 = SpecTag() # all other iteration +_SPEC3 = SpecTag() # same strategy with setitem_untyped() # concrete subclasses of the above @@ -907,6 +918,9 @@ objectmodel.prepare_dict_update(self.unerase(w_dict.dstorage), num_extra) + def setitem_untyped(self, dstorage, key, w_value): + self.unerase(dstorage)[key] = w_value + class ObjectDictStrategy(AbstractTypedStrategy, DictStrategy): erase, unerase = rerased.new_erasing_pair("object") From noreply at buildbot.pypy.org Tue Jul 8 15:26:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Jul 2014 15:26:37 +0200 (CEST) Subject: 
[pypy-commit] pypy default: Found another reason for slowness of dict.update(): the keys are wrapped Message-ID: <20140708132637.7CBAC1C3288@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72387:7f9dca73c7b6 Date: 2014-07-08 13:33 +0200 http://bitbucket.org/pypy/pypy/changeset/7f9dca73c7b6/ Log: Found another reason for slowness of dict.update(): the keys are wrapped and unwrapped. Fixed, and removed the magic SpecTag too. diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -3,7 +3,6 @@ from rpython.rlib import jit, rerased, objectmodel from rpython.rlib.debug import mark_dict_non_null from rpython.rlib.objectmodel import newlist_hint, r_dict, specialize -from rpython.rlib.unroll import SpecTag from rpython.tool.sourcetools import func_renamer, func_with_new_name from pypy.interpreter.baseobjspace import W_Root @@ -759,35 +758,51 @@ # this is very similar to the general version, but the difference # is that it is specialized to call a specific next_item() iteritems = IterClassItems(self.space, self, w_dict) - spec = _SPEC1 + w_key, w_value = iteritems.next_item() + if w_key is None: + return + w_updatedict.setitem(w_key, w_value) + w_updatedict.strategy.prepare_update(w_updatedict, + w_dict.length() - 1) while True: w_key, w_value = iteritems.next_item() if w_key is None: + return + w_updatedict.setitem(w_key, w_value) + else: + iteritems = self.getiteritems(w_dict) + if not same_strategy(self, w_updatedict): + # Different strategy. 
Try to copy one item of w_dict + for key, value in iteritems: + w_key = wrapkey(self.space, key) + w_value = wrapvalue(self.space, value) + w_updatedict.setitem(w_key, w_value) break - w_updatedict.setitem(w_key, w_value) - if spec is _SPEC1: - spec = _SPEC2 - w_updatedict.strategy.prepare_update(w_updatedict, - w_dict.length() - 1) - else: - spec = _SPEC1 - iteritems = self.getiteritems(w_dict) - for key, value in iteritems: - if spec is not _SPEC3: - if (setitem_untyped is not None and - self is w_updatedict.strategy): - dstorage = w_updatedict.dstorage - spec = _SPEC3 - else: + else: + return # w_dict is completely empty, nothing to do + count = w_dict.length() - 1 + w_updatedict.strategy.prepare_update(w_updatedict, count) + # If the strategy is still different, continue the slow way + if not same_strategy(self, w_updatedict): + for key, value in iteritems: w_key = wrapkey(self.space, key) w_value = wrapvalue(self.space, value) w_updatedict.setitem(w_key, w_value) - if spec is _SPEC3: - setitem_untyped(self, dstorage, key, value) - if spec is _SPEC1: - spec = _SPEC2 - w_updatedict.strategy.prepare_update(w_updatedict, - w_dict.length() - 1) + return # done + else: + # Same strategy. + self.prepare_update(w_updatedict, w_dict.length()) + # + # Use setitem_untyped() to speed up copying without + # wrapping/unwrapping the key. 
+ assert setitem_untyped is not None + dstorage = w_updatedict.dstorage + for key, value in iteritems: + setitem_untyped(self, dstorage, key, value) + + def same_strategy(self, w_otherdict): + return (setitem_untyped is not None and + w_otherdict.strategy is self) dictimpl.iterkeys = iterkeys dictimpl.itervalues = itervalues @@ -796,10 +811,6 @@ create_iterator_classes(EmptyDictStrategy) -_SPEC1 = SpecTag() # first iteration -_SPEC2 = SpecTag() # all other iteration -_SPEC3 = SpecTag() # same strategy with setitem_untyped() - # concrete subclasses of the above From noreply at buildbot.pypy.org Tue Jul 8 15:26:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Jul 2014 15:26:38 +0200 (CEST) Subject: [pypy-commit] pypy default: Translation fix Message-ID: <20140708132638.BCB2F1C3288@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72388:af210d996bf8 Date: 2014-07-08 13:51 +0200 http://bitbucket.org/pypy/pypy/changeset/af210d996bf8/ Log: Translation fix diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -701,7 +701,12 @@ wrapvalue = lambda space, value: value else: wrapvalue = dictimpl.wrapvalue.im_func - setitem_untyped = getattr(dictimpl, 'setitem_untyped', None) + if not hasattr(dictimpl, 'setitem_untyped'): + setitem_untyped = None + else: + setitem_untyped = dictimpl.setitem_untyped.im_func + setitem_untyped = func_with_new_name(setitem_untyped, + 'setitem_untyped_%s' % dictimpl.__name__) class IterClassKeys(BaseKeyIterator): def __init__(self, space, strategy, impl): From noreply at buildbot.pypy.org Tue Jul 8 16:54:56 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 8 Jul 2014 16:54:56 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Don't actually skip these tests Message-ID: <20140708145456.7646F1C1068@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: 
r72389:62603ec2b980 Date: 2014-07-08 09:54 -0500 http://bitbucket.org/pypy/pypy/changeset/62603ec2b980/ Log: Don't actually skip these tests diff --git a/pypy/interpreter/test/test_utf8_codecs.py b/pypy/interpreter/test/test_utf8_codecs.py --- a/pypy/interpreter/test/test_utf8_codecs.py +++ b/pypy/interpreter/test/test_utf8_codecs.py @@ -3,7 +3,7 @@ import py import sys, random -from pypy.interpreter.utf8 import Utf8Str +from pypy.interpreter.utf8 import Utf8Str, utf8chr from pypy.interpreter import utf8_codecs @@ -740,12 +740,7 @@ assert encoder(u'u\u1234', 2, 'replace') == 'u?' -# TODO: Do I need to actually skip these? class TestTranslation(object): - def setup_class(cls): - if utf8_codecs.MAXUNICODE != sys.maxunicode: - py.test.skip("these tests cannot run on the llinterp") - def test_utf8(self): from rpython.rtyper.test.test_llinterp import interpret def f(x): @@ -758,16 +753,10 @@ assert res def test_encode_surrogate_pair(self): - u = runicode.UNICHR(0xD800) + runicode.UNICHR(0xDC00) - if runicode.MAXUNICODE < 65536: - # Narrow unicode build, consider utf16 surrogate pairs - assert utf8_codecs.unicode_encode_unicode_escape( - u, len(u), True) == r'\U00010000' - assert utf8_codecs.unicode_encode_raw_unicode_escape( - u, len(u), True) == r'\U00010000' - else: - # Wide unicode build, don't merge utf16 surrogate pairs - assert utf8_codecs.unicode_encode_unicode_escape( - u, len(u), True) == r'\ud800\udc00' - assert utf8_codecs.unicode_encode_raw_unicode_escape( - u, len(u), True) == r'\ud800\udc00' + u = utf8chr(0xD800) + utf8chr(0xDC00) + + # Wide unicode build, don't merge utf16 surrogate pairs + assert utf8_codecs.unicode_encode_unicode_escape( + u, len(u), True) == r'\ud800\udc00' + assert utf8_codecs.unicode_encode_raw_unicode_escape( + u, len(u), True) == r'\ud800\udc00' From noreply at buildbot.pypy.org Tue Jul 8 16:54:57 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 8 Jul 2014 16:54:57 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: 
Re-enable wrapping unicode objects Message-ID: <20140708145457.D87D21C1068@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72390:11e3ba8aad74 Date: 2014-07-08 09:54 -0500 http://bitbucket.org/pypy/pypy/changeset/11e3ba8aad74/ Log: Re-enable wrapping unicode objects diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -227,6 +227,9 @@ wrappeditems = [self.wrap(item) for item in x] return W_FrozensetObject(self, wrappeditems) + if isinstance(x, unicode): + return W_UnicodeObject(Utf8Str.from_unicode(x)) + if x is __builtin__.Ellipsis: # '__builtin__.Ellipsis' avoids confusion with special.Ellipsis return self.w_Ellipsis From noreply at buildbot.pypy.org Tue Jul 8 17:38:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Jul 2014 17:38:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Refactor the implementation of the dict iterators in RPython. The goal Message-ID: <20140708153848.3FADA1D2317@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72391:711f53c92504 Date: 2014-07-08 16:35 +0200 http://bitbucket.org/pypy/pypy/changeset/711f53c92504/ Log: Refactor the implementation of the dict iterators in RPython. The goal is to avoid the allocation of a tuple in iteritems().next(). 
diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -507,18 +507,7 @@ _ll_1_dict_values.need_result_type = True _ll_1_dict_items .need_result_type = True - _dictnext_keys = staticmethod(ll_rdict.ll_dictnext_group['keys']) - _dictnext_values = staticmethod(ll_rdict.ll_dictnext_group['values']) - _dictnext_items = staticmethod(ll_rdict.ll_dictnext_group['items']) - - def _ll_1_dictiter_nextkeys(iter): - return LLtypeHelpers._dictnext_keys(None, iter) - def _ll_1_dictiter_nextvalues(iter): - return LLtypeHelpers._dictnext_values(None, iter) - def _ll_1_dictiter_nextitems(RES, iter): - return LLtypeHelpers._dictnext_items(lltype.Ptr(RES), iter) - _ll_1_dictiter_nextitems.need_result_type = True - + _ll_1_dictiter_next = ll_rdict._ll_dictnext _ll_1_dict_resize = ll_rdict.ll_dict_resize # ---------- ordered dict ---------- @@ -534,18 +523,7 @@ _ll_1_odict_values.need_result_type = True _ll_1_odict_items .need_result_type = True - _odictnext_keys = staticmethod(rordereddict.ll_dictnext_group['keys']) - _odictnext_values = staticmethod(rordereddict.ll_dictnext_group['values']) - _odictnext_items = staticmethod(rordereddict.ll_dictnext_group['items']) - - def _ll_1_odictiter_nextkeys(iter): - return LLtypeHelpers._odictnext_keys(None, iter) - def _ll_1_odictiter_nextvalues(iter): - return LLtypeHelpers._odictnext_values(None, iter) - def _ll_1_odictiter_nextitems(RES, iter): - return LLtypeHelpers._odictnext_items(lltype.Ptr(RES), iter) - _ll_1_odictiter_nextitems.need_result_type = True - + _ll_1_odictiter_next = rordereddict._ll_dictnext _ll_1_odict_resize = rordereddict.ll_dict_resize # ---------- strings and unicode ---------- diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py --- a/rpython/rtyper/lltypesystem/rdict.py +++ b/rpython/rtyper/lltypesystem/rdict.py @@ -721,7 +721,7 @@ ('dict', r_dict.lowleveltype), ('index', 
lltype.Signed))) self.ll_dictiter = ll_dictiter - self.ll_dictnext = ll_dictnext_group[variant] + self._ll_dictnext = _ll_dictnext def ll_dictiter(ITERPTR, d): @@ -730,45 +730,26 @@ iter.index = 0 return iter -def _make_ll_dictnext(kind): - # make three versions of the following function: keys, values, items - @jit.look_inside_iff(lambda RETURNTYPE, iter: jit.isvirtual(iter) - and (iter.dict is None or - jit.isvirtual(iter.dict))) - @jit.oopspec("dictiter.next%s(iter)" % kind) - def ll_dictnext(RETURNTYPE, iter): - # note that RETURNTYPE is None for keys and values - dict = iter.dict - if dict: - entries = dict.entries - index = iter.index - assert index >= 0 - entries_len = len(entries) - while index < entries_len: - entry = entries[index] - is_valid = entries.valid(index) - index = index + 1 - if is_valid: - iter.index = index - if RETURNTYPE is lltype.Void: - return None - elif kind == 'items': - r = lltype.malloc(RETURNTYPE.TO) - r.item0 = recast(RETURNTYPE.TO.item0, entry.key) - r.item1 = recast(RETURNTYPE.TO.item1, entry.value) - return r - elif kind == 'keys': - return entry.key - elif kind == 'values': - return entry.value - # clear the reference to the dict and prevent restarts - iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) - raise StopIteration - return ll_dictnext - -ll_dictnext_group = {'keys' : _make_ll_dictnext('keys'), - 'values': _make_ll_dictnext('values'), - 'items' : _make_ll_dictnext('items')} + at jit.look_inside_iff(lambda iter: jit.isvirtual(iter) + and (iter.dict is None or + jit.isvirtual(iter.dict))) + at jit.oopspec("dictiter.next(iter)") +def _ll_dictnext(iter): + dict = iter.dict + if dict: + entries = dict.entries + index = iter.index + assert index >= 0 + entries_len = len(entries) + while index < entries_len: + nextindex = index + 1 + if entries.valid(index): + iter.index = nextindex + return index + index = nextindex + # clear the reference to the dict and prevent restarts + iter.dict = 
lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) + raise StopIteration # _____________________________________________________________ # methods diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -886,7 +886,7 @@ self.variant = variant self.lowleveltype = get_ll_dictiter(r_dict.lowleveltype) self.ll_dictiter = ll_dictiter - self.ll_dictnext = ll_dictnext_group[variant] + self._ll_dictnext = _ll_dictnext def ll_dictiter(ITERPTR, d): @@ -895,49 +895,26 @@ iter.index = 0 return iter -def _make_ll_dictnext(kind): - # make three versions of the following function: keys, values, items - @jit.look_inside_iff(lambda RETURNTYPE, iter: jit.isvirtual(iter) - and (iter.dict is None or - jit.isvirtual(iter.dict))) - @jit.oopspec("odictiter.next%s(iter)" % kind) - def ll_dictnext(RETURNTYPE, iter): - # note that RETURNTYPE is None for keys and values - dict = iter.dict - if not dict: - raise StopIteration - + at jit.look_inside_iff(lambda iter: jit.isvirtual(iter) + and (iter.dict is None or + jit.isvirtual(iter.dict))) + at jit.oopspec("odictiter.next(iter)") +def _ll_dictnext(iter): + dict = iter.dict + if dict: entries = dict.entries index = iter.index assert index >= 0 entries_len = dict.num_used_items while index < entries_len: - entry = entries[index] - is_valid = entries.valid(index) - index = index + 1 - if is_valid: - iter.index = index - if RETURNTYPE is lltype.Void: - return None - elif kind == 'items': - r = lltype.malloc(RETURNTYPE.TO) - r.item0 = recast(RETURNTYPE.TO.item0, entry.key) - r.item1 = recast(RETURNTYPE.TO.item1, entry.value) - return r - elif kind == 'keys': - return entry.key - elif kind == 'values': - return entry.value - + nextindex = index + 1 + if entries.valid(index): + iter.index = nextindex + return index + index = nextindex # clear the reference to the dict and prevent restarts iter.dict = 
lltype.nullptr(lltype.typeOf(iter).TO.dict.TO) - raise StopIteration - - return ll_dictnext - -ll_dictnext_group = {'keys' : _make_ll_dictnext('keys'), - 'values': _make_ll_dictnext('values'), - 'items' : _make_ll_dictnext('items')} + raise StopIteration # _____________________________________________________________ # methods diff --git a/rpython/rtyper/rdict.py b/rpython/rtyper/rdict.py --- a/rpython/rtyper/rdict.py +++ b/rpython/rtyper/rdict.py @@ -75,18 +75,49 @@ def rtype_next(self, hop): variant = self.variant v_iter, = hop.inputargs(self) - if variant in ('keys', 'values'): - c1 = hop.inputconst(lltype.Void, None) - else: - c1 = hop.inputconst(lltype.Void, hop.r_result.lowleveltype) # record that we know about these two possible exceptions hop.has_implicit_exception(StopIteration) hop.has_implicit_exception(RuntimeError) hop.exception_is_here() - v = hop.gendirectcall(self.ll_dictnext, c1, v_iter) + v_index = hop.gendirectcall(self._ll_dictnext, v_iter) + if variant == 'items' and hop.r_result.lowleveltype != lltype.Void: + c1 = hop.inputconst(lltype.Void, hop.r_result.lowleveltype.TO) + cflags = hop.inputconst(lltype.Void, {'flavor': 'gc'}) + v_result = hop.genop('malloc', [c1, cflags], + resulttype = hop.r_result.lowleveltype) + DICT = self.lowleveltype.TO.dict + c_dict = hop.inputconst(lltype.Void, 'dict') + v_dict = hop.genop('getfield', [v_iter, c_dict], resulttype=DICT) + ENTRIES = DICT.TO.entries + c_entries = hop.inputconst(lltype.Void, 'entries') + v_entries = hop.genop('getfield', [v_dict, c_entries], + resulttype=ENTRIES) + if variant != 'values': + KEY = ENTRIES.TO.OF.key + c_key = hop.inputconst(lltype.Void, 'key') + v_key = hop.genop('getinteriorfield', [v_entries, v_index, c_key], + resulttype=KEY) + if variant != 'keys': + VALUE = ENTRIES.TO.OF.value + c_value = hop.inputconst(lltype.Void, 'value') + v_value = hop.genop('getinteriorfield', [v_entries,v_index,c_value], + resulttype=VALUE) if variant == 'keys': - return 
self.r_dict.recast_key(hop.llops, v) + return self.r_dict.recast_key(hop.llops, v_key) elif variant == 'values': - return self.r_dict.recast_value(hop.llops, v) + return self.r_dict.recast_value(hop.llops, v_value) + elif hop.r_result.lowleveltype == lltype.Void: + return hop.inputconst(lltype.Void, None) else: - return v + assert variant == 'items' + ITEM0 = v_result.concretetype.TO.item0 + ITEM1 = v_result.concretetype.TO.item1 + if ITEM0 != v_key.concretetype: + v_key = hop.genop('cast_pointer', [v_key], resulttype=ITEM0) + if ITEM1 != v_value.concretetype: + v_value = hop.genop('cast_pointer', [v_value], resulttype=ITEM1) + c_item0 = hop.inputconst(lltype.Void, 'item0') + c_item1 = hop.inputconst(lltype.Void, 'item1') + hop.genop('setfield', [v_result, c_item0, v_key]) + hop.genop('setfield', [v_result, c_item1, v_value]) + return v_result diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -138,12 +138,12 @@ rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2) ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) ll_iter = rordereddict.ll_dictiter(ITER, ll_d) - ll_iterkeys = rordereddict.ll_dictnext_group['keys'] - next = ll_iterkeys(lltype.Signed, ll_iter) - assert hlstr(next) == "k" - next = ll_iterkeys(lltype.Signed, ll_iter) - assert hlstr(next) == "j" - py.test.raises(StopIteration, ll_iterkeys, lltype.Signed, ll_iter) + ll_dictnext = rordereddict._ll_dictnext + num = ll_dictnext(ll_iter) + assert hlstr(ll_d.entries[num].key) == "k" + num = ll_dictnext(ll_iter) + assert hlstr(ll_d.entries[num].key) == "j" + py.test.raises(StopIteration, ll_dictnext, ll_iter) def test_popitem(self): DICT = self._get_str_dict() From noreply at buildbot.pypy.org Tue Jul 8 17:38:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Jul 2014 17:38:49 +0200 (CEST) Subject: [pypy-commit] pypy default: Add comment Message-ID: 
<20140708153849.784CC1D2317@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72392:b74596c6094a Date: 2014-07-08 16:46 +0200 http://bitbucket.org/pypy/pypy/changeset/b74596c6094a/ Log: Add comment diff --git a/rpython/rtyper/rdict.py b/rpython/rtyper/rdict.py --- a/rpython/rtyper/rdict.py +++ b/rpython/rtyper/rdict.py @@ -81,6 +81,8 @@ hop.exception_is_here() v_index = hop.gendirectcall(self._ll_dictnext, v_iter) if variant == 'items' and hop.r_result.lowleveltype != lltype.Void: + # this allocates the tuple for the result, directly in the function + # where it will be used (likely). This will let it be removed. c1 = hop.inputconst(lltype.Void, hop.r_result.lowleveltype.TO) cflags = hop.inputconst(lltype.Void, {'flavor': 'gc'}) v_result = hop.genop('malloc', [c1, cflags], From noreply at buildbot.pypy.org Tue Jul 8 19:49:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 8 Jul 2014 19:49:08 +0200 (CEST) Subject: [pypy-commit] pypy default: More tweaks Message-ID: <20140708174908.ED5831C1068@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72393:8920909d084e Date: 2014-07-08 19:48 +0200 http://bitbucket.org/pypy/pypy/changeset/8920909d084e/ Log: More tweaks diff --git a/rpython/rtyper/lltypesystem/rdict.py b/rpython/rtyper/lltypesystem/rdict.py --- a/rpython/rtyper/lltypesystem/rdict.py +++ b/rpython/rtyper/lltypesystem/rdict.py @@ -805,6 +805,8 @@ ll_clear.oopspec = 'dict.clear(d)' def ll_update(dic1, dic2): + if dic1 == dic2: + return ll_prepare_dict_update(dic1, dic2.num_items) entries = dic2.entries d2len = len(entries) @@ -827,7 +829,13 @@ # (d.resize_counter - 1) // 3 = room left in d # so, if num_extra == 1, we need d.resize_counter > 3 # if num_extra == 2, we need d.resize_counter > 6 etc. - jit.conditional_call(d.resize_counter <= num_extra * 3, + # Note however a further hack: if num_extra <= d.num_items, + # we avoid calling _ll_dict_resize_to here. 
This is to handle + # the case where dict.update() actually has a lot of collisions. + # If num_extra is much greater than d.num_items the conditional_call + # will trigger anyway, which is really the goal. + x = num_extra - d.num_items + jit.conditional_call(d.resize_counter <= x * 3, _ll_dict_resize_to, d, num_extra) # this is an implementation of keys(), values() and items() From noreply at buildbot.pypy.org Wed Jul 9 03:02:44 2014 From: noreply at buildbot.pypy.org (yuyichao) Date: Wed, 9 Jul 2014 03:02:44 +0200 (CEST) Subject: [pypy-commit] pypy py3k-reset-locale: initialize LC_CTYPE instead of LC_ALL Message-ID: <20140709010244.8A8C11D23FA@cobra.cs.uni-duesseldorf.de> Author: Yichao Yu Branch: py3k-reset-locale Changeset: r72394:2bd3b970804c Date: 2014-07-09 07:45 +0800 http://bitbucket.org/pypy/pypy/changeset/2bd3b970804c/ Log: initialize LC_CTYPE instead of LC_ALL diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -57,7 +57,7 @@ space.call_function(w_run_toplevel, w_call_startup_gateway) if rlocale.HAVE_LANGINFO: try: - rlocale.setlocale(rlocale.LC_ALL, '') + rlocale.setlocale(rlocale.LC_CTYPE, '') except rlocale.LocaleError: pass w_executable = space.fsdecode(space.wrapbytes(argv[0])) From noreply at buildbot.pypy.org Wed Jul 9 03:02:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Jul 2014 03:02:45 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Merged in yuyichao/pypy/py3k-reset-locale (pull request #246) Message-ID: <20140709010245.E63301D23FA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72395:bd92f95fcd54 Date: 2014-07-08 18:02 -0700 http://bitbucket.org/pypy/pypy/changeset/bd92f95fcd54/ Log: Merged in yuyichao/pypy/py3k-reset-locale (pull request #246) initialize LC_CTYPE instead of LC_ALL diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- 
a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -57,7 +57,7 @@ space.call_function(w_run_toplevel, w_call_startup_gateway) if rlocale.HAVE_LANGINFO: try: - rlocale.setlocale(rlocale.LC_ALL, '') + rlocale.setlocale(rlocale.LC_CTYPE, '') except rlocale.LocaleError: pass w_executable = space.fsdecode(space.wrapbytes(argv[0])) From noreply at buildbot.pypy.org Wed Jul 9 03:03:00 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 9 Jul 2014 03:03:00 +0200 (CEST) Subject: [pypy-commit] pypy py3k-reset-locale: Close branch py3k-reset-locale Message-ID: <20140709010300.B5EF91D23FA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-reset-locale Changeset: r72396:d577539c2307 Date: 2014-07-08 18:02 -0700 http://bitbucket.org/pypy/pypy/changeset/d577539c2307/ Log: Close branch py3k-reset-locale From noreply at buildbot.pypy.org Wed Jul 9 16:18:16 2014 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 9 Jul 2014 16:18:16 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-refactoring-virtual-pc: start adding vrefs Message-ID: <20140709141816.A5E601C33F5@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage-refactoring-virtual-pc Changeset: r869:6fb935c7c9b6 Date: 2014-07-03 16:21 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6fb935c7c9b6/ Log: start adding vrefs diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -26,7 +26,7 @@ _immutable_fields_ = ["space", "image", "image_name", "max_stack_depth", "interrupt_counter_size", "startup_time", "evented", "interrupts"] - + jit_driver = jit.JitDriver( greens=['pc', 'self', 'method'], reds=['s_context'], @@ -38,7 +38,7 @@ trace=False, evented=True, interrupts=True, max_stack_depth=constants.MAX_LOOP_DEPTH): import time - + # === Initialize immutable variables self.space = space self.image = image @@ -54,7 +54,7 @@ self.interrupt_counter_size = int(os.environ["SPY_ICS"]) except 
KeyError: self.interrupt_counter_size = constants.INTERRUPT_COUNTER_SIZE - + # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size self.current_stack_depth = 0 @@ -108,19 +108,25 @@ if jit.we_are_jitted(): self.jitted_check_for_interrupt(s_context) self.jit_driver.can_enter_jit(pc=pc, self=self, method=method, s_context=s_context) - + # This is just a wrapper around loop_bytecodes that handles the stack overflow protection mechanism def stack_frame(self, s_new_frame, may_context_switch=True, fresh_context=False): if self.max_stack_depth > 0: if self.current_stack_depth >= self.max_stack_depth: raise StackOverflow(s_new_frame) - + self.current_stack_depth += 1 + s_sender = s_new_frame.s_sender() + assert s_sender + s_sender_ref = jit.virtual_ref(s_sender) + s_new_frame.store_s_sender(s_sender_ref, raiseError=False) try: self.loop_bytecodes(s_new_frame, may_context_switch=may_context_switch, fresh_context=fresh_context) finally: + jit.virtual_ref_finish(s_sender_ref, s_sender) + s_new_frame.restore_s_sender(s_sender) self.current_stack_depth -= 1 - + def step(self, context, pc): bytecode = context.fetch_bytecode(pc) pc += 1 @@ -134,9 +140,9 @@ if start <= bytecode <= stop: return getattr(context, methname)(self, bytecode, pc) assert False, "unreachable" - + # ============== Methods for handling user interrupts ============== - + def jitted_check_for_interrupt(self, s_frame): if not self.interrupts: return @@ -147,7 +153,7 @@ decr_by = int(trace_length // 100) decr_by = max(decr_by, 1) self.quick_check_for_interrupt(s_frame, decr_by) - + def quick_check_for_interrupt(self, s_frame, dec=1): if not self.interrupts: return @@ -183,7 +189,7 @@ return intmask(int((time.time() - self.startup_time) * 1000) & constants.TAGGED_MASK) # ============== Convenience methods for executing code ============== - + def interpret_toplevel(self, w_frame): try: self.loop(w_frame) @@ -199,7 +205,7 @@ "asSymbol") else: w_selector = selector - + 
w_method = model.W_CompiledMethod(self.space, header=512) w_method.literalatput0(self.space, 1, w_selector) assert len(arguments_w) <= 7 @@ -208,10 +214,10 @@ s_frame = MethodContextShadow(self.space, None, w_method, w_receiver, []) s_frame.push(w_receiver) s_frame.push_all(list(arguments_w)) - + self.interrupt_check_counter = self.interrupt_counter_size return self.interpret_toplevel(s_frame.w_self()) - + def padding(self, symbol=' '): return symbol * self.current_stack_depth @@ -247,9 +253,20 @@ # jump=True means the pc is changed in an unpredictable way. # The implementation method must additionally handle the pc. # needs_pc=True means the bytecode implementation required the pc, but will not change it. + +from rpython.rlib.unroll import SpecTag +class unrolling_int(int, SpecTag): + def __add__(self, other): + return unrolling_int(int.__add__(self, other)) + __radd__ = __add__ + def __sub__(self, other): + return unrolling_int(int.__sub__(self, other)) + def __rsub__(self, other): + return unrolling_int(int.__rsub__(self, other)) +unrolling_zero = unrolling_int(0) + def bytecode_implementation(parameter_bytes=0, jump=False, needs_pc=False): def bytecode_implementation_decorator(actual_implementation_method): - from rpython.rlib.unroll import unrolling_zero @jit.unroll_safe def bytecode_implementation_wrapper(self, interp, current_bytecode, pc): parameters = () @@ -351,9 +368,9 @@ # __extend__ adds new methods to the ContextPartShadow class class __extend__(ContextPartShadow): - + # ====== Push/Pop bytecodes ====== - + @bytecode_implementation() def pushReceiverVariableBytecode(self, interp, current_bytecode): index = current_bytecode & 15 @@ -432,7 +449,7 @@ @bytecode_implementation() def popStackBytecode(self, interp, current_bytecode): self.pop() - + @bytecode_implementation(parameter_bytes=1) def pushNewArrayBytecode(self, interp, current_bytecode, descriptor): arraySize, popIntoArray = splitter[7, 1](descriptor) @@ -442,9 +459,9 @@ else: newArray = 
interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) self.push(newArray) - + # ====== Extended Push/Pop bytecodes ====== - + def _extendedVariableTypeAndIndex(self, descriptor): return ((descriptor >> 6) & 3), (descriptor & 63) @@ -480,16 +497,16 @@ @bytecode_implementation(parameter_bytes=1) def extendedStoreBytecode(self, interp, current_bytecode, descriptor): return self._extendedStoreBytecode(interp, current_bytecode, descriptor) - + @bytecode_implementation(parameter_bytes=1) def extendedStoreAndPopBytecode(self, interp, current_bytecode, descriptor): self._extendedStoreBytecode(interp, current_bytecode, descriptor) self.pop() - + def _extract_index_and_temps(self, index_in_array, index_of_array): w_indirectTemps = self.gettemp(index_of_array) return index_in_array, w_indirectTemps - + @bytecode_implementation(parameter_bytes=2) def pushRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) @@ -527,7 +544,7 @@ copiedValues: copiedValues). 
self jump: blockSize """ - + space = self.space numArgs, numCopied = splitter[4, 4](descriptor) blockSize = (j << 8) | i @@ -536,7 +553,7 @@ self.push(w_closure) assert blockSize >= 0 return self._jump(blockSize, pc) - + # ====== Helpers for send/return bytecodes ====== def _sendSelfSelector(self, w_selector, argcount, interp): @@ -558,7 +575,7 @@ w_method = receiverclassshadow.lookup(w_selector) except MethodNotFound: return self._doesNotUnderstand(w_selector, argcount, interp, receiver) - + code = w_method.primitive() if code: try: @@ -579,21 +596,21 @@ def _sendSelfSelectorSpecial(self, interp, selector, numargs): w_selector = self.space.get_special_selector(selector) return self._sendSelfSelector(w_selector, numargs, interp) - + def _sendSpecialSelector(self, interp, receiver, special_selector, w_args=[]): w_special_selector = self.space.objtable["w_" + special_selector] s_class = receiver.class_shadow(self.space) w_method = s_class.lookup(w_special_selector) s_frame = w_method.create_frame(interp.space, receiver, w_args, self) - + # ###################################################################### if interp.trace: print '%s %s %s: #%s' % (interp.padding('#'), special_selector, s_frame.short_str(), w_args) if not objectmodel.we_are_translated(): import pdb; pdb.set_trace() - + return interp.stack_frame(s_frame) - + def _doesNotUnderstand(self, w_selector, argcount, interp, receiver): arguments = self.pop_and_return_n(argcount) w_message_class = self.space.classtable["w_Message"] @@ -603,7 +620,7 @@ w_message.store(self.space, 0, w_selector) w_message.store(self.space, 1, self.space.wrap_list(arguments)) self.pop() # The receiver, already known. 
- + try: return self._sendSpecialSelector(interp, receiver, "doesNotUnderstand", [w_message]) except MethodNotFound: @@ -612,10 +629,10 @@ assert isinstance(s_class, ClassShadow) print "Missing doesNotUnderstand in hierarchy of %s" % s_class.getname() raise - + def _mustBeBoolean(self, interp, receiver): return self._sendSpecialSelector(interp, receiver, "mustBeBoolean") - + def _call_primitive(self, code, interp, argcount, w_method, w_selector): # ################################################################## if interp.trace: @@ -635,11 +652,11 @@ def _return(self, return_value, interp, s_return_to): # unfortunately, this assert is not true for some tests. TODO fix this. # assert self._stack_ptr == self.tempsize() - + # ################################################################## if interp.trace: print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) - + if s_return_to is None: # This should never happen while executing a normal image. raise ReturnFromTopLevel(return_value) @@ -736,7 +753,7 @@ return self._sendSelfSelector(w_selector, argcount, interp) # ====== Misc ====== - + def _activate_unwind_context(self, interp, current_pc): # TODO put the constant somewhere else. # Primitive 198 is used in BlockClosure >> ensure: @@ -754,11 +771,11 @@ raise nlr finally: self.mark_returned() - + @bytecode_implementation() def unknownBytecode(self, interp, current_bytecode): raise MissingBytecode("unknownBytecode") - + @bytecode_implementation() def experimentalBytecode(self, interp, current_bytecode): raise MissingBytecode("experimentalBytecode") @@ -775,7 +792,7 @@ else: w_alternative = interp.space.w_true w_expected = interp.space.w_false - + # Don't check the class, just compare with only two Boolean instances. 
w_bool = self.pop() if w_expected.is_same_object(w_bool): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1290,13 +1290,13 @@ s_new_context = shadow.BlockContextShadow( interp.space, None, w_method_context, argcnt, initialip) return s_new_context.w_self() - + def finalize_block_ctx(interp, s_block_ctx, s_frame): from spyvm.error import SenderChainManipulation # Set some fields s_block_ctx.store_pc(s_block_ctx.initialip()) try: - s_block_ctx.store_s_sender(s_frame) + s_block_ctx.store_s_sender(jit.non_virtual_ref(s_frame)) except SenderChainManipulation, e: assert e.s_context == s_block_ctx return s_block_ctx diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -20,7 +20,7 @@ _immutable_fields_ = ['space'] provides_getname = False repr_classname = "AbstractShadow" - + def __init__(self, space, w_self): self.space = space assert w_self is None or isinstance(w_self, model.W_PointersObject) @@ -34,19 +34,19 @@ return "<%s %s>" % (self.repr_classname, self.getname()) else: return "<%s>" % self.repr_classname - + def fetch(self, n0): raise NotImplementedError("Abstract class") def store(self, n0, w_value): raise NotImplementedError("Abstract class") def size(self): raise NotImplementedError("Abstract class") - + def attach_shadow(self): pass - + def copy_field_from(self, n0, other_shadow): self.store(n0, other_shadow.fetch(n0)) - + # This can be overwritten to change the order of initialization. 
def copy_from(self, other_shadow): assert self.size() == other_shadow.size() @@ -98,24 +98,24 @@ # Class must provide: wrap, unwrap, nil_value, is_nil_value, wrapper_class _attrs_ = ['storage'] _immutable_fields_ = ['storage'] - + def __init__(self, space, w_self, size): AbstractStorageShadow.__init__(self, space, w_self, size) self.storage = [self.nil_value] * size - + def size(self): return len(self.storage) - + def generalized_strategy_for(self, w_val): return ListStorageShadow - + def fetch(self, n0): val = self.storage[n0] if self.is_nil_value(val): return self.space.w_nil else: return self.wrap(self.space, val) - + def do_store(self, n0, w_val): if w_val.is_nil(self.space): self.storage[n0] = self.nil_value @@ -134,7 +134,7 @@ nil_value = constants.MAXINT wrapper_class = model.W_SmallInteger import_from_mixin(AbstractValueOrNilStorageMixin) - + @staticmethod def static_can_contain(space, w_val): return _value_or_nil_can_handle(SmallIntegerOrNilStorageShadow, space, w_val) @@ -153,7 +153,7 @@ nil_value = sys.float_info.max wrapper_class = model.W_Float import_from_mixin(AbstractValueOrNilStorageMixin) - + @staticmethod def static_can_contain(space, w_val): return _value_or_nil_can_handle(FloatOrNilStorageShadow, space, w_val) @@ -193,17 +193,17 @@ if float_can_handle and not FloatOrNilStorageShadow.static_can_contain(space, w_obj): float_can_handle = False specialized_strategies = specialized_strategies - 1 - + if specialized_strategies <= 0: return ListStorageShadow - + if all_nil_can_handle: return AllNilStorageShadow if small_int_can_handle: return SmallIntegerOrNilStorageShadow if float_can_handle: return FloatOrNilStorageShadow - + # If this happens, please look for a bug in the code above. assert False, "No strategy could be found for list..." 
@@ -223,7 +223,7 @@ _immutable_fields_ = ['storage'] repr_classname = "ListStorageShadow" import_from_mixin(ListStorageMixin) - + def initialize_storage(self, size): self.storage = [self.space.w_nil] * size def fetch(self, n0): @@ -236,7 +236,7 @@ _immutable_fields_ = ['storage'] repr_classname = "WeakListStorageShadow" import_from_mixin(ListStorageMixin) - + def initialize_storage(self, size): self.storage = [weakref.ref(self.space.w_nil)] * size def fetch(self, n0): @@ -245,14 +245,14 @@ def store(self, n0, w_value): assert w_value is not None self.storage[n0] = weakref.ref(w_value) - + class AbstractCachingShadow(ListStorageShadow): _immutable_fields_ = ['version?'] _attrs_ = ['version'] repr_classname = "AbstractCachingShadow" import_from_mixin(version.VersionMixin) version = None - + def __init__(self, space, w_self): ListStorageShadow.__init__(self, space, w_self, 0) self.changed() @@ -284,7 +284,7 @@ _s_superclass = _s_methoddict = None provides_getname = True repr_classname = "ClassShadow" - + def __init__(self, space, w_self): self.subclass_s = {} AbstractCachingShadow.__init__(self, space, w_self) @@ -305,7 +305,7 @@ # In Slang the value is read directly as a boxed integer, so that # the code gets a "pointer" whose bits are set as above, but # shifted one bit to the left and with the lowest bit set to 1. - + # Compute the instance size (really the size, not the number of bytes) instsize_lo = (classformat >> 1) & 0x3F instsize_hi = (classformat >> (9 + 1)) & 0xC0 @@ -313,10 +313,10 @@ # decode the instSpec format = (classformat >> 7) & 15 self.instance_varsized = format >= 2 - + # In case of raised exception below. self.changed() - + if format < 4: self.instance_kind = POINTERS elif format == 4: @@ -356,7 +356,7 @@ return # Some of the special info has changed -> Switch version. 
self.changed() - + def store_w_superclass(self, w_class): superclass = self._s_superclass if w_class is None or w_class.is_nil(self.space): @@ -383,24 +383,24 @@ return if methoddict: methoddict.s_class = None self.store_s_methoddict(s_new_methoddict) - + def store_s_methoddict(self, s_methoddict): s_methoddict.s_class = self s_methoddict.sync_method_cache() self._s_methoddict = s_methoddict - + def attach_s_class(self, s_other): self.subclass_s[s_other] = None def detach_s_class(self, s_other): del self.subclass_s[s_other] - + def store_w_name(self, w_name): if isinstance(w_name, model.W_BytesObject): self.name = w_name.as_string() else: self.name = None - + @jit.unroll_safe def flush_method_caches(self): look_in_shadow = self @@ -497,7 +497,7 @@ self.version = version for s_class in self.subclass_s: s_class.superclass_changed(version) - + # _______________________________________________________________ # Methods used only in testing @@ -532,7 +532,7 @@ _immutable_fields_ = ['invalid?', 's_class'] _attrs_ = ['methoddict', 'invalid', 's_class'] repr_classname = "MethodDictionaryShadow" - + def __init__(self, space, w_self): self.invalid = True self.s_class = None @@ -541,7 +541,7 @@ def update(self): self.sync_method_cache() - + def find_selector(self, w_selector): if self.invalid: return None # we may be invalid if Smalltalk code did not call flushCache @@ -593,7 +593,7 @@ class AbstractRedirectingShadow(AbstractShadow): _attrs_ = ['_w_self_size'] repr_classname = "AbstractRedirectingShadow" - + def __init__(self, space, w_self): AbstractShadow.__init__(self, space, w_self) if w_self is not None: @@ -610,7 +610,7 @@ _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', '_stack_ptr', 'instances_w'] repr_classname = "ContextPartShadow" - + _virtualizable_ = [ "_s_sender", "_pc", "_temps_and_stack[*]", "_stack_ptr", @@ -619,9 +619,9 @@ # ______________________________________________________________________ # Initialization - + def __init__(self, space, w_self): - 
self._s_sender = None + self._s_sender = jit.vref_None AbstractRedirectingShadow.__init__(self, space, w_self) self.instances_w = {} @@ -630,26 +630,26 @@ AbstractRedirectingShadow.copy_field_from(self, n0, other_shadow) except error.SenderChainManipulation, e: assert e.s_context == self - + def copy_from(self, other_shadow): # Some fields have to be initialized before the rest, to ensure correct initialization. privileged_fields = self.fields_to_copy_first() for n0 in privileged_fields: self.copy_field_from(n0, other_shadow) - + # Now the temp size will be known. self.init_stack_and_temps() - + for n0 in range(self.size()): if n0 not in privileged_fields: self.copy_field_from(n0, other_shadow) - + def fields_to_copy_first(self): return [] - + # ______________________________________________________________________ # Accessing object fields - + def fetch(self, n0): if n0 == constants.CTXPART_SENDER_INDEX: return self.w_sender() @@ -671,9 +671,9 @@ if n0 == constants.CTXPART_SENDER_INDEX: assert isinstance(w_value, model.W_PointersObject) if w_value.is_nil(self.space): - self._s_sender = None + self._s_sender = jit.vref_None else: - self.store_s_sender(w_value.as_context_get_shadow(self.space)) + self.store_s_sender(jit.non_virtual_ref(w_value.as_context_get_shadow(self.space))) return if n0 == constants.CTXPART_PC_INDEX: return self.store_unwrap_pc(w_value) @@ -688,24 +688,30 @@ else: # XXX later should store tail out of known context part as well raise error.WrapperException("Index in context out of bounds") - + # === Sender === - - def store_s_sender(self, s_sender): - assert s_sender is None or isinstance(s_sender, ContextPartShadow) + + def store_s_sender(self, s_sender, raiseError=True): + assert s_sender is jit.vref_None or isinstance(s_sender, jit.DirectVRef) self._s_sender = s_sender - raise error.SenderChainManipulation(self) - + if raiseError: + raise error.SenderChainManipulation(self) + + def restore_s_sender(self, s_direct): + if self._s_sender is not 
jit.vref_None: + # virtual sender wasn't already cleared by e.g. mark_returned + self._s_sender = s_direct + def w_sender(self): if self._s_sender is None: return self.space.w_nil return self._s_sender.w_self() - + def s_sender(self): - return self._s_sender - + return self._s_sender() + # === Stack Pointer === - + def unwrap_store_stackpointer(self, w_sp1): # the stackpointer in the W_PointersObject starts counting at the # tempframe start @@ -724,12 +730,12 @@ def stackdepth(self): return rarithmetic.intmask(self._stack_ptr) - + def wrap_stackpointer(self): return self.space.wrap_int(self.stackdepth()) # === Program Counter === - + def store_unwrap_pc(self, w_pc): if w_pc.is_nil(self.space): self.store_pc(-1) @@ -754,9 +760,9 @@ def store_pc(self, newpc): assert newpc >= -1 self._pc = newpc - + # === Subclassed accessors === - + def s_home(self): raise NotImplementedError() @@ -765,22 +771,22 @@ def w_receiver(self): raise NotImplementedError() - + def w_method(self): raise NotImplementedError() - + def tempsize(self): raise NotImplementedError() - + def is_closure_context(self): raise NotImplementedError() - + # === Other properties of Contexts === - + def mark_returned(self): self.store_pc(-1) try: - self.store_s_sender(None) + self.store_s_sender(jit.vref_None) except error.SenderChainManipulation, e: assert self == e.s_context @@ -789,25 +795,25 @@ def external_stackpointer(self): return self.stackdepth() + self.stackstart() - + def stackend(self): # XXX this is incorrect when there is subclassing return self._w_self_size - + def fetch_next_bytecode(self): pc = jit.promote(self._pc) assert pc >= 0 self._pc += 1 return self.fetch_bytecode(pc) - + def fetch_bytecode(self, pc): bytecode = self.w_method().fetch_bytecode(pc) return ord(bytecode) - + # ______________________________________________________________________ # Temporary Variables # - # Every context has it's own stack. BlockContexts share their temps with + # Every context has it's own stack. 
BlockContexts share their temps with # their home contexts. MethodContexts created from a BlockClosure get their # temps copied from the closure upon activation. Changes are not propagated back; # this is handled by the compiler by allocating an extra Array for temps. @@ -817,7 +823,7 @@ def settemp(self, index, w_value): raise NotImplementedError() - + # ______________________________________________________________________ # Stack Manipulation @@ -831,13 +837,13 @@ for i in range(tempsize): temps_and_stack[i] = self.space.w_nil self._stack_ptr = rarithmetic.r_uint(tempsize) # we point after the last element - + def stack_get(self, index0): return self._temps_and_stack[index0] - + def stack_put(self, index0, w_val): self._temps_and_stack[index0] = w_val - + def stack(self): """NOT_RPYTHON""" # purely for testing return self._temps_and_stack[self.tempsize():self._stack_ptr] @@ -892,7 +898,7 @@ # ______________________________________________________________________ # Primitive support - + def store_instances_array(self, w_class, match_w): # used for primitives 77 & 78 self.instances_w[w_class] = match_w @@ -919,7 +925,7 @@ j += 1 retval += "\n---------------------" return retval - + def short_str(self): arg_strings = self.argument_strings() if len(arg_strings) > 0: @@ -933,10 +939,10 @@ self.w_receiver().as_repr_string(), args ) - + def print_stack(self, method=True): return self.print_padded_stack(method)[1] - + def print_padded_stack(self, method): padding = ret_str = '' if self.s_sender() is not None: @@ -950,9 +956,9 @@ class BlockContextShadow(ContextPartShadow): _attrs_ = ['_w_home', '_initialip', '_eargc'] repr_classname = "BlockContextShadow" - + # === Initialization === - + def __init__(self, space, w_self=None, w_home=None, argcnt=0, initialip=0): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) creating_w_self = w_self is None @@ -972,40 +978,40 @@ def fields_to_copy_first(self): return [ constants.BLKCTX_HOME_INDEX ] - + # === 
Implemented accessors === - + def s_home(self): return self._w_home.as_methodcontext_get_shadow(self.space) - + def stackstart(self): return constants.BLKCTX_STACK_START def tempsize(self): # A blockcontext doesn't have any temps return 0 - + def w_receiver(self): return self.s_home().w_receiver() - + def w_method(self): retval = self.s_home().w_method() assert isinstance(retval, model.W_CompiledMethod) return retval - + def is_closure_context(self): return True - + # === Temporary variables === - + def gettemp(self, index): return self.s_home().gettemp(index) def settemp(self, index, w_value): self.s_home().settemp(index, w_value) - + # === Accessing object fields === - + def fetch(self, n0): if n0 == constants.BLKCTX_HOME_INDEX: return self._w_home @@ -1025,11 +1031,11 @@ return self.unwrap_store_eargc(w_value) else: return ContextPartShadow.store(self, n0, w_value) - + def store_w_home(self, w_home): assert isinstance(w_home, model.W_PointersObject) self._w_home = w_home - + def unwrap_store_initialip(self, w_value): initialip = self.space.unwrap_int(w_value) initialip -= 1 + self.w_method().literalsize @@ -1037,7 +1043,7 @@ def store_initialip(self, initialip): self._initialip = initialip - + def wrap_initialip(self): initialip = self.initialip() initialip += 1 + self.w_method().literalsize @@ -1045,7 +1051,7 @@ def initialip(self): return self._initialip - + def unwrap_store_eargc(self, w_value): self.store_expected_argument_count(self.space.unwrap_int(w_value)) @@ -1059,24 +1065,24 @@ self._eargc = argc # === Stack Manipulation === - + def reset_stack(self): self.pop_n(self.stackdepth()) # === Printing === - + def argument_strings(self): return [] - + def method_str(self): return '[] in %s' % self.w_method().get_identifier_string() class MethodContextShadow(ContextPartShadow): _attrs_ = ['closure', '_w_receiver', '_w_method'] repr_classname = "MethodContextShadow" - + # === Initialization === - + @jit.unroll_safe def __init__(self, space, w_self=None, 
w_method=None, w_receiver=None, arguments=None, s_sender=None, closure=None, pc=0): @@ -1085,7 +1091,7 @@ self.store_w_receiver(w_receiver) self.store_pc(pc) self.closure = closure - + if w_method: self.store_w_method(w_method) # The summand is needed, because we calculate i.a. our stackdepth relative of the size of w_self. @@ -1094,29 +1100,29 @@ self.init_stack_and_temps() else: self._w_method = None - + if s_sender: try: - self.store_s_sender(s_sender) + self.store_s_sender(jit.non_virtual_ref(s_sender)) except error.SenderChainManipulation, e: assert self == e.s_context - + if arguments: argc = len(arguments) for i0 in range(argc): self.settemp(i0, arguments[i0]) else: argc = 0 - + if closure: for i0 in range(closure.size()): self.settemp(i0+argc, closure.at0(i0)) def fields_to_copy_first(self): return [ constants.MTHDCTX_METHOD, constants.MTHDCTX_CLOSURE_OR_NIL ] - + # === Accessing object fields === - + def fetch(self, n0): if n0 == constants.MTHDCTX_METHOD: return self.w_method() @@ -1150,12 +1156,12 @@ return self.settemp(temp_i, w_value) else: return ContextPartShadow.store(self, n0, w_value) - + def store_w_receiver(self, w_receiver): self._w_receiver = w_receiver - + # === Implemented Accessors === - + def s_home(self): if self.is_closure_context(): # this is a context for a blockClosure @@ -1168,31 +1174,31 @@ return s_outerContext.s_home() else: return self - + def stackstart(self): return constants.MTHDCTX_TEMP_FRAME_START - + def store_w_method(self, w_method): assert isinstance(w_method, model.W_CompiledMethod) self._w_method = w_method def w_receiver(self): return self._w_receiver - + def w_method(self): retval = self._w_method assert isinstance(retval, model.W_CompiledMethod) return retval - + def tempsize(self): if not self.is_closure_context(): return self.w_method().tempsize() else: return self.closure.tempsize() - + def is_closure_context(self): return self.closure is not None - + # 
______________________________________________________________________ # Marriage of MethodContextShadows with PointerObjects only when required @@ -1209,9 +1215,9 @@ self._w_self = w_self self._w_self_size = w_self.size() return w_self - + # === Temporary variables === - + def gettemp(self, index0): return self.stack_get(index0) @@ -1219,7 +1225,7 @@ self.stack_put(index0, w_value) # === Printing === - + def argument_strings(self): argcount = self.w_method().argsize tempsize = self.w_method().tempsize() From noreply at buildbot.pypy.org Wed Jul 9 16:18:17 2014 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 9 Jul 2014 16:18:17 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-refactoring-virtual-pc: graft changes from 64bit branch onto master Message-ID: <20140709141817.E906A1C33F5@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage-refactoring-virtual-pc Changeset: r870:2eead83ddd0a Date: 2014-02-07 13:12 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/2eead83ddd0a/ Log: graft changes from 64bit branch onto master diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -146,7 +146,7 @@ "timerSemaphore" : SO_TIMER_SEMAPHORE, } -LONG_BIT = 32 +from rpython.rlib.rarithmetic import LONG_BIT TAGGED_MAXINT = 2 ** (LONG_BIT - 2) - 1 TAGGED_MININT = -2 ** (LONG_BIT - 2) diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -1,4 +1,3 @@ -from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.runicode import unicode_encode_utf_8 from rpython.rlib import jit diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -93,6 +93,7 @@ s_new_context = p.s_new_context def loop_bytecodes(self, s_context, fresh_context=False, may_context_switch=True): + assert isinstance(s_context, ContextPartShadow) if not jit.we_are_jitted() and 
may_context_switch: self.quick_check_for_interrupt(s_context) method = s_context.w_method() diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -15,17 +15,33 @@ that create W_PointersObjects of correct size with attached shadows. """ import sys, weakref +<<<<<<< local from spyvm import constants, error, version, storage_statistics from spyvm.version import elidable_for_version, constant_for_version, constant_for_version_arg +======= +from spyvm import constants, error, system +>>>>>>> other from rpython.rlib import rrandom, objectmodel, jit, signature +<<<<<<< local from rpython.rlib.rarithmetic import intmask, r_uint, r_int from rpython.rlib.debug import make_sure_not_resized +======= +from rpython.rlib.rarithmetic import intmask, r_uint32, r_uint, r_int +>>>>>>> other from rpython.tool.pairtype import extendabletype from rpython.rlib.objectmodel import instantiate, compute_hash, import_from_mixin, we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rsdl import RSDL, RSDL_helper + +if system.IS_64BIT: + from rpython.rlib.rarithmetic import widen +else: + def widen(x): + return x + + class W_Object(object): """Root of Squeak model, abstract.""" _attrs_ = [] # no RPython-level instance variables allowed in W_Object @@ -216,7 +232,7 @@ return isinstance(self.value, int) and self.value < 0x8000 def lshift(self, space, shift): - from rpython.rlib.rarithmetic import ovfcheck, intmask, r_uint + from rpython.rlib.rarithmetic import ovfcheck, intmask # shift > 0, therefore the highest bit of upperbound is not set, # i.e. 
upperbound is positive upperbound = intmask(r_uint(-1) >> shift) @@ -359,7 +375,6 @@ return space.wrap_int((self.value >> shift) & mask) def unwrap_uint(self, space): - from rpython.rlib.rarithmetic import r_uint return r_uint(self.value) def clone(self, space): @@ -469,11 +484,11 @@ from rpython.rlib.rstruct.ieee import float_pack r = float_pack(self.value, 8) # C double if n0 == 0: - return space.wrap_uint(r_uint(intmask(r >> 32))) + return space.wrap_uint(r_uint32(intmask(r >> 32))) else: # bounds-check for primitive access is done in the primitive assert n0 == 1 - return space.wrap_uint(r_uint(intmask(r))) + return space.wrap_uint(r_uint32(intmask(r))) def store(self, space, n0, w_obj): from rpython.rlib.rstruct.ieee import float_unpack, float_pack @@ -799,14 +814,19 @@ byte0 = ord(self.getchar(byte_index0)) byte1 = ord(self.getchar(byte_index0 + 1)) << 8 if byte1 & 0x8000 != 0: - byte1 = intmask(r_uint(0xffff0000) | r_uint(byte1)) + byte1 = intmask(widen(r_uint32(0xffff0000)) | widen(r_uint32(byte1))) return space.wrap_int(byte1 | byte0) def short_atput0(self, space, index0, w_value): from rpython.rlib.rarithmetic import int_between i_value = space.unwrap_int(w_value) - if not int_between(-0x8000, i_value, 0x8000): - raise error.PrimitiveFailedError + if constants.LONG_BIT == 64: + if (not int_between(0, i_value, 0x8000) and + not int_between(0, i_value ^ (0xffffffff), 0x8000)): + raise error.PrimitiveFailedError + else: + if not int_between(-0x8000, i_value, 0x8000): + raise error.PrimitiveFailedError byte_index0 = index0 * 2 byte0 = i_value & 0xff byte1 = (i_value & 0xff00) >> 8 @@ -938,20 +958,25 @@ else: short = (word >> 16) & 0xffff if short & 0x8000 != 0: - short = r_uint(0xffff0000) | r_uint(short) + short = widen(r_uint32(0xffff0000)) | short return space.wrap_int(intmask(short)) def short_atput0(self, space, index0, w_value): from rpython.rlib.rarithmetic import int_between i_value = space.unwrap_int(w_value) - if not int_between(-0x8000, i_value, 
0x8000): - raise error.PrimitiveFailedError + if constants.LONG_BIT == 64: + if (not int_between(0, i_value, 0x8000) and + not int_between(0, i_value ^ (0xffffffff), 0x8000)): + raise error.PrimitiveFailedError + else: + if not int_between(-0x8000, i_value, 0x8000): + raise error.PrimitiveFailedError word_index0 = index0 / 2 - word = intmask(self.getword(word_index0)) + word = intmask(r_uint32(self.getword(word_index0))) if index0 % 2 == 0: - word = intmask(r_uint(word) & r_uint(0xffff0000)) | (i_value & 0xffff) + word = intmask(widen(r_uint32(word)) & widen(r_uint32(0xffff0000))) | (i_value & 0xffff) else: - word = (i_value << 16) | (word & 0xffff) + word = intmask(r_uint32((i_value << 16) | (word & 0xffff))) value = r_uint(word) self.setword(word_index0, value) @@ -1020,11 +1045,16 @@ class W_DisplayBitmap(W_AbstractObjectWithClassReference): _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display', '_depth'] +<<<<<<< local _immutable_fields_ = ['_realsize', 'display', '_depth'] repr_classname = "W_DisplayBitmap" pixelbuffer = None +======= + _immutable_fields_ = ['_realsize', 'display', '_depth', '_real_depth_buffer'] + +>>>>>>> other @staticmethod def create(space, w_class, size, depth, display): if depth < 8: @@ -1041,7 +1071,7 @@ def __init__(self, space, w_class, size, depth, display): W_AbstractObjectWithClassReference.__init__(self, space, w_class) - self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') + self._real_depth_buffer = [r_uint(0)] * size self._realsize = size self.display = display self._depth = depth @@ -1052,7 +1082,7 @@ def atput0(self, space, index0, w_value): word = space.unwrap_uint(w_value) - self.setword(index0, word) + self.setword(index0, r_uint(word)) def flush_to_screen(self): self.display.flip() @@ -1077,7 +1107,7 @@ def setword(self, n, word): self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = word + self.display.get_pixelbuffer()[n] = r_uint32(word) def 
is_array_object(self): return True @@ -1116,14 +1146,14 @@ ((msb & mask) << 11) ) - self.display.get_pixelbuffer()[n] = r_uint(lsb | (msb << 16)) + self.display.get_pixelbuffer()[n] = r_uint32(lsb | (msb << 16)) class W_8BitDisplayBitmap(W_DisplayBitmap): repr_classname = "W_8BitDisplayBitmap" def setword(self, n, word): self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = r_uint( + self.display.get_pixelbuffer()[n] = r_uint32( (word >> 24) | ((word >> 8) & 0x0000ff00) | ((word << 8) & 0x00ff0000) | @@ -1136,7 +1166,7 @@ @jit.unroll_safe def setword(self, n, word): self._real_depth_buffer[n] = word - word = r_uint(word) + nWord = r_uint(word) pos = self.compute_pos(n) assert self._depth <= 4 rshift = 32 - self._depth @@ -1145,10 +1175,10 @@ return mapword = r_uint(0) for i in xrange(4): - pixel = r_uint(word) >> rshift + pixel = r_uint(nWord) >> rshift mapword |= (r_uint(pixel) << (i * 8)) - word <<= self._depth - self.display.get_pixelbuffer()[pos] = mapword + nWord <<= self._depth + self.display.get_pixelbuffer()[pos] = r_uint32(mapword) pos += 1 def compute_pos(self, n): diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,6 +1,10 @@ import os +<<<<<<< local from spyvm import constants, model, shadow, wrapper, version +======= +from spyvm import constants, model, shadow, wrapper, system +>>>>>>> other from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError from rpython.rlib import jit, rpath from rpython.rlib.objectmodel import instantiate, specialize @@ -23,7 +27,7 @@ self.make_bootstrap_objects() def find_executable(self, executable): - if os.sep in executable or (os.name == "nt" and ":" in executable): + if os.sep in executable or (system.IS_WINDOWS and ":" in executable): return executable path = os.environ.get("PATH") if path: @@ -107,9 +111,8 @@ # methods for wrapping and unwrapping stuff def wrap_int(self, val): - from spyvm import constants - assert 
isinstance(val, int) - # we don't do tagging + if not isinstance(val, int): + raise WrappingError return model.W_SmallInteger(val) def wrap_uint(self, val): diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -17,7 +17,7 @@ raise PrimitiveFailedError("BitBlt primitive not called in BitBlt object!") # only allow combinationRules 0-41 - combinationRule = interp.space.unwrap_positive_32bit_int(w_rcvr.fetch(interp.space, 3)) + combinationRule = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 3)) if combinationRule > 41: raise PrimitiveFailedError("Missing combinationRule %d" % combinationRule) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -5,7 +5,7 @@ from spyvm import model, shadow from spyvm import constants, display from spyvm.error import PrimitiveFailedError, \ - PrimitiveNotYetWrittenError + PrimitiveNotYetWrittenError, WrappingError from spyvm import wrapper from rpython.rlib import rarithmetic, rfloat, unroll, jit @@ -300,9 +300,13 @@ @expose_primitive(FLOAT_TRUNCATED, unwrap_spec=[float]) def func(interp, s_frame, f): try: - return interp.space.wrap_int(rarithmetic.ovfcheck_float_to_int(f)) + integer = rarithmetic.ovfcheck_float_to_int(f) except OverflowError: raise PrimitiveFailedError + try: + return interp.space.wrap_int(integer) # in 64bit VMs, this may fail + except WrappingError: + raise PrimitiveFailedError @expose_primitive(FLOAT_TIMES_TWO_POWER, unwrap_spec=[float, int]) def func(interp, s_frame, rcvr, arg): @@ -647,17 +651,22 @@ def func(interp, s_frame, argcount, w_method): from spyvm.interpreter import Return w_rcvr = s_frame.peek(0) - try: - s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) - except Return: - w_dest_form = w_rcvr.fetch(interp.space, 0) - w_display = interp.space.objtable['w_display'] - if w_dest_form.is_same_object(w_display): - w_bitmap = w_display.fetch(interp.space, 0) 
- assert isinstance(w_bitmap, model.W_DisplayBitmap) - w_bitmap.flush_to_screen() - return w_rcvr - except shadow.MethodNotFound: + w_display = interp.space.objtable['w_display'] + if interp.space.unwrap_int(w_display.fetch(interp.space, 3)) == 1: + try: + s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) + except Return: + w_dest_form = w_rcvr.fetch(interp.space, 0) + if w_dest_form.is_same_object(w_display): + w_bitmap = w_display.fetch(interp.space, 0) + assert isinstance(w_bitmap, model.W_DisplayBitmap) + w_bitmap.flush_to_screen() + return w_rcvr + except shadow.MethodNotFound: + from spyvm.plugins.bitblt import BitBltPlugin + BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) + return w_rcvr + else: from spyvm.plugins.bitblt import BitBltPlugin BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, w_method) return w_rcvr @@ -872,6 +881,15 @@ w_rcvr.w_class = w_arg_class + +if constants.LONG_BIT == 32: + def callIProxy(signature, interp, s_frame, argcount, s_method): + from spyvm.interpreter_proxy import IProxy + return IProxy.call(signature, interp, s_frame, argcount, s_method) +else: + def callIProxy(signature, interp, s_frame, argcount, s_method): + raise PrimitiveFailedError + @expose_primitive(EXTERNAL_CALL, clean_stack=False, no_result=True, compiled_method=True) def func(interp, s_frame, argcount, w_method): space = interp.space @@ -898,8 +916,12 @@ from spyvm.plugins.vmdebugging import DebuggingPlugin return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, w_method) else: +<<<<<<< local from spyvm.interpreter_proxy import IProxy return IProxy.call(signature, interp, s_frame, argcount, w_method) +======= + return callIProxy(signature, interp, s_frame, argcount, s_method) +>>>>>>> other raise PrimitiveFailedError @expose_primitive(COMPILED_METHOD_FLUSH_CACHE, unwrap_spec=[object]) @@ -1073,7 +1095,7 @@ sec_since_epoch = rarithmetic.r_uint(time.time()) # XXX: overflow check necessary? 
sec_since_1901 = sec_since_epoch + secs_between_1901_and_1970 - return interp.space.wrap_uint(sec_since_1901) + return interp.space.wrap_uint(rarithmetic.r_uint(sec_since_1901)) #____________________________________________________________________________ @@ -1117,7 +1139,7 @@ w_arg.setchar(i, chr(new_value)) elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, model.W_DisplayBitmap): for i in xrange(w_arg.size()): - w_arg.setword(i, new_value) + w_arg.setword(i, rarithmetic.r_uint(new_value)) else: raise PrimitiveFailedError return w_arg diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -692,7 +692,7 @@ # === Sender === def store_s_sender(self, s_sender, raiseError=True): - assert s_sender is jit.vref_None or isinstance(s_sender, jit.DirectVRef) + # assert s_sender is jit.vref_None or isinstance(s_sender, jit.DirectVRef) self._s_sender = s_sender if raiseError: raise error.SenderChainManipulation(self) @@ -700,12 +700,12 @@ def restore_s_sender(self, s_direct): if self._s_sender is not jit.vref_None: # virtual sender wasn't already cleared by e.g. 
mark_returned - self._s_sender = s_direct + self._s_sender = jit.non_virtual_ref(s_direct) def w_sender(self): - if self._s_sender is None: + if self._s_sender is jit.vref_None: return self.space.w_nil - return self._s_sender.w_self() + return self.s_sender().w_self() def s_sender(self): return self._s_sender() diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -386,12 +386,20 @@ self.startup_time = time.time() def run_spy_hacks(self, space): +<<<<<<< local pass # w_display = space.objtable["w_display"] # if w_display is not None and not w_display.is_nil(space): # if space.unwrap_int(w_display.fetch(space, 3)) < 8: # # non-native indexed color depth not well supported # w_display.store(space, 3, space.wrap_int(8)) +======= + if constants.LONG_BIT == 64: + w_display = space.objtable["w_display"] + if w_display is not None and w_display is not space.w_nil: + if space.unwrap_int(w_display.fetch(space, 3)) < 32: + w_display.store(space, 3, space.wrap_int(32)) +>>>>>>> other def find_symbol(self, space, reader, symbol): w_dnu = self.special(constants.SO_DOES_NOT_UNDERSTAND) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -6,9 +6,12 @@ from rpython.rlib import jit, rpath from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ +<<<<<<< local error, shadow, storage_statistics, constants +======= + error, shadow, system +>>>>>>> other from spyvm.tool.analyseimage import create_image -from spyvm.interpreter_proxy import VirtualMachine def print_result(w_result): # This will also print contents of strings/symbols/numbers @@ -253,6 +256,9 @@ # driver.config.translation.gc = "stmgc" # driver.config.translation.gcrootfinder = "stm" from rpython.rlib import rgc + driver.exe_name = "rsqueakvm" + if system.IS_64BIT: + driver.exe_name += "-64" if hasattr(rgc, "stm_is_enabled"): 
driver.config.translation.stm = True driver.config.translation.thread = True From noreply at buildbot.pypy.org Wed Jul 9 16:18:19 2014 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 9 Jul 2014 16:18:19 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: first version to run Squeak4.5 again Message-ID: <20140709141819.284861C33F5@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage Changeset: r871:ed6b708b05a3 Date: 2014-07-09 15:37 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ed6b708b05a3/ Log: first version to run Squeak4.5 again diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -26,7 +26,7 @@ _immutable_fields_ = ["space", "image", "image_name", "max_stack_depth", "interrupt_counter_size", "startup_time", "evented", "interrupts"] - + jit_driver = jit.JitDriver( greens=['pc', 'self', 'method'], reds=['s_context'], @@ -38,7 +38,7 @@ trace=False, evented=True, interrupts=True, max_stack_depth=constants.MAX_LOOP_DEPTH): import time - + # === Initialize immutable variables self.space = space self.image = image @@ -54,7 +54,7 @@ self.interrupt_counter_size = int(os.environ["SPY_ICS"]) except KeyError: self.interrupt_counter_size = constants.INTERRUPT_COUNTER_SIZE - + # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size self.current_stack_depth = 0 @@ -90,7 +90,7 @@ print "====== Switched process from: %s" % s_new_context.short_str() print "====== to: %s " % p.s_new_context.short_str() s_new_context = p.s_new_context - + def loop_bytecodes(self, s_context, may_context_switch=True): old_pc = 0 if not jit.we_are_jitted() and may_context_switch: @@ -117,7 +117,7 @@ raise nlr else: s_context.push(nlr.value) - + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame # and handles the stack overflow protection mechanism. 
def stack_frame(self, s_frame, s_sender, may_context_switch=True): @@ -126,14 +126,14 @@ # Enter the context - store a virtual reference back to the sender # Non-fresh contexts can happen, e.g. when activating a stored BlockContext. # The same frame object must not pass through here recursively! - if s_frame.is_fresh(): + if s_frame.is_fresh() and s_sender is not None: s_frame.virtual_sender = jit.virtual_ref(s_sender) - + self.current_stack_depth += 1 if self.max_stack_depth > 0: if self.current_stack_depth >= self.max_stack_depth: raise StackOverflow(s_frame) - + # Now (continue to) execute the context bytecodes self.loop_bytecodes(s_frame, may_context_switch) finally: @@ -142,7 +142,7 @@ # it is still there, which can happen in case of ProcessSwitch or StackOverflow; # in case of a Return, this will already be handled while unwinding the stack. s_frame.finish_virtual_sender() - + def step(self, context): bytecode = context.fetch_next_bytecode() for entry in UNROLLING_BYTECODE_RANGES: @@ -155,9 +155,9 @@ if start <= bytecode <= stop: return getattr(context, methname)(self, bytecode) assert 0, "unreachable" - + # ============== Methods for handling user interrupts ============== - + def jitted_check_for_interrupt(self, s_frame): if not self.interrupts: return @@ -168,7 +168,7 @@ decr_by = int(trace_length // 100) decr_by = max(decr_by, 1) self.quick_check_for_interrupt(s_frame, decr_by) - + def quick_check_for_interrupt(self, s_frame, dec=1): if not self.interrupts: return @@ -204,7 +204,7 @@ return intmask(int((time.time() - self.startup_time) * 1000) & constants.TAGGED_MASK) # ============== Convenience methods for executing code ============== - + def interpret_toplevel(self, w_frame): try: self.loop(w_frame) @@ -215,7 +215,7 @@ s_frame = self.create_toplevel_context(w_receiver, selector, *w_arguments) self.interrupt_check_counter = self.interrupt_counter_size return self.interpret_toplevel(s_frame.w_self()) - + def create_toplevel_context(self, w_receiver, 
selector, *w_arguments): if isinstance(selector, str): if selector == "asSymbol": @@ -225,7 +225,7 @@ "asSymbol") else: w_selector = selector - + w_method = model.W_CompiledMethod(self.space, header=512) w_method.literalatput0(self.space, 1, w_selector) assert len(w_arguments) <= 7 @@ -235,7 +235,7 @@ s_frame.push(w_receiver) s_frame.push_all(list(w_arguments)) return s_frame - + def padding(self, symbol=' '): return symbol * self.current_stack_depth @@ -265,11 +265,26 @@ class ProcessSwitch(ContextSwitchException): """This causes the interpreter to switch the executed context.""" + +import rpython.rlib.unroll +if hasattr(unroll, "unrolling_zero"): + unrolling_zero = unroll.unrolling_zero +else: + class unrolling_int(int, unroll.SpecTag): + def __add__(self, other): + return unrolling_int(int.__add__(self, other)) + __radd__ = __add__ + def __sub__(self, other): + return unrolling_int(int.__sub__(self, other)) + def __rsub__(self, other): + return unrolling_int(int.__rsub__(self, other)) + unrolling_zero = unrolling_int(0) + + # This is a decorator for bytecode implementation methods. # parameter_bytes=N means N additional bytes are fetched as parameters. 
def bytecode_implementation(parameter_bytes=0): def bytecode_implementation_decorator(actual_implementation_method): - from rpython.rlib.unroll import unrolling_zero @jit.unroll_safe def bytecode_implementation_wrapper(self, interp, current_bytecode): parameters = () @@ -345,9 +360,9 @@ # __extend__ adds new methods to the ContextPartShadow class class __extend__(ContextPartShadow): - + # ====== Push/Pop bytecodes ====== - + @bytecode_implementation() def pushReceiverVariableBytecode(self, interp, current_bytecode): index = current_bytecode & 15 @@ -426,7 +441,7 @@ @bytecode_implementation() def popStackBytecode(self, interp, current_bytecode): self.pop() - + @bytecode_implementation(parameter_bytes=1) def pushNewArrayBytecode(self, interp, current_bytecode, descriptor): arraySize, popIntoArray = splitter[7, 1](descriptor) @@ -436,9 +451,9 @@ else: newArray = interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) self.push(newArray) - + # ====== Extended Push/Pop bytecodes ====== - + def _extendedVariableTypeAndIndex(self, descriptor): return ((descriptor >> 6) & 3), (descriptor & 63) @@ -474,16 +489,16 @@ @bytecode_implementation(parameter_bytes=1) def extendedStoreBytecode(self, interp, current_bytecode, descriptor): return self._extendedStoreBytecode(interp, current_bytecode, descriptor) - + @bytecode_implementation(parameter_bytes=1) def extendedStoreAndPopBytecode(self, interp, current_bytecode, descriptor): self._extendedStoreBytecode(interp, current_bytecode, descriptor) self.pop() - + def _extract_index_and_temps(self, index_in_array, index_of_array): w_indirectTemps = self.gettemp(index_of_array) return index_in_array, w_indirectTemps - + @bytecode_implementation(parameter_bytes=2) def pushRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) @@ -521,7 +536,7 @@ copiedValues: copiedValues). 
self jump: blockSize """ - + space = self.space numArgs, numCopied = splitter[4, 4](descriptor) blockSize = (j << 8) | i @@ -530,7 +545,7 @@ self.pop_and_return_n(numCopied)) self.push(w_closure) self._jump(blockSize) - + # ====== Helpers for send/return bytecodes ====== def _sendSelfSelector(self, w_selector, argcount, interp): @@ -552,7 +567,7 @@ w_method = receiverclassshadow.lookup(w_selector) except MethodNotFound: return self._doesNotUnderstand(w_selector, argcount, interp, receiver) - + code = w_method.primitive() if code: if w_arguments: @@ -576,21 +591,21 @@ def _sendSelfSelectorSpecial(self, selector, numargs, interp): w_selector = self.space.get_special_selector(selector) return self._sendSelfSelector(w_selector, numargs, interp) - + def _sendSpecialSelector(self, interp, receiver, special_selector, w_args=[]): w_special_selector = self.space.objtable["w_" + special_selector] s_class = receiver.class_shadow(self.space) w_method = s_class.lookup(w_special_selector) s_frame = w_method.create_frame(interp.space, receiver, w_args) - + # ###################################################################### if interp.trace: print '%s %s %s: #%s' % (interp.padding('#'), special_selector, s_frame.short_str(), w_args) if not objectmodel.we_are_translated(): import pdb; pdb.set_trace() - + return interp.stack_frame(s_frame, self) - + def _doesNotUnderstand(self, w_selector, argcount, interp, receiver): arguments = self.pop_and_return_n(argcount) w_message_class = self.space.classtable["w_Message"] @@ -600,7 +615,7 @@ w_message.store(self.space, 0, w_selector) w_message.store(self.space, 1, self.space.wrap_list(arguments)) self.pop() # The receiver, already known. 
- + try: return self._sendSpecialSelector(interp, receiver, "doesNotUnderstand", [w_message]) except MethodNotFound: @@ -609,10 +624,10 @@ assert isinstance(s_class, ClassShadow) print "Missing doesNotUnderstand in hierarchy of %s" % s_class.getname() raise - + def _mustBeBoolean(self, interp, receiver): return self._sendSpecialSelector(interp, receiver, "mustBeBoolean") - + def _call_primitive(self, code, interp, argcount, w_method, w_selector): # ################################################################## if interp.trace: @@ -632,11 +647,11 @@ def _return(self, return_value, interp, s_return_to): # unfortunately, this assert is not true for some tests. TODO fix this. # assert self._stack_ptr == self.tempsize() - + # ################################################################## if interp.trace: print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) - + if s_return_to is None: # This should never happen while executing a normal image. raise ReturnFromTopLevel(return_value) @@ -733,7 +748,7 @@ return self._sendSelfSelector(w_selector, argcount, interp) # ====== Misc ====== - + def _activate_unwind_context(self, interp): # TODO put the constant somewhere else. # Primitive 198 is used in BlockClosure >> ensure: @@ -751,11 +766,11 @@ raise nlr finally: self.mark_returned() - + @bytecode_implementation() def unknownBytecode(self, interp, current_bytecode): raise MissingBytecode("unknownBytecode") - + @bytecode_implementation() def experimentalBytecode(self, interp, current_bytecode): raise MissingBytecode("experimentalBytecode") @@ -772,7 +787,7 @@ else: w_alternative = interp.space.w_true w_expected = interp.space.w_false - + # Don't check the class, just compare with only two Boolean instances. 
w_bool = self.pop() if w_expected.is_same_object(w_bool): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1356,7 +1356,7 @@ def func(interp, s_frame, w_rcvr, w_selector, w_arguments): from spyvm.shadow import MethodNotFound s_frame.pop_n(2) # removing our arguments - + return s_frame._sendSelector(w_selector, len(w_arguments), interp, w_rcvr, w_rcvr.class_shadow(interp.space), w_arguments=w_arguments) @@ -1385,17 +1385,15 @@ @expose_primitive(RESUME, unwrap_spec=[object], no_result=True, clean_stack=False) def func(interp, s_frame, w_rcvr): - import pdb; pdb.set_trace() assert_class(interp, w_rcvr, interp.space.w_Process) wrapper.ProcessWrapper(interp.space, w_rcvr).resume(s_frame) @expose_primitive(SUSPEND, unwrap_spec=[object], no_result=True, clean_stack=False) def func(interp, s_frame, w_rcvr): - import pdb; pdb.set_trace() assert_class(interp, w_rcvr, interp.space.w_Process) wrapper.ProcessWrapper(interp.space, w_rcvr).suspend(s_frame) - - + + @expose_primitive(FLUSH_CACHE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -20,7 +20,7 @@ _immutable_fields_ = ['space'] provides_getname = False repr_classname = "AbstractShadow" - + def __init__(self, space, w_self): self.space = space assert w_self is None or isinstance(w_self, model.W_PointersObject) @@ -34,19 +34,19 @@ return "<%s %s>" % (self.repr_classname, self.getname()) else: return "<%s>" % self.repr_classname - + def fetch(self, n0): raise NotImplementedError("Abstract class") def store(self, n0, w_value): raise NotImplementedError("Abstract class") def size(self): raise NotImplementedError("Abstract class") - + def attach_shadow(self): pass - + def copy_field_from(self, n0, other_shadow): self.store(n0, other_shadow.fetch(n0)) - + # This can be overwritten to change the order of initialization. 
def copy_from(self, other_shadow): assert self.size() == other_shadow.size() @@ -98,24 +98,24 @@ # Class must provide: wrap, unwrap, nil_value, is_nil_value, wrapper_class _attrs_ = ['storage'] _immutable_fields_ = ['storage'] - + def __init__(self, space, w_self, size): AbstractStorageShadow.__init__(self, space, w_self, size) self.storage = [self.nil_value] * size - + def size(self): return len(self.storage) - + def generalized_strategy_for(self, w_val): return ListStorageShadow - + def fetch(self, n0): val = self.storage[n0] if self.is_nil_value(val): return self.space.w_nil else: return self.wrap(self.space, val) - + def do_store(self, n0, w_val): if w_val.is_nil(self.space): self.storage[n0] = self.nil_value @@ -134,7 +134,7 @@ nil_value = constants.MAXINT wrapper_class = model.W_SmallInteger import_from_mixin(AbstractValueOrNilStorageMixin) - + @staticmethod def static_can_contain(space, w_val): return _value_or_nil_can_handle(SmallIntegerOrNilStorageShadow, space, w_val) @@ -153,7 +153,7 @@ nil_value = sys.float_info.max wrapper_class = model.W_Float import_from_mixin(AbstractValueOrNilStorageMixin) - + @staticmethod def static_can_contain(space, w_val): return _value_or_nil_can_handle(FloatOrNilStorageShadow, space, w_val) @@ -193,17 +193,17 @@ if float_can_handle and not FloatOrNilStorageShadow.static_can_contain(space, w_obj): float_can_handle = False specialized_strategies = specialized_strategies - 1 - + if specialized_strategies <= 0: return ListStorageShadow - + if all_nil_can_handle: return AllNilStorageShadow if small_int_can_handle: return SmallIntegerOrNilStorageShadow if float_can_handle: return FloatOrNilStorageShadow - + # If this happens, please look for a bug in the code above. assert False, "No strategy could be found for list..." 
@@ -223,7 +223,7 @@ _immutable_fields_ = ['storage'] repr_classname = "ListStorageShadow" import_from_mixin(ListStorageMixin) - + def initialize_storage(self, size): self.storage = [self.space.w_nil] * size def fetch(self, n0): @@ -236,7 +236,7 @@ _immutable_fields_ = ['storage'] repr_classname = "WeakListStorageShadow" import_from_mixin(ListStorageMixin) - + def initialize_storage(self, size): self.storage = [weakref.ref(self.space.w_nil)] * size def fetch(self, n0): @@ -245,14 +245,14 @@ def store(self, n0, w_value): assert w_value is not None self.storage[n0] = weakref.ref(w_value) - + class AbstractCachingShadow(ListStorageShadow): _immutable_fields_ = ['version?'] _attrs_ = ['version'] repr_classname = "AbstractCachingShadow" import_from_mixin(version.VersionMixin) version = None - + def __init__(self, space, w_self): ListStorageShadow.__init__(self, space, w_self, 0) self.changed() @@ -284,7 +284,7 @@ _s_superclass = _s_methoddict = None provides_getname = True repr_classname = "ClassShadow" - + def __init__(self, space, w_self): self.subclass_s = {} AbstractCachingShadow.__init__(self, space, w_self) @@ -305,7 +305,7 @@ # In Slang the value is read directly as a boxed integer, so that # the code gets a "pointer" whose bits are set as above, but # shifted one bit to the left and with the lowest bit set to 1. - + # Compute the instance size (really the size, not the number of bytes) instsize_lo = (classformat >> 1) & 0x3F instsize_hi = (classformat >> (9 + 1)) & 0xC0 @@ -313,10 +313,10 @@ # decode the instSpec format = (classformat >> 7) & 15 self.instance_varsized = format >= 2 - + # In case of raised exception below. self.changed() - + if format < 4: self.instance_kind = POINTERS elif format == 4: @@ -356,7 +356,7 @@ return # Some of the special info has changed -> Switch version. 
self.changed() - + def store_w_superclass(self, w_class): superclass = self._s_superclass if w_class is None or w_class.is_nil(self.space): @@ -383,24 +383,24 @@ return if methoddict: methoddict.s_class = None self.store_s_methoddict(s_new_methoddict) - + def store_s_methoddict(self, s_methoddict): s_methoddict.s_class = self s_methoddict.sync_method_cache() self._s_methoddict = s_methoddict - + def attach_s_class(self, s_other): self.subclass_s[s_other] = None def detach_s_class(self, s_other): del self.subclass_s[s_other] - + def store_w_name(self, w_name): if isinstance(w_name, model.W_BytesObject): self.name = w_name.as_string() else: self.name = None - + @jit.unroll_safe def flush_method_caches(self): look_in_shadow = self @@ -497,7 +497,7 @@ self.version = version for s_class in self.subclass_s: s_class.superclass_changed(version) - + # _______________________________________________________________ # Methods used only in testing @@ -532,7 +532,7 @@ _immutable_fields_ = ['invalid?', 's_class'] _attrs_ = ['methoddict', 'invalid', 's_class'] repr_classname = "MethodDictionaryShadow" - + def __init__(self, space, w_self): self.invalid = True self.s_class = None @@ -541,7 +541,7 @@ def update(self): self.sync_method_cache() - + def find_selector(self, w_selector): if self.invalid: return None # we may be invalid if Smalltalk code did not call flushCache @@ -593,7 +593,7 @@ class AbstractRedirectingShadow(AbstractShadow): _attrs_ = ['_w_self_size'] repr_classname = "AbstractRedirectingShadow" - + def __init__(self, space, w_self): AbstractShadow.__init__(self, space, w_self) if w_self is not None: @@ -611,7 +611,7 @@ '_pc', '_temps_and_stack', '_stack_ptr', 'instances_w'] repr_classname = "ContextPartShadow" - + _virtualizable_ = [ 'direct_sender', 'virtual_sender', "_pc", "_temps_and_stack[*]", "_stack_ptr", @@ -620,7 +620,7 @@ # ______________________________________________________________________ # Initialization - + def __init__(self, space, w_self): 
self.direct_sender = None self.virtual_sender = jit.vref_None @@ -632,26 +632,26 @@ AbstractRedirectingShadow.copy_field_from(self, n0, other_shadow) except error.SenderChainManipulation, e: assert e.s_context == self - + def copy_from(self, other_shadow): # Some fields have to be initialized before the rest, to ensure correct initialization. privileged_fields = self.fields_to_copy_first() for n0 in privileged_fields: self.copy_field_from(n0, other_shadow) - + # Now the temp size will be known. self.init_stack_and_temps() - + for n0 in range(self.size()): if n0 not in privileged_fields: self.copy_field_from(n0, other_shadow) - + def fields_to_copy_first(self): return [] - + # ______________________________________________________________________ # Accessing object fields - + def fetch(self, n0): if n0 == constants.CTXPART_SENDER_INDEX: return self.w_sender() @@ -690,45 +690,45 @@ else: # XXX later should store tail out of known context part as well raise error.WrapperException("Index in context out of bounds") - + # === Sender === # There are two fields for the sender (virtual and direct). Only one of them is can be set at a time. # As long as the frame object is virtualized, using the virtual reference should increase performance. # As soon as a frame object is forced to the heap, the direct reference must be used. - + def is_fresh(self): return self.direct_sender is None and self.virtual_sender is jit.vref_None - + def finish_virtual_sender(self, save_direct_sender=True): if self.virtual_sender is not jit.vref_None: - sender = self.virtual_sender() + sender = self.virtual_sender() # xxx: check if we can move this down jit.virtual_ref_finish(self.virtual_sender, sender) self.virtual_sender = jit.vref_None if save_direct_sender: self.direct_sender = sender - + def store_s_sender(self, s_sender, raise_error=True): # If we have a virtual back reference, we must finish it before storing the direct reference. 
- self.finish_virtual_sender(save_direct_sender=False) + # self.finish_virtual_sender(save_direct_sender=False) self.direct_sender = s_sender if raise_error: raise error.SenderChainManipulation(self) - + def w_sender(self): sender = self.s_sender() if sender is None: return self.space.w_nil return sender.w_self() - + def s_sender(self): if self.direct_sender: return self.direct_sender else: result = self.virtual_sender() return result - + # === Stack Pointer === - + def unwrap_store_stackpointer(self, w_sp1): # the stackpointer in the W_PointersObject starts counting at the # tempframe start @@ -747,12 +747,12 @@ def stackdepth(self): return rarithmetic.intmask(self._stack_ptr) - + def wrap_stackpointer(self): return self.space.wrap_int(self.stackdepth()) # === Program Counter === - + def store_unwrap_pc(self, w_pc): if w_pc.is_nil(self.space): self.store_pc(-1) @@ -777,9 +777,9 @@ def store_pc(self, newpc): assert newpc >= -1 self._pc = newpc - + # === Subclassed accessors === - + def s_home(self): raise NotImplementedError() @@ -788,18 +788,18 @@ def w_receiver(self): raise NotImplementedError() - + def w_method(self): raise NotImplementedError() - + def tempsize(self): raise NotImplementedError() - + def is_closure_context(self): raise NotImplementedError() - + # === Other properties of Contexts === - + def mark_returned(self): self.store_pc(-1) self.store_s_sender(None, raise_error=False) @@ -809,25 +809,25 @@ def external_stackpointer(self): return self.stackdepth() + self.stackstart() - + def stackend(self): # XXX this is incorrect when there is subclassing return self._w_self_size - + def fetch_next_bytecode(self): pc = jit.promote(self._pc) assert pc >= 0 self._pc += 1 return self.fetch_bytecode(pc) - + def fetch_bytecode(self, pc): bytecode = self.w_method().fetch_bytecode(pc) return ord(bytecode) - + # ______________________________________________________________________ # Temporary Variables # - # Every context has it's own stack. 
BlockContexts share their temps with + # Every context has it's own stack. BlockContexts share their temps with # their home contexts. MethodContexts created from a BlockClosure get their # temps copied from the closure upon activation. Changes are not propagated back; # this is handled by the compiler by allocating an extra Array for temps. @@ -837,7 +837,7 @@ def settemp(self, index, w_value): raise NotImplementedError() - + # ______________________________________________________________________ # Stack Manipulation @@ -851,13 +851,13 @@ for i in range(tempsize): temps_and_stack[i] = self.space.w_nil self._stack_ptr = rarithmetic.r_uint(tempsize) # we point after the last element - + def stack_get(self, index0): return self._temps_and_stack[index0] - + def stack_put(self, index0, w_val): self._temps_and_stack[index0] = w_val - + def stack(self): """NOT_RPYTHON""" # purely for testing return self._temps_and_stack[self.tempsize():self._stack_ptr] @@ -912,7 +912,7 @@ # ______________________________________________________________________ # Primitive support - + def store_instances_array(self, w_class, match_w): # used for primitives 77 & 78 self.instances_w[w_class] = match_w @@ -939,7 +939,7 @@ j += 1 retval += "\n---------------------" return retval - + def short_str(self): arg_strings = self.argument_strings() if len(arg_strings) > 0: @@ -953,10 +953,10 @@ self.w_receiver().as_repr_string(), args ) - + def print_stack(self, method=True): return self.print_padded_stack(method)[1] - + def print_padded_stack(self, method): padding = ret_str = '' if self.s_sender() is not None: @@ -970,9 +970,9 @@ class BlockContextShadow(ContextPartShadow): _attrs_ = ['_w_home', '_initialip', '_eargc'] repr_classname = "BlockContextShadow" - + # === Initialization === - + def __init__(self, space, w_self=None, w_home=None, argcnt=0, initialip=0): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) creating_w_self = w_self is None @@ -992,40 +992,40 @@ def 
fields_to_copy_first(self): return [ constants.BLKCTX_HOME_INDEX ] - + # === Implemented accessors === - + def s_home(self): return self._w_home.as_methodcontext_get_shadow(self.space) - + def stackstart(self): return constants.BLKCTX_STACK_START def tempsize(self): # A blockcontext doesn't have any temps return 0 - + def w_receiver(self): return self.s_home().w_receiver() - + def w_method(self): retval = self.s_home().w_method() assert isinstance(retval, model.W_CompiledMethod) return retval - + def is_closure_context(self): return True - + # === Temporary variables === - + def gettemp(self, index): return self.s_home().gettemp(index) def settemp(self, index, w_value): self.s_home().settemp(index, w_value) - + # === Accessing object fields === - + def fetch(self, n0): if n0 == constants.BLKCTX_HOME_INDEX: return self._w_home @@ -1045,11 +1045,11 @@ return self.unwrap_store_eargc(w_value) else: return ContextPartShadow.store(self, n0, w_value) - + def store_w_home(self, w_home): assert isinstance(w_home, model.W_PointersObject) self._w_home = w_home - + def unwrap_store_initialip(self, w_value): initialip = self.space.unwrap_int(w_value) initialip -= 1 + self.w_method().literalsize @@ -1057,18 +1057,18 @@ def store_initialip(self, initialip): self._initialip = initialip - + def wrap_initialip(self): initialip = self.initialip() initialip += 1 + self.w_method().literalsize return self.space.wrap_int(initialip) - + def reset_pc(self): self.store_pc(self.initialip()) - + def initialip(self): return self._initialip - + def unwrap_store_eargc(self, w_value): self.store_expected_argument_count(self.space.unwrap_int(w_value)) @@ -1082,24 +1082,24 @@ self._eargc = argc # === Stack Manipulation === - + def reset_stack(self): self.pop_n(self.stackdepth()) # === Printing === - + def argument_strings(self): return [] - + def method_str(self): return '[] in %s' % self.w_method().get_identifier_string() class MethodContextShadow(ContextPartShadow): _attrs_ = ['closure', 
'_w_receiver', '_w_method'] repr_classname = "MethodContextShadow" - + # === Initialization === - + @jit.unroll_safe def __init__(self, space, w_self=None, w_method=None, w_receiver=None, arguments=[], closure=None, pc=0): @@ -1108,7 +1108,7 @@ self.store_w_receiver(w_receiver) self.store_pc(pc) self.closure = closure - + if w_method: self.store_w_method(w_method) # The summand is needed, because we calculate i.a. our stackdepth relative of the size of w_self. @@ -1117,20 +1117,20 @@ self.init_stack_and_temps() else: self._w_method = None - + argc = len(arguments) for i0 in range(argc): self.settemp(i0, arguments[i0]) - + if closure: for i0 in range(closure.size()): self.settemp(i0+argc, closure.at0(i0)) def fields_to_copy_first(self): return [ constants.MTHDCTX_METHOD, constants.MTHDCTX_CLOSURE_OR_NIL ] - + # === Accessing object fields === - + def fetch(self, n0): if n0 == constants.MTHDCTX_METHOD: return self.w_method() @@ -1164,12 +1164,12 @@ return self.settemp(temp_i, w_value) else: return ContextPartShadow.store(self, n0, w_value) - + def store_w_receiver(self, w_receiver): self._w_receiver = w_receiver - + # === Implemented Accessors === - + def s_home(self): if self.is_closure_context(): # this is a context for a blockClosure @@ -1182,31 +1182,31 @@ return s_outerContext.s_home() else: return self - + def stackstart(self): return constants.MTHDCTX_TEMP_FRAME_START - + def store_w_method(self, w_method): assert isinstance(w_method, model.W_CompiledMethod) self._w_method = w_method def w_receiver(self): return self._w_receiver - + def w_method(self): retval = self._w_method assert isinstance(retval, model.W_CompiledMethod) return retval - + def tempsize(self): if not self.is_closure_context(): return self.w_method().tempsize() else: return self.closure.tempsize() - + def is_closure_context(self): return self.closure is not None - + # ______________________________________________________________________ # Marriage of MethodContextShadows with PointerObjects 
only when required @@ -1223,9 +1223,9 @@ self._w_self = w_self self._w_self_size = w_self.size() return w_self - + # === Temporary variables === - + def gettemp(self, index0): return self.stack_get(index0) @@ -1233,7 +1233,7 @@ self.stack_put(index0, w_value) # === Printing === - + def argument_strings(self): argcount = self.w_method().argsize tempsize = self.w_method().tempsize() From noreply at buildbot.pypy.org Wed Jul 9 16:18:20 2014 From: noreply at buildbot.pypy.org (timfel) Date: Wed, 9 Jul 2014 16:18:20 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: only force and store the sender if the context wasn't returned properly (it had an exception) Message-ID: <20140709141820.50E171C33F5@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage Changeset: r872:04a55ec5b4d2 Date: 2014-07-09 16:11 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/04a55ec5b4d2/ Log: only force and store the sender if the context wasn't returned properly (it had an exception) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -141,7 +141,7 @@ # Cleanly leave the context. This will finish the virtual sender-reference, if # it is still there, which can happen in case of ProcessSwitch or StackOverflow; # in case of a Return, this will already be handled while unwinding the stack. 
- s_frame.finish_virtual_sender() + s_frame.finish_virtual_sender(s_sender) def step(self, context): bytecode = context.fetch_next_bytecode() diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -699,13 +699,15 @@ def is_fresh(self): return self.direct_sender is None and self.virtual_sender is jit.vref_None - def finish_virtual_sender(self, save_direct_sender=True): + def finish_virtual_sender(self, s_sender): if self.virtual_sender is not jit.vref_None: - sender = self.virtual_sender() # xxx: check if we can move this down - jit.virtual_ref_finish(self.virtual_sender, sender) + if self.pc() != -1: + # stack is unrolling, but this frame was not + # marked_returned: it is an escaped frame + sender = self.virtual_sender() + self.direct_sender = sender + jit.virtual_ref_finish(self.virtual_sender, s_sender) self.virtual_sender = jit.vref_None - if save_direct_sender: - self.direct_sender = sender def store_s_sender(self, s_sender, raise_error=True): # If we have a virtual back reference, we must finish it before storing the direct reference. 
From noreply at buildbot.pypy.org Wed Jul 9 17:18:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 17:18:32 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140709151832.7C6571C021D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72398:3dec80f8412a Date: 2014-07-09 17:18 +0200 http://bitbucket.org/pypy/pypy/changeset/3dec80f8412a/ Log: merge heads diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -73,13 +73,12 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', - 'list_strategy' : 'interp_magic.list_strategy', 'validate_fd' : 'interp_magic.validate_fd', 'resizelist_hint' : 'interp_magic.resizelist_hint', 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', - 'dictstrategy' : 'interp_dict.dictstrategy', + 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', } diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -1,7 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.objspace.std.dictmultiobject import W_DictMultiObject @unwrap_spec(type=str) def newdict(space, type): @@ -31,13 +30,3 @@ return space.newdict(strdict=True) else: raise oefmt(space.w_TypeError, "unknown type of dict %s", type) - -def dictstrategy(space, w_obj): - """ dictstrategy(dict) - - show the underlaying strategy used by a dict object - """ - if not isinstance(w_obj, W_DictMultiObject): - raise OperationError(space.w_TypeError, - space.wrap("expecting dict 
object")) - return space.wrap('%r' % (w_obj.strategy,)) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -2,7 +2,9 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pyframe import PyFrame from rpython.rlib.objectmodel import we_are_translated +from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.setobject import W_BaseSetObject from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -70,12 +72,27 @@ def do_what_I_mean(space): return space.wrap(42) -def list_strategy(space, w_list): - if isinstance(w_list, W_ListObject): - return space.wrap(w_list.strategy._applevel_repr) + +def _nameof(cls): + return cls.__name__ +_nameof._annspecialcase_ = 'specialize:memo' + +def strategy(space, w_obj): + """ strategy(dict or list or set) + + Return the underlying strategy currently used by a dict, list or set object + """ + if isinstance(w_obj, W_DictMultiObject): + name = _nameof(w_obj.strategy.__class__) + elif isinstance(w_obj, W_ListObject): + name = _nameof(w_obj.strategy.__class__) + elif isinstance(w_obj, W_BaseSetObject): + name = _nameof(w_obj.strategy.__class__) else: - w_msg = space.wrap("Can only get the list strategy of a list") - raise OperationError(space.w_TypeError, w_msg) + raise OperationError(space.w_TypeError, + space.wrap("expecting dict or list or set object")) + return space.wrap(name) + @unwrap_spec(fd='c_int') def validate_fd(space, fd): diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -46,26 +46,42 @@ assert x == 42 def test_list_strategy(self): - from __pypy__ import list_strategy 
+ from __pypy__ import strategy l = [1, 2, 3] - assert list_strategy(l) == "int" + assert strategy(l) == "IntegerListStrategy" l = ["a", "b", "c"] - assert list_strategy(l) == "bytes" + assert strategy(l) == "BytesListStrategy" l = [u"a", u"b", u"c"] - assert list_strategy(l) == "unicode" + assert strategy(l) == "UnicodeListStrategy" l = [1.1, 2.2, 3.3] - assert list_strategy(l) == "float" + assert strategy(l) == "FloatListStrategy" l = range(3) - assert list_strategy(l) == "simple_range" + assert strategy(l) == "SimpleRangeListStrategy" l = range(1, 2) - assert list_strategy(l) == "range" + assert strategy(l) == "RangeListStrategy" l = [1, "b", 3] - assert list_strategy(l) == "object" + assert strategy(l) == "ObjectListStrategy" l = [] - assert list_strategy(l) == "empty" + assert strategy(l) == "EmptyListStrategy" o = 5 - raises(TypeError, list_strategy, 5) + raises(TypeError, strategy, 5) + + def test_dict_strategy(self): + from __pypy__ import strategy + + d = {} + assert strategy(d) == "EmptyDictStrategy" + d = {1: None, 5: None} + assert strategy(d) == "IntDictStrategy" + + def test_set_strategy(self): + from __pypy__ import strategy + + s = set() + assert strategy(s) == "EmptySetStrategy" + s = set([2, 3, 4]) + assert strategy(s) == "IntegerSetStrategy" class AppTestJitFeatures(object): diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -842,8 +842,6 @@ W_Lists do not switch back to EmptyListStrategy when becoming empty again. """ - _applevel_repr = "empty" - def __init__(self, space): ListStrategy.__init__(self, space) @@ -1102,8 +1100,6 @@ method providing only positive length. 
The storage is a one element tuple with positive integer storing length.""" - _applevel_repr = "simple_range" - erase, unerase = rerased.new_erasing_pair("simple_range") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1176,8 +1172,6 @@ destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" - _applevel_repr = "range" - erase, unerase = rerased.new_erasing_pair("range") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1555,7 +1549,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "object" def unwrap(self, w_obj): return w_obj @@ -1590,7 +1583,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = 0 - _applevel_repr = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -1644,7 +1636,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = 0.0 - _applevel_repr = "float" def wrap(self, floatval): return self.space.wrap(floatval) @@ -1677,7 +1668,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "bytes" def wrap(self, stringval): return self.space.wrap(stringval) @@ -1710,7 +1700,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "unicode" def wrap(self, stringval): return self.space.wrap(stringval) diff --git a/rpython/rtyper/test/test_rclass.py b/rpython/rtyper/test/test_rclass.py --- a/rpython/rtyper/test/test_rclass.py +++ b/rpython/rtyper/test/test_rclass.py @@ -440,6 +440,25 @@ res = self.interpret(f, [3]) assert res == ~0x0200 & 0x3ff + def test_class___name__(self): + class ACLS(object): pass + class Bcls(ACLS): pass + class CCls(ACLS): pass + def nameof(cls): + return cls.__name__ + nameof._annspecialcase_ = "specialize:memo" + def f(i): + if i == 1: x = ACLS() + elif i == 2: x = Bcls() + else: x = CCls() + return nameof(x.__class__) + res = self.interpret(f, [1]) + assert ''.join(res.chars) == 'ACLS' + res = self.interpret(f, [2]) + assert 
''.join(res.chars) == 'Bcls' + res = self.interpret(f, [3]) + assert ''.join(res.chars) == 'CCls' + def test_hash_preservation(self): from rpython.rlib.objectmodel import current_object_addr_as_int from rpython.rlib.objectmodel import compute_identity_hash From noreply at buildbot.pypy.org Wed Jul 9 17:20:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 17:20:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix a set-strategy bug: set-of-ints.update(empty-set) would devolve the Message-ID: <20140709152021.0FEF71C1068@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72399:cf24a62874df Date: 2014-07-09 17:19 +0200 http://bitbucket.org/pypy/pypy/changeset/cf24a62874df/ Log: Fix a set-strategy bug: set-of-ints.update(empty-set) would devolve the set to set-of-objects... diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1181,7 +1181,8 @@ d_other = self.unerase(w_other.sstorage) d_set.update(d_other) return - + if w_other.length() == 0: + return w_set.switch_to_object_strategy(self.space) w_set.update(w_other) diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -960,3 +960,10 @@ # did not work before because of an optimization that swaps both # operands when the first set is larger than the second assert type(frozenset([1, 2]) & set([2])) is frozenset + + def test_update_bug_strategy(self): + from __pypy__ import strategy + s = set([1, 2, 3]) + assert strategy(s) == "IntegerSetStrategy" + s.update(set()) + assert strategy(s) == "IntegerSetStrategy" From noreply at buildbot.pypy.org Wed Jul 9 17:18:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 17:18:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Unify __pypy__.list_strategy() with 
__pypy__.dictstrategy() and make it Message-ID: <20140709151831.114A41C021D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72397:65c577d414ca Date: 2014-07-09 17:08 +0200 http://bitbucket.org/pypy/pypy/changeset/65c577d414ca/ Log: Unify __pypy__.list_strategy() with __pypy__.dictstrategy() and make it work for sets too. diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -73,13 +73,12 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', - 'list_strategy' : 'interp_magic.list_strategy', 'validate_fd' : 'interp_magic.validate_fd', 'resizelist_hint' : 'interp_magic.resizelist_hint', 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', - 'dictstrategy' : 'interp_dict.dictstrategy', + 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', } diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -1,7 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.objspace.std.dictmultiobject import W_DictMultiObject @unwrap_spec(type=str) def newdict(space, type): @@ -31,13 +30,3 @@ return space.newdict(strdict=True) else: raise oefmt(space.w_TypeError, "unknown type of dict %s", type) - -def dictstrategy(space, w_obj): - """ dictstrategy(dict) - - show the underlaying strategy used by a dict object - """ - if not isinstance(w_obj, W_DictMultiObject): - raise OperationError(space.w_TypeError, - space.wrap("expecting dict object")) - return space.wrap('%r' % (w_obj.strategy,)) diff --git 
a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -2,7 +2,9 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pyframe import PyFrame from rpython.rlib.objectmodel import we_are_translated +from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.setobject import W_BaseSetObject from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -70,12 +72,27 @@ def do_what_I_mean(space): return space.wrap(42) -def list_strategy(space, w_list): - if isinstance(w_list, W_ListObject): - return space.wrap(w_list.strategy._applevel_repr) + +def _nameof(cls): + return cls.__name__ +_nameof._annspecialcase_ = 'specialize:memo' + +def strategy(space, w_obj): + """ strategy(dict or list or set) + + Return the underlying strategy currently used by a dict, list or set object + """ + if isinstance(w_obj, W_DictMultiObject): + name = _nameof(w_obj.strategy.__class__) + elif isinstance(w_obj, W_ListObject): + name = _nameof(w_obj.strategy.__class__) + elif isinstance(w_obj, W_BaseSetObject): + name = _nameof(w_obj.strategy.__class__) else: - w_msg = space.wrap("Can only get the list strategy of a list") - raise OperationError(space.w_TypeError, w_msg) + raise OperationError(space.w_TypeError, + space.wrap("expecting dict or list or set object")) + return space.wrap(name) + @unwrap_spec(fd='c_int') def validate_fd(space, fd): diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -46,26 +46,42 @@ assert x == 42 def test_list_strategy(self): - from __pypy__ import list_strategy + from __pypy__ import strategy l = [1, 2, 3] - assert 
list_strategy(l) == "int" + assert strategy(l) == "IntegerListStrategy" l = ["a", "b", "c"] - assert list_strategy(l) == "bytes" + assert strategy(l) == "BytesListStrategy" l = [u"a", u"b", u"c"] - assert list_strategy(l) == "unicode" + assert strategy(l) == "UnicodeListStrategy" l = [1.1, 2.2, 3.3] - assert list_strategy(l) == "float" + assert strategy(l) == "FloatListStrategy" l = range(3) - assert list_strategy(l) == "simple_range" + assert strategy(l) == "SimpleRangeListStrategy" l = range(1, 2) - assert list_strategy(l) == "range" + assert strategy(l) == "RangeListStrategy" l = [1, "b", 3] - assert list_strategy(l) == "object" + assert strategy(l) == "ObjectListStrategy" l = [] - assert list_strategy(l) == "empty" + assert strategy(l) == "EmptyListStrategy" o = 5 - raises(TypeError, list_strategy, 5) + raises(TypeError, strategy, 5) + + def test_dict_strategy(self): + from __pypy__ import strategy + + d = {} + assert strategy(d) == "EmptyDictStrategy" + d = {1: None, 5: None} + assert strategy(d) == "IntDictStrategy" + + def test_set_strategy(self): + from __pypy__ import strategy + + s = set() + assert strategy(s) == "EmptySetStrategy" + s = set([2, 3, 4]) + assert strategy(s) == "IntegerSetStrategy" class AppTestJitFeatures(object): diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -842,8 +842,6 @@ W_Lists do not switch back to EmptyListStrategy when becoming empty again. """ - _applevel_repr = "empty" - def __init__(self, space): ListStrategy.__init__(self, space) @@ -1102,8 +1100,6 @@ method providing only positive length. 
The storage is a one element tuple with positive integer storing length.""" - _applevel_repr = "simple_range" - erase, unerase = rerased.new_erasing_pair("simple_range") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1176,8 +1172,6 @@ destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" - _applevel_repr = "range" - erase, unerase = rerased.new_erasing_pair("range") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1555,7 +1549,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "object" def unwrap(self, w_obj): return w_obj @@ -1590,7 +1583,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = 0 - _applevel_repr = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -1644,7 +1636,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = 0.0 - _applevel_repr = "float" def wrap(self, floatval): return self.space.wrap(floatval) @@ -1677,7 +1668,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "bytes" def wrap(self, stringval): return self.space.wrap(stringval) @@ -1710,7 +1700,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "unicode" def wrap(self, stringval): return self.space.wrap(stringval) diff --git a/rpython/rtyper/test/test_rclass.py b/rpython/rtyper/test/test_rclass.py --- a/rpython/rtyper/test/test_rclass.py +++ b/rpython/rtyper/test/test_rclass.py @@ -440,6 +440,25 @@ res = self.interpret(f, [3]) assert res == ~0x0200 & 0x3ff + def test_class___name__(self): + class ACLS(object): pass + class Bcls(ACLS): pass + class CCls(ACLS): pass + def nameof(cls): + return cls.__name__ + nameof._annspecialcase_ = "specialize:memo" + def f(i): + if i == 1: x = ACLS() + elif i == 2: x = Bcls() + else: x = CCls() + return nameof(x.__class__) + res = self.interpret(f, [1]) + assert ''.join(res.chars) == 'ACLS' + res = self.interpret(f, [2]) + assert 
''.join(res.chars) == 'Bcls' + res = self.interpret(f, [3]) + assert ''.join(res.chars) == 'CCls' + def test_hash_preservation(self): from rpython.rlib.objectmodel import current_object_addr_as_int from rpython.rlib.objectmodel import compute_identity_hash From noreply at buildbot.pypy.org Wed Jul 9 17:48:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 17:48:05 +0200 (CEST) Subject: [pypy-commit] pypy default: Retype the field "name" on the base object RPython class to be a regular Message-ID: <20140709154805.8209D1D2335@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72400:0d95b46fffc3 Date: 2014-07-09 17:46 +0200 http://bitbucket.org/pypy/pypy/changeset/0d95b46fffc3/ Log: Retype the field "name" on the base object RPython class to be a regular rstr.STR instead of some null-terminated Array(Char). diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -42,7 +42,7 @@ return ': '.join([str(x) for x in self.args]) def type_name(etype): - return ''.join(etype.name).rstrip('\x00') + return ''.join(etype.name.chars) class LLInterpreter(object): """ low level interpreter working with concrete values. 
""" @@ -145,7 +145,7 @@ assert isinstance(exc, LLException) klass, inst = exc.args[0], exc.args[1] for cls in enumerate_exceptions_top_down(): - if "".join(klass.name).rstrip("\0") == cls.__name__: + if "".join(klass.name.chars) == cls.__name__: return cls raise ValueError("couldn't match exception, maybe it" " has RPython attributes like OSError?") diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -22,6 +22,7 @@ from rpython.rlib import objectmodel from rpython.tool.identity_dict import identity_dict from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem import rstr # # There is one "vtable" per user class, with the following structure: @@ -32,7 +33,7 @@ # RuntimeTypeInfo * rtti; # Signed subclassrange_min; //this is also the id of the class itself # Signed subclassrange_max; -# array { char } * name; +# RPyString * name; # struct object * instantiate(); # } # @@ -68,7 +69,7 @@ ('subclassrange_min', Signed), ('subclassrange_max', Signed), ('rtti', Ptr(RuntimeTypeInfo)), - ('name', Ptr(Array(Char))), + ('name', Ptr(rstr.STR)), ('hash', Signed), ('instantiate', Ptr(FuncType([], OBJECTPTR))), hints = {'immutable': True})) @@ -89,13 +90,6 @@ vtable = vtable.super return vtable -def alloc_array_name(name): - p = malloc(Array(Char), len(name)+1, immortal=True) - for i in range(len(name)): - p[i] = name[i] - p[len(name)] = '\x00' - return p - class ClassRepr(AbstractClassRepr): def __init__(self, rtyper, classdef): @@ -203,7 +197,7 @@ name = 'object' else: name = rsubcls.classdef.shortname - vtable.name = alloc_array_name(name) + vtable.name = rstr.string_repr.convert_const(name) if hasattr(rsubcls.classdef, 'my_instantiate_graph'): graph = rsubcls.classdef.my_instantiate_graph vtable.instantiate = self.rtyper.getcallable(graph) @@ -579,7 +573,6 @@ return hop.genop('ptr_nonzero', [vinst], resulttype=Bool) 
def ll_str(self, i): # doesn't work for non-gc classes! - from rpython.rtyper.lltypesystem import rstr from rpython.rtyper.lltypesystem.ll_str import ll_int2hex from rpython.rlib.rarithmetic import r_uint if not i: @@ -590,14 +583,8 @@ #uid = r_uint(cast_ptr_to_int(i)) uid = r_uint(llop.gc_id(lltype.Signed, i)) # - nameLen = len(instance.typeptr.name) - nameString = rstr.mallocstr(nameLen-1) - i = 0 - while i < nameLen - 1: - nameString.chars[i] = instance.typeptr.name[i] - i += 1 res = rstr.instance_str_prefix - res = rstr.ll_strconcat(res, nameString) + res = rstr.ll_strconcat(res, instance.typeptr.name) res = rstr.ll_strconcat(res, rstr.instance_str_infix) res = rstr.ll_strconcat(res, ll_int2hex(uid, False)) res = rstr.ll_strconcat(res, rstr.instance_str_suffix) diff --git a/rpython/rtyper/test/tool.py b/rpython/rtyper/test/tool.py --- a/rpython/rtyper/test/tool.py +++ b/rpython/rtyper/test/tool.py @@ -68,7 +68,7 @@ return fnptr._obj._callable def class_name(self, value): - return "".join(value.super.typeptr.name)[:-1] + return ''.join(value.super.typeptr.name.chars) def read_attr(self, value, attr_name): value = value._obj diff --git a/rpython/translator/c/src/debug_traceback.c b/rpython/translator/c/src/debug_traceback.c --- a/rpython/translator/c/src/debug_traceback.c +++ b/rpython/translator/c/src/debug_traceback.c @@ -66,7 +66,8 @@ void pypy_debug_catch_fatal_exception(void) { pypy_debug_traceback_print(); - fprintf(stderr, "Fatal RPython error: %s\n", - RPyFetchExceptionType()->ov_name->items); + fprintf(stderr, "Fatal RPython error: %.*s\n", + (int)(RPyFetchExceptionType()->ov_name->rs_chars.length), + RPyFetchExceptionType()->ov_name->rs_chars.items); abort(); } diff --git a/rpython/translator/c/src/exception.c b/rpython/translator/c/src/exception.c --- a/rpython/translator/c/src/exception.c +++ b/rpython/translator/c/src/exception.c @@ -16,8 +16,9 @@ long lineno, const char *functionname) { #ifdef DO_LOG_EXC - fprintf(stderr, "%s %s: %s:%ld %s\n", msg, 
- RPyFetchExceptionType()->ov_name->items, + fprintf(stderr, "%s %.*s: %s:%ld %s\n", msg, + (int)(RPyFetchExceptionType()->ov_name->rs_chars.length), + RPyFetchExceptionType()->ov_name->rs_chars.items, filename, lineno, functionname); #endif } From noreply at buildbot.pypy.org Wed Jul 9 18:33:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 18:33:59 +0200 (CEST) Subject: [pypy-commit] pypy default: Support in RPython fetching the __name__ of a class. Message-ID: <20140709163359.D47231C33F5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72401:4bf29ad76462 Date: 2014-07-09 18:01 +0200 http://bitbucket.org/pypy/pypy/changeset/4bf29ad76462/ Log: Support in RPython fetching the __name__ of a class. diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4276,6 +4276,15 @@ py.test.raises(annmodel.AnnotatorError, a.build_types, f, [annmodel.s_None]) + def test_class___name__(self): + class Abc(object): + pass + def f(): + return Abc().__class__.__name__ + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeString) + def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -745,6 +745,11 @@ class __extend__(SomePBC): def getattr(self, s_attr): + assert s_attr.is_constant() + if s_attr.const == '__name__': + from rpython.annotator.description import ClassDesc + if self.getKind() is ClassDesc: + return SomeString() bookkeeper = getbookkeeper() return bookkeeper.pbc_getattr(self, s_attr) getattr.can_only_throw = [] diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -5,7 +5,7 @@ from rpython.annotator.argument import simple_args from rpython.rtyper import rclass, 
callparse from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem.lltype import typeOf, Void +from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.rmodel import (Repr, inputconst, CanBeNull, mangle, warning, impossible_repr) from rpython.tool.pairtype import pair, pairtype @@ -113,7 +113,7 @@ llfn = rtyper.getcallable(graph) concreterow[funcdesc] = llfn assert len(concreterow) > 0 - concreterow.fntype = typeOf(llfn) # 'llfn' from the loop above + concreterow.fntype = lltype.typeOf(llfn)# 'llfn' from the loop above # (they should all have the same type) concreterows[shape, index] = concreterow @@ -161,7 +161,7 @@ self.callfamily = s_pbc.any_description().getcallfamily() if len(s_pbc.descriptions) == 1 and not s_pbc.can_be_None: # a single function - self.lowleveltype = Void + self.lowleveltype = lltype.Void else: concretetable, uniquerows = get_concrete_calltable(self.rtyper, self.callfamily) @@ -193,7 +193,7 @@ return self.funccache[funcdesc] except KeyError: pass - if self.lowleveltype is Void: + if self.lowleveltype is lltype.Void: result = None else: llfns = {} @@ -225,7 +225,7 @@ value = value.im_func # unbound method -> bare function elif isinstance(value, staticmethod): value = value.__get__(42) # hackish, get the function wrapped by staticmethod - if self.lowleveltype is Void: + if self.lowleveltype is lltype.Void: return None if value is None: null = self.rtyper.type_system.null_callable(self.lowleveltype) @@ -239,27 +239,27 @@ 'index' and 'shape' tells which of its items we are interested in. 
""" assert v.concretetype == self.lowleveltype - if self.lowleveltype is Void: + if self.lowleveltype is lltype.Void: assert len(self.s_pbc.descriptions) == 1 # lowleveltype wouldn't be Void otherwise funcdesc, = self.s_pbc.descriptions row_of_one_graph = self.callfamily.calltables[shape][index] graph = row_of_one_graph[funcdesc] llfn = self.rtyper.getcallable(graph) - return inputconst(typeOf(llfn), llfn) + return inputconst(lltype.typeOf(llfn), llfn) elif len(self.uniquerows) == 1: return v else: # 'v' is a Struct pointer, read the corresponding field row = self.concretetable[shape, index] - cname = inputconst(Void, row.attrname) + cname = inputconst(lltype.Void, row.attrname) return self.get_specfunc_row(llop, v, cname, row.fntype) def get_unique_llfn(self): # try to build a unique low-level function. Avoid to use # whenever possible! Doesn't work with specialization, multiple # different call sites, etc. - if self.lowleveltype is not Void: + if self.lowleveltype is not lltype.Void: raise TyperError("cannot pass multiple functions here") assert len(self.s_pbc.descriptions) == 1 # lowleveltype wouldn't be Void otherwise @@ -281,7 +281,7 @@ if graphs != [graph]*len(graphs): raise TyperError("cannot pass a specialized function here") llfn = self.rtyper.getcallable(graph) - return inputconst(typeOf(llfn), llfn) + return inputconst(lltype.typeOf(llfn), llfn) def get_concrete_llfn(self, s_pbc, args_s, op): bk = self.rtyper.annotator.bookkeeper @@ -293,7 +293,7 @@ row_of_one_graph = self.callfamily.calltables[shape][index] graph = row_of_one_graph[funcdesc] llfn = self.rtyper.getcallable(graph) - return inputconst(typeOf(llfn), llfn) + return inputconst(lltype.typeOf(llfn), llfn) def rtype_simple_call(self, hop): return self.call(hop) @@ -319,7 +319,7 @@ if isinstance(vlist[0], Constant): v = hop.genop('direct_call', vlist, resulttype = rresult) else: - vlist.append(hop.inputconst(Void, row_of_graphs.values())) + vlist.append(hop.inputconst(lltype.Void, 
row_of_graphs.values())) v = hop.genop('indirect_call', vlist, resulttype = rresult) if hop.r_result is impossible_repr: return None # see test_always_raising_methods @@ -331,10 +331,10 @@ # this check makes sense because both source and dest repr are FunctionsPBCRepr if r_fpbc1.lowleveltype == r_fpbc2.lowleveltype: return v - if r_fpbc1.lowleveltype is Void: + if r_fpbc1.lowleveltype is lltype.Void: return inputconst(r_fpbc2, r_fpbc1.s_pbc.const) - if r_fpbc2.lowleveltype is Void: - return inputconst(Void, None) + if r_fpbc2.lowleveltype is lltype.Void: + return inputconst(lltype.Void, None) return NotImplemented class OverriddenFunctionPBCRepr(Repr): @@ -342,7 +342,7 @@ self.rtyper = rtyper self.s_pbc = s_pbc assert len(s_pbc.descriptions) == 1 - self.lowleveltype = Void + self.lowleveltype = lltype.Void def rtype_simple_call(self, hop): from rpython.rtyper.rspecialcase import rtype_call_specialcase @@ -377,7 +377,7 @@ class SingleFrozenPBCRepr(Repr): """Representation selected for a single non-callable pre-built constant.""" - lowleveltype = Void + lowleveltype = lltype.Void def __init__(self, frozendesc): self.frozendesc = frozendesc @@ -412,7 +412,7 @@ return self.converted_pbc_cache[frozendesc] except KeyError: r = self.rtyper.getrepr(annmodel.SomePBC([frozendesc])) - if r.lowleveltype is Void: + if r.lowleveltype is lltype.Void: # must create a new empty structure, as a placeholder pbc = self.create_instance() else: @@ -462,7 +462,7 @@ result = self.create_instance() self.pbc_cache[frozendesc] = result for attr, (mangled_name, r_value) in self.fieldmap.items(): - if r_value.lowleveltype is Void: + if r_value.lowleveltype is lltype.Void: continue try: thisattrvalue = frozendesc.attrcache[attr] @@ -479,7 +479,7 @@ return hop.inputconst(hop.r_result, hop.s_result.const) attr = hop.args_s[1].const - vpbc, vattr = hop.inputargs(self, Void) + vpbc, vattr = hop.inputargs(self, lltype.Void) v_res = self.getfield(vpbc, attr, hop.llops) mangled_name, r_res = 
self.fieldmap[attr] return hop.llops.convertvar(v_res, r_res, hop.r_result) @@ -503,7 +503,7 @@ class __extend__(pairtype(AbstractMultipleUnrelatedFrozenPBCRepr, SingleFrozenPBCRepr)): def convert_from_to((r_pbc1, r_pbc2), v, llops): - return inputconst(Void, r_pbc2.frozendesc) + return inputconst(lltype.Void, r_pbc2.frozendesc) class MethodOfFrozenPBCRepr(Repr): @@ -594,7 +594,7 @@ # raise TyperError("unsupported: variable of type " # "class-pointer or None") if s_pbc.is_constant(): - self.lowleveltype = Void + self.lowleveltype = lltype.Void else: self.lowleveltype = self.getlowleveltype() @@ -617,7 +617,7 @@ def convert_desc(self, desc): if desc not in self.s_pbc.descriptions: raise TyperError("%r not in %r" % (desc, self)) - if self.lowleveltype is Void: + if self.lowleveltype is lltype.Void: return None subclassdef = desc.getuniqueclassdef() r_subclass = rclass.getclassrepr(self.rtyper, subclassdef) @@ -625,7 +625,7 @@ def convert_const(self, cls): if cls is None: - if self.lowleveltype is Void: + if self.lowleveltype is lltype.Void: return None else: T = self.lowleveltype @@ -639,8 +639,15 @@ return hop.inputconst(hop.r_result, hop.s_result.const) else: attr = hop.args_s[1].const + if attr == '__name__': + from rpython.rtyper.lltypesystem import rstr + class_repr = rclass.getclassrepr(self.rtyper, None) + vcls, vattr = hop.inputargs(class_repr, lltype.Void) + cname = inputconst(lltype.Void, 'name') + return hop.genop('getfield', [vcls, cname], + resulttype = lltype.Ptr(rstr.STR)) access_set, class_repr = self.get_access_set(attr) - vcls, vattr = hop.inputargs(class_repr, Void) + vcls, vattr = hop.inputargs(class_repr, lltype.Void) v_res = class_repr.getpbcfield(vcls, access_set, attr, hop.llops) s_res = access_set.s_value r_res = self.rtyper.getrepr(s_res) @@ -669,7 +676,7 @@ if len(self.s_pbc.descriptions) == 1: # instantiating a single class - if self.lowleveltype is not Void: + if self.lowleveltype is not lltype.Void: assert 0, "XXX None-or-1-class 
instantation not implemented" assert isinstance(s_instance, annmodel.SomeInstance) classdef = s_instance.classdef @@ -726,7 +733,7 @@ # turn a PBC of classes to a standard pointer-to-vtable class repr if r_clspbc.lowleveltype == r_cls.lowleveltype: return v - if r_clspbc.lowleveltype is Void: + if r_clspbc.lowleveltype is lltype.Void: return inputconst(r_cls, r_clspbc.s_pbc.const) # convert from ptr-to-object-vtable to ptr-to-more-precise-vtable return r_cls.fromclasstype(v, llops) @@ -736,10 +743,10 @@ # this check makes sense because both source and dest repr are ClassesPBCRepr if r_clspbc1.lowleveltype == r_clspbc2.lowleveltype: return v - if r_clspbc1.lowleveltype is Void: + if r_clspbc1.lowleveltype is lltype.Void: return inputconst(r_clspbc2, r_clspbc1.s_pbc.const) - if r_clspbc2.lowleveltype is Void: - return inputconst(Void, r_clspbc2.s_pbc.const) + if r_clspbc2.lowleveltype is lltype.Void: + return inputconst(lltype.Void, r_clspbc2.s_pbc.const) return NotImplemented def adjust_shape(hop2, s_shape): diff --git a/rpython/rtyper/test/test_rpbc.py b/rpython/rtyper/test/test_rpbc.py --- a/rpython/rtyper/test/test_rpbc.py +++ b/rpython/rtyper/test/test_rpbc.py @@ -1642,6 +1642,20 @@ res = self.interpret(g, []) assert res == False + def test_class___name__(self): + class Base(object): pass + class ASub(Base): pass + def g(n): + if n == 1: + x = Base() + else: + x = ASub() + return x.__class__.__name__ + res = self.interpret(g, [1]) + assert self.ll_to_string(res) == "Base" + res = self.interpret(g, [2]) + assert self.ll_to_string(res) == "ASub" + # ____________________________________________________________ class TestRPBCExtra(BaseRtypingTest): From noreply at buildbot.pypy.org Wed Jul 9 18:34:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 18:34:01 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix Message-ID: <20140709163401.2EA711C33F5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72402:a5013f2a905e Date: 
2014-07-09 18:32 +0200 http://bitbucket.org/pypy/pypy/changeset/a5013f2a905e/ Log: Fix diff --git a/rpython/jit/metainterp/virtualref.py b/rpython/jit/metainterp/virtualref.py --- a/rpython/jit/metainterp/virtualref.py +++ b/rpython/jit/metainterp/virtualref.py @@ -1,5 +1,5 @@ from rpython.rtyper.rmodel import inputconst, log -from rpython.rtyper.lltypesystem import lltype, llmemory, rclass +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr from rpython.jit.metainterp import history from rpython.jit.metainterp.virtualizable import TOKEN_NONE from rpython.jit.metainterp.virtualizable import TOKEN_TRACING_RESCALL @@ -19,7 +19,7 @@ self.jit_virtual_ref_vtable = lltype.malloc(rclass.OBJECT_VTABLE, zero=True, flavor='raw', immortal=True) - self.jit_virtual_ref_vtable.name = rclass.alloc_array_name( + self.jit_virtual_ref_vtable.name = rstr.string_repr.convert_const( 'jit_virtual_ref') # build some constants adr = llmemory.cast_ptr_to_adr(self.jit_virtual_ref_vtable) From noreply at buildbot.pypy.org Wed Jul 9 18:37:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 18:37:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Use __class__.__name__ directly here. Message-ID: <20140709163751.66CE91C33F5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72403:dab3b81bdd77 Date: 2014-07-09 18:36 +0200 http://bitbucket.org/pypy/pypy/changeset/dab3b81bdd77/ Log: Use __class__.__name__ directly here. 
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -73,21 +73,17 @@ return space.wrap(42) -def _nameof(cls): - return cls.__name__ -_nameof._annspecialcase_ = 'specialize:memo' - def strategy(space, w_obj): """ strategy(dict or list or set) Return the underlying strategy currently used by a dict, list or set object """ if isinstance(w_obj, W_DictMultiObject): - name = _nameof(w_obj.strategy.__class__) + name = w_obj.strategy.__class__.__name__ elif isinstance(w_obj, W_ListObject): - name = _nameof(w_obj.strategy.__class__) + name = w_obj.strategy.__class__.__name__ elif isinstance(w_obj, W_BaseSetObject): - name = _nameof(w_obj.strategy.__class__) + name = w_obj.strategy.__class__.__name__ else: raise OperationError(space.w_TypeError, space.wrap("expecting dict or list or set object")) From noreply at buildbot.pypy.org Wed Jul 9 19:08:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 19:08:05 +0200 (CEST) Subject: [pypy-commit] pypy default: Update to 0.8.6. No other changes in _cffi_backend from 0.8.2. Message-ID: <20140709170805.3BAB41C1068@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72404:44f8bf48500e Date: 2014-07-09 19:07 +0200 http://bitbucket.org/pypy/pypy/changeset/44f8bf48500e/ Log: Update to 0.8.6. No other changes in _cffi_backend from 0.8.2. 
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -8,7 +8,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.8.2")', + '__version__': 'space.wrap("0.8.6")', 'load_library': 'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3188,4 +3188,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8.2" + assert __version__ == "0.8.6" From noreply at buildbot.pypy.org Wed Jul 9 19:08:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 19:08:06 +0200 (CEST) Subject: [pypy-commit] pypy default: More places that need to be fixed for the new 'name' field type. Message-ID: <20140709170806.8D5811C1068@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72405:2982c4350071 Date: 2014-07-09 19:07 +0200 http://bitbucket.org/pypy/pypy/changeset/2982c4350071/ Log: More places that need to be fixed for the new 'name' field type. 
diff --git a/rpython/jit/codewriter/assembler.py b/rpython/jit/codewriter/assembler.py --- a/rpython/jit/codewriter/assembler.py +++ b/rpython/jit/codewriter/assembler.py @@ -248,7 +248,7 @@ if isinstance(TYPE, lltype.FuncType): name = value._obj._name elif TYPE == rclass.OBJECT_VTABLE: - name = ''.join(value.name).rstrip('\x00') + name = ''.join(value.name.chars) else: return addr = llmemory.cast_ptr_to_adr(value) diff --git a/rpython/jit/codewriter/heaptracker.py b/rpython/jit/codewriter/heaptracker.py --- a/rpython/jit/codewriter/heaptracker.py +++ b/rpython/jit/codewriter/heaptracker.py @@ -1,4 +1,4 @@ -from rpython.rtyper.lltypesystem import lltype, llmemory, rclass +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr from rpython.rlib.objectmodel import we_are_translated @@ -66,11 +66,7 @@ def set_testing_vtable_for_gcstruct(GCSTRUCT, vtable, name): # only for tests that need to register the vtable of their malloc'ed # structures in case they are GcStruct inheriting from OBJECT. 
- namez = name + '\x00' - vtable.name = lltype.malloc(rclass.OBJECT_VTABLE.name.TO, len(namez), - immortal=True) - for i in range(len(namez)): - vtable.name[i] = namez[i] + vtable.name = rstr.string_repr.convert_const(name) testing_gcstruct2vtable[GCSTRUCT] = vtable testing_gcstruct2vtable = {} From noreply at buildbot.pypy.org Wed Jul 9 19:47:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 19:47:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Fixes Message-ID: <20140709174751.029ED1D26B6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72406:3ae2e0814de1 Date: 2014-07-09 19:47 +0200 http://bitbucket.org/pypy/pypy/changeset/3ae2e0814de1/ Log: Fixes diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -112,11 +112,7 @@ self.vtable_counter += 1 S = self.get_random_structure_type(r, with_vtable=vtable, cache=False) name = S._name - vtable.name = lltype.malloc(lltype.Array(lltype.Char), len(name)+1, - immortal=True) - for i in range(len(name)): - vtable.name[i] = name[i] - vtable.name[len(name)] = '\x00' + vtable.name = rstr.string_repr.convert_const(name) self.structure_types_and_vtables.append((S, vtable)) # heaptracker.register_known_gctype(self.cpu, vtable, S) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -81,10 +81,10 @@ return box.getref(rclass.OBJECTPTR).typeptr node_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - node_vtable.name = rclass.alloc_array_name('node') + node_vtable.name = rstr.string_repr.convert_const('node') node_vtable_adr = llmemory.cast_ptr_to_adr(node_vtable) node_vtable2 = lltype.malloc(OBJECT_VTABLE, immortal=True) - node_vtable2.name = 
rclass.alloc_array_name('node2') + node_vtable2.name = rstr.string_repr.convert_const('node2') node_vtable_adr2 = llmemory.cast_ptr_to_adr(node_vtable2) cpu = runner.LLGraphCPU(None) @@ -331,7 +331,7 @@ def get_name_from_address(self, addr): # hack try: - return "".join(addr.ptr.name)[:-1] # remove \x00 + return "".join(addr.ptr.name.chars) except AttributeError: return "" From noreply at buildbot.pypy.org Wed Jul 9 19:56:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 19:56:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Reintroduce rclass.alloc_array_name() and revert some changes to use it again. Message-ID: <20140709175621.498901D26B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72407:1f0c59905c74 Date: 2014-07-09 19:55 +0200 http://bitbucket.org/pypy/pypy/changeset/1f0c59905c74/ Log: Reintroduce rclass.alloc_array_name() and revert some changes to use it again. diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -112,7 +112,7 @@ self.vtable_counter += 1 S = self.get_random_structure_type(r, with_vtable=vtable, cache=False) name = S._name - vtable.name = rstr.string_repr.convert_const(name) + vtable.name = rclass.alloc_array_name(name) self.structure_types_and_vtables.append((S, vtable)) # heaptracker.register_known_gctype(self.cpu, vtable, S) diff --git a/rpython/jit/codewriter/heaptracker.py b/rpython/jit/codewriter/heaptracker.py --- a/rpython/jit/codewriter/heaptracker.py +++ b/rpython/jit/codewriter/heaptracker.py @@ -1,4 +1,4 @@ -from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass from rpython.rlib.objectmodel import we_are_translated @@ -66,7 +66,7 @@ def set_testing_vtable_for_gcstruct(GCSTRUCT, vtable, name): # only for tests that need to register the vtable of their 
malloc'ed # structures in case they are GcStruct inheriting from OBJECT. - vtable.name = rstr.string_repr.convert_const(name) + vtable.name = rclass.alloc_array_name(name) testing_gcstruct2vtable[GCSTRUCT] = vtable testing_gcstruct2vtable = {} diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -1,6 +1,6 @@ import py, random -from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rffi from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from rpython.rtyper.rclass import FieldListAccessor, IR_QUASIIMMUTABLE @@ -81,10 +81,10 @@ return box.getref(rclass.OBJECTPTR).typeptr node_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - node_vtable.name = rstr.string_repr.convert_const('node') + node_vtable.name = rclass.alloc_array_name('node') node_vtable_adr = llmemory.cast_ptr_to_adr(node_vtable) node_vtable2 = lltype.malloc(OBJECT_VTABLE, immortal=True) - node_vtable2.name = rstr.string_repr.convert_const('node2') + node_vtable2.name = rclass.alloc_array_name('node2') node_vtable_adr2 = llmemory.cast_ptr_to_adr(node_vtable2) cpu = runner.LLGraphCPU(None) diff --git a/rpython/jit/metainterp/virtualref.py b/rpython/jit/metainterp/virtualref.py --- a/rpython/jit/metainterp/virtualref.py +++ b/rpython/jit/metainterp/virtualref.py @@ -1,5 +1,5 @@ from rpython.rtyper.rmodel import inputconst, log -from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass from rpython.jit.metainterp import history from rpython.jit.metainterp.virtualizable import TOKEN_NONE from rpython.jit.metainterp.virtualizable import TOKEN_TRACING_RESCALL @@ -19,7 +19,7 @@ self.jit_virtual_ref_vtable = lltype.malloc(rclass.OBJECT_VTABLE, 
zero=True, flavor='raw', immortal=True) - self.jit_virtual_ref_vtable.name = rstr.string_repr.convert_const( + self.jit_virtual_ref_vtable.name = rclass.alloc_array_name( 'jit_virtual_ref') # build some constants adr = llmemory.cast_ptr_to_adr(self.jit_virtual_ref_vtable) diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -90,6 +90,9 @@ vtable = vtable.super return vtable +def alloc_array_name(name): + return rstr.string_repr.convert_const(name) + class ClassRepr(AbstractClassRepr): def __init__(self, rtyper, classdef): @@ -197,7 +200,7 @@ name = 'object' else: name = rsubcls.classdef.shortname - vtable.name = rstr.string_repr.convert_const(name) + vtable.name = alloc_array_name(name) if hasattr(rsubcls.classdef, 'my_instantiate_graph'): graph = rsubcls.classdef.my_instantiate_graph vtable.instantiate = self.rtyper.getcallable(graph) From noreply at buildbot.pypy.org Wed Jul 9 21:26:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 9 Jul 2014 21:26:46 +0200 (CEST) Subject: [pypy-commit] pypy default: Tests for the same situation with other set operations. Fix for Message-ID: <20140709192646.02CF11C1068@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72408:e27d1225e4ca Date: 2014-07-09 21:26 +0200 http://bitbucket.org/pypy/pypy/changeset/e27d1225e4ca/ Log: Tests for the same situation with other set operations. Fix for symmetric_difference. 
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1060,10 +1060,14 @@ return storage, strategy def symmetric_difference(self, w_set, w_other): + if w_other.length() == 0: + return w_set.copy_real() storage, strategy = self._symmetric_difference_base(w_set, w_other) return w_set.from_storage_and_strategy(storage, strategy) def symmetric_difference_update(self, w_set, w_other): + if w_other.length() == 0: + return storage, strategy = self._symmetric_difference_base(w_set, w_other) w_set.strategy = strategy w_set.sstorage = storage diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -967,3 +967,28 @@ assert strategy(s) == "IntegerSetStrategy" s.update(set()) assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]) + s |= set() + assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]).difference(set()) + assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]) + s.difference_update(set()) + assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]).symmetric_difference(set()) + assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]) + s.symmetric_difference_update(set()) + assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]).intersection(set()) + assert strategy(s) == "EmptySetStrategy" + # + s = set([1, 2, 3]) + s.intersection_update(set()) + assert strategy(s) == "EmptySetStrategy" From noreply at buildbot.pypy.org Thu Jul 10 10:16:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Jul 2014 10:16:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a Python 3 section. 
Message-ID: <20140710081631.620D51C34C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72409:7e3e9f6ace6f Date: 2014-07-10 10:16 +0200 http://bitbucket.org/pypy/pypy/changeset/7e3e9f6ace6f/ Log: Add a Python 3 section. diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -28,7 +28,8 @@ Introduction ============ -``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ +``pypy-stm`` is a variant of the regular PyPy interpreter. (This +version supports Python 2.7; see below for `Python 3`_.) With caveats_ listed below, it should be in theory within 20%-50% slower than a regular PyPy, comparing the JIT version in both cases (but see below!). It is called @@ -137,6 +138,25 @@ +Python 3 +======== + +In this document I describe "pypy-stm", which is based on PyPy's Python +2.7 interpreter. Supporting Python 3 should take about half an +afternoon of work. Obviously, what I *don't* mean is that by tomorrow +you can have a finished and polished "pypy3-stm" product. General py3k +work is still missing; and general stm work is also still missing. But +they are rather independent from each other, as usual in PyPy. The +required afternoon of work will certainly be done one of these days now +that the internal interfaces seem to stabilize. + +The same is true for other languages implemented in the RPython +framework, although the amount of work to put there might vary, because +the STM framework within RPython is currently targeting the PyPy +interpreter and other ones might have slightly different needs. 
+ + + User Guide ========== From noreply at buildbot.pypy.org Thu Jul 10 10:21:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Jul 2014 10:21:46 +0200 (CEST) Subject: [pypy-commit] pypy default: Update Message-ID: <20140710082146.B75BB1C34C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72410:ccadece8737d Date: 2014-07-10 10:21 +0200 http://bitbucket.org/pypy/pypy/changeset/ccadece8737d/ Log: Update diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -509,8 +509,6 @@ The last two lines are special; they are an internal marker read by ``transactional_memory.print_abort_info()``. -These statistics are not printed out for the main thread, for now. - Reference to implementation details ----------------------------------- From noreply at buildbot.pypy.org Thu Jul 10 12:56:49 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 10 Jul 2014 12:56:49 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Removed forgotten pdb breakpoints. Message-ID: <20140710105649.D08B11C0906@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r873:d033d87d9e19 Date: 2014-07-07 17:33 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d033d87d9e19/ Log: Removed forgotten pdb breakpoints. 
diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1385,13 +1385,11 @@ @expose_primitive(RESUME, unwrap_spec=[object], no_result=True, clean_stack=False) def func(interp, s_frame, w_rcvr): - import pdb; pdb.set_trace() assert_class(interp, w_rcvr, interp.space.w_Process) wrapper.ProcessWrapper(interp.space, w_rcvr).resume(s_frame) @expose_primitive(SUSPEND, unwrap_spec=[object], no_result=True, clean_stack=False) def func(interp, s_frame, w_rcvr): - import pdb; pdb.set_trace() assert_class(interp, w_rcvr, interp.space.w_Process) wrapper.ProcessWrapper(interp.space, w_rcvr).suspend(s_frame) From noreply at buildbot.pypy.org Thu Jul 10 12:56:51 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 10 Jul 2014 12:56:51 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Changed perform/create_toplevel_context methods of interpreter to work better with RPython. Message-ID: <20140710105651.21EDE1C0906@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r874:01a714785a05 Date: 2014-07-07 17:35 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/01a714785a05/ Log: Changed perform/create_toplevel_context methods of interpreter to work better with RPython. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -211,20 +211,18 @@ except ReturnFromTopLevel, e: return e.object - def perform(self, w_receiver, selector, *w_arguments): - s_frame = self.create_toplevel_context(w_receiver, selector, *w_arguments) + def perform(self, w_receiver, selector="", w_selector=None, w_arguments=[]): + s_frame = self.create_toplevel_context(w_receiver, selector, w_selector, w_arguments) self.interrupt_check_counter = self.interrupt_counter_size return self.interpret_toplevel(s_frame.w_self()) - def create_toplevel_context(self, w_receiver, selector, *w_arguments): - if isinstance(selector, str): + def create_toplevel_context(self, w_receiver, selector="", w_selector=None, w_arguments=[]): + if w_selector is None: + assert selector, "Need either string or W_Object selector" if selector == "asSymbol": w_selector = self.image.w_asSymbol else: - w_selector = self.perform(self.space.wrap_string(selector), - "asSymbol") - else: - w_selector = selector + w_selector = self.perform(self.space.wrap_string(selector), "asSymbol") w_method = model.W_CompiledMethod(self.space, header=512) w_method.literalatput0(self.space, 1, w_selector) diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -36,7 +36,7 @@ def preload_perform(imagename, receiver, selector, *args): interp = load(imagename) def interp_miniloop(): - return interp.perform(receiver, selector, *args) + return interp.perform(receiver, selector, w_arguments=list(args)) return interp_miniloop # This will build a jit executing a synthetic method composed of the given bytecodes and literals, diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -7,7 +7,6 @@ def setup_module(): space, interp, _, _ = read_image('bootstrapped.image') w = space.w - perform = interp.perform 
copy_to_module(locals(), __name__) interp.trace = False space.initialize_class(space.w_String, interp) diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -5,7 +5,10 @@ def setup_module(): space, interp, image, reader = read_image("mini.image") w = space.w - perform = interp.perform + def perform_wrapper(receiver, selector, *args): + w_selector = None if isinstance(selector, str) else selector + return interp.perform(receiver, selector, w_selector, list(args)) + perform = perform_wrapper copy_to_module(locals(), __name__) def teardown_module(): @@ -191,7 +194,7 @@ w_abs = interp.perform(interp.space.w("abs"), "asSymbol") for value in [10, -3, 0]: w_object = model.W_SmallInteger(value) - w_res = interp.perform(w_object, w_abs) + w_res = interp.perform(w_object, w_selector=w_abs) assert w_res.value == abs(value) def test_lookup_abs_in_integer(): diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -266,5 +266,5 @@ def initialize_class(self, w_class, interp): initialize_symbol = find_symbol_in_methoddict_of("initialize", w_class.class_shadow(self)) - interp.perform(w_class, initialize_symbol) + interp.perform(w_class, w_selector=initialize_symbol) \ No newline at end of file From noreply at buildbot.pypy.org Thu Jul 10 12:56:52 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 10 Jul 2014 12:56:52 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Refactored command line flags a little to be more precise and give more controll over what happens. Message-ID: <20140710105652.5516C1C0906@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r875:f9f21debba52 Date: 2014-07-07 17:36 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f9f21debba52/ Log: Refactored command line flags a little to be more precise and give more controll over what happens. 
Added descriptions to the usage- string. diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -10,201 +10,114 @@ from spyvm.tool.analyseimage import create_image from spyvm.interpreter_proxy import VirtualMachine -def print_result(w_result): - # This will also print contents of strings/symbols/numbers - print w_result.as_repr_string().replace('\r', '\n') - -def _run_benchmark(interp, number, benchmark, arg): - from spyvm.plugins.vmdebugging import stop_ui_process - stop_ui_process() - - space = interp.space - scheduler = wrapper.scheduler(space) - w_hpp = scheduler.active_process() - if space.unwrap_int(scheduler.active_process().fetch(space, 2)) > space.unwrap_int(w_hpp.fetch(space, 2)): - w_hpp = scheduler.active_process() - assert isinstance(w_hpp, model.W_PointersObject) - w_benchmark_proc = model.W_PointersObject( - space, - w_hpp.getclass(space), - w_hpp.size() - ) - - s_frame = context_for(interp, number, benchmark, arg) - # second variable is suspended context - w_benchmark_proc.store(space, 1, s_frame.w_self()) - - # third variable is priority - priority = space.unwrap_int(w_hpp.fetch(space, 2)) / 2 + 1 - # Priorities below 10 are not allowed in newer versions of Squeak. 
- if interp.image.version.has_closures: - priority = max(11, priority) - else: - priority = 7 - w_benchmark_proc.store(space, 2, space.wrap_int(priority)) - - # make process eligible for scheduling - wrapper.ProcessWrapper(space, w_benchmark_proc).put_to_sleep() - - t1 = time.time() - w_result = _run_image(interp) - t2 = time.time() - if w_result: - print_result(w_result) - print "took %s seconds" % (t2 - t1) - return 0 - return -1 - -def _run_image(interp): - space = interp.space - ap = wrapper.ProcessWrapper(space, wrapper.scheduler(space).active_process()) - w_ctx = ap.suspended_context() - assert isinstance(w_ctx, model.W_PointersObject) - ap.store_suspended_context(space.w_nil) - try: - return interp.interpret_toplevel(w_ctx) - except error.Exit, e: - print e.msg - -def _run_code(interp, code, as_benchmark=False): - import time - selector = "DoIt%d" % int(time.time()) - space = interp.space - w_receiver = space.w_nil - w_receiver_class = w_receiver.getclass(space) - try: - w_result = interp.perform( - w_receiver_class, - "compile:classified:notifying:", - space.wrap_string("%s\r\n%s" % (selector, code)), - space.wrap_string("spy-run-code"), - space.w_nil - ) - w_receiver_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() - except interpreter.ReturnFromTopLevel, e: - print e.object - return 1 - except error.Exit, e: - print e.msg - return 1 - - if not as_benchmark: - try: - w_result = interp.perform(w_receiver, selector) - except interpreter.ReturnFromTopLevel, e: - print e.object - return 1 - except error.Exit, e: - print e.msg - return 1 - if w_result: - print_result(w_result) - return 0 - else: - return _run_benchmark(interp, 0, selector, "") - -def context_for(interp, number, benchmark, stringarg): - w_receiver = interp.space.wrap_int(number) - if stringarg: - return interp.create_toplevel_context(w_receiver, benchmark, interp.space.wrap_string(stringarg)) - else: - return interp.create_toplevel_context(w_receiver, benchmark) - def 
_usage(argv): print """ - Usage: %s - -j|--jit [jitargs] - -n|--number [smallint, default: 0] - -m|--method [benchmark on smallint] - -a|--arg [string argument to #method] - -r|--run [code string] - -b|--benchmark [code string] - -p|--poll_events - -ni|--no-interrupts - -d|--max-stack-depth [number, default %d, <= 0 disables stack protection] - -l|--storage-log - -L|--storage-log-aggregate - -E|--storage-log-elements - [image path, default: Squeak.image] + Usage: %s [-r|-m] [-naH] [-jpis] [-tlLE] + - image path (default: Squeak.image) + + Execution mode: + (no flags) - Image will be normally opened. + -r|--run - Code will be compiled and executed, result printed. + -m|--method - Selector will be sent to a SmallInteger, result printed. + -h|--help - Output this and exit. + + Execution parameters: + -n|--num - Only with -m or -r, SmallInteger to be used as receiver (default: nil). + -a|--arg - Only with -m, will be used as single String argument. + -H|--headless - Only with -m or -r, run in headless mode. + Execute the context directly, ignoring the active context in the image. + The execution will 'hijack' the active process. + Image window will probably not open. Good for benchmarking. + By default, a high-priority process will be created for the context, then the image + will be started normally. + -u - Only with -m or -r, try to stop UI-process at startup. Can help with -H. + + Other parameters: + -j|--jit - jitargs will be passed to the jit configuration. + -p|--poll - Actively poll for events. Try this if the image is not responding well. + -i|--no-interrupts - Disable timer interrupt. Disables non-cooperative scheduling. + -s - After num stack frames, the entire stack will be dumped to the heap. + This breaks performance, but protects agains stack overflow. + num <= 0 disables stack protection (default: %d) + + Logging parameters: + -t|--trace - Output a trace of each message, primitive, return value and process switch. 
+ -l|--storage-log - Output a log of storage operations. + -L|--storage-log-aggregate - Output an aggregated storage log at the end of execution. + -E|--storage-log-elements - Include classnames of elements into the storage log. + """ % (argv[0], constants.MAX_LOOP_DEPTH) -def _arg_missing(argv, idx, arg): - if len(argv) == idx + 1: +def get_parameter(argv, idx, arg): + if len(argv) < idx + 1: raise RuntimeError("Error: missing argument after %s" % arg) - + return argv[idx], idx + 1 + prebuilt_space = objspace.ObjSpace() def entry_point(argv): - idx = 1 - path = None + # == Main execution parameters + selector = None + code = "" number = 0 - benchmark = None + have_number = False + stringarg = None + headless = False + # == Other parameters + poll = False + interrupts = True + max_stack_depth = constants.MAX_LOOP_DEPTH trace = False - evented = True - stringarg = "" - code = None - as_benchmark = False - max_stack_depth = constants.MAX_LOOP_DEPTH - interrupts = True + + path = argv[1] if len(argv) > 1 else "Squeak.image" + idx = 2 while idx < len(argv): arg = argv[idx] + idx += 1 if arg in ["-h", "--help"]: _usage(argv) return 0 elif arg in ["-j", "--jit"]: - _arg_missing(argv, idx, arg) - jitarg = argv[idx + 1] - idx += 1 + jitarg, idx = get_parameter(argv, idx, arg) jit.set_user_param(interpreter.Interpreter.jit_driver, jitarg) elif arg in ["-n", "--number"]: - _arg_missing(argv, idx, arg) - number = int(argv[idx + 1]) - idx += 1 + numarg, idx = get_parameter(argv, idx, arg) + number = int(numarg) + have_number = True elif arg in ["-m", "--method"]: - _arg_missing(argv, idx, arg) - benchmark = argv[idx + 1] - idx += 1 + selector, idx = get_parameter(argv, idx, arg) elif arg in ["-t", "--trace"]: trace = True - elif arg in ["-p", "--poll_events"]: - evented = False + elif arg in ["-p", "--poll"]: + poll = True elif arg in ["-a", "--arg"]: - _arg_missing(argv, idx, arg) - stringarg = argv[idx + 1] - idx += 1 + stringarg, idx = get_parameter(argv, idx, arg) elif arg 
in ["-r", "--run"]: - _arg_missing(argv, idx, arg) - code = argv[idx + 1] - as_benchmark = False - idx += 1 - elif arg in ["-b", "--benchmark"]: - _arg_missing(argv, idx, arg) - code = argv[idx + 1] - as_benchmark = True - idx += 1 - elif arg in ["-ni", "--no-interrupts"]: + code, idx = get_parameter(argv, idx, arg) + elif arg in ["-i", "--no-interrupts"]: interrupts = False - elif arg in ["-d", "--max-stack-depth"]: - _arg_missing(argv, idx, arg) - max_stack_depth = int(argv[idx + 1]) - idx += 1 + elif arg in ["-s"]: + arg, idx = get_parameter(argv, idx, arg) + max_stack_depth = int(arg) + elif arg in ["-H", "--headless"]: + headless = True + elif arg in ["-u"]: + from spyvm.plugins.vmdebugging import stop_ui_process + stop_ui_process() elif arg in ["-l", "--storage-log"]: storage_logger.activate() elif arg in ["-L", "--storage-log-aggregate"]: storage_logger.activate(aggregate=True) elif arg in ["-E", "--storage-log-elements"]: storage_logger.activate(elements=True) - elif path is None: - path = argv[idx] else: _usage(argv) return -1 - idx += 1 - - if path is None: - path = "Squeak.image" - + + if code and selector: + raise RuntimeError("Cannot handle both -r and -m.") + path = rpath.rabspath(path) try: f = open_file_as_stream(path, mode="rb", buffering=0) @@ -216,27 +129,108 @@ os.write(2, "%s -- %s (LoadError)\n" % (os.strerror(e.errno), path)) return 1 + # Load & prepare image and environment space = prebuilt_space image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) interp = interpreter.Interpreter(space, image, image_name=path, - trace=trace, evented=evented, + trace=trace, evented=not poll, interrupts=interrupts, max_stack_depth=max_stack_depth) space.runtime_setup(argv[0]) - result = 0 - if benchmark is not None: - result = _run_benchmark(interp, number, benchmark, stringarg) - elif code is not None: - result = _run_code(interp, code, as_benchmark=as_benchmark) + + # Create context 
to be executed + if code or selector: + if not have_number: + w_receiver = interp.space.w_nil + else: + w_receiver = interp.space.wrap_int(number) + if code: + selector = compile_code(interp, w_receiver, code) + if selector is None: + return -1 # Compilation failed, message is printed. + s_frame = create_context(interp, w_receiver, selector, stringarg) + if headless: + context = s_frame + else: + create_process(interp, s_frame) + context = active_context(interp.space) else: - _run_image(interp) - result = 0 + context = active_context(interp.space) + + w_result = execute_context(interp, context) + print result_string(w_result) storage_logger.print_aggregated_log() - return result + return 0 +def result_string(w_result): + # This will also print contents of strings/symbols/numbers + return w_result.as_repr_string().replace('\r', '\n') -# _____ Define and setup target ___ +def compile_code(interp, w_receiver, code): + import time + selector = "DoIt%d" % int(time.time()) + space = interp.space + w_receiver_class = w_receiver.getclass(space) + try: + w_result = interp.perform( + w_receiver_class, + "compile:classified:notifying:", + w_arguments = [space.wrap_string("%s\r\n%s" % (selector, code)), + space.wrap_string("spy-run-code"), + space.w_nil] + ) + # TODO - is this expected in every image? 
+ if not isinstance(w_result, model.W_BytesObject) or w_result.as_string() != selector: + print "Compilation failed, unexpected result: %s" % result_string(w_result) + return None + except error.Exit, e: + print "Exited while compiling code: %s" % e.msg + return None + w_receiver_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() + return selector + +def create_context(interp, w_receiver, selector, stringarg): + args = [] + if stringarg: + args.append(interp.space.wrap_string(stringarg)) + return interp.create_toplevel_context(w_receiver, selector, w_arguments = args) + +def create_process(interp, s_frame): + space = interp.space + w_active_process = wrapper.scheduler(space).active_process() + assert isinstance(w_active_process, model.W_PointersObject) + w_benchmark_proc = model.W_PointersObject( + space, w_active_process.getclass(space), w_active_process.size() + ) + if interp.image.version.has_closures: + # Priorities below 10 are not allowed in newer versions of Squeak. 
+ active_priority = space.unwrap_int(w_active_process.fetch(space, 2)) + priority = active_priority / 2 + 1 + priority = max(11, priority) + else: + priority = 7 + w_benchmark_proc.store(space, 1, s_frame.w_self()) + w_benchmark_proc.store(space, 2, space.wrap_int(priority)) + + # Make process eligible for scheduling + wrapper.ProcessWrapper(space, w_benchmark_proc).put_to_sleep() + +def active_context(space): + w_active_process = wrapper.scheduler(space).active_process() + active_process = wrapper.ProcessWrapper(space, w_active_process) + w_active_context = active_process.suspended_context() + assert isinstance(w_active_context, model.W_PointersObject) + active_process.store_suspended_context(space.w_nil) + return w_active_context.as_context_get_shadow(space) +def execute_context(interp, s_frame, measure=False): + try: + return interp.interpret_toplevel(s_frame.w_self()) + except error.Exit, e: + print "Exited: %s" % e.msg + return None + +# _____ Target and Main _____ def target(driver, *args): # driver.config.translation.gc = "stmgc" @@ -247,11 +241,9 @@ driver.config.translation.thread = True return entry_point, None - def jitpolicy(self): from rpython.jit.codewriter.policy import JitPolicy return JitPolicy() - if __name__ == "__main__": entry_point(sys.argv) From noreply at buildbot.pypy.org Thu Jul 10 12:56:53 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 10 Jul 2014 12:56:53 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added a suppress_process_switch flag as a hack to enable the -r flag in the Squeak image. Message-ID: <20140710105653.7134A1C0906@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r876:f77d391e1255 Date: 2014-07-07 18:46 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f77d391e1255/ Log: Added a suppress_process_switch flag as a hack to enable the -r flag in the Squeak image. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -86,6 +86,7 @@ s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: + assert not self.space.suppress_process_switch, "ProcessSwitch should be disabled..." if self.trace: print "====== Switched process from: %s" % s_new_context.short_str() print "====== to: %s " % p.s_new_context.short_str() diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -21,6 +21,9 @@ self.make_bootstrap_classes() self.make_bootstrap_objects() + + # This is a hack; see compile_code() in targetimageloadingsmalltalk.py + self.suppress_process_switch = False def find_executable(self, executable): if os.sep in executable or (os.name == "nt" and ":" in executable): diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -93,8 +93,9 @@ active_priority = active_process.priority() priority = self.priority() if priority > active_priority: - active_process.deactivate(s_current_frame) - self.activate() + if not self.space.suppress_process_switch: + active_process.deactivate(s_current_frame) + self.activate() else: self.put_to_sleep() @@ -103,10 +104,11 @@ def suspend(self, s_current_frame): if self.is_active_process(): - assert self.my_list().is_nil(self.space) - w_process = scheduler(self.space).pop_highest_priority_process() - self.deactivate(s_current_frame, put_to_sleep=False) - ProcessWrapper(self.space, w_process).activate() + if not self.space.suppress_process_switch: + assert self.my_list().is_nil(self.space) + w_process = scheduler(self.space).pop_highest_priority_process() + self.deactivate(s_current_frame, put_to_sleep=False) + ProcessWrapper(self.space, w_process).activate() else: if not self.my_list().is_nil(self.space): process_list = ProcessListWrapper(self.space, self.my_list()) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- 
a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -172,13 +172,22 @@ space = interp.space w_receiver_class = w_receiver.getclass(space) try: - w_result = interp.perform( - w_receiver_class, - "compile:classified:notifying:", - w_arguments = [space.wrap_string("%s\r\n%s" % (selector, code)), - space.wrap_string("spy-run-code"), - space.w_nil] - ) + try: + # The suppress_process_switch flag is a hack/workaround to enable compiling code + # before having initialized the image cleanly. The problem is that the TimingSemaphore is not yet + # registered (primitive 136 not called), so the idle process will never be left once it is entered. + # TODO - Find a way to cleanly initialize the image, without executing the active_context of the image. + # Instead, we want to execute our own context. Then remove this flag (and all references to it) + interp.space.suppress_process_switch = True + w_result = interp.perform( + w_receiver_class, + "compile:classified:notifying:", + w_arguments = [space.wrap_string("%s\r\n%s" % (selector, code)), + space.wrap_string("spy-run-code"), + space.w_nil] + ) + finally: + interp.space.suppress_process_switch = False # TODO - is this expected in every image? if not isinstance(w_result, model.W_BytesObject) or w_result.as_string() != selector: print "Compilation failed, unexpected result: %s" % result_string(w_result) From noreply at buildbot.pypy.org Thu Jul 10 12:56:54 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 10 Jul 2014 12:56:54 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed the suppress_process_switch hack, using an array to store the flag, making it modifiable. 
Message-ID: <20140710105654.88F551C0906@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r877:307b424d7195 Date: 2014-07-09 13:59 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/307b424d7195/ Log: Fixed the suppress_process_switch hack, using an array to store the flag, making it modifiable. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -86,7 +86,7 @@ s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: - assert not self.space.suppress_process_switch, "ProcessSwitch should be disabled..." + assert not self.space.suppress_process_switch[0], "ProcessSwitch should be disabled..." if self.trace: print "====== Switched process from: %s" % s_new_context.short_str() print "====== to: %s " % p.s_new_context.short_str() diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -23,7 +23,7 @@ self.make_bootstrap_objects() # This is a hack; see compile_code() in targetimageloadingsmalltalk.py - self.suppress_process_switch = False + self.suppress_process_switch = [False] def find_executable(self, executable): if os.sep in executable or (os.name == "nt" and ":" in executable): diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -93,7 +93,7 @@ active_priority = active_process.priority() priority = self.priority() if priority > active_priority: - if not self.space.suppress_process_switch: + if not self.space.suppress_process_switch[0]: active_process.deactivate(s_current_frame) self.activate() else: @@ -104,7 +104,7 @@ def suspend(self, s_current_frame): if self.is_active_process(): - if not self.space.suppress_process_switch: + if not self.space.suppress_process_switch[0]: assert self.my_list().is_nil(self.space) w_process = scheduler(self.space).pop_highest_priority_process() self.deactivate(s_current_frame, put_to_sleep=False) diff --git 
a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -178,7 +178,7 @@ # registered (primitive 136 not called), so the idle process will never be left once it is entered. # TODO - Find a way to cleanly initialize the image, without executing the active_context of the image. # Instead, we want to execute our own context. Then remove this flag (and all references to it) - interp.space.suppress_process_switch = True + interp.space.suppress_process_switch[0] = True w_result = interp.perform( w_receiver_class, "compile:classified:notifying:", @@ -187,7 +187,7 @@ space.w_nil] ) finally: - interp.space.suppress_process_switch = False + interp.space.suppress_process_switch[0] = False # TODO - is this expected in every image? if not isinstance(w_result, model.W_BytesObject) or w_result.as_string() != selector: print "Compilation failed, unexpected result: %s" % result_string(w_result) From noreply at buildbot.pypy.org Thu Jul 10 12:56:55 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 10 Jul 2014 12:56:55 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Merged Message-ID: <20140710105655.C64EB1C0906@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r878:656c751d8f58 Date: 2014-07-09 17:13 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/656c751d8f58/ Log: Merged diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -26,7 +26,7 @@ _immutable_fields_ = ["space", "image", "image_name", "max_stack_depth", "interrupt_counter_size", "startup_time", "evented", "interrupts"] - + jit_driver = jit.JitDriver( greens=['pc', 'self', 'method'], reds=['s_context'], @@ -38,7 +38,7 @@ trace=False, evented=True, interrupts=True, max_stack_depth=constants.MAX_LOOP_DEPTH): import time - + # === Initialize immutable variables self.space = space self.image = image @@ -54,7 +54,7 @@ 
self.interrupt_counter_size = int(os.environ["SPY_ICS"]) except KeyError: self.interrupt_counter_size = constants.INTERRUPT_COUNTER_SIZE - + # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size self.current_stack_depth = 0 @@ -91,7 +91,7 @@ print "====== Switched process from: %s" % s_new_context.short_str() print "====== to: %s " % p.s_new_context.short_str() s_new_context = p.s_new_context - + def loop_bytecodes(self, s_context, may_context_switch=True): old_pc = 0 if not jit.we_are_jitted() and may_context_switch: @@ -118,7 +118,7 @@ raise nlr else: s_context.push(nlr.value) - + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame # and handles the stack overflow protection mechanism. def stack_frame(self, s_frame, s_sender, may_context_switch=True): @@ -127,14 +127,14 @@ # Enter the context - store a virtual reference back to the sender # Non-fresh contexts can happen, e.g. when activating a stored BlockContext. # The same frame object must not pass through here recursively! - if s_frame.is_fresh(): + if s_frame.is_fresh() and s_sender is not None: s_frame.virtual_sender = jit.virtual_ref(s_sender) - + self.current_stack_depth += 1 if self.max_stack_depth > 0: if self.current_stack_depth >= self.max_stack_depth: raise StackOverflow(s_frame) - + # Now (continue to) execute the context bytecodes self.loop_bytecodes(s_frame, may_context_switch) finally: @@ -142,8 +142,8 @@ # Cleanly leave the context. This will finish the virtual sender-reference, if # it is still there, which can happen in case of ProcessSwitch or StackOverflow; # in case of a Return, this will already be handled while unwinding the stack. 
- s_frame.finish_virtual_sender() - + s_frame.finish_virtual_sender(s_sender) + def step(self, context): bytecode = context.fetch_next_bytecode() for entry in UNROLLING_BYTECODE_RANGES: @@ -156,9 +156,9 @@ if start <= bytecode <= stop: return getattr(context, methname)(self, bytecode) assert 0, "unreachable" - + # ============== Methods for handling user interrupts ============== - + def jitted_check_for_interrupt(self, s_frame): if not self.interrupts: return @@ -169,7 +169,7 @@ decr_by = int(trace_length // 100) decr_by = max(decr_by, 1) self.quick_check_for_interrupt(s_frame, decr_by) - + def quick_check_for_interrupt(self, s_frame, dec=1): if not self.interrupts: return @@ -205,7 +205,7 @@ return intmask(int((time.time() - self.startup_time) * 1000) & constants.TAGGED_MASK) # ============== Convenience methods for executing code ============== - + def interpret_toplevel(self, w_frame): try: self.loop(w_frame) @@ -234,7 +234,7 @@ s_frame.push(w_receiver) s_frame.push_all(list(w_arguments)) return s_frame - + def padding(self, symbol=' '): return symbol * self.current_stack_depth @@ -264,11 +264,26 @@ class ProcessSwitch(ContextSwitchException): """This causes the interpreter to switch the executed context.""" + +import rpython.rlib.unroll +if hasattr(unroll, "unrolling_zero"): + unrolling_zero = unroll.unrolling_zero +else: + class unrolling_int(int, unroll.SpecTag): + def __add__(self, other): + return unrolling_int(int.__add__(self, other)) + __radd__ = __add__ + def __sub__(self, other): + return unrolling_int(int.__sub__(self, other)) + def __rsub__(self, other): + return unrolling_int(int.__rsub__(self, other)) + unrolling_zero = unrolling_int(0) + + # This is a decorator for bytecode implementation methods. # parameter_bytes=N means N additional bytes are fetched as parameters. 
def bytecode_implementation(parameter_bytes=0): def bytecode_implementation_decorator(actual_implementation_method): - from rpython.rlib.unroll import unrolling_zero @jit.unroll_safe def bytecode_implementation_wrapper(self, interp, current_bytecode): parameters = () @@ -344,9 +359,9 @@ # __extend__ adds new methods to the ContextPartShadow class class __extend__(ContextPartShadow): - + # ====== Push/Pop bytecodes ====== - + @bytecode_implementation() def pushReceiverVariableBytecode(self, interp, current_bytecode): index = current_bytecode & 15 @@ -425,7 +440,7 @@ @bytecode_implementation() def popStackBytecode(self, interp, current_bytecode): self.pop() - + @bytecode_implementation(parameter_bytes=1) def pushNewArrayBytecode(self, interp, current_bytecode, descriptor): arraySize, popIntoArray = splitter[7, 1](descriptor) @@ -435,9 +450,9 @@ else: newArray = interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) self.push(newArray) - + # ====== Extended Push/Pop bytecodes ====== - + def _extendedVariableTypeAndIndex(self, descriptor): return ((descriptor >> 6) & 3), (descriptor & 63) @@ -473,16 +488,16 @@ @bytecode_implementation(parameter_bytes=1) def extendedStoreBytecode(self, interp, current_bytecode, descriptor): return self._extendedStoreBytecode(interp, current_bytecode, descriptor) - + @bytecode_implementation(parameter_bytes=1) def extendedStoreAndPopBytecode(self, interp, current_bytecode, descriptor): self._extendedStoreBytecode(interp, current_bytecode, descriptor) self.pop() - + def _extract_index_and_temps(self, index_in_array, index_of_array): w_indirectTemps = self.gettemp(index_of_array) return index_in_array, w_indirectTemps - + @bytecode_implementation(parameter_bytes=2) def pushRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) @@ -520,7 +535,7 @@ copiedValues: copiedValues). 
self jump: blockSize """ - + space = self.space numArgs, numCopied = splitter[4, 4](descriptor) blockSize = (j << 8) | i @@ -529,7 +544,7 @@ self.pop_and_return_n(numCopied)) self.push(w_closure) self._jump(blockSize) - + # ====== Helpers for send/return bytecodes ====== def _sendSelfSelector(self, w_selector, argcount, interp): @@ -551,7 +566,7 @@ w_method = receiverclassshadow.lookup(w_selector) except MethodNotFound: return self._doesNotUnderstand(w_selector, argcount, interp, receiver) - + code = w_method.primitive() if code: if w_arguments: @@ -575,21 +590,21 @@ def _sendSelfSelectorSpecial(self, selector, numargs, interp): w_selector = self.space.get_special_selector(selector) return self._sendSelfSelector(w_selector, numargs, interp) - + def _sendSpecialSelector(self, interp, receiver, special_selector, w_args=[]): w_special_selector = self.space.objtable["w_" + special_selector] s_class = receiver.class_shadow(self.space) w_method = s_class.lookup(w_special_selector) s_frame = w_method.create_frame(interp.space, receiver, w_args) - + # ###################################################################### if interp.trace: print '%s %s %s: #%s' % (interp.padding('#'), special_selector, s_frame.short_str(), w_args) if not objectmodel.we_are_translated(): import pdb; pdb.set_trace() - + return interp.stack_frame(s_frame, self) - + def _doesNotUnderstand(self, w_selector, argcount, interp, receiver): arguments = self.pop_and_return_n(argcount) w_message_class = self.space.classtable["w_Message"] @@ -599,7 +614,7 @@ w_message.store(self.space, 0, w_selector) w_message.store(self.space, 1, self.space.wrap_list(arguments)) self.pop() # The receiver, already known. 
- + try: return self._sendSpecialSelector(interp, receiver, "doesNotUnderstand", [w_message]) except MethodNotFound: @@ -608,10 +623,10 @@ assert isinstance(s_class, ClassShadow) print "Missing doesNotUnderstand in hierarchy of %s" % s_class.getname() raise - + def _mustBeBoolean(self, interp, receiver): return self._sendSpecialSelector(interp, receiver, "mustBeBoolean") - + def _call_primitive(self, code, interp, argcount, w_method, w_selector): # ################################################################## if interp.trace: @@ -631,11 +646,11 @@ def _return(self, return_value, interp, s_return_to): # unfortunately, this assert is not true for some tests. TODO fix this. # assert self._stack_ptr == self.tempsize() - + # ################################################################## if interp.trace: print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) - + if s_return_to is None: # This should never happen while executing a normal image. raise ReturnFromTopLevel(return_value) @@ -732,7 +747,7 @@ return self._sendSelfSelector(w_selector, argcount, interp) # ====== Misc ====== - + def _activate_unwind_context(self, interp): # TODO put the constant somewhere else. # Primitive 198 is used in BlockClosure >> ensure: @@ -750,11 +765,11 @@ raise nlr finally: self.mark_returned() - + @bytecode_implementation() def unknownBytecode(self, interp, current_bytecode): raise MissingBytecode("unknownBytecode") - + @bytecode_implementation() def experimentalBytecode(self, interp, current_bytecode): raise MissingBytecode("experimentalBytecode") @@ -771,7 +786,7 @@ else: w_alternative = interp.space.w_true w_expected = interp.space.w_false - + # Don't check the class, just compare with only two Boolean instances. 
w_bool = self.pop() if w_expected.is_same_object(w_bool): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1356,7 +1356,7 @@ def func(interp, s_frame, w_rcvr, w_selector, w_arguments): from spyvm.shadow import MethodNotFound s_frame.pop_n(2) # removing our arguments - + return s_frame._sendSelector(w_selector, len(w_arguments), interp, w_rcvr, w_rcvr.class_shadow(interp.space), w_arguments=w_arguments) @@ -1392,8 +1392,8 @@ def func(interp, s_frame, w_rcvr): assert_class(interp, w_rcvr, interp.space.w_Process) wrapper.ProcessWrapper(interp.space, w_rcvr).suspend(s_frame) - - + + @expose_primitive(FLUSH_CACHE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -20,7 +20,7 @@ _immutable_fields_ = ['space'] provides_getname = False repr_classname = "AbstractShadow" - + def __init__(self, space, w_self): self.space = space assert w_self is None or isinstance(w_self, model.W_PointersObject) @@ -34,19 +34,19 @@ return "<%s %s>" % (self.repr_classname, self.getname()) else: return "<%s>" % self.repr_classname - + def fetch(self, n0): raise NotImplementedError("Abstract class") def store(self, n0, w_value): raise NotImplementedError("Abstract class") def size(self): raise NotImplementedError("Abstract class") - + def attach_shadow(self): pass - + def copy_field_from(self, n0, other_shadow): self.store(n0, other_shadow.fetch(n0)) - + # This can be overwritten to change the order of initialization. 
def copy_from(self, other_shadow): assert self.size() == other_shadow.size() @@ -98,24 +98,24 @@ # Class must provide: wrap, unwrap, nil_value, is_nil_value, wrapper_class _attrs_ = ['storage'] _immutable_fields_ = ['storage'] - + def __init__(self, space, w_self, size): AbstractStorageShadow.__init__(self, space, w_self, size) self.storage = [self.nil_value] * size - + def size(self): return len(self.storage) - + def generalized_strategy_for(self, w_val): return ListStorageShadow - + def fetch(self, n0): val = self.storage[n0] if self.is_nil_value(val): return self.space.w_nil else: return self.wrap(self.space, val) - + def do_store(self, n0, w_val): if w_val.is_nil(self.space): self.storage[n0] = self.nil_value @@ -134,7 +134,7 @@ nil_value = constants.MAXINT wrapper_class = model.W_SmallInteger import_from_mixin(AbstractValueOrNilStorageMixin) - + @staticmethod def static_can_contain(space, w_val): return _value_or_nil_can_handle(SmallIntegerOrNilStorageShadow, space, w_val) @@ -153,7 +153,7 @@ nil_value = sys.float_info.max wrapper_class = model.W_Float import_from_mixin(AbstractValueOrNilStorageMixin) - + @staticmethod def static_can_contain(space, w_val): return _value_or_nil_can_handle(FloatOrNilStorageShadow, space, w_val) @@ -193,17 +193,17 @@ if float_can_handle and not FloatOrNilStorageShadow.static_can_contain(space, w_obj): float_can_handle = False specialized_strategies = specialized_strategies - 1 - + if specialized_strategies <= 0: return ListStorageShadow - + if all_nil_can_handle: return AllNilStorageShadow if small_int_can_handle: return SmallIntegerOrNilStorageShadow if float_can_handle: return FloatOrNilStorageShadow - + # If this happens, please look for a bug in the code above. assert False, "No strategy could be found for list..." 
@@ -223,7 +223,7 @@ _immutable_fields_ = ['storage'] repr_classname = "ListStorageShadow" import_from_mixin(ListStorageMixin) - + def initialize_storage(self, size): self.storage = [self.space.w_nil] * size def fetch(self, n0): @@ -236,7 +236,7 @@ _immutable_fields_ = ['storage'] repr_classname = "WeakListStorageShadow" import_from_mixin(ListStorageMixin) - + def initialize_storage(self, size): self.storage = [weakref.ref(self.space.w_nil)] * size def fetch(self, n0): @@ -245,14 +245,14 @@ def store(self, n0, w_value): assert w_value is not None self.storage[n0] = weakref.ref(w_value) - + class AbstractCachingShadow(ListStorageShadow): _immutable_fields_ = ['version?'] _attrs_ = ['version'] repr_classname = "AbstractCachingShadow" import_from_mixin(version.VersionMixin) version = None - + def __init__(self, space, w_self): ListStorageShadow.__init__(self, space, w_self, 0) self.changed() @@ -284,7 +284,7 @@ _s_superclass = _s_methoddict = None provides_getname = True repr_classname = "ClassShadow" - + def __init__(self, space, w_self): self.subclass_s = {} AbstractCachingShadow.__init__(self, space, w_self) @@ -305,7 +305,7 @@ # In Slang the value is read directly as a boxed integer, so that # the code gets a "pointer" whose bits are set as above, but # shifted one bit to the left and with the lowest bit set to 1. - + # Compute the instance size (really the size, not the number of bytes) instsize_lo = (classformat >> 1) & 0x3F instsize_hi = (classformat >> (9 + 1)) & 0xC0 @@ -313,10 +313,10 @@ # decode the instSpec format = (classformat >> 7) & 15 self.instance_varsized = format >= 2 - + # In case of raised exception below. self.changed() - + if format < 4: self.instance_kind = POINTERS elif format == 4: @@ -356,7 +356,7 @@ return # Some of the special info has changed -> Switch version. 
self.changed() - + def store_w_superclass(self, w_class): superclass = self._s_superclass if w_class is None or w_class.is_nil(self.space): @@ -383,24 +383,24 @@ return if methoddict: methoddict.s_class = None self.store_s_methoddict(s_new_methoddict) - + def store_s_methoddict(self, s_methoddict): s_methoddict.s_class = self s_methoddict.sync_method_cache() self._s_methoddict = s_methoddict - + def attach_s_class(self, s_other): self.subclass_s[s_other] = None def detach_s_class(self, s_other): del self.subclass_s[s_other] - + def store_w_name(self, w_name): if isinstance(w_name, model.W_BytesObject): self.name = w_name.as_string() else: self.name = None - + @jit.unroll_safe def flush_method_caches(self): look_in_shadow = self @@ -497,7 +497,7 @@ self.version = version for s_class in self.subclass_s: s_class.superclass_changed(version) - + # _______________________________________________________________ # Methods used only in testing @@ -532,7 +532,7 @@ _immutable_fields_ = ['invalid?', 's_class'] _attrs_ = ['methoddict', 'invalid', 's_class'] repr_classname = "MethodDictionaryShadow" - + def __init__(self, space, w_self): self.invalid = True self.s_class = None @@ -541,7 +541,7 @@ def update(self): self.sync_method_cache() - + def find_selector(self, w_selector): if self.invalid: return None # we may be invalid if Smalltalk code did not call flushCache @@ -593,7 +593,7 @@ class AbstractRedirectingShadow(AbstractShadow): _attrs_ = ['_w_self_size'] repr_classname = "AbstractRedirectingShadow" - + def __init__(self, space, w_self): AbstractShadow.__init__(self, space, w_self) if w_self is not None: @@ -611,7 +611,7 @@ '_pc', '_temps_and_stack', '_stack_ptr', 'instances_w'] repr_classname = "ContextPartShadow" - + _virtualizable_ = [ 'direct_sender', 'virtual_sender', "_pc", "_temps_and_stack[*]", "_stack_ptr", @@ -620,7 +620,7 @@ # ______________________________________________________________________ # Initialization - + def __init__(self, space, w_self): 
self.direct_sender = None self.virtual_sender = jit.vref_None @@ -632,26 +632,26 @@ AbstractRedirectingShadow.copy_field_from(self, n0, other_shadow) except error.SenderChainManipulation, e: assert e.s_context == self - + def copy_from(self, other_shadow): # Some fields have to be initialized before the rest, to ensure correct initialization. privileged_fields = self.fields_to_copy_first() for n0 in privileged_fields: self.copy_field_from(n0, other_shadow) - + # Now the temp size will be known. self.init_stack_and_temps() - + for n0 in range(self.size()): if n0 not in privileged_fields: self.copy_field_from(n0, other_shadow) - + def fields_to_copy_first(self): return [] - + # ______________________________________________________________________ # Accessing object fields - + def fetch(self, n0): if n0 == constants.CTXPART_SENDER_INDEX: return self.w_sender() @@ -690,45 +690,47 @@ else: # XXX later should store tail out of known context part as well raise error.WrapperException("Index in context out of bounds") - + # === Sender === # There are two fields for the sender (virtual and direct). Only one of them is can be set at a time. # As long as the frame object is virtualized, using the virtual reference should increase performance. # As soon as a frame object is forced to the heap, the direct reference must be used. 
- + def is_fresh(self): return self.direct_sender is None and self.virtual_sender is jit.vref_None - - def finish_virtual_sender(self, save_direct_sender=True): + + def finish_virtual_sender(self, s_sender): if self.virtual_sender is not jit.vref_None: - sender = self.virtual_sender() - jit.virtual_ref_finish(self.virtual_sender, sender) + if self.pc() != -1: + # stack is unrolling, but this frame was not + # marked_returned: it is an escaped frame + sender = self.virtual_sender() + self.direct_sender = sender + jit.virtual_ref_finish(self.virtual_sender, s_sender) self.virtual_sender = jit.vref_None - if save_direct_sender: - self.direct_sender = sender - + def store_s_sender(self, s_sender, raise_error=True): # If we have a virtual back reference, we must finish it before storing the direct reference. - self.finish_virtual_sender(save_direct_sender=False) + # self.finish_virtual_sender(save_direct_sender=False) self.direct_sender = s_sender if raise_error: raise error.SenderChainManipulation(self) - + def w_sender(self): sender = self.s_sender() if sender is None: return self.space.w_nil return sender.w_self() - + def s_sender(self): if self.direct_sender: return self.direct_sender else: result = self.virtual_sender() return result - + # === Stack Pointer === - + def unwrap_store_stackpointer(self, w_sp1): # the stackpointer in the W_PointersObject starts counting at the # tempframe start @@ -747,12 +749,12 @@ def stackdepth(self): return rarithmetic.intmask(self._stack_ptr) - + def wrap_stackpointer(self): return self.space.wrap_int(self.stackdepth()) # === Program Counter === - + def store_unwrap_pc(self, w_pc): if w_pc.is_nil(self.space): self.store_pc(-1) @@ -777,9 +779,9 @@ def store_pc(self, newpc): assert newpc >= -1 self._pc = newpc - + # === Subclassed accessors === - + def s_home(self): raise NotImplementedError() @@ -788,18 +790,18 @@ def w_receiver(self): raise NotImplementedError() - + def w_method(self): raise NotImplementedError() - + def 
tempsize(self): raise NotImplementedError() - + def is_closure_context(self): raise NotImplementedError() - + # === Other properties of Contexts === - + def mark_returned(self): self.store_pc(-1) self.store_s_sender(None, raise_error=False) @@ -809,25 +811,25 @@ def external_stackpointer(self): return self.stackdepth() + self.stackstart() - + def stackend(self): # XXX this is incorrect when there is subclassing return self._w_self_size - + def fetch_next_bytecode(self): pc = jit.promote(self._pc) assert pc >= 0 self._pc += 1 return self.fetch_bytecode(pc) - + def fetch_bytecode(self, pc): bytecode = self.w_method().fetch_bytecode(pc) return ord(bytecode) - + # ______________________________________________________________________ # Temporary Variables # - # Every context has it's own stack. BlockContexts share their temps with + # Every context has it's own stack. BlockContexts share their temps with # their home contexts. MethodContexts created from a BlockClosure get their # temps copied from the closure upon activation. Changes are not propagated back; # this is handled by the compiler by allocating an extra Array for temps. 
@@ -837,7 +839,7 @@ def settemp(self, index, w_value): raise NotImplementedError() - + # ______________________________________________________________________ # Stack Manipulation @@ -851,13 +853,13 @@ for i in range(tempsize): temps_and_stack[i] = self.space.w_nil self._stack_ptr = rarithmetic.r_uint(tempsize) # we point after the last element - + def stack_get(self, index0): return self._temps_and_stack[index0] - + def stack_put(self, index0, w_val): self._temps_and_stack[index0] = w_val - + def stack(self): """NOT_RPYTHON""" # purely for testing return self._temps_and_stack[self.tempsize():self._stack_ptr] @@ -912,7 +914,7 @@ # ______________________________________________________________________ # Primitive support - + def store_instances_array(self, w_class, match_w): # used for primitives 77 & 78 self.instances_w[w_class] = match_w @@ -939,7 +941,7 @@ j += 1 retval += "\n---------------------" return retval - + def short_str(self): arg_strings = self.argument_strings() if len(arg_strings) > 0: @@ -953,10 +955,10 @@ self.w_receiver().as_repr_string(), args ) - + def print_stack(self, method=True): return self.print_padded_stack(method)[1] - + def print_padded_stack(self, method): padding = ret_str = '' if self.s_sender() is not None: @@ -970,9 +972,9 @@ class BlockContextShadow(ContextPartShadow): _attrs_ = ['_w_home', '_initialip', '_eargc'] repr_classname = "BlockContextShadow" - + # === Initialization === - + def __init__(self, space, w_self=None, w_home=None, argcnt=0, initialip=0): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) creating_w_self = w_self is None @@ -992,40 +994,40 @@ def fields_to_copy_first(self): return [ constants.BLKCTX_HOME_INDEX ] - + # === Implemented accessors === - + def s_home(self): return self._w_home.as_methodcontext_get_shadow(self.space) - + def stackstart(self): return constants.BLKCTX_STACK_START def tempsize(self): # A blockcontext doesn't have any temps return 0 - + def w_receiver(self): return 
self.s_home().w_receiver() - + def w_method(self): retval = self.s_home().w_method() assert isinstance(retval, model.W_CompiledMethod) return retval - + def is_closure_context(self): return True - + # === Temporary variables === - + def gettemp(self, index): return self.s_home().gettemp(index) def settemp(self, index, w_value): self.s_home().settemp(index, w_value) - + # === Accessing object fields === - + def fetch(self, n0): if n0 == constants.BLKCTX_HOME_INDEX: return self._w_home @@ -1045,11 +1047,11 @@ return self.unwrap_store_eargc(w_value) else: return ContextPartShadow.store(self, n0, w_value) - + def store_w_home(self, w_home): assert isinstance(w_home, model.W_PointersObject) self._w_home = w_home - + def unwrap_store_initialip(self, w_value): initialip = self.space.unwrap_int(w_value) initialip -= 1 + self.w_method().literalsize @@ -1057,18 +1059,18 @@ def store_initialip(self, initialip): self._initialip = initialip - + def wrap_initialip(self): initialip = self.initialip() initialip += 1 + self.w_method().literalsize return self.space.wrap_int(initialip) - + def reset_pc(self): self.store_pc(self.initialip()) - + def initialip(self): return self._initialip - + def unwrap_store_eargc(self, w_value): self.store_expected_argument_count(self.space.unwrap_int(w_value)) @@ -1082,24 +1084,24 @@ self._eargc = argc # === Stack Manipulation === - + def reset_stack(self): self.pop_n(self.stackdepth()) # === Printing === - + def argument_strings(self): return [] - + def method_str(self): return '[] in %s' % self.w_method().get_identifier_string() class MethodContextShadow(ContextPartShadow): _attrs_ = ['closure', '_w_receiver', '_w_method'] repr_classname = "MethodContextShadow" - + # === Initialization === - + @jit.unroll_safe def __init__(self, space, w_self=None, w_method=None, w_receiver=None, arguments=[], closure=None, pc=0): @@ -1108,7 +1110,7 @@ self.store_w_receiver(w_receiver) self.store_pc(pc) self.closure = closure - + if w_method: 
self.store_w_method(w_method) # The summand is needed, because we calculate i.a. our stackdepth relative of the size of w_self. @@ -1117,20 +1119,20 @@ self.init_stack_and_temps() else: self._w_method = None - + argc = len(arguments) for i0 in range(argc): self.settemp(i0, arguments[i0]) - + if closure: for i0 in range(closure.size()): self.settemp(i0+argc, closure.at0(i0)) def fields_to_copy_first(self): return [ constants.MTHDCTX_METHOD, constants.MTHDCTX_CLOSURE_OR_NIL ] - + # === Accessing object fields === - + def fetch(self, n0): if n0 == constants.MTHDCTX_METHOD: return self.w_method() @@ -1164,12 +1166,12 @@ return self.settemp(temp_i, w_value) else: return ContextPartShadow.store(self, n0, w_value) - + def store_w_receiver(self, w_receiver): self._w_receiver = w_receiver - + # === Implemented Accessors === - + def s_home(self): if self.is_closure_context(): # this is a context for a blockClosure @@ -1182,31 +1184,31 @@ return s_outerContext.s_home() else: return self - + def stackstart(self): return constants.MTHDCTX_TEMP_FRAME_START - + def store_w_method(self, w_method): assert isinstance(w_method, model.W_CompiledMethod) self._w_method = w_method def w_receiver(self): return self._w_receiver - + def w_method(self): retval = self._w_method assert isinstance(retval, model.W_CompiledMethod) return retval - + def tempsize(self): if not self.is_closure_context(): return self.w_method().tempsize() else: return self.closure.tempsize() - + def is_closure_context(self): return self.closure is not None - + # ______________________________________________________________________ # Marriage of MethodContextShadows with PointerObjects only when required @@ -1223,9 +1225,9 @@ self._w_self = w_self self._w_self_size = w_self.size() return w_self - + # === Temporary variables === - + def gettemp(self, index0): return self.stack_get(index0) @@ -1233,7 +1235,7 @@ self.stack_put(index0, w_value) # === Printing === - + def argument_strings(self): argcount = 
self.w_method().argsize tempsize = self.w_method().tempsize() From noreply at buildbot.pypy.org Thu Jul 10 12:56:56 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 10 Jul 2014 12:56:56 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Enabled headless mode by default, to behave like the original flags. Message-ID: <20140710105656.E628C1C0906@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r879:cf03ec7872eb Date: 2014-07-10 11:46 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/cf03ec7872eb/ Log: Enabled headless mode by default, to behave like the original flags. diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -12,7 +12,7 @@ def _usage(argv): print """ - Usage: %s [-r|-m] [-naH] [-jpis] [-tlLE] + Usage: %s [-r|-m] [-naHu] [-jpis] [-tlLE] - image path (default: Squeak.image) Execution mode: @@ -24,13 +24,12 @@ Execution parameters: -n|--num - Only with -m or -r, SmallInteger to be used as receiver (default: nil). -a|--arg - Only with -m, will be used as single String argument. - -H|--headless - Only with -m or -r, run in headless mode. - Execute the context directly, ignoring the active context in the image. - The execution will 'hijack' the active process. - Image window will probably not open. Good for benchmarking. - By default, a high-priority process will be created for the context, then the image - will be started normally. - -u - Only with -m or -r, try to stop UI-process at startup. Can help with -H. + -P|--process - Only with -m or -r, create a high-priority Process for the context. + The images last active Process will be started first. + By default, run in headless mode. This will ignore the active process + in the image and execute the context directly. The image window will + probably not open. Good for benchmarking. + -u - Only with -m or -r, try to stop UI-process at startup. 
Can help benchmarking. Other parameters: -j|--jit - jitargs will be passed to the jit configuration. @@ -62,7 +61,7 @@ number = 0 have_number = False stringarg = None - headless = False + headless = True # == Other parameters poll = False interrupts = True @@ -100,8 +99,8 @@ elif arg in ["-s"]: arg, idx = get_parameter(argv, idx, arg) max_stack_depth = int(arg) - elif arg in ["-H", "--headless"]: - headless = True + elif arg in ["-P", "--process"]: + headless = False elif arg in ["-u"]: from spyvm.plugins.vmdebugging import stop_ui_process stop_ui_process() From noreply at buildbot.pypy.org Thu Jul 10 12:56:58 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 10 Jul 2014 12:56:58 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Removed virtual reference of sender. Was causing performance problems. Message-ID: <20140710105658.07B3F1C0906@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r880:4934d77ae183 Date: 2014-07-10 12:38 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/4934d77ae183/ Log: Removed virtual reference of sender. Was causing performance problems. Going to add this refactoring to the vref branch. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -67,10 +67,7 @@ s_new_context = w_active_context.as_context_get_shadow(self.space) while True: assert self.current_stack_depth == 0 - # Need to save s_sender, loop_bytecodes will nil this on return - # Virtual references are not allowed here, and neither are "fresh" contexts (except for the toplevel one). 
- assert s_new_context.virtual_sender is jit.vref_None - s_sender = s_new_context.direct_sender + s_sender = s_new_context.s_sender() try: self.loop_bytecodes(s_new_context) raise Exception("loop_bytecodes left without raising...") @@ -81,7 +78,7 @@ except Return, nlr: s_new_context = s_sender while s_new_context is not nlr.s_target_context: - s_sender = s_new_context.direct_sender + s_sender = s_new_context.s_sender() s_new_context._activate_unwind_context(self) s_new_context = s_sender s_new_context.push(nlr.value) @@ -122,27 +119,19 @@ # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame # and handles the stack overflow protection mechanism. def stack_frame(self, s_frame, s_sender, may_context_switch=True): - assert s_frame.virtual_sender is jit.vref_None try: - # Enter the context - store a virtual reference back to the sender - # Non-fresh contexts can happen, e.g. when activating a stored BlockContext. - # The same frame object must not pass through here recursively! - if s_frame.is_fresh() and s_sender is not None: - s_frame.virtual_sender = jit.virtual_ref(s_sender) - + if s_frame._s_sender is None and s_sender is not None: + s_frame.store_s_sender(s_sender, raise_error=False) + self.current_stack_depth += 1 if self.max_stack_depth > 0: if self.current_stack_depth >= self.max_stack_depth: raise StackOverflow(s_frame) - + # Now (continue to) execute the context bytecodes self.loop_bytecodes(s_frame, may_context_switch) finally: self.current_stack_depth -= 1 - # Cleanly leave the context. This will finish the virtual sender-reference, if - # it is still there, which can happen in case of ProcessSwitch or StackOverflow; - # in case of a Return, this will already be handled while unwinding the stack. 
- s_frame.finish_virtual_sender(s_sender) def step(self, context): bytecode = context.fetch_next_bytecode() diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -607,13 +607,13 @@ class ContextPartShadow(AbstractRedirectingShadow): __metaclass__ = extendabletype - _attrs_ = ['direct_sender', 'virtual_sender', + _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', '_stack_ptr', 'instances_w'] repr_classname = "ContextPartShadow" _virtualizable_ = [ - 'direct_sender', 'virtual_sender', + '_s_sender', "_pc", "_temps_and_stack[*]", "_stack_ptr", "_w_self", "_w_self_size" ] @@ -622,8 +622,7 @@ # Initialization def __init__(self, space, w_self): - self.direct_sender = None - self.virtual_sender = jit.vref_None + self._s_sender = None AbstractRedirectingShadow.__init__(self, space, w_self) self.instances_w = {} @@ -692,27 +691,9 @@ raise error.WrapperException("Index in context out of bounds") # === Sender === - # There are two fields for the sender (virtual and direct). Only one of them is can be set at a time. - # As long as the frame object is virtualized, using the virtual reference should increase performance. - # As soon as a frame object is forced to the heap, the direct reference must be used. - - def is_fresh(self): - return self.direct_sender is None and self.virtual_sender is jit.vref_None - - def finish_virtual_sender(self, s_sender): - if self.virtual_sender is not jit.vref_None: - if self.pc() != -1: - # stack is unrolling, but this frame was not - # marked_returned: it is an escaped frame - sender = self.virtual_sender() - self.direct_sender = sender - jit.virtual_ref_finish(self.virtual_sender, s_sender) - self.virtual_sender = jit.vref_None def store_s_sender(self, s_sender, raise_error=True): - # If we have a virtual back reference, we must finish it before storing the direct reference. 
- # self.finish_virtual_sender(save_direct_sender=False) - self.direct_sender = s_sender + self._s_sender = s_sender if raise_error: raise error.SenderChainManipulation(self) @@ -723,11 +704,7 @@ return sender.w_self() def s_sender(self): - if self.direct_sender: - return self.direct_sender - else: - result = self.virtual_sender() - return result + return self._s_sender # === Stack Pointer === From noreply at buildbot.pypy.org Thu Jul 10 12:56:59 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 10 Jul 2014 12:56:59 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed console output when executing an entire image. Message-ID: <20140710105659.190181C0906@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r881:54246f8c1106 Date: 2014-07-10 12:41 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/54246f8c1106/ Log: Fixed console output when executing an entire image. diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -163,6 +163,8 @@ def result_string(w_result): # This will also print contents of strings/symbols/numbers + if not w_result: + return "" return w_result.as_repr_string().replace('\r', '\n') def compile_code(interp, w_receiver, code): @@ -232,6 +234,7 @@ return w_active_context.as_context_get_shadow(space) def execute_context(interp, s_frame, measure=False): + print "" # Line break after image-loading-indicator characters try: return interp.interpret_toplevel(s_frame.w_self()) except error.Exit, e: From noreply at buildbot.pypy.org Thu Jul 10 14:28:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 10 Jul 2014 14:28:02 +0200 (CEST) Subject: [pypy-commit] pypy default: "Your tests are not a benchmark": add link from the FAQ Message-ID: <20140710122802.890E41C1068@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72411:f57183ddd4d0 Date: 2014-07-10 14:27 +0200 
http://bitbucket.org/pypy/pypy/changeset/f57183ddd4d0/ Log: "Your tests are not a benchmark": add link from the FAQ diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -171,16 +171,21 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. -Note that the JIT has a very high warm-up cost, meaning that the -programs are slow at the beginning. If you want to compare the timings -with CPython, even relatively simple programs need to run *at least* one -second, preferrably at least a few seconds. Large, complicated programs -need even more time to warm-up the JIT. +`Your tests are not a benchmark`_: tests tend to be slow under PyPy +because they run exactly once; if they are good tests, they exercise +various corner cases in your code. This is a bad case for JIT +compilers. Note also that our JIT has a very high warm-up cost, meaning +that any program is slow at the beginning. If you want to compare the +timings with CPython, even relatively simple programs need to run *at +least* one second, preferrably at least a few seconds. Large, +complicated programs need even more time to warm-up the JIT. .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +.. _`your tests are not a benchmark`: http://alexgaynor.net/2013/jul/15/your-tests-are-not-benchmark/ + --------------------------------------------------------------- Couldn't the JIT dump and reload already-compiled machine code? 
--------------------------------------------------------------- From noreply at buildbot.pypy.org Thu Jul 10 15:29:22 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 10 Jul 2014 15:29:22 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: use StackOverflow protection from rlib Message-ID: <20140710132922.73BC51C0F86@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage Changeset: r882:ca8d309213f3 Date: 2014-07-10 12:00 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ca8d309213f3/ Log: use StackOverflow protection from rlib diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -190,6 +190,5 @@ # Interpreter constants # -MAX_LOOP_DEPTH = 100 INTERRUPT_COUNTER_SIZE = 10000 CompileTime = time.time() diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -4,7 +4,7 @@ from spyvm import model, constants, primitives, conftest, wrapper from spyvm.tool.bitmanipulation import splitter -from rpython.rlib import jit +from rpython.rlib import jit, rstackovf from rpython.rlib import objectmodel, unroll class MissingBytecode(Exception): @@ -24,7 +24,7 @@ class Interpreter(object): _immutable_fields_ = ["space", "image", "image_name", - "max_stack_depth", "interrupt_counter_size", + "interrupt_counter_size", "startup_time", "evented", "interrupts"] jit_driver = jit.JitDriver( @@ -35,8 +35,7 @@ ) def __init__(self, space, image=None, image_name="", - trace=False, evented=True, interrupts=True, - max_stack_depth=constants.MAX_LOOP_DEPTH): + trace=False, evented=True, interrupts=True): import time # === Initialize immutable variables @@ -47,7 +46,6 @@ self.startup_time = image.startup_time else: self.startup_time = constants.CompileTime - self.max_stack_depth = max_stack_depth self.evented = evented self.interrupts = interrupts try: @@ -57,7 +55,6 @@ # === Initialize mutable variables self.interrupt_check_counter = 
self.interrupt_counter_size - self.current_stack_depth = 0 self.next_wakeup_tick = 0 self.trace = trace self.trace_proxy = False @@ -66,7 +63,6 @@ # This is the top-level loop and is not invoked recursively. s_new_context = w_active_context.as_context_get_shadow(self.space) while True: - assert self.current_stack_depth == 0 # Need to save s_sender, loop_bytecodes will nil this on return # Virtual references are not allowed here, and neither are "fresh" contexts (except for the toplevel one). assert s_new_context.virtual_sender is jit.vref_None @@ -128,16 +124,12 @@ # The same frame object must not pass through here recursively! if s_frame.is_fresh() and s_sender is not None: s_frame.virtual_sender = jit.virtual_ref(s_sender) - - self.current_stack_depth += 1 - if self.max_stack_depth > 0: - if self.current_stack_depth >= self.max_stack_depth: - raise StackOverflow(s_frame) - # Now (continue to) execute the context bytecodes self.loop_bytecodes(s_frame, may_context_switch) + except rstackovf.StackOverflow: + rstackovf.check_stack_overflow() + raise StackOverflow(s_frame) finally: - self.current_stack_depth -= 1 # Cleanly leave the context. This will finish the virtual sender-reference, if # it is still there, which can happen in case of ProcessSwitch or StackOverflow; # in case of a Return, this will already be handled while unwinding the stack. @@ -237,7 +229,7 @@ return s_frame def padding(self, symbol=' '): - return symbol * self.current_stack_depth + return symbol class ReturnFromTopLevel(Exception): _attrs_ = ["object"] @@ -976,11 +968,9 @@ # in order to enable tracing/jumping for message sends etc. 
def debugging(): def stepping_debugger_init(original): - def meth(self, space, image=None, image_name="", trace=False, - max_stack_depth=constants.MAX_LOOP_DEPTH): + def meth(self, space, image=None, image_name="", trace=False): return_value = original(self, space, image=image, - image_name=image_name, trace=trace, - max_stack_depth=max_stack_depth) + image_name=image_name, trace=trace) # ############################################################## self.message_stepping = False diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -87,7 +87,7 @@ except error.Exit, e: print e.msg return 1 - + if not as_benchmark: try: w_result = interp.perform(w_receiver, selector) @@ -121,12 +121,11 @@ -b|--benchmark [code string] -p|--poll_events -ni|--no-interrupts - -d|--max-stack-depth [number, default %d, <= 0 disables stack protection] -l|--storage-log -L|--storage-log-aggregate -E|--storage-log-elements [image path, default: Squeak.image] - """ % (argv[0], constants.MAX_LOOP_DEPTH) + """ % argv[0] def _arg_missing(argv, idx, arg): if len(argv) == idx + 1: @@ -144,9 +143,8 @@ stringarg = "" code = None as_benchmark = False - max_stack_depth = constants.MAX_LOOP_DEPTH interrupts = True - + while idx < len(argv): arg = argv[idx] if arg in ["-h", "--help"]: @@ -185,10 +183,6 @@ idx += 1 elif arg in ["-ni", "--no-interrupts"]: interrupts = False - elif arg in ["-d", "--max-stack-depth"]: - _arg_missing(argv, idx, arg) - max_stack_depth = int(argv[idx + 1]) - idx += 1 elif arg in ["-l", "--storage-log"]: storage_logger.activate() elif arg in ["-L", "--storage-log-aggregate"]: @@ -215,13 +209,13 @@ except OSError as e: os.write(2, "%s -- %s (LoadError)\n" % (os.strerror(e.errno), path)) return 1 - + space = prebuilt_space image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) interp = 
interpreter.Interpreter(space, image, image_name=path, trace=trace, evented=evented, - interrupts=interrupts, max_stack_depth=max_stack_depth) + interrupts=interrupts) space.runtime_setup(argv[0]) result = 0 if benchmark is not None: From noreply at buildbot.pypy.org Thu Jul 10 15:29:23 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 10 Jul 2014 15:29:23 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: avoid forcing s_sender for local returns (that is, all returns from methods and return top from block) Message-ID: <20140710132923.9E83A1C0F86@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage Changeset: r883:cb252f497113 Date: 2014-07-10 14:08 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/cb252f497113/ Log: avoid forcing s_sender for local returns (that is, all returns from methods and return top from block) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -75,11 +75,13 @@ print "====== StackOverflow, contexts forced to heap at: %s" % e.s_new_context.short_str() s_new_context = e.s_new_context except Return, nlr: + assert nlr.s_target_context or nlr.is_local s_new_context = s_sender - while s_new_context is not nlr.s_target_context: - s_sender = s_new_context.direct_sender - s_new_context._activate_unwind_context(self) - s_new_context = s_sender + if not nlr.is_local: + while s_new_context is not nlr.s_target_context: + s_sender = s_new_context.direct_sender + s_new_context._activate_unwind_context(self) + s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: if self.trace: @@ -108,11 +110,16 @@ try: self.step(s_context) except Return, nlr: - if nlr.s_target_context is not s_context: + if nlr.s_target_context is s_context or nlr.is_local: + s_context.push(nlr.value) + else: + if nlr.s_target_context is None: + # This is the case where we are returning to our sender. 
+ # Mark the return as local, so our sender will take it + nlr.is_local = True s_context._activate_unwind_context(self) raise nlr - else: - s_context.push(nlr.value) + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame # and handles the stack overflow protection mechanism. @@ -237,10 +244,11 @@ self.object = object class Return(Exception): - _attrs_ = ["value", "s_target_context"] + _attrs_ = ["value", "s_target_context", "is_local"] def __init__(self, s_target_context, w_result): self.value = w_result self.s_target_context = s_target_context + self.is_local = False class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave @@ -636,7 +644,7 @@ interp.padding(), code, w_method.safe_identifier_string(), w_selector.str_content()) raise e - def _return(self, return_value, interp, s_return_to): + def _return(self, return_value, interp, local_return=False): # unfortunately, this assert is not true for some tests. TODO fix this. # assert self._stack_ptr == self.tempsize() @@ -644,36 +652,47 @@ if interp.trace: print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) - if s_return_to is None: - # This should never happen while executing a normal image. - raise ReturnFromTopLevel(return_value) + if self.home_is_self() or local_return: + # a local return just needs to go up the stack once. there + # it will find the sender as a local, and we don't have to + # force the reference + s_return_to = None + if self.direct_sender is None and self.virtual_sender is jit.vref_None: + # This should never happen while executing a normal image. + raise ReturnFromTopLevel(return_value) + else: + s_return_to = self.s_home().s_sender() + if s_return_to is None: + # This should never happen while executing a normal image. 
+ raise ReturnFromTopLevel(return_value) + raise Return(s_return_to, return_value) # ====== Send/Return bytecodes ====== @bytecode_implementation() def returnReceiverBytecode(self, interp, current_bytecode): - return self._return(self.w_receiver(), interp, self.s_home().s_sender()) + return self._return(self.w_receiver(), interp) @bytecode_implementation() def returnTrueBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_true, interp, self.s_home().s_sender()) + return self._return(interp.space.w_true, interp) @bytecode_implementation() def returnFalseBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_false, interp, self.s_home().s_sender()) + return self._return(interp.space.w_false, interp) @bytecode_implementation() def returnNilBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_nil, interp, self.s_home().s_sender()) + return self._return(interp.space.w_nil, interp) @bytecode_implementation() def returnTopFromMethodBytecode(self, interp, current_bytecode): - return self._return(self.pop(), interp, self.s_home().s_sender()) + return self._return(self.pop(), interp) @bytecode_implementation() def returnTopFromBlockBytecode(self, interp, current_bytecode): - return self._return(self.pop(), interp, self.s_sender()) + return self._return(self.pop(), interp, local_return=True) @bytecode_implementation() def sendLiteralSelectorBytecode(self, interp, current_bytecode): @@ -754,7 +773,8 @@ try: self.bytecodePrimValue(interp, 0) except Return, nlr: - if self is not nlr.s_target_context: + assert nlr.s_target_context or nlr.is_local + if self is not nlr.s_target_context and not nlr.is_local: raise nlr finally: self.mark_returned() diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -800,6 +800,9 @@ def is_closure_context(self): raise NotImplementedError() + def home_is_self(self): + raise NotImplementedError() + # === Other properties of Contexts === 
def mark_returned(self): @@ -1018,6 +1021,9 @@ def is_closure_context(self): return True + def home_is_self(self): + return False + # === Temporary variables === def gettemp(self, index): @@ -1209,6 +1215,9 @@ def is_closure_context(self): return self.closure is not None + def home_is_self(self): + return not self.is_closure_context() + # ______________________________________________________________________ # Marriage of MethodContextShadows with PointerObjects only when required From noreply at buildbot.pypy.org Thu Jul 10 15:29:24 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 10 Jul 2014 15:29:24 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-vrefs-rstackovf-localreturn: separate branch Message-ID: <20140710132924.B4D2A1C0F86@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage-vrefs-rstackovf-localreturn Changeset: r884:03b440963bdb Date: 2014-07-10 14:25 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/03b440963bdb/ Log: separate branch From noreply at buildbot.pypy.org Thu Jul 10 15:29:25 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 10 Jul 2014 15:29:25 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-vrefs-rstackovf-localreturn: use StackOverflow protection from rlib Message-ID: <20140710132925.DD5CB1C0F86@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage-vrefs-rstackovf-localreturn Changeset: r885:6704ab7a2008 Date: 2014-07-10 12:00 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6704ab7a2008/ Log: use StackOverflow protection from rlib diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -190,6 +190,5 @@ # Interpreter constants # -MAX_LOOP_DEPTH = 100 INTERRUPT_COUNTER_SIZE = 10000 CompileTime = time.time() diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -4,7 +4,7 @@ from spyvm import model, constants, primitives, conftest, wrapper from 
spyvm.tool.bitmanipulation import splitter -from rpython.rlib import jit +from rpython.rlib import jit, rstackovf from rpython.rlib import objectmodel, unroll class MissingBytecode(Exception): @@ -24,7 +24,7 @@ class Interpreter(object): _immutable_fields_ = ["space", "image", "image_name", - "max_stack_depth", "interrupt_counter_size", + "interrupt_counter_size", "startup_time", "evented", "interrupts"] jit_driver = jit.JitDriver( @@ -35,8 +35,7 @@ ) def __init__(self, space, image=None, image_name="", - trace=False, evented=True, interrupts=True, - max_stack_depth=constants.MAX_LOOP_DEPTH): + trace=False, evented=True, interrupts=True): import time # === Initialize immutable variables @@ -47,7 +46,6 @@ self.startup_time = image.startup_time else: self.startup_time = constants.CompileTime - self.max_stack_depth = max_stack_depth self.evented = evented self.interrupts = interrupts try: @@ -57,7 +55,6 @@ # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size - self.current_stack_depth = 0 self.next_wakeup_tick = 0 self.trace = trace self.trace_proxy = False @@ -66,7 +63,6 @@ # This is the top-level loop and is not invoked recursively. s_new_context = w_active_context.as_context_get_shadow(self.space) while True: - assert self.current_stack_depth == 0 # Need to save s_sender, loop_bytecodes will nil this on return # Virtual references are not allowed here, and neither are "fresh" contexts (except for the toplevel one). assert s_new_context.virtual_sender is jit.vref_None @@ -128,16 +124,12 @@ # The same frame object must not pass through here recursively! 
if s_frame.is_fresh() and s_sender is not None: s_frame.virtual_sender = jit.virtual_ref(s_sender) - - self.current_stack_depth += 1 - if self.max_stack_depth > 0: - if self.current_stack_depth >= self.max_stack_depth: - raise StackOverflow(s_frame) - # Now (continue to) execute the context bytecodes self.loop_bytecodes(s_frame, may_context_switch) + except rstackovf.StackOverflow: + rstackovf.check_stack_overflow() + raise StackOverflow(s_frame) finally: - self.current_stack_depth -= 1 # Cleanly leave the context. This will finish the virtual sender-reference, if # it is still there, which can happen in case of ProcessSwitch or StackOverflow; # in case of a Return, this will already be handled while unwinding the stack. @@ -237,7 +229,7 @@ return s_frame def padding(self, symbol=' '): - return symbol * self.current_stack_depth + return symbol class ReturnFromTopLevel(Exception): _attrs_ = ["object"] @@ -976,11 +968,9 @@ # in order to enable tracing/jumping for message sends etc. def debugging(): def stepping_debugger_init(original): - def meth(self, space, image=None, image_name="", trace=False, - max_stack_depth=constants.MAX_LOOP_DEPTH): + def meth(self, space, image=None, image_name="", trace=False): return_value = original(self, space, image=image, - image_name=image_name, trace=trace, - max_stack_depth=max_stack_depth) + image_name=image_name, trace=trace) # ############################################################## self.message_stepping = False diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -87,7 +87,7 @@ except error.Exit, e: print e.msg return 1 - + if not as_benchmark: try: w_result = interp.perform(w_receiver, selector) @@ -121,12 +121,11 @@ -b|--benchmark [code string] -p|--poll_events -ni|--no-interrupts - -d|--max-stack-depth [number, default %d, <= 0 disables stack protection] -l|--storage-log -L|--storage-log-aggregate 
-E|--storage-log-elements [image path, default: Squeak.image] - """ % (argv[0], constants.MAX_LOOP_DEPTH) + """ % argv[0] def _arg_missing(argv, idx, arg): if len(argv) == idx + 1: @@ -144,9 +143,8 @@ stringarg = "" code = None as_benchmark = False - max_stack_depth = constants.MAX_LOOP_DEPTH interrupts = True - + while idx < len(argv): arg = argv[idx] if arg in ["-h", "--help"]: @@ -185,10 +183,6 @@ idx += 1 elif arg in ["-ni", "--no-interrupts"]: interrupts = False - elif arg in ["-d", "--max-stack-depth"]: - _arg_missing(argv, idx, arg) - max_stack_depth = int(argv[idx + 1]) - idx += 1 elif arg in ["-l", "--storage-log"]: storage_logger.activate() elif arg in ["-L", "--storage-log-aggregate"]: @@ -215,13 +209,13 @@ except OSError as e: os.write(2, "%s -- %s (LoadError)\n" % (os.strerror(e.errno), path)) return 1 - + space = prebuilt_space image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) interp = interpreter.Interpreter(space, image, image_name=path, trace=trace, evented=evented, - interrupts=interrupts, max_stack_depth=max_stack_depth) + interrupts=interrupts) space.runtime_setup(argv[0]) result = 0 if benchmark is not None: From noreply at buildbot.pypy.org Thu Jul 10 15:29:26 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 10 Jul 2014 15:29:26 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-vrefs-rstackovf-localreturn: avoid forcing s_sender for local returns (that is, all returns from methods and return top from block) Message-ID: <20140710132926.EF9371C0F86@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage-vrefs-rstackovf-localreturn Changeset: r886:71d4742bcc58 Date: 2014-07-10 14:08 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/71d4742bcc58/ Log: avoid forcing s_sender for local returns (that is, all returns from methods and return top from block) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- 
a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -75,11 +75,13 @@ print "====== StackOverflow, contexts forced to heap at: %s" % e.s_new_context.short_str() s_new_context = e.s_new_context except Return, nlr: + assert nlr.s_target_context or nlr.is_local s_new_context = s_sender - while s_new_context is not nlr.s_target_context: - s_sender = s_new_context.direct_sender - s_new_context._activate_unwind_context(self) - s_new_context = s_sender + if not nlr.is_local: + while s_new_context is not nlr.s_target_context: + s_sender = s_new_context.direct_sender + s_new_context._activate_unwind_context(self) + s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: if self.trace: @@ -108,11 +110,16 @@ try: self.step(s_context) except Return, nlr: - if nlr.s_target_context is not s_context: + if nlr.s_target_context is s_context or nlr.is_local: + s_context.push(nlr.value) + else: + if nlr.s_target_context is None: + # This is the case where we are returning to our sender. + # Mark the return as local, so our sender will take it + nlr.is_local = True s_context._activate_unwind_context(self) raise nlr - else: - s_context.push(nlr.value) + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame # and handles the stack overflow protection mechanism. @@ -237,10 +244,11 @@ self.object = object class Return(Exception): - _attrs_ = ["value", "s_target_context"] + _attrs_ = ["value", "s_target_context", "is_local"] def __init__(self, s_target_context, w_result): self.value = w_result self.s_target_context = s_target_context + self.is_local = False class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave @@ -636,7 +644,7 @@ interp.padding(), code, w_method.safe_identifier_string(), w_selector.str_content()) raise e - def _return(self, return_value, interp, s_return_to): + def _return(self, return_value, interp, local_return=False): # unfortunately, this assert is not true for some tests. 
TODO fix this. # assert self._stack_ptr == self.tempsize() @@ -644,36 +652,47 @@ if interp.trace: print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) - if s_return_to is None: - # This should never happen while executing a normal image. - raise ReturnFromTopLevel(return_value) + if self.home_is_self() or local_return: + # a local return just needs to go up the stack once. there + # it will find the sender as a local, and we don't have to + # force the reference + s_return_to = None + if self.direct_sender is None and self.virtual_sender is jit.vref_None: + # This should never happen while executing a normal image. + raise ReturnFromTopLevel(return_value) + else: + s_return_to = self.s_home().s_sender() + if s_return_to is None: + # This should never happen while executing a normal image. + raise ReturnFromTopLevel(return_value) + raise Return(s_return_to, return_value) # ====== Send/Return bytecodes ====== @bytecode_implementation() def returnReceiverBytecode(self, interp, current_bytecode): - return self._return(self.w_receiver(), interp, self.s_home().s_sender()) + return self._return(self.w_receiver(), interp) @bytecode_implementation() def returnTrueBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_true, interp, self.s_home().s_sender()) + return self._return(interp.space.w_true, interp) @bytecode_implementation() def returnFalseBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_false, interp, self.s_home().s_sender()) + return self._return(interp.space.w_false, interp) @bytecode_implementation() def returnNilBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_nil, interp, self.s_home().s_sender()) + return self._return(interp.space.w_nil, interp) @bytecode_implementation() def returnTopFromMethodBytecode(self, interp, current_bytecode): - return self._return(self.pop(), interp, self.s_home().s_sender()) + return self._return(self.pop(), interp) 
@bytecode_implementation() def returnTopFromBlockBytecode(self, interp, current_bytecode): - return self._return(self.pop(), interp, self.s_sender()) + return self._return(self.pop(), interp, local_return=True) @bytecode_implementation() def sendLiteralSelectorBytecode(self, interp, current_bytecode): @@ -754,7 +773,8 @@ try: self.bytecodePrimValue(interp, 0) except Return, nlr: - if self is not nlr.s_target_context: + assert nlr.s_target_context or nlr.is_local + if self is not nlr.s_target_context and not nlr.is_local: raise nlr finally: self.mark_returned() diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -800,6 +800,9 @@ def is_closure_context(self): raise NotImplementedError() + def home_is_self(self): + raise NotImplementedError() + # === Other properties of Contexts === def mark_returned(self): @@ -1018,6 +1021,9 @@ def is_closure_context(self): return True + def home_is_self(self): + return False + # === Temporary variables === def gettemp(self, index): @@ -1209,6 +1215,9 @@ def is_closure_context(self): return self.closure is not None + def home_is_self(self): + return not self.is_closure_context() + # ______________________________________________________________________ # Marriage of MethodContextShadows with PointerObjects only when required From noreply at buildbot.pypy.org Thu Jul 10 15:29:28 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 10 Jul 2014 15:29:28 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: use StackOverflow protection from rlib Message-ID: <20140710132928.14E411C0F86@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage Changeset: r887:75fb3a75ff6b Date: 2014-07-10 12:00 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/75fb3a75ff6b/ Log: use StackOverflow protection from rlib diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -190,6 +190,5 @@ # Interpreter constants # -MAX_LOOP_DEPTH = 100 
INTERRUPT_COUNTER_SIZE = 10000 CompileTime = time.time() From noreply at buildbot.pypy.org Thu Jul 10 15:29:29 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 10 Jul 2014 15:29:29 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: use StackOverflow protection from rlib Message-ID: <20140710132929.318AE1C0F86@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage Changeset: r888:afd77668220d Date: 2014-07-10 12:00 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/afd77668220d/ Log: use StackOverflow protection from rlib diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -190,6 +190,5 @@ # Interpreter constants # -MAX_LOOP_DEPTH = 100 INTERRUPT_COUNTER_SIZE = 10000 CompileTime = time.time() diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -4,7 +4,7 @@ from spyvm import model, constants, primitives, conftest, wrapper from spyvm.tool.bitmanipulation import splitter -from rpython.rlib import jit +from rpython.rlib import jit, rstackovf from rpython.rlib import objectmodel, unroll class MissingBytecode(Exception): @@ -24,7 +24,7 @@ class Interpreter(object): _immutable_fields_ = ["space", "image", "image_name", - "max_stack_depth", "interrupt_counter_size", + "interrupt_counter_size", "startup_time", "evented", "interrupts"] jit_driver = jit.JitDriver( @@ -35,8 +35,7 @@ ) def __init__(self, space, image=None, image_name="", - trace=False, evented=True, interrupts=True, - max_stack_depth=constants.MAX_LOOP_DEPTH): + trace=False, evented=True, interrupts=True): import time # === Initialize immutable variables @@ -47,7 +46,6 @@ self.startup_time = image.startup_time else: self.startup_time = constants.CompileTime - self.max_stack_depth = max_stack_depth self.evented = evented self.interrupts = interrupts try: @@ -57,7 +55,6 @@ # === Initialize mutable variables self.interrupt_check_counter = 
self.interrupt_counter_size - self.current_stack_depth = 0 self.next_wakeup_tick = 0 self.trace = trace self.trace_proxy = False @@ -66,7 +63,6 @@ # This is the top-level loop and is not invoked recursively. s_new_context = w_active_context.as_context_get_shadow(self.space) while True: - assert self.current_stack_depth == 0 s_sender = s_new_context.s_sender() try: self.loop_bytecodes(s_new_context) @@ -122,16 +118,11 @@ try: if s_frame._s_sender is None and s_sender is not None: s_frame.store_s_sender(s_sender, raise_error=False) - - self.current_stack_depth += 1 - if self.max_stack_depth > 0: - if self.current_stack_depth >= self.max_stack_depth: - raise StackOverflow(s_frame) - # Now (continue to) execute the context bytecodes self.loop_bytecodes(s_frame, may_context_switch) - finally: - self.current_stack_depth -= 1 + except rstackovf.StackOverflow: + rstackovf.check_stack_overflow() + raise StackOverflow(s_frame) def step(self, context): bytecode = context.fetch_next_bytecode() @@ -205,7 +196,7 @@ s_frame = self.create_toplevel_context(w_receiver, selector, w_selector, w_arguments) self.interrupt_check_counter = self.interrupt_counter_size return self.interpret_toplevel(s_frame.w_self()) - + def create_toplevel_context(self, w_receiver, selector="", w_selector=None, w_arguments=[]): if w_selector is None: assert selector, "Need either string or W_Object selector" @@ -213,7 +204,7 @@ w_selector = self.image.w_asSymbol else: w_selector = self.perform(self.space.wrap_string(selector), "asSymbol") - + w_method = model.W_CompiledMethod(self.space, header=512) w_method.literalatput0(self.space, 1, w_selector) assert len(w_arguments) <= 7 @@ -225,7 +216,7 @@ return s_frame def padding(self, symbol=' '): - return symbol * self.current_stack_depth + return symbol class ReturnFromTopLevel(Exception): _attrs_ = ["object"] @@ -964,11 +955,9 @@ # in order to enable tracing/jumping for message sends etc. 
def debugging(): def stepping_debugger_init(original): - def meth(self, space, image=None, image_name="", trace=False, - max_stack_depth=constants.MAX_LOOP_DEPTH): + def meth(self, space, image=None, image_name="", trace=False): return_value = original(self, space, image=image, - image_name=image_name, trace=trace, - max_stack_depth=max_stack_depth) + image_name=image_name, trace=trace) # ############################################################## self.message_stepping = False diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -14,13 +14,13 @@ print """ Usage: %s [-r|-m] [-naHu] [-jpis] [-tlLE] - image path (default: Squeak.image) - + Execution mode: (no flags) - Image will be normally opened. -r|--run - Code will be compiled and executed, result printed. -m|--method - Selector will be sent to a SmallInteger, result printed. -h|--help - Output this and exit. - + Execution parameters: -n|--num - Only with -m or -r, SmallInteger to be used as receiver (default: nil). -a|--arg - Only with -m, will be used as single String argument. @@ -30,28 +30,25 @@ in the image and execute the context directly. The image window will probably not open. Good for benchmarking. -u - Only with -m or -r, try to stop UI-process at startup. Can help benchmarking. - + Other parameters: -j|--jit - jitargs will be passed to the jit configuration. -p|--poll - Actively poll for events. Try this if the image is not responding well. -i|--no-interrupts - Disable timer interrupt. Disables non-cooperative scheduling. - -s - After num stack frames, the entire stack will be dumped to the heap. - This breaks performance, but protects agains stack overflow. - num <= 0 disables stack protection (default: %d) - + Logging parameters: -t|--trace - Output a trace of each message, primitive, return value and process switch. -l|--storage-log - Output a log of storage operations. 
-L|--storage-log-aggregate - Output an aggregated storage log at the end of execution. -E|--storage-log-elements - Include classnames of elements into the storage log. - - """ % (argv[0], constants.MAX_LOOP_DEPTH) + + """ % argv[0] def get_parameter(argv, idx, arg): if len(argv) < idx + 1: raise RuntimeError("Error: missing argument after %s" % arg) return argv[idx], idx + 1 - + prebuilt_space = objspace.ObjSpace() def entry_point(argv): @@ -65,12 +62,11 @@ # == Other parameters poll = False interrupts = True - max_stack_depth = constants.MAX_LOOP_DEPTH trace = False - + path = argv[1] if len(argv) > 1 else "Squeak.image" idx = 2 - + while idx < len(argv): arg = argv[idx] idx += 1 @@ -96,9 +92,6 @@ code, idx = get_parameter(argv, idx, arg) elif arg in ["-i", "--no-interrupts"]: interrupts = False - elif arg in ["-s"]: - arg, idx = get_parameter(argv, idx, arg) - max_stack_depth = int(arg) elif arg in ["-P", "--process"]: headless = False elif arg in ["-u"]: @@ -113,10 +106,10 @@ else: _usage(argv) return -1 - + if code and selector: raise RuntimeError("Cannot handle both -r and -m.") - + path = rpath.rabspath(path) try: f = open_file_as_stream(path, mode="rb", buffering=0) @@ -127,16 +120,16 @@ except OSError as e: os.write(2, "%s -- %s (LoadError)\n" % (os.strerror(e.errno), path)) return 1 - + # Load & prepare image and environment space = prebuilt_space image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) interp = interpreter.Interpreter(space, image, image_name=path, trace=trace, evented=not poll, - interrupts=interrupts, max_stack_depth=max_stack_depth) + interrupts=interrupts) space.runtime_setup(argv[0]) - + # Create context to be executed if code or selector: if not have_number: @@ -155,7 +148,7 @@ context = active_context(interp.space) else: context = active_context(interp.space) - + w_result = execute_context(interp, context) print result_string(w_result) 
storage_logger.print_aggregated_log() @@ -198,13 +191,13 @@ return None w_receiver_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() return selector - + def create_context(interp, w_receiver, selector, stringarg): args = [] if stringarg: args.append(interp.space.wrap_string(stringarg)) return interp.create_toplevel_context(w_receiver, selector, w_arguments = args) - + def create_process(interp, s_frame): space = interp.space w_active_process = wrapper.scheduler(space).active_process() @@ -221,10 +214,10 @@ priority = 7 w_benchmark_proc.store(space, 1, s_frame.w_self()) w_benchmark_proc.store(space, 2, space.wrap_int(priority)) - + # Make process eligible for scheduling wrapper.ProcessWrapper(space, w_benchmark_proc).put_to_sleep() - + def active_context(space): w_active_process = wrapper.scheduler(space).active_process() active_process = wrapper.ProcessWrapper(space, w_active_process) @@ -240,7 +233,7 @@ except error.Exit, e: print "Exited: %s" % e.msg return None - + # _____ Target and Main _____ def target(driver, *args): From noreply at buildbot.pypy.org Thu Jul 10 15:29:30 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 10 Jul 2014 15:29:30 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: avoid forcing s_sender for local returns (that is, all returns from methods and return top from block) Message-ID: <20140710132930.6BB661C0F86@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage Changeset: r889:50071fb31ad9 Date: 2014-07-10 14:08 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/50071fb31ad9/ Log: avoid forcing s_sender for local returns (that is, all returns from methods and return top from block) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -72,11 +72,13 @@ print "====== StackOverflow, contexts forced to heap at: %s" % e.s_new_context.short_str() s_new_context = e.s_new_context except Return, nlr: + assert nlr.s_target_context 
or nlr.is_local s_new_context = s_sender - while s_new_context is not nlr.s_target_context: - s_sender = s_new_context.s_sender() - s_new_context._activate_unwind_context(self) - s_new_context = s_sender + if not nlr.is_local: + while s_new_context is not nlr.s_target_context: + s_sender = s_new_context.s_sender() + s_new_context._activate_unwind_context(self) + s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: assert not self.space.suppress_process_switch[0], "ProcessSwitch should be disabled..." @@ -106,11 +108,16 @@ try: self.step(s_context) except Return, nlr: - if nlr.s_target_context is not s_context: + if nlr.s_target_context is s_context or nlr.is_local: + s_context.push(nlr.value) + else: + if nlr.s_target_context is None: + # This is the case where we are returning to our sender. + # Mark the return as local, so our sender will take it + nlr.is_local = True s_context._activate_unwind_context(self) raise nlr - else: - s_context.push(nlr.value) + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame # and handles the stack overflow protection mechanism. @@ -224,10 +231,11 @@ self.object = object class Return(Exception): - _attrs_ = ["value", "s_target_context"] + _attrs_ = ["value", "s_target_context", "is_local"] def __init__(self, s_target_context, w_result): self.value = w_result self.s_target_context = s_target_context + self.is_local = False class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave @@ -623,7 +631,7 @@ interp.padding(), code, w_method.safe_identifier_string(), w_selector.str_content()) raise e - def _return(self, return_value, interp, s_return_to): + def _return(self, return_value, interp, local_return=False): # unfortunately, this assert is not true for some tests. TODO fix this. 
# assert self._stack_ptr == self.tempsize() @@ -631,36 +639,47 @@ if interp.trace: print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) - if s_return_to is None: - # This should never happen while executing a normal image. - raise ReturnFromTopLevel(return_value) + if self.home_is_self() or local_return: + # a local return just needs to go up the stack once. there + # it will find the sender as a local, and we don't have to + # force the reference + s_return_to = None + if self.s_sender() is None: + # This should never happen while executing a normal image. + raise ReturnFromTopLevel(return_value) + else: + s_return_to = self.s_home().s_sender() + if s_return_to is None: + # This should never happen while executing a normal image. + raise ReturnFromTopLevel(return_value) + raise Return(s_return_to, return_value) # ====== Send/Return bytecodes ====== @bytecode_implementation() def returnReceiverBytecode(self, interp, current_bytecode): - return self._return(self.w_receiver(), interp, self.s_home().s_sender()) + return self._return(self.w_receiver(), interp) @bytecode_implementation() def returnTrueBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_true, interp, self.s_home().s_sender()) + return self._return(interp.space.w_true, interp) @bytecode_implementation() def returnFalseBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_false, interp, self.s_home().s_sender()) + return self._return(interp.space.w_false, interp) @bytecode_implementation() def returnNilBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_nil, interp, self.s_home().s_sender()) + return self._return(interp.space.w_nil, interp) @bytecode_implementation() def returnTopFromMethodBytecode(self, interp, current_bytecode): - return self._return(self.pop(), interp, self.s_home().s_sender()) + return self._return(self.pop(), interp) @bytecode_implementation() def returnTopFromBlockBytecode(self, interp, 
current_bytecode): - return self._return(self.pop(), interp, self.s_sender()) + return self._return(self.pop(), interp, local_return=True) @bytecode_implementation() def sendLiteralSelectorBytecode(self, interp, current_bytecode): @@ -741,7 +760,8 @@ try: self.bytecodePrimValue(interp, 0) except Return, nlr: - if self is not nlr.s_target_context: + assert nlr.s_target_context or nlr.is_local + if self is not nlr.s_target_context and not nlr.is_local: raise nlr finally: self.mark_returned() diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -777,6 +777,9 @@ def is_closure_context(self): raise NotImplementedError() + def home_is_self(self): + raise NotImplementedError() + # === Other properties of Contexts === def mark_returned(self): @@ -995,6 +998,9 @@ def is_closure_context(self): return True + def home_is_self(self): + return False + # === Temporary variables === def gettemp(self, index): @@ -1186,6 +1192,9 @@ def is_closure_context(self): return self.closure is not None + def home_is_self(self): + return not self.is_closure_context() + # ______________________________________________________________________ # Marriage of MethodContextShadows with PointerObjects only when required From noreply at buildbot.pypy.org Thu Jul 10 16:58:32 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 10 Jul 2014 16:58:32 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: pull checking for BlockContext>>ensure: primitive into the shadows Message-ID: <20140710145832.583281C1068@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage Changeset: r890:6998efebafd6 Date: 2014-07-10 16:58 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6998efebafd6/ Log: pull checking for BlockContext>>ensure: primitive into the shadows diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -748,9 +748,7 @@ # ====== Misc ====== def _activate_unwind_context(self, 
interp): - # TODO put the constant somewhere else. - # Primitive 198 is used in BlockClosure >> ensure: - if self.is_closure_context() or self.w_method().primitive() != 198: + if self.is_closure_context() or not self.is_BlockClosure_ensure(): self.mark_returned() return # The first temp is executed flag for both #ensure: and #ifCurtailed: diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -777,6 +777,9 @@ def is_closure_context(self): raise NotImplementedError() + def is_BlockClosure_ensure(self): + raise NotImplementedError() + def home_is_self(self): raise NotImplementedError() @@ -998,6 +1001,9 @@ def is_closure_context(self): return True + def is_BlockClosure_ensure(self): + return False + def home_is_self(self): return False @@ -1080,7 +1086,7 @@ return '[] in %s' % self.w_method().get_identifier_string() class MethodContextShadow(ContextPartShadow): - _attrs_ = ['closure', '_w_receiver', '_w_method'] + _attrs_ = ['closure', '_w_receiver', '_w_method', '_is_BlockClosure_ensure'] repr_classname = "MethodContextShadow" # === Initialization === @@ -1102,6 +1108,7 @@ self.init_stack_and_temps() else: self._w_method = None + self._is_BlockClosure_ensure = False argc = len(arguments) for i0 in range(argc): @@ -1174,6 +1181,9 @@ def store_w_method(self, w_method): assert isinstance(w_method, model.W_CompiledMethod) self._w_method = w_method + if w_method: + # Primitive 198 is used in BlockClosure >> ensure: + self._is_BlockClosure_ensure = (w_method.primitive() == 198) def w_receiver(self): return self._w_receiver @@ -1192,6 +1202,9 @@ def is_closure_context(self): return self.closure is not None + def is_BlockClosure_ensure(self): + return self._is_BlockClosure_ensure + def home_is_self(self): return not self.is_closure_context() diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py --- a/spyvm/test/jittest/base.py +++ b/spyvm/test/jittest/base.py @@ -18,7 +18,7 @@ def run(self, spy, tmpdir, code): logfile = 
str(tmpdir.join("x.pypylog")) proc = subprocess.Popen( - [str(spy), "-r", code.replace("\n", "\r\n"), BenchmarkImage], + [str(spy), BenchmarkImage, "-r", code.replace("\n", "\r\n")], cwd=str(tmpdir), env={"PYPYLOG": "jit-log-opt:%s" % logfile, "SDL_VIDEODRIVER": "dummy"} From noreply at buildbot.pypy.org Thu Jul 10 17:17:26 2014 From: noreply at buildbot.pypy.org (Conrad Calmez) Date: Thu, 10 Jul 2014 17:17:26 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: passed arguments to image via own flag Message-ID: <20140710151726.797A61C0F86@cobra.cs.uni-duesseldorf.de> Author: Conrad Calmez Branch: stmgc-c7 Changeset: r891:ada1ec7e18d8 Date: 2014-07-10 17:17 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ada1ec7e18d8/ Log: passed arguments to image via own flag also fixes BSD-like argument parsing aka. you can put them in arbitrary order again diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -125,6 +125,7 @@ -r|--run [code string] -b|--benchmark [code string] -p|--poll_events + -s|--smalltalk-args [argument to pass] [image path, default: Squeak.image] """ % argv[0] @@ -148,9 +149,7 @@ while idx < len(argv): arg = argv[idx] - if path is not None: # smalltalk args - smalltalk_args.append(arg) - elif arg in ["-h", "--help"]: + if arg in ["-h", "--help"]: _usage(argv) return 0 elif arg in ["-j", "--jit"]: @@ -184,6 +183,9 @@ code = argv[idx + 1] as_benchmark = True idx += 1 + elif arg in ["-s", "--smalltalk-args"]: + smalltalk_args.append(argv[idx + 1]) + idx += 1 elif path is None: path = argv[idx] else: From noreply at buildbot.pypy.org Thu Jul 10 17:23:23 2014 From: noreply at buildbot.pypy.org (waedt) Date: Thu, 10 Jul 2014 17:23:23 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix byte-index / char-index mixup Message-ID: <20140710152323.39FF81C0F86@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: 
r72412:b286a841b645 Date: 2014-07-08 22:24 -0500 http://bitbucket.org/pypy/pypy/changeset/b286a841b645/ Log: Fix byte-index / char-index mixup diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -380,7 +380,9 @@ assert u'ab'.startswith(u'a', 1) is False assert u'ab'.startswith(u'b', 1) is True assert u'abc'.startswith(u'bc', 1, 2) is False - assert u'abc'.startswith(u'c', -1, 4) is True + + assert u'\xE4bc'.startswith(u'\xE4') is True + assert u'\xE4\xE4bc'.startswith(u'\xE4', 1) is True def test_startswith_tuples(self): assert u'hello'.startswith((u'he', u'ha')) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -324,11 +324,11 @@ def _startswith(self, space, value, w_prefix, start, end): return startswith(value.bytes, self._op_val(space, w_prefix).bytes, - start, end) + value.index_of_char(start), value.index_of_char(end)) def _endswith(self, space, value, w_prefix, start, end): return endswith(value.bytes, self._op_val(space, w_prefix).bytes, - start, end) + value.index_of_char(start), value.index_of_char(end)) @staticmethod def _split(value, sep=None, maxsplit=-1): From noreply at buildbot.pypy.org Thu Jul 10 17:23:24 2014 From: noreply at buildbot.pypy.org (waedt) Date: Thu, 10 Jul 2014 17:23:24 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Handle UnicodeEncodeError better; make default error handlers RPython Message-ID: <20140710152324.B46441C0F86@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72413:02e451d4a78b Date: 2014-07-09 00:42 -0500 http://bitbucket.org/pypy/pypy/changeset/02e451d4a78b/ Log: Handle UnicodeEncodeError better; make default error handlers RPython diff --git a/pypy/interpreter/utf8_codecs.py 
b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -1,7 +1,7 @@ import sys from rpython.rlib.rstring import StringBuilder -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.unicodedata import unicodedb from rpython.rlib.runicode import utf8_code_length @@ -1564,7 +1564,6 @@ def default_unicode_error_decode(errors, encoding, msg, s, startingpos, endingpos): - """NOT_RPYTHON""" if errors == 'replace': return _unicode_error_replacement, endingpos if errors == 'ignore': @@ -1574,10 +1573,17 @@ def default_unicode_error_encode(errors, encoding, msg, u, startingpos, endingpos): - """NOT_RPYTHON""" if errors == 'replace': return '?', None, endingpos if errors == 'ignore': return '', None, endingpos + + if we_are_translated(): + # The constructor for UnicodeEncodeError requires an actual unicode + # object; a Utf8Str isn't good enough. Converting a Utf8Str to a + # unicode is (somewhat arbitrarily) not RPython. Since, translated + # built-in exceptions don't care about their arguments, only do the + # conversion when not translated. 
+ raise UnicodeEncodeError() raise UnicodeEncodeError(encoding, unicode(u), startingpos, endingpos, msg) From noreply at buildbot.pypy.org Thu Jul 10 17:23:26 2014 From: noreply at buildbot.pypy.org (waedt) Date: Thu, 10 Jul 2014 17:23:26 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix cpyext Message-ID: <20140710152326.5C48F1C0F86@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72414:e6b1c681e8ec Date: 2014-07-09 03:30 -0500 http://bitbucket.org/pypy/pypy/changeset/e6b1c681e8ec/ Log: Fix cpyext diff --git a/pypy/interpreter/test/test_utf8.py b/pypy/interpreter/test/test_utf8.py --- a/pypy/interpreter/test/test_utf8.py +++ b/pypy/interpreter/test/test_utf8.py @@ -4,6 +4,7 @@ import sys from pypy.interpreter.utf8 import ( Utf8Str, Utf8Builder, utf8chr, utf8ord) +from rpython.rtyper.lltypesystem import rffi def build_utf8str(): builder = Utf8Builder() @@ -193,3 +194,15 @@ assert s.rsplit(maxsplit=2) == u.rsplit(None, 2) assert s.rsplit(' ', 2) == u.rsplit(' ', 2) assert s.rsplit('\n') == [s] + +def test_copy_to_wcharp(): + s = build_utf8str() + if sys.maxunicode < 0x10000: + # The last character requires a surrogate pair on narrow builds and + # so won't be converted correctly by rffi.wcharp2unicode + s = s[:-1] + + wcharp = s.copy_to_wcharp() + u = rffi.wcharp2unicode(wcharp) + rffi.free_wcharp(wcharp) + assert s == u diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -3,6 +3,7 @@ from rpython.rlib.runicode import utf8_code_length from rpython.rlib.unicodedata import unicodedb_5_2_0 as unicodedb from rpython.rlib.rarithmetic import r_uint +from rpython.rtyper.lltypesystem import rffi def utf8chr(value): # Like unichr, but returns a Utf8Str object @@ -73,6 +74,8 @@ self._len = length def index_of_char(self, char): + if char >= len(self): + return len(self.bytes) byte = 0 pos = 0 while pos < char: @@ -412,6 +415,14 @@ byte_pos -= 1 return 
byte_pos + def copy_to_wcharp(self): + # XXX Temporary solution. This won't work on correctly on systems + # where sizeof(wchar_t) == 2. Also, it copies twice. + from pypy.interpreter.utf8_codecs import unicode_encode_unicode_internal + from rpython.rlib.runicode import MAXUNICODE + bytes = unicode_encode_unicode_internal(self, len(self), 'strict') + return rffi.cast(rffi.CWCHARP, rffi.str2charp(bytes)) + class Utf8Builder(object): diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -1538,8 +1538,8 @@ if rs is not None: # py3k only errorhandler('strict', 'decimal', msg, s, collstart, collend) - for char in ru: - ch = ord(char) + for i in range(len(ru)): + ch = ORD(ru, i) if unicodedb.isspace(ch): result.append(' ') continue diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -229,7 +229,7 @@ builder = UnicodeBuilder() pos = start while pos < end: - code = ord(obj[pos]) + code = utf8ord(obj, pos) if (MAXUNICODE == 0xffff and 0xD800 <= code <= 0xDBFF and pos + 1 < end and 0xDC00 <= ord(obj[pos+1]) <= 0xDFFF): code = (code & 0x03FF) << 10 diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -188,7 +188,7 @@ w_u = api.PyUnicode_DecodeUTF8(u, 2, None) assert space.type(w_u) is space.w_unicode - assert space.unwrap(w_u) == 'sp' + assert space.unwrap(w_u) == u'sp' rffi.free_charp(u) def test_encode_utf8(self, space, api): @@ -296,7 +296,7 @@ w_u = space.wrap(u'a') assert api.PyUnicode_FromObject(w_u) is w_u assert space.unwrap( - api.PyUnicode_FromObject(space.wrap('test'))) == 'test' + api.PyUnicode_FromObject(space.wrap('test'))) == u'test' def test_decode(self, space, api): b_text = 
rffi.str2charp('caf\x82xx') @@ -306,7 +306,7 @@ w_text = api.PyUnicode_FromEncodedObject(space.wrap("test"), b_encoding, None) assert space.isinstance_w(w_text, space.w_unicode) - assert space.unwrap(w_text) == "test" + assert space.unwrap(w_text) == u"test" assert api.PyUnicode_FromEncodedObject(space.wrap(u"test"), b_encoding, None) is None assert api.PyErr_Occurred() is space.w_TypeError diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -1,4 +1,5 @@ from pypy.interpreter.error import OperationError +from pypy.interpreter import utf8_codecs from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.unicodedata import unicodedb from pypy.module.cpyext.api import ( @@ -208,7 +209,7 @@ # Copy unicode buffer w_unicode = from_ref(space, ref) u = space.unicode_w(w_unicode) - ref_unicode.c_buffer = rffi.unicode2wcharp(u) + ref_unicode.c_buffer = u.copy_to_wcharp() return ref_unicode.c_buffer @cpython_api([PyObject], rffi.CWCHARP) @@ -552,7 +553,7 @@ else: errors = None - result, length, byteorder = runicode.str_decode_utf_16_helper( + result, length, byteorder = utf8_codecs.str_decode_utf_16_helper( string, size, errors, True, # final ? false for multiple passes? None, # errorhandler @@ -608,7 +609,7 @@ else: errors = None - result, length, byteorder = runicode.str_decode_utf_32_helper( + result, length, byteorder = utf8_codecs.str_decode_utf_32_helper( string, size, errors, True, # final ? false for multiple passes? None, # errorhandler @@ -640,7 +641,7 @@ else: errors = None state = space.fromcache(CodecState) - result = runicode.unicode_encode_decimal(u, length, errors, + result = utf8_codecs.unicode_encode_decimal(u, length, errors, state.encode_error_handler) i = len(result) output[i] = '\0' @@ -691,10 +692,12 @@ suffix match), 0 otherwise. 
Return -1 if an error occurred.""" str = space.unicode_w(w_str) substr = space.unicode_w(w_substr) + start = str.index_of_char(start) + end = str.index_of_char(end) if rffi.cast(lltype.Signed, direction) <= 0: - return rstring.startswith(str, substr, start, end) + return rstring.startswith(str.bytes, substr.bytes, start, end) else: - return rstring.endswith(str, substr, start, end) + return rstring.endswith(str.bytes, substr.bytes, start, end) @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t], Py_ssize_t, error=-1) def PyUnicode_Count(space, w_str, w_substr, start, end): From noreply at buildbot.pypy.org Thu Jul 10 17:46:37 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Thu, 10 Jul 2014 17:46:37 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: Integrated Benchmarks into Image Message-ID: <20140710154637.186FE1C021D@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: stmgc-c7 Changeset: r892:2152a5634111 Date: 2014-07-10 17:39 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2152a5634111/ Log: Integrated Benchmarks into Image diff too long, truncating to 2000 out of 32523 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -758,4 +758,14340 @@ self fieldNew: swapField. ]. - ^ self field! ! ----QUIT----{22 May 2014 . 3:33:07 pm} Squeak4.5-12568.image priorSource: 93437! ----STARTUP----{22 May 2014 . 3:33:13 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:33'! gameLifeOfLife STMSimulation benchmark.! ! ----QUIT----{22 May 2014 . 3:34:03 pm} Squeak4.5-12568.image priorSource: 110218! ----STARTUP----{22 May 2014 . 3:34:57 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:35'! gameOfLife STMSimulation benchmark.! ! 
Integer removeSelector: #gameLifeOfLife! ----QUIT----{22 May 2014 . 3:35:14 pm} Squeak4.5-12568.image priorSource: 110526! ----STARTUP----{22 May 2014 . 3:36:22 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:36' prior: 33665224! gameOfLife STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:36:45 pm} Squeak4.5-12568.image priorSource: 110873! ----STARTUP----{22 May 2014 . 3:36:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:36:53 pm} Squeak4.5-12568.image priorSource: 111195! ----STARTUP----{22 May 2014 . 3:36:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:37' prior: 33665587! gameOfLife SPyVM print: STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:37:32 pm} Squeak4.5-12568.image priorSource: 111392! ----STARTUP----{22 May 2014 . 3:38:15 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:38:35 pm} Squeak4.5-12568.image priorSource: 111727! ----STARTUP----{2 June 2014 . 12:57:18 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 12:58'! benchStmParallelWarmed 10 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 12:58:21 pm} Squeak4.5-12568.image priorSource: 111924! ----STARTUP----{26 June 2014 . 2:47:09 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:46:54 pm'! Object subclass: #OSLock instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #OSLock instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! 
!OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 14:25'! lock ! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 14:26'! release ! ! ----End fileIn of a stream----! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:46:45 pm'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:48' prior: 33647508! parallelForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p wait! ! ----End fileIn of D:\code\python\spy-vm\lang-smalltalk\images\Integer-parallelForkTest.st----! ----QUIT----{26 June 2014 . 2:47:49 pm} Squeak4.5-12568.image priorSource: 112268! ----STARTUP----{26 June 2014 . 2:49:11 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:46:45 pm'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:48' prior: 33667646! parallelForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p wait! ! ----End fileIn of D:\code\python\spy-vm\lang-smalltalk\images\Integer-parallelForkTest.st----! ----SNAPSHOT----{26 June 2014 . 2:49:57 pm} Squeak4.5-12568.image priorSource: 113431! ----QUIT----{26 June 2014 . 2:50 pm} Squeak4.5-12568.image priorSource: 114022! ----STARTUP----{26 June 2014 . 2:52:02 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:51:52 pm'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 14:43'! osLockTest | lock process1 process2 process2lock | lock := OSLock new. lock lock. process2lock := OSLock new. process2lock lock. process1 := [ SPyVM print: 'First process start'. lock lock. process2lock release. SPyVM print: 'First process after lock'. lock release. ] parallelFork . process2 := [ SPyVM print: 'Second process start'. process2lock lock. lock lock. 
SPyVM print: 'Second process after lock'. process2lock release. lock release. ] parallelFork . SPyVM print: 'Processes initialized.'. lock release. process1 wait. process2 wait.! ! ----End fileIn of a stream----! ----QUIT----{26 June 2014 . 2:53:18 pm} Squeak4.5-12568.image priorSource: 114110! ----STARTUP----{26 June 2014 . 3:02:03 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:02'! osLockEasyTest | lock | lock := OSLock new. lock lock. lock release. SPyVM print: 'Survived lock.'! ! ----QUIT----{26 June 2014 . 3:02:52 pm} Squeak4.5-12568.image priorSource: 115096! ----STARTUP----{26 June 2014 . 3:03:40 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33667310! lock SPyVM print: '* STM Process did not fork *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33667409! release SPyVM print: '* STM Process did not fork *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33670408! release SPyVM print: '* OS Lock could not be released *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33670196! lock SPyVM print: '* OS Lock could not be locked *' , Character cr. self primitiveFailed. self resume! ! ----QUIT----{26 June 2014 . 3:04:45 pm} Squeak4.5-12568.image priorSource: 115476! ----STARTUP----{26 June 2014 . 3:08:07 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:08' prior: 33669797! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived lock.'! ! ----QUIT----{26 June 2014 . 
3:08:24 pm} Squeak4.5-12568.image priorSource: 116537! ----STARTUP----{26 June 2014 . 3:09:05 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:09' prior: 33671254! osLockEasyTest | lock | lock := OSLock new. lock lock. lock release. SPyVM print: 'Survived lock.'! ! ----QUIT----{26 June 2014 . 3:09:17 pm} Squeak4.5-12568.image priorSource: 116916! ----STARTUP----{26 June 2014 . 3:57:38 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:57' prior: 33671633! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived lock.' lock release. SPyVM print: 'Survived lock.'! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:58' prior: 33672027! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived lock.'. lock release. SPyVM print: 'Survived lock.'. ^ self! ! ----QUIT----{26 June 2014 . 3:58:58 pm} Squeak4.5-12568.image priorSource: 117310! ----STARTUP----{26 June 2014 . 3:59:21 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:59' prior: 33672253! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived locking.'. lock release. SPyVM print: 'Survived releasing.'. ^ self! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:59' prior: 33672690! osLockEasyTest | lock1 | lock1 := OSLock new. lock1 lock. SPyVM print: 'Survived locking.'. lock1 release. SPyVM print: 'Survived releasing.'. ^ self! ! ----QUIT----{26 June 2014 . 4:00 pm} Squeak4.5-12568.image priorSource: 117973! ----STARTUP----{26 June 2014 . 4:07:56 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09' prior: 33670843! lock self internalLock ! ! 
!OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09'! internalLock SPyVM print: '* OS Lock could not be locked *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09'! internalRelease SPyVM print: '* OS Lock could not be released *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09' prior: 33670623! release self internalRelease! ! ----QUIT----{26 June 2014 . 4:09:34 pm} Squeak4.5-12568.image priorSource: 118668! \ No newline at end of file + ^ self field! ! ----QUIT----{22 May 2014 . 3:33:07 pm} Squeak4.5-12568.image priorSource: 93437! ----STARTUP----{22 May 2014 . 3:33:13 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:33'! gameLifeOfLife STMSimulation benchmark.! ! ----QUIT----{22 May 2014 . 3:34:03 pm} Squeak4.5-12568.image priorSource: 110218! ----STARTUP----{22 May 2014 . 3:34:57 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:35'! gameOfLife STMSimulation benchmark.! ! Integer removeSelector: #gameLifeOfLife! ----QUIT----{22 May 2014 . 3:35:14 pm} Squeak4.5-12568.image priorSource: 110526! ----STARTUP----{22 May 2014 . 3:36:22 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:36' prior: 33665224! gameOfLife STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:36:45 pm} Squeak4.5-12568.image priorSource: 110873! ----STARTUP----{22 May 2014 . 3:36:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:36:53 pm} Squeak4.5-12568.image priorSource: 111195! ----STARTUP----{22 May 2014 . 3:36:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! 
!Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:37' prior: 33665587! gameOfLife SPyVM print: STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:37:32 pm} Squeak4.5-12568.image priorSource: 111392! ----STARTUP----{22 May 2014 . 3:38:15 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:38:35 pm} Squeak4.5-12568.image priorSource: 111727! ----STARTUP----{22 May 2014 . 3:48:40 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{22 May 2014 . 3:48:43 pm} Squeak4.5-12568.image priorSource: 111924! ----STARTUP----{22 May 2014 . 4:20:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:21' prior: 33603722! benchStmParallel | sum num threads max start localSums | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:25' prior: 33666837! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. 
SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:26' prior: 33667525! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:26' prior: 33668262! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:26' prior: 33669000! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. 
threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:26:33 pm} Squeak4.5-12568.image priorSource: 111924! ----STARTUP----{22 May 2014 . 4:28:08 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:28' prior: 33669737! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:28:57 pm} Squeak4.5-12568.image priorSource: 115957! ----STARTUP----{22 May 2014 . 4:30:39 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:31' prior: 33670666! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. 
^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:31:47 pm} Squeak4.5-12568.image priorSource: 116913! ----STARTUP----{22 May 2014 . 4:37:03 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 4:37:21 pm} Squeak4.5-12568.image priorSource: 117878! ----STARTUP----{22 May 2014 . 4:37:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:38' prior: 33671622! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:38' prior: 33672784! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:38:37 pm} Squeak4.5-12568.image priorSource: 118075! ----STARTUP----{22 May 2014 . 
4:39:37 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! #('1' '2' '3')! #('1' '2' '3') at: 2! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:49' prior: 33673552! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads add: [ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ]parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:49' prior: 33674559! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads add: [ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:49:24 pm} Squeak4.5-12568.image priorSource: 119810! ----STARTUP----{22 May 2014 . 
4:49:54 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:50' prior: 33675489! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads add: [ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ] parallelFork. SPyVM print: 'bar'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:50:32 pm} Squeak4.5-12568.image priorSource: 121907! ----STARTUP----{22 May 2014 . 4:50:54 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:52' prior: 33676616! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'bar'. threads add: [ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. 
SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:52' prior: 33677764! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'bar'. threads add: [ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ] parallelFork. SPyVM print: 'bar2'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:52:16 pm} Squeak4.5-12568.image priorSource: 123055! ----STARTUP----{22 May 2014 . 4:52:32 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:52' prior: 33678717! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads add: ([ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ] parallelFork). SPyVM print: 'bar2'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. 
SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:52:57 pm} Squeak4.5-12568.image priorSource: 125181! ----STARTUP----{22 May 2014 . 4:53:15 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 4:54:05 pm} Squeak4.5-12568.image priorSource: 126335! ----STARTUP----{22 May 2014 . 4:54:23 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:56' prior: 33679890! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: self-num-max. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:56' prior: 33681241! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: self-num-max. SPyVM print: self-num. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. 
localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:56:21 pm} Squeak4.5-12568.image priorSource: 126532! ----STARTUP----{22 May 2014 . 4:56:41 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:58' prior: 33682201! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:58' prior: 33683383! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ] asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. 
] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:58' prior: 33684335! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:58:37 pm} Squeak4.5-12568.image priorSource: 128674! ----STARTUP----{22 May 2014 . 5:20:50 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 5:21:16 pm} Squeak4.5-12568.image priorSource: 131728! ----STARTUP----{22 May 2014 . 5:21:29 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:21' prior: 33685288! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. 
localSums at: i put: sum. ] parallelFork ]. SPyVM print: '1'. SPyVM print: localSums asString. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:22' prior: 33686634! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: '1:'. SPyVM print: localSums asString. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '2:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:22:18 pm} Squeak4.5-12568.image priorSource: 131925! ----STARTUP----{22 May 2014 . 5:22:42 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:23' prior: 33687638! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. 
threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: '1:'. SPyVM print: threads asString. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '2:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:23:22 pm} Squeak4.5-12568.image priorSource: 134149! ----STARTUP----{22 May 2014 . 5:23:33 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:25' prior: 33688858! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '2:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----SNAPSHOT----{22 May 2014 . 5:25:10 pm} Squeak4.5-12568.image priorSource: 135367! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:25' prior: 33690076! benchStmParallel | num threads max start localSums | num := self \\ 100. 
max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. SPyVM print: threads. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '2:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:25:30 pm} Squeak4.5-12568.image priorSource: 136577! ----STARTUP----{22 May 2014 . 5:25:55 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 5:26:05 pm} Squeak4.5-12568.image priorSource: 137699! ----STARTUP----{22 May 2014 . 5:26:27 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 5:28:45 pm} Squeak4.5-12568.image priorSource: 137896! ----STARTUP----{22 May 2014 . 5:29:05 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{22 May 2014 . 5:29:16 pm} Squeak4.5-12568.image priorSource: 138093! ----STARTUP----{22 May 2014 . 5:29:48 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 5:30:10 pm} Squeak4.5-12568.image priorSource: 138093! ----STARTUP----{22 May 2014 . 5:30:41 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 5:31 pm} Squeak4.5-12568.image priorSource: 138494! ----STARTUP----{22 May 2014 . 5:31:19 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! 
----QUIT/NOSAVE----{22 May 2014 . 5:31:35 pm} Squeak4.5-12568.image priorSource: 138691! ----STARTUP----{22 May 2014 . 5:32:08 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:32' prior: 33691176! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'yy'. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. SPyVM print: 'sss'. SPyVM print: threads. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside methodaaaaaaaaaaaaaaaaaa:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:32:31 pm} Squeak4.5-12568.image priorSource: 138691! ----STARTUP----{22 May 2014 . 5:32:45 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:33' prior: 33693601! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'yy'. threads asOrderedCollection add: [ | sum | sum := 0. 
((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:34:02 pm} Squeak4.5-12568.image priorSource: 140184! ----STARTUP----{22 May 2014 . 5:34:15 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:35' prior: 33694893! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'yy'. threads = threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:35' prior: 33696165! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. 
threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'yy'. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:35:23 pm} Squeak4.5-12568.image priorSource: 141456! ----STARTUP----{22 May 2014 . 5:40:42 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:42' prior: 33697250! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. SPyVM print: 'from', ( (i-1) * max) asString, 'to', (i * (max - 1)) asString. ]. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 
5:42:11 pm} Squeak4.5-12568.image priorSource: 143834! ----STARTUP----{22 May 2014 . 5:42:33 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:42' prior: 33698543! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. SPyVM print: 'from', ( (i-1) * max) asString, 'to', (i * (max - 1)) asString. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:42:55 pm} Squeak4.5-12568.image priorSource: 145152! ----STARTUP----{22 May 2014 . 5:43:27 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 5:43:55 pm} Squeak4.5-12568.image priorSource: 146468! ----STARTUP----{22 May 2014 . 5:44:27 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:45' prior: 33699861! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. 
[( ( (i-1) * max) to: (i * max - 1)) do: [ :k | sum := sum + k. ]. SPyVM print: 'from', ( (i-1) * max) asString, 'to', ((i * max) - 1) asString. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:45' prior: 33701374! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: ((i * max) - 1)) do: [ :k | sum := sum + k. ]. SPyVM print: 'from', ( (i-1) * max) asString, 'to', ((i * max) - 1) asString. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:45:21 pm} Squeak4.5-12568.image priorSource: 146665! ----STARTUP----{22 May 2014 . 5:46:08 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:46' prior: 33702491! 
benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: ((i * max) - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:46:37 pm} Squeak4.5-12568.image priorSource: 149098! ----STARTUP----{2 June 2014 . 11:04:06 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:06'! benchStmParallelWarmed | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: ((i * max) - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:06' prior: 33704928! benchStmParallelWarmed ! ! 
!Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:09' prior: 33705873! benchStmParallelWarmed 3 timesRepeat: [1 benchStmParallel].! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:09' prior: 33705981! benchStmParallelWarmed 3 timesRepeat: [SPyVM print: 1 benchStmParallel].! ! ----SNAPSHOT----{2 June 2014 . 11:09:59 am} Squeak4.5-12568.image priorSource: 150235! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:10' prior: 33706125! benchStmParallelWarmed 3 timesRepeat: [1 benchStmParallel].! ! ----SNAPSHOT----{2 June 2014 . 11:10:29 am} Squeak4.5-12568.image priorSource: 151771! ----QUIT----{2 June 2014 . 11:10:32 am} Squeak4.5-12568.image priorSource: 152003! ----STARTUP----{2 June 2014 . 11:20:35 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:20' prior: 33706370! benchStmParallelWarmed 3 timesRepeat: [SPyVM print: (1 benchStmParallel)].! ! ----QUIT----{2 June 2014 . 11:21:01 am} Squeak4.5-12568.image priorSource: 152091! ----STARTUP----{2 June 2014 . 11:21:35 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{2 June 2014 . 11:22:41 am} Squeak4.5-12568.image priorSource: 152449! ----STARTUP----{2 June 2014 . 11:23:17 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:23' prior: 33706801! benchStmParallelWarmed 3 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 11:23:36 am} Squeak4.5-12568.image priorSource: 152648! ----STARTUP----{2 June 2014 . 11:25:39 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:25' prior: 33707358! benchStmParallelWarmed 10 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 11:25:46 am} Squeak4.5-12568.image priorSource: 153009! ----STARTUP----{2 June 2014 . 
11:25:48 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{2 June 2014 . 11:25:56 am} Squeak4.5-12568.image priorSource: 153371! ----STARTUP----{2 June 2014 . 11:26:31 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:26' prior: 33707719! benchStmParallelWarmed 100 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 11:26:36 am} Squeak4.5-12568.image priorSource: 153570! ----STARTUP----{2 June 2014 . 11:26:38 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{2 June 2014 . 11:26:42 am} Squeak4.5-12568.image priorSource: 153933! ----STARTUP----{2 June 2014 . 11:27:14 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:27' prior: 33708280! benchStmParallelWarmed 10 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 11:27:19 am} Squeak4.5-12568.image priorSource: 154132! ----STARTUP----{5 June 2014 . 1:39:24 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:45'! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:45' prior: 33709187! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. 
SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:45' prior: 33709650! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: [:t1 | SPyVM print: 'Thread reporting!!']] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:50' prior: 33710111! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: []] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 1:50:23 pm} Squeak4.5-12568.image priorSource: 154494! ----STARTUP----{5 June 2014 . 1:51:28 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:51' prior: 33710570! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: [1+1]] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 1:51:37 pm} Squeak4.5-12568.image priorSource: 156479! ----STARTUP----{5 June 2014 . 1:52:09 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:53' prior: 33711187! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: [1+1] parallelFork.] threads do: [:t | t wait]. 
SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 1:53:31 pm} Squeak4.5-12568.image priorSource: 157099! ----STARTUP----{5 June 2014 . 1:53:59 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:54' prior: 33711807! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: [1+1]] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:55' prior: 33712425! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := (1 to: self) do: [[1+1] parallelFork]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 1:55:56 pm} Squeak4.5-12568.image priorSource: 157717! ----STARTUP----{5 June 2014 . 1:55:58 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{5 June 2014 . 1:56:05 pm} Squeak4.5-12568.image priorSource: 158756! ----STARTUP----{5 June 2014 . 1:56:36 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:56' prior: 33712846! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := (1 to: self) collect: [[1+1] parallelFork]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 
1:56:46 pm} Squeak4.5-12568.image priorSource: 158953! ----STARTUP----{5 June 2014 . 1:57:51 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:58' prior: 33713661! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := (1 to: self) collect: [[SPyVM print: 'hello'.] parallelFork]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 1:58:24 pm} Squeak4.5-12568.image priorSource: 159576! ----STARTUP----{5 June 2014 . 1:58:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:59' prior: 33714284! benchStmThreadCreation | threads start | From noreply at buildbot.pypy.org Thu Jul 10 17:46:46 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Thu, 10 Jul 2014 17:46:46 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: merge Message-ID: <20140710154646.879F41C021D@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: stmgc-c7 Changeset: r893:63cabb3a874b Date: 2014-07-10 17:45 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/63cabb3a874b/ Log: merge diff too long, truncating to 2000 out of 32528 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -758,4 +758,14340 @@ self fieldNew: swapField. ]. - ^ self field! ! ----QUIT----{22 May 2014 . 3:33:07 pm} Squeak4.5-12568.image priorSource: 93437! ----STARTUP----{22 May 2014 . 3:33:13 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:33'! gameLifeOfLife STMSimulation benchmark.! ! ----QUIT----{22 May 2014 . 
3:34:03 pm} Squeak4.5-12568.image priorSource: 110218! ----STARTUP----{22 May 2014 . 3:34:57 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:35'! gameOfLife STMSimulation benchmark.! ! Integer removeSelector: #gameLifeOfLife! ----QUIT----{22 May 2014 . 3:35:14 pm} Squeak4.5-12568.image priorSource: 110526! ----STARTUP----{22 May 2014 . 3:36:22 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:36' prior: 33665224! gameOfLife STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:36:45 pm} Squeak4.5-12568.image priorSource: 110873! ----STARTUP----{22 May 2014 . 3:36:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:36:53 pm} Squeak4.5-12568.image priorSource: 111195! ----STARTUP----{22 May 2014 . 3:36:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:37' prior: 33665587! gameOfLife SPyVM print: STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:37:32 pm} Squeak4.5-12568.image priorSource: 111392! ----STARTUP----{22 May 2014 . 3:38:15 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:38:35 pm} Squeak4.5-12568.image priorSource: 111727! ----STARTUP----{2 June 2014 . 12:57:18 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 12:58'! benchStmParallelWarmed 10 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 12:58:21 pm} Squeak4.5-12568.image priorSource: 111924! ----STARTUP----{26 June 2014 . 2:47:09 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:46:54 pm'! 
Object subclass: #OSLock instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #OSLock instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 14:25'! lock ! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 14:26'! release ! ! ----End fileIn of a stream----! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:46:45 pm'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:48' prior: 33647508! parallelForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p wait! ! ----End fileIn of D:\code\python\spy-vm\lang-smalltalk\images\Integer-parallelForkTest.st----! ----QUIT----{26 June 2014 . 2:47:49 pm} Squeak4.5-12568.image priorSource: 112268! ----STARTUP----{26 June 2014 . 2:49:11 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:46:45 pm'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 14:48' prior: 33667646! parallelForkTest | p | FileStream stdout nextPutAll: 'starting stm process.'. p := [ 1 + 1. ] parallelFork. p wait! ! ----End fileIn of D:\code\python\spy-vm\lang-smalltalk\images\Integer-parallelForkTest.st----! ----SNAPSHOT----{26 June 2014 . 2:49:57 pm} Squeak4.5-12568.image priorSource: 113431! ----QUIT----{26 June 2014 . 2:50 pm} Squeak4.5-12568.image priorSource: 114022! ----STARTUP----{26 June 2014 . 2:52:02 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 26 June 2014 at 2:51:52 pm'! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 14:43'! osLockTest | lock process1 process2 process2lock | lock := OSLock new. lock lock. process2lock := OSLock new. process2lock lock. 
process1 := [ SPyVM print: 'First process start'. lock lock. process2lock release. SPyVM print: 'First process after lock'. lock release. ] parallelFork . process2 := [ SPyVM print: 'Second process start'. process2lock lock. lock lock. SPyVM print: 'Second process after lock'. process2lock release. lock release. ] parallelFork . SPyVM print: 'Processes initialized.'. lock release. process1 wait. process2 wait.! ! ----End fileIn of a stream----! ----QUIT----{26 June 2014 . 2:53:18 pm} Squeak4.5-12568.image priorSource: 114110! ----STARTUP----{26 June 2014 . 3:02:03 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:02'! osLockEasyTest | lock | lock := OSLock new. lock lock. lock release. SPyVM print: 'Survived lock.'! ! ----QUIT----{26 June 2014 . 3:02:52 pm} Squeak4.5-12568.image priorSource: 115096! ----STARTUP----{26 June 2014 . 3:03:40 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33667310! lock SPyVM print: '* STM Process did not fork *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33667409! release SPyVM print: '* STM Process did not fork *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33670408! release SPyVM print: '* OS Lock could not be released *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 15:04' prior: 33670196! lock SPyVM print: '* OS Lock could not be locked *' , Character cr. self primitiveFailed. self resume! ! ----QUIT----{26 June 2014 . 3:04:45 pm} Squeak4.5-12568.image priorSource: 115476! ----STARTUP----{26 June 2014 . 3:08:07 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! 
!Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:08' prior: 33669797! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived lock.'! ! ----QUIT----{26 June 2014 . 3:08:24 pm} Squeak4.5-12568.image priorSource: 116537! ----STARTUP----{26 June 2014 . 3:09:05 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:09' prior: 33671254! osLockEasyTest | lock | lock := OSLock new. lock lock. lock release. SPyVM print: 'Survived lock.'! ! ----QUIT----{26 June 2014 . 3:09:17 pm} Squeak4.5-12568.image priorSource: 116916! ----STARTUP----{26 June 2014 . 3:57:38 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:57' prior: 33671633! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived lock.' lock release. SPyVM print: 'Survived lock.'! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:58' prior: 33672027! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived lock.'. lock release. SPyVM print: 'Survived lock.'. ^ self! ! ----QUIT----{26 June 2014 . 3:58:58 pm} Squeak4.5-12568.image priorSource: 117310! ----STARTUP----{26 June 2014 . 3:59:21 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:59' prior: 33672253! osLockEasyTest | lock | lock := OSLock new. lock lock. SPyVM print: 'Survived locking.'. lock release. SPyVM print: 'Survived releasing.'. ^ self! ! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'pre 6/26/2014 15:59' prior: 33672690! osLockEasyTest | lock1 | lock1 := OSLock new. lock1 lock. SPyVM print: 'Survived locking.'. lock1 release. SPyVM print: 'Survived releasing.'. ^ self! ! ----QUIT----{26 June 2014 . 4:00 pm} Squeak4.5-12568.image priorSource: 117973! ----STARTUP----{26 June 2014 . 
4:07:56 pm} as D:\code\python\spy-vm\lang-smalltalk\images\Squeak4.5-12568.image! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09' prior: 33670843! lock self internalLock ! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09'! internalLock SPyVM print: '* OS Lock could not be locked *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09'! internalRelease SPyVM print: '* OS Lock could not be released *' , Character cr. self primitiveFailed. self resume! ! !OSLock methodsFor: 'as yet unclassified' stamp: 'pre 6/26/2014 16:09' prior: 33670623! release self internalRelease! ! ----QUIT----{26 June 2014 . 4:09:34 pm} Squeak4.5-12568.image priorSource: 118668! \ No newline at end of file + ^ self field! ! ----QUIT----{22 May 2014 . 3:33:07 pm} Squeak4.5-12568.image priorSource: 93437! ----STARTUP----{22 May 2014 . 3:33:13 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:33'! gameLifeOfLife STMSimulation benchmark.! ! ----QUIT----{22 May 2014 . 3:34:03 pm} Squeak4.5-12568.image priorSource: 110218! ----STARTUP----{22 May 2014 . 3:34:57 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:35'! gameOfLife STMSimulation benchmark.! ! Integer removeSelector: #gameLifeOfLife! ----QUIT----{22 May 2014 . 3:35:14 pm} Squeak4.5-12568.image priorSource: 110526! ----STARTUP----{22 May 2014 . 3:36:22 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:36' prior: 33665224! gameOfLife STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:36:45 pm} Squeak4.5-12568.image priorSource: 110873! ----STARTUP----{22 May 2014 . 3:36:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! 
----QUIT----{22 May 2014 . 3:36:53 pm} Squeak4.5-12568.image priorSource: 111195! ----STARTUP----{22 May 2014 . 3:36:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 5/22/2014 15:37' prior: 33665587! gameOfLife SPyVM print: STMSimulation benchmark2.! ! ----QUIT----{22 May 2014 . 3:37:32 pm} Squeak4.5-12568.image priorSource: 111392! ----STARTUP----{22 May 2014 . 3:38:15 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 3:38:35 pm} Squeak4.5-12568.image priorSource: 111727! ----STARTUP----{22 May 2014 . 3:48:40 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{22 May 2014 . 3:48:43 pm} Squeak4.5-12568.image priorSource: 111924! ----STARTUP----{22 May 2014 . 4:20:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:21' prior: 33603722! benchStmParallel | sum num threads max start localSums | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:25' prior: 33666837! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. 
[((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:26' prior: 33667525! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:26' prior: 33668262! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:26' prior: 33669000! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. 
start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:26:33 pm} Squeak4.5-12568.image priorSource: 111924! ----STARTUP----{22 May 2014 . 4:28:08 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:28' prior: 33669737! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:28:57 pm} Squeak4.5-12568.image priorSource: 115957! ----STARTUP----{22 May 2014 . 4:30:39 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:31' prior: 33670666! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. 
threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:31:47 pm} Squeak4.5-12568.image priorSource: 116913! ----STARTUP----{22 May 2014 . 4:37:03 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 4:37:21 pm} Squeak4.5-12568.image priorSource: 117878! ----STARTUP----{22 May 2014 . 4:37:56 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:38' prior: 33671622! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * max - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:38' prior: 33672784! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. 
SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:38:37 pm} Squeak4.5-12568.image priorSource: 118075! ----STARTUP----{22 May 2014 . 4:39:37 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! #('1' '2' '3')! #('1' '2' '3') at: 2! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:49' prior: 33673552! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads add: [ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ]parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:49' prior: 33674559! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads add: [ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. 
^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:49:24 pm} Squeak4.5-12568.image priorSource: 119810! ----STARTUP----{22 May 2014 . 4:49:54 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:50' prior: 33675489! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads add: [ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ] parallelFork. SPyVM print: 'bar'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:50:32 pm} Squeak4.5-12568.image priorSource: 121907! ----STARTUP----{22 May 2014 . 4:50:54 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:52' prior: 33676616! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'bar'. threads add: [ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ] parallelFork. 
threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:52' prior: 33677764! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'bar'. threads add: [ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ] parallelFork. SPyVM print: 'bar2'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:52:16 pm} Squeak4.5-12568.image priorSource: 123055! ----STARTUP----{22 May 2014 . 4:52:32 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:52' prior: 33678717! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads add: ([ | sum | sum := 0. [((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]]. localSums at: num put: sum. ] parallelFork). SPyVM print: 'bar2'. 
threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:52:57 pm} Squeak4.5-12568.image priorSource: 125181! ----STARTUP----{22 May 2014 . 4:53:15 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 4:54:05 pm} Squeak4.5-12568.image priorSource: 126335! ----STARTUP----{22 May 2014 . 4:54:23 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:56' prior: 33679890! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: self-num-max. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:56' prior: 33681241! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. 
] parallelFork ]. SPyVM print: self-num-max. SPyVM print: self-num. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:56:21 pm} Squeak4.5-12568.image priorSource: 126532! ----STARTUP----{22 May 2014 . 4:56:41 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:58' prior: 33682201! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:58' prior: 33683383! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. 
] parallelFork ] asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 16:58' prior: 33684335! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 4:58:37 pm} Squeak4.5-12568.image priorSource: 128674! ----STARTUP----{22 May 2014 . 5:20:50 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 5:21:16 pm} Squeak4.5-12568.image priorSource: 131728! ----STARTUP----{22 May 2014 . 5:21:29 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:21' prior: 33685288! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. 
start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: '1'. SPyVM print: localSums asString. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:22' prior: 33686634! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: '1:'. SPyVM print: localSums asString. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '2:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:22:18 pm} Squeak4.5-12568.image priorSource: 131925! ----STARTUP----{22 May 2014 . 5:22:42 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:23' prior: 33687638! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. 
SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: '1:'. SPyVM print: threads asString. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '2:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:23:22 pm} Squeak4.5-12568.image priorSource: 134149! ----STARTUP----{22 May 2014 . 5:23:33 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:25' prior: 33688858! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '2:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----SNAPSHOT----{22 May 2014 . 5:25:10 pm} Squeak4.5-12568.image priorSource: 135367! 
!Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:25' prior: 33690076! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. SPyVM print: threads. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '2:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:25:30 pm} Squeak4.5-12568.image priorSource: 136577! ----STARTUP----{22 May 2014 . 5:25:55 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 5:26:05 pm} Squeak4.5-12568.image priorSource: 137699! ----STARTUP----{22 May 2014 . 5:26:27 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 5:28:45 pm} Squeak4.5-12568.image priorSource: 137896! ----STARTUP----{22 May 2014 . 5:29:05 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{22 May 2014 . 5:29:16 pm} Squeak4.5-12568.image priorSource: 138093! ----STARTUP----{22 May 2014 . 5:29:48 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 5:30:10 pm} Squeak4.5-12568.image priorSource: 138093! ----STARTUP----{22 May 2014 . 5:30:41 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 
5:31 pm} Squeak4.5-12568.image priorSource: 138494! ----STARTUP----{22 May 2014 . 5:31:19 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{22 May 2014 . 5:31:35 pm} Squeak4.5-12568.image priorSource: 138691! ----STARTUP----{22 May 2014 . 5:32:08 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:32' prior: 33691176! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'yy'. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. SPyVM print: 'sss'. SPyVM print: threads. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside methodaaaaaaaaaaaaaaaaaa:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:32:31 pm} Squeak4.5-12568.image priorSource: 138691! ----STARTUP----{22 May 2014 . 5:32:45 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:33' prior: 33693601! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. 
[( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'yy'. threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:34:02 pm} Squeak4.5-12568.image priorSource: 140184! ----STARTUP----{22 May 2014 . 5:34:15 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:35' prior: 33694893! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'yy'. threads = threads asOrderedCollection add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:35' prior: 33696165! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. 
SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. SPyVM print: 'yy'. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. SPyVM print: 'xx'. SPyVM print: sum. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:35:23 pm} Squeak4.5-12568.image priorSource: 141456! ----STARTUP----{22 May 2014 . 5:40:42 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:42' prior: 33697250! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. SPyVM print: 'from', ( (i-1) * max) asString, 'to', (i * (max - 1)) asString. ]. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. 
SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:42:11 pm} Squeak4.5-12568.image priorSource: 143834! ----STARTUP----{22 May 2014 . 5:42:33 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:42' prior: 33698543! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * (max - 1))) do: [ :k | sum := sum + k. ]. SPyVM print: 'from', ( (i-1) * max) asString, 'to', (i * (max - 1)) asString. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:42:55 pm} Squeak4.5-12568.image priorSource: 145152! ----STARTUP----{22 May 2014 . 5:43:27 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{22 May 2014 . 5:43:55 pm} Squeak4.5-12568.image priorSource: 146468! ----STARTUP----{22 May 2014 . 5:44:27 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:45' prior: 33699861! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. 
SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: (i * max - 1)) do: [ :k | sum := sum + k. ]. SPyVM print: 'from', ( (i-1) * max) asString, 'to', ((i * max) - 1) asString. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:45' prior: 33701374! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: ((i * max) - 1)) do: [ :k | sum := sum + k. ]. SPyVM print: 'from', ( (i-1) * max) asString, 'to', ((i * max) - 1) asString. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. SPyVM print: threads asString, ' threads'. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. SPyVM print: '23:'. SPyVM print: localSums asString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:45:21 pm} Squeak4.5-12568.image priorSource: 146665! 
----STARTUP----{22 May 2014 . 5:46:08 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 5/22/2014 17:46' prior: 33702491! benchStmParallel | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: ((i * max) - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! ----QUIT----{22 May 2014 . 5:46:37 pm} Squeak4.5-12568.image priorSource: 149098! ----STARTUP----{2 June 2014 . 11:04:06 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:06'! benchStmParallelWarmed | num threads max start localSums | num := self \\ 100. max := (self - num) // num. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). localSums := Array new: num. start := Time now asNanoSeconds. threads := (1 to: num-1) collect: [ :i | | sum | sum := 0. [( ( (i-1) * max) to: ((i * max) - 1)) do: [ :k | sum := sum + k. ]. localSums at: i put: sum. ] parallelFork ]. threads := threads asOrderedCollection. threads add: [ | sum | sum := 0. ((self-num-max) to: self-num) do: [ :k | sum := sum + k. ]. localSums at: num put: sum. ] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. 
SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ localSums sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:06' prior: 33704928! benchStmParallelWarmed ! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:09' prior: 33705873! benchStmParallelWarmed 3 timesRepeat: [1 benchStmParallel].! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:09' prior: 33705981! benchStmParallelWarmed 3 timesRepeat: [SPyVM print: 1 benchStmParallel].! ! ----SNAPSHOT----{2 June 2014 . 11:09:59 am} Squeak4.5-12568.image priorSource: 150235! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:10' prior: 33706125! benchStmParallelWarmed 3 timesRepeat: [1 benchStmParallel].! ! ----SNAPSHOT----{2 June 2014 . 11:10:29 am} Squeak4.5-12568.image priorSource: 151771! ----QUIT----{2 June 2014 . 11:10:32 am} Squeak4.5-12568.image priorSource: 152003! ----STARTUP----{2 June 2014 . 11:20:35 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:20' prior: 33706370! benchStmParallelWarmed 3 timesRepeat: [SPyVM print: (1 benchStmParallel)].! ! ----QUIT----{2 June 2014 . 11:21:01 am} Squeak4.5-12568.image priorSource: 152091! ----STARTUP----{2 June 2014 . 11:21:35 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{2 June 2014 . 11:22:41 am} Squeak4.5-12568.image priorSource: 152449! ----STARTUP----{2 June 2014 . 11:23:17 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:23' prior: 33706801! benchStmParallelWarmed 3 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 11:23:36 am} Squeak4.5-12568.image priorSource: 152648! ----STARTUP----{2 June 2014 . 11:25:39 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:25' prior: 33707358! 
benchStmParallelWarmed 10 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 11:25:46 am} Squeak4.5-12568.image priorSource: 153009! ----STARTUP----{2 June 2014 . 11:25:48 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{2 June 2014 . 11:25:56 am} Squeak4.5-12568.image priorSource: 153371! ----STARTUP----{2 June 2014 . 11:26:31 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:26' prior: 33707719! benchStmParallelWarmed 100 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 11:26:36 am} Squeak4.5-12568.image priorSource: 153570! ----STARTUP----{2 June 2014 . 11:26:38 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{2 June 2014 . 11:26:42 am} Squeak4.5-12568.image priorSource: 153933! ----STARTUP----{2 June 2014 . 11:27:14 am} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/2/2014 11:27' prior: 33708280! benchStmParallelWarmed 10 timesRepeat: [SPyVM print: (self benchStmParallel)].! ! ----QUIT----{2 June 2014 . 11:27:19 am} Squeak4.5-12568.image priorSource: 154132! ----STARTUP----{5 June 2014 . 1:39:24 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:45'! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:45' prior: 33709187! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). 
start := Time now asNanoSeconds. threads := [(1 to: self) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:45' prior: 33709650! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: [:t1 | SPyVM print: 'Thread reporting!!']] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:50' prior: 33710111! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: []] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 1:50:23 pm} Squeak4.5-12568.image priorSource: 154494! ----STARTUP----{5 June 2014 . 1:51:28 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:51' prior: 33710570! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: [1+1]] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 1:51:37 pm} Squeak4.5-12568.image priorSource: 156479! ----STARTUP----{5 June 2014 . 1:52:09 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:53' prior: 33711187! 
benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: [1+1] parallelFork.] threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 1:53:31 pm} Squeak4.5-12568.image priorSource: 157099! ----STARTUP----{5 June 2014 . 1:53:59 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:54' prior: 33711807! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := [(1 to: self) do: [1+1]] parallelFork. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:55' prior: 33712425! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := (1 to: self) do: [[1+1] parallelFork]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 1:55:56 pm} Squeak4.5-12568.image priorSource: 157717! ----STARTUP----{5 June 2014 . 1:55:58 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT----{5 June 2014 . 1:56:05 pm} Squeak4.5-12568.image priorSource: 158756! ----STARTUP----{5 June 2014 . 1:56:36 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:56' prior: 33712846! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := (1 to: self) collect: [[1+1] parallelFork]. 
threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 1:56:46 pm} Squeak4.5-12568.image priorSource: 158953! ----STARTUP----{5 June 2014 . 1:57:51 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:58' prior: 33713661! benchStmThreadCreation | threads start | SPyVM print: ('Threads:', (self printString)). start := Time now asNanoSeconds. threads := (1 to: self) collect: [[SPyVM print: 'hello'.] parallelFork]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString.! ! ----QUIT----{5 June 2014 . 1:58:24 pm} Squeak4.5-12568.image priorSource: 159576! ----STARTUP----{5 June 2014 . 1:58:49 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'hh 6/5/2014 13:59' prior: 33714284! benchStmThreadCreation | threads start | From noreply at buildbot.pypy.org Thu Jul 10 18:25:13 2014 From: noreply at buildbot.pypy.org (Conrad Calmez) Date: Thu, 10 Jul 2014 18:25:13 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: fixed OS lock primitives so that they raise a PrimitiveFailedError in case of misuse Message-ID: <20140710162513.95D5E1D24C1@cobra.cs.uni-duesseldorf.de> Author: Conrad Calmez Branch: stmgc-c7 Changeset: r894:9b35a2b871a0 Date: 2014-07-10 18:25 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/9b35a2b871a0/ Log: fixed OS lock primitives so that they raise a PrimitiveFailedError in case of misuse credit goes to: @hubx diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -15094,4 +15094,4 @@ ifFalse: [ self organizeMeetingWith: chameleon. first := nil. ] ] atomic value. 
" tmpString := 'A Chameneos was in the meeting place and first is now: ' . SPyVM print: tmpString. "! ! !CPBChameneos methodsFor: 'as yet unclassified' stamp: 'hh 7/10/2014 15:12' prior: 34771049! run: meetingPlace [ color == #faded ] whileFalse: [ "SPyVM print: 'Chameneos goes to meeting place' , self color." meetingPlace reachedBy: self. - waitingForPair lock. "SPyVM print: 'Chameneos met another one and releases lock' , self color." ]! ! ----QUIT----{10 July 2014 . 3:13 pm} Squeak4.5-benchmarks.image priorSource: 1252802! ----STARTUP----{10 July 2014 . 3:16:52 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 3:17:21 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----QUIT/NOSAVE----{10 July 2014 . 1:19:47 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----STARTUP----{10 July 2014 . 5:00:09 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 7/10/2014 17:01'! OSLockTest | lock | lock := OSLock new. lock release.! ! ----QUIT----{10 July 2014 . 5:01:29 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----STARTUP----{10 July 2014 . 5:04:01 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 5:05:51 pm} Squeak4.5-benchmarks.image priorSource: 1254571! ----STARTUP----{10 July 2014 . 5:35:54 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 5:36:06 pm} Squeak4.5-benchmarks.image priorSource: 1254571! ----STARTUP----{10 July 2014 . 5:44:18 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{10 July 2014 . 5:44:25 pm} Squeak4.5-12568.image priorSource: 1254571! \ No newline at end of file + waitingForPair lock. "SPyVM print: 'Chameneos met another one and releases lock' , self color." ]! ! ----QUIT----{10 July 2014 . 
3:13 pm} Squeak4.5-benchmarks.image priorSource: 1252802! ----STARTUP----{10 July 2014 . 3:16:52 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 3:17:21 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----QUIT/NOSAVE----{10 July 2014 . 1:19:47 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----STARTUP----{10 July 2014 . 5:00:09 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 7/10/2014 17:01'! OSLockTest | lock | lock := OSLock new. lock release.! ! ----QUIT----{10 July 2014 . 5:01:29 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----STARTUP----{10 July 2014 . 5:04:01 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 5:05:51 pm} Squeak4.5-benchmarks.image priorSource: 1254571! ----STARTUP----{10 July 2014 . 5:35:54 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 5:36:06 pm} Squeak4.5-benchmarks.image priorSource: 1254571! ----STARTUP----{10 July 2014 . 5:44:18 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{10 July 2014 . 5:44:25 pm} Squeak4.5-12568.image priorSource: 1254571! ----QUIT/NOSAVE----{10 July 2014 . 6:11:07 pm} Squeak4.5-12568.image priorSource: 1254571! ----STARTUP----{10 July 2014 . 6:11:32 pm} as /Users/conrad/Repositories/master-project/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{10 July 2014 . 6:22:16 pm} Squeak4.5-12568.image priorSource: 1254571! 
\ No newline at end of file diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1548,23 +1548,29 @@ @expose_primitive(OS_LOCK_LOCK, unwrap_spec=[object], no_result=True) def func(interp, s_frame, w_rcvr): - from rpython.rlib import rthread + from rpython.rlib.rthread import RThreadError if not isinstance(w_rcvr, model.W_PointersObject): - raise PrimitiveFailedError("OS_LOCK_LOCK primitive was not called on an OSLock Object") + raise PrimitiveFailedError("OS_LOCK_LOCK primitive was not called on an OSLock Object") lock_shadow = w_rcvr.as_special_get_shadow(interp.space, shadow.OSLockShadow) - lock_shadow.os_lock() + try: + lock_shadow.os_lock() + except RThreadError as e: + raise PrimitiveFailedError("OS_LOCK_LOCK primitive failed: " + str(e)) @expose_primitive(OS_LOCK_RELEASE, unwrap_spec=[object], no_result=True) def func(interp, s_frame, w_rcvr): - from rpython.rlib import rthread + from rpython.rlib.rthread import RThreadError if not isinstance(w_rcvr, model.W_PointersObject): - raise PrimitiveFailedError("OS_LOCK_LOCK primitive was not called on an OSLock Object") + raise PrimitiveFailedError("OS_LOCK_LOCK primitive was not called on an OSLock Object") lock_shadow = w_rcvr.as_special_get_shadow(interp.space, shadow.OSLockShadow) - lock_shadow.os_release() + try: + lock_shadow.os_release() + except RThreadError as e: + raise PrimitiveFailedError("OS_LOCK_RELEASE primitive failed: " + str(e)) # ___________________________________________________________________________ # BlockClosure Primitives From noreply at buildbot.pypy.org Fri Jul 11 04:57:26 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Fri, 11 Jul 2014 04:57:26 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: add test case for no clean up nursery Message-ID: <20140711025726.5D12B1D2326@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72415:eb53af1749dd Date: 2014-07-10 22:54 +0000 
http://bitbucket.org/pypy/pypy/changeset/eb53af1749dd/ Log: add test case for no clean up nursery diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -428,7 +428,7 @@ # the nursery than really needed, to simplify pointer arithmetic # in malloc_fixedsize_clear(). The few extra pages are never used # anyway so it doesn't even count. - nursery = llarena.arena_malloc(self._nursery_memory_size(), 2) + nursery = llarena.arena_malloc(self._nursery_memory_size(), 0) if not nursery: raise MemoryError("cannot allocate nursery") return nursery From noreply at buildbot.pypy.org Fri Jul 11 04:57:27 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Fri, 11 Jul 2014 04:57:27 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: and test case for no clean up nursery Message-ID: <20140711025727.AE1A51D2326@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72416:a99dec1483ec Date: 2014-07-10 22:55 +0000 http://bitbucket.org/pypy/pypy/changeset/a99dec1483ec/ Log: and test case for no clean up nursery diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -11,7 +11,7 @@ from rpython.memory.gctypelayout import TypeLayoutBuilder from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int from rpython.memory.gc import minimark, incminimark -from rpython.memory.gctypelayout import zero_gc_pointers, zero_gc_pointers_inside +from rpython.memory.gctypelayout import zero_gc_pointers_inside WORD = LONG_BIT // 8 ADDR_ARRAY = lltype.Array(llmemory.Address) @@ -108,12 +108,13 @@ addr = self.gc.malloc(self.get_type_id(TYPE), n, zero=True) obj_ptr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE)) #TODO: only zero fields if there is gc filed add something like has_gc_ptr() - zero_gc_pointers_inside(obj_ptr, 
TYPE) + if not self.gc.malloc_zero_filled: + zero_gc_pointers_inside(obj_ptr, TYPE) return obj_ptr class DirectGCTest(BaseDirectGCTest): - + def test_simple(self): p = self.malloc(S) p.x = 5 @@ -666,3 +667,9 @@ class TestIncrementalMiniMarkGCFull(DirectGCTest): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass + def test_no_cleanup(self): + p = self.malloc(S) + import pytest + with pytest.raises(lltype.UninitializedMemoryAccess): + x1 = p.x + \ No newline at end of file From noreply at buildbot.pypy.org Fri Jul 11 12:25:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Jul 2014 12:25:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Oops, I upgraded the _cffi_backend to version 0.8.6 but forgot that Message-ID: <20140711102517.AF6C31D2335@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72417:08d164cb3d40 Date: 2014-07-11 12:24 +0200 http://bitbucket.org/pypy/pypy/changeset/08d164cb3d40/ Log: Oops, I upgraded the _cffi_backend to version 0.8.6 but forgot that the lib_pypy/cffi directory was still checking for 0.8.2. diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.2" -__version_info__ = (0, 8, 2) +__version__ = "0.8.6" +__version_info__ = (0, 8, 6) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -55,8 +55,7 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert (backend.__version__ == __version__ or - backend.__version__ == __version__[:3]) + assert backend.__version__ == __version__ # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) 
diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -435,14 +435,14 @@ enumerator, enumerator, enumvalue)) prnt(' char buf[64];') prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % enumerator) - prnt(' snprintf(out_error, 255,' + prnt(' sprintf(out_error,' ' "%s has the real value %s, not %s",') prnt(' "%s", buf, "%d");' % ( - enumerator, enumvalue)) + enumerator[:100], enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_function.py @@ -403,3 +403,18 @@ if wr() is not None: import gc; gc.collect() assert wr() is None # 'data' does not leak + + def test_windows_stdcall(self): + if sys.platform != 'win32': + py.test.skip("Windows-only test") + if self.Backend is CTypesBackend: + py.test.skip("not with the ctypes backend") + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + p_freq = ffi.new("LONGLONG *") + res = m.QueryPerformanceFrequency(p_freq) + assert res != 0 + assert p_freq[0] != 0 diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_version.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_version.py @@ -6,18 +6,20 @@ if '_cffi_backend' in sys.builtin_module_names: py.test.skip("this is embedded version") -BACKEND_VERSIONS = { - '0.4.2': '0.4', # did not change - '0.7.1': '0.7', # did not 
change - '0.7.2': '0.7', # did not change - '0.8.1': '0.8', # did not change (essentially) - } +#BACKEND_VERSIONS = { +# '0.4.2': '0.4', # did not change +# '0.7.1': '0.7', # did not change +# '0.7.2': '0.7', # did not change +# '0.8.1': '0.8', # did not change (essentially) +# '0.8.4': '0.8.3', # did not change +# } def test_version(): v = cffi.__version__ version_info = '.'.join(str(i) for i in cffi.__version_info__) assert v == version_info - assert BACKEND_VERSIONS.get(v, v) == _cffi_backend.__version__ + #v = BACKEND_VERSIONS.get(v, v) + assert v == _cffi_backend.__version__ def test_doc_version(): parent = os.path.dirname(os.path.dirname(__file__)) @@ -48,5 +50,5 @@ v = cffi.__version__ p = os.path.join(parent, 'c', 'test_c.py') content = open(p).read() - assert (('assert __version__ == "%s"' % BACKEND_VERSIONS.get(v, v)) - in content) + #v = BACKEND_VERSIONS.get(v, v) + assert (('assert __version__ == "%s"' % v) in content) From noreply at buildbot.pypy.org Fri Jul 11 15:22:39 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 11 Jul 2014 15:22:39 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: merge default into branch Message-ID: <20140711132239.5A3B11D2335@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72418:e38f303077d3 Date: 2014-07-10 08:22 +1000 http://bitbucket.org/pypy/pypy/changeset/e38f303077d3/ Log: merge default into branch diff too long, truncating to 2000 out of 12166 lines diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -389,12 +389,13 @@ func.__name__ = name_or_ordinal return func -class PyDLL(CDLL): - """This class represents the Python library itself. It allows to - access Python API functions. The GIL is not released, and - Python exceptions are handled correctly. 
- """ - _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI +# Not in PyPy +#class PyDLL(CDLL): +# """This class represents the Python library itself. It allows to +# access Python API functions. The GIL is not released, and +# Python exceptions are handled correctly. +# """ +# _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI if _os.name in ("nt", "ce"): @@ -447,15 +448,8 @@ return self._dlltype(name) cdll = LibraryLoader(CDLL) -pydll = LibraryLoader(PyDLL) - -if _os.name in ("nt", "ce"): - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - +# not on PyPy +#pydll = LibraryLoader(PyDLL) if _os.name in ("nt", "ce"): windll = LibraryLoader(WinDLL) diff --git a/lib-python/2.7/ctypes/test/test_values.py b/lib-python/2.7/ctypes/test/test_values.py --- a/lib-python/2.7/ctypes/test/test_values.py +++ b/lib-python/2.7/ctypes/test/test_values.py @@ -4,6 +4,7 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test @@ -23,7 +24,8 @@ class Win_ValuesTestCase(unittest.TestCase): """This test only works when python itself is a dll/shared library""" - + + @xfail def test_optimizeflag(self): # This test accesses the Py_OptimizeFlag intger, which is # exported by the Python dll. @@ -40,6 +42,7 @@ else: self.assertEqual(opt, 2) + @xfail def test_frozentable(self): # Python exports a PyImport_FrozenModules symbol. This is a # pointer to an array of struct _frozen entries. 
The end of the @@ -75,6 +78,7 @@ from ctypes import _pointer_type_cache del _pointer_type_cache[struct_frozen] + @xfail def test_undefined(self): self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol") diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -98,6 +98,17 @@ self.assertTrue(key in self.g) self.assertTrue(self.g.has_key(key)) + def test_unicode_key(self): + key = u'ab' + value = u'cd' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) + def test_main(): run_unittest(TestGdbm) diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -55,11 +55,6 @@ import gc import sys import time -try: - import itertools -except ImportError: - # Must be an older Python version (see timeit() below) - itertools = None __all__ = ["Timer"] @@ -81,7 +76,8 @@ def inner(_it, _timer): %(setup)s _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 %(stmt)s _t1 = _timer() return _t1 - _t0 @@ -96,7 +92,8 @@ def inner(_it, _timer, _func=func): setup() _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 _func() _t1 = _timer() return _t1 - _t0 @@ -133,9 +130,19 @@ else: raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec code in globals(), ns - self.inner = ns["inner"] + def make_inner(): + # PyPy tweak: recompile the source code each time before + # calling inner(). 
There are situations like Issue #1776 + # where PyPy tries to reuse the JIT code from before, + # but that's not going to work: the first thing the + # function does is the "-s" statement, which may declare + # new classes (here a namedtuple). We end up with + # bridges from the inner loop; more and more of them + # every time we call inner(). + code = compile(src, dummy_src_name, "exec") + exec code in globals(), ns + return ns["inner"] + self.make_inner = make_inner elif hasattr(stmt, '__call__'): self.src = None if isinstance(setup, basestring): @@ -144,7 +151,8 @@ exec _setup in globals(), ns elif not hasattr(setup, '__call__'): raise ValueError("setup is neither a string nor callable") - self.inner = _template_func(setup, stmt) + inner = _template_func(setup, stmt) + self.make_inner = lambda: inner else: raise ValueError("stmt is neither a string nor callable") @@ -185,15 +193,12 @@ to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. 
""" - if itertools: - it = itertools.repeat(None, number) - else: - it = [None] * number + inner = self.make_inner() gcold = gc.isenabled() if '__pypy__' not in sys.builtin_module_names: gc.disable() # only do that on CPython try: - timing = self.inner(it, self.timer) + timing = inner(number, self.timer) finally: if gcold: gc.enable() diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -219,6 +219,8 @@ if restype is None: import ctypes restype = ctypes.c_int + if self._argtypes_ is None: + self._argtypes_ = [] self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -13,7 +13,15 @@ k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') - output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = os.getuid() + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s_%s%s' % ( + username, k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) return output_dir diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -443,6 +443,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + for key, val in ffi._parser._int_constants.items(): + if key not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -24,6 +24,7 @@ 
_r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -99,6 +100,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -128,9 +130,10 @@ finally: if lock is not None: lock.release() - return ast, macros + # csource will be used to find buggy source text + return ast, macros, csource - def convert_pycparser_error(self, e, csource): + def _convert_pycparser_error(self, e, csource): # xxx look for ":NUM:" at the start of str(e) and try to interpret # it as a line number line = None @@ -142,6 +145,12 @@ csourcelines = csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: @@ -160,14 +169,9 @@ self._packed = prev_packed def _internal_parse(self, csource): - ast, macros = self._parse(csource) + ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." 
for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -175,27 +179,61 @@ if decl.name == '__dotdotdot__': break # - for decl in iterator: - if isinstance(decl, pycparser.c_ast.Decl): - self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise api.CDefError("typedef does not declare any name", - decl) - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_type(decl.name) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + try: + for decl in iterator: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) + and decl.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_type(decl.name) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_ptr_type(decl.name) + else: + realtype = self._get_type(decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = 
self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) + self._add_constants(key, pyvalue) + elif value == '...': + self._declare('macro ' + key, value) else: - raise api.CDefError("unrecognized construct", decl) + raise api.CDefError('only supports the syntax "#define ' + '%s ..." (literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) def _parse_decl(self, decl): node = decl.type @@ -227,7 +265,7 @@ self._declare('variable ' + decl.name, tp) def parse_type(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): @@ -306,7 +344,8 @@ if ident == 'void': return model.void_type if ident == '__dotdotdot__': - raise api.FFIError('bad usage of "..."') + raise api.FFIError(':%d: bad usage of "..."' % + typenode.coord.line) return resolve_common_type(ident) # if isinstance(type, pycparser.c_ast.Struct): @@ -333,7 +372,8 @@ return self._get_struct_union_enum_type('union', typenode, name, nested=True) # - raise api.FFIError("bad or unsupported type declaration") + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) @@ -499,6 
+539,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -506,8 +550,8 @@ self._partial_length = True return '...' # - raise api.FFIError("unsupported expression: expected a " - "simple numeric constant") + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) def _build_enum_type(self, explicit_name, decls): if decls is not None: @@ -522,6 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -535,3 +580,5 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + self._add_constants(k, v) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -38,6 +38,7 @@ import distutils.errors # dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() options = dist.get_option_dict('build_ext') options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -89,43 +89,54 @@ # by generate_cpy_function_method(). 
prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") - prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},') - prnt(' {NULL, NULL} /* Sentinel */') + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # # standard init. modname = self.verifier.get_module_name() - if sys.version_info >= (3,): - prnt('static struct PyModuleDef _cffi_module_def = {') - prnt(' PyModuleDef_HEAD_INIT,') - prnt(' "%s",' % modname) - prnt(' NULL,') - prnt(' -1,') - prnt(' _cffi_methods,') - prnt(' NULL, NULL, NULL, NULL') - prnt('};') - prnt() - initname = 'PyInit_%s' % modname - createmod = 'PyModule_Create(&_cffi_module_def)' - errorcase = 'return NULL' - finalreturn = 'return lib' - else: - initname = 'init%s' % modname - createmod = 'Py_InitModule("%s", _cffi_methods)' % modname - errorcase = 'return' - finalreturn = 'return' + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() prnt('PyMODINIT_FUNC') - prnt('%s(void)' % initname) + prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') - prnt(' lib = %s;' % createmod) - prnt(' if (lib == NULL || %s < 0)' % ( - self._chained_list_constants[False],)) - prnt(' %s;' % errorcase) - prnt(' _cffi_init();') - prnt(' %s;' % finalreturn) + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' 
% modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') def load_library(self): # XXX review all usages of 'self' here! @@ -394,7 +405,7 @@ meth = 'METH_O' else: meth = 'METH_VARARGS' - self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth)) + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop @@ -481,8 +492,8 @@ if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname, - layoutfuncname)) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: @@ -589,13 +600,7 @@ 'variable type'),)) assert delayed else: - prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name)) - prnt(' o = PyInt_FromLong((long)(%s));' % (name,)) - prnt(' else if ((%s) <= 0)' % (name,)) - prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,)) - prnt(' else') - prnt(' o = PyLong_FromUnsignedLongLong(' - '(unsigned long long)(%s));' % (name,)) + prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: @@ -632,13 +637,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int %s(PyObject *lib)' % funcname) prnt('{') @@ -760,17 +770,30 @@ #include #include -#ifdef MS_WIN32 -#include /* for 
alloca() */ -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif #if PY_MAJOR_VERSION < 3 @@ -795,6 +818,15 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ @@ -804,14 +836,14 @@ PyLong_FromLongLong(x))) #define _cffi_to_c_int(o, type) \ - (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ - : _cffi_to_c_i8(o)) : \ - sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ - : _cffi_to_c_i16(o)) : \ - sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ - : _cffi_to_c_i32(o)) : \ - sizeof(type) == 8 ? (((type)-1) > 0 ? 
_cffi_to_c_u64(o) \ - : _cffi_to_c_i64(o)) : \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -885,25 +917,32 @@ return PyBool_FromLong(was_alive); } -static void _cffi_init(void) +static int _cffi_init(void) { - PyObject *module = PyImport_ImportModule("_cffi_backend"); - PyObject *c_api_object; + PyObject *module, *c_api_object = NULL; + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) - return; + goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) - return; + goto failure; if (!PyCapsule_CheckExact(c_api_object)) { - Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); - return; + goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -249,10 +249,10 @@ prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') self.export_symbols.append(layoutfuncname) - prnt('ssize_t %s(ssize_t i)' % (layoutfuncname,)) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) - prnt(' static ssize_t nums[] = {') + prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' 
offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize in tp.enumfields(): @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] + BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -410,13 +410,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) @@ -453,7 +458,7 @@ else: BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: @@ -547,20 +552,29 @@ #include #include /* XXX for ssize_t on some platforms */ -#ifdef _WIN32 -# include -# define snprintf _snprintf -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef SSIZE_T ssize_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 
int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif #else -# include +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif ''' diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -50,6 +50,8 @@ pass def _fromstr(key): + if isinstance(key, unicode): + key = key.encode("ascii") if not isinstance(key, str): raise TypeError("gdbm mappings have string indices only") return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} @@ -71,8 +73,8 @@ def _raise_from_errno(self): if ffi.errno: - raise error(os.strerror(ffi.errno)) - raise error(lib.gdbm_strerror(lib.gdbm_errno)) + raise error(ffi.errno, os.strerror(ffi.errno)) + raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno)) def __len__(self): if self.size < 0: @@ -141,7 +143,7 @@ def _check_closed(self): if not self.ll_dbm: - raise error("GDBM object has already been closed") + raise error(0, "GDBM object has already been closed") __del__ = close @@ -159,7 +161,7 @@ elif flags[0] == 'n': iflags = lib.GDBM_NEWDB else: - raise error("First flag must be one of 'r', 'w', 'c' or 'n'") + raise error(0, "First flag must be one of 'r', 'w', 'c' or 'n'") for flag in flags[1:]: if flag == 'f': iflags |= lib.GDBM_FAST @@ -168,7 +170,7 @@ elif flag == 'u': iflags |= lib.GDBM_NOLOCK else: - raise error("Flag '%s' not supported" % flag) + raise error(0, "Flag '%s' not supported" % flag) return gdbm(filename, iflags, mode) open_flags = "rwcnfsu" diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -113,7 +113,7 @@ try: for name in modlist: __import__(name) - except (ImportError, CompilationError, py.test.skip.Exception), e: + 
except (ImportError, CompilationError, py.test.skip.Exception) as e: errcls = e.__class__.__name__ raise Exception( "The module %r is disabled\n" % (modname,) + diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -105,7 +105,7 @@ while True: try: w_key = space.next(w_iter) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_StopIteration): raise # re-raise other app-level exceptions break @@ -348,8 +348,12 @@ **objects** - Normal rules apply. Special methods are not honoured, except ``__init__``, - ``__del__`` and ``__iter__``. + Normal rules apply. The only special methods that are honoured are + ``__init__``, ``__del__``, ``__len__``, ``__getitem__``, ``__setitem__``, + ``__getslice__``, ``__setslice__``, and ``__iter__``. To handle slicing, + ``__getslice__`` and ``__setslice__`` must be used; using ``__getitem__`` and + ``__setitem__`` for slicing isn't supported. Additionally, using negative + indices for slicing is still not supported, even when using ``__getslice__``. This layout makes the number of types to take care about quite limited. @@ -567,7 +571,7 @@ try: ... - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_XxxError): raise ... diff --git a/pypy/doc/config/translation.log.txt b/pypy/doc/config/translation.log.txt --- a/pypy/doc/config/translation.log.txt +++ b/pypy/doc/config/translation.log.txt @@ -2,4 +2,4 @@ These must be enabled by setting the PYPYLOG environment variable. The exact set of features supported by PYPYLOG is described in -pypy/translation/c/src/debug_print.h. +rpython/translator/c/src/debug_print.h. 
diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,13 +72,11 @@ Here is a list of the limitations and missing features of the current implementation: -* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer - of PyPy, at your own risks and without doing anything sensible about - the GIL. Since PyPy 2.3, these functions are also named with an extra - "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, - but it might more or less work in simple cases if you do. (Obviously, - assuming the PyObject pointers you get have any particular fields in - any particular order is just going to crash.) +* ``ctypes.pythonapi`` is missing. In previous versions, it was present + and redirected to the `cpyext` C API emulation layer, but our + implementation did not do anything sensible about the GIL and the + functions were named with an extra "Py", for example + ``PyPyInt_FromLong()``. It was removed for being unhelpful. * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -8,6 +8,9 @@ *Articles about PyPy published so far, most recent first:* (bibtex_ file) +* `A Way Forward in Parallelising Dynamic Languages`_, + R. Meier, A. Rigo + * `Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`_, C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo @@ -71,6 +74,7 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib +.. _`A Way Forward in Parallelising Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2014/position-paper.pdf .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf .. 
_`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf .. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf @@ -93,6 +97,11 @@ Talks and Presentations ---------------------------------- +*This part is no longer updated.* The complete list is here__ (in +alphabetical order). + +.. __: https://bitbucket.org/pypy/extradoc/src/extradoc/talk/ + Talks in 2010 +++++++++++++ diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -465,9 +465,13 @@ This is documented (here__ and here__). It needs 4 GB of RAM to run "rpython targetpypystandalone" on top of PyPy, a bit more when running -on CPython. If you have less than 4 GB it will just swap forever (or -fail if you don't have enough swap). On 32-bit, divide the numbers by -two. +on top of CPython. If you have less than 4 GB free, it will just swap +forever (or fail if you don't have enough swap). And we mean *free:* +if the machine has 4 GB *in total,* then it will swap. + +On 32-bit, divide the numbers by two. (We didn't try recently, but in +the past it was possible to compile a 32-bit version on a 2 GB Linux +machine with nothing else running: no Gnome/KDE, for example.) .. __: http://pypy.org/download.html#building-from-source .. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -95,13 +95,12 @@ ``PYPYLOG`` If set to a non-empty value, enable logging, the format is: - *fname* + *fname* or *+fname* logging for profiling: includes all ``debug_start``/``debug_stop`` but not any nested ``debug_print``. *fname* can be ``-`` to log to *stderr*. - Note that using a : in fname is a bad idea, Windows - users, beware. 
+ The *+fname* form can be used if there is a *:* in fname ``:``\ *fname* Full logging, including ``debug_print``. diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -26,6 +26,16 @@ Transparent Proxies ================================ +.. warning:: + + This is a feature that was tried experimentally long ago, and we + found no really good use cases. The basic functionality is still + there, but we don't recommend using it. Some of the examples below + might not work any more (e.g. you can't tproxy a list object any + more). The rest can be done by hacking in standard Python. If + anyone is interested in working on tproxy again, he is welcome, but + we don't regard this as an interesting extension. + PyPy's Transparent Proxies allow routing of operations on objects to a callable. Application level code can customize objects without interfering with the type system - ``type(proxied_list) is list`` holds true diff --git a/pypy/doc/release-pypy3-2.3.1.rst b/pypy/doc/release-pypy3-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy3-2.3.1.rst @@ -0,0 +1,69 @@ +===================== +PyPy3 2.3.1 - Fulcrum +===================== + +We're pleased to announce the first stable release of PyPy3. PyPy3 +targets Python 3 (3.2.5) compatibility. + +We would like to thank all of the people who donated_ to the `py3k proposal`_ +for supporting the work that went into this. + +You can download the PyPy3 2.3.1 release here: + + http://pypy.org/download.html#pypy3-2-3-1 + +Highlights +========== + +* The first stable release of PyPy3: support for Python 3! 
+ +* The stdlib has been updated to Python 3.2.5 + +* Additional support for the u'unicode' syntax (`PEP 414`_) from Python 3.3 + +* Updates from the default branch, such as incremental GC and various JIT + improvements + +* Resolved some notable JIT performance regressions from PyPy2: + + - Re-enabled the previously disabled collection (list/dict/set) strategies + + - Resolved performance of iteration over range objects + + - Resolved handling of Python 3's exception __context__ unnecessarily forcing + frame object overhead + +.. _`PEP 414`: http://legacy.python.org/dev/peps/pep-0414/ + +What is PyPy? +============== + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.6 or 3.2.5. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +How to use PyPy? +================= + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. 
_`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -92,9 +92,9 @@ We're busy fixing them as we find them; feel free to `report bugs`_. * It runs with an overhead as low as 20% on examples like "richards". - There are also other examples with higher overheads --up to 10x for - "translate.py"-- which we are still trying to understand. One suspect - is our partial GC implementation, see below. + There are also other examples with higher overheads --currently up to + 2x for "translate.py"-- which we are still trying to understand. + One suspect is our partial GC implementation, see below. * Currently limited to 1.5 GB of RAM (this is just a parameter in `core.h`__). Memory overflows are not correctly handled; they cause @@ -111,9 +111,8 @@ * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. Programs allocating large - numbers of small objects that don't immediately die, as well as - programs that modify large lists or dicts, suffer from these missing - optimizations. + numbers of small objects that don't immediately die (surely a common + situation) suffer from these missing optimizations. * The GC has no support for destructors: the ``__del__`` method is never called (including on file objects, which won't be closed for you). diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,6 +3,50 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 87fdc76bccb4 +.. startrev: ca9b7cf02cf4 +.. branch: fix-bytearray-complexity +Bytearray operations no longer copy the bytearray unnecessarily +Added support for ``__getitem__``, ``__setitem__``, ``__getslice__``, +``__setslice__``, and ``__len__`` to RPython + +.. 
branch: stringbuilder2-perf +Give the StringBuilder a more flexible internal structure, with a +chained list of strings instead of just one string. This makes it +more efficient when building large strings, e.g. with cStringIO(). + +Also, use systematically jit.conditional_call() instead of regular +branches. This lets the JIT make more linear code, at the cost of +forcing a bit more data (to be passed as arguments to +conditional_calls). I would expect the net result to be a slight +slow-down on some simple benchmarks and a speed-up on bigger +programs. + +.. branch: ec-threadlocal +Change the executioncontext's lookup to be done by reading a thread- +local variable (which is implemented in C using '__thread' if +possible, and pthread_getspecific() otherwise). On Linux x86 and +x86-64, the JIT backend has a special optimization that lets it emit +directly a single MOV from a %gs- or %fs-based address. It seems +actually to give a good boost in performance. + +.. branch: fast-gil +A faster way to handle the GIL, particularly in JIT code. The GIL is +now a composite of two concepts: a global number (it's just set from +1 to 0 and back around CALL_RELEASE_GIL), and a real mutex. If there +are threads waiting to acquire the GIL, one of them is actively +checking the global number every 0.1 ms to 1 ms. Overall, JIT loops +full of external function calls now run a bit faster (if no thread was +started yet), or a *lot* faster (if threads were started already). + +.. branch: jit-get-errno +Optimize the errno handling in the JIT, notably around external +function calls. Linux-only. + +.. branch: disable_pythonapi +Remove non-functioning ctypes.pythonapi and ctypes.PyDLL, document this +incompatibility with cpython. Recast sys.dllhandle to an int. + +.. branch: scalar-operations +Fix performance regression on ufunc(<scalar>, <array>) in numpy. 
diff --git a/pypy/doc/whatsnew-pypy3-2.3.1.rst b/pypy/doc/whatsnew-pypy3-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-pypy3-2.3.1.rst @@ -0,0 +1,6 @@ +========================= +What's new in PyPy3 2.3.1 +========================= + +.. this is a revision shortly after pypy3-release-2.3.x +.. startrev: 0137d8e6657d diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -132,19 +132,23 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download http://www.gzip.org/zlib/zlib-1.2.3.tar.gz and extract it in -the base directory. Then compile:: +the base directory. Then compile as a static library:: cd zlib-1.2.3 nmake -f win32\Makefile.msc - copy zlib1.dll \zlib.dll + copy zlib1.lib + copy zlib.h zconf.h The bz2 compression library ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Get the same version of bz2 used by python and compile as a static library:: svn export http://svn.python.org/projects/external/bzip2-1.0.6 cd bzip2-1.0.6 nmake -f makefile.msc - copy bzip.dll \bzip.dll + copy libbz2.lib + copy bzlib.h + The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -166,7 +170,8 @@ is actually enough for pypy). Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in -your PATH. +your PATH, ``win32\bin\release\libexpat.lib`` somewhere in LIB, and +both ``lib\expat.h`` and ``lib\expat_external.h`` somewhere in INCLUDE. 
The OpenSSL library ~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -30,8 +30,6 @@ if w_dict is not None: # for tests w_entry_point = space.getitem(w_dict, space.wrap('entry_point')) w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel')) - w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish)) - w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup)) withjit = space.config.objspace.usemodules.pypyjit def entry_point(argv): @@ -53,7 +51,7 @@ argv = argv[:1] + argv[3:] try: try: - space.call_function(w_run_toplevel, w_call_startup_gateway) + space.startup() w_executable = space.wrap(argv[0]) w_argv = space.newlist([space.wrap(s) for s in argv[1:]]) w_exitcode = space.call_function(w_entry_point, w_executable, w_argv) @@ -69,7 +67,7 @@ return 1 finally: try: - space.call_function(w_run_toplevel, w_call_finish_gateway) + space.finish() except OperationError, e: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) @@ -184,11 +182,6 @@ 'pypy_thread_attach': pypy_thread_attach, 'pypy_setup_home': pypy_setup_home} -def call_finish(space): - space.finish() - -def call_startup(space): - space.startup() # _____ Define and setup target ___ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -395,6 +395,7 @@ def startup(self): # To be called before using the space + self.threadlocals.enter_thread(self) # Initialize already imported builtin modules from pypy.interpreter.module import Module @@ -639,30 +640,33 @@ """NOT_RPYTHON: Abstract method that should put some minimal content into the w_builtins.""" - @jit.loop_invariant def getexecutioncontext(self): "Return what we consider to be the active execution context." 
# Important: the annotator must not see a prebuilt ExecutionContext: # you should not see frames while you translate # so we make sure that the threadlocals never *have* an # ExecutionContext during translation. - if self.config.translating and not we_are_translated(): - assert self.threadlocals.getvalue() is None, ( - "threadlocals got an ExecutionContext during translation!") - try: - return self._ec_during_translation - except AttributeError: - ec = self.createexecutioncontext() - self._ec_during_translation = ec + if not we_are_translated(): + if self.config.translating: + assert self.threadlocals.get_ec() is None, ( + "threadlocals got an ExecutionContext during translation!") + try: + return self._ec_during_translation + except AttributeError: + ec = self.createexecutioncontext() + self._ec_during_translation = ec + return ec + else: + ec = self.threadlocals.get_ec() + if ec is None: + self.threadlocals.enter_thread(self) + ec = self.threadlocals.get_ec() return ec - # normal case follows. The 'thread' module installs a real - # thread-local object in self.threadlocals, so this builds - # and caches a new ec in each thread. - ec = self.threadlocals.getvalue() - if ec is None: - ec = self.createexecutioncontext() - self.threadlocals.setvalue(ec) - return ec + else: + # translated case follows. self.threadlocals is either from + # 'pypy.interpreter.miscutils' or 'pypy.module.thread.threadlocals'. + # the result is assumed to be non-null: enter_thread() was called. + return self.threadlocals.get_ec() def _freeze_(self): return True @@ -963,6 +967,13 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_no_unpack(self, w_iterable): + """ Same as listview() if cheap. If 'w_iterable' is something like + a generator, for example, then return None instead. + May return None anyway. + """ + return None + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. 
If the argument is not a list or does not contain only strings, return None. diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -496,6 +496,13 @@ """ +class UserDelCallback(object): + def __init__(self, w_obj, callback, descrname): + self.w_obj = w_obj + self.callback = callback + self.descrname = descrname + self.next = None + class UserDelAction(AsyncAction): """An action that invokes all pending app-level __del__() method. This is done as an action instead of immediately when the @@ -506,12 +513,18 @@ def __init__(self, space): AsyncAction.__init__(self, space) - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None self.finalizers_lock_count = 0 self.enabled_at_app_level = True def register_callback(self, w_obj, callback, descrname): - self.dying_objects.append((w_obj, callback, descrname)) + cb = UserDelCallback(w_obj, callback, descrname) + if self.dying_objects_last is None: + self.dying_objects = cb + else: + self.dying_objects_last.next = cb + self.dying_objects_last = cb self.fire() def perform(self, executioncontext, frame): @@ -525,13 +538,33 @@ # avoid too deep recursions of the kind of __del__ being called # while in the middle of another __del__ call. pending = self.dying_objects - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None space = self.space - for i in range(len(pending)): - w_obj, callback, descrname = pending[i] - pending[i] = (None, None, None) + while pending is not None: try: - callback(w_obj) + pending.callback(pending.w_obj) except OperationError, e: - e.write_unraisable(space, descrname, w_obj) + e.write_unraisable(space, pending.descrname, pending.w_obj) e.clear(space) # break up reference cycles + pending = pending.next + # + # Note: 'dying_objects' used to be just a regular list instead + # of a chained list. 
This was the cause of "leaks" if we have a + # program that constantly creates new objects with finalizers. + # Here is why: say 'dying_objects' is a long list, and there + # are n instances in it. Then we spend some time in this + # function, possibly triggering more GCs, but keeping the list + # of length n alive. Then the list is suddenly freed at the + # end, and we return to the user program. At this point the + # GC limit is still very high, because just before, there was + # a list of length n alive. Assume that the program continues + # to allocate a lot of instances with finalizers. The high GC + # limit means that it could allocate a lot of instances before + # reaching it --- possibly more than n. So the whole procedure + # repeats with higher and higher values of n. + # + # This does not occur in the current implementation because + # there is no list of length n: if n is large, then the GC + # will run several times while walking the list, but it will + # see lower and lower memory usage, with no lower bound of n. 
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -895,7 +895,7 @@ "use unwrap_spec(...=WrappedDefault(default))" % ( self._code.identifier, name, defaultval)) defs_w.append(None) - else: + elif name != '__args__' and name != 'args_w': defs_w.append(space.wrap(defaultval)) if self._code._unwrap_spec: UNDEFINED = object() diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -61,6 +61,13 @@ return self.send_ex(w_arg) def send_ex(self, w_arg, operr=None): + pycode = self.pycode + if jit.we_are_jitted() and should_not_inline(pycode): + generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg, + operr=operr, pycode=pycode) + return self._send_ex(w_arg, operr) + + def _send_ex(self, w_arg, operr): space = self.space if self.running: raise OperationError(space.w_ValueError, @@ -72,8 +79,7 @@ if operr is None: operr = OperationError(space.w_StopIteration, space.w_None) raise operr - # XXX it's not clear that last_instr should be promoted at all - # but as long as it is necessary for call_assembler, let's do it early + last_instr = jit.promote(frame.last_instr) if last_instr == -1: if w_arg and not space.is_w(w_arg, space.w_None): @@ -214,3 +220,38 @@ "interrupting generator of ") break block = block.previous + + + +def get_printable_location_genentry(bytecode): + return '%s ' % (bytecode.get_repr(),) +generatorentry_driver = jit.JitDriver(greens=['pycode'], + reds=['gen', 'w_arg', 'operr'], + get_printable_location = + get_printable_location_genentry, + name='generatorentry') + +from pypy.tool.stdlib_opcode import HAVE_ARGUMENT, opmap +YIELD_VALUE = opmap['YIELD_VALUE'] + + at jit.elidable_promote() +def should_not_inline(pycode): + # Should not inline generators with more than one "yield", + # as an approximative fix (see issue #1782). 
There are cases + # where it slows things down; for example calls to a simple + # generator that just produces a few simple values with a few + # consecutive "yield" statements. It fixes the near-infinite + # slow-down in issue #1782, though... + count_yields = 0 + code = pycode.co_code + n = len(code) + i = 0 + while i < n: + c = code[i] + op = ord(c) + if op == YIELD_VALUE: + count_yields += 1 + i += 1 + if op >= HAVE_ARGUMENT: + i += 2 + return count_yields >= 2 diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py --- a/pypy/interpreter/miscutils.py +++ b/pypy/interpreter/miscutils.py @@ -11,11 +11,14 @@ """ _value = None - def getvalue(self): + def get_ec(self): return self._value - def setvalue(self, value): - self._value = value + def enter_thread(self, space): + self._value = space.createexecutioncontext() + + def try_enter_thread(self, space): + return False def signals_enabled(self): return True diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -511,10 +511,10 @@ for i in range(min(len(varnames), self.getcode().co_nlocals)): name = varnames[i] w_value = self.locals_stack_w[i] - w_name = self.space.wrap(name) if w_value is not None: - self.space.setitem(self.w_locals, w_name, w_value) + self.space.setitem_str(self.w_locals, name, w_value) else: + w_name = self.space.wrap(name) try: self.space.delitem(self.w_locals, w_name) except OperationError as e: @@ -534,8 +534,7 @@ except ValueError: pass else: - w_name = self.space.wrap(name) - self.space.setitem(self.w_locals, w_name, w_value) + self.space.setitem_str(self.w_locals, name, w_value) @jit.unroll_safe @@ -548,13 +547,9 @@ new_fastlocals_w = [None] * numlocals for i in range(min(len(varnames), numlocals)): - w_name = self.space.wrap(varnames[i]) - try: - w_value = self.space.getitem(self.w_locals, w_name) - except OperationError, e: - if not e.match(self.space, self.space.w_KeyError): - raise 
- else: + name = varnames[i] + w_value = self.space.finditem_str(self.w_locals, name) + if w_value is not None: new_fastlocals_w[i] = w_value self.setfastscope(new_fastlocals_w) @@ -563,13 +558,8 @@ for i in range(len(freevarnames)): name = freevarnames[i] cell = self.cells[i] - w_name = self.space.wrap(name) - try: - w_value = self.space.getitem(self.w_locals, w_name) - except OperationError, e: - if not e.match(self.space, self.space.w_KeyError): - raise - else: + w_value = self.space.finditem_str(self.w_locals, name) + if w_value is not None: cell.set(w_value) @jit.unroll_safe diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -726,6 +726,22 @@ never_called py.test.raises(AssertionError, space.wrap, gateway.interp2app_temp(g)) + def test_unwrap_spec_default_applevel_bug2(self): + space = self.space + def g(space, w_x, w_y=None, __args__=None): + return w_x + w_g = space.wrap(gateway.interp2app_temp(g)) + w_42 = space.call_function(w_g, space.wrap(42)) + assert space.int_w(w_42) == 42 + py.test.raises(gateway.OperationError, space.call_function, w_g) + # + def g(space, w_x, w_y=None, args_w=None): + return w_x + w_g = space.wrap(gateway.interp2app_temp(g)) + w_42 = space.call_function(w_g, space.wrap(42)) + assert space.int_w(w_42) == 42 + py.test.raises(gateway.OperationError, space.call_function, w_g) + def test_interp2app_doc(self): space = self.space def f(space, w_x): diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -278,4 +278,21 @@ def f(): yield 1 raise StopIteration - assert tuple(f()) == (1,) \ No newline at end of file + assert tuple(f()) == (1,) + + +def test_should_not_inline(space): + from pypy.interpreter.generator import should_not_inline + w_co = space.appexec([], '''(): + def 
g(x): + yield x + 5 + return g.func_code + ''') + assert should_not_inline(w_co) == False + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + yield x + 6 + return g.func_code + ''') + assert should_not_inline(w_co) == True diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -183,9 +183,12 @@ misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) return # + must_leave = False ec = None + space = callback.space try: - ec = cerrno.get_errno_container(callback.space) + must_leave = space.threadlocals.try_enter_thread(space) + ec = cerrno.get_errno_container(space) cerrno.save_errno_into(ec, e) extra_line = '' try: @@ -206,5 +209,7 @@ except OSError: pass callback.write_error_return_value(ll_res) + if must_leave: + space.threadlocals.leave_thread(space) if ec is not None: cerrno.restore_errno_from(ec) diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -4,7 +4,7 @@ import sys -from rpython.rlib import jit, clibffi, jit_libffi +from rpython.rlib import jit, clibffi, jit_libffi, rgc from rpython.rlib.jit_libffi import (CIF_DESCRIPTION, CIF_DESCRIPTION_P, FFI_TYPE, FFI_TYPE_P, FFI_TYPE_PP, SIZE_OF_FFI_ARG) from rpython.rlib.objectmodel import we_are_translated, instantiate @@ -63,6 +63,7 @@ CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) return ctypefunc + @rgc.must_be_light_finalizer def __del__(self): if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') @@ -156,8 +157,8 @@ data = rffi.ptradd(buffer, cif_descr.exchange_args[i]) flag = get_mustfree_flag(data) if flag == 1: - raw_string = rffi.cast(rffi.CCHARPP, data)[0] - lltype.free(raw_string, flavor='raw') + raw_cdata = rffi.cast(rffi.CCHARPP, data)[0] + lltype.free(raw_cdata, flavor='raw') lltype.free(buffer, flavor='raw') return w_res 
diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -458,6 +458,10 @@ self._check_init(space) return space.call_method(self.w_buffer, "seekable") + def isatty_w(self, space): + self._check_init(space) + return space.call_method(self.w_buffer, "isatty") + def fileno_w(self, space): self._check_init(space) return space.call_method(self.w_buffer, "fileno") @@ -1035,6 +1039,7 @@ readable = interp2app(W_TextIOWrapper.readable_w), writable = interp2app(W_TextIOWrapper.writable_w), seekable = interp2app(W_TextIOWrapper.seekable_w), + isatty = interp2app(W_TextIOWrapper.isatty_w), fileno = interp2app(W_TextIOWrapper.fileno_w), name = GetSetProperty(W_TextIOWrapper.name_get_w), buffer = interp_attrproperty_w("w_buffer", cls=W_TextIOWrapper), diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -25,6 +25,12 @@ t = _io.TextIOWrapper(b) assert t.readable() assert t.seekable() + # + class CustomFile(object): + def isatty(self): return 'YES' + readable = writable = seekable = lambda self: False + t = _io.TextIOWrapper(CustomFile()) + assert t.isatty() == 'YES' def test_default_implementations(self): import _io diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -11,7 +11,7 @@ from rpython.rlib.rtimer import read_timestamp, _is_64_bit from rpython.rtyper.lltypesystem import rffi, lltype from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.rlib.rarithmetic import r_longlong import time, sys diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ 
b/pypy/module/_rawffi/interp_rawffi.py @@ -508,7 +508,10 @@ argshapes = unpack_argshapes(space, w_args) resshape = unpack_resshape(space, w_res) ffi_args = [shape.get_basic_ffi_type() for shape in argshapes] - ffi_res = resshape.get_basic_ffi_type() + if resshape is not None: + ffi_res = resshape.get_basic_ffi_type() + else: + ffi_res = ffi_type_void try: ptr = RawFuncPtr('???', ffi_args, ffi_res, rffi.cast(rffi.VOIDP, addr), flags) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -353,6 +353,11 @@ assert ptr[0] == rawcall.buffer ptr.free() + def test_raw_callable_returning_void(self): + import _rawffi + _rawffi.FuncPtr(0, [], None) + # assert did not crash + def test_short_addition(self): import _rawffi lib = _rawffi.CDLL(self.lib_name) diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -6,8 +6,8 @@ } interpleveldefs = { - 'SocketType': 'interp_socket.W_RSocket', - 'socket' : 'interp_socket.W_RSocket', + 'SocketType': 'interp_socket.W_Socket', + 'socket' : 'interp_socket.W_Socket', 'error' : 'interp_socket.get_error(space, "error")', 'herror' : 'interp_socket.get_error(space, "herror")', 'gaierror' : 'interp_socket.get_error(space, "gaierror")', diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,8 +1,12 @@ -from pypy.interpreter.gateway import unwrap_spec, WrappedDefault -from pypy.module._socket.interp_socket import converted_error, W_RSocket, addr_as_object, ipaddr_from_object from rpython.rlib import rsocket from rpython.rlib.rsocket import SocketError, INVALID_SOCKET + from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import unwrap_spec, WrappedDefault +from 
pypy.module._socket.interp_socket import ( + converted_error, W_Socket, addr_as_object, ipaddr_from_object +) + def gethostname(space): """gethostname() -> string @@ -136,10 +140,10 @@ The remaining arguments are the same as for socket(). """ try: - sock = rsocket.fromfd(fd, family, type, proto, W_RSocket) + sock = rsocket.fromfd(fd, family, type, proto) except SocketError, e: raise converted_error(space, e) - return space.wrap(sock) + return space.wrap(W_Socket(sock)) @unwrap_spec(family=int, type=int, proto=int) def socketpair(space, family=rsocket.socketpair_default_family, @@ -153,10 +157,13 @@ AF_UNIX if defined on the platform; otherwise, the default is AF_INET. """ try: - sock1, sock2 = rsocket.socketpair(family, type, proto, W_RSocket) + sock1, sock2 = rsocket.socketpair(family, type, proto) except SocketError, e: raise converted_error(space, e) - return space.newtuple([space.wrap(sock1), space.wrap(sock2)]) + return space.newtuple([ + space.wrap(W_Socket(sock1)), + space.wrap(W_Socket(sock2)) + ]) # The following 4 functions refuse all negative numbers, like CPython 2.6. 
# They could also check that the argument is not too large, but CPython 2.6 diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -1,14 +1,18 @@ +from rpython.rlib import rsocket +from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rsocket import ( + RSocket, AF_INET, SOCK_STREAM, SocketError, SocketErrorWithErrno, + RSocketError +) +from rpython.rtyper.lltypesystem import lltype, rffi + +from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef, make_weakref_descr,\ - interp_attrproperty +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from rpython.rlib.rarithmetic import intmask -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib import rsocket -from rpython.rlib.rsocket import RSocket, AF_INET, SOCK_STREAM -from rpython.rlib.rsocket import SocketError, SocketErrorWithErrno, RSocketError -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter import gateway +from pypy.interpreter.typedef import ( + GetSetProperty, TypeDef, make_weakref_descr +) # XXX Hack to seperate rpython and pypy @@ -124,10 +128,18 @@ return addr -class W_RSocket(W_Root, RSocket): - def __del__(self): - self.clear_all_weakrefs() - RSocket.__del__(self) +class W_Socket(W_Root): + def __init__(self, sock): + self.sock = sock + + def get_type_w(self, space): + return space.wrap(self.sock.type) + + def get_proto_w(self, space): + return space.wrap(self.sock.proto) + + def get_family_w(self, space): + return space.wrap(self.sock.family) def accept_w(self, space): """accept() -> (socket object, address info) @@ -137,22 +149,22 @@ info is a pair (hostaddr, port). 
""" try: - fd, addr = self.accept() + fd, addr = self.sock.accept() sock = rsocket.make_socket( - fd, self.family, self.type, self.proto, W_RSocket) - return space.newtuple([space.wrap(sock), + fd, self.sock.family, self.sock.type, self.sock.proto) + return space.newtuple([space.wrap(W_Socket(sock)), addr_as_object(addr, sock.fd, space)]) - except SocketError, e: + except SocketError as e: raise converted_error(space, e) # convert an Address into an app-level object def addr_as_object(self, space, address): - return addr_as_object(address, self.fd, space) + return addr_as_object(address, self.sock.fd, space) # convert an app-level object into an Address # based on the current socket's family def addr_from_object(self, space, w_address): - return addr_from_object(self.family, space, w_address) + return addr_from_object(self.sock.family, space, w_address) def bind_w(self, space, w_addr): """bind(address) @@ -162,8 +174,8 @@ sockets the address is a tuple (ifname, proto [,pkttype [,hatype]]) """ try: - self.bind(self.addr_from_object(space, w_addr)) - except SocketError, e: + self.sock.bind(self.addr_from_object(space, w_addr)) + except SocketError as e: raise converted_error(space, e) def close_w(self, space): @@ -172,7 +184,7 @@ Close the socket. It cannot be used after this call. """ try: - self.close() + self.sock.close() except SocketError: # cpython doesn't return any errors on close pass @@ -184,8 +196,8 @@ is a pair (host, port). 
""" try: - self.connect(self.addr_from_object(space, w_addr)) - except SocketError, e: + self.sock.connect(self.addr_from_object(space, w_addr)) + except SocketError as e: raise converted_error(space, e) def connect_ex_w(self, space, w_addr): @@ -196,15 +208,16 @@ """ try: addr = self.addr_from_object(space, w_addr) - except SocketError, e: + except SocketError as e: raise converted_error(space, e) - error = self.connect_ex(addr) + error = self.sock.connect_ex(addr) return space.wrap(error) def dup_w(self, space): try: - return self.dup(W_RSocket) - except SocketError, e: + sock = self.sock.dup() + return W_Socket(sock) + except SocketError as e: raise converted_error(space, e) def fileno_w(self, space): @@ -212,7 +225,7 @@ Return the integer file descriptor of the socket. """ - return space.wrap(intmask(self.fd)) + return space.wrap(intmask(self.sock.fd)) def getpeername_w(self, space): """getpeername() -> address info @@ -221,9 +234,9 @@ info is a pair (hostaddr, port). """ try: - addr = self.getpeername() - return addr_as_object(addr, self.fd, space) - except SocketError, e: + addr = self.sock.getpeername() + return addr_as_object(addr, self.sock.fd, space) + except SocketError as e: raise converted_error(space, e) def getsockname_w(self, space): From noreply at buildbot.pypy.org Fri Jul 11 15:22:40 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 11 Jul 2014 15:22:40 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: wip - add python-level test Message-ID: <20140711132240.9B7431D2335@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72419:3d89d854cf6d Date: 2014-07-11 22:39 +1000 http://bitbucket.org/pypy/pypy/changeset/3d89d854cf6d/ Log: wip - add python-level test diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -212,8 +212,14 @@ res = 
api._PyArray_SimpleNewFromData(0, ptr_s, 15, ptr_a) assert res.get_scalar_value().real == 3. assert res.get_scalar_value().imag == 4. + + def test_Ufunc_FromFuncAndDataAndSignature(self. space, api): + ufunc = api._PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, + types, ntypes, nin, nout, identity, doc, check_return, + signature) -class AppTestCNumber(AppTestCpythonExtensionBase): + +class AppTestNDArray(AppTestCpythonExtensionBase): def test_ndarray_object_c(self): mod = self.import_extension('foo', [ ("test_simplenew", "METH_NOARGS", From noreply at buildbot.pypy.org Fri Jul 11 15:22:41 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 11 Jul 2014 15:22:41 +0200 (CEST) Subject: [pypy-commit] pypy default: Backed out changeset: f20ac16753b6 - new hash function fails tests Message-ID: <20140711132241.EB5111D2335@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72420:8344e652a12e Date: 2014-07-11 22:42 +1000 http://bitbucket.org/pypy/pypy/changeset/8344e652a12e/ Log: Backed out changeset: f20ac16753b6 - new hash function fails tests diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -6,7 +6,7 @@ from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) from rpython.rlib import jit -from rpython.rlib.objectmodel import specialize, compute_hash +from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from pypy.module.micronumpy import types, boxes, base, support, constants as NPY from pypy.module.micronumpy.appbridge import get_appbridge_cache @@ -254,38 +254,8 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) - def _compute_hash(self, space, x): - from rpython.rlib.rarithmetic import intmask - if self.fields is None and self.subdtype is None: - endian = self.byteorder - if 
endian == NPY.NATIVE: - endian = NPY.NATBYTE - flags = 0 - y = 0x345678 - y = intmask((1000003 * y) ^ ord(self.kind[0])) - y = intmask((1000003 * y) ^ ord(endian[0])) - y = intmask((1000003 * y) ^ flags) - y = intmask((1000003 * y) ^ self.elsize) - if self.is_flexible(): - y = intmask((1000003 * y) ^ self.alignment) - return intmask((1000003 * x) ^ y) - if self.fields is not None: - for name, (offset, subdtype) in self.fields.iteritems(): - assert isinstance(subdtype, W_Dtype) - y = intmask(1000003 * (0x345678 ^ compute_hash(name))) - y = intmask(1000003 * (y ^ compute_hash(offset))) - y = intmask(1000003 * (y ^ subdtype._compute_hash(space, - 0x345678))) - x = intmask(x ^ y) - if self.subdtype is not None: - for s in self.shape: - x = intmask((1000003 * x) ^ compute_hash(s)) - x = self.base._compute_hash(space, x) - return x - def descr_hash(self, space): - return space.wrap(self._compute_hash(space, 0x345678)) - + return space.hash(self.descr_reduce(space)) def descr_str(self, space): if self.fields: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -368,30 +368,15 @@ d5 = numpy.dtype([('f0', 'i4'), ('f1', d2)]) d6 = numpy.dtype([('f0', 'i4'), ('f1', d3)]) import sys - assert hash(d1) == hash(d2) - assert hash(d1) != hash(d3) - assert hash(d4) == hash(d5) - assert hash(d4) != hash(d6) + if '__pypy__' not in sys.builtin_module_names: + assert hash(d1) == hash(d2) + assert hash(d1) != hash(d3) + assert hash(d4) == hash(d5) + assert hash(d4) != hash(d6) + else: + for d in [d1, d2, d3, d4, d5, d6]: + raises(TypeError, hash, d) - def test_record_hash(self): - from numpy import dtype - # make sure the fields hash return different value - # for different order of field in a structure - - # swap names - t1 = dtype([('x', ' Author: mattip Branch: Changeset: r72421:746c98100008 Date: 2014-07-11 23:09 +1000 
http://bitbucket.org/pypy/pypy/changeset/746c98100008/ Log: revert scalar __iter__ since it causes test failures (test_ndarray) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -153,10 +153,12 @@ raise OperationError(space.w_IndexError, space.wrap( "invalid index to scalar variable")) + ''' def descr_iter(self, space): # Making numpy scalar non-iterable with a valid __getitem__ method raise oefmt(space.w_TypeError, "'%T' object is not iterable", self) + ''' def descr_str(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) @@ -560,7 +562,7 @@ __new__ = interp2app(W_GenericBox.descr__new__.im_func), __getitem__ = interp2app(W_GenericBox.descr_getitem), - __iter__ = interp2app(W_GenericBox.descr_iter), + #__iter__ = interp2app(W_GenericBox.descr_iter), __str__ = interp2app(W_GenericBox.descr_str), __repr__ = interp2app(W_GenericBox.descr_str), __format__ = interp2app(W_GenericBox.descr_format), From noreply at buildbot.pypy.org Fri Jul 11 16:23:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Jul 2014 16:23:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Add an explicit assert. Better than crashing obscurely in examples Message-ID: <20140711142309.823E21C0231@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72422:c3d240baabb3 Date: 2014-07-11 16:22 +0200 http://bitbucket.org/pypy/pypy/changeset/c3d240baabb3/ Log: Add an explicit assert. Better than crashing obscurely in examples that embed the PyPy interpreter but fail to call space.startup(). diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -665,8 +665,11 @@ else: # translated case follows. self.threadlocals is either from # 'pypy.interpreter.miscutils' or 'pypy.module.thread.threadlocals'. 
- # the result is assumed to be non-null: enter_thread() was called. - return self.threadlocals.get_ec() + # the result is assumed to be non-null: enter_thread() was called + # by space.startup(). + ec = self.threadlocals.get_ec() + assert ec is not None + return ec def _freeze_(self): return True From noreply at buildbot.pypy.org Fri Jul 11 16:32:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 11 Jul 2014 16:32:40 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Add these two talks as html with short urls Message-ID: <20140711143240.0838E1C0231@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r516:bb5ad924f6f6 Date: 2014-07-11 16:32 +0200 http://bitbucket.org/pypy/pypy.org/changeset/bb5ad924f6f6/ Log: Add these two talks as html with short urls diff --git a/talk/ep2014-status.html b/talk/ep2014-status.html new file mode 100644 --- /dev/null +++ b/talk/ep2014-status.html @@ -0,0 +1,373 @@ + + + + + + +PyPy status talk (a.k.a.: no no, PyPy is not dead) + + + +
        +

        PyPy status talk (a.k.a.: no no, PyPy is not dead)

        + +
        +

        Abstract

        +

        The current status of PyPy, with a particular focus on what happened in +the last two years, since the last EuroPython PyPy talk. We will give a +brief overview of the current speed and the on-going development efforts +on the JIT, the GC, NumPy, Python 3 compatibility, CFFI, STM...

        +
        +
        +

        Description

        +

        In this talk we will present the current status of PyPy, with a +particular focus on what happened in the last two years, since the last +EuroPython PyPy talk. We will give an overview of the current speed and +the on-going development efforts, including but not limited to:

        +
          +
        • the status of the Just-in-Time Compiler (JIT) and PyPy performance in +general;
        • +
        • the improvements on the Garbage Collector (GC);
        • +
        • the status of the NumPy and Python 3 compatibility subprojects;
        • +
        • CFFI, which aims to be a general C interface mechanism for both +CPython and PyPy;
        • +
        • a quick overview of the STM (Software Transactional Memory) research +project, which aims to solve the GIL problem.
        • +
        +

        This is the "general PyPy status talk" that we give every year at +EuroPython (except last year; hence the "no no, PyPy is not dead" part +of the title of this talk).

        +
        +
        + + diff --git a/talk/ep2014-stm.html b/talk/ep2014-stm.html new file mode 100644 --- /dev/null +++ b/talk/ep2014-stm.html @@ -0,0 +1,379 @@ + + + + + + +Using All These Cores: Transactional Memory in PyPy + + + +
        +

        Using All These Cores: Transactional Memory in PyPy

        + +
        +

        Abstract

        +

        PyPy, the Python implementation written in Python, experimentally +supports Transactional Memory (TM). The strength of TM is to enable a +novel use of multithreading, inheritently safe, and not limited to +special use cases like other approaches. This talk will focus on how it +works under the hood.

        +
        +
        +

        Description

        +

        PyPy is a fast alternative Python implementation. Software +Transactional Memory (STM) is a current academic research topic. Put +the two together --brew for a couple of years-- and we get a version of +PyPy that runs on multiple cores, without the infamous Global +Interpreter Lock (GIL).

        +

        The current research is based on a recent new insight that promises to +give really good performance. The speed of STM is generally measured by +two factors: the ability to scale with the number of CPUs, and the +amount of overhead when compared with other approaches in a single CPU +(in this case, with the regular PyPy with the GIL). Scaling is not +really a problem here, but single-CPU performance is --or used to be. +This new approach gives a single-threaded overhead that should be very +low, maybe 20%, which would definitely be news for STM systems. Right +now (February 2014) we are still implementing it, so we cannot give +final numbers yet, but early results on a small interpreter for a custom +language are around 15%. This looks like a deal-changer for STM.

        +

        In the talk, I will describe our progress, hopefully along with real +numbers and demos. I will then dive under the hood of PyPy to give an +idea about how it works. I will conclude with a picture of how the +future of multi-threaded programming might looks like, for high-level +languages like Python. I will also mention CPython: how hard (or not) +it would be to change the CPython source code to use the same approach.

        +
        +
        + + From noreply at buildbot.pypy.org Fri Jul 11 18:25:20 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 11 Jul 2014 18:25:20 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: extra exception for SenderManipulation, also don't throw if sender unchanged, also make interp.trace immutable Message-ID: <20140711162520.2EFD51C021D@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: storage Changeset: r895:9cb31b513e7d Date: 2014-07-11 18:25 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/9cb31b513e7d/ Log: extra exception for SenderManipulation, also don't throw if sender unchanged, also make interp.trace immutable diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -25,7 +25,7 @@ class Interpreter(object): _immutable_fields_ = ["space", "image", "image_name", "interrupt_counter_size", - "startup_time", "evented", "interrupts"] + "startup_time", "trace", "evented", "interrupts"] jit_driver = jit.JitDriver( greens=['pc', 'self', 'method'], @@ -57,6 +57,7 @@ self.interrupt_check_counter = self.interrupt_counter_size self.next_wakeup_tick = 0 self.trace = trace + self.current_stack_depth = 0 self.trace_proxy = False def loop(self, w_active_context): @@ -69,7 +70,7 @@ raise Exception("loop_bytecodes left without raising...") except StackOverflow, e: if self.trace: - print "====== StackOverflow, contexts forced to heap at: %s" % e.s_new_context.short_str() + print "====== StackOverflow, contexts forced to heap at: \n%s" % e.s_new_context.print_stack() s_new_context = e.s_new_context except Return, nlr: assert nlr.s_target_context or nlr.is_local @@ -80,6 +81,10 @@ s_new_context._activate_unwind_context(self) s_new_context = s_sender s_new_context.push(nlr.value) + except SenderManipulation, e: + if self.trace: + print "====== SenderManipulation out of process, all contexts forced to heap!!!\n%s" % e.s_new_context.print_stack() + s_new_context = e.s_new_context except 
ProcessSwitch, p: assert not self.space.suppress_process_switch[0], "ProcessSwitch should be disabled..." if self.trace: @@ -223,7 +228,7 @@ return s_frame def padding(self, symbol=' '): - return symbol + return symbol * self.current_stack_depth class ReturnFromTopLevel(Exception): _attrs_ = ["object"] @@ -244,6 +249,10 @@ def __init__(self, s_new_context): self.s_new_context = s_new_context +class SenderManipulation(ContextSwitchException): + """This forces frames to the heap down to where the sender was + manipulated.""" + class StackOverflow(ContextSwitchException): """This causes the current jit-loop to be left. This is an experimental mechanism to avoid stack-overflow errors @@ -570,6 +579,7 @@ # ###################################################################### if interp.trace: + interp.current_stack_depth += 1 print interp.padding() + s_frame.short_str() return interp.stack_frame(s_frame, self) @@ -723,12 +733,18 @@ try: self.w_receiver().store(self.space, third, self.top()) except error.SenderChainManipulation, e: - raise StackOverflow(self) + # TODO: shouldn't need to throw, simply mark the + # receiver as dirty and handle it when we return out + # of that context + raise SenderManipulation(self) elif opType == 6: try: self.w_receiver().store(self.space, third, self.pop()) except error.SenderChainManipulation, e: - raise StackOverflow(self) + # TODO: shouldn't need to throw, simply mark the + # receiver as dirty and handle it when we return out + # of that context + raise SenderManipulation(self) elif opType == 7: w_association = self.w_method().getliteral(third) association = wrapper.AssociationWrapper(self.space, w_association) diff --git a/spyvm/plugins/vmdebugging.py b/spyvm/plugins/vmdebugging.py --- a/spyvm/plugins/vmdebugging.py +++ b/spyvm/plugins/vmdebugging.py @@ -8,15 +8,15 @@ def stop_ui_process(): DebuggingPlugin.userdata['stop_ui'] = True - at DebuggingPlugin.expose_primitive(unwrap_spec=[object]) -def trace(interp, s_frame, w_rcvr): - 
interp.trace = True - return w_rcvr +# @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) +# def trace(interp, s_frame, w_rcvr): +# interp.trace = True +# return w_rcvr - at DebuggingPlugin.expose_primitive(unwrap_spec=[object]) -def untrace(interp, s_frame, w_rcvr): - interp.trace = False - return w_rcvr +# @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) +# def untrace(interp, s_frame, w_rcvr): +# interp.trace = False +# return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def trace_proxy(interp, s_frame, w_rcvr): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -693,9 +693,11 @@ # === Sender === def store_s_sender(self, s_sender, raise_error=True): - self._s_sender = s_sender - if raise_error: - raise error.SenderChainManipulation(self) + if self._s_sender is not s_sender: + # it happens + self._s_sender = s_sender + if raise_error: + raise error.SenderChainManipulation(self) def w_sender(self): sender = self.s_sender() From noreply at buildbot.pypy.org Sat Jul 12 13:39:36 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 12 Jul 2014 13:39:36 +0200 (CEST) Subject: [pypy-commit] pypy default: skip failing test pending implementation Message-ID: <20140712113936.71C991C0299@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72423:22b576e6729a Date: 2014-07-12 21:38 +1000 http://bitbucket.org/pypy/pypy/changeset/22b576e6729a/ Log: skip failing test pending implementation diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -292,6 +292,7 @@ assert np.isnan(b/a) def test_scalar_iter(self): + skip('not implemented yet') from numpypy import int8, int16, int32, int64, float32, float64 for t in int8, int16, int32, int64, float32, float64: try: From noreply at buildbot.pypy.org Sat Jul 12 20:29:09 2014 From: noreply at 
buildbot.pypy.org (alex_gaynor) Date: Sat, 12 Jul 2014 20:29:09 +0200 (CEST) Subject: [pypy-commit] pypy default: We include the oplist in the abort hook Message-ID: <20140712182909.8543F1D359B@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r72424:0d575730e10f Date: 2014-07-12 11:26 -0700 http://bitbucket.org/pypy/pypy/changeset/0d575730e10f/ Log: We include the oplist in the abort hook diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst --- a/pypy/doc/jit-hooks.rst +++ b/pypy/doc/jit-hooks.rst @@ -34,7 +34,7 @@ aborted due to some reason. The hook will be invoked with the signature: - ``hook(jitdriver_name, greenkey, reason)`` + ``hook(jitdriver_name, greenkey, reason, oplist)`` Reason is a string, the meaning of other arguments is the same as attributes on JitLoopInfo object From noreply at buildbot.pypy.org Sat Jul 12 20:29:10 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 12 Jul 2014 20:29:10 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20140712182910.CD5781D359C@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r72425:6b33956820cd Date: 2014-07-12 11:28 -0700 http://bitbucket.org/pypy/pypy/changeset/6b33956820cd/ Log: merged upstream diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -292,6 +292,7 @@ assert np.isnan(b/a) def test_scalar_iter(self): + skip('not implemented yet') from numpypy import int8, int16, int32, int64, float32, float64 for t in int8, int16, int32, int64, float32, float64: try: From noreply at buildbot.pypy.org Sat Jul 12 23:48:49 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 12 Jul 2014 23:48:49 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default (4841c3bead14) Message-ID: <20140712214849.66EBA1C0231@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k 
Changeset: r72426:79c3c10f7f4d Date: 2014-07-12 14:35 -0700 http://bitbucket.org/pypy/pypy/changeset/79c3c10f7f4d/ Log: merge default (4841c3bead14) diff too long, truncating to 2000 out of 7172 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -10,3 +10,7 @@ 20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 0000000000000000000000000000000000000000 release-2.3.0 394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +0000000000000000000000000000000000000000 release-2.2=3.1 diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -218,6 +218,8 @@ if restype is None: import ctypes restype = ctypes.c_int + if self._argtypes_ is None: + self._argtypes_ = [] self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -13,7 +13,15 @@ k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') - output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = os.getuid() + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s_%s%s' % ( + username, k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) return output_dir diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -443,6 +443,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + 
for key, val in ffi._parser._int_constants.items(): + if key not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -24,6 +24,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -99,6 +100,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -128,9 +130,10 @@ finally: if lock is not None: lock.release() - return ast, macros + # csource will be used to find buggy source text + return ast, macros, csource - def convert_pycparser_error(self, e, csource): + def _convert_pycparser_error(self, e, csource): # xxx look for ":NUM:" at the start of str(e) and try to interpret # it as a line number line = None @@ -142,6 +145,12 @@ csourcelines = csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: @@ -160,14 +169,9 @@ self._packed = prev_packed def _internal_parse(self, csource): - ast, macros = self._parse(csource) + ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." 
for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -175,27 +179,61 @@ if decl.name == '__dotdotdot__': break # - for decl in iterator: - if isinstance(decl, pycparser.c_ast.Decl): - self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise api.CDefError("typedef does not declare any name", - decl) - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_type(decl.name) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + try: + for decl in iterator: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) + and decl.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_type(decl.name) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_ptr_type(decl.name) + else: + realtype = self._get_type(decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = 
self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) + self._add_constants(key, pyvalue) + elif value == '...': + self._declare('macro ' + key, value) else: - raise api.CDefError("unrecognized construct", decl) + raise api.CDefError('only supports the syntax "#define ' + '%s ..." (literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) def _parse_decl(self, decl): node = decl.type @@ -227,7 +265,7 @@ self._declare('variable ' + decl.name, tp) def parse_type(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): @@ -306,7 +344,8 @@ if ident == 'void': return model.void_type if ident == '__dotdotdot__': - raise api.FFIError('bad usage of "..."') + raise api.FFIError(':%d: bad usage of "..."' % + typenode.coord.line) return resolve_common_type(ident) # if isinstance(type, pycparser.c_ast.Struct): @@ -333,7 +372,8 @@ return self._get_struct_union_enum_type('union', typenode, name, nested=True) # - raise api.FFIError("bad or unsupported type declaration") + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) @@ -499,6 
+539,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -506,8 +550,8 @@ self._partial_length = True return '...' # - raise api.FFIError("unsupported expression: expected a " - "simple numeric constant") + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) def _build_enum_type(self, explicit_name, decls): if decls is not None: @@ -522,6 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -535,3 +580,5 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + self._add_constants(k, v) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -38,6 +38,7 @@ import distutils.errors # dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() options = dist.get_option_dict('build_ext') options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -89,43 +89,54 @@ # by generate_cpy_function_method(). 
prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") - prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},') - prnt(' {NULL, NULL} /* Sentinel */') + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # # standard init. modname = self.verifier.get_module_name() - if sys.version_info >= (3,): - prnt('static struct PyModuleDef _cffi_module_def = {') - prnt(' PyModuleDef_HEAD_INIT,') - prnt(' "%s",' % modname) - prnt(' NULL,') - prnt(' -1,') - prnt(' _cffi_methods,') - prnt(' NULL, NULL, NULL, NULL') - prnt('};') - prnt() - initname = 'PyInit_%s' % modname - createmod = 'PyModule_Create(&_cffi_module_def)' - errorcase = 'return NULL' - finalreturn = 'return lib' - else: - initname = 'init%s' % modname - createmod = 'Py_InitModule("%s", _cffi_methods)' % modname - errorcase = 'return' - finalreturn = 'return' + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() prnt('PyMODINIT_FUNC') - prnt('%s(void)' % initname) + prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') - prnt(' lib = %s;' % createmod) - prnt(' if (lib == NULL || %s < 0)' % ( - self._chained_list_constants[False],)) - prnt(' %s;' % errorcase) - prnt(' _cffi_init();') - prnt(' %s;' % finalreturn) + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' 
% modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') def load_library(self): # XXX review all usages of 'self' here! @@ -394,7 +405,7 @@ meth = 'METH_O' else: meth = 'METH_VARARGS' - self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth)) + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop @@ -481,8 +492,8 @@ if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname, - layoutfuncname)) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: @@ -589,13 +600,7 @@ 'variable type'),)) assert delayed else: - prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name)) - prnt(' o = PyInt_FromLong((long)(%s));' % (name,)) - prnt(' else if ((%s) <= 0)' % (name,)) - prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,)) - prnt(' else') - prnt(' o = PyLong_FromUnsignedLongLong(' - '(unsigned long long)(%s));' % (name,)) + prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: @@ -632,13 +637,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int %s(PyObject *lib)' % funcname) prnt('{') @@ -760,17 +770,30 @@ #include #include -#ifdef MS_WIN32 -#include /* for 
alloca() */ -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif #if PY_MAJOR_VERSION < 3 @@ -795,6 +818,15 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ @@ -804,14 +836,14 @@ PyLong_FromLongLong(x))) #define _cffi_to_c_int(o, type) \ - (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ - : _cffi_to_c_i8(o)) : \ - sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ - : _cffi_to_c_i16(o)) : \ - sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ - : _cffi_to_c_i32(o)) : \ - sizeof(type) == 8 ? (((type)-1) > 0 ? 
_cffi_to_c_u64(o) \ - : _cffi_to_c_i64(o)) : \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -885,25 +917,32 @@ return PyBool_FromLong(was_alive); } -static void _cffi_init(void) +static int _cffi_init(void) { - PyObject *module = PyImport_ImportModule("_cffi_backend"); - PyObject *c_api_object; + PyObject *module, *c_api_object = NULL; + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) - return; + goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) - return; + goto failure; if (!PyCapsule_CheckExact(c_api_object)) { - Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); - return; + goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -249,10 +249,10 @@ prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') self.export_symbols.append(layoutfuncname) - prnt('ssize_t %s(ssize_t i)' % (layoutfuncname,)) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) - prnt(' static ssize_t nums[] = {') + prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' 
offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize in tp.enumfields(): @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] + BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -410,13 +410,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) @@ -453,7 +458,7 @@ else: BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: @@ -547,20 +552,29 @@ #include #include /* XXX for ssize_t on some platforms */ -#ifdef _WIN32 -# include -# define snprintf _snprintf -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef SSIZE_T ssize_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 
int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif #else -# include +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif ''' diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -115,7 +115,7 @@ try: for name in modlist: __import__(name) - except (ImportError, CompilationError, py.test.skip.Exception), e: + except (ImportError, CompilationError, py.test.skip.Exception) as e: errcls = e.__class__.__name__ raise Exception( "The module %r is disabled\n" % (modname,) + diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -105,7 +105,7 @@ while True: try: w_key = space.next(w_iter) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_StopIteration): raise # re-raise other app-level exceptions break @@ -348,8 +348,12 @@ **objects** - Normal rules apply. Special methods are not honoured, except ``__init__``, - ``__del__`` and ``__iter__``. + Normal rules apply. The only special methods that are honoured are + ``__init__``, ``__del__``, ``__len__``, ``__getitem__``, ``__setitem__``, + ``__getslice__``, ``__setslice__``, and ``__iter__``. To handle slicing, + ``__getslice__`` and ``__setslice__`` must be used; using ``__getitem__`` and + ``__setitem__`` for slicing isn't supported. Additionally, using negative + indices for slicing is still not support, even when using ``__getslice__``. This layout makes the number of types to take care about quite limited. @@ -567,7 +571,7 @@ try: ... - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_XxxError): raise ... 
diff --git a/pypy/doc/config/translation.log.txt b/pypy/doc/config/translation.log.txt --- a/pypy/doc/config/translation.log.txt +++ b/pypy/doc/config/translation.log.txt @@ -2,4 +2,4 @@ These must be enabled by setting the PYPYLOG environment variable. The exact set of features supported by PYPYLOG is described in -pypy/translation/c/src/debug_print.h. +rpython/translator/c/src/debug_print.h. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -8,6 +8,9 @@ *Articles about PyPy published so far, most recent first:* (bibtex_ file) +* `A Way Forward in Parallelising Dynamic Languages`_, + R. Meier, A. Rigo + * `Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`_, C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo @@ -71,6 +74,7 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib +.. _`A Way Forward in Parallelising Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2014/position-paper.pdf .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf .. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf .. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf @@ -93,6 +97,11 @@ Talks and Presentations ---------------------------------- +*This part is no longer updated.* The complete list is here__ (in +alphabetical order). + +.. __: https://bitbucket.org/pypy/extradoc/src/extradoc/talk/ + Talks in 2010 +++++++++++++ diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. 
toctree:: + release-2.3.1.rst release-2.3.0.rst release-2.2.1.rst release-2.2.0.rst diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.3.0`_: the latest official release +* `Release 2.3.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.3.0`: http://pypy.org/download.html +.. _`Release 2.3.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -95,13 +95,12 @@ ``PYPYLOG`` If set to a non-empty value, enable logging, the format is: - *fname* + *fname* or *+fname* logging for profiling: includes all ``debug_start``/``debug_stop`` but not any nested ``debug_print``. *fname* can be ``-`` to log to *stderr*. - Note that using a : in fname is a bad idea, Windows - users, beware. + The *+fname* form can be used if there is a *:* in fname ``:``\ *fname* Full logging, including ``debug_print``. diff --git a/pypy/doc/release-pypy3-2.3.1.rst b/pypy/doc/release-pypy3-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy3-2.3.1.rst @@ -0,0 +1,69 @@ +===================== +PyPy3 2.3.1 - Fulcrum +===================== + +We're pleased to announce the first stable release of PyPy3. PyPy3 +targets Python 3 (3.2.5) compatibility. + +We would like to thank all of the people who donated_ to the `py3k proposal`_ +for supporting the work that went into this. + +You can download the PyPy3 2.3.1 release here: + + http://pypy.org/download.html#pypy3-2-3-1 + +Highlights +========== + +* The first stable release of PyPy3: support for Python 3! 
+ +* The stdlib has been updated to Python 3.2.5 + +* Additional support for the u'unicode' syntax (`PEP 414`_) from Python 3.3 + +* Updates from the default branch, such as incremental GC and various JIT + improvements + +* Resolved some notable JIT performance regressions from PyPy2: + + - Re-enabled the previously disabled collection (list/dict/set) strategies + + - Resolved performance of iteration over range objects + + - Resolved handling of Python 3's exception __context__ unnecessarily forcing + frame object overhead + +.. _`PEP 414`: http://legacy.python.org/dev/peps/pep-0414/ + +What is PyPy? +============== + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.6 or 3.2.5. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +How to use PyPy? +================= + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. 
_`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,6 +3,30 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 87fdc76bccb4 +.. startrev: ca9b7cf02cf4 +.. branch: fix-bytearray-complexity +Bytearray operations no longer copy the bytearray unnecessarily +Added support for ``__getitem__``, ``__setitem__``, ``__getslice__``, +``__setslice__``, and ``__len__`` to RPython + +.. branch: stringbuilder2-perf +Give the StringBuilder a more flexible internal structure, with a +chained list of strings instead of just one string. This make it +more efficient when building large strings, e.g. with cStringIO(). + +Also, use systematically jit.conditional_call() instead of regular +branches. This lets the JIT make more linear code, at the cost of +forcing a bit more data (to be passed as arguments to +conditional_calls). I would expect the net result to be a slight +slow-down on some simple benchmarks and a speed-up on bigger +programs. + +.. branch: ec-threadlocal +Change the executioncontext's lookup to be done by reading a thread- +local variable (which is implemented in C using '__thread' if +possible, and pthread_getspecific() otherwise). On Linux x86 and +x86-64, the JIT backend has a special optimization that lets it emit +directly a single MOV from a %gs- or %fs-based address. It seems +actually to give a good boost in performance. diff --git a/pypy/doc/whatsnew-pypy3-2.3.1.rst b/pypy/doc/whatsnew-pypy3-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-pypy3-2.3.1.rst @@ -0,0 +1,6 @@ +========================= +What's new in PyPy3 2.3.1 +========================= + +.. this is a revision shortly after pypy3-release-2.3.x +.. 
startrev: 0137d8e6657d diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -31,8 +31,6 @@ if w_dict is not None: # for tests w_entry_point = space.getitem(w_dict, space.wrap('entry_point')) w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel')) - w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish)) - w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup)) withjit = space.config.objspace.usemodules.pypyjit def entry_point(argv): @@ -54,7 +52,7 @@ argv = argv[:1] + argv[3:] try: try: - space.call_function(w_run_toplevel, w_call_startup_gateway) + space.startup() if rlocale.HAVE_LANGINFO: try: rlocale.setlocale(rlocale.LC_ALL, '') @@ -76,7 +74,7 @@ return 1 finally: try: - space.call_function(w_run_toplevel, w_call_finish_gateway) + space.finish() except OperationError, e: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space).encode('utf-8')) @@ -191,11 +189,6 @@ 'pypy_thread_attach': pypy_thread_attach, 'pypy_setup_home': pypy_setup_home} -def call_finish(space): - space.finish() - -def call_startup(space): - space.startup() # _____ Define and setup target ___ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -373,6 +373,7 @@ def startup(self): # To be called before using the space + self.threadlocals.enter_thread(self) # Initialize already imported builtin modules from pypy.interpreter.module import Module @@ -628,30 +629,33 @@ """NOT_RPYTHON: Abstract method that should put some minimal content into the w_builtins.""" - @jit.loop_invariant def getexecutioncontext(self): "Return what we consider to be the active execution context." 
# Important: the annotator must not see a prebuilt ExecutionContext: # you should not see frames while you translate # so we make sure that the threadlocals never *have* an # ExecutionContext during translation. - if self.config.translating and not we_are_translated(): - assert self.threadlocals.getvalue() is None, ( - "threadlocals got an ExecutionContext during translation!") - try: - return self._ec_during_translation - except AttributeError: - ec = self.createexecutioncontext() - self._ec_during_translation = ec + if not we_are_translated(): + if self.config.translating: + assert self.threadlocals.get_ec() is None, ( + "threadlocals got an ExecutionContext during translation!") + try: + return self._ec_during_translation + except AttributeError: + ec = self.createexecutioncontext() + self._ec_during_translation = ec + return ec + else: + ec = self.threadlocals.get_ec() + if ec is None: + self.threadlocals.enter_thread(self) + ec = self.threadlocals.get_ec() return ec - # normal case follows. The 'thread' module installs a real - # thread-local object in self.threadlocals, so this builds - # and caches a new ec in each thread. - ec = self.threadlocals.getvalue() - if ec is None: - ec = self.createexecutioncontext() - self.threadlocals.setvalue(ec) - return ec + else: + # translated case follows. self.threadlocals is either from + # 'pypy.interpreter.miscutils' or 'pypy.module.thread.threadlocals'. + # the result is assumed to be non-null: enter_thread() was called. 
+ return self.threadlocals.get_ec() def _freeze_(self): return True diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -905,7 +905,7 @@ "use unwrap_spec(...=WrappedDefault(default))" % ( self._code.identifier, name, defaultval)) defs_w.append(None) - else: + elif name != '__args__' and name != 'args_w': spec = unwrap_spec[i] if isinstance(defaultval, str) and spec not in [str]: defs_w.append(space.wrapbytes(defaultval)) diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py --- a/pypy/interpreter/miscutils.py +++ b/pypy/interpreter/miscutils.py @@ -11,11 +11,14 @@ """ _value = None - def getvalue(self): + def get_ec(self): return self._value - def setvalue(self, value): - self._value = value + def enter_thread(self, space): + self._value = space.createexecutioncontext() + + def try_enter_thread(self, space): + return False def signals_enabled(self): return True diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -738,6 +738,22 @@ never_called py.test.raises(AssertionError, space.wrap, gateway.interp2app_temp(g)) + def test_unwrap_spec_default_applevel_bug2(self): + space = self.space + def g(space, w_x, w_y=None, __args__=None): + return w_x + w_g = space.wrap(gateway.interp2app_temp(g)) + w_42 = space.call_function(w_g, space.wrap(42)) + assert space.int_w(w_42) == 42 + py.test.raises(gateway.OperationError, space.call_function, w_g) + # + def g(space, w_x, w_y=None, args_w=None): + return w_x + w_g = space.wrap(gateway.interp2app_temp(g)) + w_42 = space.call_function(w_g, space.wrap(42)) + assert space.int_w(w_42) == 42 + py.test.raises(gateway.OperationError, space.call_function, w_g) + def test_interp2app_doc(self): space = self.space def f(space, w_x): diff --git a/pypy/module/_cffi_backend/ccallback.py 
b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -183,9 +183,12 @@ misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) return # + must_leave = False ec = None + space = callback.space try: - ec = cerrno.get_errno_container(callback.space) + must_leave = space.threadlocals.try_enter_thread(space) + ec = cerrno.get_errno_container(space) cerrno.save_errno_into(ec, e) extra_line = '' try: @@ -206,5 +209,7 @@ except OSError: pass callback.write_error_return_value(ll_res) + if must_leave: + space.threadlocals.leave_thread(space) if ec is not None: cerrno.restore_errno_from(ec) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -511,7 +511,10 @@ argshapes = unpack_argshapes(space, w_args) resshape = unpack_resshape(space, w_res) ffi_args = [shape.get_basic_ffi_type() for shape in argshapes] - ffi_res = resshape.get_basic_ffi_type() + if resshape is not None: + ffi_res = resshape.get_basic_ffi_type() + else: + ffi_res = ffi_type_void try: ptr = RawFuncPtr('???', ffi_args, ffi_res, rffi.cast(rffi.VOIDP, addr), flags) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -354,6 +354,11 @@ assert ptr[0] == rawcall.buffer ptr.free() + def test_raw_callable_returning_void(self): + import _rawffi + _rawffi.FuncPtr(0, [], None) + # assert did not crash + def test_short_addition(self): import _rawffi lib = _rawffi.CDLL(self.lib_name) diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -15,6 +15,10 @@ gc.collect() assert ref() is None + def test_missing_arg(self): + import _weakref + 
raises(TypeError, _weakref.ref) + def test_callback(self): import _weakref, gc class A(object): diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -308,9 +308,9 @@ space.sys.get('modules'), space.wrap(name)) - @unwrap_spec(modname=str, prologue=str, PY_SSIZE_T_CLEAN=bool) + @unwrap_spec(modname=str, prologue=str, more_init=str, PY_SSIZE_T_CLEAN=bool) def import_extension(space, modname, w_functions, prologue="", - PY_SSIZE_T_CLEAN=False): + more_init="", PY_SSIZE_T_CLEAN=False): functions = space.unwrap(w_functions) methods_table = [] codes = [] @@ -340,6 +340,8 @@ }; """ % dict(methods='\n'.join(methods_table), modname=modname) init = """PyObject *mod = PyModule_Create(&moduledef);""" + if more_init: + init += more_init return import_module(space, name=modname, init=init, body=body, PY_SSIZE_T_CLEAN=PY_SSIZE_T_CLEAN) diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -153,7 +153,7 @@ /*tp_methods*/ 0, /*tp_members*/ enum_members, /*tp_getset*/ 0, - /*tp_base*/ &PyInt_Type, + /*tp_base*/ 0, /* set to &PyInt_Type in init function for MSVC */ /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, @@ -162,7 +162,9 @@ /*tp_alloc*/ 0, /*tp_new*/ 0 }; - """) + """, more_init = ''' + Enum_Type.tp_base = &PyInt_Type; + ''') a = module.newEnum("ULTIMATE_ANSWER", 42) assert type(a).__name__ == "Enum" @@ -176,12 +178,13 @@ ("test_int", "METH_NOARGS", """ PyObject * obj = PyInt_FromLong(42); + PyObject * val; if (!PyInt_Check(obj)) { Py_DECREF(obj); PyErr_SetNone(PyExc_ValueError); return NULL; } - PyObject * val = PyInt_FromLong(((PyIntObject *)obj)->ob_ival); + val = PyInt_FromLong(((PyIntObject *)obj)->ob_ival); Py_DECREF(obj); return val; """ diff --git 
a/pypy/module/micronumpy/tool/numready/page.html b/pypy/module/micronumpy/tool/numready/page.html --- a/pypy/module/micronumpy/tool/numready/page.html +++ b/pypy/module/micronumpy/tool/numready/page.html @@ -40,6 +40,7 @@

        numpy compatability test results, generated automatically by running
        pypy/module/micronumpy/tool/numready/main.py <path-to-latest-pypy>

        Overall: {{ msg }}

        +

        Warning: a positive result does not mean the function is actually working! It only means that the function/module/constant is present. It may be missing other things.

        diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -71,13 +71,13 @@ "getfield_gc", "guard_value", "guard_not_invalidated"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') - assert log.opnames(ops) == ["guard_not_invalidated"] + assert log.opnames(ops) == [] # ops = entry_bridge.ops_by_id('call', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] # assert entry_bridge.match_by_id('call', """ - p38 = call(ConstClass(getexecutioncontext), descr=) + p38 = call(ConstClass(_ll_0_threadlocalref_getter___), descr=) p39 = getfield_gc(p38, descr=) i40 = force_token() p41 = getfield_gc(p38, descr=) @@ -435,7 +435,7 @@ p26 = getfield_gc(p7, descr=) guard_value(p26, ConstPtr(ptr27), descr=...) guard_not_invalidated(descr=...) - p29 = call(ConstClass(getexecutioncontext), descr=) + p29 = call(ConstClass(_ll_0_threadlocalref_getter___), descr=) p30 = getfield_gc(p29, descr=) p31 = force_token() p32 = getfield_gc(p29, descr=) @@ -448,7 +448,6 @@ i39 = getfield_gc_pure(p37, descr=) i40 = int_add_ovf(i22, i39) guard_no_overflow(descr=...) - guard_not_invalidated(descr=...) --TICK-- """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -102,38 +102,37 @@ assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i7 = int_gt(i4, 0) - guard_true(i7, descr=...) + i79 = int_gt(i74, 0) + guard_true(i79, descr=...) guard_not_invalidated(descr=...) - p9 = call(ConstClass(ll_int2dec__Signed), i4, descr=) + p80 = call(ConstClass(ll_int2dec__Signed), i74, descr=) guard_no_exception(descr=...) - i10 = strlen(p9) - i11 = int_is_true(i10) - guard_true(i11, descr=...) 
- i13 = strgetitem(p9, 0) - i15 = int_eq(i13, 45) - guard_false(i15, descr=...) - i17 = int_neg(i10) - i19 = int_gt(i10, 23) - guard_false(i19, descr=...) - p21 = newstr(23) - copystrcontent(p9, p21, 0, 0, i10) - i25 = int_add(1, i10) - i26 = int_gt(i25, 23) - guard_false(i26, descr=...) - strsetitem(p21, i10, 32) - i30 = int_add(i10, i25) - i31 = int_gt(i30, 23) - guard_false(i31, descr=...) - copystrcontent(p9, p21, 0, i25, i10) - i33 = int_lt(i30, 23) - guard_true(i33, descr=...) - p35 = call(ConstClass(ll_shrink_array__rpy_stringPtr_Signed), p21, i30, descr=) + i85 = strlen(p80) + p86 = new(descr=) + p88 = newstr(23) + setfield_gc(..., descr=) + setfield_gc(..., descr=) + setfield_gc(..., descr=) + call(ConstClass(ll_append_res0__stringbuilderPtr_rpy_stringPtr), p86, p80, descr=) guard_no_exception(descr=...) - i37 = strlen(p35) - i38 = int_add_ovf(i5, i37) + i89 = getfield_gc(p86, descr=) + i90 = getfield_gc(p86, descr=) + i91 = int_eq(i89, i90) + cond_call(i91, ConstClass(ll_grow_by__stringbuilderPtr_Signed), p86, 1, descr=) + guard_no_exception(descr=...) + i92 = getfield_gc(p86, descr=) + i93 = int_add(i92, 1) + p94 = getfield_gc(p86, descr=) + strsetitem(p94, i92, 32) + setfield_gc(p86, i93, descr=) + call(ConstClass(ll_append_res0__stringbuilderPtr_rpy_stringPtr), p86, p80, descr=) + guard_no_exception(descr=...) + p95 = call(..., descr=) # ll_build + guard_no_exception(descr=...) + i96 = strlen(p95) + i97 = int_add_ovf(i71, i96) guard_no_overflow(descr=...) - i40 = int_sub(i4, 1) + i98 = int_sub(i74, 1) --TICK-- jump(..., descr=...) 
""") diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -240,7 +240,8 @@ handle = space.fromcache(State).get_pythonapi_handle() # Make a dll object with it - from pypy.module._rawffi.interp_rawffi import W_CDLL, RawCDLL + from pypy.module._rawffi.interp_rawffi import W_CDLL + from rpython.rlib.clibffi import RawCDLL cdll = RawCDLL(handle) return space.wrap(W_CDLL(space, "python api", cdll)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py @@ -866,25 +866,25 @@ def test_enum(self): ffi = FFI(backend=self.Backend()) - ffi.cdef("enum foo { A, B, CC, D };") - assert ffi.string(ffi.cast("enum foo", 0)) == "A" - assert ffi.string(ffi.cast("enum foo", 2)) == "CC" - assert ffi.string(ffi.cast("enum foo", 3)) == "D" + ffi.cdef("enum foo { A0, B0, CC0, D0 };") + assert ffi.string(ffi.cast("enum foo", 0)) == "A0" + assert ffi.string(ffi.cast("enum foo", 2)) == "CC0" + assert ffi.string(ffi.cast("enum foo", 3)) == "D0" assert ffi.string(ffi.cast("enum foo", 4)) == "4" - ffi.cdef("enum bar { A, B=-2, CC, D, E };") - assert ffi.string(ffi.cast("enum bar", 0)) == "A" - assert ffi.string(ffi.cast("enum bar", -2)) == "B" - assert ffi.string(ffi.cast("enum bar", -1)) == "CC" - assert ffi.string(ffi.cast("enum bar", 1)) == "E" + ffi.cdef("enum bar { A1, B1=-2, CC1, D1, E1 };") + assert ffi.string(ffi.cast("enum bar", 0)) == "A1" + assert ffi.string(ffi.cast("enum bar", -2)) == "B1" + assert ffi.string(ffi.cast("enum bar", -1)) == "CC1" + assert ffi.string(ffi.cast("enum bar", 1)) == "E1" assert ffi.cast("enum bar", -2) != ffi.cast("enum bar", -2) assert ffi.cast("enum foo", 0) != ffi.cast("enum bar", 0) assert ffi.cast("enum bar", 0) != ffi.cast("int", 0) - assert repr(ffi.cast("enum bar", -1)) == "" + assert 
repr(ffi.cast("enum bar", -1)) == "" assert repr(ffi.cast("enum foo", -1)) == ( # enums are unsigned, if "") # they contain no neg value - ffi.cdef("enum baz { A=0x1000, B=0x2000 };") - assert ffi.string(ffi.cast("enum baz", 0x1000)) == "A" - assert ffi.string(ffi.cast("enum baz", 0x2000)) == "B" + ffi.cdef("enum baz { A2=0x1000, B2=0x2000 };") + assert ffi.string(ffi.cast("enum baz", 0x1000)) == "A2" + assert ffi.string(ffi.cast("enum baz", 0x2000)) == "B2" def test_enum_in_struct(self): ffi = FFI(backend=self.Backend()) @@ -1323,6 +1323,16 @@ e = ffi.cast("enum e", 0) assert ffi.string(e) == "AA" # pick the first one arbitrarily + def test_enum_refer_previous_enum_value(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("enum e { AA, BB=2, CC=4, DD=BB, EE, FF=CC, GG=FF };") + assert ffi.string(ffi.cast("enum e", 2)) == "BB" + assert ffi.string(ffi.cast("enum e", 3)) == "EE" + assert ffi.sizeof("char[DD]") == 2 + assert ffi.sizeof("char[EE]") == 3 + assert ffi.sizeof("char[FF]") == 4 + assert ffi.sizeof("char[GG]") == 4 + def test_nested_anonymous_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -1544,6 +1554,7 @@ ffi2.include(ffi1) p = ffi2.cast("enum foo", 1) assert ffi2.string(p) == "FB" + assert ffi2.sizeof("char[FC]") == 2 def test_include_typedef_2(self): backend = self.Backend() @@ -1564,10 +1575,32 @@ assert ffi.alignof("struct is_packed") == 1 s = ffi.new("struct is_packed[2]") s[0].b = 42623381 - s[0].a = 'X' + s[0].a = b'X' s[1].b = -4892220 - s[1].a = 'Y' + s[1].a = b'Y' assert s[0].b == 42623381 - assert s[0].a == 'X' + assert s[0].a == b'X' assert s[1].b == -4892220 - assert s[1].a == 'Y' + assert s[1].a == b'Y' + + def test_define_integer_constant(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + #define DOT_0 0 + #define DOT 100 + #define DOT_OCT 0100l + #define DOT_HEX 0x100u + #define DOT_HEX2 0X10 + #define DOT_UL 1000UL + enum foo {AA, BB=DOT, CC}; + """) + lib = ffi.dlopen(None) + assert ffi.string(ffi.cast("enum 
foo", 100)) == "BB" + assert lib.DOT_0 == 0 + assert lib.DOT == 100 + assert lib.DOT_OCT == 0o100 + assert lib.DOT_HEX == 0x100 + assert lib.DOT_HEX2 == 0x10 + assert lib.DOT_UL == 1000 + + diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_function.py @@ -36,13 +36,11 @@ return self._value lib_m = 'm' -has_sinf = True if sys.platform == 'win32': #there is a small chance this fails on Mingw via environ $CC import distutils.ccompiler if distutils.ccompiler.get_default_compiler() == 'msvc': lib_m = 'msvcrt' - has_sinf = False class TestFunction(object): Backend = CTypesBackend @@ -57,8 +55,8 @@ assert x == math.sin(1.23) def test_sinf(self): - if not has_sinf: - py.test.skip("sinf not available") + if sys.platform == 'win32': + py.test.skip("no sinf found in the Windows stdlib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" float sinf(float x); diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py @@ -162,9 +162,10 @@ def test_define_not_supported_for_now(): ffi = FFI(backend=FakeBackend()) - e = py.test.raises(CDefError, ffi.cdef, "#define FOO 42") - assert str(e.value) == \ - 'only supports the syntax "#define FOO ..." for now (literally)' + e = py.test.raises(CDefError, ffi.cdef, '#define FOO "blah"') + assert str(e.value) == ( + 'only supports the syntax "#define FOO ..." 
(literally)' + ' or "#define FOO 0x1FF" for now') def test_unnamed_struct(): ffi = FFI(backend=FakeBackend()) diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py +import py, re import sys, os, math, weakref from cffi import FFI, VerificationError, VerificationMissing, model from pypy.module.test_lib_pypy.cffi_tests.support import * @@ -30,6 +30,24 @@ def setup_module(): import cffi.verifier cffi.verifier.cleanup_tmpdir() + # + # check that no $ sign is produced in the C file; it used to be the + # case that anonymous enums would produce '$enum_$1', which was + # used as part of a function name. GCC accepts such names, but it's + # apparently non-standard. + _r_comment = re.compile(r"/\*.*?\*/|//.*?$", re.DOTALL | re.MULTILINE) + _r_string = re.compile(r'\".*?\"') + def _write_source_and_check(self, file=None): + base_write_source(self, file) + if file is None: + f = open(self.sourcefilename) + data = f.read() + f.close() + data = _r_comment.sub(' ', data) + data = _r_string.sub('"skipped"', data) + assert '$' not in data + base_write_source = cffi.verifier.Verifier._write_source + cffi.verifier.Verifier._write_source = _write_source_and_check def test_module_type(): @@ -154,6 +172,9 @@ all_primitive_types = model.PrimitiveType.ALL_PRIMITIVE_TYPES +if sys.platform == 'win32': + all_primitive_types = all_primitive_types.copy() + del all_primitive_types['ssize_t'] all_integer_types = sorted(tp for tp in all_primitive_types if all_primitive_types[tp] == 'i') all_float_types = sorted(tp for tp in all_primitive_types @@ -1453,8 +1474,8 @@ assert func() == 42 def test_FILE_stored_in_stdout(): - if sys.platform == 'win32': - py.test.skip("MSVC: cannot assign to stdout") + if not sys.platform.startswith('linux'): + 
py.test.skip("likely, we cannot assign to stdout") ffi = FFI() ffi.cdef("int printf(const char *, ...); FILE *setstdout(FILE *);") lib = ffi.verify(""" @@ -1637,8 +1658,8 @@ ffi = FFI() ffi.cdef(""" int (*python_callback)(int how_many, int *values); - void *const c_callback; /* pass this ptr to C routines */ - int some_c_function(void *cb); + int (*const c_callback)(int,...); /* pass this ptr to C routines */ + int some_c_function(int(*cb)(int,...)); """) lib = ffi.verify(""" #include @@ -1885,3 +1906,60 @@ p = lib.f2(42) x = lib.f1(p) assert x == 42 + +def _run_in_multiple_threads(test1): + test1() + import sys + try: + import thread + except ImportError: + import _thread as thread + errors = [] + def wrapper(lock): + try: + test1() + except: + errors.append(sys.exc_info()) + lock.release() + locks = [] + for i in range(10): + _lock = thread.allocate_lock() + _lock.acquire() + thread.start_new_thread(wrapper, (_lock,)) + locks.append(_lock) + for _lock in locks: + _lock.acquire() + if errors: + raise errors[0][1] + +def test_errno_working_even_with_pypys_jit(): + ffi = FFI() + ffi.cdef("int f(int);") + lib = ffi.verify(""" + #include + int f(int x) { return (errno = errno + x); } + """) + @_run_in_multiple_threads + def test1(): + ffi.errno = 0 + for i in range(10000): + e = lib.f(1) + assert e == i + 1 + assert ffi.errno == e + for i in range(10000): + ffi.errno = i + e = lib.f(42) + assert e == i + 42 + +def test_getlasterror_working_even_with_pypys_jit(): + if sys.platform != 'win32': + py.test.skip("win32-only test") + ffi = FFI() + ffi.cdef("void SetLastError(DWORD);") + lib = ffi.dlopen("Kernel32.dll") + @_run_in_multiple_threads + def test1(): + for i in range(10000): + n = (1 << 29) + i + lib.SetLastError(n) + assert ffi.getwinerror()[0] == n diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_version.py +++ 
b/pypy/module/test_lib_pypy/cffi_tests/test_version.py @@ -11,7 +11,6 @@ '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change '0.8.1': '0.8', # did not change (essentially) - '0.8.2': '0.8', # did not change } def test_version(): @@ -26,7 +25,7 @@ content = open(p).read() # v = cffi.__version__ - assert ("version = '%s'\n" % BACKEND_VERSIONS.get(v, v)) in content + assert ("version = '%s'\n" % v[:3]) in content assert ("release = '%s'\n" % v) in content def test_doc_version_file(): diff --git a/pypy/module/thread/__init__.py b/pypy/module/thread/__init__.py --- a/pypy/module/thread/__init__.py +++ b/pypy/module/thread/__init__.py @@ -30,10 +30,11 @@ "NOT_RPYTHON: patches space.threadlocals to use real threadlocals" from pypy.module.thread import gil MixedModule.__init__(self, space, *args) - prev = space.threadlocals.getvalue() + prev_ec = space.threadlocals.get_ec() space.threadlocals = gil.GILThreadLocals() space.threadlocals.initialize(space) - space.threadlocals.setvalue(prev) + if prev_ec is not None: + space.threadlocals._set_ec(prev_ec) from pypy.module.posix.interp_posix import add_fork_hook from pypy.module.thread.os_thread import reinit_threads diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -126,6 +126,8 @@ release = staticmethod(release) def run(space, w_callable, args): + # add the ExecutionContext to space.threadlocals + space.threadlocals.enter_thread(space) try: space.call_args(w_callable, args) except OperationError, e: diff --git a/pypy/module/thread/test/test_gil.py b/pypy/module/thread/test/test_gil.py --- a/pypy/module/thread/test/test_gil.py +++ b/pypy/module/thread/test/test_gil.py @@ -64,13 +64,14 @@ except Exception, e: assert 0 thread.gc_thread_die() + my_gil_threadlocals = gil.GILThreadLocals() def f(): state.data = [] state.datalen1 = 0 state.datalen2 = 0 state.datalen3 = 0 state.datalen4 = 0 - state.threadlocals = 
gil.GILThreadLocals() + state.threadlocals = my_gil_threadlocals state.threadlocals.setup_threads(space) subident = thread.start_new_thread(bootstrap, ()) mainident = thread.get_ident() diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -1,4 +1,5 @@ from rpython.rlib import rthread +from rpython.rlib.objectmodel import we_are_translated from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext @@ -13,53 +14,68 @@ os_thread.bootstrap().""" def __init__(self): + "NOT_RPYTHON" self._valuedict = {} # {thread_ident: ExecutionContext()} self._cleanup_() + self.raw_thread_local = rthread.ThreadLocalReference(ExecutionContext) def _cleanup_(self): self._valuedict.clear() self._mainthreadident = 0 - self._mostrecentkey = 0 # fast minicaching for the common case - self._mostrecentvalue = None # fast minicaching for the common case - def getvalue(self): + def enter_thread(self, space): + "Notification that the current thread is about to start running." + self._set_ec(space.createexecutioncontext()) + + def try_enter_thread(self, space): + if rthread.get_ident() in self._valuedict: + return False + self.enter_thread(space) + return True + + def _set_ec(self, ec): ident = rthread.get_ident() - if ident == self._mostrecentkey: - result = self._mostrecentvalue - else: - value = self._valuedict.get(ident, None) - # slow path: update the minicache - self._mostrecentkey = ident - self._mostrecentvalue = value - result = value - return result + if self._mainthreadident == 0 or self._mainthreadident == ident: + ec._signals_enabled = 1 # the main thread is enabled + self._mainthreadident = ident + self._valuedict[ident] = ec + # This logic relies on hacks and _make_sure_does_not_move(). + # It only works because we keep the 'ec' alive in '_valuedict' too. 
+ self.raw_thread_local.set(ec) - def setvalue(self, value): - ident = rthread.get_ident() - if value is not None: - if self._mainthreadident == 0: - value._signals_enabled = 1 # the main thread is enabled - self._mainthreadident = ident - self._valuedict[ident] = value - else: + def leave_thread(self, space): + "Notification that the current thread is about to stop." + from pypy.module.thread.os_local import thread_is_stopping + ec = self.get_ec() + if ec is not None: try: - del self._valuedict[ident] - except KeyError: - pass - # update the minicache to prevent it from containing an outdated value - self._mostrecentkey = ident - self._mostrecentvalue = value + thread_is_stopping(ec) + finally: + self.raw_thread_local.set(None) + ident = rthread.get_ident() + try: + del self._valuedict[ident] + except KeyError: + pass + + def get_ec(self): + ec = self.raw_thread_local.get() + if not we_are_translated(): + assert ec is self._valuedict.get(rthread.get_ident(), None) + return ec def signals_enabled(self): - ec = self.getvalue() + ec = self.get_ec() return ec is not None and ec._signals_enabled def enable_signals(self, space): - ec = self.getvalue() + ec = self.get_ec() + assert ec is not None ec._signals_enabled += 1 def disable_signals(self, space): - ec = self.getvalue() + ec = self.get_ec() + assert ec is not None new = ec._signals_enabled - 1 if new < 0: raise wrap_thread_error(space, @@ -69,22 +85,15 @@ def getallvalues(self): return self._valuedict - def leave_thread(self, space): - "Notification that the current thread is about to stop." 
- from pypy.module.thread.os_local import thread_is_stopping - ec = self.getvalue() - if ec is not None: - try: - thread_is_stopping(ec) - finally: - self.setvalue(None) - def reinit_threads(self, space): "Called in the child process after a fork()" ident = rthread.get_ident() - ec = self.getvalue() + ec = self.get_ec() + assert ec is not None + old_sig = ec._signals_enabled if ident != self._mainthreadident: - ec._signals_enabled += 1 + old_sig += 1 self._cleanup_() self._mainthreadident = ident - self.setvalue(ec) + self._set_ec(ec) + ec._signals_enabled = old_sig diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -324,6 +324,9 @@ t = TranslationContext(config=config) self.t = t # for debugging ann = t.buildannotator() + def _do_startup(): + self.threadlocals.enter_thread(self) + ann.build_types(_do_startup, [], complete_now=False) if func is not None: ann.build_types(func, argtypes, complete_now=False) if seeobj_w: diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1,9 +1,9 @@ """The builtin bytearray implementation""" from rpython.rlib.objectmodel import ( - import_from_mixin, newlist_hint, resizelist_hint) + import_from_mixin, newlist_hint, resizelist_hint, specialize) from rpython.rlib.buffer import Buffer -from rpython.rlib.rstring import StringBuilder +from rpython.rlib.rstring import StringBuilder, ByteListBuilder from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt @@ -12,7 +12,8 @@ from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.stdtypedef import StdTypeDef -from pypy.objspace.std.stringmethods import StringMethods +from pypy.objspace.std.stringmethods import 
StringMethods, _get_buffer +from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.util import get_positive_index NON_HEX_MSG = "non-hexadecimal number found in fromhex() arg at position %d" @@ -21,18 +22,21 @@ class W_BytearrayObject(W_Root): import_from_mixin(StringMethods) - def __init__(w_self, data): - w_self.data = data + def __init__(self, data): + self.data = data - def __repr__(w_self): + def __repr__(self): """representation for debugging purposes""" - return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) + return "%s(%s)" % (self.__class__.__name__, ''.join(self.data)) def buffer_w(self, space, flags): return BytearrayBuffer(self.data, False) def _new(self, value): - return W_BytearrayObject(_make_data(value)) + return W_BytearrayObject(value) + + def _new_from_buffer(self, buffer): + return W_BytearrayObject([buffer[i] for i in range(len(buffer))]) def _new_from_list(self, value): return W_BytearrayObject(value) @@ -51,7 +55,11 @@ return space.wrap(ord(character)) def _val(self, space): - return ''.join(self.data) + return self.data + + @staticmethod + def _use_rstr_ops(space, w_other): + return False @staticmethod def _op_val(space, w_other): @@ -61,10 +69,15 @@ assert len(char) == 1 return str(char)[0] - _builder = StringBuilder + def _multi_chr(self, char): + return [char] + + @staticmethod + def _builder(size=100): + return ByteListBuilder(size) def _newlist_unwrapped(self, space, res): - return space.newlist([W_BytearrayObject(_make_data(i)) for i in res]) + return space.newlist([W_BytearrayObject(i) for i in res]) def _isupper(self, ch): return ch.isupper() @@ -200,66 +213,108 @@ return self.descr_repr(space) def descr_eq(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + return space.newbool(self.data == w_other.data) + try: - res = self._val(space) == self._op_val(space, w_other) + buffer = _get_buffer(space, w_other) except OperationError as e: if e.match(space, space.w_TypeError): return 
space.w_NotImplemented raise - return space.newbool(res) + + value = self._val(space) + buffer_len = buffer.getlength() + + if len(value) != buffer_len: + return space.newbool(False) + + min_length = min(len(value), buffer_len) + return space.newbool(_memcmp(value, buffer, min_length) == 0) def descr_ne(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + return space.newbool(self.data != w_other.data) + try: - res = self._val(space) != self._op_val(space, w_other) + buffer = _get_buffer(space, w_other) except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise - return space.newbool(res) + + value = self._val(space) + buffer_len = buffer.getlength() + + if len(value) != buffer_len: + return space.newbool(True) + + min_length = min(len(value), buffer_len) + return space.newbool(_memcmp(value, buffer, min_length) != 0) + + def _comparison_helper(self, space, w_other): + value = self._val(space) + + if isinstance(w_other, W_BytearrayObject): + other = w_other.data + other_len = len(other) + cmp = _memcmp(value, other, min(len(value), len(other))) + elif isinstance(w_other, W_BytesObject): + other = self._op_val(space, w_other) + other_len = len(other) + cmp = _memcmp(value, other, min(len(value), len(other))) + else: + try: + buffer = _get_buffer(space, w_other) + except OperationError as e: + if e.match(space, space.w_TypeError): + return False, 0, 0 + raise + other_len = len(buffer) + cmp = _memcmp(value, buffer, min(len(value), len(buffer))) + + return True, cmp, other_len def descr_lt(self, space, w_other): - try: - res = self._val(space) < self._op_val(space, w_other) - except OperationError as e: - if e.match(space, space.w_TypeError): - return space.w_NotImplemented - raise - return space.newbool(res) + success, cmp, other_len = self._comparison_helper(space, w_other) + if not success: + return space.w_NotImplemented + return space.newbool(cmp < 0 or (cmp == 0 and self._len() < other_len)) def 
descr_le(self, space, w_other): - try: - res = self._val(space) <= self._op_val(space, w_other) - except OperationError as e: - if e.match(space, space.w_TypeError): - return space.w_NotImplemented - raise - return space.newbool(res) + success, cmp, other_len = self._comparison_helper(space, w_other) + if not success: + return space.w_NotImplemented + return space.newbool(cmp < 0 or (cmp == 0 and self._len() <= other_len)) def descr_gt(self, space, w_other): - try: - res = self._val(space) > self._op_val(space, w_other) - except OperationError as e: - if e.match(space, space.w_TypeError): - return space.w_NotImplemented - raise From noreply at buildbot.pypy.org Sat Jul 12 23:48:50 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 12 Jul 2014 23:48:50 +0200 (CEST) Subject: [pypy-commit] pypy py3k: restore bytes/memoryview operations Message-ID: <20140712214850.D20571C0231@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72427:9d88ecc3ead2 Date: 2014-07-12 14:47 -0700 http://bitbucket.org/pypy/pypy/changeset/9d88ecc3ead2/ Log: restore bytes/memoryview operations diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -428,9 +428,7 @@ @staticmethod def _use_rstr_ops(space, w_other): - from pypy.objspace.std.unicodeobject import W_UnicodeObject - return (isinstance(w_other, W_BytesObject) or - isinstance(w_other, W_UnicodeObject)) + return True @staticmethod def _op_val(space, w_other): From noreply at buildbot.pypy.org Sat Jul 12 23:48:52 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 12 Jul 2014 23:48:52 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge upstream Message-ID: <20140712214852.347121C0231@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72428:bf8917ab2b8a Date: 2014-07-12 14:48 -0700 http://bitbucket.org/pypy/pypy/changeset/bf8917ab2b8a/ Log: merge upstream diff --git 
a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -55,7 +55,7 @@ space.startup() if rlocale.HAVE_LANGINFO: try: - rlocale.setlocale(rlocale.LC_ALL, '') + rlocale.setlocale(rlocale.LC_CTYPE, '') except rlocale.LocaleError: pass w_executable = space.fsdecode(space.wrapbytes(argv[0])) From noreply at buildbot.pypy.org Sun Jul 13 01:05:19 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 13 Jul 2014 01:05:19 +0200 (CEST) Subject: [pypy-commit] pypy default: fix tostring() on empty arrays Message-ID: <20140712230519.B662E1C2FBD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r72429:c686b0a3a21a Date: 2014-07-12 16:03 -0700 http://bitbucket.org/pypy/pypy/changeset/c686b0a3a21a/ Log: fix tostring() on empty arrays diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -228,8 +228,11 @@ Convert the array to an array of machine values and return the string representation. 
""" + size = self.len + if size == 0: + return space.wrap('') cbuf = self._charbuf_start() - s = rffi.charpsize2str(cbuf, self.len * self.itemsize) + s = rffi.charpsize2str(cbuf, size * self.itemsize) self._charbuf_stop() return self.space.wrap(s) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -418,6 +418,10 @@ assert self.array('u', unicode('hello')).tounicode() == \ unicode('hello') + def test_empty_tostring(self): + a = self.array('l') + assert a.tostring() == b'' + def test_buffer(self): a = self.array('h', 'Hi') buf = buffer(a) From noreply at buildbot.pypy.org Sun Jul 13 13:31:22 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sun, 13 Jul 2014 13:31:22 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix _sre Message-ID: <20140713113122.D5D5B1D2AA2@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72430:f19e309962b5 Date: 2014-07-10 10:26 -0500 http://bitbucket.org/pypy/pypy/changeset/f19e309962b5/ Log: Fix _sre diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -5,6 +5,7 @@ from pypy.interpreter.typedef import make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError +from pypy.interpreter.utf8 import utf8ord from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit @@ -28,6 +29,15 @@ import pypy.objspace.std.unicodeobject set_unicode_db(pypy.objspace.std.unicodeobject.unicodedb) + +# Monkey patch UnicodeMatchContext so we can use our own unicode type instead +# of the built-in unicode. 
+def _utf8_UnicodeMatchContext_str(self, index): + rsre_core.check_nonneg(index) + return utf8ord(self._unicodestr, index) +rsre_core.UnicodeMatchContext.str = _utf8_UnicodeMatchContext_str + + # ____________________________________________________________ # From noreply at buildbot.pypy.org Sun Jul 13 13:31:24 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sun, 13 Jul 2014 13:31:24 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix underlying list of bytearrays being reused inappropriately Message-ID: <20140713113124.2870B1D2AA2@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: Changeset: r72431:2eef95188f80 Date: 2014-07-13 05:50 -0500 http://bitbucket.org/pypy/pypy/changeset/2eef95188f80/ Log: Fix underlying list of bytearrays being reused inappropriately diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -41,6 +41,8 @@ return ''.join(self.data) def _new(self, value): + if value is self.data: + value = value[:] return W_BytearrayObject(value) def _new_from_buffer(self, buffer): diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -455,7 +455,7 @@ d = width - len(value) if d > 0: fillchar = self._multi_chr(fillchar[0]) - value += d * fillchar + value = value + fillchar * d return self._new(value) diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -223,6 +223,20 @@ check(bytearray('abc').rstrip(memoryview('c')), 'ab') check(bytearray('aba').strip('a'), 'b') + def test_xjust_no_mutate(self): + # a previous regression + b = bytearray(b'') + assert b.ljust(1) == bytearray(b' ') + assert not len(b) + + b2 = b.ljust(0) + b2 += b' ' + assert not 
len(b) + + b2 = b.rjust(0) + b2 += b' ' + assert not len(b) + def test_split(self): # methods that should return a sequence of bytearrays def check(result, expected): From noreply at buildbot.pypy.org Mon Jul 14 06:18:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 14 Jul 2014 06:18:13 +0200 (CEST) Subject: [pypy-commit] pypy default: implement __iter__for scalars (yuyichao) Message-ID: <20140714041813.552921C0231@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72432:46446b3987d3 Date: 2014-07-14 07:10 +0300 http://bitbucket.org/pypy/pypy/changeset/46446b3987d3/ Log: implement __iter__for scalars (yuyichao) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -153,12 +153,10 @@ raise OperationError(space.w_IndexError, space.wrap( "invalid index to scalar variable")) - ''' def descr_iter(self, space): # Making numpy scalar non-iterable with a valid __getitem__ method raise oefmt(space.w_TypeError, "'%T' object is not iterable", self) - ''' def descr_str(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) @@ -513,6 +511,9 @@ return space.wrap(dtype.itemtype.to_str(read_val)) return read_val + def descr_iter(self, space): + return space.newseqiter(self) + def descr_setitem(self, space, w_item, w_value): if space.isinstance_w(w_item, space.w_basestring): item = space.str_w(w_item) @@ -562,7 +563,7 @@ __new__ = interp2app(W_GenericBox.descr__new__.im_func), __getitem__ = interp2app(W_GenericBox.descr_getitem), - #__iter__ = interp2app(W_GenericBox.descr_iter), + __iter__ = interp2app(W_GenericBox.descr_iter), __str__ = interp2app(W_GenericBox.descr_str), __repr__ = interp2app(W_GenericBox.descr_str), __format__ = interp2app(W_GenericBox.descr_format), @@ -784,6 +785,7 @@ __new__ = interp2app(W_VoidBox.descr__new__.im_func), __getitem__ = interp2app(W_VoidBox.descr_getitem), __setitem__ = 
interp2app(W_VoidBox.descr_setitem), + __iter__ = interp2app(W_VoidBox.descr_iter), ) W_CharacterBox.typedef = TypeDef("numpy.character", W_FlexibleBox.typedef, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -292,7 +292,6 @@ assert np.isnan(b/a) def test_scalar_iter(self): - skip('not implemented yet') from numpypy import int8, int16, int32, int64, float32, float64 for t in int8, int16, int32, int64, float32, float64: try: From noreply at buildbot.pypy.org Mon Jul 14 22:32:59 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 14 Jul 2014 22:32:59 +0200 (CEST) Subject: [pypy-commit] pypy default: redo changeset 8344e652a12e Message-ID: <20140714203259.AB0E91C05F1@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72433:faab0439c184 Date: 2014-07-14 21:14 +0300 http://bitbucket.org/pypy/pypy/changeset/faab0439c184/ Log: redo changeset 8344e652a12e diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -6,7 +6,7 @@ from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) from rpython.rlib import jit -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, compute_hash from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from pypy.module.micronumpy import types, boxes, base, support, constants as NPY from pypy.module.micronumpy.appbridge import get_appbridge_cache @@ -254,8 +254,38 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) + def _compute_hash(self, space, x): + from rpython.rlib.rarithmetic import intmask + if self.fields is None and self.subdtype is None: + endian = self.byteorder + if endian == NPY.NATIVE: + endian = NPY.NATBYTE + 
flags = 0 + y = 0x345678 + y = intmask((1000003 * y) ^ ord(self.kind[0])) + y = intmask((1000003 * y) ^ ord(endian[0])) + y = intmask((1000003 * y) ^ flags) + y = intmask((1000003 * y) ^ self.elsize) + if self.is_flexible(): + y = intmask((1000003 * y) ^ self.alignment) + return intmask((1000003 * x) ^ y) + if self.fields is not None: + for name, (offset, subdtype) in self.fields.iteritems(): + assert isinstance(subdtype, W_Dtype) + y = intmask(1000003 * (0x345678 ^ compute_hash(name))) + y = intmask(1000003 * (y ^ compute_hash(offset))) + y = intmask(1000003 * (y ^ subdtype._compute_hash(space, + 0x345678))) + x = intmask(x ^ y) + if self.subdtype is not None: + for s in self.shape: + x = intmask((1000003 * x) ^ compute_hash(s)) + x = self.base._compute_hash(space, x) + return x + def descr_hash(self, space): - return space.hash(self.descr_reduce(space)) + return space.wrap(self._compute_hash(space, 0x345678)) + def descr_str(self, space): if self.fields: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -368,15 +368,30 @@ d5 = numpy.dtype([('f0', 'i4'), ('f1', d2)]) d6 = numpy.dtype([('f0', 'i4'), ('f1', d3)]) import sys - if '__pypy__' not in sys.builtin_module_names: - assert hash(d1) == hash(d2) - assert hash(d1) != hash(d3) - assert hash(d4) == hash(d5) - assert hash(d4) != hash(d6) - else: - for d in [d1, d2, d3, d4, d5, d6]: - raises(TypeError, hash, d) + assert hash(d1) == hash(d2) + assert hash(d1) != hash(d3) + assert hash(d4) == hash(d5) + assert hash(d4) != hash(d6) + def test_record_hash(self): + from numpy import dtype + # make sure the fields hash return different value + # for different order of field in a structure + + # swap names + t1 = dtype([('x', ' Author: mattip Branch: Changeset: r72434:26c78d4a4421 Date: 2014-07-14 23:25 +0300 http://bitbucket.org/pypy/pypy/changeset/26c78d4a4421/ Log: fix 
dtype hash - fields can be dicts or {} or None diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -73,7 +73,7 @@ self.base = subdtype.base def __repr__(self): - if self.fields is not None: + if self.fields: return '' % self.fields return '' % self.itemtype @@ -256,7 +256,7 @@ def _compute_hash(self, space, x): from rpython.rlib.rarithmetic import intmask - if self.fields is None and self.subdtype is None: + if not self.fields and self.subdtype is None: endian = self.byteorder if endian == NPY.NATIVE: endian = NPY.NATBYTE @@ -269,7 +269,7 @@ if self.is_flexible(): y = intmask((1000003 * y) ^ self.alignment) return intmask((1000003 * x) ^ y) - if self.fields is not None: + if self.fields: for name, (offset, subdtype) in self.fields.iteritems(): assert isinstance(subdtype, W_Dtype) y = intmask(1000003 * (0x345678 ^ compute_hash(name))) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -428,6 +428,8 @@ for t in [np.int_, np.float_]: dt = np.dtype(t) dt1 = dt.newbyteorder().newbyteorder() + assert dt.isbuiltin + assert not dt1.isbuiltin dt2 = dt.newbyteorder("<") dt3 = dt.newbyteorder(">") assert dt.byteorder != dt1.byteorder From noreply at buildbot.pypy.org Mon Jul 14 23:05:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 14 Jul 2014 23:05:03 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: draft of a draft Message-ID: <20140714210503.622FE1D2335@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5354:192a9484ee46 Date: 2014-07-14 23:05 +0200 http://bitbucket.org/pypy/extradoc/changeset/192a9484ee46/ Log: draft of a draft diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/talk.rst 
@@ -0,0 +1,62 @@ +------------------------------------------------------------------------------ +Using All These Cores: Transactional Memory in PyPy +------------------------------------------------------------------------------ + + +=========================================== +Part 1 - Intro and Current Status +=========================================== + +xxx + + +=========================================== +Part 2 - Under The Hood +=========================================== + + +- pictures "GIL" and "no GIL" + +- zoom with reads and writes + +- keep boundaries, each block is a _transaction_ + +- completely the same semantics as when run with a GIL + +- write-write conflict + +- deadlock detection and resolution by abort-retry + +- read-write conflict: avoids (1) crashes, + (2) reads-from-the-past, (3) reads-from-the-future + +- reads are more common than writes: optimize read barriers + +- pypy-stm: write a thread-local flag "this object has been read", + show code for read barrier and fast-path of write barrier + +- reads are not synchronized at all between CPUs, but it's wrong + to read data written by other in-progress transactions; + so we have to write elsewhere + +- but what if we read later an object we modified? 
doing any kind + of check in the read barrier makes it much more costly + +- a solution would be to give each thread its own "segment" of + memory, and copy data between them only at known points + +- mmap trick: we do that, but we use mmap sharing to view the same + pages of memory at several addresses in memory + +- show clang source code and assembler for %gs + +- picture with 15/16 objects, 1/16 read markers, one page control data + +- picture with nursery -- the GC can use the same write barrier + + +=========================================== +Part 3 - Multithreading Revisited +=========================================== + +xxx From noreply at buildbot.pypy.org Tue Jul 15 14:30:05 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 15 Jul 2014 14:30:05 +0200 (CEST) Subject: [pypy-commit] benchmarks default: add an nqueens multithreaded benchmark Message-ID: <20140715123005.70F9D1D2A8D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r265:dd64736032e4 Date: 2014-07-15 14:31 +0200 http://bitbucket.org/pypy/benchmarks/changeset/dd64736032e4/ Log: add an nqueens multithreaded benchmark diff --git a/multithread/nqueens/nqueens.py b/multithread/nqueens/nqueens.py new file mode 100644 --- /dev/null +++ b/multithread/nqueens/nqueens.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# Based on code from http://code.activestate.com/recipes/576647/ +# by Raymond Hettinger +# +# It is a bit problematic because find_solutions spends half +# of the execution time constructing the permutations. Thus +# only half the execution is parallelised. 
+ + +import sys +import time, random +from common.abstract_threading import ( + atomic, Future, set_thread_pool, ThreadPool, + hint_commit_soon) + +from itertools import permutations +import itertools + + +def chunks(iterable, size): + it = iter(iterable) + item = list(itertools.islice(it, size)) + while item: + yield item + item = list(itertools.islice(it, size)) + + + +def check_solutions(n, cols, perms): + sols = [] + with atomic: + for vec in perms: + if n == len(set(vec[i]+i for i in cols)) \ + == len(set(vec[i]-i for i in cols)): + sols.append(vec) + return sols + + +def find_solutions(n): + solutions = [] + fs = [] + cols = range(n) + for perms in chunks(permutations(cols), 100000): + hint_commit_soon() + fs.append(Future(check_solutions, n, cols, perms)) + print "Futures:", len(fs) + for f in fs: + solutions.extend(f()) + + print "found:", len(solutions) + + +def run(threads=2, n=10): + threads = int(threads) + n = int(n) + + set_thread_pool(ThreadPool(threads)) + + find_solutions(n) + + + # shutdown current pool + set_thread_pool(None) + + + +if __name__ == "__main__": + run() From noreply at buildbot.pypy.org Tue Jul 15 15:43:42 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:42 +0200 (CEST) Subject: [pypy-commit] pypy wrap-bytes: A branch to experiment with space.wrap(bytes): only accept proven ascii strings Message-ID: <20140715134342.2A0D61C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: wrap-bytes Changeset: r72435:e033e11382a4 Date: 2014-07-08 21:08 +0200 http://bitbucket.org/pypy/pypy/changeset/e033e11382a4/ Log: A branch to experiment with space.wrap(bytes): only accept proven ascii strings From noreply at buildbot.pypy.org Tue Jul 15 15:43:43 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:43 +0200 (CEST) Subject: [pypy-commit] pypy SomeString-charclass: A branch to experiment with "character class", annotation about the content of a string Message-ID: 
<20140715134343.834581C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: SomeString-charclass Changeset: r72436:412df9699d24 Date: 2014-07-13 14:42 +0200 http://bitbucket.org/pypy/pypy/changeset/412df9699d24/ Log: A branch to experiment with "character class", annotation about the content of a string From noreply at buildbot.pypy.org Tue Jul 15 15:43:44 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:44 +0200 (CEST) Subject: [pypy-commit] pypy SomeString-charclass: Replace no_nul by the more general "character kind". Message-ID: <20140715134344.D06971C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: SomeString-charclass Changeset: r72437:d9ddbd244e06 Date: 2014-07-13 18:39 +0200 http://bitbucket.org/pypy/pypy/changeset/d9ddbd244e06/ Log: Replace no_nul by the more general "character kind". diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -9,7 +9,7 @@ SomeDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, SomeBuiltinMethod, SomeIterator, SomePBC, SomeNone, SomeFloat, s_None, - SomeByteArray, SomeWeakRef, SomeSingleFloat, + SomeByteArray, SomeWeakRef, SomeSingleFloat, AnyChar, AsciiChar, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) @@ -372,12 +372,14 @@ def union((str1, str2)): can_be_None = str1.can_be_None or str2.can_be_None - no_nul = str1.no_nul and str2.no_nul - return SomeString(can_be_None=can_be_None, no_nul=no_nul) + charkind = str1.charkind.union(str2.charkind) + return SomeString(can_be_None=can_be_None, + charkind=charkind) def add((str1, str2)): # propagate const-ness to help getattr(obj, 'prefix' + const_name) - result = SomeString(no_nul=str1.no_nul and str2.no_nul) + charkind = str1.charkind.union(str2.charkind) + result = 
SomeString(charkind=charkind) if str1.is_immutable_constant() and str2.is_immutable_constant(): result.const = str1.const + str2.const return result @@ -407,8 +409,8 @@ class __extend__(pairtype(SomeChar, SomeChar)): def union((chr1, chr2)): - no_nul = chr1.no_nul and chr2.no_nul - return SomeChar(no_nul=no_nul) + charkind = chr1.charkind.union(chr2.charkind) + return SomeChar(charkind=charkind) class __extend__(pairtype(SomeChar, SomeUnicodeCodePoint), @@ -442,17 +444,15 @@ SomeUnicodeString))): raise AnnotatorError( "string formatting mixing strings and unicode not supported") - no_nul = s_string.no_nul + charkind = s_string.charkind for s_item in s_tuple.items: - if isinstance(s_item, SomeFloat): - pass # or s_item is a subclass, like SomeInteger - elif (isinstance(s_item, SomeString) or - isinstance(s_item, SomeUnicodeString)) and s_item.no_nul: - pass + if isinstance(s_item, SomeFloat): # or a subclass, like SomeInteger + charkind = charkind.union(AsciiChar()) + elif isinstance(s_item, (SomeString, SomeUnicodeString)): + charkind = charkind.union(s_item.charkind) else: - no_nul = False - break - return s_string.__class__(no_nul=no_nul) + charkind = AnyChar() # Be conservative + return s_string.__class__(charkind=charkind) class __extend__(pairtype(SomeString, SomeObject), @@ -616,19 +616,19 @@ class __extend__(pairtype(SomeString, SomeInteger)): def getitem((str1, int2)): - return SomeChar(no_nul=str1.no_nul) + return SomeChar(charkind=str1.charkind) getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - return SomeChar(no_nul=str1.no_nul) + return SomeChar(charkind=str1.charkind) getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx - def mul((str1, int2)): # xxx do we want to support this - return SomeString(no_nul=str1.no_nul) + def mul((str1, int2)): + return SomeString(charkind=str1.charkind) class __extend__(pairtype(SomeUnicodeString, SomeInteger)): def getitem((str1, int2)): diff --git 
a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -12,7 +12,8 @@ SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, SomeByteArray, SomeConstantType) + SomeWeakRef, SomeByteArray, SomeConstantType, + charkind_from_const) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef @@ -230,11 +231,11 @@ else: raise Exception("seeing a prebuilt long (value %s)" % hex(x)) elif issubclass(tp, str): # py.lib uses annotated str subclasses - no_nul = not '\x00' in x + charkind = charkind_from_const(x) if len(x) == 1: - result = SomeChar(no_nul=no_nul) + result = SomeChar(charkind=charkind) else: - result = SomeString(no_nul=no_nul) + result = SomeString(charkind=charkind) elif tp is unicode: if len(x) == 1: result = SomeUnicodeCodePoint() diff --git a/rpython/annotator/listdef.py b/rpython/annotator/listdef.py --- a/rpython/annotator/listdef.py +++ b/rpython/annotator/listdef.py @@ -1,4 +1,4 @@ -from rpython.annotator.model import s_ImpossibleValue +from rpython.annotator.model import s_ImpossibleValue, s_Str0 from rpython.annotator.model import SomeList, SomeString from rpython.annotator.model import unionof, TLS, UnionError, AnnotatorError @@ -210,5 +210,5 @@ #else: it's fine, don't set immutable=True at all (see # test_can_merge_immutable_list_with_regular_list) -s_list_of_strings = SomeList(ListDef(None, SomeString(no_nul=True), +s_list_of_strings = SomeList(ListDef(None, s_Str0, resized = True)) diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -217,6 +217,49 @@ self.knowntypedata = knowntypedata +# Character classes. 
+ +class AnyChar(object): + no_nul = False + _instances = {} + + def __new__(cls): + return cls._instances[cls] + + @classmethod + def _register(cls): + cls._instances[cls] = object.__new__(cls) + + def __repr__(self): + return type(self).__name__ + + def __eq__(self, other): + if TLS.check_str_without_nul: + if self.no_nul != other.no_nul: + return False + return True + + def union(self, other): + return self +AnyChar._register() + +class NoNulChar(AnyChar): + no_nul = True + + def union(self, other): + if other.no_nul: + return self + else: + return AnyChar() +NoNulChar._register() + +AsciiChar = AnyChar # So far + +def charkind_from_const(value): + if '\x00' not in value: + return NoNulChar() + return AnyChar() + class SomeStringOrUnicode(SomeObject): """Base class for shared implementation of SomeString, SomeUnicodeString and SomeByteArray. @@ -225,36 +268,31 @@ immutable = True can_be_None = False - no_nul = False # No NUL character in the string. - def __init__(self, can_be_None=False, no_nul=False): + def __init__(self, can_be_None=False, charkind=None): assert type(self) is not SomeStringOrUnicode if can_be_None: self.can_be_None = True - if no_nul: - assert self.immutable #'no_nul' cannot be used with SomeByteArray - self.no_nul = True + if charkind: + # charkind cannot be used with SomeByteArray + assert self.immutable + self.charkind = charkind + else: + self.charkind = AnyChar() def can_be_none(self): return self.can_be_None - def __eq__(self, other): - if self.__class__ is not other.__class__: - return False - d1 = self.__dict__ - d2 = other.__dict__ - if not TLS.check_str_without_nul: - d1 = d1.copy() - d1['no_nul'] = 0 - d2 = d2.copy() - d2['no_nul'] = 0 - return d1 == d2 - def nonnoneify(self): - return self.__class__(can_be_None=False, no_nul=self.no_nul) + return self.__class__(can_be_None=False, + charkind=self.charkind) def nonnulify(self): - return self.__class__(can_be_None=self.can_be_None, no_nul=True) + if self.charkind == NoNulChar(): + 
charkind = NoNulChar() + elif self.charkind == AnyChar(): + charkind = NoNulChar() + return self.__class__(can_be_None=self.can_be_None, charkind=charkind) class SomeString(SomeStringOrUnicode): @@ -262,7 +300,7 @@ knowntype = str def noneify(self): - return SomeString(can_be_None=True, no_nul=self.no_nul) + return SomeString(can_be_None=True, charkind=self.charkind) class SomeUnicodeString(SomeStringOrUnicode): @@ -270,7 +308,7 @@ knowntype = unicode def noneify(self): - return SomeUnicodeString(can_be_None=True, no_nul=self.no_nul) + return SomeUnicodeString(can_be_None=True, charkind=self.charkind) class SomeByteArray(SomeStringOrUnicode): @@ -282,18 +320,18 @@ "Stands for an object known to be a string of length 1." can_be_None = False - def __init__(self, no_nul=False): # no 'can_be_None' argument here - if no_nul: - self.no_nul = True + def __init__(self, charkind=None): + # no 'can_be_None' argument here + SomeString.__init__(self, charkind=charkind) class SomeUnicodeCodePoint(SomeUnicodeString): "Stands for an object known to be a unicode codepoint." 
can_be_None = False - def __init__(self, no_nul=False): # no 'can_be_None' argument here - if no_nul: - self.no_nul = True + def __init__(self, charkind=False): + # no 'can_be_None' argument here + SomeUnicodeString.__init__(self, charkind=charkind) SomeString.basestringclass = SomeString SomeString.basecharclass = SomeChar @@ -586,8 +624,8 @@ s_Bool = SomeBool() s_Int = SomeInteger() s_ImpossibleValue = SomeImpossibleValue() -s_Str0 = SomeString(no_nul=True) -s_Unicode0 = SomeUnicodeString(no_nul=True) +s_Str0 = SomeString(charkind=NoNulChar) +s_Unicode0 = SomeUnicodeString(charkind=NoNulChar) # ____________________________________________________________ diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -428,7 +428,7 @@ return ''.join(g(n)) s = a.build_types(f, [int]) assert s.knowntype == str - assert s.no_nul + assert s.charkind.no_nul def test_str_split(self): a = self.RPythonAnnotator() @@ -441,26 +441,26 @@ s = a.build_types(f, [int]) assert isinstance(s, annmodel.SomeList) s_item = s.listdef.listitem.s_value - assert s_item.no_nul + assert s_item.charkind.no_nul def test_str_split_nul(self): def f(n): return n.split('\0')[0] a = self.RPythonAnnotator() a.translator.config.translation.check_str_without_nul = True - s = a.build_types(f, [annmodel.SomeString(no_nul=False, can_be_None=False)]) + s = a.build_types(f, [annmodel.SomeString()]) assert isinstance(s, annmodel.SomeString) assert not s.can_be_None - assert s.no_nul + assert s.charkind.no_nul def g(n): return n.split('\0', 1)[0] a = self.RPythonAnnotator() a.translator.config.translation.check_str_without_nul = True - s = a.build_types(g, [annmodel.SomeString(no_nul=False, can_be_None=False)]) + s = a.build_types(g, [annmodel.SomeString()]) assert isinstance(s, annmodel.SomeString) assert not s.can_be_None - assert not s.no_nul + assert not s.charkind.no_nul 
def test_str_splitlines(self): a = self.RPythonAnnotator() @@ -479,8 +479,9 @@ return a_str.rstrip(' ') else: return a_str.lstrip(' ') - s = a.build_types(f, [int, annmodel.SomeString(no_nul=True)]) - assert s.no_nul + s = a.build_types(f, [int, annmodel.SomeString( + charkind=annmodel.NoNulChar)]) + assert s.charkind.no_nul def test_str_mul(self): a = self.RPythonAnnotator() @@ -2016,7 +2017,7 @@ a = self.RPythonAnnotator() s = a.build_types(f, [int]) assert s.can_be_None - assert s.no_nul + assert s.charkind.no_nul def test_str_or_None(self): def f(a): @@ -2032,7 +2033,7 @@ a = self.RPythonAnnotator() s = a.build_types(f, [int]) assert s.can_be_None - assert s.no_nul + assert s.charkind.no_nul def test_emulated_pbc_call_simple(self): def f(a,b): @@ -2098,15 +2099,16 @@ a = self.RPythonAnnotator() s = a.build_types(f, []) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def test_mul_str0(self): def f(s): return s*10 a = self.RPythonAnnotator() - s = a.build_types(f, [annmodel.SomeString(no_nul=True)]) + s = a.build_types(f, [annmodel.SomeString( + charkind=annmodel.NoNulChar())]) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def test_getitem_str0(self): def f(s, n): @@ -2120,10 +2122,11 @@ a = self.RPythonAnnotator() a.translator.config.translation.check_str_without_nul = True - s = a.build_types(f, [annmodel.SomeString(no_nul=True), + s = a.build_types(f, [annmodel.SomeString( + charkind=annmodel.NoNulChar()), annmodel.SomeInteger()]) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def test_non_none_and_none_with_isinstance(self): class A(object): @@ -3353,7 +3356,7 @@ a = self.RPythonAnnotator() s = a.build_types(f, [str]) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def f(x): return u'a'.replace(x, u'b') @@ -3861,10 +3864,10 @@ return i a = self.RPythonAnnotator() 
a.translator.config.translation.check_str_without_nul = True - s = a.build_types(f, [annmodel.SomeString(no_nul=False)]) + s = a.build_types(f, [annmodel.SomeString(charkind=annmodel.AnyChar())]) assert isinstance(s, annmodel.SomeString) assert s.can_be_None - assert s.no_nul + assert s.charkind.no_nul def test_no___call__(self): class X(object): @@ -3884,7 +3887,7 @@ a = self.RPythonAnnotator() s = a.build_types(fn, []) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def test_os_getenv(self): import os @@ -3893,7 +3896,7 @@ a = self.RPythonAnnotator() s = a.build_types(fn, []) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def test_base_iter(self): class A(object): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -9,7 +9,7 @@ SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod, SomeFloat, SomeIterator, SomePBC, SomeNone, SomeType, s_ImpossibleValue, - s_Bool, s_None, unionof, add_knowntypedata, + s_Bool, s_None, unionof, add_knowntypedata, NoNulChar, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from rpython.annotator import builtin @@ -459,13 +459,13 @@ return SomeInteger(nonneg=True) def method_strip(self, chr=None): - return self.basestringclass(no_nul=self.no_nul) + return self.basestringclass(charkind=self.charkind) def method_lstrip(self, chr=None): - return self.basestringclass(no_nul=self.no_nul) + return self.basestringclass(charkind=self.charkind) def method_rstrip(self, chr=None): - return self.basestringclass(no_nul=self.no_nul) + return self.basestringclass(charkind=self.charkind) def method_join(self, s_list): if s_None.contains(s_list): @@ -475,8 +475,8 @@ if isinstance(self, SomeUnicodeString): 
return immutablevalue(u"") return immutablevalue("") - no_nul = self.no_nul and s_item.no_nul - return self.basestringclass(no_nul=no_nul) + charkind = self.charkind.union(s_item.charkind) + return self.basestringclass(charkind=charkind) def iter(self): return SomeIterator(self) @@ -487,23 +487,23 @@ def method_split(self, patt, max=-1): if max == -1 and patt.is_constant() and patt.const == "\0": - no_nul = True + charkind = NoNulChar else: - no_nul = self.no_nul - s_item = self.basestringclass(no_nul=no_nul) + charkind = self.charkind + s_item = self.basestringclass(charkind=charkind) return getbookkeeper().newlist(s_item) def method_rsplit(self, patt, max=-1): - s_item = self.basestringclass(no_nul=self.no_nul) + s_item = self.basestringclass(charkind=self.charkind) return getbookkeeper().newlist(s_item) def method_replace(self, s1, s2): - return self.basestringclass(no_nul=self.no_nul and s2.no_nul) + return self.basestringclass( + charkind=self.charkind.union(s2.charkind)) def getslice(self, s_start, s_stop): check_negative_slice(s_start, s_stop) - result = self.basestringclass(no_nul=self.no_nul) - return result + return self.basestringclass(charkind=self.charkind) def op_contains(self, s_element): if s_element.is_constant() and s_element.const == "\0": diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ b/rpython/rlib/rmarshal.py @@ -230,16 +230,14 @@ raise ValueError("expected a string") length = readlong(loader) return assert_str0(readstr(loader, length)) -add_loader(annmodel.SomeString(can_be_None=False, no_nul=True), - load_string_nonul) +add_loader(annmodel.s_Str0, load_string_nonul) def load_string(loader): if readchr(loader) != TYPE_STRING: raise ValueError("expected a string") length = readlong(loader) return readstr(loader, length) -add_loader(annmodel.SomeString(can_be_None=False, no_nul=False), - load_string) +add_loader(annmodel.SomeString(), load_string) def load_string_or_none_nonul(loader): t = 
readchr(loader) @@ -250,8 +248,7 @@ return None else: raise ValueError("expected a string or None") -add_loader(annmodel.SomeString(can_be_None=True, no_nul=True), - load_string_or_none_nonul) +add_loader(annmodel.s_Str0.noneify(), load_string_or_none_nonul) def load_string_or_none(loader): t = readchr(loader) @@ -262,8 +259,7 @@ return None else: raise ValueError("expected a string or None") -add_loader(annmodel.SomeString(can_be_None=True, no_nul=False), - load_string_or_none) +add_loader(annmodel.SomeString().noneify(), load_string_or_none) # ____________________________________________________________ # diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -3,7 +3,7 @@ import sys from rpython.annotator.model import (SomeObject, SomeString, s_None, SomeChar, - SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePBC) + SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePBC, NoNulChar) from rpython.rtyper.llannotation import SomePtr from rpython.rlib import jit from rpython.rlib.objectmodel import newlist_hint, specialize @@ -526,11 +526,11 @@ if s_None.contains(s_obj): return s_obj assert isinstance(s_obj, (SomeString, SomeUnicodeString)) - if s_obj.no_nul: + if s_obj.charkind.no_nul: return s_obj new_s_obj = SomeObject.__new__(s_obj.__class__) new_s_obj.__dict__ = s_obj.__dict__.copy() - new_s_obj.no_nul = True + new_s_obj.charkind = NoNulChar() return new_s_obj def specialize_call(self, hop): @@ -548,7 +548,7 @@ def compute_result_annotation(self, s_obj): if not isinstance(s_obj, (SomeString, SomeUnicodeString)): return s_obj - if not s_obj.no_nul: + if not s_obj.charkind.no_nul: raise ValueError("Value is not no_nul") def specialize_call(self, hop): diff --git a/rpython/rlib/test/test_signature.py b/rpython/rlib/test/test_signature.py --- a/rpython/rlib/test/test_signature.py +++ b/rpython/rlib/test/test_signature.py @@ -119,8 +119,8 @@ @signature(types.unicode0(), 
returns=types.str0()) def f(u): return 'str' - assert getsig(f) == [model.SomeUnicodeString(no_nul=True), - model.SomeString(no_nul=True)] + assert getsig(f) == [model.SomeUnicodeString.s_Str0, + model.SomeString.s_Unicode0] def test_ptr(): policy = LowLevelAnnotatorPolicy() diff --git a/rpython/rlib/types.py b/rpython/rlib/types.py --- a/rpython/rlib/types.py +++ b/rpython/rlib/types.py @@ -36,11 +36,14 @@ def unicode0(): - return model.SomeUnicodeString(no_nul=True) + return model.s_Unicode0() def str(can_be_None=False): - return model.SomeString(can_be_None=can_be_None) + if can_be_None: + return model.SomeString().noneify() + else: + return model.SomeString() def bytearray(): @@ -48,7 +51,7 @@ def str0(): - return model.SomeString(no_nul=True) + return model.s_str0 def char(): diff --git a/rpython/rtyper/module/ll_os_environ.py b/rpython/rtyper/module/ll_os_environ.py --- a/rpython/rtyper/module/ll_os_environ.py +++ b/rpython/rtyper/module/ll_os_environ.py @@ -181,8 +181,7 @@ register_external(r_envitems, [], [(str0, str0)], export_name='ll_os.ll_os_envitems', llimpl=envitems_llimpl) -register_external(r_getenv, [str0], - annmodel.SomeString(can_be_None=True, no_nul=True), +register_external(r_getenv, [str0], str0.noneify(), export_name='ll_os.ll_os_getenv', llimpl=getenv_llimpl) register_external(r_putenv, [str0, str0], annmodel.s_None, diff --git a/rpython/rtyper/test/test_extfunc.py b/rpython/rtyper/test/test_extfunc.py --- a/rpython/rtyper/test/test_extfunc.py +++ b/rpython/rtyper/test/test_extfunc.py @@ -138,7 +138,7 @@ assert isinstance(s, annmodel.SomeString) def test_str0(self): - str0 = annmodel.SomeString(no_nul=True) + str0 = annmodel.s_Str0 def os_open(s): pass register_external(os_open, [str0], None) @@ -156,7 +156,7 @@ a.build_types(g, [str0]) # Does not raise def test_list_of_str0(self): - str0 = annmodel.SomeString(no_nul=True) + str0 = annmodel.s_Str0 def os_execve(l): pass register_external(os_execve, [[str0]], None) From noreply at 
buildbot.pypy.org Tue Jul 15 15:43:46 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:46 +0200 (CEST) Subject: [pypy-commit] pypy SomeString-charclass: Progress, add AsciiChar kind. Message-ID: <20140715134346.20F2E1C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: SomeString-charclass Changeset: r72438:58e84b592e0a Date: 2014-07-13 20:12 +0200 http://bitbucket.org/pypy/pypy/changeset/58e84b592e0a/ Log: Progress, add AsciiChar kind. diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -220,7 +220,10 @@ # Character classes. class AnyChar(object): + """A character of any value.""" no_nul = False + is_ascii = False + _instances = {} def __new__(cls): @@ -244,6 +247,9 @@ AnyChar._register() class NoNulChar(AnyChar): + """Any character except NUL '\0'. + Strings of this kind can be converted to char* with no loss.""" + no_nul = True def union(self, other): @@ -253,13 +259,35 @@ return AnyChar() NoNulChar._register() -AsciiChar = AnyChar # So far +class AsciiChar(NoNulChar): + """A character in the range(1, 128). + + Strings of this kind can be decoded faster to unicode.""" + + is_ascii = True + + def union(self, other): + if other.is_ascii: + return self + elif other.no_nul: + return NoNulChar() + else: + return AnyChar() +AsciiChar._register() + def charkind_from_const(value): + try: + value.decode('ascii') + except UnicodeDecodeError: + pass + else: + return AsciiChar() if '\x00' not in value: return NoNulChar() return AnyChar() + class SomeStringOrUnicode(SomeObject): """Base class for shared implementation of SomeString, SomeUnicodeString and SomeByteArray. 
@@ -284,14 +312,16 @@ return self.can_be_None def nonnoneify(self): - return self.__class__(can_be_None=False, - charkind=self.charkind) + return self.basestringclass(can_be_None=False, charkind=self.charkind) + + def noneify(self): + return self.basestringclass(can_be_None=True, charkind=self.charkind) def nonnulify(self): - if self.charkind == NoNulChar(): - charkind = NoNulChar() - elif self.charkind == AnyChar(): - charkind = NoNulChar() + if self.charkind.no_nul: + return self + assert type(self.charkind) is AnyChar # so far the only one. + charkind = NoNulChar() return self.__class__(can_be_None=self.can_be_None, charkind=charkind) @@ -299,17 +329,11 @@ "Stands for an object which is known to be a string." knowntype = str - def noneify(self): - return SomeString(can_be_None=True, charkind=self.charkind) - class SomeUnicodeString(SomeStringOrUnicode): "Stands for an object which is known to be an unicode string" knowntype = unicode - def noneify(self): - return SomeUnicodeString(can_be_None=True, charkind=self.charkind) - class SomeByteArray(SomeStringOrUnicode): immutable = False diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -487,7 +487,7 @@ def method_split(self, patt, max=-1): if max == -1 and patt.is_constant() and patt.const == "\0": - charkind = NoNulChar + charkind = NoNulChar() else: charkind = self.charkind s_item = self.basestringclass(charkind=charkind) diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -523,15 +523,9 @@ _about_ = assert_str0 def compute_result_annotation(self, s_obj): - if s_None.contains(s_obj): + if s_None.contains(s_obj): # probably a future str_or_None return s_obj - assert isinstance(s_obj, (SomeString, SomeUnicodeString)) - if s_obj.charkind.no_nul: - return s_obj - new_s_obj = SomeObject.__new__(s_obj.__class__) - new_s_obj.__dict__ = 
s_obj.__dict__.copy() - new_s_obj.charkind = NoNulChar() - return new_s_obj + return s_obj.nonnulify() def specialize_call(self, hop): hop.exception_cannot_occur() From noreply at buildbot.pypy.org Tue Jul 15 15:43:47 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:47 +0200 (CEST) Subject: [pypy-commit] pypy SomeString-charclass: Add Utf8Char kind. Will be used by space.identifier_w() Message-ID: <20140715134347.51D961C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: SomeString-charclass Changeset: r72439:85c3cda1fc4d Date: 2014-07-13 20:24 +0200 http://bitbucket.org/pypy/pypy/changeset/85c3cda1fc4d/ Log: Add Utf8Char kind. Will be used by space.identifier_w() diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -223,6 +223,7 @@ """A character of any value.""" no_nul = False is_ascii = False + is_utf8 = False _instances = {} @@ -259,7 +260,24 @@ return AnyChar() NoNulChar._register() -class AsciiChar(NoNulChar): +class Utf8Char(NoNulChar): + """A character compatible with utf8 encoding. + + Does not mean that the string can always be decoded with utf8, + specially for slices or single characters. This kind indicates that + utf8 is the encoding to use when converting to unicode.""" + is_utf8 = True + + def union(self, other): + if other.is_utf8: + return self + elif other.no_nul: + return NoNulChar() + else: + return AnyChar() +Utf8Char._register() + +class AsciiChar(Utf8Char): """A character in the range(1, 128). Strings of this kind can be decoded faster to unicode.""" @@ -269,6 +287,8 @@ def union(self, other): if other.is_ascii: return self + elif other.is_utf8: + return Utf8Char() elif other.no_nul: return NoNulChar() else: @@ -277,6 +297,8 @@ def charkind_from_const(value): + # Probably no need to handle utf-8, we don't have such constants + # in pypy code. 
try: value.decode('ascii') except UnicodeDecodeError: From noreply at buildbot.pypy.org Tue Jul 15 15:43:48 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:48 +0200 (CEST) Subject: [pypy-commit] pypy wrap-bytes: hg merge SomeString-charclass, directly into py3k Message-ID: <20140715134348.B44571C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: wrap-bytes Changeset: r72440:65a411f16d55 Date: 2014-07-13 20:29 +0200 http://bitbucket.org/pypy/pypy/changeset/65a411f16d55/ Log: hg merge SomeString-charclass, directly into py3k diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -9,7 +9,7 @@ SomeDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, SomeBuiltinMethod, SomeIterator, SomePBC, SomeNone, SomeFloat, s_None, - SomeByteArray, SomeWeakRef, SomeSingleFloat, + SomeByteArray, SomeWeakRef, SomeSingleFloat, AnyChar, AsciiChar, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) @@ -372,12 +372,14 @@ def union((str1, str2)): can_be_None = str1.can_be_None or str2.can_be_None - no_nul = str1.no_nul and str2.no_nul - return SomeString(can_be_None=can_be_None, no_nul=no_nul) + charkind = str1.charkind.union(str2.charkind) + return SomeString(can_be_None=can_be_None, + charkind=charkind) def add((str1, str2)): # propagate const-ness to help getattr(obj, 'prefix' + const_name) - result = SomeString(no_nul=str1.no_nul and str2.no_nul) + charkind = str1.charkind.union(str2.charkind) + result = SomeString(charkind=charkind) if str1.is_immutable_constant() and str2.is_immutable_constant(): result.const = str1.const + str2.const return result @@ -407,8 +409,8 @@ class __extend__(pairtype(SomeChar, SomeChar)): def union((chr1, chr2)): - no_nul = chr1.no_nul and chr2.no_nul - return 
SomeChar(no_nul=no_nul) + charkind = chr1.charkind.union(chr2.charkind) + return SomeChar(charkind=charkind) class __extend__(pairtype(SomeChar, SomeUnicodeCodePoint), @@ -442,17 +444,15 @@ SomeUnicodeString))): raise AnnotatorError( "string formatting mixing strings and unicode not supported") - no_nul = s_string.no_nul + charkind = s_string.charkind for s_item in s_tuple.items: - if isinstance(s_item, SomeFloat): - pass # or s_item is a subclass, like SomeInteger - elif (isinstance(s_item, SomeString) or - isinstance(s_item, SomeUnicodeString)) and s_item.no_nul: - pass + if isinstance(s_item, SomeFloat): # or a subclass, like SomeInteger + charkind = charkind.union(AsciiChar()) + elif isinstance(s_item, (SomeString, SomeUnicodeString)): + charkind = charkind.union(s_item.charkind) else: - no_nul = False - break - return s_string.__class__(no_nul=no_nul) + charkind = AnyChar() # Be conservative + return s_string.__class__(charkind=charkind) class __extend__(pairtype(SomeString, SomeObject), @@ -616,19 +616,19 @@ class __extend__(pairtype(SomeString, SomeInteger)): def getitem((str1, int2)): - return SomeChar(no_nul=str1.no_nul) + return SomeChar(charkind=str1.charkind) getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - return SomeChar(no_nul=str1.no_nul) + return SomeChar(charkind=str1.charkind) getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx - def mul((str1, int2)): # xxx do we want to support this - return SomeString(no_nul=str1.no_nul) + def mul((str1, int2)): + return SomeString(charkind=str1.charkind) class __extend__(pairtype(SomeUnicodeString, SomeInteger)): def getitem((str1, int2)): diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -12,7 +12,8 @@ SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, 
SomeList, HarmlesslyBlocked, - SomeWeakRef, SomeByteArray, SomeConstantType) + SomeWeakRef, SomeByteArray, SomeConstantType, + charkind_from_const) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef @@ -230,11 +231,11 @@ else: raise Exception("seeing a prebuilt long (value %s)" % hex(x)) elif issubclass(tp, str): # py.lib uses annotated str subclasses - no_nul = not '\x00' in x + charkind = charkind_from_const(x) if len(x) == 1: - result = SomeChar(no_nul=no_nul) + result = SomeChar(charkind=charkind) else: - result = SomeString(no_nul=no_nul) + result = SomeString(charkind=charkind) elif tp is unicode: if len(x) == 1: result = SomeUnicodeCodePoint() diff --git a/rpython/annotator/listdef.py b/rpython/annotator/listdef.py --- a/rpython/annotator/listdef.py +++ b/rpython/annotator/listdef.py @@ -1,4 +1,4 @@ -from rpython.annotator.model import s_ImpossibleValue +from rpython.annotator.model import s_ImpossibleValue, s_Str0 from rpython.annotator.model import SomeList, SomeString from rpython.annotator.model import unionof, TLS, UnionError, AnnotatorError @@ -210,5 +210,5 @@ #else: it's fine, don't set immutable=True at all (see # test_can_merge_immutable_list_with_regular_list) -s_list_of_strings = SomeList(ListDef(None, SomeString(no_nul=True), +s_list_of_strings = SomeList(ListDef(None, s_Str0, resized = True)) diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -217,6 +217,99 @@ self.knowntypedata = knowntypedata +# Character classes. 
+ +class AnyChar(object): + """A character of any value.""" + no_nul = False + is_ascii = False + is_utf8 = False + + _instances = {} + + def __new__(cls): + return cls._instances[cls] + + @classmethod + def _register(cls): + cls._instances[cls] = object.__new__(cls) + + def __repr__(self): + return type(self).__name__ + + def __eq__(self, other): + if TLS.check_str_without_nul: + if self.no_nul != other.no_nul: + return False + return True + + def union(self, other): + return self +AnyChar._register() + +class NoNulChar(AnyChar): + """Any character except NUL '\0'. + Strings of this kind can be converted to char* with no loss.""" + + no_nul = True + + def union(self, other): + if other.no_nul: + return self + else: + return AnyChar() +NoNulChar._register() + +class Utf8Char(NoNulChar): + """A character compatible with utf8 encoding. + + Does not mean that the string can always be decoded with utf8, + specially for slices or single characters. This kind indicates that + utf8 is the encoding to use when converting to unicode.""" + is_utf8 = True + + def union(self, other): + if other.is_utf8: + return self + elif other.no_nul: + return NoNulChar() + else: + return AnyChar() +Utf8Char._register() + +class AsciiChar(Utf8Char): + """A character in the range(1, 128). + + Strings of this kind can be decoded faster to unicode.""" + + is_ascii = True + + def union(self, other): + if other.is_ascii: + return self + elif other.is_utf8: + return Utf8Char() + elif other.no_nul: + return NoNulChar() + else: + return AnyChar() +AsciiChar._register() + + +def charkind_from_const(value): + # Probably no need to handle utf-8, we don't have such constants + # in pypy code. + try: + value.decode('ascii') + except UnicodeDecodeError: + pass + else: + return AsciiChar() + if '\x00' not in value: + return NoNulChar() + return AnyChar() + + class SomeStringOrUnicode(SomeObject): """Base class for shared implementation of SomeString, SomeUnicodeString and SomeByteArray. 
@@ -225,53 +318,44 @@ immutable = True can_be_None = False - no_nul = False # No NUL character in the string. - def __init__(self, can_be_None=False, no_nul=False): + def __init__(self, can_be_None=False, charkind=None): assert type(self) is not SomeStringOrUnicode if can_be_None: self.can_be_None = True - if no_nul: - assert self.immutable #'no_nul' cannot be used with SomeByteArray - self.no_nul = True + if charkind: + # charkind cannot be used with SomeByteArray + assert self.immutable + self.charkind = charkind + else: + self.charkind = AnyChar() def can_be_none(self): return self.can_be_None - def __eq__(self, other): - if self.__class__ is not other.__class__: - return False - d1 = self.__dict__ - d2 = other.__dict__ - if not TLS.check_str_without_nul: - d1 = d1.copy() - d1['no_nul'] = 0 - d2 = d2.copy() - d2['no_nul'] = 0 - return d1 == d2 + def nonnoneify(self): + return self.basestringclass(can_be_None=False, charkind=self.charkind) - def nonnoneify(self): - return self.__class__(can_be_None=False, no_nul=self.no_nul) + def noneify(self): + return self.basestringclass(can_be_None=True, charkind=self.charkind) def nonnulify(self): - return self.__class__(can_be_None=self.can_be_None, no_nul=True) + if self.charkind.no_nul: + return self + assert type(self.charkind) is AnyChar # so far the only one. + charkind = NoNulChar() + return self.__class__(can_be_None=self.can_be_None, charkind=charkind) class SomeString(SomeStringOrUnicode): "Stands for an object which is known to be a string." knowntype = str - def noneify(self): - return SomeString(can_be_None=True, no_nul=self.no_nul) - class SomeUnicodeString(SomeStringOrUnicode): "Stands for an object which is known to be an unicode string" knowntype = unicode - def noneify(self): - return SomeUnicodeString(can_be_None=True, no_nul=self.no_nul) - class SomeByteArray(SomeStringOrUnicode): immutable = False @@ -282,18 +366,18 @@ "Stands for an object known to be a string of length 1." 
can_be_None = False - def __init__(self, no_nul=False): # no 'can_be_None' argument here - if no_nul: - self.no_nul = True + def __init__(self, charkind=None): + # no 'can_be_None' argument here + SomeString.__init__(self, charkind=charkind) class SomeUnicodeCodePoint(SomeUnicodeString): "Stands for an object known to be a unicode codepoint." can_be_None = False - def __init__(self, no_nul=False): # no 'can_be_None' argument here - if no_nul: - self.no_nul = True + def __init__(self, charkind=False): + # no 'can_be_None' argument here + SomeUnicodeString.__init__(self, charkind=charkind) SomeString.basestringclass = SomeString SomeString.basecharclass = SomeChar @@ -586,8 +670,8 @@ s_Bool = SomeBool() s_Int = SomeInteger() s_ImpossibleValue = SomeImpossibleValue() -s_Str0 = SomeString(no_nul=True) -s_Unicode0 = SomeUnicodeString(no_nul=True) +s_Str0 = SomeString(charkind=NoNulChar) +s_Unicode0 = SomeUnicodeString(charkind=NoNulChar) # ____________________________________________________________ diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -428,7 +428,7 @@ return ''.join(g(n)) s = a.build_types(f, [int]) assert s.knowntype == str - assert s.no_nul + assert s.charkind.no_nul def test_str_split(self): a = self.RPythonAnnotator() @@ -441,26 +441,26 @@ s = a.build_types(f, [int]) assert isinstance(s, annmodel.SomeList) s_item = s.listdef.listitem.s_value - assert s_item.no_nul + assert s_item.charkind.no_nul def test_str_split_nul(self): def f(n): return n.split('\0')[0] a = self.RPythonAnnotator() a.translator.config.translation.check_str_without_nul = True - s = a.build_types(f, [annmodel.SomeString(no_nul=False, can_be_None=False)]) + s = a.build_types(f, [annmodel.SomeString()]) assert isinstance(s, annmodel.SomeString) assert not s.can_be_None - assert s.no_nul + assert s.charkind.no_nul def g(n): return 
n.split('\0', 1)[0] a = self.RPythonAnnotator() a.translator.config.translation.check_str_without_nul = True - s = a.build_types(g, [annmodel.SomeString(no_nul=False, can_be_None=False)]) + s = a.build_types(g, [annmodel.SomeString()]) assert isinstance(s, annmodel.SomeString) assert not s.can_be_None - assert not s.no_nul + assert not s.charkind.no_nul def test_str_splitlines(self): a = self.RPythonAnnotator() @@ -479,8 +479,9 @@ return a_str.rstrip(' ') else: return a_str.lstrip(' ') - s = a.build_types(f, [int, annmodel.SomeString(no_nul=True)]) - assert s.no_nul + s = a.build_types(f, [int, annmodel.SomeString( + charkind=annmodel.NoNulChar)]) + assert s.charkind.no_nul def test_str_mul(self): a = self.RPythonAnnotator() @@ -2016,7 +2017,7 @@ a = self.RPythonAnnotator() s = a.build_types(f, [int]) assert s.can_be_None - assert s.no_nul + assert s.charkind.no_nul def test_str_or_None(self): def f(a): @@ -2032,7 +2033,7 @@ a = self.RPythonAnnotator() s = a.build_types(f, [int]) assert s.can_be_None - assert s.no_nul + assert s.charkind.no_nul def test_emulated_pbc_call_simple(self): def f(a,b): @@ -2098,15 +2099,16 @@ a = self.RPythonAnnotator() s = a.build_types(f, []) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def test_mul_str0(self): def f(s): return s*10 a = self.RPythonAnnotator() - s = a.build_types(f, [annmodel.SomeString(no_nul=True)]) + s = a.build_types(f, [annmodel.SomeString( + charkind=annmodel.NoNulChar())]) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def test_getitem_str0(self): def f(s, n): @@ -2120,10 +2122,11 @@ a = self.RPythonAnnotator() a.translator.config.translation.check_str_without_nul = True - s = a.build_types(f, [annmodel.SomeString(no_nul=True), + s = a.build_types(f, [annmodel.SomeString( + charkind=annmodel.NoNulChar()), annmodel.SomeInteger()]) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def 
test_non_none_and_none_with_isinstance(self): class A(object): @@ -3353,7 +3356,7 @@ a = self.RPythonAnnotator() s = a.build_types(f, [str]) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def f(x): return u'a'.replace(x, u'b') @@ -3861,10 +3864,10 @@ return i a = self.RPythonAnnotator() a.translator.config.translation.check_str_without_nul = True - s = a.build_types(f, [annmodel.SomeString(no_nul=False)]) + s = a.build_types(f, [annmodel.SomeString(charkind=annmodel.AnyChar())]) assert isinstance(s, annmodel.SomeString) assert s.can_be_None - assert s.no_nul + assert s.charkind.no_nul def test_no___call__(self): class X(object): @@ -3884,7 +3887,7 @@ a = self.RPythonAnnotator() s = a.build_types(fn, []) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def test_os_getenv(self): import os @@ -3893,7 +3896,7 @@ a = self.RPythonAnnotator() s = a.build_types(fn, []) assert isinstance(s, annmodel.SomeString) - assert s.no_nul + assert s.charkind.no_nul def test_base_iter(self): class A(object): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -9,7 +9,7 @@ SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod, SomeFloat, SomeIterator, SomePBC, SomeNone, SomeType, s_ImpossibleValue, - s_Bool, s_None, unionof, add_knowntypedata, + s_Bool, s_None, unionof, add_knowntypedata, NoNulChar, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from rpython.annotator import builtin @@ -459,13 +459,13 @@ return SomeInteger(nonneg=True) def method_strip(self, chr=None): - return self.basestringclass(no_nul=self.no_nul) + return self.basestringclass(charkind=self.charkind) def method_lstrip(self, chr=None): - return 
self.basestringclass(no_nul=self.no_nul) + return self.basestringclass(charkind=self.charkind) def method_rstrip(self, chr=None): - return self.basestringclass(no_nul=self.no_nul) + return self.basestringclass(charkind=self.charkind) def method_join(self, s_list): if s_None.contains(s_list): @@ -475,8 +475,8 @@ if isinstance(self, SomeUnicodeString): return immutablevalue(u"") return immutablevalue("") - no_nul = self.no_nul and s_item.no_nul - return self.basestringclass(no_nul=no_nul) + charkind = self.charkind.union(s_item.charkind) + return self.basestringclass(charkind=charkind) def iter(self): return SomeIterator(self) @@ -487,23 +487,23 @@ def method_split(self, patt, max=-1): if max == -1 and patt.is_constant() and patt.const == "\0": - no_nul = True + charkind = NoNulChar() else: - no_nul = self.no_nul - s_item = self.basestringclass(no_nul=no_nul) + charkind = self.charkind + s_item = self.basestringclass(charkind=charkind) return getbookkeeper().newlist(s_item) def method_rsplit(self, patt, max=-1): - s_item = self.basestringclass(no_nul=self.no_nul) + s_item = self.basestringclass(charkind=self.charkind) return getbookkeeper().newlist(s_item) def method_replace(self, s1, s2): - return self.basestringclass(no_nul=self.no_nul and s2.no_nul) + return self.basestringclass( + charkind=self.charkind.union(s2.charkind)) def getslice(self, s_start, s_stop): check_negative_slice(s_start, s_stop) - result = self.basestringclass(no_nul=self.no_nul) - return result + return self.basestringclass(charkind=self.charkind) def op_contains(self, s_element): if s_element.is_constant() and s_element.const == "\0": diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ b/rpython/rlib/rmarshal.py @@ -230,16 +230,14 @@ raise ValueError("expected a string") length = readlong(loader) return assert_str0(readstr(loader, length)) -add_loader(annmodel.SomeString(can_be_None=False, no_nul=True), - load_string_nonul) 
+add_loader(annmodel.s_Str0, load_string_nonul) def load_string(loader): if readchr(loader) != TYPE_STRING: raise ValueError("expected a string") length = readlong(loader) return readstr(loader, length) -add_loader(annmodel.SomeString(can_be_None=False, no_nul=False), - load_string) +add_loader(annmodel.SomeString(), load_string) def load_string_or_none_nonul(loader): t = readchr(loader) @@ -250,8 +248,7 @@ return None else: raise ValueError("expected a string or None") -add_loader(annmodel.SomeString(can_be_None=True, no_nul=True), - load_string_or_none_nonul) +add_loader(annmodel.s_Str0.noneify(), load_string_or_none_nonul) def load_string_or_none(loader): t = readchr(loader) @@ -262,8 +259,7 @@ return None else: raise ValueError("expected a string or None") -add_loader(annmodel.SomeString(can_be_None=True, no_nul=False), - load_string_or_none) +add_loader(annmodel.SomeString().noneify(), load_string_or_none) # ____________________________________________________________ # diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -3,7 +3,7 @@ import sys from rpython.annotator.model import (SomeObject, SomeString, s_None, SomeChar, - SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePBC) + SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePBC, NoNulChar) from rpython.rtyper.llannotation import SomePtr from rpython.rlib import jit from rpython.rlib.objectmodel import newlist_hint, specialize @@ -523,15 +523,9 @@ _about_ = assert_str0 def compute_result_annotation(self, s_obj): - if s_None.contains(s_obj): + if s_None.contains(s_obj): # probably a future str_or_None return s_obj - assert isinstance(s_obj, (SomeString, SomeUnicodeString)) - if s_obj.no_nul: - return s_obj - new_s_obj = SomeObject.__new__(s_obj.__class__) - new_s_obj.__dict__ = s_obj.__dict__.copy() - new_s_obj.no_nul = True - return new_s_obj + return s_obj.nonnulify() def specialize_call(self, hop): 
hop.exception_cannot_occur() @@ -548,7 +542,7 @@ def compute_result_annotation(self, s_obj): if not isinstance(s_obj, (SomeString, SomeUnicodeString)): return s_obj - if not s_obj.no_nul: + if not s_obj.charkind.no_nul: raise ValueError("Value is not no_nul") def specialize_call(self, hop): diff --git a/rpython/rlib/test/test_signature.py b/rpython/rlib/test/test_signature.py --- a/rpython/rlib/test/test_signature.py +++ b/rpython/rlib/test/test_signature.py @@ -119,8 +119,8 @@ @signature(types.unicode0(), returns=types.str0()) def f(u): return 'str' - assert getsig(f) == [model.SomeUnicodeString(no_nul=True), - model.SomeString(no_nul=True)] + assert getsig(f) == [model.SomeUnicodeString.s_Str0, + model.SomeString.s_Unicode0] def test_ptr(): policy = LowLevelAnnotatorPolicy() diff --git a/rpython/rlib/types.py b/rpython/rlib/types.py --- a/rpython/rlib/types.py +++ b/rpython/rlib/types.py @@ -36,11 +36,14 @@ def unicode0(): - return model.SomeUnicodeString(no_nul=True) + return model.s_Unicode0() def str(can_be_None=False): - return model.SomeString(can_be_None=can_be_None) + if can_be_None: + return model.SomeString().noneify() + else: + return model.SomeString() def bytearray(): @@ -48,7 +51,7 @@ def str0(): - return model.SomeString(no_nul=True) + return model.s_str0 def char(): diff --git a/rpython/rtyper/module/ll_os_environ.py b/rpython/rtyper/module/ll_os_environ.py --- a/rpython/rtyper/module/ll_os_environ.py +++ b/rpython/rtyper/module/ll_os_environ.py @@ -181,8 +181,7 @@ register_external(r_envitems, [], [(str0, str0)], export_name='ll_os.ll_os_envitems', llimpl=envitems_llimpl) -register_external(r_getenv, [str0], - annmodel.SomeString(can_be_None=True, no_nul=True), +register_external(r_getenv, [str0], str0.noneify(), export_name='ll_os.ll_os_getenv', llimpl=getenv_llimpl) register_external(r_putenv, [str0, str0], annmodel.s_None, diff --git a/rpython/rtyper/test/test_extfunc.py b/rpython/rtyper/test/test_extfunc.py --- 
a/rpython/rtyper/test/test_extfunc.py +++ b/rpython/rtyper/test/test_extfunc.py @@ -138,7 +138,7 @@ assert isinstance(s, annmodel.SomeString) def test_str0(self): - str0 = annmodel.SomeString(no_nul=True) + str0 = annmodel.s_Str0 def os_open(s): pass register_external(os_open, [str0], None) @@ -156,7 +156,7 @@ a.build_types(g, [str0]) # Does not raise def test_list_of_str0(self): - str0 = annmodel.SomeString(no_nul=True) + str0 = annmodel.s_Str0 def os_execve(l): pass register_external(os_execve, [[str0]], None) From noreply at buildbot.pypy.org Tue Jul 15 15:43:49 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:49 +0200 (CEST) Subject: [pypy-commit] pypy SomeString-charclass: oops Message-ID: <20140715134349.F2BE81C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: SomeString-charclass Changeset: r72441:c1a7030e7a99 Date: 2014-07-13 20:32 +0200 http://bitbucket.org/pypy/pypy/changeset/c1a7030e7a99/ Log: oops diff --git a/rpython/rlib/types.py b/rpython/rlib/types.py --- a/rpython/rlib/types.py +++ b/rpython/rlib/types.py @@ -51,7 +51,7 @@ def str0(): - return model.s_str0 + return model.s_Str0 def char(): From noreply at buildbot.pypy.org Tue Jul 15 15:43:51 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:51 +0200 (CEST) Subject: [pypy-commit] pypy SomeString-charclass: Oops2 Message-ID: <20140715134351.30D271C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: SomeString-charclass Changeset: r72442:2e13869f67ab Date: 2014-07-13 20:42 +0200 http://bitbucket.org/pypy/pypy/changeset/2e13869f67ab/ Log: Oops2 diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -670,8 +670,8 @@ s_Bool = SomeBool() s_Int = SomeInteger() s_ImpossibleValue = SomeImpossibleValue() -s_Str0 = SomeString(charkind=NoNulChar) -s_Unicode0 = SomeUnicodeString(charkind=NoNulChar) +s_Str0 = 
SomeString(charkind=NoNulChar()) +s_Unicode0 = SomeUnicodeString(charkind=NoNulChar()) # ____________________________________________________________ From noreply at buildbot.pypy.org Tue Jul 15 15:43:52 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:52 +0200 (CEST) Subject: [pypy-commit] pypy SomeString-charclass: Fix, and add support for "x in 'ascii_str'". Message-ID: <20140715134352.713721C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: SomeString-charclass Changeset: r72443:e9087bbe0338 Date: 2014-07-13 21:40 +0200 http://bitbucket.org/pypy/pypy/changeset/e9087bbe0338/ Log: Fix, and add support for "x in 'ascii_str'". diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -241,7 +241,8 @@ if TLS.check_str_without_nul: if self.no_nul != other.no_nul: return False - return True + return (self.is_ascii == other.is_ascii and + self.is_utf8 == other.is_utf8) def union(self, other): return self diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3869,6 +3869,19 @@ assert s.can_be_None assert s.charkind.no_nul + def test_contains_ascii(self): + chars = '01234567890abcdef' + def f(i): + if i in chars: + return i + else: + return None + a = self.RPythonAnnotator() + s = a.build_types(f, [annmodel.SomeString(charkind=annmodel.AnyChar())]) + assert isinstance(s, annmodel.SomeString) + assert s.can_be_None + assert s.charkind.is_ascii + def test_no___call__(self): class X(object): def __call__(self): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -9,7 +9,7 @@ SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, 
SomeBuiltinMethod, SomeFloat, SomeIterator, SomePBC, SomeNone, SomeType, s_ImpossibleValue, - s_Bool, s_None, unionof, add_knowntypedata, NoNulChar, + s_Bool, s_None, unionof, add_knowntypedata, NoNulChar, AsciiChar, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from rpython.annotator import builtin @@ -506,7 +506,17 @@ return self.basestringclass(charkind=self.charkind) def op_contains(self, s_element): - if s_element.is_constant() and s_element.const == "\0": + if self.is_constant() and self.const.isalnum(): + r = SomeBool() + bk = getbookkeeper() + op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) + # raise TypeError(op.args) + knowntypedata = {} + add_knowntypedata(knowntypedata, True, [op.args[1]], + SomeString(charkind=AsciiChar())) + r.set_knowntypedata(knowntypedata) + return r + elif s_element.is_constant() and s_element.const == "\0": r = SomeBool() bk = getbookkeeper() op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) From noreply at buildbot.pypy.org Tue Jul 15 15:43:53 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:53 +0200 (CEST) Subject: [pypy-commit] pypy wrap-bytes: hg merge SomeString-charclass Message-ID: <20140715134353.B466D1C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: wrap-bytes Changeset: r72444:a5ac58fb49e1 Date: 2014-07-13 21:42 +0200 http://bitbucket.org/pypy/pypy/changeset/a5ac58fb49e1/ Log: hg merge SomeString-charclass diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -241,7 +241,8 @@ if TLS.check_str_without_nul: if self.no_nul != other.no_nul: return False - return True + return (self.is_ascii == other.is_ascii and + self.is_utf8 == other.is_utf8) def union(self, other): return self @@ -670,8 +671,8 @@ s_Bool = SomeBool() s_Int = SomeInteger() 
s_ImpossibleValue = SomeImpossibleValue() -s_Str0 = SomeString(charkind=NoNulChar) -s_Unicode0 = SomeUnicodeString(charkind=NoNulChar) +s_Str0 = SomeString(charkind=NoNulChar()) +s_Unicode0 = SomeUnicodeString(charkind=NoNulChar()) # ____________________________________________________________ diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3869,6 +3869,19 @@ assert s.can_be_None assert s.charkind.no_nul + def test_contains_ascii(self): + chars = '01234567890abcdef' + def f(i): + if i in chars: + return i + else: + return None + a = self.RPythonAnnotator() + s = a.build_types(f, [annmodel.SomeString(charkind=annmodel.AnyChar())]) + assert isinstance(s, annmodel.SomeString) + assert s.can_be_None + assert s.charkind.is_ascii + def test_no___call__(self): class X(object): def __call__(self): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -9,7 +9,7 @@ SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod, SomeFloat, SomeIterator, SomePBC, SomeNone, SomeType, s_ImpossibleValue, - s_Bool, s_None, unionof, add_knowntypedata, NoNulChar, + s_Bool, s_None, unionof, add_knowntypedata, NoNulChar, AsciiChar, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from rpython.annotator import builtin @@ -506,7 +506,17 @@ return self.basestringclass(charkind=self.charkind) def op_contains(self, s_element): - if s_element.is_constant() and s_element.const == "\0": + if self.is_constant() and self.const.isalnum(): + r = SomeBool() + bk = getbookkeeper() + op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) + # raise TypeError(op.args) + 
knowntypedata = {} + add_knowntypedata(knowntypedata, True, [op.args[1]], + SomeString(charkind=AsciiChar())) + r.set_knowntypedata(knowntypedata) + return r + elif s_element.is_constant() and s_element.const == "\0": r = SomeBool() bk = getbookkeeper() op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) diff --git a/rpython/rlib/types.py b/rpython/rlib/types.py --- a/rpython/rlib/types.py +++ b/rpython/rlib/types.py @@ -51,7 +51,7 @@ def str0(): - return model.s_str0 + return model.s_Str0 def char(): From noreply at buildbot.pypy.org Tue Jul 15 15:43:54 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:54 +0200 (CEST) Subject: [pypy-commit] pypy SomeString-charclass: Improve test, and better "improve" the merge of AsciiString with SomeChar. Message-ID: <20140715134354.EB4861C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: SomeString-charclass Changeset: r72445:7b4a559af4b2 Date: 2014-07-13 22:53 +0200 http://bitbucket.org/pypy/pypy/changeset/7b4a559af4b2/ Log: Improve test, and better "improve" the merge of AsciiString with SomeChar. 
diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -376,6 +376,16 @@ return SomeString(can_be_None=can_be_None, charkind=charkind) + def improve((obj, improvement)): + if obj.charkind.union(improvement.charkind) is obj.charkind: + charkind = improvement.charkind + else: + charkind = obj.charkind + if type(obj) is SomeString: + return type(improvement)(charkind=charkind) + else: + return type(obj)(charkind=charkind) + def add((str1, str2)): # propagate const-ness to help getattr(obj, 'prefix' + const_name) charkind = str1.charkind.union(str2.charkind) @@ -458,8 +468,15 @@ class __extend__(pairtype(SomeString, SomeObject), pairtype(SomeUnicodeString, SomeObject)): - def mod((s_string, args)): - return s_string.__class__() + def mod((s_string, s_item)): + charkind = s_string.charkind + if isinstance(s_item, SomeFloat): # or a subclass, like SomeInteger + charkind = charkind.union(AsciiChar()) + elif isinstance(s_item, (SomeString, SomeUnicodeString)): + charkind = charkind.union(s_item.charkind) + else: + charkind = AnyChar() # Be conservative + return s_string.__class__(charkind=charkind) class __extend__(pairtype(SomeFloat, SomeFloat)): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3875,11 +3875,15 @@ if i in chars: return i else: - return None + return 'z' a = self.RPythonAnnotator() s = a.build_types(f, [annmodel.SomeString(charkind=annmodel.AnyChar())]) assert isinstance(s, annmodel.SomeString) - assert s.can_be_None + assert s.charkind.is_ascii + # + a = self.RPythonAnnotator() + s = a.build_types(f, [annmodel.SomeChar(charkind=annmodel.AnyChar())]) + assert isinstance(s, annmodel.SomeString) assert s.charkind.is_ascii def test_no___call__(self): diff --git a/rpython/annotator/unaryop.py 
b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -9,7 +9,7 @@ SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod, SomeFloat, SomeIterator, SomePBC, SomeNone, SomeType, s_ImpossibleValue, - s_Bool, s_None, unionof, add_knowntypedata, NoNulChar, AsciiChar, + s_Bool, s_None, unionof, add_knowntypedata, NoNulChar, AsciiChar, AnyChar, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from rpython.annotator import builtin @@ -178,6 +178,9 @@ return getbookkeeper().immutablevalue(bool(self.const)) return s_Bool + def str(self): + return SomeString(charkind=AsciiChar) + class __extend__(SomeInteger): def invert(self): @@ -506,26 +509,20 @@ return self.basestringclass(charkind=self.charkind) def op_contains(self, s_element): - if self.is_constant() and self.const.isalnum(): - r = SomeBool() + r = SomeBool() + knowntypedata = {} + if self.charkind != AnyChar(): bk = getbookkeeper() op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) - # raise TypeError(op.args) - knowntypedata = {} add_knowntypedata(knowntypedata, True, [op.args[1]], - SomeString(charkind=AsciiChar())) - r.set_knowntypedata(knowntypedata) - return r - elif s_element.is_constant() and s_element.const == "\0": - r = SomeBool() + s_element.basestringclass(charkind=self.charkind)) + if s_element.is_constant() and s_element.const == "\0": bk = getbookkeeper() op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) - knowntypedata = {} add_knowntypedata(knowntypedata, False, [op.args[0]], self.nonnulify()) + if knowntypedata: r.set_knowntypedata(knowntypedata) - return r - else: - return SomeObject.op_contains(self, s_element) + return r op_contains.can_only_throw = [] def method_format(self, *args): From noreply at buildbot.pypy.org Tue Jul 15 
15:43:56 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 15 Jul 2014 15:43:56 +0200 (CEST) Subject: [pypy-commit] pypy wrap-bytes: hg merge SomeString-charclass Message-ID: <20140715134356.58F161C13BD@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: wrap-bytes Changeset: r72446:a160002e8499 Date: 2014-07-13 23:02 +0200 http://bitbucket.org/pypy/pypy/changeset/a160002e8499/ Log: hg merge SomeString-charclass diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -376,6 +376,16 @@ return SomeString(can_be_None=can_be_None, charkind=charkind) + def improve((obj, improvement)): + if obj.charkind.union(improvement.charkind) is obj.charkind: + charkind = improvement.charkind + else: + charkind = obj.charkind + if type(obj) is SomeString: + return type(improvement)(charkind=charkind) + else: + return type(obj)(charkind=charkind) + def add((str1, str2)): # propagate const-ness to help getattr(obj, 'prefix' + const_name) charkind = str1.charkind.union(str2.charkind) @@ -458,8 +468,15 @@ class __extend__(pairtype(SomeString, SomeObject), pairtype(SomeUnicodeString, SomeObject)): - def mod((s_string, args)): - return s_string.__class__() + def mod((s_string, s_item)): + charkind = s_string.charkind + if isinstance(s_item, SomeFloat): # or a subclass, like SomeInteger + charkind = charkind.union(AsciiChar()) + elif isinstance(s_item, (SomeString, SomeUnicodeString)): + charkind = charkind.union(s_item.charkind) + else: + charkind = AnyChar() # Be conservative + return s_string.__class__(charkind=charkind) class __extend__(pairtype(SomeFloat, SomeFloat)): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3875,11 +3875,15 @@ if i in chars: return i else: - return None + return 'z' a = self.RPythonAnnotator() 
s = a.build_types(f, [annmodel.SomeString(charkind=annmodel.AnyChar())]) assert isinstance(s, annmodel.SomeString) - assert s.can_be_None + assert s.charkind.is_ascii + # + a = self.RPythonAnnotator() + s = a.build_types(f, [annmodel.SomeChar(charkind=annmodel.AnyChar())]) + assert isinstance(s, annmodel.SomeString) assert s.charkind.is_ascii def test_no___call__(self): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -9,7 +9,7 @@ SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod, SomeFloat, SomeIterator, SomePBC, SomeNone, SomeType, s_ImpossibleValue, - s_Bool, s_None, unionof, add_knowntypedata, NoNulChar, AsciiChar, + s_Bool, s_None, unionof, add_knowntypedata, NoNulChar, AsciiChar, AnyChar, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue from rpython.annotator import builtin @@ -178,6 +178,9 @@ return getbookkeeper().immutablevalue(bool(self.const)) return s_Bool + def str(self): + return SomeString(charkind=AsciiChar) + class __extend__(SomeInteger): def invert(self): @@ -506,26 +509,20 @@ return self.basestringclass(charkind=self.charkind) def op_contains(self, s_element): - if self.is_constant() and self.const.isalnum(): - r = SomeBool() + r = SomeBool() + knowntypedata = {} + if self.charkind != AnyChar(): bk = getbookkeeper() op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) - # raise TypeError(op.args) - knowntypedata = {} add_knowntypedata(knowntypedata, True, [op.args[1]], - SomeString(charkind=AsciiChar())) - r.set_knowntypedata(knowntypedata) - return r - elif s_element.is_constant() and s_element.const == "\0": - r = SomeBool() + s_element.basestringclass(charkind=self.charkind)) + if s_element.is_constant() and s_element.const == "\0": bk = 
getbookkeeper() op = bk._find_current_op(opname="contains", arity=2, pos=0, s_type=self) - knowntypedata = {} add_knowntypedata(knowntypedata, False, [op.args[0]], self.nonnulify()) + if knowntypedata: r.set_knowntypedata(knowntypedata) - return r - else: - return SomeObject.op_contains(self, s_element) + return r op_contains.can_only_throw = [] def method_format(self, *args): From noreply at buildbot.pypy.org Tue Jul 15 16:25:57 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 15 Jul 2014 16:25:57 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix _rawffi module Message-ID: <20140715142557.601E41C05F1@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72447:cc1160f9014e Date: 2014-07-13 07:34 -0500 http://bitbucket.org/pypy/pypy/changeset/cc1160f9014e/ Log: Fix _rawffi module diff --git a/pypy/interpreter/test/test_utf8.py b/pypy/interpreter/test/test_utf8.py --- a/pypy/interpreter/test/test_utf8.py +++ b/pypy/interpreter/test/test_utf8.py @@ -51,7 +51,7 @@ if sys.maxunicode < 65536: assert l[:3] == [u'A', u'\u010F', u'\u20AC'] else: - assert l == [u'A', u'\u010F', u'\u20AC', u'\U00001F63D'] + assert l == [u'A', u'\u010F', u'\u20AC', u'\U0001F63D'] def test_reverse_iterator(): s = build_utf8str() @@ -197,7 +197,7 @@ def test_copy_to_wcharp(): s = build_utf8str() - if sys.maxunicode < 0x10000: + if sys.maxunicode < 0x10000 and rffi.sizeof(rffi.WCHAR_T) == 4: # The last character requires a surrogate pair on narrow builds and # so won't be converted correctly by rffi.wcharp2unicode s = s[:-1] @@ -206,3 +206,27 @@ u = rffi.wcharp2unicode(wcharp) rffi.free_wcharp(wcharp) assert s == u + +def test_from_wcharp(): + def check(u): + wcharp = rffi.unicode2wcharp(u) + s = Utf8Str.from_wcharp(wcharp) + rffi.free_wcharp(wcharp) + assert s == u + check(u'A\u010F\u20AC\U0001F63D') + check(u'0xDCC0 ') + check(u'0xDCC0') + +def test_from_wcharpn(): + u = u'A\u010F\u20AC\U0001F63D' + wcharp = rffi.unicode2wcharp(u) + s = 
Utf8Str.from_wcharpn(wcharp, 3) + assert s == u[:3] + + s = Utf8Str.from_wcharpn(wcharp, 4) + if sys.maxunicode == 0xFFFF: + assert s == u[:4] + else: + assert s == u + + rffi.free_wcharp(wcharp) diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -4,6 +4,14 @@ from rpython.rlib.unicodedata import unicodedb_5_2_0 as unicodedb from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype + +wchar_rint = rffi.r_uint +WCHAR_INTP = rffi.UINTP +if rffi.sizeof(rffi.WCHAR_T) == 2: + wchar_rint = rffi.r_ushort + WCHAR_INTP = rffi.USHORTP + def utf8chr(value): # Like unichr, but returns a Utf8Str object @@ -415,15 +423,89 @@ byte_pos -= 1 return byte_pos - def copy_to_wcharp(self): - # XXX Temporary solution. This won't work on correctly on systems - # where sizeof(wchar_t) == 2. Also, it copies twice. - from pypy.interpreter.utf8_codecs import unicode_encode_unicode_internal - from rpython.rlib.runicode import MAXUNICODE - bytes = unicode_encode_unicode_internal(self, len(self), 'strict') - return rffi.cast(rffi.CWCHARP, rffi.str2charp(bytes)) + def copy_to_wcharp(self, track_allocation=True): + length = len(self) + 1 + if rffi.sizeof(rffi.WCHAR_T) == 2: + for c in self.codepoint_iter(): + if c > 0xFFFF: + length += 1 + array = lltype.malloc(WCHAR_INTP.TO, length, flavor='raw', + track_allocation=track_allocation) + from pypy.interpreter.utf8_codecs import create_surrogate_pair + i = 0; + for c in self.codepoint_iter(): + if rffi.sizeof(rffi.WCHAR_T) == 2: + c1, c2 = create_surrogate_pair(c) + array[i] = wchar_rint(c1) + if c2: + i += 1 + array[i] = wchar_rint(c2) + else: + array[i] = wchar_rint(c) + + i += 1 + + array[i] = wchar_rint(0) + array = rffi.cast(rffi.CWCHARP, array) + return array + + @staticmethod + def from_wcharp(wcharp): + array = rffi.cast(WCHAR_INTP, wcharp) + builder = Utf8Builder() + i = 0; + while 
True: + c = int(array[i]) + if c == 0: + break + + if rffi.sizeof(rffi.WCHAR_T) == 2: + if 0xD800 <= c <= 0xDBFF: + i += 1 + c2 = int(array[i]) + if c2 == 0: + builder.append(c) + break + elif not (0xDC00 <= c2 <= 0xDFFF): + builder.append(c) + c = c2 + else: + c = (((c & 0x3FF)<<10) | (c2 & 0x3FF)) + 0x10000; + + builder.append(c) + i += 1 + + return builder.build() + + @staticmethod + def from_wcharpn(wcharp, size): + array = rffi.cast(WCHAR_INTP, wcharp) + builder = Utf8Builder() + i = 0; + while i < size: + c = int(array[i]) + if c == 0: + break + + if rffi.sizeof(rffi.WCHAR_T) == 2: + if i != size - 1 and 0xD800 <= c <= 0xDBFF: + i += 1 + c2 = int(array[i]) + if c2 == 0: + builder.append(c) + break + elif not (0xDC00 <= c2 <= 0xDFFF): + builder.append(c) + c = c2 + else: + c = (((c & 0x3FF)<<10) | (c2 & 0x3FF)) + 0x10000; + + builder.append(c) + i += 1 + + return builder.build() class Utf8Builder(object): @specialize.argtype(1) diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -784,6 +784,13 @@ result.append(r) return result.build(), pos, bo +def create_surrogate_pair(val): + if val >= 0x10000: + return (0xD800 | ((val-0x10000) >> 10), + 0xDC00 | ((val-0x10000) & 0x3FF)) + else: + return val, 0 + def unicode_encode_utf_16_helper(s, size, errors, errorhandler=None, byteorder='little'): @@ -803,10 +810,7 @@ while i < size: ch = utf8ord(s, i) i += 1 - ch2 = 0 - if ch >= 0x10000: - ch2 = 0xDC00 | ((ch-0x10000) & 0x3FF) - ch = 0xD800 | ((ch-0x10000) >> 10) + ch, ch2 = create_surrogate_pair(ch) _STORECHAR(result, ch, byteorder) if ch2: diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_rawffi/alt/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -168,7 +168,7 @@ self.argchain.arg(addr) def handle_unichar_p(self, w_ffitype, w_obj, unicodeval): - buf = 
rffi.unicode2wcharp(unicodeval) + buf = unicodeval.copy_to_wcharp() self.w_func.to_free.append(rffi.cast(rffi.VOIDP, buf)) addr = rffi.cast(rffi.ULONG, buf) self.argchain.arg(addr) diff --git a/pypy/module/_rawffi/alt/test/test_type_converter.py b/pypy/module/_rawffi/alt/test/test_type_converter.py --- a/pypy/module/_rawffi/alt/test/test_type_converter.py +++ b/pypy/module/_rawffi/alt/test/test_type_converter.py @@ -1,6 +1,7 @@ import sys from rpython.rlib.rarithmetic import r_uint, r_singlefloat, r_longlong, r_ulonglong from rpython.rlib.libffi import IS_32_BIT +from pypy.interpreter.utf8 import Utf8Str from pypy.module._rawffi.alt.interp_ffitype import app_types, descr_new_pointer from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter @@ -58,7 +59,8 @@ def test_char(self): space = self.space self.check(app_types.char, space.wrap('a'), ord('a')) - self.check(app_types.unichar, space.wrap(u'\u1234'), 0x1234) + self.check(app_types.unichar, + space.wrap(Utf8Str.from_unicode(u'\u1234')), 0x1234) def test_signed_longlong(self): space = self.space @@ -120,8 +122,11 @@ def test_strings(self): # first, try automatic conversion from applevel self.check(app_types.char_p, self.space.wrap('foo'), 'foo') - self.check(app_types.unichar_p, self.space.wrap(u'foo\u1234'), u'foo\u1234') - self.check(app_types.unichar_p, self.space.wrap('foo'), u'foo') + self.check(app_types.unichar_p, + self.space.wrap(Utf8Str.from_unicode(u'foo\u1234')), + Utf8Str.from_unicode(u'foo\u1234')) + self.check(app_types.unichar_p, self.space.wrap('foo'), + Utf8Str.from_unicode(u'foo')) # then, try to pass explicit pointers self.check(app_types.char_p, self.space.wrap(42), 42) self.check(app_types.unichar_p, self.space.wrap(42), 42) diff --git a/pypy/module/_rawffi/alt/type_converter.py b/pypy/module/_rawffi/alt/type_converter.py --- a/pypy/module/_rawffi/alt/type_converter.py +++ b/pypy/module/_rawffi/alt/type_converter.py @@ -2,6 +2,7 @@ from rpython.rlib import 
jit from rpython.rlib.rarithmetic import r_uint from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.utf8 import utf8chr from pypy.module._rawffi.structure import W_StructureInstance, W_Structure from pypy.module._rawffi.alt.interp_ffitype import app_types @@ -228,7 +229,7 @@ return space.wrap(chr(ucharval)) elif w_ffitype.is_unichar(): wcharval = self.get_unichar(w_ffitype) - return space.wrap(unichr(wcharval)) + return space.wrap(utf8chr(int(wcharval))) elif w_ffitype.is_double(): return self._float(w_ffitype) elif w_ffitype.is_singlefloat(): diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -42,14 +42,27 @@ if not space.is_none(w_items): items_w = space.unpackiterable(w_items) iterlength = len(items_w) - if iterlength > length: + + double_length_items = 0 + if rffi.sizeof(rffi.WCHAR_T) == 2: + # On systems where sizeof(wchar_t) = 2, the resulting array + # needs to be encoded in utf-16. 
As a result, codepoints larger + # than 0xFFFF will occupy two array values + for w_i in items_w: + if space.isinstance_w(w_i, space.w_unicode): + u = space.unicode_w(w_i) + if len(u) == 0 and utf8ord(u) > 0xFFFF: + double_length_items += 1 + + if iterlength + double_length_items > length: raise OperationError(space.w_ValueError, space.wrap("too many items for specified" " array length")) - for num in range(iterlength): - w_item = items_w[num] - unwrap_value(space, write_ptr, result.ll_buffer, num, - self.itemcode, w_item) + i = 0 + for w_item in items_w: + i += unwrap_value(space, write_ptr, result.ll_buffer, i, + self.itemcode, w_item) + return space.wrap(result) def descr_repr(self, space): diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -2,6 +2,9 @@ from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter.utf8 import ( + Utf8Str, utf8ord, utf8chr, WCHAR_INTP, wchar_rint) +from pypy.interpreter.utf8_codecs import create_surrogate_pair from rpython.rlib.clibffi import * from rpython.rtyper.lltypesystem import lltype, rffi @@ -85,6 +88,7 @@ LL_TYPEMAP['X'] = rffi.CCHARP LL_TYPEMAP['v'] = rffi.SHORT + def letter2tp(space, key): from pypy.module._rawffi.array import PRIMITIVE_ARRAY_TYPES try: @@ -269,6 +273,8 @@ ptr_val = t_array[0] return ptr_val else: + if T is rffi.CWCHARP: + return utf8chr(int(rffi.cast(WCHAR_INTP, ptr)[ofs])) return rffi.cast(T, ptr)[ofs] read_ptr._annspecialcase_ = 'specialize:arg(2)' @@ -382,14 +388,18 @@ else: ptr = unwrap_truncate_int(rffi.VOIDP, space, w_arg) push_func(add_arg, argdesc, ptr) + return 1 elif letter == "d": push_func(add_arg, argdesc, space.float_w(w_arg)) + return 1 elif letter == "f": push_func(add_arg, argdesc, rffi.cast(rffi.FLOAT, 
space.float_w(w_arg))) + return 1 elif letter == "g": push_func(add_arg, argdesc, rffi.cast(rffi.LONGDOUBLE, space.float_w(w_arg))) + return 1 elif letter == "c": s = space.str_w(w_arg) if len(s) != 1: @@ -397,20 +407,31 @@ "Expected string of length one as character")) val = s[0] push_func(add_arg, argdesc, val) + return 1 elif letter == 'u': s = space.unicode_w(w_arg) if len(s) != 1: raise OperationError(space.w_TypeError, w( "Expected unicode string of length one as wide character")) - val = s[0] - push_func(add_arg, argdesc, val) + + val = utf8ord(s) + if rffi.sizeof(rffi.WCHAR_T) == 2 and val > 0xFFFF: + # Utf-16 must be used on systems with a 2 byte wchar_t to + # encode codepoints > 0xFFFF + c1, c2 = create_surrogate_pair(val) + push_func(add_arg, argdesc, wchar_rint(c1)) + push_func(add_arg, argdesc+1, wchar_rint(c2)) + return 2 + else: + push_func(add_arg, argdesc, wchar_rint(val)) + return 1 else: for c in unroll_letters_for_numbers: if letter == c: TP = LL_TYPEMAP[c] val = unwrap_truncate_int(TP, space, w_arg) push_func(add_arg, argdesc, val) - return + return 1 else: raise OperationError(space.w_TypeError, space.wrap("cannot directly write value")) @@ -559,9 +580,9 @@ return space.w_None wcharp_addr = rffi.cast(rffi.CWCHARP, address) if maxlength == -1: - s = rffi.wcharp2unicode(wcharp_addr) + s = Utf8Str.from_wcharp(wcharp_addr) else: - s = rffi.wcharp2unicoden(wcharp_addr, maxlength) + s = Utf8Str.from_wcharpn(wcharp_addr, maxlength) return space.wrap(s) @unwrap_spec(address=r_uint, maxlength=int) From noreply at buildbot.pypy.org Tue Jul 15 18:12:24 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 15 Jul 2014 18:12:24 +0200 (CEST) Subject: [pypy-commit] benchmarks default: add another (bad) benchmark (qsort) Message-ID: <20140715161224.1181D1D22F0@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r266:225bf81cf984 Date: 2014-07-15 18:14 +0200 http://bitbucket.org/pypy/benchmarks/changeset/225bf81cf984/ Log: add another 
(bad) benchmark (qsort) diff --git a/multithread/quick_sort/quick_sort.py b/multithread/quick_sort/quick_sort.py new file mode 100644 --- /dev/null +++ b/multithread/quick_sort/quick_sort.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- + + + +import sys +import time, random +from common.abstract_threading import ( + atomic, Future, set_thread_pool, ThreadPool, + hint_commit_soon, print_abort_info) + +import itertools +from collections import deque + + +def chunks(iterable, size): + it = iter(iterable) + item = list(itertools.islice(it, size)) + while item: + yield item + item = list(itertools.islice(it, size)) + + + +def qsort(xs, l0, n): + if n < 2: + return + pivot = xs[l0 + n // 2] + l = l0 + r = l + n - 1 + while l <= r: + if xs[l] < pivot: + l += 1 + continue + if xs[r] > pivot: + r -= 1 + continue + xs[l], xs[r] = xs[r], xs[l] + l += 1 + r -= 1 + qsort(xs, l0, r - l0 + 1) + qsort(xs, l, l0 + n - l) + + +def qsort_f(xs, l0, n, level): + if n < 2: + return [] + + pivot = xs[l0 + n // 2] + l = l0 + r = l + n - 1 + while l <= r: + with atomic: + if xs[l] < pivot: + l += 1 + continue + if xs[r] > pivot: + r -= 1 + continue + xs[l], xs[r] = xs[r], xs[l] + l += 1 + r -= 1 + + fs = [] + #right_amount = 1000 > n // 2 > 505 + right_amount = level == 4 + if right_amount: + fs.append(Future(qsort_f, xs, l0, r - l0 + 1, level+1)) + fs.append(Future(qsort_f, xs, l, l0 + n - l, level+1)) + else: + fs.extend(qsort_f(xs, l0, r - l0 + 1, level+1)) + fs.extend(qsort_f(xs, l, l0 + n - l, level+1)) + #print_abort_info(0.0000001) + + return fs + + +def wait_for_futures(fs): + while fs: + f = fs.pop() + fs.extend(f()) + +def run(threads=2, n=100000): + threads = int(threads) + n = int(n) + + set_thread_pool(ThreadPool(threads)) + + + to_sort = range(n) + random.seed(121) + random.shuffle(to_sort) + s = deque(to_sort) + # qsort(s, 0, len(s)) + + fs = qsort_f(s, 0, len(s), 0) + wait_for_futures(fs) + + + # shutdown current pool + set_thread_pool(None) + + + +if __name__ == "__main__": 
+ run() From noreply at buildbot.pypy.org Tue Jul 15 18:25:33 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 15 Jul 2014 18:25:33 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix cpyext, but better this time Message-ID: <20140715162533.04AB21D2934@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72448:aeafcd4cceb6 Date: 2014-07-15 11:20 -0500 http://bitbucket.org/pypy/pypy/changeset/aeafcd4cceb6/ Log: Fix cpyext, but better this time diff --git a/pypy/interpreter/test/test_utf8.py b/pypy/interpreter/test/test_utf8.py --- a/pypy/interpreter/test/test_utf8.py +++ b/pypy/interpreter/test/test_utf8.py @@ -230,3 +230,11 @@ assert s == u rffi.free_wcharp(wcharp) + +def test_from_wcharpsize(): + u = u'A\u010F\0\u20AC\U0001F63D' + wcharp = rffi.unicode2wcharp(u) + s = Utf8Str.from_wcharpsize(wcharp, 4) + assert s == u[:4] + + rffi.free_wcharp(wcharp) diff --git a/pypy/interpreter/test/test_utf8_codecs.py b/pypy/interpreter/test/test_utf8_codecs.py --- a/pypy/interpreter/test/test_utf8_codecs.py +++ b/pypy/interpreter/test/test_utf8_codecs.py @@ -735,9 +735,10 @@ def test_encode_decimal(self): encoder = self.getencoder('decimal') - assert encoder(u' 12, 34 ', 8, None) == ' 12, 34 ' - py.test.raises(UnicodeEncodeError, encoder, u' 12, \u1234 ', 7, None) - assert encoder(u'u\u1234', 2, 'replace') == 'u?' + assert encoder(Utf8Str(' 12, 34 '), 8, None) == ' 12, 34 ' + py.test.raises(UnicodeEncodeError, encoder, + Utf8Str.from_unicode(u' 12, \u1234 '), 7, None) + assert encoder(Utf8Str.from_unicode(u'u\u1234'), 2, 'replace') == 'u?' 
class TestTranslation(object): diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -507,6 +507,29 @@ return builder.build() + @staticmethod + def from_wcharpsize(wcharp, size): + array = rffi.cast(WCHAR_INTP, wcharp) + builder = Utf8Builder() + i = 0; + while i < size: + c = int(array[i]) + + if rffi.sizeof(rffi.WCHAR_T) == 2: + if i != size - 1 and 0xD800 <= c <= 0xDBFF: + i += 1 + c2 = int(array[i]) + if not (0xDC00 <= c2 <= 0xDFFF): + builder.append(c) + c = c2 + else: + c = (((c & 0x3FF)<<10) | (c2 & 0x3FF)) + 0x10000; + + builder.append(c) + i += 1 + + return builder.build() + class Utf8Builder(object): @specialize.argtype(1) def __init__(self, init_size=None): diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -1410,7 +1410,7 @@ errorhandler('strict', 'mbcs', msg, s, 0, 0) if size == 0: - return u"", 0 + return Utf8Str(""), 0 if force_ignore or errors == 'ignore': flags = 0 @@ -1505,7 +1505,7 @@ result = StringBuilder(size) pos = 0 while pos < size: - ch = ord(s[pos]) + ch = utf8ord(s, pos) if unicodedb.isspace(ch): result.append(' ') pos += 1 @@ -1526,7 +1526,7 @@ collstart = pos collend = collstart + 1 while collend < size: - ch = ord(s[collend]) + ch = utf8ord(s, collend) try: if (0 < ch < 256 or unicodedb.isspace(ch) or diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -226,19 +226,13 @@ start = space.int_w(space.getattr(w_exc, space.wrap('start'))) w_end = space.getattr(w_exc, space.wrap('end')) end = space.int_w(w_end) - builder = UnicodeBuilder() + builder = Utf8Builder() pos = start while pos < end: code = utf8ord(obj, pos) - if (MAXUNICODE == 0xffff and 0xD800 <= code <= 0xDBFF and - pos + 1 < end and 0xDC00 <= ord(obj[pos+1]) <= 0xDFFF): - 
code = (code & 0x03FF) << 10 - code |= ord(obj[pos+1]) & 0x03FF - code += 0x10000 - pos += 1 - builder.append(u"&#") - builder.append(unicode(str(code))) - builder.append(u";") + builder.append("&#") + builder.append(str(code)) + builder.append(";") pos += 1 return space.newtuple([space.wrap(builder.build()), w_end]) else: diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -11,6 +11,7 @@ from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall from pypy.objspace.std.typeobject import W_TypeObject from pypy.interpreter.error import OperationError +from pypy.interpreter.utf8 import Utf8Str import pypy.module.__builtin__.operation as operation @@ -239,7 +240,7 @@ the Python expression unicode(o). Called by the unicode() built-in function.""" if w_obj is None: - return space.wrap(u"") + return space.wrap(Utf8Str("")) return space.call_function(space.w_unicode, w_obj) @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) diff --git a/pypy/module/cpyext/test/test_codecs.py b/pypy/module/cpyext/test/test_codecs.py --- a/pypy/module/cpyext/test/test_codecs.py +++ b/pypy/module/cpyext/test/test_codecs.py @@ -1,4 +1,5 @@ # encoding: iso-8859-15 +from pypy.interpreter.utf8 import Utf8Str from pypy.module.cpyext.test.test_api import BaseApiTest from rpython.rtyper.lltypesystem import rffi, lltype @@ -6,7 +7,8 @@ def test_incremental(self, space, api): utf8 = rffi.str2charp('utf-8') w_encoder = api.PyCodec_IncrementalEncoder(utf8, None) - w_encoded = space.call_method(w_encoder, 'encode', space.wrap(u'späm')) + w_encoded = space.call_method(w_encoder, 'encode', + space.wrap(Utf8Str.from_unicode(u'späm'))) w_decoder = api.PyCodec_IncrementalDecoder(utf8, None) w_decoded = space.call_method(w_decoder, 'decode', w_encoded) assert space.unwrap(w_decoded) == u'späm' diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- 
a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -1,5 +1,6 @@ import py +from pypy.interpreter.utf8 import Utf8Str from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype @@ -188,7 +189,7 @@ assert ptr[0] == -1 assert api.PyObject_Cmp(w("a"), w("a"), ptr) == 0 assert ptr[0] == 0 - assert api.PyObject_Cmp(w(u"\xe9"), w("\xe9"), ptr) < 0 + assert api.PyObject_Cmp(w(Utf8Str.from_unicode(u"\xe9")), w("\xe9"), ptr) < 0 assert api.PyErr_Occurred() api.PyErr_Clear() diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -1,5 +1,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError +from pypy.interpreter.utf8 import Utf8Str from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext import sequence import py.test @@ -73,7 +74,7 @@ def test_contains(self, space, api): w_t = space.wrap((1, 'ha')) - assert api.PySequence_Contains(w_t, space.wrap(u'ha')) + assert api.PySequence_Contains(w_t, space.wrap(Utf8Str.from_unicode('ha'))) assert not api.PySequence_Contains(w_t, space.wrap(2)) assert api.PySequence_Contains(space.w_None, space.wrap(2)) == -1 assert api.PyErr_Occurred() diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -1,4 +1,5 @@ # encoding: iso-8859-15 +from pypy.interpreter.utf8 import Utf8Str from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.unicodeobject import ( @@ -78,10 +79,11 @@ class TestUnicode(BaseApiTest): def 
test_unicodeobject(self, space, api): - assert api.PyUnicode_GET_SIZE(space.wrap(u'sp�m')) == 4 - assert api.PyUnicode_GetSize(space.wrap(u'sp�m')) == 4 + wrap_u = lambda x: space.wrap(Utf8Str.from_unicode(x)) + assert api.PyUnicode_GET_SIZE(wrap_u(u'sp�m')) == 4 + assert api.PyUnicode_GetSize(wrap_u(u'sp�m')) == 4 unichar = rffi.sizeof(Py_UNICODE) - assert api.PyUnicode_GET_DATA_SIZE(space.wrap(u'sp�m')) == 4 * unichar + assert api.PyUnicode_GET_DATA_SIZE(wrap_u(u'sp�m')) == 4 * unichar encoding = rffi.charp2str(api.PyUnicode_GetDefaultEncoding()) w_default_encoding = space.call_function( @@ -103,7 +105,8 @@ rffi.free_charp(prev_encoding) def test_AS(self, space, api): - word = space.wrap(u'spam') + wrap_u = lambda x: space.wrap(Utf8Str.from_unicode(x)) + word = space.wrap(Utf8Str('spam')) array = rffi.cast(rffi.CWCHARP, api.PyUnicode_AS_DATA(word)) array2 = api.PyUnicode_AS_UNICODE(word) array3 = api.PyUnicode_AsUnicode(word) @@ -115,10 +118,10 @@ space.wrap('spam')) utf_8 = rffi.str2charp('utf-8') - encoded = api.PyUnicode_AsEncodedString(space.wrap(u'sp�m'), + encoded = api.PyUnicode_AsEncodedString(wrap_u(u'sp�m'), utf_8, None) assert space.unwrap(encoded) == 'sp\xc3\xa4m' - encoded_obj = api.PyUnicode_AsEncodedObject(space.wrap(u'sp�m'), + encoded_obj = api.PyUnicode_AsEncodedObject(wrap_u(u'sp�m'), utf_8, None) assert space.eq_w(encoded, encoded_obj) self.raises(space, api, TypeError, api.PyUnicode_AsEncodedString, @@ -127,7 +130,7 @@ space.wrap(''), None, None) ascii = rffi.str2charp('ascii') replace = rffi.str2charp('replace') - encoded = api.PyUnicode_AsEncodedString(space.wrap(u'sp�m'), + encoded = api.PyUnicode_AsEncodedString(wrap_u(u'sp�m'), ascii, replace) assert space.unwrap(encoded) == 'sp?m' rffi.free_charp(utf_8) @@ -135,9 +138,9 @@ rffi.free_charp(ascii) buf = rffi.unicode2wcharp(u"12345") - api.PyUnicode_AsWideChar(space.wrap(u'longword'), buf, 5) + api.PyUnicode_AsWideChar(wrap_u(u'longword'), buf, 5) assert rffi.wcharp2unicode(buf) == 'longw' 
- api.PyUnicode_AsWideChar(space.wrap(u'a'), buf, 5) + api.PyUnicode_AsWideChar(wrap_u(u'a'), buf, 5) assert rffi.wcharp2unicode(buf) == 'a' rffi.free_wcharp(buf) @@ -175,7 +178,7 @@ lltype.free(ar, flavor='raw') def test_AsUTF8String(self, space, api): - w_u = space.wrap(u'sp�m') + w_u = space.wrap(Utf8Str.from_unicode(u'sp�m')) w_res = api.PyUnicode_AsUTF8String(w_u) assert space.type(w_res) is space.w_str assert space.unwrap(w_res) == 'sp\xc3\xa4m' @@ -293,7 +296,7 @@ assert api.Py_UNICODE_TONUMERIC(u'\N{VULGAR FRACTION ONE HALF}') == .5 def test_fromobject(self, space, api): - w_u = space.wrap(u'a') + w_u = space.wrap(Utf8Str('a')) assert api.PyUnicode_FromObject(w_u) is w_u assert space.unwrap( api.PyUnicode_FromObject(space.wrap('test'))) == u'test' @@ -308,7 +311,8 @@ assert space.isinstance_w(w_text, space.w_unicode) assert space.unwrap(w_text) == u"test" - assert api.PyUnicode_FromEncodedObject(space.wrap(u"test"), b_encoding, None) is None + assert api.PyUnicode_FromEncodedObject(space.wrap(Utf8Str("test")), + b_encoding, None) is None assert api.PyErr_Occurred() is space.w_TypeError assert api.PyUnicode_FromEncodedObject(space.wrap(1), b_encoding, None) is None assert api.PyErr_Occurred() is space.w_TypeError @@ -319,7 +323,7 @@ def test_decode_null_encoding(self, space, api): null_charp = lltype.nullptr(rffi.CCHARP.TO) - u_text = u'abcdefg' + u_text = Utf8Str('abcdefg') s_text = space.str_w(api.PyUnicode_AsEncodedString(space.wrap(u_text), null_charp, null_charp)) b_text = rffi.str2charp(s_text) assert space.unwrap(api.PyUnicode_Decode(b_text, len(s_text), null_charp, null_charp)) == u_text @@ -347,7 +351,7 @@ def test_escape(self, space, api): def test(ustr): - w_ustr = space.wrap(ustr.decode('Unicode-Escape')) + w_ustr = space.wrap(Utf8Str.from_unicode(ustr.decode('Unicode-Escape'))) result = api.PyUnicode_AsUnicodeEscapeString(w_ustr) assert space.eq_w(space.wrap(ustr), result) @@ -357,12 +361,12 @@ def test_ascii(self, space, api): ustr = "abcdef" - 
w_ustr = space.wrap(ustr.decode("ascii")) + w_ustr = space.wrap(Utf8Str.from_unicode(ustr.decode("ascii"))) result = api.PyUnicode_AsASCIIString(w_ustr) assert space.eq_w(space.wrap(ustr), result) - w_ustr = space.wrap(u"abcd\xe9f") + w_ustr = space.wrap(Utf8Str.from_unicode(u"abcd\xe9f")) self.raises(space, api, UnicodeEncodeError, api.PyUnicode_AsASCIIString, w_ustr) def test_decode_utf16(self, space, api): @@ -441,19 +445,19 @@ assert api.PyUnicode_Compare(space.wrap('a'), space.wrap('b')) == -1 def test_copy(self, space, api): - w_x = space.wrap(u"abcd\u0660") + w_x = space.wrap(Utf8Str.from_unicode(u"abcd\u0660")) target_chunk, _ = rffi.alloc_unicodebuffer(space.int_w(space.len(w_x))) #lltype.malloc(Py_UNICODE, space.int_w(space.len(w_x)), flavor='raw') x_chunk = api.PyUnicode_AS_UNICODE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, 4) - w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, 4)) + w_y = space.wrap(Utf8Str.from_wcharpsize(target_chunk, 4)) - assert space.eq_w(w_y, space.wrap(u"abcd")) + assert space.eq_w(w_y, space.wrap(Utf8Str("abcd"))) size = api.PyUnicode_GET_SIZE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, size) - w_y = space.wrap(rffi.wcharpsize2unicode(target_chunk, size)) + w_y = space.wrap(Utf8Str.from_wcharpsize(target_chunk, size)) assert space.eq_w(w_y, w_x) @@ -463,7 +467,7 @@ s = 'abcdefg' data = rffi.str2charp(s) w_u = api.PyUnicode_DecodeASCII(data, len(s), lltype.nullptr(rffi.CCHARP.TO)) - assert space.eq_w(w_u, space.wrap(u"abcdefg")) + assert space.eq_w(w_u, space.wrap(Utf8Str("abcdefg"))) rffi.free_charp(data) s = 'abcd\xFF' @@ -489,7 +493,7 @@ s = 'abcdefg' data = rffi.str2charp(s) w_u = api.PyUnicode_DecodeLatin1(data, len(s), lltype.nullptr(rffi.CCHARP.TO)) - assert space.eq_w(w_u, space.wrap(u"abcdefg")) + assert space.eq_w(w_u, space.wrap(Utf8Str("abcdefg"))) rffi.free_charp(data) uni = u'abcdefg' @@ -499,19 +503,19 @@ rffi.free_wcharp(data) ustr = "abcdef" - w_ustr = space.wrap(ustr.decode("ascii")) + w_ustr = 
space.wrap(Utf8Str.from_unicode(ustr.decode("ascii"))) result = api.PyUnicode_AsLatin1String(w_ustr) assert space.eq_w(space.wrap(ustr), result) def test_format(self, space, api): - w_format = space.wrap(u'hi %s') - w_args = space.wrap((u'test',)) + w_format = space.wrap(Utf8Str('hi %s')) + w_args = space.wrap((Utf8Str('test'),)) w_formated = api.PyUnicode_Format(w_format, w_args) assert space.unwrap(w_formated) == space.unwrap(space.mod(w_format, w_args)) def test_join(self, space, api): - w_sep = space.wrap(u'') - w_seq = space.wrap([u'a', u'b']) + w_sep = space.wrap(Utf8Str('')) + w_seq = space.wrap([Utf8Str('a'), Utf8Str('b')]) w_joined = api.PyUnicode_Join(w_sep, w_seq) assert space.unwrap(w_joined) == u'ab' @@ -524,16 +528,16 @@ assert space.unwrap(w_char) == u'\uFFFF' def test_replace(self, space, api): - w_str = space.wrap(u"abababab") - w_substr = space.wrap(u"a") - w_replstr = space.wrap(u"z") + w_str = space.wrap(Utf8Str("abababab")) + w_substr = space.wrap(Utf8Str("a")) + w_replstr = space.wrap(Utf8Str("z")) assert u"zbzbabab" == space.unwrap( api.PyUnicode_Replace(w_str, w_substr, w_replstr, 2)) assert u"zbzbzbzb" == space.unwrap( api.PyUnicode_Replace(w_str, w_substr, w_replstr, -1)) def test_tailmatch(self, space, api): - w_str = space.wrap(u"abcdef") + w_str = space.wrap(Utf8Str("abcdef")) # prefix match assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 2, 9, -1) == 1 assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 2, 4, -1) == 0 # ends at 'd' @@ -552,28 +556,30 @@ 2, 10, 1) def test_count(self, space, api): - w_str = space.wrap(u"abcabdab") - assert api.PyUnicode_Count(w_str, space.wrap(u"ab"), 0, -1) == 2 - assert api.PyUnicode_Count(w_str, space.wrap(u"ab"), 0, 2) == 1 - assert api.PyUnicode_Count(w_str, space.wrap(u"ab"), -5, 30) == 2 + wrap_u = lambda x: space.wrap(Utf8Str.from_unicode(x)) + w_str = wrap_u(u"abcabdab") + assert api.PyUnicode_Count(w_str, wrap_u(u"ab"), 0, -1) == 2 + assert api.PyUnicode_Count(w_str, 
wrap_u(u"ab"), 0, 2) == 1 + assert api.PyUnicode_Count(w_str, wrap_u(u"ab"), -5, 30) == 2 def test_find(self, space, api): - w_str = space.wrap(u"abcabcd") - assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 0, 7, 1) == 2 - assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 3, 7, 1) == 5 - assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 0, 7, -1) == 5 - assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 3, 7, -1) == 5 - assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 0, 4, -1) == 2 - assert api.PyUnicode_Find(w_str, space.wrap(u"z"), 0, 4, -1) == -1 + wrap_u = lambda x: space.wrap(Utf8Str.from_unicode(x)) + w_str = wrap_u("abcabcd") + assert api.PyUnicode_Find(w_str, wrap_u(u"c"), 0, 7, 1) == 2 + assert api.PyUnicode_Find(w_str, wrap_u(u"c"), 3, 7, 1) == 5 + assert api.PyUnicode_Find(w_str, wrap_u(u"c"), 0, 7, -1) == 5 + assert api.PyUnicode_Find(w_str, wrap_u(u"c"), 3, 7, -1) == 5 + assert api.PyUnicode_Find(w_str, wrap_u(u"c"), 0, 4, -1) == 2 + assert api.PyUnicode_Find(w_str, wrap_u(u"z"), 0, 4, -1) == -1 def test_split(self, space, api): - w_str = space.wrap(u"a\nb\nc\nd") + w_str = space.wrap(Utf8Str("a\nb\nc\nd")) assert "[u'a', u'b', u'c', u'd']" == space.unwrap(space.repr( api.PyUnicode_Split(w_str, space.wrap('\n'), -1))) assert r"[u'a', u'b', u'c\nd']" == space.unwrap(space.repr( api.PyUnicode_Split(w_str, space.wrap('\n'), 2))) assert r"[u'a', u'b', u'c d']" == space.unwrap(space.repr( - api.PyUnicode_Split(space.wrap(u'a\nb c d'), None, 2))) + api.PyUnicode_Split(space.wrap(Utf8Str('a\nb c d')), None, 2))) assert "[u'a', u'b', u'c', u'd']" == space.unwrap(space.repr( api.PyUnicode_Splitlines(w_str, 0))) assert r"[u'a\n', u'b\n', u'c\n', u'd']" == space.unwrap(space.repr( diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -1,4 +1,5 @@ from pypy.interpreter.error import OperationError +from pypy.interpreter.utf8 import Utf8Str 
from pypy.interpreter import utf8_codecs from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.unicodedata import unicodedb @@ -71,7 +72,7 @@ be modified after this call. """ py_uni = rffi.cast(PyUnicodeObject, py_obj) - s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size) + s = Utf8Str.from_wcharpsize(py_uni.c_buffer, py_uni.c_size) w_obj = space.wrap(s) track_reference(space, py_obj, w_obj) return w_obj @@ -330,7 +331,7 @@ Therefore, modification of the resulting Unicode object is only allowed when u is NULL.""" if wchar_p: - s = rffi.wcharpsize2unicode(wchar_p, length) + s = rffi.Utf8Str.from_wcharpsize(wchar_p, length) return make_ref(space, space.wrap(s)) else: return rffi.cast(PyObject, new_empty_unicode(space, length)) @@ -495,7 +496,7 @@ """Encode the Py_UNICODE buffer of the given size and return a Python string object. Return NULL if an exception was raised by the codec.""" - w_u = space.wrap(rffi.wcharpsize2unicode(s, size)) + w_u = space.wrap(Utf8Str.from_wcharpsize(s, size)) if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: @@ -635,7 +636,7 @@ Returns 0 on success, -1 on failure. 
""" - u = rffi.wcharpsize2unicode(s, length) + u = Utf8Str.from_wcharpsize(s, length) if llerrors: errors = rffi.charp2str(llerrors) else: From noreply at buildbot.pypy.org Tue Jul 15 22:42:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 15 Jul 2014 22:42:02 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: in-(slow)-progress Message-ID: <20140715204202.E10621C0091@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5355:f0be65f30f5a Date: 2014-07-15 22:42 +0200 http://bitbucket.org/pypy/extradoc/changeset/f0be65f30f5a/ Log: in-(slow)-progress diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst --- a/talk/ep2014/stm/talk.rst +++ b/talk/ep2014/stm/talk.rst @@ -7,7 +7,51 @@ Part 1 - Intro and Current Status =========================================== -xxx +- stm/demo/ + +- stm/bottle/ + +- transaction module; multiprocessing-like Pool(); etc. + +- a large demo of some well-known program where + it solves everything?... there is no such thing + because the large program's author have already + solved it + +- compare with garbage collection in C: + + - usually you do it with malloc()/free() + + - sometimes you need more control, and e.g. you add + some reference counts + + - sometimes you use more specialized versions for + performance, e.g. allocate in a pool and throw it + completely away at the end of some phase + + - Boehm GC, a GC for C: what kind of demo can you + present for it? You take a C program, remove all + free(), relink malloc() to Boehm, and it works + more slowly... + + - nevertheless, GCC did exactly that. Why? 
+ +- so, the GIL: we already have different workarounds for + different kinds of problems (use "multiprocessing"; or + start N processes and have them communicate in one + way or another) + +- this talk is about the GIL's equivalent of the Boehm GC + for C: simplify your life for some problems, with a + reasonable performance cost + +- the problems are: + + - anything where the GIL is a blocker, obviously + + - but also any program with "often-parallelizable" + sections of code + =========================================== @@ -33,7 +77,8 @@ - reads are more common than writes: optimize read barriers - pypy-stm: write a thread-local flag "this object has been read", - show code for read barrier and fast-path of write barrier + show code for read barrier and fast-path of write barrier; + note about using the C library for CPython too - reads are not synchronized at all between CPUs, but it's wrong to read data written by other in-progress transactions; @@ -55,8 +100,10 @@ - picture with nursery -- the GC can use the same write barrier + =========================================== Part 3 - Multithreading Revisited =========================================== -xxx + +- From noreply at buildbot.pypy.org Wed Jul 16 07:58:54 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 16 Jul 2014 07:58:54 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140716055854.AFCD01D284F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72449:487697aaa730 Date: 2014-07-14 18:29 -0700 http://bitbucket.org/pypy/pypy/changeset/487697aaa730/ Log: merge default diff too long, truncating to 2000 out of 6441 lines diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -389,12 +389,13 @@ func.__name__ = name_or_ordinal return func -class PyDLL(CDLL): - """This class represents the Python library itself. 
It allows to - access Python API functions. The GIL is not released, and - Python exceptions are handled correctly. - """ - _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI +# Not in PyPy +#class PyDLL(CDLL): +# """This class represents the Python library itself. It allows to +# access Python API functions. The GIL is not released, and +# Python exceptions are handled correctly. +# """ +# _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI if _os.name in ("nt", "ce"): @@ -447,15 +448,8 @@ return self._dlltype(name) cdll = LibraryLoader(CDLL) -pydll = LibraryLoader(PyDLL) - -if _os.name in ("nt", "ce"): - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - +# not on PyPy +#pydll = LibraryLoader(PyDLL) if _os.name in ("nt", "ce"): windll = LibraryLoader(WinDLL) diff --git a/lib-python/2.7/ctypes/test/test_values.py b/lib-python/2.7/ctypes/test/test_values.py --- a/lib-python/2.7/ctypes/test/test_values.py +++ b/lib-python/2.7/ctypes/test/test_values.py @@ -4,6 +4,7 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test @@ -23,7 +24,8 @@ class Win_ValuesTestCase(unittest.TestCase): """This test only works when python itself is a dll/shared library""" - + + @xfail def test_optimizeflag(self): # This test accesses the Py_OptimizeFlag intger, which is # exported by the Python dll. @@ -40,6 +42,7 @@ else: self.assertEqual(opt, 2) + @xfail def test_frozentable(self): # Python exports a PyImport_FrozenModules symbol. This is a # pointer to an array of struct _frozen entries. 
The end of the @@ -75,6 +78,7 @@ from ctypes import _pointer_type_cache del _pointer_type_cache[struct_frozen] + @xfail def test_undefined(self): self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol") diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -98,6 +98,17 @@ self.assertTrue(key in self.g) self.assertTrue(self.g.has_key(key)) + def test_unicode_key(self): + key = u'ab' + value = u'cd' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) + def test_main(): run_unittest(TestGdbm) diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -55,11 +55,6 @@ import gc import sys import time -try: - import itertools -except ImportError: - # Must be an older Python version (see timeit() below) - itertools = None __all__ = ["Timer"] @@ -81,7 +76,8 @@ def inner(_it, _timer): %(setup)s _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 %(stmt)s _t1 = _timer() return _t1 - _t0 @@ -96,7 +92,8 @@ def inner(_it, _timer, _func=func): setup() _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 _func() _t1 = _timer() return _t1 - _t0 @@ -133,9 +130,19 @@ else: raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec code in globals(), ns - self.inner = ns["inner"] + def make_inner(): + # PyPy tweak: recompile the source code each time before + # calling inner(). 
There are situations like Issue #1776 + # where PyPy tries to reuse the JIT code from before, + # but that's not going to work: the first thing the + # function does is the "-s" statement, which may declare + # new classes (here a namedtuple). We end up with + # bridges from the inner loop; more and more of them + # every time we call inner(). + code = compile(src, dummy_src_name, "exec") + exec code in globals(), ns + return ns["inner"] + self.make_inner = make_inner elif hasattr(stmt, '__call__'): self.src = None if isinstance(setup, basestring): @@ -144,7 +151,8 @@ exec _setup in globals(), ns elif not hasattr(setup, '__call__'): raise ValueError("setup is neither a string nor callable") - self.inner = _template_func(setup, stmt) + inner = _template_func(setup, stmt) + self.make_inner = lambda: inner else: raise ValueError("stmt is neither a string nor callable") @@ -185,15 +193,12 @@ to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. 
""" - if itertools: - it = itertools.repeat(None, number) - else: - it = [None] * number + inner = self.make_inner() gcold = gc.isenabled() if '__pypy__' not in sys.builtin_module_names: gc.disable() # only do that on CPython try: - timing = self.inner(it, self.timer) + timing = inner(number, self.timer) finally: if gcold: gc.enable() diff --git a/lib_pypy/_gdbm.py b/lib_pypy/_gdbm.py --- a/lib_pypy/_gdbm.py +++ b/lib_pypy/_gdbm.py @@ -75,8 +75,8 @@ def _raise_from_errno(self): if ffi.errno: - raise error(os.strerror(ffi.errno)) - raise error(lib.gdbm_strerror(lib.gdbm_errno)) + raise error(ffi.errno, os.strerror(ffi.errno)) + raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno)) def __len__(self): if self.size < 0: @@ -151,7 +151,7 @@ def _check_closed(self): if not self.ll_dbm: - raise error("GDBM object has already been closed") + raise error(0, "GDBM object has already been closed") __del__ = close @@ -180,7 +180,7 @@ elif flags[0] == 'n': iflags = lib.GDBM_NEWDB else: - raise error("First flag must be one of 'r', 'w', 'c' or 'n'") + raise error(0, "First flag must be one of 'r', 'w', 'c' or 'n'") for flag in flags[1:]: if flag == 'f': iflags |= lib.GDBM_FAST @@ -189,7 +189,7 @@ elif flag == 'u': iflags |= lib.GDBM_NOLOCK else: - raise error("Flag '%s' not supported" % flag) + raise error(0, "Flag '%s' not supported" % flag) return gdbm(filename, iflags, mode) open_flags = "rwcnfsu" diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.2" -__version_info__ = (0, 8, 2) +__version__ = "0.8.6" +__version_info__ = (0, 8, 6) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -55,8 +55,7 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . 
import __version__ - assert (backend.__version__ == __version__ or - backend.__version__ == __version__[:3]) + assert backend.__version__ == __version__ # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -435,14 +435,14 @@ enumerator, enumerator, enumvalue)) prnt(' char buf[64];') prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % enumerator) - prnt(' snprintf(out_error, 255,' + prnt(' sprintf(out_error,' ' "%s has the real value %s, not %s",') prnt(' "%s", buf, "%d");' % ( - enumerator, enumvalue)) + enumerator[:100], enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,13 +72,11 @@ Here is a list of the limitations and missing features of the current implementation: -* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer - of PyPy, at your own risks and without doing anything sensible about - the GIL. Since PyPy 2.3, these functions are also named with an extra - "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, - but it might more or less work in simple cases if you do. (Obviously, - assuming the PyObject pointers you get have any particular fields in - any particular order is just going to crash.) +* ``ctypes.pythonapi`` is missing. 
In previous versions, it was present + and redirected to the `cpyext` C API emulation layer, but our + implementation did not do anything sensible about the GIL and the + functions were named with an extra "Py", for example + ``PyPyInt_FromLong()``. It was removed for being unhelpful. * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -171,16 +171,21 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. -Note that the JIT has a very high warm-up cost, meaning that the -programs are slow at the beginning. If you want to compare the timings -with CPython, even relatively simple programs need to run *at least* one -second, preferrably at least a few seconds. Large, complicated programs -need even more time to warm-up the JIT. +`Your tests are not a benchmark`_: tests tend to be slow under PyPy +because they run exactly once; if they are good tests, they exercise +various corner cases in your code. This is a bad case for JIT +compilers. Note also that our JIT has a very high warm-up cost, meaning +that any program is slow at the beginning. If you want to compare the +timings with CPython, even relatively simple programs need to run *at +least* one second, preferrably at least a few seconds. Large, +complicated programs need even more time to warm-up the JIT. .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +.. _`your tests are not a benchmark`: http://alexgaynor.net/2013/jul/15/your-tests-are-not-benchmark/ + --------------------------------------------------------------- Couldn't the JIT dump and reload already-compiled machine code? --------------------------------------------------------------- @@ -465,9 +470,13 @@ This is documented (here__ and here__). It needs 4 GB of RAM to run "rpython targetpypystandalone" on top of PyPy, a bit more when running -on CPython. 
If you have less than 4 GB it will just swap forever (or -fail if you don't have enough swap). On 32-bit, divide the numbers by -two. +on top of CPython. If you have less than 4 GB free, it will just swap +forever (or fail if you don't have enough swap). And we mean *free:* +if the machine has 4 GB *in total,* then it will swap. + +On 32-bit, divide the numbers by two. (We didn't try recently, but in +the past it was possible to compile a 32-bit version on a 2 GB Linux +machine with nothing else running: no Gnome/KDE, for example.) .. __: http://pypy.org/download.html#building-from-source .. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst --- a/pypy/doc/jit-hooks.rst +++ b/pypy/doc/jit-hooks.rst @@ -34,7 +34,7 @@ aborted due to some reason. The hook will be invoked with the siagnture: - ``hook(jitdriver_name, greenkey, reason)`` + ``hook(jitdriver_name, greenkey, reason, oplist)`` Reason is a string, the meaning of other arguments is the same as attributes on JitLoopInfo object diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -26,6 +26,16 @@ Transparent Proxies ================================ +.. warning:: + + This is a feature that was tried experimentally long ago, and we + found no really good use cases. The basic functionality is still + there, but we don't recommend using it. Some of the examples below + might not work any more (e.g. you can't tproxy a list object any + more). The rest can be done by hacking in standard Python. If + anyone is interested in working on tproxy again, he is welcome, but + we don't regard this as an interesting extension. + PyPy's Transparent Proxies allow routing of operations on objects to a callable. 
Application level code can customize objects without interfering with the type system - ``type(proxied_list) is list`` holds true diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -28,7 +28,8 @@ Introduction ============ -``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ +``pypy-stm`` is a variant of the regular PyPy interpreter. (This +version supports Python 2.7; see below for `Python 3`_.) With caveats_ listed below, it should be in theory within 20%-50% slower than a regular PyPy, comparing the JIT version in both cases (but see below!). It is called @@ -92,9 +93,9 @@ We're busy fixing them as we find them; feel free to `report bugs`_. * It runs with an overhead as low as 20% on examples like "richards". - There are also other examples with higher overheads --up to 10x for - "translate.py"-- which we are still trying to understand. One suspect - is our partial GC implementation, see below. + There are also other examples with higher overheads --currently up to + 2x for "translate.py"-- which we are still trying to understand. + One suspect is our partial GC implementation, see below. * Currently limited to 1.5 GB of RAM (this is just a parameter in `core.h`__). Memory overflows are not correctly handled; they cause @@ -111,9 +112,8 @@ * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. Programs allocating large - numbers of small objects that don't immediately die, as well as - programs that modify large lists or dicts, suffer from these missing - optimizations. + numbers of small objects that don't immediately die (surely a common + situation) suffer from these missing optimizations. * The GC has no support for destructors: the ``__del__`` method is never called (including on file objects, which won't be closed for you). 
@@ -138,6 +138,25 @@ +Python 3 +======== + +In this document I describe "pypy-stm", which is based on PyPy's Python +2.7 interpreter. Supporting Python 3 should take about half an +afternoon of work. Obviously, what I *don't* mean is that by tomorrow +you can have a finished and polished "pypy3-stm" product. General py3k +work is still missing; and general stm work is also still missing. But +they are rather independent from each other, as usual in PyPy. The +required afternoon of work will certainly be done one of these days now +that the internal interfaces seem to stabilize. + +The same is true for other languages implemented in the RPython +framework, although the amount of work to put there might vary, because +the STM framework within RPython is currently targeting the PyPy +interpreter and other ones might have slightly different needs. + + + User Guide ========== @@ -490,8 +509,6 @@ The last two lines are special; they are an internal marker read by ``transactional_memory.print_abort_info()``. -These statistics are not printed out for the main thread, for now. - Reference to implementation details ----------------------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -30,3 +30,23 @@ x86-64, the JIT backend has a special optimization that lets it emit directly a single MOV from a %gs- or %fs-based address. It seems actually to give a good boost in performance. + +.. branch: fast-gil +A faster way to handle the GIL, particularly in JIT code. The GIL is +now a composite of two concepts: a global number (it's just set from +1 to 0 and back around CALL_RELEASE_GIL), and a real mutex. If there +are threads waiting to acquire the GIL, one of them is actively +checking the global number every 0.1 ms to 1 ms. Overall, JIT loops +full of external function calls now run a bit faster (if no thread was +started yet), or a *lot* faster (if threads were started already). 
+ +.. branch: jit-get-errno +Optimize the errno handling in the JIT, notably around external +function calls. Linux-only. + +.. branch: disable_pythonapi +Remove non-functioning ctypes.pyhonapi and ctypes.PyDLL, document this +incompatibility with cpython. Recast sys.dllhandle to an int. + +.. branch: scalar-operations +Fix performance regression on ufunc(, ) in numpy. diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -132,19 +132,23 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download http://www.gzip.org/zlib/zlib-1.2.3.tar.gz and extract it in -the base directory. Then compile:: +the base directory. Then compile as a static library:: cd zlib-1.2.3 nmake -f win32\Makefile.msc - copy zlib1.dll \zlib.dll + copy zlib1.lib + copy zlib.h zconf.h The bz2 compression library ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Get the same version of bz2 used by python and compile as a static library:: svn export http://svn.python.org/projects/external/bzip2-1.0.6 cd bzip2-1.0.6 nmake -f makefile.msc - copy bzip.dll \bzip.dll + copy libbz2.lib + copy bzlib.h + The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -166,7 +170,8 @@ is actually enough for pypy). Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in -your PATH. +your PATH, ``win32\bin\release\libexpat.lib`` somewhere in LIB, and +both ``lib\expat.h`` and ``lib\expat_external.h`` somewhere in INCLUDE. The OpenSSL library ~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -654,8 +654,11 @@ else: # translated case follows. self.threadlocals is either from # 'pypy.interpreter.miscutils' or 'pypy.module.thread.threadlocals'. - # the result is assumed to be non-null: enter_thread() was called. - return self.threadlocals.get_ec() + # the result is assumed to be non-null: enter_thread() was called + # by space.startup(). 
+ ec = self.threadlocals.get_ec() + assert ec is not None + return ec def _freeze_(self): return True @@ -957,6 +960,13 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_no_unpack(self, w_iterable): + """ Same as listview() if cheap. If 'w_iterable' is something like + a generator, for example, then return None instead. + May return None anyway. + """ + return None + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -499,6 +499,13 @@ """ +class UserDelCallback(object): + def __init__(self, w_obj, callback, descrname): + self.w_obj = w_obj + self.callback = callback + self.descrname = descrname + self.next = None + class UserDelAction(AsyncAction): """An action that invokes all pending app-level __del__() method. This is done as an action instead of immediately when the @@ -509,13 +516,19 @@ def __init__(self, space): AsyncAction.__init__(self, space) - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None self.finalizers_lock_count = 0 self.enabled_at_app_level = True self._invoke_immediately = False def register_callback(self, w_obj, callback, descrname): - self.dying_objects.append((w_obj, callback, descrname)) + cb = UserDelCallback(w_obj, callback, descrname) + if self.dying_objects_last is None: + self.dying_objects = cb + else: + self.dying_objects_last.next = cb + self.dying_objects_last = cb if not self._invoke_immediately: self.fire() else: @@ -532,13 +545,33 @@ # avoid too deep recursions of the kind of __del__ being called # while in the middle of another __del__ call. 
pending = self.dying_objects - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None space = self.space - for i in range(len(pending)): - w_obj, callback, descrname = pending[i] - pending[i] = (None, None, None) + while pending is not None: try: - callback(w_obj) + pending.callback(pending.w_obj) except OperationError, e: - e.write_unraisable(space, descrname, w_obj) + e.write_unraisable(space, pending.descrname, pending.w_obj) e.clear(space) # break up reference cycles + pending = pending.next + # + # Note: 'dying_objects' used to be just a regular list instead + # of a chained list. This was the cause of "leaks" if we have a + # program that constantly creates new objects with finalizers. + # Here is why: say 'dying_objects' is a long list, and there + # are n instances in it. Then we spend some time in this + # function, possibly triggering more GCs, but keeping the list + # of length n alive. Then the list is suddenly freed at the + # end, and we return to the user program. At this point the + # GC limit is still very high, because just before, there was + # a list of length n alive. Assume that the program continues + # to allocate a lot of instances with finalizers. The high GC + # limit means that it could allocate a lot of instances before + # reaching it --- possibly more than n. So the whole procedure + # repeats with higher and higher values of n. + # + # This does not occur in the current implementation because + # there is no list of length n: if n is large, then the GC + # will run several times while walking the list, but it will + # see lower and lower memory usage, with no lower bound of n. 
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -61,6 +61,13 @@ return self.send_ex(w_arg) def send_ex(self, w_arg, operr=None): + pycode = self.pycode + if jit.we_are_jitted() and should_not_inline(pycode): + generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg, + operr=operr, pycode=pycode) + return self._send_ex(w_arg, operr) + + def _send_ex(self, w_arg, operr): space = self.space if self.running: raise OperationError(space.w_ValueError, @@ -72,8 +79,7 @@ if operr is None: operr = OperationError(space.w_StopIteration, space.w_None) raise operr - # XXX it's not clear that last_instr should be promoted at all - # but as long as it is necessary for call_assembler, let's do it early + last_instr = jit.promote(frame.last_instr) if last_instr == -1: if w_arg and not space.is_w(w_arg, space.w_None): @@ -219,3 +225,38 @@ "interrupting generator of ") break block = block.previous + + + +def get_printable_location_genentry(bytecode): + return '%s ' % (bytecode.get_repr(),) +generatorentry_driver = jit.JitDriver(greens=['pycode'], + reds=['gen', 'w_arg', 'operr'], + get_printable_location = + get_printable_location_genentry, + name='generatorentry') + +from pypy.tool.stdlib_opcode import HAVE_ARGUMENT, opmap +YIELD_VALUE = opmap['YIELD_VALUE'] + + at jit.elidable_promote() +def should_not_inline(pycode): + # Should not inline generators with more than one "yield", + # as an approximative fix (see issue #1782). There are cases + # where it slows things down; for example calls to a simple + # generator that just produces a few simple values with a few + # consecutive "yield" statements. It fixes the near-infinite + # slow-down in issue #1782, though... 
+ count_yields = 0 + code = pycode.co_code + n = len(code) + i = 0 + while i < n: + c = code[i] + op = ord(c) + if op == YIELD_VALUE: + count_yields += 1 + i += 1 + if op >= HAVE_ARGUMENT: + i += 2 + return count_yields >= 2 diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -300,3 +300,20 @@ yield 1 raise StopIteration assert tuple(f()) == (1,) + + +def test_should_not_inline(space): + from pypy.interpreter.generator import should_not_inline + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + return g.func_code + ''') + assert should_not_inline(w_co) == False + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + yield x + 6 + return g.func_code + ''') + assert should_not_inline(w_co) == True diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -74,13 +74,12 @@ 'hidden_applevel' : 'interp_magic.hidden_applevel', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', - 'list_strategy' : 'interp_magic.list_strategy', 'validate_fd' : 'interp_magic.validate_fd', 'resizelist_hint' : 'interp_magic.resizelist_hint', 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', - 'dictstrategy' : 'interp_dict.dictstrategy', + 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', 'normalize_exc' : 'interp_magic.normalize_exc', diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -1,7 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import 
unwrap_spec -from pypy.objspace.std.dictmultiobject import W_DictMultiObject @unwrap_spec(type=str) def newdict(space, type): @@ -31,13 +30,3 @@ return space.newdict(strdict=True) else: raise oefmt(space.w_TypeError, "unknown type of dict %s", type) - -def dictstrategy(space, w_obj): - """ dictstrategy(dict) - - show the underlaying strategy used by a dict object - """ - if not isinstance(w_obj, W_DictMultiObject): - raise OperationError(space.w_TypeError, - space.wrap("expecting dict object")) - return space.wrap('%r' % (w_obj.strategy,)) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -2,7 +2,9 @@ from pypy.interpreter.gateway import WrappedDefault, unwrap_spec from pypy.interpreter.pyframe import PyFrame from rpython.rlib.objectmodel import we_are_translated +from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.setobject import W_BaseSetObject from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -74,12 +76,23 @@ def do_what_I_mean(space): return space.wrap(42) -def list_strategy(space, w_list): - if isinstance(w_list, W_ListObject): - return space.wrap(w_list.strategy._applevel_repr) + +def strategy(space, w_obj): + """ strategy(dict or list or set) + + Return the underlying strategy currently used by a dict, list or set object + """ + if isinstance(w_obj, W_DictMultiObject): + name = w_obj.strategy.__class__.__name__ + elif isinstance(w_obj, W_ListObject): + name = w_obj.strategy.__class__.__name__ + elif isinstance(w_obj, W_BaseSetObject): + name = w_obj.strategy.__class__.__name__ else: - w_msg = space.wrap("Can only get the list strategy of a list") - raise OperationError(space.w_TypeError, w_msg) + raise OperationError(space.w_TypeError, + 
space.wrap("expecting dict or list or set object")) + return space.wrap(name) + @unwrap_spec(fd='c_int') def validate_fd(space, fd): diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -75,24 +75,38 @@ assert x == 42 def test_list_strategy(self): - from __pypy__ import list_strategy + from __pypy__ import strategy l = [1, 2, 3] - assert list_strategy(l) == "int" - l = list(range(1, 2)) - assert list_strategy(l) == "int" + assert strategy(l) == "IntegerListStrategy" l = [b"a", b"b", b"c"] - assert list_strategy(l) == "bytes" - l = ["a", "b", "c"] - assert list_strategy(l) == "unicode" + assert strategy(l) == "BytesListStrategy" + l = [u"a", u"b", u"c"] + assert strategy(l) == "UnicodeListStrategy" l = [1.1, 2.2, 3.3] - assert list_strategy(l) == "float" + assert strategy(l) == "FloatListStrategy" l = [1, "b", 3] - assert list_strategy(l) == "object" + assert strategy(l) == "ObjectListStrategy" l = [] - assert list_strategy(l) == "empty" + assert strategy(l) == "EmptyListStrategy" o = 5 - raises(TypeError, list_strategy, 5) + raises(TypeError, strategy, 5) + + def test_dict_strategy(self): + from __pypy__ import strategy + + d = {} + assert strategy(d) == "EmptyDictStrategy" + d = {1: None, 5: None} + assert strategy(d) == "IntDictStrategy" + + def test_set_strategy(self): + from __pypy__ import strategy + + s = set() + assert strategy(s) == "EmptySetStrategy" + s = set([2, 3, 4]) + assert strategy(s) == "IntegerSetStrategy" def test_normalize_exc(self): from __pypy__ import normalize_exc diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -8,7 +8,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.8.2")', + '__version__': 'space.wrap("0.8.6")', 'load_library': 
'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -4,7 +4,7 @@ import sys -from rpython.rlib import jit, clibffi, jit_libffi +from rpython.rlib import jit, clibffi, jit_libffi, rgc from rpython.rlib.jit_libffi import (CIF_DESCRIPTION, CIF_DESCRIPTION_P, FFI_TYPE, FFI_TYPE_P, FFI_TYPE_PP, SIZE_OF_FFI_ARG) from rpython.rlib.objectmodel import we_are_translated, instantiate @@ -63,6 +63,7 @@ CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) return ctypefunc + @rgc.must_be_light_finalizer def __del__(self): if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') @@ -156,8 +157,8 @@ data = rffi.ptradd(buffer, cif_descr.exchange_args[i]) flag = get_mustfree_flag(data) if flag == 1: - raw_string = rffi.cast(rffi.CCHARPP, data)[0] - lltype.free(raw_string, flavor='raw') + raw_cdata = rffi.cast(rffi.CCHARPP, data)[0] + lltype.free(raw_cdata, flavor='raw') lltype.free(buffer, flavor='raw') return w_res diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3188,4 +3188,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8.2" + assert __version__ == "0.8.6" diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -467,10 +467,6 @@ space.wrap("<_io.TextIOWrapper %s%sencoding=%r>"), w_args ) - def isatty_w(self, space): - self._check_init(space) - return space.call_method(self.w_buffer, "isatty") - def readable_w(self, space): self._check_init(space) return space.call_method(self.w_buffer, "readable") @@ -483,6 +479,10 @@ self._check_init(space) return space.call_method(self.w_buffer, 
"seekable") + def isatty_w(self, space): + self._check_init(space) + return space.call_method(self.w_buffer, "isatty") + def fileno_w(self, space): self._check_init(space) return space.call_method(self.w_buffer, "fileno") @@ -1065,10 +1065,10 @@ close = interp2app(W_TextIOWrapper.close_w), line_buffering = interp_attrproperty("line_buffering", W_TextIOWrapper), - isatty = interp2app(W_TextIOWrapper.isatty_w), readable = interp2app(W_TextIOWrapper.readable_w), writable = interp2app(W_TextIOWrapper.writable_w), seekable = interp2app(W_TextIOWrapper.seekable_w), + isatty = interp2app(W_TextIOWrapper.isatty_w), fileno = interp2app(W_TextIOWrapper.fileno_w), _dealloc_warn = interp2app(W_TextIOWrapper._dealloc_warn_w), name = GetSetProperty(W_TextIOWrapper.name_get_w), diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -31,6 +31,12 @@ t = _io.TextIOWrapper(b) assert t.readable() assert t.seekable() + # + class CustomFile(object): + def isatty(self): return 'YES' + readable = writable = seekable = lambda self: False + t = _io.TextIOWrapper(CustomFile()) + assert t.isatty() == 'YES' def test_default_implementations(self): import _io diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -11,7 +11,7 @@ from rpython.rlib.rtimer import read_timestamp, _is_64_bit from rpython.rtyper.lltypesystem import rffi, lltype from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.rlib.rarithmetic import r_longlong import time, sys diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -6,8 +6,8 @@ } interpleveldefs = { - 'SocketType': 
'interp_socket.W_RSocket', - 'socket' : 'interp_socket.W_RSocket', + 'SocketType': 'interp_socket.W_Socket', + 'socket' : 'interp_socket.W_Socket', 'error' : 'interp_socket.get_error(space, "error")', 'herror' : 'interp_socket.get_error(space, "herror")', 'gaierror' : 'interp_socket.get_error(space, "gaierror")', diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,9 +1,13 @@ +from rpython.rlib import rsocket +from rpython.rlib.rsocket import SocketError, INVALID_SOCKET + +from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from pypy.module._socket.interp_socket import ( - converted_error, W_RSocket, addr_as_object, fill_from_object, get_error) -from rpython.rlib import rsocket -from rpython.rlib.rsocket import SocketError, INVALID_SOCKET -from pypy.interpreter.error import OperationError + converted_error, W_Socket, addr_as_object, fill_from_object, get_error, + ipaddr_from_object +) + def gethostname(space): """gethostname() -> string @@ -156,10 +160,13 @@ AF_UNIX if defined on the platform; otherwise, the default is AF_INET. """ try: - sock1, sock2 = rsocket.socketpair(family, type, proto, W_RSocket) + sock1, sock2 = rsocket.socketpair(family, type, proto) except SocketError, e: raise converted_error(space, e) - return space.newtuple([space.wrap(sock1), space.wrap(sock2)]) + return space.newtuple([ + space.wrap(W_Socket(sock1)), + space.wrap(W_Socket(sock2)) + ]) # The following 4 functions refuse all negative numbers, like CPython 2.6. 
# They could also check that the argument is not too large, but CPython 2.6 diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -1,13 +1,18 @@ +from rpython.rlib import rsocket +from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rsocket import ( + RSocket, AF_INET, SOCK_STREAM, SocketError, SocketErrorWithErrno, + RSocketError +) +from rpython.rtyper.lltypesystem import lltype, rffi + +from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef, interp_attrproperty +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from rpython.rlib.rarithmetic import intmask -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib import rsocket -from rpython.rlib.rsocket import RSocket, AF_INET, SOCK_STREAM -from rpython.rlib.rsocket import SocketError, SocketErrorWithErrno, RSocketError -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter import gateway +from pypy.interpreter.typedef import ( + GetSetProperty, TypeDef, interp_attrproperty, make_weakref_descr +) # XXX Hack to seperate rpython and pypy @@ -128,13 +133,16 @@ return addr -class W_RSocket(W_Root, RSocket): +class W_Socket(W_Root): # for _dealloc_warn space = None + def __init__(self, sock): + self.sock = sock + def descr_new(space, w_subtype, __args__): - sock = space.allocate_instance(W_RSocket, w_subtype) + sock = space.allocate_instance(W_Socket, w_subtype) return space.wrap(sock) @unwrap_spec(family=int, type=int, proto=int, @@ -143,10 +151,11 @@ w_fileno=None): try: if not space.is_w(w_fileno, space.w_None): - W_RSocket.__init__(self, family, type, proto, - fd=space.c_filedescriptor_w(w_fileno)) + sock = RSocket(family, type, proto, + fd=space.c_filedescriptor_w(w_fileno)) else: 
- W_RSocket.__init__(self, family, type, proto) + sock = RSocket(family, type, proto) + W_Socket.__init__(self, sock) self.space = space except SocketError, e: raise converted_error(space, e) @@ -154,16 +163,26 @@ def __del__(self): self.clear_all_weakrefs() if self.space: - self.enqueue_for_destruction(self.space, W_RSocket.destructor, + self.enqueue_for_destruction(self.space, W_Socket.destructor, 'internal __del__ of ') def destructor(self): - assert isinstance(self, W_RSocket) + assert isinstance(self, W_Socket) if self.fd != rsocket.INVALID_SOCKET: try: self._dealloc_warn() finally: self.close_w(self.space) + # --XXX-- + + def get_type_w(self, space): + return space.wrap(self.sock.type) + + def get_proto_w(self, space): + return space.wrap(self.sock.proto) + + def get_family_w(self, space): + return space.wrap(self.sock.family) def _dealloc_warn(self): space = self.space @@ -184,20 +203,20 @@ For IP sockets, the address info is a pair (hostaddr, port). """ try: - fd, addr = self.accept() + fd, addr = self.sock.accept() return space.newtuple([space.wrap(fd), addr_as_object(addr, fd, space)]) - except SocketError, e: + except SocketError as e: raise converted_error(space, e) # convert an Address into an app-level object def addr_as_object(self, space, address): - return addr_as_object(address, self.fd, space) + return addr_as_object(address, self.sock.fd, space) # convert an app-level object into an Address # based on the current socket's family def addr_from_object(self, space, w_address): - return addr_from_object(self.family, space, w_address) + return addr_from_object(self.sock.family, space, w_address) def bind_w(self, space, w_addr): """bind(address) @@ -207,8 +226,8 @@ sockets the address is a tuple (ifname, proto [,pkttype [,hatype]]) """ try: - self.bind(self.addr_from_object(space, w_addr)) - except SocketError, e: + self.sock.bind(self.addr_from_object(space, w_addr)) + except SocketError as e: raise converted_error(space, e) def close_w(self, space): 
@@ -217,7 +236,7 @@ Close the socket. It cannot be used after this call. """ try: - self.close() + self.sock.close() except SocketError: # cpython doesn't return any errors on close pass @@ -229,8 +248,8 @@ is a pair (host, port). """ try: - self.connect(self.addr_from_object(space, w_addr)) - except SocketError, e: + self.sock.connect(self.addr_from_object(space, w_addr)) + except SocketError as e: raise converted_error(space, e) def connect_ex_w(self, space, w_addr): @@ -241,9 +260,9 @@ """ try: addr = self.addr_from_object(space, w_addr) - except SocketError, e: + except SocketError as e: raise converted_error(space, e) - error = self.connect_ex(addr) + error = self.sock.connect_ex(addr) return space.wrap(error) def fileno_w(self, space): @@ -251,7 +270,7 @@ Return the integer file descriptor of the socket. """ - return space.wrap(intmask(self.fd)) + return space.wrap(intmask(self.sock.fd)) def detach_w(self, space): """detach() @@ -269,9 +288,9 @@ info is a pair (hostaddr, port). """ try: - addr = self.getpeername() - return addr_as_object(addr, self.fd, space) - except SocketError, e: + addr = self.sock.getpeername() + return addr_as_object(addr, self.sock.fd, space) + except SocketError as e: raise converted_error(space, e) def getsockname_w(self, space): @@ -281,9 +300,9 @@ info is a pair (hostaddr, port). 
""" try: - addr = self.getsockname() - return addr_as_object(addr, self.fd, space) - except SocketError, e: + addr = self.sock.getsockname() + return addr_as_object(addr, self.sock.fd, space) + except SocketError as e: raise converted_error(space, e) @unwrap_spec(level=int, optname=int) @@ -296,11 +315,11 @@ """ if w_buflen is None: try: - return space.wrap(self.getsockopt_int(level, optname)) - except SocketError, e: + return space.wrap(self.sock.getsockopt_int(level, optname)) + except SocketError as e: raise converted_error(space, e) buflen = space.int_w(w_buflen) - return space.wrapbytes(self.getsockopt(level, optname, buflen)) + return space.wrapbytes(self.sock.getsockopt(level, optname, buflen)) def gettimeout_w(self, space): """gettimeout() -> timeout @@ -308,7 +327,7 @@ Returns the timeout in floating seconds associated with socket operations. A timeout of None indicates that timeouts on socket """ - timeout = self.gettimeout() + timeout = self.sock.gettimeout() if timeout < 0.0: return space.w_None return space.wrap(timeout) @@ -322,8 +341,8 @@ will allow before refusing new connections. """ try: - self.listen(backlog) - except SocketError, e: + self.sock.listen(backlog) + except SocketError as e: raise converted_error(space, e) @unwrap_spec(buffersize='nonnegint', flags=int) @@ -336,8 +355,8 @@ the remote end is closed and all data is read, return the empty string. """ try: - data = self.recv(buffersize, flags) - except SocketError, e: + data = self.sock.recv(buffersize, flags) + except SocketError as e: raise converted_error(space, e) return space.wrapbytes(data) @@ -348,13 +367,13 @@ Like recv(buffersize, flags) but also return the sender's address info. 
""" try: - data, addr = self.recvfrom(buffersize, flags) + data, addr = self.sock.recvfrom(buffersize, flags) if addr: - w_addr = addr_as_object(addr, self.fd, space) + w_addr = addr_as_object(addr, self.sock.fd, space) else: w_addr = space.w_None return space.newtuple([space.wrapbytes(data), w_addr]) - except SocketError, e: + except SocketError as e: raise converted_error(space, e) @unwrap_spec(data='bufferstr', flags=int) @@ -366,8 +385,8 @@ sent; this may be less than len(data) if the network is busy. """ try: - count = self.send(data, flags) - except SocketError, e: + count = self.sock.send(data, flags) + except SocketError as e: raise converted_error(space, e) return space.wrap(count) @@ -381,8 +400,9 @@ to tell how much data has been sent. """ try: - self.sendall(data, flags, space.getexecutioncontext().checksignals) - except SocketError, e: + self.sock.sendall( + data, flags, space.getexecutioncontext().checksignals) + except SocketError as e: raise converted_error(space, e) @unwrap_spec(data='bufferstr') @@ -402,8 +422,8 @@ w_addr = w_param3 try: addr = self.addr_from_object(space, w_addr) - count = self.sendto(data, flags, addr) - except SocketError, e: + count = self.sock.sendto(data, flags, addr) + except SocketError as e: raise converted_error(space, e) return space.wrap(count) @@ -415,7 +435,7 @@ setblocking(True) is equivalent to settimeout(None); setblocking(False) is equivalent to settimeout(0.0). 
""" - self.setblocking(flag) + self.sock.setblocking(flag) @unwrap_spec(level=int, optname=int) def setsockopt_w(self, space, level, optname, w_optval): @@ -429,13 +449,13 @@ except: optval = space.bytes_w(w_optval) try: - self.setsockopt(level, optname, optval) - except SocketError, e: + self.sock.setsockopt(level, optname, optval) + except SocketError as e: raise converted_error(space, e) return try: - self.setsockopt_int(level, optname, optval) - except SocketError, e: + self.sock.setsockopt_int(level, optname, optval) + except SocketError as e: raise converted_error(space, e) def settimeout_w(self, space, w_timeout): @@ -453,7 +473,7 @@ if timeout < 0.0: raise OperationError(space.w_ValueError, space.wrap('Timeout value out of range')) - self.settimeout(timeout) + self.sock.settimeout(timeout) @unwrap_spec(nbytes=int, flags=int) def recv_into_w(self, space, w_buffer, nbytes=0, flags=0): @@ -462,8 +482,8 @@ if nbytes == 0 or nbytes > lgt: nbytes = lgt try: - return space.wrap(self.recvinto(rwbuffer, nbytes, flags)) - except SocketError, e: + return space.wrap(self.sock.recvinto(rwbuffer, nbytes, flags)) + except SocketError as e: raise converted_error(space, e) @unwrap_spec(nbytes=int, flags=int) @@ -473,13 +493,13 @@ if nbytes == 0 or nbytes > lgt: nbytes = lgt try: - readlgt, addr = self.recvfrom_into(rwbuffer, nbytes, flags) + readlgt, addr = self.sock.recvfrom_into(rwbuffer, nbytes, flags) if addr: - w_addr = addr_as_object(addr, self.fd, space) + w_addr = addr_as_object(addr, self.sock.fd, space) else: w_addr = space.w_None return space.newtuple([space.wrap(readlgt), w_addr]) - except SocketError, e: + except SocketError as e: raise converted_error(space, e) @unwrap_spec(cmd=int) @@ -511,7 +531,7 @@ option_ptr.c_keepaliveinterval = space.uint_w(w_interval) res = _c.WSAIoctl( - self.fd, cmd, value_ptr, value_size, + self.sock.fd, cmd, value_ptr, value_size, rffi.NULL, 0, recv_ptr, rffi.NULL, rffi.NULL) if res < 0: raise converted_error(space, 
rsocket.last_error()) @@ -532,8 +552,8 @@ (flag == SHUT_RDWR). """ try: - self.shutdown(how) - except SocketError, e: + self.sock.shutdown(how) + except SocketError as e: raise converted_error(space, e) #------------------------------------------------------------ @@ -609,10 +629,10 @@ socketmethods = {} for methodname in socketmethodnames: - method = getattr(W_RSocket, methodname + '_w') + method = getattr(W_Socket, methodname + '_w') socketmethods[methodname] = interp2app(method) -W_RSocket.typedef = TypeDef("_socket.socket", +W_Socket.typedef = TypeDef("_socket.socket", __doc__ = """\ socket([family[, type[, proto]]]) -> socket object @@ -648,10 +668,10 @@ shutdown(how) -- shut down traffic in one or both directions [*] not available on all platforms!""", - __new__ = interp2app(W_RSocket.descr_new.im_func), - __init__ = interp2app(W_RSocket.descr_init), - type = interp_attrproperty('type', W_RSocket), - proto = interp_attrproperty('proto', W_RSocket), - family = interp_attrproperty('family', W_RSocket), + __new__ = interp2app(W_Socket.descr_new.im_func), + __init__ = interp2app(W_Socket.descr_init), + type = interp_attrproperty('type', W_Socket), + proto = interp_attrproperty('proto', W_Socket), + family = interp_attrproperty('family', W_Socket), ** socketmethods ) diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -434,47 +434,47 @@ lgt = rwbuffer.getlength() if num_bytes < 0 or num_bytes > lgt: num_bytes = lgt - raw_buf, gc_buf = rffi.alloc_buffer(num_bytes) - while True: - err = 0 - count = libssl_SSL_read(self.ssl, raw_buf, num_bytes) - err = libssl_SSL_get_error(self.ssl, count) + with rffi.scoped_alloc_buffer(num_bytes) as buf: + while True: + err = 0 - if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, False) - elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout( - space, 
w_socket, True) - elif (err == SSL_ERROR_ZERO_RETURN and - libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): - rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) - if space.is_none(w_buf): - return space.wrapbytes('') + count = libssl_SSL_read(self.ssl, buf.raw, num_bytes) + err = libssl_SSL_get_error(self.ssl, count) + + if err == SSL_ERROR_WANT_READ: + sockstate = check_socket_and_wait_for_timeout(self.space, + self.w_socket, False) + elif err == SSL_ERROR_WANT_WRITE: + sockstate = check_socket_and_wait_for_timeout(self.space, + self.w_socket, True) + elif (err == SSL_ERROR_ZERO_RETURN and + libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): + if space.is_none(w_buf): + return space.wrapbytes('') + else: + return space.wrap(0) else: - return space.wrap(0) - else: - sockstate = SOCKET_OPERATION_OK + sockstate = SOCKET_OPERATION_OK - if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(space, "The read operation timed out") - elif sockstate == SOCKET_IS_NONBLOCKING: - break + if sockstate == SOCKET_HAS_TIMED_OUT: + raise ssl_error(self.space, "The read operation timed out") + elif sockstate == SOCKET_IS_NONBLOCKING: + break - if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE: - continue - else: - break + if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE: + continue + else: + break - if count <= 0: - raise _ssl_seterror(space, self, count) + if count <= 0: + raise _ssl_seterror(self.space, self, count) - result = rffi.str_from_buffer(raw_buf, gc_buf, num_bytes, count) - rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) + result = buf.str(count) + if rwbuffer is not None: rwbuffer.setslice(0, result) - return space.wrap(count) + return self.space.wrap(count) else: return space.wrapbytes(result) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -242,8 +242,11 @@ Convert the array to an array of machine 
values and return the bytes representation. """ + size = self.len + if size == 0: + return space.wrap('') cbuf = self._charbuf_start() - s = rffi.charpsize2str(cbuf, self.len * self.itemsize) + s = rffi.charpsize2str(cbuf, size * self.itemsize) self._charbuf_stop() return self.space.wrapbytes(s) @@ -668,6 +671,10 @@ def make_array(mytype): W_ArrayBase = globals()['W_ArrayBase'] + unpack_driver = jit.JitDriver(name='unpack_array', + greens=['tp'], + reds=['self', 'w_iterator']) + class W_Array(W_ArrayBase): itemsize = mytype.bytes typecode = mytype.typecode @@ -712,6 +719,10 @@ return rffi.cast(mytype.itemtype, item) # # "regular" case: it fits in an rpython integer (lltype.Signed) + # or it is a float + return self.item_from_int_or_float(item) + + def item_from_int_or_float(self, item): result = rffi.cast(mytype.itemtype, item) if mytype.canoverflow: if rffi.cast(lltype.Signed, result) != item: @@ -724,8 +735,8 @@ % mytype.bytes) if not mytype.signed: msg = 'un' + msg # 'signed' => 'unsigned' - raise OperationError(space.w_OverflowError, - space.wrap(msg)) + raise OperationError(self.space.w_OverflowError, + self.space.wrap(msg)) return result def __del__(self): @@ -772,27 +783,65 @@ def fromsequence(self, w_seq): space = self.space oldlen = self.len - try: - new = space.len_w(w_seq) - self.setlen(self.len + new) - except OperationError: - pass + newlen = oldlen - i = 0 - try: - if mytype.typecode == 'u': - myiter = space.unpackiterable - else: - myiter = space.listview - for w_i in myiter(w_seq): - if oldlen + i >= self.len: - self.setlen(oldlen + i + 1) - self.buffer[oldlen + i] = self.item_w(w_i) - i += 1 - except OperationError: - self.setlen(oldlen + i) - raise - self.setlen(oldlen + i) + # optimized case for arrays of integers or floats + if mytype.unwrap == 'int_w': + lst = space.listview_int(w_seq) + elif mytype.unwrap == 'float_w': + lst = space.listview_float(w_seq) + else: + lst = None + if lst is not None: + self.setlen(oldlen + len(lst)) + try: + buf = 
self.buffer + for num in lst: + buf[newlen] = self.item_from_int_or_float(num) + newlen += 1 + except OperationError: + self.setlen(newlen) + raise + return + + # this is the common case: w_seq is a list or a tuple + lst_w = space.listview_no_unpack(w_seq) + if lst_w is not None: + self.setlen(oldlen + len(lst_w)) + buf = self.buffer + try: + for w_num in lst_w: + # note: self.item_w() might invoke arbitrary code. + # In case it resizes the same array, then strange + # things may happen, but as we don't reload 'buf' + # we know that one is big enough for all items + # (so at least we avoid crashes) + buf[newlen] = self.item_w(w_num) + newlen += 1 + except OperationError: + if buf == self.buffer: + self.setlen(newlen) + raise + return + + self._fromiterable(w_seq) + + def _fromiterable(self, w_seq): + # a more careful case if w_seq happens to be a very large + # iterable: don't copy the items into some intermediate list + w_iterator = self.space.iter(w_seq) + tp = self.space.type(w_iterator) + while True: + unpack_driver.jit_merge_point(tp=tp, self=self, + w_iterator=w_iterator) + space = self.space + try: + w_item = space.next(w_iterator) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break # done + self.descr_append(space, w_item) def extend(self, w_iterable, accept_different_array=False): space = self.space @@ -835,8 +884,9 @@ def descr_append(self, space, w_x): x = self.item_w(w_x) - self.setlen(self.len + 1) - self.buffer[self.len - 1] = x + index = self.len + self.setlen(index + 1) + self.buffer[index] = x # List interface def descr_count(self, space, w_val): diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -404,6 +404,10 @@ raises(ValueError, self.array('i').tounicode) assert self.array('u', 'hello').tounicode() == 'hello' + def test_empty_tostring(self): + a = self.array('l') + assert 
a.tostring() == b'' + def test_buffer(self): a = self.array('h', b'Hi') buf = memoryview(a) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -10,7 +10,7 @@ from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.objectmodel import we_are_translated -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.gensupp import NameManager from rpython.tool.udir import udir diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -46,11 +46,11 @@ IndexError exception.""" if not isinstance(w_list, W_ListObject): PyErr_BadInternalCall(space) - wrappeditems = w_list.getitems() - if index < 0 or index >= len(wrappeditems): + if index < 0 or index >= w_list.length(): raise OperationError(space.w_IndexError, space.wrap( "list index out of range")) - return borrow_from(w_list, wrappeditems[index]) + w_item = w_list.getitem(index) + return borrow_from(w_list, w_item) @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -207,12 +207,7 @@ import sys if sys.platform != "win32" or sys.version_info < (2, 6): skip("Windows Python >= 2.6 only") - assert sys.dllhandle - assert sys.dllhandle.getaddressindll('PyPyErr_NewException') - import ctypes # slow - PyUnicode_GetDefaultEncoding = ctypes.pythonapi.PyPyUnicode_GetDefaultEncoding - PyUnicode_GetDefaultEncoding.restype = ctypes.c_char_p - assert PyUnicode_GetDefaultEncoding() == 'ascii' + assert isinstance(sys.dllhandle, int) class AppTestCpythonExtensionBase(LeakCheckingTest): diff --git 
a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -560,8 +560,8 @@ def test_copy(self, space, api): w_x = space.wrap(u"abcd\u0660") - target_chunk, _ = rffi.alloc_unicodebuffer(space.int_w(space.len(w_x))) - #lltype.malloc(Py_UNICODE, space.int_w(space.len(w_x)), flavor='raw') + count1 = space.int_w(space.len(w_x)) + target_chunk = lltype.malloc(rffi.CWCHARP.TO, count1, flavor='raw') x_chunk = api.PyUnicode_AS_UNICODE(w_x) api.Py_UNICODE_COPY(target_chunk, x_chunk, 4) diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -12,7 +12,7 @@ 'scalar' : 'ctors.build_scalar', 'array': 'ctors.array', 'zeros': 'ctors.zeros', - 'empty': 'ctors.zeros', + 'empty': 'ctors.empty', 'empty_like': 'ctors.empty_like', 'fromstring': 'ctors.fromstring', 'frombuffer': 'ctors.frombuffer', diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -18,7 +18,12 @@ pass -class W_NDimArray(W_Root): +class W_NumpyObject(W_Root): + """Base class for ndarrays and scalars (aka boxes).""" + _attrs_ = [] + + +class W_NDimArray(W_NumpyObject): __metaclass__ = extendabletype def __init__(self, implementation): @@ -28,12 +33,12 @@ self.implementation = implementation @staticmethod - def from_shape(space, shape, dtype, order='C', w_instance=None): + def from_shape(space, shape, dtype, order='C', w_instance=None, zero=True): from pypy.module.micronumpy import concrete from pypy.module.micronumpy.strides import calc_strides strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, - backstrides) + backstrides, zero=zero) if w_instance: return wrap_impl(space, 
space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @@ -85,6 +90,14 @@ w_val = dtype.coerce(space, space.wrap(0)) return convert_to_array(space, w_val) + @staticmethod + def from_scalar(space, w_scalar): + """Convert a scalar into a 0-dim array""" + dtype = w_scalar.get_dtype(space) + w_arr = W_NDimArray.from_shape(space, [], dtype) + w_arr.set_scalar_value(w_scalar) + return w_arr + def convert_to_array(space, w_obj): from pypy.module.micronumpy.ctors import array diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -1,4 +1,3 @@ -from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.mixedmodule import MixedModule @@ -14,7 +13,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy import constants as NPY -from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.base import W_NDimArray, W_NumpyObject from pypy.module.micronumpy.concrete import VoidBoxStorage from pypy.module.micronumpy.flagsobj import W_FlagsObject @@ -126,7 +125,7 @@ return ret -class W_GenericBox(W_Root): +class W_GenericBox(W_NumpyObject): _attrs_ = ['w_flags'] def descr__new__(space, w_subtype, __args__): @@ -136,6 +135,12 @@ def get_dtype(self, space): return self._get_dtype(space) + def is_scalar(self): + return True + + def get_scalar_value(self): + return self + def item(self, space): return self.get_dtype(space).itemtype.to_builtin_type(space, self) @@ -148,6 +153,13 @@ raise OperationError(space.w_IndexError, space.wrap( "invalid index to scalar variable")) + ''' + def descr_iter(self, space): + # Making numpy scalar non-iterable with a valid __getitem__ method + raise oefmt(space.w_TypeError, + "'%T' object is not iterable", 
self) + ''' + def descr_str(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) @@ -539,6 +551,7 @@ __new__ = interp2app(W_GenericBox.descr__new__.im_func), __getitem__ = interp2app(W_GenericBox.descr_getitem), + #__iter__ = interp2app(W_GenericBox.descr_iter), __str__ = interp2app(W_GenericBox.descr_str), __repr__ = interp2app(W_GenericBox.descr_str), __format__ = interp2app(W_GenericBox.descr_format), diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -369,9 +369,11 @@ class ConcreteArray(ConcreteArrayNotOwning): - def __init__(self, shape, dtype, order, strides, backstrides, storage=lltype.nullptr(RAW_STORAGE)): + def __init__(self, shape, dtype, order, strides, backstrides, + storage=lltype.nullptr(RAW_STORAGE), zero=True): if storage == lltype.nullptr(RAW_STORAGE): - storage = dtype.itemtype.malloc(support.product(shape) * dtype.elsize) + storage = dtype.itemtype.malloc(support.product(shape) * + dtype.elsize, zero=zero) ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides, storage) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -4,7 +4,8 @@ from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import descriptor, loop -from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.base import ( + W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter @@ -24,24 +25,44 @@ return box +def try_array_method(space, w_object, w_dtype=None): + w___array__ = space.lookup(w_object, "__array__") + if w___array__ is None: + return None + if w_dtype is None: + w_dtype = space.w_None + w_array = 
space.get_and_call_function(w___array__, w_object, w_dtype) + if isinstance(w_array, W_NDimArray): + return w_array + else: + raise oefmt(space.w_ValueError, + "object __array__ method not producing an array") + + @unwrap_spec(ndmin=int, copy=bool, subok=bool) def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, ndmin=0): + w_res = _array(space, w_object, w_dtype, copy, w_order, subok) + shape = w_res.get_shape() + if len(shape) < ndmin: + shape = [1] * (ndmin - len(shape)) + shape + impl = w_res.implementation.set_shape(space, w_res, shape) + if w_res is w_object: + return W_NDimArray(impl) + else: + w_res.implementation = impl + return w_res + +def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): from pypy.module.micronumpy import strides # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): - w___array__ = space.lookup(w_object, "__array__") - if w___array__ is not None: - if space.is_none(w_dtype): - w_dtype = space.w_None - w_array = space.get_and_call_function(w___array__, w_object, w_dtype) - if isinstance(w_array, W_NDimArray): - # feed w_array back into array() for other properties - return array(space, w_array, w_dtype, False, w_order, subok, ndmin) - else: - raise oefmt(space.w_ValueError, - "object __array__ method not producing an array") + w_array = try_array_method(space, w_object, w_dtype) + if w_array is not None: + # continue with w_array, but do further operations in place + w_object = w_array + copy = False dtype = descriptor.decode_w_dtype(space, w_dtype) @@ -57,19 +78,10 @@ # arrays with correct dtype if isinstance(w_object, W_NDimArray) and \ (space.is_none(w_dtype) or w_object.get_dtype() is dtype): - shape = w_object.get_shape() if copy: - w_ret = w_object.descr_copy(space) + return w_object.descr_copy(space) else: - if ndmin <= len(shape): - return w_object - new_impl = w_object.implementation.set_shape(space, w_object, shape) - 
w_ret = W_NDimArray(new_impl) - if ndmin > len(shape): - shape = [1] * (ndmin - len(shape)) + shape - w_ret.implementation = w_ret.implementation.set_shape(space, - w_ret, shape) - return w_ret + return w_object # not an array or incorrect dtype shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) @@ -81,8 +93,6 @@ # promote S0 -> S1, U0 -> U1 dtype = descriptor.variable_dtype(space, dtype.char + '1') - if ndmin > len(shape): - shape = [1] * (ndmin - len(shape)) + shape w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) if len(elems_w) == 1: w_arr.set_scalar_value(dtype.coerce(space, elems_w[0])) @@ -91,13 +101,46 @@ return w_arr -def zeros(space, w_shape, w_dtype=None, w_order=None): +def numpify(space, w_object): + """Convert the object to a W_NumpyObject""" + # XXX: code duplication with _array() + from pypy.module.micronumpy import strides + if isinstance(w_object, W_NumpyObject): + return w_object + # for anything that isn't already an array, try __array__ method first + w_array = try_array_method(space, w_object) + if w_array is not None: + return w_array + + shape, elems_w = strides.find_shape_and_elems(space, w_object, None) + dtype = strides.find_dtype_for_seq(space, elems_w, None) + if dtype is None: + dtype = descriptor.get_dtype_cache(space).w_float64dtype + elif dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') + + if len(elems_w) == 1: + return dtype.coerce(space, elems_w[0]) + else: + w_arr = W_NDimArray.from_shape(space, shape, dtype) + loop.assign(space, w_arr, elems_w) + return w_arr + + +def _zeros_or_empty(space, w_shape, w_dtype, w_order, zero): From noreply at buildbot.pypy.org Wed Jul 16 07:58:56 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 16 Jul 2014 07:58:56 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix per py3 Message-ID: <20140716055856.6EE701D284F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey 
Branch: py3k Changeset: r72450:c2eb80203244 Date: 2014-07-15 10:26 -0700 http://bitbucket.org/pypy/pypy/changeset/c2eb80203244/ Log: fix per py3 diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -168,12 +168,11 @@ def destructor(self): assert isinstance(self, W_Socket) - if self.fd != rsocket.INVALID_SOCKET: + if self.sock.fd != rsocket.INVALID_SOCKET: try: self._dealloc_warn() finally: self.close_w(self.space) - # --XXX-- def get_type_w(self, space): return space.wrap(self.sock.type) @@ -278,7 +277,7 @@ Close the socket object without closing the underlying file descriptor. The object cannot be used after this call, but the file descriptor can be reused for other purposes. The file descriptor is returned.""" - fd = self.detach() + fd = self.sock.detach() return space.wrap(intmask(fd)) def getpeername_w(self, space): @@ -670,8 +669,8 @@ [*] not available on all platforms!""", __new__ = interp2app(W_Socket.descr_new.im_func), __init__ = interp2app(W_Socket.descr_init), - type = interp_attrproperty('type', W_Socket), - proto = interp_attrproperty('proto', W_Socket), - family = interp_attrproperty('family', W_Socket), + type = GetSetProperty(W_Socket.get_type_w), + proto = GetSetProperty(W_Socket.get_proto_w), + family = GetSetProperty(W_Socket.get_family_w), ** socketmethods ) From noreply at buildbot.pypy.org Wed Jul 16 07:58:57 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 16 Jul 2014 07:58:57 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140716055857.D2C771D284F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72451:6934ff2f69be Date: 2014-07-15 22:57 -0700 http://bitbucket.org/pypy/pypy/changeset/6934ff2f69be/ Log: merge default diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ 
b/pypy/module/micronumpy/boxes.py @@ -153,12 +153,10 @@ raise OperationError(space.w_IndexError, space.wrap( "invalid index to scalar variable")) - ''' def descr_iter(self, space): # Making numpy scalar non-iterable with a valid __getitem__ method raise oefmt(space.w_TypeError, "'%T' object is not iterable", self) - ''' def descr_str(self, space): return space.wrap(self.get_dtype(space).itemtype.str_format(self)) @@ -501,6 +499,9 @@ return space.wrap(dtype.itemtype.to_str(read_val)) return read_val + def descr_iter(self, space): + return space.newseqiter(self) + def descr_setitem(self, space, w_item, w_value): if (space.isinstance_w(w_item, space.w_bytes) or space.isinstance_w(w_item, space.w_unicode)): @@ -551,7 +552,7 @@ __new__ = interp2app(W_GenericBox.descr__new__.im_func), __getitem__ = interp2app(W_GenericBox.descr_getitem), - #__iter__ = interp2app(W_GenericBox.descr_iter), + __iter__ = interp2app(W_GenericBox.descr_iter), __str__ = interp2app(W_GenericBox.descr_str), __repr__ = interp2app(W_GenericBox.descr_str), __format__ = interp2app(W_GenericBox.descr_format), @@ -772,6 +773,7 @@ __new__ = interp2app(W_VoidBox.descr__new__.im_func), __getitem__ = interp2app(W_VoidBox.descr_getitem), __setitem__ = interp2app(W_VoidBox.descr_setitem), + __iter__ = interp2app(W_VoidBox.descr_iter), ) W_CharacterBox.typedef = TypeDef("numpy.character", W_FlexibleBox.typedef, diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -6,7 +6,7 @@ from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) from rpython.rlib import jit -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, compute_hash from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from pypy.module.micronumpy import types, boxes, base, support, constants as NPY from 
pypy.module.micronumpy.appbridge import get_appbridge_cache @@ -73,7 +73,7 @@ self.base = subdtype.base def __repr__(self): - if self.fields is not None: + if self.fields: return '' % self.fields return '' % self.itemtype @@ -254,8 +254,38 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) + def _compute_hash(self, space, x): + from rpython.rlib.rarithmetic import intmask + if not self.fields and self.subdtype is None: + endian = self.byteorder + if endian == NPY.NATIVE: + endian = NPY.NATBYTE + flags = 0 + y = 0x345678 + y = intmask((1000003 * y) ^ ord(self.kind[0])) + y = intmask((1000003 * y) ^ ord(endian[0])) + y = intmask((1000003 * y) ^ flags) + y = intmask((1000003 * y) ^ self.elsize) + if self.is_flexible(): + y = intmask((1000003 * y) ^ self.alignment) + return intmask((1000003 * x) ^ y) + if self.fields: + for name, (offset, subdtype) in self.fields.iteritems(): + assert isinstance(subdtype, W_Dtype) + y = intmask(1000003 * (0x345678 ^ compute_hash(name))) + y = intmask(1000003 * (y ^ compute_hash(offset))) + y = intmask(1000003 * (y ^ subdtype._compute_hash(space, + 0x345678))) + x = intmask(x ^ y) + if self.subdtype is not None: + for s in self.shape: + x = intmask((1000003 * x) ^ compute_hash(s)) + x = self.base._compute_hash(space, x) + return x + def descr_hash(self, space): - return space.hash(self.descr_reduce(space)) + return space.wrap(self._compute_hash(space, 0x345678)) + def descr_str(self, space): if self.fields: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -367,15 +367,30 @@ d5 = numpy.dtype([('f0', 'i4'), ('f1', d2)]) d6 = numpy.dtype([('f0', 'i4'), ('f1', d3)]) import sys - if '__pypy__' not in sys.builtin_module_names: - assert hash(d1) == hash(d2) - assert hash(d1) != hash(d3) - assert hash(d4) == hash(d5) - assert hash(d4) != hash(d6) - else: - for 
d in [d1, d2, d3, d4, d5, d6]: - raises(TypeError, hash, d) + assert hash(d1) == hash(d2) + assert hash(d1) != hash(d3) + assert hash(d4) == hash(d5) + assert hash(d4) != hash(d6) + def test_record_hash(self): + from numpy import dtype + # make sure the fields hash return different value + # for different order of field in a structure + + # swap names + t1 = dtype([('x', '") assert dt.byteorder != dt1.byteorder diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -292,7 +292,6 @@ assert np.isnan(b/a) def test_scalar_iter(self): - skip('not implemented yet') from numpypy import int8, int16, int32, int64, float32, float64 for t in int8, int16, int32, int64, float32, float64: try: From noreply at buildbot.pypy.org Wed Jul 16 07:59:02 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 16 Jul 2014 07:59:02 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140716055902.72C8E1D284F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72452:a306aa63a658 Date: 2014-07-15 22:58 -0700 http://bitbucket.org/pypy/pypy/changeset/a306aa63a658/ Log: merge py3k diff too long, truncating to 2000 out of 13217 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -10,3 +10,7 @@ 20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 0000000000000000000000000000000000000000 release-2.3.0 394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +0000000000000000000000000000000000000000 release-2.2=3.1 diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -389,12 +389,13 @@ func.__name__ = 
name_or_ordinal return func -class PyDLL(CDLL): - """This class represents the Python library itself. It allows to - access Python API functions. The GIL is not released, and - Python exceptions are handled correctly. - """ - _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI +# Not in PyPy +#class PyDLL(CDLL): +# """This class represents the Python library itself. It allows to +# access Python API functions. The GIL is not released, and +# Python exceptions are handled correctly. +# """ +# _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI if _os.name in ("nt", "ce"): @@ -447,15 +448,8 @@ return self._dlltype(name) cdll = LibraryLoader(CDLL) -pydll = LibraryLoader(PyDLL) - -if _os.name in ("nt", "ce"): - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - +# not on PyPy +#pydll = LibraryLoader(PyDLL) if _os.name in ("nt", "ce"): windll = LibraryLoader(WinDLL) diff --git a/lib-python/2.7/ctypes/test/test_values.py b/lib-python/2.7/ctypes/test/test_values.py --- a/lib-python/2.7/ctypes/test/test_values.py +++ b/lib-python/2.7/ctypes/test/test_values.py @@ -4,6 +4,7 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test @@ -23,7 +24,8 @@ class Win_ValuesTestCase(unittest.TestCase): """This test only works when python itself is a dll/shared library""" - + + @xfail def test_optimizeflag(self): # This test accesses the Py_OptimizeFlag intger, which is # exported by the Python dll. @@ -40,6 +42,7 @@ else: self.assertEqual(opt, 2) + @xfail def test_frozentable(self): # Python exports a PyImport_FrozenModules symbol. This is a # pointer to an array of struct _frozen entries. 
The end of the @@ -75,6 +78,7 @@ from ctypes import _pointer_type_cache del _pointer_type_cache[struct_frozen] + @xfail def test_undefined(self): self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol") diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -98,6 +98,17 @@ self.assertTrue(key in self.g) self.assertTrue(self.g.has_key(key)) + def test_unicode_key(self): + key = u'ab' + value = u'cd' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) + def test_main(): run_unittest(TestGdbm) diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -55,11 +55,6 @@ import gc import sys import time -try: - import itertools -except ImportError: - # Must be an older Python version (see timeit() below) - itertools = None __all__ = ["Timer"] @@ -81,7 +76,8 @@ def inner(_it, _timer): %(setup)s _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 %(stmt)s _t1 = _timer() return _t1 - _t0 @@ -96,7 +92,8 @@ def inner(_it, _timer, _func=func): setup() _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 _func() _t1 = _timer() return _t1 - _t0 @@ -133,9 +130,19 @@ else: raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec code in globals(), ns - self.inner = ns["inner"] + def make_inner(): + # PyPy tweak: recompile the source code each time before + # calling inner(). 
There are situations like Issue #1776 + # where PyPy tries to reuse the JIT code from before, + # but that's not going to work: the first thing the + # function does is the "-s" statement, which may declare + # new classes (here a namedtuple). We end up with + # bridges from the inner loop; more and more of them + # every time we call inner(). + code = compile(src, dummy_src_name, "exec") + exec code in globals(), ns + return ns["inner"] + self.make_inner = make_inner elif hasattr(stmt, '__call__'): self.src = None if isinstance(setup, basestring): @@ -144,7 +151,8 @@ exec _setup in globals(), ns elif not hasattr(setup, '__call__'): raise ValueError("setup is neither a string nor callable") - self.inner = _template_func(setup, stmt) + inner = _template_func(setup, stmt) + self.make_inner = lambda: inner else: raise ValueError("stmt is neither a string nor callable") @@ -185,15 +193,12 @@ to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. 
""" - if itertools: - it = itertools.repeat(None, number) - else: - it = [None] * number + inner = self.make_inner() gcold = gc.isenabled() if '__pypy__' not in sys.builtin_module_names: gc.disable() # only do that on CPython try: - timing = self.inner(it, self.timer) + timing = inner(number, self.timer) finally: if gcold: gc.enable() diff --git a/lib-python/3/distutils/sysconfig_pypy.py b/lib-python/3/distutils/sysconfig_pypy.py --- a/lib-python/3/distutils/sysconfig_pypy.py +++ b/lib-python/3/distutils/sysconfig_pypy.py @@ -52,7 +52,7 @@ if prefix is None: prefix = PREFIX if standard_lib: - return os.path.join(prefix, "lib-python", get_python_version()) + return os.path.join(prefix, "lib-python", sys.version[0]) return os.path.join(prefix, 'site-packages') diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -218,6 +218,8 @@ if restype is None: import ctypes restype = ctypes.c_int + if self._argtypes_ is None: + self._argtypes_ = [] self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return diff --git a/lib_pypy/_gdbm.py b/lib_pypy/_gdbm.py --- a/lib_pypy/_gdbm.py +++ b/lib_pypy/_gdbm.py @@ -75,8 +75,8 @@ def _raise_from_errno(self): if ffi.errno: - raise error(os.strerror(ffi.errno)) - raise error(lib.gdbm_strerror(lib.gdbm_errno)) + raise error(ffi.errno, os.strerror(ffi.errno)) + raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno)) def __len__(self): if self.size < 0: @@ -151,7 +151,7 @@ def _check_closed(self): if not self.ll_dbm: - raise error("GDBM object has already been closed") + raise error(0, "GDBM object has already been closed") __del__ = close @@ -180,7 +180,7 @@ elif flags[0] == 'n': iflags = lib.GDBM_NEWDB else: - raise error("First flag must be one of 'r', 'w', 'c' or 'n'") + raise error(0, "First flag must be one of 'r', 'w', 'c' or 'n'") for flag in flags[1:]: if flag == 'f': iflags |= 
lib.GDBM_FAST @@ -189,7 +189,7 @@ elif flag == 'u': iflags |= lib.GDBM_NOLOCK else: - raise error("Flag '%s' not supported" % flag) + raise error(0, "Flag '%s' not supported" % flag) return gdbm(filename, iflags, mode) open_flags = "rwcnfsu" diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -13,7 +13,15 @@ k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') - output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = os.getuid() + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s_%s%s' % ( + username, k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) return output_dir diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.2" -__version_info__ = (0, 8, 2) +__version__ = "0.8.6" +__version_info__ = (0, 8, 6) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -55,8 +55,7 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert (backend.__version__ == __version__ or - backend.__version__ == __version__[:3]) + assert backend.__version__ == __version__ # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) 
@@ -443,6 +442,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + for key, val in ffi._parser._int_constants.items(): + if key not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -24,6 +24,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -99,6 +100,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -128,9 +130,10 @@ finally: if lock is not None: lock.release() - return ast, macros + # csource will be used to find buggy source text + return ast, macros, csource - def convert_pycparser_error(self, e, csource): + def _convert_pycparser_error(self, e, csource): # xxx look for ":NUM:" at the start of str(e) and try to interpret # it as a line number line = None @@ -142,6 +145,12 @@ csourcelines = csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: @@ -160,14 +169,9 @@ self._packed = prev_packed def _internal_parse(self, csource): - ast, macros = self._parse(csource) + ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." 
for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -175,27 +179,61 @@ if decl.name == '__dotdotdot__': break # - for decl in iterator: - if isinstance(decl, pycparser.c_ast.Decl): - self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise api.CDefError("typedef does not declare any name", - decl) - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_type(decl.name) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + try: + for decl in iterator: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) + and decl.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_type(decl.name) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_ptr_type(decl.name) + else: + realtype = self._get_type(decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = 
self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) + self._add_constants(key, pyvalue) + elif value == '...': + self._declare('macro ' + key, value) else: - raise api.CDefError("unrecognized construct", decl) + raise api.CDefError('only supports the syntax "#define ' + '%s ..." (literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) def _parse_decl(self, decl): node = decl.type @@ -227,7 +265,7 @@ self._declare('variable ' + decl.name, tp) def parse_type(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): @@ -306,7 +344,8 @@ if ident == 'void': return model.void_type if ident == '__dotdotdot__': - raise api.FFIError('bad usage of "..."') + raise api.FFIError(':%d: bad usage of "..."' % + typenode.coord.line) return resolve_common_type(ident) # if isinstance(type, pycparser.c_ast.Struct): @@ -333,7 +372,8 @@ return self._get_struct_union_enum_type('union', typenode, name, nested=True) # - raise api.FFIError("bad or unsupported type declaration") + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) @@ -499,6 
+539,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -506,8 +550,8 @@ self._partial_length = True return '...' # - raise api.FFIError("unsupported expression: expected a " - "simple numeric constant") + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) def _build_enum_type(self, explicit_name, decls): if decls is not None: @@ -522,6 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -535,3 +580,5 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + self._add_constants(k, v) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -38,6 +38,7 @@ import distutils.errors # dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() options = dist.get_option_dict('build_ext') options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -89,43 +89,54 @@ # by generate_cpy_function_method(). 
prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") - prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},') - prnt(' {NULL, NULL} /* Sentinel */') + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # # standard init. modname = self.verifier.get_module_name() - if sys.version_info >= (3,): - prnt('static struct PyModuleDef _cffi_module_def = {') - prnt(' PyModuleDef_HEAD_INIT,') - prnt(' "%s",' % modname) - prnt(' NULL,') - prnt(' -1,') - prnt(' _cffi_methods,') - prnt(' NULL, NULL, NULL, NULL') - prnt('};') - prnt() - initname = 'PyInit_%s' % modname - createmod = 'PyModule_Create(&_cffi_module_def)' - errorcase = 'return NULL' - finalreturn = 'return lib' - else: - initname = 'init%s' % modname - createmod = 'Py_InitModule("%s", _cffi_methods)' % modname - errorcase = 'return' - finalreturn = 'return' + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() prnt('PyMODINIT_FUNC') - prnt('%s(void)' % initname) + prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') - prnt(' lib = %s;' % createmod) - prnt(' if (lib == NULL || %s < 0)' % ( - self._chained_list_constants[False],)) - prnt(' %s;' % errorcase) - prnt(' _cffi_init();') - prnt(' %s;' % finalreturn) + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' 
% modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') def load_library(self): # XXX review all usages of 'self' here! @@ -394,7 +405,7 @@ meth = 'METH_O' else: meth = 'METH_VARARGS' - self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth)) + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop @@ -481,8 +492,8 @@ if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname, - layoutfuncname)) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: @@ -589,13 +600,7 @@ 'variable type'),)) assert delayed else: - prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name)) - prnt(' o = PyInt_FromLong((long)(%s));' % (name,)) - prnt(' else if ((%s) <= 0)' % (name,)) - prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,)) - prnt(' else') - prnt(' o = PyLong_FromUnsignedLongLong(' - '(unsigned long long)(%s));' % (name,)) + prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: @@ -632,13 +637,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int %s(PyObject *lib)' % funcname) prnt('{') @@ -760,17 +770,30 @@ #include #include -#ifdef MS_WIN32 -#include /* for 
alloca() */ -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif #if PY_MAJOR_VERSION < 3 @@ -795,6 +818,15 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ @@ -804,14 +836,14 @@ PyLong_FromLongLong(x))) #define _cffi_to_c_int(o, type) \ - (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ - : _cffi_to_c_i8(o)) : \ - sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ - : _cffi_to_c_i16(o)) : \ - sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ - : _cffi_to_c_i32(o)) : \ - sizeof(type) == 8 ? (((type)-1) > 0 ? 
_cffi_to_c_u64(o) \ - : _cffi_to_c_i64(o)) : \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -885,25 +917,32 @@ return PyBool_FromLong(was_alive); } -static void _cffi_init(void) +static int _cffi_init(void) { - PyObject *module = PyImport_ImportModule("_cffi_backend"); - PyObject *c_api_object; + PyObject *module, *c_api_object = NULL; + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) - return; + goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) - return; + goto failure; if (!PyCapsule_CheckExact(c_api_object)) { - Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); - return; + goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -249,10 +249,10 @@ prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') self.export_symbols.append(layoutfuncname) - prnt('ssize_t %s(ssize_t i)' % (layoutfuncname,)) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) - prnt(' static ssize_t nums[] = {') + prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' 
offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize in tp.enumfields(): @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] + BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -410,13 +410,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) @@ -430,14 +435,14 @@ enumerator, enumerator, enumvalue)) prnt(' char buf[64];') prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % enumerator) - prnt(' snprintf(out_error, 255,' + prnt(' sprintf(out_error,' ' "%s has the real value %s, not %s",') prnt(' "%s", buf, "%d");' % ( - enumerator, enumvalue)) + enumerator[:100], enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') @@ -453,7 +458,7 @@ else: BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: @@ -547,20 +552,29 @@ #include #include /* XXX for ssize_t on some platforms */ -#ifdef _WIN32 -# include 
-# define snprintf _snprintf -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef SSIZE_T ssize_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif #else -# include +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif ''' diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -116,7 +116,7 @@ try: for name in modlist: __import__(name) - except (ImportError, CompilationError, py.test.skip.Exception), e: + except (ImportError, CompilationError, py.test.skip.Exception) as e: errcls = e.__class__.__name__ raise Exception( "The module %r is disabled\n" % (modname,) + diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -105,7 +105,7 @@ while True: try: w_key = space.next(w_iter) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_StopIteration): raise # re-raise other app-level exceptions break @@ -348,8 +348,12 @@ **objects** - Normal rules apply. Special methods are not honoured, except ``__init__``, - ``__del__`` and ``__iter__``. + Normal rules apply. 
The only special methods that are honoured are + ``__init__``, ``__del__``, ``__len__``, ``__getitem__``, ``__setitem__``, + ``__getslice__``, ``__setslice__``, and ``__iter__``. To handle slicing, + ``__getslice__`` and ``__setslice__`` must be used; using ``__getitem__`` and + ``__setitem__`` for slicing isn't supported. Additionally, using negative + indices for slicing is still not support, even when using ``__getslice__``. This layout makes the number of types to take care about quite limited. @@ -567,7 +571,7 @@ try: ... - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_XxxError): raise ... diff --git a/pypy/doc/config/translation.log.txt b/pypy/doc/config/translation.log.txt --- a/pypy/doc/config/translation.log.txt +++ b/pypy/doc/config/translation.log.txt @@ -2,4 +2,4 @@ These must be enabled by setting the PYPYLOG environment variable. The exact set of features supported by PYPYLOG is described in -pypy/translation/c/src/debug_print.h. +rpython/translator/c/src/debug_print.h. diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,13 +72,11 @@ Here is a list of the limitations and missing features of the current implementation: -* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer - of PyPy, at your own risks and without doing anything sensible about - the GIL. Since PyPy 2.3, these functions are also named with an extra - "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, - but it might more or less work in simple cases if you do. (Obviously, - assuming the PyObject pointers you get have any particular fields in - any particular order is just going to crash.) +* ``ctypes.pythonapi`` is missing. 
In previous versions, it was present + and redirected to the `cpyext` C API emulation layer, but our + implementation did not do anything sensible about the GIL and the + functions were named with an extra "Py", for example + ``PyPyInt_FromLong()``. It was removed for being unhelpful. * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -8,6 +8,9 @@ *Articles about PyPy published so far, most recent first:* (bibtex_ file) +* `A Way Forward in Parallelising Dynamic Languages`_, + R. Meier, A. Rigo + * `Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`_, C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo @@ -71,6 +74,7 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib +.. _`A Way Forward in Parallelising Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2014/position-paper.pdf .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf .. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf .. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf @@ -93,6 +97,11 @@ Talks and Presentations ---------------------------------- +*This part is no longer updated.* The complete list is here__ (in +alphabetical order). + +.. __: https://bitbucket.org/pypy/extradoc/src/extradoc/talk/ + Talks in 2010 +++++++++++++ diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -171,16 +171,21 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. 
-Note that the JIT has a very high warm-up cost, meaning that the -programs are slow at the beginning. If you want to compare the timings -with CPython, even relatively simple programs need to run *at least* one -second, preferrably at least a few seconds. Large, complicated programs -need even more time to warm-up the JIT. +`Your tests are not a benchmark`_: tests tend to be slow under PyPy +because they run exactly once; if they are good tests, they exercise +various corner cases in your code. This is a bad case for JIT +compilers. Note also that our JIT has a very high warm-up cost, meaning +that any program is slow at the beginning. If you want to compare the +timings with CPython, even relatively simple programs need to run *at +least* one second, preferrably at least a few seconds. Large, +complicated programs need even more time to warm-up the JIT. .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +.. _`your tests are not a benchmark`: http://alexgaynor.net/2013/jul/15/your-tests-are-not-benchmark/ + --------------------------------------------------------------- Couldn't the JIT dump and reload already-compiled machine code? --------------------------------------------------------------- @@ -465,9 +470,13 @@ This is documented (here__ and here__). It needs 4 GB of RAM to run "rpython targetpypystandalone" on top of PyPy, a bit more when running -on CPython. If you have less than 4 GB it will just swap forever (or -fail if you don't have enough swap). On 32-bit, divide the numbers by -two. +on top of CPython. If you have less than 4 GB free, it will just swap +forever (or fail if you don't have enough swap). And we mean *free:* +if the machine has 4 GB *in total,* then it will swap. + +On 32-bit, divide the numbers by two. (We didn't try recently, but in +the past it was possible to compile a 32-bit version on a 2 GB Linux +machine with nothing else running: no Gnome/KDE, for example.) .. 
__: http://pypy.org/download.html#building-from-source .. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-2.3.1.rst release-2.3.0.rst release-2.2.1.rst release-2.2.0.rst diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.3.0`_: the latest official release +* `Release 2.3.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.3.0`: http://pypy.org/download.html +.. _`Release 2.3.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst --- a/pypy/doc/jit-hooks.rst +++ b/pypy/doc/jit-hooks.rst @@ -34,7 +34,7 @@ aborted due to some reason. The hook will be invoked with the siagnture: - ``hook(jitdriver_name, greenkey, reason)`` + ``hook(jitdriver_name, greenkey, reason, oplist)`` Reason is a string, the meaning of other arguments is the same as attributes on JitLoopInfo object diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -95,13 +95,12 @@ ``PYPYLOG`` If set to a non-empty value, enable logging, the format is: - *fname* + *fname* or *+fname* logging for profiling: includes all ``debug_start``/``debug_stop`` but not any nested ``debug_print``. *fname* can be ``-`` to log to *stderr*. - Note that using a : in fname is a bad idea, Windows - users, beware. 
+ The *+fname* form can be used if there is a *:* in fname ``:``\ *fname* Full logging, including ``debug_print``. diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -26,6 +26,16 @@ Transparent Proxies ================================ +.. warning:: + + This is a feature that was tried experimentally long ago, and we + found no really good use cases. The basic functionality is still + there, but we don't recommend using it. Some of the examples below + might not work any more (e.g. you can't tproxy a list object any + more). The rest can be done by hacking in standard Python. If + anyone is interested in working on tproxy again, he is welcome, but + we don't regard this as an interesting extension. + PyPy's Transparent Proxies allow routing of operations on objects to a callable. Application level code can customize objects without interfering with the type system - ``type(proxied_list) is list`` holds true diff --git a/pypy/doc/release-pypy3-2.3.1.rst b/pypy/doc/release-pypy3-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy3-2.3.1.rst @@ -0,0 +1,69 @@ +===================== +PyPy3 2.3.1 - Fulcrum +===================== + +We're pleased to announce the first stable release of PyPy3. PyPy3 +targets Python 3 (3.2.5) compatibility. + +We would like to thank all of the people who donated_ to the `py3k proposal`_ +for supporting the work that went into this. + +You can download the PyPy3 2.3.1 release here: + + http://pypy.org/download.html#pypy3-2-3-1 + +Highlights +========== + +* The first stable release of PyPy3: support for Python 3! 
+ +* The stdlib has been updated to Python 3.2.5 + +* Additional support for the u'unicode' syntax (`PEP 414`_) from Python 3.3 + +* Updates from the default branch, such as incremental GC and various JIT + improvements + +* Resolved some notable JIT performance regressions from PyPy2: + + - Re-enabled the previously disabled collection (list/dict/set) strategies + + - Resolved performance of iteration over range objects + + - Resolved handling of Python 3's exception __context__ unnecessarily forcing + frame object overhead + +.. _`PEP 414`: http://legacy.python.org/dev/peps/pep-0414/ + +What is PyPy? +============== + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.6 or 3.2.5. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +How to use PyPy? +================= + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. 
_`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -28,7 +28,8 @@ Introduction ============ -``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ +``pypy-stm`` is a variant of the regular PyPy interpreter. (This +version supports Python 2.7; see below for `Python 3`_.) With caveats_ listed below, it should be in theory within 20%-50% slower than a regular PyPy, comparing the JIT version in both cases (but see below!). It is called @@ -92,9 +93,9 @@ We're busy fixing them as we find them; feel free to `report bugs`_. * It runs with an overhead as low as 20% on examples like "richards". - There are also other examples with higher overheads --up to 10x for - "translate.py"-- which we are still trying to understand. One suspect - is our partial GC implementation, see below. + There are also other examples with higher overheads --currently up to + 2x for "translate.py"-- which we are still trying to understand. + One suspect is our partial GC implementation, see below. * Currently limited to 1.5 GB of RAM (this is just a parameter in `core.h`__). Memory overflows are not correctly handled; they cause @@ -111,9 +112,8 @@ * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. Programs allocating large - numbers of small objects that don't immediately die, as well as - programs that modify large lists or dicts, suffer from these missing - optimizations. + numbers of small objects that don't immediately die (surely a common + situation) suffer from these missing optimizations. * The GC has no support for destructors: the ``__del__`` method is never called (including on file objects, which won't be closed for you). 
@@ -138,6 +138,25 @@ +Python 3 +======== + +In this document I describe "pypy-stm", which is based on PyPy's Python +2.7 interpreter. Supporting Python 3 should take about half an +afternoon of work. Obviously, what I *don't* mean is that by tomorrow +you can have a finished and polished "pypy3-stm" product. General py3k +work is still missing; and general stm work is also still missing. But +they are rather independent from each other, as usual in PyPy. The +required afternoon of work will certainly be done one of these days now +that the internal interfaces seem to stabilize. + +The same is true for other languages implemented in the RPython +framework, although the amount of work to put there might vary, because +the STM framework within RPython is currently targeting the PyPy +interpreter and other ones might have slightly different needs. + + + User Guide ========== @@ -490,8 +509,6 @@ The last two lines are special; they are an internal marker read by ``transactional_memory.print_abort_info()``. -These statistics are not printed out for the main thread, for now. - Reference to implementation details ----------------------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,6 +3,50 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: 87fdc76bccb4 +.. startrev: ca9b7cf02cf4 +.. branch: fix-bytearray-complexity +Bytearray operations no longer copy the bytearray unnecessarily +Added support for ``__getitem__``, ``__setitem__``, ``__getslice__``, +``__setslice__``, and ``__len__`` to RPython + +.. branch: stringbuilder2-perf +Give the StringBuilder a more flexible internal structure, with a +chained list of strings instead of just one string. This make it +more efficient when building large strings, e.g. with cStringIO(). + +Also, use systematically jit.conditional_call() instead of regular +branches. 
This lets the JIT make more linear code, at the cost of +forcing a bit more data (to be passed as arguments to +conditional_calls). I would expect the net result to be a slight +slow-down on some simple benchmarks and a speed-up on bigger +programs. + +.. branch: ec-threadlocal +Change the executioncontext's lookup to be done by reading a thread- +local variable (which is implemented in C using '__thread' if +possible, and pthread_getspecific() otherwise). On Linux x86 and +x86-64, the JIT backend has a special optimization that lets it emit +directly a single MOV from a %gs- or %fs-based address. It seems +actually to give a good boost in performance. + +.. branch: fast-gil +A faster way to handle the GIL, particularly in JIT code. The GIL is +now a composite of two concepts: a global number (it's just set from +1 to 0 and back around CALL_RELEASE_GIL), and a real mutex. If there +are threads waiting to acquire the GIL, one of them is actively +checking the global number every 0.1 ms to 1 ms. Overall, JIT loops +full of external function calls now run a bit faster (if no thread was +started yet), or a *lot* faster (if threads were started already). + +.. branch: jit-get-errno +Optimize the errno handling in the JIT, notably around external +function calls. Linux-only. + +.. branch: disable_pythonapi +Remove non-functioning ctypes.pyhonapi and ctypes.PyDLL, document this +incompatibility with cpython. Recast sys.dllhandle to an int. + +.. branch: scalar-operations +Fix performance regression on ufunc(, ) in numpy. diff --git a/pypy/doc/whatsnew-pypy3-2.3.1.rst b/pypy/doc/whatsnew-pypy3-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-pypy3-2.3.1.rst @@ -0,0 +1,6 @@ +========================= +What's new in PyPy3 2.3.1 +========================= + +.. this is a revision shortly after pypy3-release-2.3.x +.. 
startrev: 0137d8e6657d diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -132,19 +132,23 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download http://www.gzip.org/zlib/zlib-1.2.3.tar.gz and extract it in -the base directory. Then compile:: +the base directory. Then compile as a static library:: cd zlib-1.2.3 nmake -f win32\Makefile.msc - copy zlib1.dll \zlib.dll + copy zlib1.lib + copy zlib.h zconf.h The bz2 compression library ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Get the same version of bz2 used by python and compile as a static library:: svn export http://svn.python.org/projects/external/bzip2-1.0.6 cd bzip2-1.0.6 nmake -f makefile.msc - copy bzip.dll \bzip.dll + copy libbz2.lib + copy bzlib.h + The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -166,7 +170,8 @@ is actually enough for pypy). Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in -your PATH. +your PATH, ``win32\bin\release\libexpat.lib`` somewhere in LIB, and +both ``lib\expat.h`` and ``lib\expat_external.h`` somewhere in INCLUDE. 
The OpenSSL library ~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -31,8 +31,6 @@ if w_dict is not None: # for tests w_entry_point = space.getitem(w_dict, space.wrap('entry_point')) w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel')) - w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish)) - w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup)) withjit = space.config.objspace.usemodules.pypyjit def entry_point(argv): @@ -54,10 +52,10 @@ argv = argv[:1] + argv[3:] try: try: - space.call_function(w_run_toplevel, w_call_startup_gateway) + space.startup() if rlocale.HAVE_LANGINFO: try: - rlocale.setlocale(rlocale.LC_ALL, '') + rlocale.setlocale(rlocale.LC_CTYPE, '') except rlocale.LocaleError: pass w_executable = space.fsdecode(space.wrapbytes(argv[0])) @@ -76,7 +74,7 @@ return 1 finally: try: - space.call_function(w_run_toplevel, w_call_finish_gateway) + space.finish() except OperationError, e: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space).encode('utf-8')) @@ -191,11 +189,6 @@ 'pypy_thread_attach': pypy_thread_attach, 'pypy_setup_home': pypy_setup_home} -def call_finish(space): - space.finish() - -def call_startup(space): - space.startup() # _____ Define and setup target ___ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -373,6 +373,7 @@ def startup(self): # To be called before using the space + self.threadlocals.enter_thread(self) # Initialize already imported builtin modules from pypy.interpreter.module import Module @@ -630,30 +631,36 @@ """NOT_RPYTHON: Abstract method that should put some minimal content into the w_builtins.""" - @jit.loop_invariant def getexecutioncontext(self): "Return what we consider to be the active execution context." 
# Important: the annotator must not see a prebuilt ExecutionContext: # you should not see frames while you translate # so we make sure that the threadlocals never *have* an # ExecutionContext during translation. - if self.config.translating and not we_are_translated(): - assert self.threadlocals.getvalue() is None, ( - "threadlocals got an ExecutionContext during translation!") - try: - return self._ec_during_translation - except AttributeError: - ec = self.createexecutioncontext() - self._ec_during_translation = ec + if not we_are_translated(): + if self.config.translating: + assert self.threadlocals.get_ec() is None, ( + "threadlocals got an ExecutionContext during translation!") + try: + return self._ec_during_translation + except AttributeError: + ec = self.createexecutioncontext() + self._ec_during_translation = ec + return ec + else: + ec = self.threadlocals.get_ec() + if ec is None: + self.threadlocals.enter_thread(self) + ec = self.threadlocals.get_ec() return ec - # normal case follows. The 'thread' module installs a real - # thread-local object in self.threadlocals, so this builds - # and caches a new ec in each thread. - ec = self.threadlocals.getvalue() - if ec is None: - ec = self.createexecutioncontext() - self.threadlocals.setvalue(ec) - return ec + else: + # translated case follows. self.threadlocals is either from + # 'pypy.interpreter.miscutils' or 'pypy.module.thread.threadlocals'. + # the result is assumed to be non-null: enter_thread() was called + # by space.startup(). + ec = self.threadlocals.get_ec() + assert ec is not None + return ec def _freeze_(self): return True @@ -955,6 +962,13 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_no_unpack(self, w_iterable): + """ Same as listview() if cheap. If 'w_iterable' is something like + a generator, for example, then return None instead. + May return None anyway. 
+ """ + return None + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -499,6 +499,13 @@ """ +class UserDelCallback(object): + def __init__(self, w_obj, callback, descrname): + self.w_obj = w_obj + self.callback = callback + self.descrname = descrname + self.next = None + class UserDelAction(AsyncAction): """An action that invokes all pending app-level __del__() method. This is done as an action instead of immediately when the @@ -509,13 +516,19 @@ def __init__(self, space): AsyncAction.__init__(self, space) - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None self.finalizers_lock_count = 0 self.enabled_at_app_level = True self._invoke_immediately = False def register_callback(self, w_obj, callback, descrname): - self.dying_objects.append((w_obj, callback, descrname)) + cb = UserDelCallback(w_obj, callback, descrname) + if self.dying_objects_last is None: + self.dying_objects = cb + else: + self.dying_objects_last.next = cb + self.dying_objects_last = cb if not self._invoke_immediately: self.fire() else: @@ -532,13 +545,33 @@ # avoid too deep recursions of the kind of __del__ being called # while in the middle of another __del__ call. 
pending = self.dying_objects - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None space = self.space - for i in range(len(pending)): - w_obj, callback, descrname = pending[i] - pending[i] = (None, None, None) + while pending is not None: try: - callback(w_obj) + pending.callback(pending.w_obj) except OperationError, e: - e.write_unraisable(space, descrname, w_obj) + e.write_unraisable(space, pending.descrname, pending.w_obj) e.clear(space) # break up reference cycles + pending = pending.next + # + # Note: 'dying_objects' used to be just a regular list instead + # of a chained list. This was the cause of "leaks" if we have a + # program that constantly creates new objects with finalizers. + # Here is why: say 'dying_objects' is a long list, and there + # are n instances in it. Then we spend some time in this + # function, possibly triggering more GCs, but keeping the list + # of length n alive. Then the list is suddenly freed at the + # end, and we return to the user program. At this point the + # GC limit is still very high, because just before, there was + # a list of length n alive. Assume that the program continues + # to allocate a lot of instances with finalizers. The high GC + # limit means that it could allocate a lot of instances before + # reaching it --- possibly more than n. So the whole procedure + # repeats with higher and higher values of n. + # + # This does not occur in the current implementation because + # there is no list of length n: if n is large, then the GC + # will run several times while walking the list, but it will + # see lower and lower memory usage, with no lower bound of n. 
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -930,7 +930,7 @@ "use unwrap_spec(...=WrappedDefault(default))" % ( self._code.identifier, name, defaultval)) defs_w.append(None) - else: + elif name != '__args__' and name != 'args_w': spec = unwrap_spec[i] if isinstance(defaultval, str) and spec not in [str]: defs_w.append(space.wrapbytes(defaultval)) diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -61,6 +61,13 @@ return self.send_ex(w_arg) def send_ex(self, w_arg, operr=None): + pycode = self.pycode + if jit.we_are_jitted() and should_not_inline(pycode): + generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg, + operr=operr, pycode=pycode) + return self._send_ex(w_arg, operr) + + def _send_ex(self, w_arg, operr): space = self.space if self.running: raise OperationError(space.w_ValueError, @@ -72,8 +79,7 @@ if operr is None: operr = OperationError(space.w_StopIteration, space.w_None) raise operr - # XXX it's not clear that last_instr should be promoted at all - # but as long as it is necessary for call_assembler, let's do it early + last_instr = jit.promote(frame.last_instr) if last_instr == -1: if w_arg and not space.is_w(w_arg, space.w_None): @@ -219,3 +225,38 @@ "interrupting generator of ") break block = block.previous + + + +def get_printable_location_genentry(bytecode): + return '%s ' % (bytecode.get_repr(),) +generatorentry_driver = jit.JitDriver(greens=['pycode'], + reds=['gen', 'w_arg', 'operr'], + get_printable_location = + get_printable_location_genentry, + name='generatorentry') + +from pypy.tool.stdlib_opcode import HAVE_ARGUMENT, opmap +YIELD_VALUE = opmap['YIELD_VALUE'] + + at jit.elidable_promote() +def should_not_inline(pycode): + # Should not inline generators with more than one "yield", + # as an approximative fix (see issue #1782). 
There are cases + # where it slows things down; for example calls to a simple + # generator that just produces a few simple values with a few + # consecutive "yield" statements. It fixes the near-infinite + # slow-down in issue #1782, though... + count_yields = 0 + code = pycode.co_code + n = len(code) + i = 0 + while i < n: + c = code[i] + op = ord(c) + if op == YIELD_VALUE: + count_yields += 1 + i += 1 + if op >= HAVE_ARGUMENT: + i += 2 + return count_yields >= 2 diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py --- a/pypy/interpreter/miscutils.py +++ b/pypy/interpreter/miscutils.py @@ -11,11 +11,14 @@ """ _value = None - def getvalue(self): + def get_ec(self): return self._value - def setvalue(self, value): - self._value = value + def enter_thread(self, space): + self._value = space.createexecutioncontext() + + def try_enter_thread(self, space): + return False def signals_enabled(self): return True diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -755,6 +755,22 @@ never_called py.test.raises(AssertionError, space.wrap, gateway.interp2app_temp(g)) + def test_unwrap_spec_default_applevel_bug2(self): + space = self.space + def g(space, w_x, w_y=None, __args__=None): + return w_x + w_g = space.wrap(gateway.interp2app_temp(g)) + w_42 = space.call_function(w_g, space.wrap(42)) + assert space.int_w(w_42) == 42 + py.test.raises(gateway.OperationError, space.call_function, w_g) + # + def g(space, w_x, w_y=None, args_w=None): + return w_x + w_g = space.wrap(gateway.interp2app_temp(g)) + w_42 = space.call_function(w_g, space.wrap(42)) + assert space.int_w(w_42) == 42 + py.test.raises(gateway.OperationError, space.call_function, w_g) + def test_interp2app_doc(self): space = self.space def f(space, w_x): diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- 
a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -300,3 +300,20 @@ yield 1 raise StopIteration assert tuple(f()) == (1,) + + +def test_should_not_inline(space): + from pypy.interpreter.generator import should_not_inline + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + return g.func_code + ''') + assert should_not_inline(w_co) == False + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + yield x + 6 + return g.func_code + ''') + assert should_not_inline(w_co) == True diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -74,13 +74,12 @@ 'hidden_applevel' : 'interp_magic.hidden_applevel', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', - 'list_strategy' : 'interp_magic.list_strategy', 'validate_fd' : 'interp_magic.validate_fd', 'resizelist_hint' : 'interp_magic.resizelist_hint', 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', - 'dictstrategy' : 'interp_dict.dictstrategy', + 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', 'normalize_exc' : 'interp_magic.normalize_exc', diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -1,7 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.objspace.std.dictmultiobject import W_DictMultiObject @unwrap_spec(type=str) def newdict(space, type): @@ -31,13 +30,3 @@ return space.newdict(strdict=True) else: raise oefmt(space.w_TypeError, "unknown type of dict %s", type) - -def dictstrategy(space, w_obj): - """ dictstrategy(dict) - - show the 
underlaying strategy used by a dict object - """ - if not isinstance(w_obj, W_DictMultiObject): - raise OperationError(space.w_TypeError, - space.wrap("expecting dict object")) - return space.wrap('%r' % (w_obj.strategy,)) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -2,7 +2,9 @@ from pypy.interpreter.gateway import WrappedDefault, unwrap_spec from pypy.interpreter.pyframe import PyFrame from rpython.rlib.objectmodel import we_are_translated +from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.setobject import W_BaseSetObject from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -74,12 +76,23 @@ def do_what_I_mean(space): return space.wrap(42) -def list_strategy(space, w_list): - if isinstance(w_list, W_ListObject): - return space.wrap(w_list.strategy._applevel_repr) + +def strategy(space, w_obj): + """ strategy(dict or list or set) + + Return the underlying strategy currently used by a dict, list or set object + """ + if isinstance(w_obj, W_DictMultiObject): + name = w_obj.strategy.__class__.__name__ + elif isinstance(w_obj, W_ListObject): + name = w_obj.strategy.__class__.__name__ + elif isinstance(w_obj, W_BaseSetObject): + name = w_obj.strategy.__class__.__name__ else: - w_msg = space.wrap("Can only get the list strategy of a list") - raise OperationError(space.w_TypeError, w_msg) + raise OperationError(space.w_TypeError, + space.wrap("expecting dict or list or set object")) + return space.wrap(name) + @unwrap_spec(fd='c_int') def validate_fd(space, fd): diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ 
-75,24 +75,38 @@ assert x == 42 def test_list_strategy(self): - from __pypy__ import list_strategy + from __pypy__ import strategy l = [1, 2, 3] - assert list_strategy(l) == "int" - l = list(range(1, 2)) - assert list_strategy(l) == "int" + assert strategy(l) == "IntegerListStrategy" l = [b"a", b"b", b"c"] - assert list_strategy(l) == "bytes" - l = ["a", "b", "c"] - assert list_strategy(l) == "unicode" + assert strategy(l) == "BytesListStrategy" + l = [u"a", u"b", u"c"] + assert strategy(l) == "UnicodeListStrategy" l = [1.1, 2.2, 3.3] - assert list_strategy(l) == "float" + assert strategy(l) == "FloatListStrategy" l = [1, "b", 3] - assert list_strategy(l) == "object" + assert strategy(l) == "ObjectListStrategy" l = [] - assert list_strategy(l) == "empty" + assert strategy(l) == "EmptyListStrategy" o = 5 - raises(TypeError, list_strategy, 5) + raises(TypeError, strategy, 5) + + def test_dict_strategy(self): + from __pypy__ import strategy + + d = {} + assert strategy(d) == "EmptyDictStrategy" + d = {1: None, 5: None} + assert strategy(d) == "IntDictStrategy" + + def test_set_strategy(self): + from __pypy__ import strategy + + s = set() + assert strategy(s) == "EmptySetStrategy" + s = set([2, 3, 4]) + assert strategy(s) == "IntegerSetStrategy" def test_normalize_exc(self): from __pypy__ import normalize_exc diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -8,7 +8,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.8.2")', + '__version__': 'space.wrap("0.8.6")', 'load_library': 'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -183,9 +183,12 @@ misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) return # + must_leave = False ec = None + space = callback.space 
try: - ec = cerrno.get_errno_container(callback.space) + must_leave = space.threadlocals.try_enter_thread(space) + ec = cerrno.get_errno_container(space) cerrno.save_errno_into(ec, e) extra_line = '' try: @@ -206,5 +209,7 @@ except OSError: pass callback.write_error_return_value(ll_res) + if must_leave: + space.threadlocals.leave_thread(space) if ec is not None: cerrno.restore_errno_from(ec) diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -4,7 +4,7 @@ import sys -from rpython.rlib import jit, clibffi, jit_libffi +from rpython.rlib import jit, clibffi, jit_libffi, rgc from rpython.rlib.jit_libffi import (CIF_DESCRIPTION, CIF_DESCRIPTION_P, FFI_TYPE, FFI_TYPE_P, FFI_TYPE_PP, SIZE_OF_FFI_ARG) from rpython.rlib.objectmodel import we_are_translated, instantiate @@ -63,6 +63,7 @@ CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) return ctypefunc + @rgc.must_be_light_finalizer def __del__(self): if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') @@ -156,8 +157,8 @@ data = rffi.ptradd(buffer, cif_descr.exchange_args[i]) flag = get_mustfree_flag(data) if flag == 1: - raw_string = rffi.cast(rffi.CCHARPP, data)[0] - lltype.free(raw_string, flavor='raw') + raw_cdata = rffi.cast(rffi.CCHARPP, data)[0] + lltype.free(raw_cdata, flavor='raw') lltype.free(buffer, flavor='raw') return w_res diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3188,4 +3188,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8.2" + assert __version__ == "0.8.6" diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -467,10 +467,6 
@@ space.wrap("<_io.TextIOWrapper %s%sencoding=%r>"), w_args ) - def isatty_w(self, space): - self._check_init(space) - return space.call_method(self.w_buffer, "isatty") - def readable_w(self, space): self._check_init(space) return space.call_method(self.w_buffer, "readable") @@ -483,6 +479,10 @@ self._check_init(space) return space.call_method(self.w_buffer, "seekable") + def isatty_w(self, space): + self._check_init(space) + return space.call_method(self.w_buffer, "isatty") + def fileno_w(self, space): From noreply at buildbot.pypy.org Wed Jul 16 11:36:45 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 16 Jul 2014 11:36:45 +0200 (CEST) Subject: [pypy-commit] pypy default: Experimental logparser2json Message-ID: <20140716093645.8C4DE1D34F9@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r72453:889b1712379e Date: 2014-07-16 11:36 +0200 http://bitbucket.org/pypy/pypy/changeset/889b1712379e/ Log: Experimental logparser2json diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1377,49 +1377,53 @@ def do_residual_call(self, funcbox, argboxes, descr, pc, assembler_call=False, assembler_call_jd=None): - # First build allboxes: it may need some reordering from the - # list provided in argboxes, depending on the order in which - # the arguments are expected by the function - # - allboxes = self._build_allboxes(funcbox, argboxes, descr) - effectinfo = descr.get_extra_info() - if (assembler_call or - effectinfo.check_forces_virtual_or_virtualizable()): - # residual calls require attention to keep virtualizables in-sync - self.metainterp.clear_exception() - if effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL: - resbox = self._do_jit_force_virtual(allboxes, descr, pc) + debug_start("jit-residual-call") + try: + # First build allboxes: it may need some reordering from the + # list provided in argboxes, depending on the 
order in which + # the arguments are expected by the function + # + allboxes = self._build_allboxes(funcbox, argboxes, descr) + effectinfo = descr.get_extra_info() + if (assembler_call or + effectinfo.check_forces_virtual_or_virtualizable()): + # residual calls require attention to keep virtualizables in-sync + self.metainterp.clear_exception() + if effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL: + resbox = self._do_jit_force_virtual(allboxes, descr, pc) + if resbox is not None: + return resbox + self.metainterp.vable_and_vrefs_before_residual_call() + resbox = self.metainterp.execute_and_record_varargs( + rop.CALL_MAY_FORCE, allboxes, descr=descr) + if effectinfo.is_call_release_gil(): + self.metainterp.direct_call_release_gil() + self.metainterp.vrefs_after_residual_call() + vablebox = None + if assembler_call: + vablebox = self.metainterp.direct_assembler_call( + assembler_call_jd) if resbox is not None: - return resbox - self.metainterp.vable_and_vrefs_before_residual_call() - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE, allboxes, descr=descr) - if effectinfo.is_call_release_gil(): - self.metainterp.direct_call_release_gil() - self.metainterp.vrefs_after_residual_call() - vablebox = None - if assembler_call: - vablebox = self.metainterp.direct_assembler_call( - assembler_call_jd) - if resbox is not None: - self.make_result_of_lastop(resbox) - self.metainterp.vable_after_residual_call(funcbox) - self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) - if vablebox is not None: - self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) - self.metainterp.handle_possible_exception() - # XXX refactor: direct_libffi_call() is a hack - if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: - self.metainterp.direct_libffi_call() - return resbox - else: - effect = effectinfo.extraeffect - if effect == effectinfo.EF_LOOPINVARIANT: - return self.execute_varargs(rop.CALL_LOOPINVARIANT, allboxes, - descr, False, False) - 
exc = effectinfo.check_can_raise() - pure = effectinfo.check_is_elidable() - return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) + self.make_result_of_lastop(resbox) + self.metainterp.vable_after_residual_call(funcbox) + self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) + if vablebox is not None: + self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) + self.metainterp.handle_possible_exception() + # XXX refactor: direct_libffi_call() is a hack + if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: + self.metainterp.direct_libffi_call() + return resbox + else: + effect = effectinfo.extraeffect + if effect == effectinfo.EF_LOOPINVARIANT: + return self.execute_varargs(rop.CALL_LOOPINVARIANT, allboxes, + descr, False, False) + exc = effectinfo.check_can_raise() + pure = effectinfo.check_is_elidable() + return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) + finally: + debug_stop("jit-residual-call") def do_conditional_call(self, condbox, funcbox, argboxes, descr, pc): if isinstance(condbox, ConstInt) and condbox.value == 0: diff --git a/rpython/jit/tool/oparser_model.py b/rpython/jit/tool/oparser_model.py --- a/rpython/jit/tool/oparser_model.py +++ b/rpython/jit/tool/oparser_model.py @@ -124,6 +124,15 @@ class ExtendedTreeLoop(model.TreeLoop): + def as_json(self): + return { + 'comment': self.comment, + 'name': self.name, + 'operations': [op.as_json() for op in self.operations], + 'inputargs': self.inputargs, + 'last_offset': self.last_offset + } + def getboxes(self): def opboxes(operations): for op in operations: diff --git a/rpython/tool/jitlogparser/logparser2json.py b/rpython/tool/jitlogparser/logparser2json.py new file mode 100755 --- /dev/null +++ b/rpython/tool/jitlogparser/logparser2json.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +""" Convert logfile (from jit-log-opt and jit-backend) to json format. 
+Usage: + +logparser2json.py +""" + +import os +import sys +import json +from rpython.tool.jitlogparser.parser import import_log, parse_log_counts +from rpython.tool.logparser import extract_category +from rpython.tool.jitlogparser.storage import LoopStorage + +def mangle_descr(descr): + if descr.startswith('TargetToken('): + return descr[len('TargetToken('):-1] + if descr.startswith(' Author: Sebastian Pawluś Branch: Changeset: r72454:674383905846 Date: 2014-07-16 12:11 +0200 http://bitbucket.org/pypy/pypy/changeset/674383905846/ Log: logparser2json indentation diff --git a/rpython/tool/jitlogparser/logparser2json.py b/rpython/tool/jitlogparser/logparser2json.py --- a/rpython/tool/jitlogparser/logparser2json.py +++ b/rpython/tool/jitlogparser/logparser2json.py @@ -35,7 +35,7 @@ if not loop.descr.startswith('bridge')] storage.loop_dict = create_loop_dict(loops) json.dump([loop.force_asm().as_json() for loop in storage.loops], - open(outfilename, "w")) + open(outfilename, "w"), indent=4) if __name__ == '__main__': if len(sys.argv) != 3: From noreply at buildbot.pypy.org Wed Jul 16 16:16:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Jul 2014 16:16:53 +0200 (CEST) Subject: [pypy-commit] pypy default: Move the class declaration outside the function. There is no Message-ID: <20140716141653.3B1911C0091@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72455:ace3f0199007 Date: 2014-07-16 16:16 +0200 http://bitbucket.org/pypy/pypy/changeset/ace3f0199007/ Log: Move the class declaration outside the function. There is no reason to have a local class: it doesn't depend on anything. This fixes issue #1818. 
diff --git a/lib-python/2.7/xml/sax/saxutils.py b/lib-python/2.7/xml/sax/saxutils.py --- a/lib-python/2.7/xml/sax/saxutils.py +++ b/lib-python/2.7/xml/sax/saxutils.py @@ -98,13 +98,14 @@ except AttributeError: pass # wrap a binary writer with TextIOWrapper - class UnbufferedTextIOWrapper(io.TextIOWrapper): - def write(self, s): - super(UnbufferedTextIOWrapper, self).write(s) - self.flush() - return UnbufferedTextIOWrapper(buffer, encoding=encoding, + return _UnbufferedTextIOWrapper(buffer, encoding=encoding, errors='xmlcharrefreplace', newline='\n') +# PyPy: moved this class outside the function above +class _UnbufferedTextIOWrapper(io.TextIOWrapper): + def write(self, s): + super(_UnbufferedTextIOWrapper, self).write(s) + self.flush() class XMLGenerator(handler.ContentHandler): From noreply at buildbot.pypy.org Wed Jul 16 17:13:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Jul 2014 17:13:46 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Update Message-ID: <20140716151346.790201D2856@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5356:f11577c01209 Date: 2014-07-16 17:13 +0200 http://bitbucket.org/pypy/extradoc/changeset/f11577c01209/ Log: Update diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst --- a/talk/ep2014/stm/talk.rst +++ b/talk/ep2014/stm/talk.rst @@ -45,13 +45,15 @@ for C: simplify your life for some problems, with a reasonable performance cost -- the problems are: +- this is about anything where the GIL is a blocker, obviously - - anything where the GIL is a blocker, obviously +- but also any program with "often-parallelizable" sections of + code - - but also any program with "often-parallelizable" - sections of code - +- there are actually a lot of such programs around when we think about + it: Bottle, Twisted, Tornado, etc. serve multiple clients; many + CPU-consuming programs at some point need to iterate over some + large-ish data structure in some mostly-composable way; etc. 
=========================================== @@ -99,11 +101,3 @@ - picture with nursery -- the GC can use the same write barrier - - -=========================================== -Part 3 - Multithreading Revisited -=========================================== - - -- From noreply at buildbot.pypy.org Wed Jul 16 20:08:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Jul 2014 20:08:28 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Now I'm finally progressing :-) Thanks Remi for the blueprint of the ICOOOLPS slides Message-ID: <20140716180828.C7E4F1D284F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5357:af502143bac9 Date: 2014-07-16 20:08 +0200 http://bitbucket.org/pypy/extradoc/changeset/af502143bac9/ Log: Now I'm finally progressing :-) Thanks Remi for the blueprint of the ICOOOLPS slides diff --git a/talk/ep2014/stm/Makefile b/talk/ep2014/stm/Makefile new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/Makefile @@ -0,0 +1,2 @@ +talk.html: talk.rst stylesheet.css + rst2s5 talk.rst --stylesheet-path=stylesheet.css > talk.html diff --git a/talk/ep2014/stm/stylesheet.css b/talk/ep2014/stm/stylesheet.css new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/stylesheet.css @@ -0,0 +1,330 @@ +/* +:Author: David Goodger (goodger at python.org) +:Id: $Id: html4css1.css 7614 2013-02-21 15:55:51Z milde $ +:Copyright: This stylesheet has been placed in the public domain. + +Default cascading style sheet for the HTML output of Docutils. + +See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to +customize this style sheet. +*/ + +/* used to remove borders from tables and images */ +.borderless, table.borderless td, table.borderless th { + border: 0 } + +table.borderless td, table.borderless th { + /* Override padding for "table.docutils td" with "! important". + The right padding separates the table cells. */ + padding: 0 0.5em 0 0 ! 
important } + +.first { + /* Override more specific margin styles with "! important". */ + margin-top: 0 ! important } + +.last, .with-subtitle { + margin-bottom: 0 ! important } + +.hidden { + display: none } + +a.toc-backref { + text-decoration: none ; + color: black } + +blockquote.epigraph { + margin: 2em 5em ; } + +dl.docutils dd { + margin-bottom: 0.5em } + +object[type="image/svg+xml"], object[type="application/x-shockwave-flash"] { + overflow: hidden; +} + +/* Uncomment (and remove this text!) to get bold-faced definition list terms +dl.docutils dt { + font-weight: bold } +*/ + +div.abstract { + margin: 2em 5em } + +div.abstract p.topic-title { + font-weight: bold ; + text-align: center } + +div.admonition, div.attention, div.caution, div.danger, div.error, +div.hint, div.important, div.note, div.tip, div.warning { + margin: 2em ; + border: medium outset ; + padding: 1em } + +div.admonition p.admonition-title, div.hint p.admonition-title, +div.important p.admonition-title, div.note p.admonition-title, +div.tip p.admonition-title { + font-weight: bold ; + font-family: sans-serif } + +div.attention p.admonition-title, div.caution p.admonition-title, +div.danger p.admonition-title, div.error p.admonition-title, +div.warning p.admonition-title, .code .error { + color: red ; + font-weight: bold ; + font-family: sans-serif } + +/* Uncomment (and remove this text!) to get reduced vertical space in + compound paragraphs. 
+div.compound .compound-first, div.compound .compound-middle { + margin-bottom: 0.5em } + +div.compound .compound-last, div.compound .compound-middle { + margin-top: 0.5em } +*/ + +div.dedication { + margin: 2em 5em ; + text-align: center ; + font-style: italic } + +div.dedication p.topic-title { + font-weight: bold ; + font-style: normal } + +div.figure { + margin-left: 2em ; + margin-right: 2em } + +div.footer, div.header { + clear: both; + font-size: smaller } + +div.line-block { + display: block ; + margin-top: 1em ; + margin-bottom: 1em } + +div.line-block div.line-block { + margin-top: 0 ; + margin-bottom: 0 ; + margin-left: 1.5em } + +div.sidebar { + margin: 0 0 0.5em 1em ; + border: medium outset ; + padding: 1em ; + background-color: #ffffee ; + width: 40% ; + float: right ; + clear: right } + +div.sidebar p.rubric { + font-family: sans-serif ; + font-size: medium } + +div.system-messages { + margin: 5em } + +div.system-messages h1 { + color: red } + +div.system-message { + border: medium outset ; + padding: 1em } + +div.system-message p.system-message-title { + color: red ; + font-weight: bold } + +div.topic { + margin: 2em } + +h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, +h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { + margin-top: 0.4em } + +h1.title { + text-align: center } + +h2.subtitle { + text-align: center } + +hr.docutils { + width: 75% } + +img.align-left, .figure.align-left, object.align-left { + clear: left ; + float: left ; + margin-right: 1em } + +img.align-right, .figure.align-right, object.align-right { + clear: right ; + float: right ; + margin-left: 1em } + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left } + +.align-center { + clear: both ; + text-align: center } + +.align-right { + text-align: right } + +/* reset inner alignment in figures */ +div.align-right { + text-align: inherit } + 
+/* div.align-center * { */ +/* text-align: left } */ + +ol.simple, ul.simple { + margin-bottom: 1em } + +ol.arabic { + list-style: decimal } + +ol.loweralpha { + list-style: lower-alpha } + +ol.upperalpha { + list-style: upper-alpha } + +ol.lowerroman { + list-style: lower-roman } + +ol.upperroman { + list-style: upper-roman } + +p.attribution { + text-align: right ; + margin-left: 50% } + +p.caption { + font-style: italic } + +p.credits { + font-style: italic ; + font-size: smaller } + +p.label { + white-space: nowrap } + +p.rubric { + font-weight: bold ; + font-size: larger ; + color: maroon ; + text-align: center } + +p.sidebar-title { + font-family: sans-serif ; + font-weight: bold ; + font-size: larger } + +p.sidebar-subtitle { + font-family: sans-serif ; + font-weight: bold } + +p.topic-title { + font-weight: bold } + +pre.address { + margin-bottom: 0 ; + margin-top: 0 ; + font: inherit } + +pre.literal-block, pre.doctest-block, pre.math, pre.code { + margin-left: 2em ; + margin-right: 2em } + +pre.code .ln { color: grey; } /* line numbers */ +pre.code, code { background-color: #eeeeee } +pre.code .comment, code .comment { color: #5C6576 } +pre.code .keyword, code .keyword { color: #3B0D06; font-weight: bold } +pre.code .literal.string, code .literal.string { color: #0C5404 } +pre.code .name.builtin, code .name.builtin { color: #352B84 } +pre.code .deleted, code .deleted { background-color: #DEB0A1} +pre.code .inserted, code .inserted { background-color: #A3D289} + +span.classifier { + font-family: sans-serif ; + font-style: oblique } + +span.classifier-delimiter { + font-family: sans-serif ; + font-weight: bold } + +span.interpreted { + font-family: sans-serif } + +span.option { + white-space: nowrap } + +span.pre { + white-space: pre } + +span.problematic { + color: red } + +span.section-subtitle { + /* font-size relative to parent (h1..h6 element) */ + font-size: 80% } + +table.citation { + border-left: solid 1px gray; + margin-left: 1px } + 
+table.docinfo { + margin: 2em 4em } + +table.docutils { + margin-top: 0.5em ; + margin-bottom: 0.5em } + +table.footnote { + border-left: solid 1px black; + margin-left: 1px } + +table.docutils td, table.docutils th, +table.docinfo td, table.docinfo th { + padding-left: 0.5em ; + padding-right: 0.5em ; + vertical-align: top } + +table.docutils th.field-name, table.docinfo th.docinfo-name { + font-weight: bold ; + text-align: left ; + white-space: nowrap ; + padding-left: 0 } + +/* "booktabs" style (no vertical lines) */ +table.docutils.booktabs { + border: 0px; + border-top: 2px solid; + border-bottom: 2px solid; + border-collapse: collapse; +} +table.docutils.booktabs * { + border: 0px; +} +table.docutils.booktabs th { + border-bottom: thin solid; + text-align: left; +} + +h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, +h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { + font-size: 100% } + +ul.auto-toc { + list-style-type: none } + +.slide li { + padding-top: 0.2em } diff --git a/talk/ep2014/stm/talk.html b/talk/ep2014/stm/talk.html new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/talk.html @@ -0,0 +1,577 @@ + + + + + + + +Using All These Cores: Transactional Memory in PyPy + + + + + + + + + + + + + + +
        +
        +
        + + +
        +
        +
        +

        Using All These Cores: Transactional Memory in PyPy

        + +

        Armin Rigo - EuroPython 2014

        +
        +
        +
        +

        Part 1 - Intro and Current Status

        +
        +
        +

        Why is there a GIL?

        +
          +
        • GIL = Global Interpreter Lock
        • +
        • initially: CPython was single threaded
        • +
        • for concurrency (but not parallelism):
            +
          • provide concurrently running threads
          • +
          +
        • +
        • easiest way to retrofit into interpreter:
            +
          • acquire GIL around the execution of bytecode instructions
          • +
          • easy for refcounting, too
          • +
          +
        • +
        +
        +
        +

        Consequences (+)

        +
          +
        • atomic & isolated instructions:
            +
          • things like list.append() are atomic
          • +
          • tons of websites mention this
          • +
          • latent races if Python becomes really parallel
          • +
          +
        • +
        • sequential consistency:
            +
          • less surprises; "all variables volatile"
          • +
          +
        • +
        +
        +
        +

        Consequences (-)

        +
          +
        • obviously, no parallelism
        • +
        • GIL not available to application:
            +
          • all difficulties of concurrency still there
          • +
          • need application-level locking
          • +
          +
        • +
        +
        +
        +

        Removing the GIL

        +
          +
          1. +
          2. Fine-grained locking
          3. +
          +
        • +
          1. +
          2. Shared-nothing
          3. +
          +
        • +
          1. +
          2. Transactional memory
          3. +
          +
        • +
        +
        +
        +

        Fine-grained locking

        +
          +
        • replace GIL with locks on objs / data structures
        • +
        • accessing different objects can run in parallel
        • +
        • harder to implement:
            +
          • many locks -> deadlock risks
          • +
          • refcounting issue
          • +
          +
        • +
        • overhead of lock/unlock on objs:
            +
          • Jython depends on JVM for good lock removal
          • +
          +
        • +
        • still need application-level locking
        • +
        +
        +
        +

        Shared-nothing

        +
          +
        • each independent part of the program gets its own interpreter
        • +
        • simple implementation
        • +
        • gives workaround instead of direct replacement
        • +
        • not compatible with existing threaded applications, a priori
        • +
        • explicit communication:
            +
          • good: clean model, no locks
          • +
          • bad: limitations, overhead
          • +
          +
        • +
        +
        +
        +

        Transactional Memory

        +
          +
        • like GIL, but instead of locking, each thread runs optimistically
        • +
        • "easy" to implement:
            +
          • GIL acquire -> transaction start
          • +
          • GIL release -> transaction commit
          • +
          +
        • +
        • overhead: cross-checking conflicting memory reads and writes, +and if necessary, cancel and restart transactions
        • +
        • HTM, STM, or some hybrids exist:
            +
          • but mostly still research-only
          • +
          +
        • +
        +
        +
        +

        Big Point

        +
          +
        • application-level locks still needed...
        • +
        • but can be very coarse:
            +
          • the idea is to make sure, internally, that one transaction +covers the whole time during which the lock was acquired
          • +
          +
        • +
        +
        +
        +

        Demo 1

        +
          +
        • Bottle web server
        • +
        +
        +
        +

        PyPy-STM

        +
          +
        • implementation of a specially-tailored STM:
            +
          • a reusable C library
          • +
          • called STMGC-C7
          • +
          +
        • +
        • used in PyPy to replace the GIL
        • +
        • current status:
            +
          • basics work
          • +
          • tons of things to improve
          • +
          • tons of things to improve
          • +
          • tons of things to improve
          • +
          • tons of things to improve
          • +
          • tons of things to improve
          • +
          • tons of things to improve
          • +
          +
        • +
        +
        +
        +

        Demo 2

        +
          +
        • counting primes
        • +
        +
        +
        +

        Summary

        +
          +
        • Transactional Memory is still too researchy for production
        • +
        • Has the potential to enable parallelism:
            +
          • as a replacement of multiprocessing
          • +
          • but also in existing applications not written for that
          • +
          • as long as they do multiple things that are "often independent"
          • +
          +
        • +
        +
        +
        +

        Part 2 - Under The Hood

        +
          +
        • pictures "GIL" and "no GIL"
        • +
        • zoom with reads and writes
        • +
        • keep boundaries, each block is a _transaction_
        • +
        • completely the same semantics as when run with a GIL
        • +
        • write-write conflict
        • +
        • deadlock detection and resolution by abort-retry
        • +
        • read-write conflict: avoids (1) crashes, +(2) reads-from-the-past, (3) reads-from-the-future
        • +
        • reads are more common than writes: optimize read barriers
        • +
        • pypy-stm: write a thread-local flag "this object has been read", +show code for read barrier and fast-path of write barrier; +note about using the C library for CPython too
        • +
        • reads are not synchronized at all between CPUs, but it's wrong +to read data written by other in-progress transactions; +so we have to write elsewhere
        • +
        • but what if we read later an object we modified? doing any kind +of check in the read barrier makes it much more costly
        • +
        • a solution would be to give each thread its own "segment" of +memory, and copy data between them only at known points
        • +
        • mmap trick: we do that, but we use mmap sharing to view the same +pages of memory at several addresses in memory
        • +
        • show clang source code and assembler for %gs
        • +
        • picture with 15/16 objects, 1/16 read markers, one page control data
        • +
        • picture with nursery -- the GC can use the same write barrier
        • +
        +
        +
        + + diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst --- a/talk/ep2014/stm/talk.rst +++ b/talk/ep2014/stm/talk.rst @@ -2,63 +2,190 @@ Using All These Cores: Transactional Memory in PyPy ------------------------------------------------------------------------------ +.. raw:: html -=========================================== +
        + +**Armin Rigo - EuroPython 2014** + +.. raw:: html + +
        + + Part 1 - Intro and Current Status -=========================================== +--------------------------------- -- stm/demo/ -- stm/bottle/ +Why is there a GIL? +------------------- -- transaction module; multiprocessing-like Pool(); etc. +* GIL = Global Interpreter Lock -- a large demo of some well-known program where - it solves everything?... there is no such thing - because the large program's author have already - solved it +* initially: CPython was single threaded -- compare with garbage collection in C: +* for concurrency (but not parallelism): - - usually you do it with malloc()/free() + - provide concurrently running threads - - sometimes you need more control, and e.g. you add - some reference counts +* easiest way to retrofit into interpreter: - - sometimes you use more specialized versions for - performance, e.g. allocate in a pool and throw it - completely away at the end of some phase + - acquire GIL around the execution of bytecode instructions - - Boehm GC, a GC for C: what kind of demo can you - present for it? You take a C program, remove all - free(), relink malloc() to Boehm, and it works - more slowly... + - easy for refcounting, too - - nevertheless, GCC did exactly that. Why? 
-- so, the GIL: we already have different workarounds for - different kinds of problems (use "multiprocessing"; or - start N processes and have them communicate in one - way or another) +Consequences (+) +---------------- -- this talk is about the GIL's equivalent of the Boehm GC - for C: simplify your life for some problems, with a - reasonable performance cost +* atomic & isolated instructions: -- this is about anything where the GIL is a blocker, obviously + - things like ``list.append()`` are atomic + - tons of websites mention this + - latent races if Python becomes really parallel -- but also any program with "often-parallelizable" sections of - code +* sequential consistency: -- there are actually a lot of such programs around when we think about - it: Bottle, Twisted, Tornado, etc. serve multiple clients; many - CPU-consuming programs at some point need to iterate over some - large-ish data structure in some mostly-composable way; etc. + - less surprises; "all variables volatile" -=========================================== +Consequences (-) +---------------- + +* obviously, no parallelism + +* GIL not available to application: + + - all difficulties of concurrency still there + - need application-level locking + + +Removing the GIL +---------------- + +* 1. Fine-grained locking + +* 2. Shared-nothing + +* 3. 
Transactional memory + + +Fine-grained locking +-------------------- + +* replace GIL with locks on objs / data structures + +* accessing different objects can run in parallel + +* harder to implement: + + - many locks -> deadlock risks + - refcounting issue + +* overhead of lock/unlock on objs: + + - Jython depends on JVM for good lock removal + +* still need application-level locking + + +Shared-nothing +-------------- + +* each independent part of the program gets its own interpreter + +* simple implementation + +* gives workaround instead of direct replacement + +* not compatible to existing threaded applications, a priori + +* explicit communication: + + - good: clean model, no locks + - bad: limitations, overhead + + +Transactional Memory +-------------------- + +* like GIL, but instead of locking, each thread runs optimistically + +* "easy" to implement: + + - GIL acquire -> transaction start + + - GIL release -> transaction commit + +* overhead: cross-checking conflicting memory reads and writes, + and if necessary, cancel and restart transactions + +* HTM, STM, or some hybrids exist: + + - but mostly still research-only + + +Big Point +---------------------------- + +* application-level locks still needed... 
+ +* but *can be very coarse:* + + - the idea is to make sure, internally, that one transaction + covers the whole time during which the lock was acquired + + +Demo 1 +------ + +* Bottle web server + + +PyPy-STM +-------- + +* implementation of a specially-tailored STM: + + - a reusable C library + - called STMGC-C7 + +* used in PyPy to replace the GIL + +* current status: + + - basics work + - tons of things to improve + - tons of things to improve + - tons of things to improve + - tons of things to improve + - tons of things to improve + - tons of things to improve + + +Demo 2 +------ + +* counting primes + + +Summary +------- + +* Transactional Memory is still too researchy for production + +* Has the potential to enable parallelism: + + - as a replacement of ``multiprocessing`` + + - but also in existing applications not written for that + + - as long as they do multiple things that are "often independent" + + + Part 2 - Under The Hood -=========================================== +----------------------- - pictures "GIL" and "no GIL" diff --git a/talk/ep2014/stm/ui b/talk/ep2014/stm/ui new file mode 120000 --- /dev/null +++ b/talk/ep2014/stm/ui @@ -0,0 +1,1 @@ +../../stanford-ee380-2011/ui \ No newline at end of file From noreply at buildbot.pypy.org Wed Jul 16 20:30:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Jul 2014 20:30:03 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: updates Message-ID: <20140716183003.056CB1D2857@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5358:8d75833923b3 Date: 2014-07-16 20:27 +0200 http://bitbucket.org/pypy/extradoc/changeset/8d75833923b3/ Log: updates diff --git a/talk/ep2014/stm/stylesheet.css b/talk/ep2014/stm/stylesheet.css --- a/talk/ep2014/stm/stylesheet.css +++ b/talk/ep2014/stm/stylesheet.css @@ -99,6 +99,9 @@ clear: both; font-size: smaller } +div#header { + height: 88px ! 
important } + div.line-block { display: block ; margin-top: 1em ; diff --git a/talk/ep2014/stm/talk.html b/talk/ep2014/stm/talk.html --- a/talk/ep2014/stm/talk.html +++ b/talk/ep2014/stm/talk.html @@ -109,6 +109,9 @@ clear: both; font-size: smaller } +div#header { + height: 88px ! important } + div.line-block { display: block ; margin-top: 1em ; @@ -513,6 +516,15 @@
      • used in PyPy to replace the GIL
      • +
      • could also be used in CPython
          +
        • but refcounting needs replacing
        • +
        +
      • + + +
        +

        PyPy-STM status

        +
        • current status:
          • basics work
          • tons of things to improve
          • @@ -521,6 +533,7 @@
          • tons of things to improve
          • tons of things to improve
          • tons of things to improve
          • +
          • tons of things to improve
        @@ -535,7 +548,7 @@

        Summary

        • Transactional Memory is still too researchy for production
        • -
        • Has the potential to enable parallelism:
            +
          • Potential to enable parallelism:
            • as a replacement of multiprocessing
            • but also in existing applications not written for that
            • as long as they do multiple things that are "often independent"
            • diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst --- a/talk/ep2014/stm/talk.rst +++ b/talk/ep2014/stm/talk.rst @@ -152,6 +152,14 @@ * used in PyPy to replace the GIL +* could also be used in CPython + + - but refcounting needs replacing + + +PyPy-STM status +--------------- + * current status: - basics work @@ -161,6 +169,7 @@ - tons of things to improve - tons of things to improve - tons of things to improve + - tons of things to improve Demo 2 @@ -174,7 +183,7 @@ * Transactional Memory is still too researchy for production -* Has the potential to enable parallelism: +* Potential to enable parallelism: - as a replacement of ``multiprocessing`` From noreply at buildbot.pypy.org Wed Jul 16 20:30:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Jul 2014 20:30:04 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Part 2 might become an interactive session instead of slides Message-ID: <20140716183004.47B331D2857@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5359:a3b54af8a1bd Date: 2014-07-16 20:29 +0200 http://bitbucket.org/pypy/extradoc/changeset/a3b54af8a1bd/ Log: Part 2 might become an interactive session instead of slides diff --git a/talk/ep2014/stm/talk.html b/talk/ep2014/stm/talk.html --- a/talk/ep2014/stm/talk.html +++ b/talk/ep2014/stm/talk.html @@ -558,32 +558,7 @@

        Part 2 - Under The Hood

        -
          -
        • pictures "GIL" and "no GIL"
        • -
        • zoom with reads and writes
        • -
        • keep boundaries, each block is a _transaction_
        • -
        • completely the same semantics as when run with a GIL
        • -
        • write-write conflict
        • -
        • deadlock detection and resolution by abort-retry
        • -
        • read-write conflict: avoids (1) crashes, -(2) reads-from-the-past, (3) reads-from-the-future
        • -
        • reads are more common than writes: optimize read barriers
        • -
        • pypy-stm: write a thread-local flag "this object has been read", -show code for read barrier and fast-path of write barrier; -note about using the C library for CPython too
        • -
        • reads are not synchronized at all between CPUs, but it's wrong -to read data written by other in-progress transactions; -so we have to write elsewhere
        • -
        • but what if we read later an object we modified? doing any kind -of check in the read barrier makes it much more costly
        • -
        • a solution would be to give each thread its own "segment" of -memory, and copy data between them only at known points
        • -
        • mmap trick: we do that, but we use mmap sharing to view the same -pages of memory at several addresses in memory
        • -
        • show clang source code and assembler for %gs
        • -
        • picture with 15/16 objects, 1/16 read markers, one page control data
        • -
        • picture with nursery -- the GC can use the same write barrier
        • -
        +

        STMGC-C7

        diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst --- a/talk/ep2014/stm/talk.rst +++ b/talk/ep2014/stm/talk.rst @@ -196,44 +196,4 @@ Part 2 - Under The Hood ----------------------- - -- pictures "GIL" and "no GIL" - -- zoom with reads and writes - -- keep boundaries, each block is a _transaction_ - -- completely the same semantics as when run with a GIL - -- write-write conflict - -- deadlock detection and resolution by abort-retry - -- read-write conflict: avoids (1) crashes, - (2) reads-from-the-past, (3) reads-from-the-future - -- reads are more common than writes: optimize read barriers - -- pypy-stm: write a thread-local flag "this object has been read", - show code for read barrier and fast-path of write barrier; - note about using the C library for CPython too - -- reads are not synchronized at all between CPUs, but it's wrong - to read data written by other in-progress transactions; - so we have to write elsewhere - -- but what if we read later an object we modified? doing any kind - of check in the read barrier makes it much more costly - -- a solution would be to give each thread its own "segment" of - memory, and copy data between them only at known points - -- mmap trick: we do that, but we use mmap sharing to view the same - pages of memory at several addresses in memory - -- show clang source code and assembler for %gs - -- picture with 15/16 objects, 1/16 read markers, one page control data - -- picture with nursery -- the GC can use the same write barrier - +**STMGC-C7** From noreply at buildbot.pypy.org Wed Jul 16 20:49:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 16 Jul 2014 20:49:31 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add a figure. Add the demo directory. 
Message-ID: <20140716184931.90A511D2857@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5360:b6f43bff4d5a Date: 2014-07-16 20:49 +0200 http://bitbucket.org/pypy/extradoc/changeset/b6f43bff4d5a/ Log: Add a figure. Add the demo directory. diff --git a/talk/ep2014/stm/demo/bench-multiprocessing.py b/talk/ep2014/stm/demo/bench-multiprocessing.py new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/demo/bench-multiprocessing.py @@ -0,0 +1,15 @@ +from pyprimes import isprime +from multiprocessing import Pool + +def process(nstart): + subtotal = 0 + for n in xrange(nstart, nstart + 20000): + if isprime(n): + subtotal += 1 + return subtotal + +pool = Pool(4) +results = pool.map(process, xrange(0, 5000000, 20000)) +total = sum(results) + +print total diff --git a/talk/ep2014/stm/demo/bench-multithread.py b/talk/ep2014/stm/demo/bench-multithread.py new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/demo/bench-multithread.py @@ -0,0 +1,26 @@ +from pyprimes import isprime +from Queue import Queue, Empty + +subtotals = Queue() +queued = Queue() +nstarts = xrange(0, 5000000, 20000) +for nstart in nstarts: + queued.put(nstart) + +def f(): + while True: + nstart = queued.get() + subtotal = 0 + for n in xrange(nstart, nstart + 20000): + if isprime(n): + subtotal += 1 + subtotals.put(subtotal) + +import thread +for j in range(2): + thread.start_new_thread(f, ()) + +total = 0 +for n in nstarts: + total += subtotals.get() +print total diff --git a/talk/ep2014/stm/demo/bench-simple.py b/talk/ep2014/stm/demo/bench-simple.py new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/demo/bench-simple.py @@ -0,0 +1,8 @@ +from pyprimes import isprime + +total = 0 +for n in xrange(5000000): + if isprime(n): + total += 1 + +print total diff --git a/talk/ep2014/stm/demo/bench-stm.py b/talk/ep2014/stm/demo/bench-stm.py new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/demo/bench-stm.py @@ -0,0 +1,28 @@ +from pyprimes import isprime +from Queue import 
Queue, Empty +from __pypy__.thread import atomic + +subtotals = Queue() +queued = Queue() +nstarts = xrange(0, 5000000, 20000) +for nstart in nstarts: + queued.put(nstart) + +def f(): + while True: + nstart = queued.get() + subtotal = 0 + for n in xrange(nstart, nstart + 20000): + with atomic: + if isprime(n): + subtotal += 1 + subtotals.put(subtotal) + +import thread +for j in range(2): + thread.start_new_thread(f, ()) + +total = 0 +for n in nstarts: + total += subtotals.get() +print total diff --git a/talk/ep2014/stm/demo/pyprimes.py b/talk/ep2014/stm/demo/pyprimes.py new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/demo/pyprimes.py @@ -0,0 +1,1193 @@ +#!/usr/bin/env python + +## Module pyprimes.py +## +## Copyright (c) 2012 Steven D'Aprano. +## +## Permission is hereby granted, free of charge, to any person obtaining +## a copy of this software and associated documentation files (the +## "Software"), to deal in the Software without restriction, including +## without limitation the rights to use, copy, modify, merge, publish, +## distribute, sublicense, and/or sell copies of the Software, and to +## permit persons to whom the Software is furnished to do so, subject to +## the following conditions: +## +## The above copyright notice and this permission notice shall be +## included in all copies or substantial portions of the Software. +## +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +## IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +## CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +## TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +## SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +"""Generate and test for small primes using a variety of algorithms +implemented in pure Python. 
+ +This module includes functions for generating prime numbers, primality +testing, and factorising numbers into prime factors. Prime numbers are +positive integers with no factors other than themselves and 1. + + +Generating prime numbers +======================== + +To generate an unending stream of prime numbers, use the ``primes()`` +generator function: + + primes(): + Yield prime numbers 2, 3, 5, 7, 11, ... + + + >>> p = primes() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + +To efficiently generate pairs of (isprime(i), i) for integers i, use the +generator functions ``checked_ints()`` and ``checked_oddints()``: + + checked_ints() + Yield pairs of (isprime(i), i) for i=0,1,2,3,4,5... + + checked_oddints() + Yield pairs of (isprime(i), i) for odd i=1,3,5,7... + + + >>> it = checked_ints() + >>> [next(it) for _ in range(5)] + [(False, 0), (False, 1), (True, 2), (True, 3), (False, 4)] + + +Other convenience functions wrapping ``primes()`` are: + + ------------------ ---------------------------------------------------- + Function Description + ------------------ ---------------------------------------------------- + nprimes(n) Yield the first n primes, then stop. + nth_prime(n) Return the nth prime number. + prime_count(x) Return the number of primes less than or equal to x. + primes_below(x) Yield the primes less than or equal to x. + primes_above(x) Yield the primes strictly greater than x. + primesum(n) Return the sum of the first n primes. + primesums() Yield the partial sums of the prime numbers. + ------------------ ---------------------------------------------------- + + +Primality testing +================= + +These functions test whether numbers are prime or not. Primality tests fall +into two categories: exact tests, and probabilistic tests. + +Exact tests are guaranteed to give the correct result, but may be slow, +particularly for large arguments. 
Probabilistic tests do not guarantee +correctness, but may be much faster for large arguments. + +To test whether an integer is prime, use the ``isprime`` function: + + isprime(n) + Return True if n is prime, otherwise return False. + + + >>> isprime(101) + True + >>> isprime(102) + False + + +Exact primality tests are: + + isprime_naive(n) + Naive and slow trial division test for n being prime. + + isprime_division(n) + A less naive trial division test for n being prime. + + isprime_regex(n) + Uses a regex to test if n is a prime number. + + .. NOTE:: ``isprime_regex`` should be considered a novelty + rather than a serious test, as it is very slow. + + +Probabilistic tests do not guarantee correctness, but can be faster for +large arguments. There are two probabilistic tests: + + fermat(n [, base]) + Fermat primality test, returns True if n is a weak probable + prime to the given base, otherwise False. + + miller_rabin(n [, base]) + Miller-Rabin primality test, returns True if n is a strong + probable prime to the given base, otherwise False. + + +Both guarantee no false negatives: if either function returns False, the +number being tested is certainly composite. However, both are subject to false +positives: if they return True, the number is only possibly prime. + + + >>> fermat(12400013) # composite 23*443*1217 + False + >>> miller_rabin(14008971) # composite 3*947*4931 + False + + +Prime factorisation +=================== + +These functions return or yield the prime factors of an integer. + + factors(n) + Return a list of the prime factors of n. + + factorise(n) + Yield tuples (factor, count) for n. 
+ + +The ``factors(n)`` function lists repeated factors: + + + >>> factors(37*37*109) + [37, 37, 109] + + +The ``factorise(n)`` generator yields a 2-tuple for each unique factor, giving +the factor itself and the number of times it is repeated: + + >>> list(factorise(37*37*109)) + [(37, 2), (109, 1)] + + +Alternative and toy prime number generators +=========================================== + +These functions are alternative methods of generating prime numbers. Unless +otherwise stated, they generate prime numbers lazily on demand. These are +supplied for educational purposes and are generally slower or less efficient +than the preferred ``primes()`` generator. + + -------------- -------------------------------------------------------- + Function Description + -------------- -------------------------------------------------------- + croft() Yield prime numbers using the Croft Spiral sieve. + erat(n) Return primes up to n by the sieve of Eratosthenes. + sieve() Yield primes using the sieve of Eratosthenes. + cookbook() Yield primes using "Python Cookbook" algorithm. + wheel() Yield primes by wheel factorization. + -------------- -------------------------------------------------------- + + .. TIP:: In the current implementation, the fastest of these + generators is aliased as ``primes()``. + + +""" + + +from __future__ import division + + +import functools +import itertools +import random + +from re import match as _re_match + + +# Module metadata. +__version__ = "0.1.2a" +__date__ = "2012-08-25" +__author__ = "Steven D'Aprano" +__author_email__ = "steve+python at pearwood.info" + +__all__ = ['primes', 'checked_ints', 'checked_oddints', 'nprimes', + 'primes_above', 'primes_below', 'nth_prime', 'prime_count', + 'primesum', 'primesums', 'warn_probably', 'isprime', 'factors', + 'factorise', + ] + + +# ============================ +# Python 2.x/3.x compatibility +# ============================ + +# This module should support 2.5+, including Python 3. 
+ +try: + next +except NameError: + # No next() builtin, so we're probably running Python 2.5. + # Use a simplified version (without support for default). + def next(iterator): + return iterator.next() + +try: + range = xrange +except NameError: + # No xrange built-in, so we're almost certainly running Python3 + # and range is already a lazy iterator. + assert type(range(3)) is not list + +try: + from itertools import ifilter as filter, izip as zip +except ImportError: + # Python 3, where filter and zip are already lazy. + assert type(filter(None, [1, 2])) is not list + assert type(zip("ab", [1, 2])) is not list + +try: + from itertools import compress +except ImportError: + # Must be Python 2.x, so we need to roll our own. + def compress(data, selectors): + """compress('ABCDEF', [1,0,1,0,1,1]) --> A C E F""" + return (d for d, s in zip(data, selectors) if s) + +try: + from math import isfinite +except ImportError: + # Python 2.6 or older. + try: + from math import isnan, isinf + except ImportError: + # Python 2.5. Quick and dirty substitutes. + def isnan(x): + return x != x + def isinf(x): + return x - x != 0 + def isfinite(x): + return not (isnan(x) or isinf(x)) + + +# ===================== +# Helpers and utilities +# ===================== + +def _validate_int(obj): + """Raise an exception if obj is not an integer.""" + m = int(obj + 0) # May raise TypeError, or OverflowError. + if obj != m: + raise ValueError('expected an integer but got %r' % obj) + + +def _validate_num(obj): + """Raise an exception if obj is not a finite real number.""" + m = obj + 0 # May raise TypeError. + if not isfinite(m): + raise ValueError('expected a finite real number but got %r' % obj) + + +def _base_to_bases(base, n): + if isinstance(base, tuple): + bases = base + else: + bases = (base,) + for b in bases: + _validate_int(b) + if not 1 <= b < n: + # Note that b=1 is a degenerate case which is always a prime + # witness for both the Fermat and Miller-Rabin tests. 
I mention + # this for completeness, not because we need to do anything + # about it. + raise ValueError('base %d out of range 1...%d' % (b, n-1)) + return bases + + +# ======================= +# Prime number generators +# ======================= + +# The preferred generator to use is ``primes()``, which will be set to the +# "best" of these generators. (If you disagree with my judgement of best, +# feel free to use the generator of your choice.) + + +def erat(n): + """Return a list of primes up to and including n. + + This is a fixed-size version of the Sieve of Eratosthenes, using an + adaptation of the traditional algorithm. + + >>> erat(30) + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + >>> erat(10000) == list(primes_below(10000)) + True + + """ + _validate_int(n) + # Generate a fixed array of integers. + arr = list(range(n+1)) # A list is faster than an array + # Cross out 0 and 1 since they aren't prime. + arr[0] = arr[1] = None + i = 2 + while i*i <= n: + # Cross out all the multiples of i starting from i**2. + for p in range(i*i, n+1, i): + arr[p] = None + # Advance to the next number not crossed off. + i += 1 + while i <= n and arr[i] is None: + i += 1 + return list(filter(None, arr)) + + +def sieve(): + """Yield prime integers using the Sieve of Eratosthenes. + + This algorithm is modified to generate the primes lazily rather than the + traditional version which operates on a fixed size array of integers. + """ + # This is based on a paper by Melissa E. O'Neill, with an implementation + # given by Gerald Britton: + # http://mail.python.org/pipermail/python-list/2009-January/1188529.html + innersieve = sieve() + prevsq = 1 + table = {} + i = 2 + while True: + # Note: this explicit test is slightly faster than using + # prime = table.pop(i, None) and testing for None. 
+ if i in table: + prime = table[i] + del table[i] + nxt = i + prime + while nxt in table: + nxt += prime + table[nxt] = prime + else: + yield i + if i > prevsq: + j = next(innersieve) + prevsq = j**2 + table[prevsq] = j + i += 1 + + +def cookbook(): + """Yield prime integers lazily using the Sieve of Eratosthenes. + + Another version of the algorithm, based on the Python Cookbook, + 2nd Edition, recipe 18.10, variant erat2. + """ + # http://onlamp.com/pub/a/python/excerpt/pythonckbk_chap1/index1.html?page=2 + table = {} + yield 2 + # Iterate over [3, 5, 7, 9, ...]. The following is equivalent to, but + # faster than, (2*i+1 for i in itertools.count(1)) + for q in itertools.islice(itertools.count(3), 0, None, 2): + # Note: this explicit test is marginally faster than using + # table.pop(i, None) and testing for None. + if q in table: + p = table[q]; del table[q] # Faster than pop. + x = p + q + while x in table or not (x & 1): + x += p + table[x] = p + else: + table[q*q] = q + yield q + + +def croft(): + """Yield prime integers using the Croft Spiral sieve. + + This is a variant of wheel factorisation modulo 30. + """ + # Implementation is based on erat3 from here: + # http://stackoverflow.com/q/2211990 + # and this website: + # http://www.primesdemystified.com/ + # Memory usage increases roughly linearly with the number of primes seen. + # dict ``roots`` stores an entry x:p for every prime p. + for p in (2, 3, 5): + yield p + roots = {9: 3, 25: 5} # Map d**2 -> d. + primeroots = frozenset((1, 7, 11, 13, 17, 19, 23, 29)) + selectors = (1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0) + for q in compress( + # Iterate over prime candidates 7, 9, 11, 13, ... + itertools.islice(itertools.count(7), 0, None, 2), + # Mask out those that can't possibly be prime. + itertools.cycle(selectors) + ): + # Using dict membership testing instead of pop gives a + # 5-10% speedup over the first three million primes. 
+ if q in roots: + p = roots[q] + del roots[q] + x = q + 2*p + while x in roots or (x % 30) not in primeroots: + x += 2*p + roots[x] = p + else: + roots[q*q] = q + yield q + + +def wheel(): + """Generate prime numbers using wheel factorisation modulo 210.""" + for i in (2, 3, 5, 7, 11): + yield i + # The following constants are taken from the paper by O'Neill. + spokes = (2, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, + 8, 4, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, + 6, 4, 2, 4, 2, 10, 2, 10) + assert len(spokes) == 48 + # This removes about 77% of the composites that we would otherwise + # need to divide by. + found = [(11, 121)] # Smallest prime we care about, and its square. + for incr in itertools.cycle(spokes): + i += incr + for p, p2 in found: + if p2 > i: # i must be a prime. + found.append((i, i*i)) + yield i + break + elif i % p == 0: # i must be composite. + break + else: # This should never happen. + raise RuntimeError("internal error: ran out of prime divisors") + + +# This is the preferred way of generating prime numbers. Set this to the +# fastest/best generator. +primes = croft + + +# === Algorithms to avoid === + +class Awful: + """Awful and naive prime functions namespace. + + A collection of prime-related algorithms which are supplied for + educational purposes, as toys, curios, or as terrible warnings on + what **not** to do. + + None of these methods have acceptable performance; they are barely + tolerable even for the first 100 primes. + """ + + # === Prime number generators === + + @staticmethod + def naive_primes1(): + """Generate prime numbers naively, and REALLY slowly. + + >>> p = Awful.naive_primes1() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + This is about as awful as a straight-forward algorithm to generate + primes can get without deliberate pessimation. 
This algorithm does + not make even the most trivial optimizations: + + - it tests all numbers as potential primes, whether odd or even, + instead of skipping even numbers apart from 2; + - it checks for primality by dividing against every number less + than the candidate prime itself, instead of stopping at the + square root of the candidate; + - it fails to bail out early when it finds a factor, instead + pointlessly keeps testing. + + The result is that this is horribly slow. + """ + i = 2 + yield i + while True: + i += 1 + composite = False + for p in range(2, i): + if i%p == 0: + composite = True + if not composite: # It must be a prime. + yield i + + @staticmethod + def naive_primes2(): + """Generate prime numbers naively, and very slowly. + + >>> p = Awful.naive_primes2() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + This is a little better than ``naive_primes1``, but still horribly + slow. It makes a single optimization by using a short-circuit test + for primality testing: as soon as a factor is found, the candidate + is rejected immediately. + """ + i = 2 + yield i + while True: + i += 1 + if all(i%p != 0 for p in range(2, i)): + yield i + + @staticmethod + def naive_primes3(): + """Generate prime numbers naively, and very slowly. + + >>> p = Awful.naive_primes3() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + This is an incremental improvement over ``naive_primes2`` by only + testing odd numbers as potential primes and factors. + """ + yield 2 + i = 3 + yield i + while True: + i += 2 + if all(i%p != 0 for p in range(3, i, 2)): + yield i + + @staticmethod + def trial_division(): + """Generate prime numbers using a simple trial division algorithm. + + >>> p = Awful.trial_division() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + This is the first non-naive algorithm. 
Due to its simplicity, it may + perform acceptably for the first hundred or so primes, if your needs + are not very demanding. However, it does not scale well for large + numbers of primes. + + This uses three optimizations: + + - only test odd numbers for primality; + - only check against the prime factors already seen; + - stop checking at the square root of the number being tested. + + With these three optimizations, we get asymptotic behaviour of + O(N*sqrt(N)/(log N)**2) where N is the number of primes found. + + Despite these , this is still unacceptably slow, especially + as the list of memorised primes grows. + """ + yield 2 + primes = [2] + i = 3 + while True: + it = itertools.takewhile(lambda p, i=i: p*p <= i, primes) + if all(i%p != 0 for p in it): + primes.append(i) + yield i + i += 2 + + @staticmethod + def turner(): + """Generate prime numbers very slowly using Euler's sieve. + + >>> p = Awful.turner() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + The function is named for David Turner, who developed this implementation + in a paper in 1975. Due to its simplicity, it has become very popular, + particularly in Haskell circles where it is usually implemented as some + variation of:: + + primes = sieve [2..] + sieve (p : xs) = p : sieve [x | x <- xs, x `mod` p > 0] + + This algorithm is sometimes wrongly described as the Sieve of + Eratosthenes, but it is not, it is a version of Euler's Sieve. + + Although simple, it is extremely slow and inefficient, with + asymptotic behaviour of O(N**2/(log N)**2) which is even worse than + trial division, and only marginally better than ``naive_primes1``. + O'Neill calls this the "Sleight on Eratosthenes". 
+ """ + # References: + # http://en.wikipedia.org/wiki/Sieve_of_Eratosthenes + # http://en.literateprograms.org/Sieve_of_Eratosthenes_(Haskell) + # http://www.cs.hmc.edu/~oneill/papers/Sieve-JFP.pdf + # http://www.haskell.org/haskellwiki/Prime_numbers + nums = itertools.count(2) + while True: + prime = next(nums) + yield prime + nums = filter(lambda v, p=prime: (v % p) != 0, nums) + + # === Prime number testing === + + @staticmethod + def isprime_naive(n): + """Naive primality test using naive and unoptimized trial division. + + >>> Awful.isprime_naive(17) + True + >>> Awful.isprime_naive(18) + False + + Naive, slow but thorough test for primality using unoptimized trial + division. This function does far too much work, and consequently is very + slow, but it is simple enough to verify by eye. + """ + _validate_int(n) + if n == 2: return True + if n < 2 or n % 2 == 0: return False + for i in range(3, int(n**0.5)+1, 2): + if n % i == 0: + return False + return True + + @staticmethod + def isprime_regex(n): + """Slow primality test using a regular expression. + + >>> Awful.isprime_regex(11) + True + >>> Awful.isprime_regex(15) + False + + Unsurprisingly, this is not efficient, and should be treated as a + novelty rather than a serious implementation. It is O(N^2) in time + and O(N) in memory: in other words, slow and expensive. + """ + _validate_int(n) + return not _re_match(r'^1?$|^(11+?)\1+$', '1'*n) + # For a Perl or Ruby version of this, see here: + # http://montreal.pm.org/tech/neil_kandalgaonkar.shtml + # http://www.noulakaz.net/weblog/2007/03/18/a-regular-expression-to-check-for-prime-numbers/ + + + +# ===================== +# Convenience functions +# ===================== + +def checked_ints(): + """Yield tuples (isprime(i), i) for integers i=0, 1, 2, 3, 4, ... 
+ + >>> it = checked_ints() + >>> [next(it) for _ in range(6)] + [(False, 0), (False, 1), (True, 2), (True, 3), (False, 4), (True, 5)] + + """ + oddnums = checked_oddints() + yield (False, 0) + yield next(oddnums) + yield (True, 2) + for t in oddnums: + yield t + yield (False, t[1]+1) + + +def checked_oddints(): + """Yield tuples (isprime(i), i) for odd integers i=1, 3, 5, 7, 9, ... + + >>> it = checked_oddints() + >>> [next(it) for _ in range(6)] + [(False, 1), (True, 3), (True, 5), (True, 7), (False, 9), (True, 11)] + >>> [next(it) for _ in range(6)] + [(True, 13), (False, 15), (True, 17), (True, 19), (False, 21), (True, 23)] + + """ + yield (False, 1) + odd_primes = primes() + _ = next(odd_primes) # Skip 2. + prev = 1 + for p in odd_primes: + # Yield the non-primes between the previous prime and + # the current one. + for i in itertools.islice(itertools.count(prev + 2), 0, None, 2): + if i >= p: break + yield (False, i) + # And yield the current prime. + yield (True, p) + prev = p + + +def nprimes(n): + """Convenience function that yields the first n primes. + + >>> list(nprimes(10)) + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + """ + _validate_int(n) + return itertools.islice(primes(), n) + + +def primes_above(x): + """Convenience function that yields primes strictly greater than x. + + >>> next(primes_above(200)) + 211 + + """ + _validate_num(x) + it = primes() + # Consume the primes below x as fast as possible, then yield the rest. + p = next(it) + while p <= x: + p = next(it) + yield p + for p in it: + yield p + + +def primes_below(x): + """Convenience function yielding primes less than or equal to x. + + >>> list(primes_below(20)) + [2, 3, 5, 7, 11, 13, 17, 19] + + """ + _validate_num(x) + for p in primes(): + if p > x: + return + yield p + + +def nth_prime(n): + """nth_prime(n) -> int + + Return the nth prime number, starting counting from 1. Equivalent to + p-subscript-n in standard maths notation. + + >>> nth_prime(1) # First prime is 2. 
+ 2 + >>> nth_prime(5) + 11 + >>> nth_prime(50) + 229 + + """ + # http://www.research.att.com/~njas/sequences/A000040 + _validate_int(n) + if n < 1: + raise ValueError('argument must be a positive integer') + return next(itertools.islice(primes(), n-1, None)) + + +def prime_count(x): + """prime_count(x) -> int + + Returns the number of prime numbers less than or equal to x. + It is also known as the Prime Counting Function, or pi(x). + (Not to be confused with the constant pi = 3.1415....) + + >>> prime_count(20) + 8 + >>> prime_count(10000) + 1229 + + The number of primes less than x is approximately x/(ln x - 1). + """ + # See also: http://primes.utm.edu/howmany.shtml + # http://mathworld.wolfram.com/PrimeCountingFunction.html + _validate_num(x) + return sum(1 for p in primes_below(x)) + + +def primesum(n): + """primesum(n) -> int + + primesum(n) returns the sum of the first n primes. + + >>> primesum(9) + 100 + >>> primesum(49) + 4888 + + The sum of the first n primes is approximately n**2*ln(n)/2. + """ + # See: http://mathworld.wolfram.com/PrimeSums.html + # http://www.research.att.com/~njas/sequences/A007504 + _validate_int(n) + return sum(nprimes(n)) + + +def primesums(): + """Yield the partial sums of the prime numbers. + + >>> p = primesums() + >>> [next(p) for _ in range(5)] # primes 2, 3, 5, 7, 11, ... + [2, 5, 10, 17, 28] + + """ + n = 0 + for p in primes(): + n += p + yield n + + +# ================= +# Primality testing +# ================= + +def isprime(n, trials=25, warn=False): + """Return True if n is a prime number, and False if it is not. + + >>> isprime(101) + True + >>> isprime(102) + False + + ========== ======================================================= + Argument Description + ========== ======================================================= + n Number being tested for primality. + trials Count of primality tests to perform (default 25). + warn If true, warn on inexact results. (Default is false.) 
+ ========== ======================================================= + + For values of ``n`` under approximately 341 trillion, this function is + exact and the arguments ``trials`` and ``warn`` are ignored. + + Above this cut-off value, this function may be probabilistic with a small + chance of wrongly reporting a composite (non-prime) number as prime. Such + composite numbers wrongly reported as prime are "false positive" errors. + + The argument ``trials`` controls the risk of a false positive error. The + larger number of trials, the less the chance of an error (and the slower + the function). With the default value of 25, you can expect roughly one + such error every million trillion tests, which in practical terms is + essentially "never". + + ``isprime`` cannot give a false negative error: if it reports a number is + composite, it is certainly composite, but if it reports a number is prime, + it may be only probably prime. If you pass a true value for argument + ``warn``, then a warning will be raised if the result is probabilistic. + """ + _validate_int(n) + # Deal with trivial cases first. + if n < 2: + return False + elif n == 2: + return True + elif n%2 == 0: + return False + elif n <= 7: # 3, 5, 7 + return True + is_probabilistic, bases = _choose_bases(n, trials) + is_prime = miller_rabin(n, bases) + if is_prime and is_probabilistic and warn: + import warnings + warnings.warn("number is only probably prime not certainly prime") + return is_prime + + +def _choose_bases(n, count): + """Choose appropriate bases for the Miller-Rabin primality test. + + If n is small enough, returns a tuple of bases which are provably + deterministic for that n. If n is too large, return a selection of + possibly random bases. + + With k distinct Miller-Rabin tests, the probability of a false + positive result is no more than 1/(4**k). 
+ """ + # The Miller-Rabin test is deterministic and completely accurate for + # moderate sizes of n using a surprisingly tiny number of tests. + # See: Pomerance, Selfridge and Wagstaff (1980), and Jaeschke (1993) + # http://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test + prob = False + if n < 1373653: # ~1.3 million + bases = (2, 3) + elif n < 9080191: # ~9.0 million + bases = (31, 73) + elif n < 4759123141: # ~4.7 billion + # Note to self: checked up to approximately 394 million in 9 hours. + bases = (2, 7, 61) + elif n < 2152302898747: # ~2.1 trillion + bases = (2, 3, 5, 7, 11) + elif n < 3474749660383: # ~3.4 trillion + bases = (2, 3, 5, 7, 11, 13) + elif n < 341550071728321: # ~341 trillion + bases = (2, 3, 5, 7, 11, 13, 17) + else: + # n is sufficiently large that we have to use a probabilistic test. + prob = True + bases = tuple([random.randint(2, n-1) for _ in range(count)]) + # FIXME Because bases are chosen at random, there may be duplicates + # although with extremely small probability given the size of n. + # FIXME Is it worthwhile to special case some of the lower, easier + # bases? bases = [2, 3, 5, 7, 11, 13, 17] + [random... ]? + # Note: we can always be deterministic, no matter how large N is, by + # exhaustive testing against each i in the inclusive range + # 1 ... min(n-1, floor(2*(ln N)**2)). We don't do this, because it is + # expensive for large N, and of no real practical benefit. + return prob, bases + + +def isprime_division(n): + """isprime_division(integer) -> True|False + + Exact primality test returning True if the argument is a prime number, + otherwise False. + + >>> isprime_division(11) + True + >>> isprime_division(12) + False + + This function uses trial division by the primes, skipping non-primes. 
+ """ + _validate_int(n) + if n < 2: + return False + limit = n**0.5 + for divisor in primes(): + if divisor > limit: break + if n % divisor == 0: return False + return True + + +# === Probabilistic primality tests === + +def fermat(n, base=2): + """fermat(n [, base]) -> True|False + + ``fermat(n, base)`` is a probabilistic test for primality which returns + True if integer n is a weak probable prime to the given integer base, + otherwise n is definitely composite and False is returned. + + ``base`` must be a positive integer between 1 and n-1 inclusive, or a + tuple of such bases. By default, base=2. + + If ``fermat`` returns False, that is definite proof that n is composite: + there are no false negatives. However, if it returns True, that is only + provisional evidence that n is prime. For example: + + >>> fermat(99, 7) + False + >>> fermat(29, 7) + True + + We can conclude that 99 is definitely composite, and state that 7 is a + witness that 29 may be prime. + + As the Fermat test is probabilistic, composite numbers will sometimes + pass a test, or even repeated tests: + + >>> fermat(3*11*17, 7) # A pseudoprime to base 7. + True + + You can perform multiple tests with a single call by passing a tuple of + ints as ``base``. The number must pass the Fermat test for all the bases + in order to return True. If any test fails, ``fermat`` will return False. + + >>> fermat(41041, (17, 23, 356, 359)) # 41041 = 7*11*13*41 + True + >>> fermat(41041, (17, 23, 356, 359, 363)) + False + + If a number passes ``k`` Fermat tests, we can conclude that the + probability that it is either a prime number, or a particular type of + pseudoprime known as a Carmichael number, is at least ``1 - (1/2**k)``. + """ + # http://en.wikipedia.org/wiki/Fermat_primality_test + _validate_int(n) + bases = _base_to_bases(base, n) + # Deal with the simple deterministic cases first. 
+ if n < 2: + return False + elif n == 2: + return True + elif n % 2 == 0: + return False + # Now the Fermat test proper. + for a in bases: + if pow(a, n-1, n) != 1: + return False # n is certainly composite. + return True # All of the bases are witnesses for n being prime. + + +def miller_rabin(n, base=2): + """miller_rabin(integer [, base]) -> True|False + + ``miller_rabin(n, base)`` is a probabilistic test for primality which + returns True if integer n is a strong probable prime to the given integer + base, otherwise n is definitely composite and False is returned. + + ``base`` must be a positive integer between 1 and n-1 inclusive, or a + tuple of such bases. By default, base=2. + + If ``miller_rabin`` returns False, that is definite proof that n is + composite: there are no false negatives. However, if it returns True, + that is only provisional evidence that n is prime: + + >>> miller_rabin(99, 7) + False + >>> miller_rabin(29, 7) + True + + We can conclude from this that 99 is definitely composite, and that 29 is + possibly prime. + + As the Miller-Rabin test is probabilistic, composite numbers will + sometimes pass one or more tests: + + >>> miller_rabin(3*11*17, 103) # 3*11*17=561, the 1st Carmichael number. + True + + You can perform multiple tests with a single call by passing a tuple of + ints as ``base``. The number must pass the Miller-Rabin test for each of + the bases before it will return True. If any test fails, ``miller_rabin`` + will return False. + + >>> miller_rabin(41041, (16, 92, 100, 256)) # 41041 = 7*11*13*41 + True + >>> miller_rabin(41041, (16, 92, 100, 256, 288)) + False + + If a number passes ``k`` Miller-Rabin tests, we can conclude that the + probability that it is a prime number is at least ``1 - (1/4**k)``. + """ + # http://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test + _validate_int(n) + bases = _base_to_bases(base, n) + # Deal with the trivial cases. 
+ if n < 2: + return False + if n == 2: + return True + elif n % 2 == 0: + return False + # Now perform the Miller-Rabin test proper. + # Start by writing n-1 as 2**s * d. + d, s = _factor2(n-1) + for a in bases: + if _is_composite(a, d, s, n): + return False # n is definitely composite. + # If we get here, all of the bases are witnesses for n being prime. + return True + + +def _factor2(n): + """Factorise positive integer n as d*2**i, and return (d, i). + + >>> _factor2(768) + (3, 8) + >>> _factor2(18432) + (9, 11) + + Private function used internally by ``miller_rabin``. + """ + assert n > 0 and int(n) == n + i = 0 + d = n + while 1: + q, r = divmod(d, 2) + if r == 1: + break + i += 1 + d = q + assert d%2 == 1 + assert d*2**i == n + return (d, i) + + +def _is_composite(b, d, s, n): + """_is_composite(b, d, s, n) -> True|False + + Tests base b to see if it is a witness for n being composite. Returns + True if n is definitely composite, otherwise False if it *may* be prime. + + >>> _is_composite(4, 3, 7, 385) + True + >>> _is_composite(221, 3, 7, 385) + False + + Private function used internally by ``miller_rabin``. + """ + assert d*2**s == n-1 + if pow(b, d, n) == 1: + return False + for i in range(s): + if pow(b, 2**i * d, n) == n-1: + return False + return True + + +# =================== +# Prime factorisation +# =================== + +if __debug__: + # Set _EXTRA_CHECKS to True to enable potentially expensive assertions + # in the factors() and factorise() functions. This is only defined or + # checked when assertions are enabled. + _EXTRA_CHECKS = False + + +def factors(n): + """factors(integer) -> [list of factors] + + Returns a list of the (mostly) prime factors of integer n. For negative + integers, -1 is included as a factor. If n is 0 or 1, [n] is returned as + the only factor. Otherwise all the factors will be prime. 
+ + >>> factors(-693) + [-1, 3, 3, 7, 11] + >>> factors(55614) + [2, 3, 13, 23, 31] + + """ + _validate_int(n) + result = [] + for p, count in factorise(n): + result.extend([p]*count) + if __debug__: + # The following test only occurs if assertions are on. + if _EXTRA_CHECKS: + prod = 1 + for x in result: + prod *= x + assert prod == n, ('factors(%d) failed multiplication test' % n) + return result + + +def factorise(n): + """factorise(integer) -> yield factors of integer lazily + + >>> list(factorise(3*7*7*7*11)) + [(3, 1), (7, 3), (11, 1)] + + Yields tuples of (factor, count) where each factor is unique and usually + prime, and count is an integer 1 or larger. + + The factors are prime, except under the following circumstances: if the + argument n is negative, -1 is included as a factor; if n is 0 or 1, it + is given as the only factor. For all other integer n, all of the factors + returned are prime. + """ + _validate_int(n) + if n in (0, 1, -1): + yield (n, 1) + return + elif n < 0: + yield (-1, 1) + n = -n + assert n >= 2 + for p in primes(): + if p*p > n: break + count = 0 + while n % p == 0: + count += 1 + n //= p + if count: + yield (p, count) + if n != 1: + if __debug__: + # The following test only occurs if assertions are on. + if _EXTRA_CHECKS: + assert isprime(n), ('failed isprime test for %d' % n) + yield (n, 1) + + + +if __name__ == '__main__': + import doctest + doctest.testmod() + diff --git a/talk/ep2014/stm/fig4.svg b/talk/ep2014/stm/fig4.svg new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/fig4.svg @@ -0,0 +1,4 @@ + + + + diff --git a/talk/ep2014/stm/talk.html b/talk/ep2014/stm/talk.html --- a/talk/ep2014/stm/talk.html +++ b/talk/ep2014/stm/talk.html @@ -497,10 +497,17 @@
      • but can be very coarse:
        • the idea is to make sure, internally, that one transaction covers the whole time during which the lock was acquired
        • +
        • even two big transactions can hopefully run in parallel
        • +
        • even if they both acquire and release the same lock
      • +
        +

        Big Point

        + +fig4.svg +

        Demo 1

          diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst --- a/talk/ep2014/stm/talk.rst +++ b/talk/ep2014/stm/talk.rst @@ -135,6 +135,16 @@ - the idea is to make sure, internally, that one transaction covers the whole time during which the lock was acquired + - even two big transactions can hopefully run in parallel + + - even if they both acquire and release the *same* lock + + +Big Point +--------- + +.. image:: fig4.svg + Demo 1 ------ From noreply at buildbot.pypy.org Wed Jul 16 21:16:58 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 16 Jul 2014 21:16:58 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: cling fixes Message-ID: <20140716191658.D9E161C33B9@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r72456:0de503991f11 Date: 2014-07-16 12:16 -0700 http://bitbucket.org/pypy/pypy/changeset/0de503991f11/ Log: cling fixes diff --git a/pypy/module/cppyy/capi/builtin_capi.py b/pypy/module/cppyy/capi/builtin_capi.py --- a/pypy/module/cppyy/capi/builtin_capi.py +++ b/pypy/module/cppyy/capi/builtin_capi.py @@ -3,6 +3,7 @@ import reflex_capi as backend #import cint_capi as backend +#import cling_capi as backend from pypy.module.cppyy.capi.capi_types import C_SCOPE, C_TYPE, C_OBJECT,\ C_METHOD, C_INDEX, C_INDEX_ARRAY, WLAVC_INDEX,\ diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py --- a/pypy/module/cppyy/capi/cling_capi.py +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -14,12 +14,13 @@ (config_stat, incdir) = commands.getstatusoutput("root-config --incdir") if os.environ.get("ROOTSYS"): - if config_stat != 0: # presumably Reflex-only - rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), - os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + if config_stat != 0: 
+ rootincpath.append(os.path.join(os.environ["ROOTSYS"], "include")) rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] else: - rootincpath = [incdir] + rootincpath.append(incdir) rootlibpath = commands.getoutput("root-config --libdir").split() else: if config_stat == 0: @@ -45,7 +46,7 @@ includes=["clingcwrapper.h"], library_dirs=rootlibpath, libraries=["Cling"], - compile_extra=["-fno-strict-aliasing"], + compile_extra=["-fno-strict-aliasing", "-std=c++11"], use_cpp_linker=True, ) diff --git a/pypy/module/cppyy/src/clingcwrapper.cxx b/pypy/module/cppyy/src/clingcwrapper.cxx --- a/pypy/module/cppyy/src/clingcwrapper.cxx +++ b/pypy/module/cppyy/src/clingcwrapper.cxx @@ -21,7 +21,7 @@ #include "cling/Interpreter/DynamicLibraryManager.h" #include "cling/Interpreter/Interpreter.h" #include "cling/Interpreter/LookupHelper.h" -#include "cling/Interpreter/StoredValueRef.h" +#include "cling/Interpreter/Value.h" #include "cling/MetaProcessor/MetaProcessor.h" #include "llvm/ADT/SmallVector.h" @@ -157,7 +157,8 @@ cppyy_scope_t cppyy_get_scope(const char* scope_name) { const cling::LookupHelper& lh = gCppyy_Cling->getLookupHelper(); const Type* type = 0; - const Decl* decl = lh.findScope(scope_name, &type, /* intantiateTemplate= */ true); + const Decl* decl = lh.findScope(scope_name, + cling::LookupHelper::NoDiagnostics, &type, /* intantiateTemplate= */ true); if (!decl) { //std::string buf = TClassEdit::InsertStd(name); //decl = lh.findScope(buf, &type, /* intantiateTemplate= */ true); @@ -190,8 +191,8 @@ // TODO: expect the below to live in libCling.so static CPPYY_Cling_Wrapper_t make_wrapper(const FunctionDecl* fdecl); -static void exec_with_valref_return(void* address, cling::StoredValueRef* ret, const FunctionDecl*); -static long long sv_to_long_long(const cling::StoredValueRef& svref); +static void exec_with_valref_return(void* address, cling::Value*, const FunctionDecl*); +static long long sv_to_long_long(const 
cling::Value& val); // -- TODO: expect the above to live in libCling.so @@ -200,7 +201,7 @@ if (s_wrappers.find(method) == s_wrappers.end()) { make_wrapper((FunctionDecl*)method); } - cling::StoredValueRef ret; + cling::Value ret; // std::vector arguments = build_args(nargs, args); // CPPYY_Cling_Wrapper_t cb = (CPPYY_Cling_Wrapper_t)method; exec_with_valref_return((void*)self, &ret, (FunctionDecl*)method); @@ -362,7 +363,7 @@ // R__LOCKGUARD2(gInterpreterMutex); std::cout << " NOW LOADING: " << lib_name << std::endl; - cling::StoredValueRef call_res; + cling::Value call_res; cling::Interpreter::CompilationResult comp_res = cling::Interpreter::kSuccess; std::ostringstream line; line << "#include \"" << lib_name << ".h\""; @@ -377,8 +378,8 @@ // UpdateListOfLoadedSharedLibraries(); // } switch (res) { - case cling::DynamicLibraryManager::kLoadLibSuccess: return (void*)1; - case cling::DynamicLibraryManager::kLoadLibExists: return (void*)2; + case cling::DynamicLibraryManager::kLoadLibSuccess: return (void*)1; + case cling::DynamicLibraryManager::kLoadLibAlreadyLoaded: return (void*)2; default: break; }; return (void*)1; @@ -389,68 +390,130 @@ // TODO: expect the below to live in libCling.so -template -T sv_to_long_long_u_or_not(const cling::StoredValueRef& svref) { - const cling::Value& valref = svref.get(); - QualType QT = valref.getClangType(); - if (QT.isNull()) { - print_error("sv_to_long_long_u_or_not", "null type!"); - return 0; - } - llvm::GenericValue gv = valref.getGV(); - if (QT->isMemberPointerType()) { - const MemberPointerType* MPT = - QT->getAs(); - if (MPT->isMemberDataPointer()) { - return (T) (ptrdiff_t) gv.PointerVal; - } - return (T) gv.PointerVal; - } - if (QT->isPointerType() || QT->isArrayType() || QT->isRecordType() || - QT->isReferenceType()) { - return (T) gv.PointerVal; - } - if (const EnumType* ET = llvm::dyn_cast(&*QT)) { - if (ET->getDecl()->getIntegerType()->hasSignedIntegerRepresentation()) - return (T) gv.IntVal.getSExtValue(); - else 
- return (T) gv.IntVal.getZExtValue(); - } - if (const BuiltinType* BT = llvm::dyn_cast(&*QT)) { - if (BT->isSignedInteger()) { - return gv.IntVal.getSExtValue(); - } else if (BT->isUnsignedInteger()) { - return (T) gv.IntVal.getZExtValue(); - } else { - switch (BT->getKind()) { - case BuiltinType::Float: - return (T) gv.FloatVal; - case BuiltinType::Double: - return (T) gv.DoubleVal; - case BuiltinType::LongDouble: - // FIXME: Implement this! - break; - case BuiltinType::NullPtr: - // C++11 nullptr - return 0; - default: break; - } +template +returnType sv_to(const cling::Value& val) +{ + QualType QT = val.getType(); + if (QT->isMemberPointerType()) { + const MemberPointerType* MPT = QT->getAs(); + if (MPT->isMemberDataPointer()) { + return (returnType) (ptrdiff_t)val.getPtr(); } - } - print_error("sv_to_long_long_u_or_not", "cannot handle this type!"); - QT->dump(); - return 0; -} + return (returnType) (long) val.getPtr(); + } + if (QT->isPointerType() || QT->isArrayType() || QT->isRecordType() || + QT->isReferenceType()) { + return (returnType) (long) val.getPtr(); + } + if (const EnumType* ET = dyn_cast(&*QT)) { + if (ET->getDecl()->getIntegerType()->hasSignedIntegerRepresentation()) + return (returnType) val.getLL(); + else + return (returnType) val.getULL(); + } + if (const BuiltinType* BT = + dyn_cast(&*QT)) { + // + // WARNING!!! + // + // This switch is organized in order-of-declaration + // so that the produced assembly code is optimal. + // Do not reorder! + // + switch (BT->getKind()) { + case BuiltinType::Void: + // CINT used to expect a result of 0. 
+ return (returnType) 0; + break; + // + // Unsigned Types + // + case BuiltinType::Bool: + case BuiltinType::Char_U: // char on targets where it is unsigned + case BuiltinType::UChar: + return (returnType) val.getULL(); + break; -static long long sv_to_long_long(const cling::StoredValueRef& svref) { - return sv_to_long_long_u_or_not(svref); + case BuiltinType::WChar_U: + // wchar_t on targets where it is unsigned + // The standard doesn't allow to specify signednedd of wchar_t + // thus this maps simply to wchar_t. + return (returnType) (wchar_t) val.getULL(); + break; + + case BuiltinType::Char16: + case BuiltinType::Char32: + case BuiltinType::UShort: + case BuiltinType::UInt: + case BuiltinType::ULong: + case BuiltinType::ULongLong: + return (returnType) val.getULL(); + break; + + case BuiltinType::UInt128: + // __uint128_t + break; + + // + // Signed Types + // + case BuiltinType::Char_S: // char on targets where it is signed + case BuiltinType::SChar: + return (returnType) val.getLL(); + break; + + case BuiltinType::WChar_S: + // wchar_t on targets where it is signed + // The standard doesn't allow to specify signednedd of wchar_t + // thus this maps simply to wchar_t. 
+ return (returnType) (wchar_t) val.getLL(); + break; + + case BuiltinType::Short: + case BuiltinType::Int: + case BuiltinType::Long: + case BuiltinType::LongLong: + return (returnType) val.getLL(); + break; + + case BuiltinType::Int128: + break; + + case BuiltinType::Half: + // half in OpenCL, __fp16 in ARM NEON + break; + + case BuiltinType::Float: + return (returnType) val.getFloat(); + break; + case BuiltinType::Double: + return (returnType) val.getDouble(); + break; + case BuiltinType::LongDouble: + return (returnType) val.getLongDouble(); + break; + + case BuiltinType::NullPtr: + return (returnType) 0; + break; + + default: + break; + } + } + print_error("sv_to", "invalid type"); + QT->dump(); + return 0; } static -unsigned long long sv_to_ulong_long(const cling::StoredValueRef& svref) { - return sv_to_long_long_u_or_not(svref); +long long sv_to_long_long(const cling::Value& val) { + return sv_to(val); } - +static +unsigned long long sv_to_ulong_long(const cling::Value& val) { + return sv_to(val); +} namespace { @@ -772,230 +835,168 @@ (*wrapper)(address, (int)0/*num_args*/, (void**)vp_ary.data(), ret); } +static void +exec_with_valref_return(void* address, cling::Value* ret) const +{ + if (!ret) { + exec(address, 0); + return; + } + std::cout << " USING DECL: " << fdecl << std::endl; + const FunctionDecl* FD = fMethod->GetMethodDecl(); + ASTContext& Context = FD->getASTContext(); -static void exec_with_valref_return(void* address, cling::StoredValueRef* ret, const FunctionDecl* fdecl) { - if (!ret) { - exec(address, 0, fdecl); - return; - } - std::cout << " USING DECL: " << fdecl << std::endl; - fdecl->dump(); - ASTContext& Context = fdecl->getASTContext(); + if (const CXXConstructorDecl* CD = dyn_cast(FD)) { + const TypeDecl* TD = dyn_cast(CD->getDeclContext()); + QualType ClassTy(TD->getTypeForDecl(), 0); + QualType QT = Context.getLValueReferenceType(ClassTy); + *ret = cling::Value(QT, fInterp); + // Store the new()'ed address in getPtr() + exec(address, 
&ret->getPtr()); + return; + } + QualType QT = FD->getReturnType().getCanonicalType(); + if (QT->isReferenceType()) { + *ret = cling::Value(QT, 0); + exec(address, &ret->getPtr()); + return; + } + else if (QT->isMemberPointerType()) { + const MemberPointerType* MPT = QT->getAs(); + if (MPT->isMemberDataPointer()) { + // A member data pointer is a actually a struct with one + // member of ptrdiff_t, the offset from the base of the object + // storage to the storage for the designated data member. + // But that's not relevant: we use it as a non-builtin, allocated + // type. + *ret = cling::Value(QT, fInterp); + exec(address, ret->getPtr()); + return; + } + // We are a function member pointer. + *ret = cling::Value(QT, fInterp); + exec(address, &ret->getPtr()); + return; + } + else if (QT->isPointerType() || QT->isArrayType()) { + // Note: ArrayType is an illegal function return value type. + *ret = cling::Value(QT, 0); + exec(address, &ret->getPtr()); + return; + } + else if (QT->isRecordType()) { + *ret = cling::Value(QT, fInterp); + exec(address, ret->getPtr()); + return; + } + else if (const EnumType* ET = dyn_cast(&*QT)) { + // Note: We may need to worry about the underlying type + // of the enum here. 
+ (void) ET; + *ret = cling::Value(QT, 0); + execWithLL(address, QT, ret); + return; + } + else if (const BuiltinType* BT = dyn_cast(&*QT)) { + *ret = cling::Value(QT, 0); + switch (BT->getKind()) { + case BuiltinType::Void: + exec(address, 0); + break; - if (const CXXConstructorDecl* CD = llvm::dyn_cast(fdecl)) { - const TypeDecl* TD = llvm::dyn_cast(CD->getDeclContext()); - QualType ClassTy(TD->getTypeForDecl(), 0); - QualType QT = Context.getLValueReferenceType(ClassTy); - llvm::GenericValue gv; - exec(address, &gv.PointerVal, fdecl); - *ret = cling::StoredValueRef::bitwiseCopy( - *gCppyy_Cling, cling::Value(gv, QT)); - return; - } - QualType QT = fdecl->getResultType().getCanonicalType(); - if (QT->isReferenceType()) { - llvm::GenericValue gv; - exec(address, &gv.PointerVal, fdecl); - *ret = cling::StoredValueRef::bitwiseCopy( - *gCppyy_Cling, cling::Value(gv, QT)); - return; - } - else if (QT->isMemberPointerType()) { - const MemberPointerType* MPT = - QT->getAs(); - if (MPT->isMemberDataPointer()) { - // A member data pointer is a actually a struct with one - // member of ptrdiff_t, the offset from the base of the object - // storage to the storage for the designated data member. - llvm::GenericValue gv; - exec(address, &gv.PointerVal, fdecl); - *ret = cling::StoredValueRef::bitwiseCopy( - *gCppyy_Cling, cling::Value(gv, QT)); - return; - } - // We are a function member pointer. - llvm::GenericValue gv; - exec(address, &gv.PointerVal, fdecl); - *ret = cling::StoredValueRef::bitwiseCopy( - *gCppyy_Cling, cling::Value(gv, QT)); - return; - } - else if (QT->isPointerType() || QT->isArrayType()) { - // Note: ArrayType is an illegal function return value type. 
- llvm::GenericValue gv; - exec(address, &gv.PointerVal, fdecl); - *ret = cling::StoredValueRef::bitwiseCopy( - *gCppyy_Cling, cling::Value(gv, QT)); - return; - } - else if (QT->isRecordType()) { - uint64_t size = Context.getTypeSizeInChars(QT).getQuantity(); - void* p = ::operator new(size); - exec(address, p, fdecl); - *ret = cling::StoredValueRef::bitwiseCopy( - *gCppyy_Cling, cling::Value(llvm::PTOGV(p), QT)); - return; - } - else if (const EnumType* ET = llvm::dyn_cast(&*QT)) { - // Note: We may need to worry about the underlying type - // of the enum here. - (void) ET; - uint64_t numBits = Context.getTypeSize(QT); - int retVal = 0; - exec(address, &retVal, fdecl); - llvm::GenericValue gv; - gv.IntVal = llvm::APInt(numBits, (uint64_t)retVal, true /*isSigned*/); - *ret = cling::StoredValueRef::bitwiseCopy( - *gCppyy_Cling, cling::Value(gv, QT)); - return; - } - else if (const BuiltinType* BT = llvm::dyn_cast(&*QT)) { - llvm::GenericValue gv; + // + // Unsigned Types + // + case BuiltinType::Bool: + execWithULL(address, QT, ret); + break; + case BuiltinType::Char_U: // char on targets where it is unsigned + case BuiltinType::UChar: + execWithULL(address, QT, ret); + break; + case BuiltinType::WChar_U: + // wchar_t on targets where it is unsigned. + // The standard doesn't allow to specify signednedd of wchar_t + // thus this maps simply to wchar_t. 
+ execWithULL(address, QT, ret); + break; + case BuiltinType::Char16: + print_error("exec_with_valref_return", "invalid type: char16_t"); + break; + case BuiltinType::Char32: + print_error("exec_with_valref_return", "invalid type char32_t"); + break; + case BuiltinType::UShort: + execWithULL(address, QT, ret); + break; + case BuiltinType::UInt: + execWithULL(address, QT, ret); + break; + case BuiltinType::ULong: + execWithULL(address, QT, ret); + break; + case BuiltinType::ULongLong: + execWithULL(address, QT, ret); + break; + case BuiltinType::UInt128: + print_error("exec_with_valref_return", "invalid type __uint128_t"); + break; - uint64_t numBits = Context.getTypeSize(QT); - switch (BT->getKind()) { - // - // builtin types - // - case BuiltinType::Void: { - exec(address, 0, fdecl); - return; - } - // - // unsigned integral types - // - case BuiltinType::Bool: { - bool retVal = false; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t)retVal, false /*isSigned*/); + // + // Signed Types + // + case BuiltinType::Char_S: // char on targets where it is signed + case BuiltinType::SChar: + execWithLL(address, QT, ret); break; - } - case BuiltinType::Char_U: { - // char on targets where it is unsigned - char retVal = '\0'; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); - break; - } - case BuiltinType::UChar: { - unsigned char retVal = '\0'; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); - break; - } - case BuiltinType::WChar_U: { - // wchar_t on targets where it is unsigned. - // The standard doesn't allow to specify signedness of wchar_t - // thus this maps simply to wchar_t. 
- wchar_t retVal = L'\0'; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); - break; - } - case BuiltinType::UShort: { - unsigned short retVal = 0; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); - break; - } - case BuiltinType::UInt: { - unsigned int retVal = 0; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); - break; - } - case BuiltinType::ULong: { - // unsigned long - unsigned long retVal = 0; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); - break; - } - case BuiltinType::ULongLong: { - // unsigned long long - unsigned long long retVal = 0; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); - break; - } - // - // signed integral types - // - case BuiltinType::Char_S: { - // char on targets where it is signed - char retVal = '\0'; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); - break; - } - case BuiltinType::SChar: { - // signed char - signed char retVal = '\0'; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); - break; - } - case BuiltinType::WChar_S: { + case BuiltinType::WChar_S: // wchar_t on targets where it is signed. // The standard doesn't allow to specify signednedd of wchar_t // thus this maps simply to wchar_t. 
- wchar_t retVal = L'\0'; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + execWithLL(address, QT, ret); break; - } - case BuiltinType::Short: { - // short - short retVal = 0; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + case BuiltinType::Short: + execWithLL(address, QT, ret); break; - } - case BuiltinType::Int: { - // int - int retVal = 0; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + case BuiltinType::Int: + execWithLL(address, QT, ret); break; - } - case BuiltinType::Long: { - long retVal = 0; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + case BuiltinType::Long: + execWithLL(address, QT, ret); break; - } - case BuiltinType::LongLong: { - long long retVal = 0; - exec(address, &retVal, fdecl); - gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + case BuiltinType::LongLong: + execWithLL(address, QT, ret); break; - } - case BuiltinType::Float: { - exec(address, &gv.FloatVal, fdecl); + case BuiltinType::Int128: + print_error("exec_with_valref_return", "invalid type __int128_t"); break; - } - case BuiltinType::Double: { - exec(address, &gv.DoubleVal, fdecl); + case BuiltinType::Half: + // half in OpenCL, __fp16 in ARM NEON + print_error("exec_with_valref_return", "invalid type Half"); break; - } - case BuiltinType::Char16: - case BuiltinType::Char32: - case BuiltinType::Half: - case BuiltinType::Int128: - case BuiltinType::UInt128: - case BuiltinType::LongDouble: - case BuiltinType::NullPtr: - default: { - print_error("exec_with_valref", "unsupported return type"); - return; - } - } - - *ret = cling::StoredValueRef::bitwiseCopy(*gCppyy_Cling, cling::Value(gv, QT)); - return; - } - - std::cout << "exec_with_valref: some error occurred ... 
" << std::endl; + case BuiltinType::Float: + exec(address, &ret->getFloat()); + break; + case BuiltinType::Double: + exec(address, &ret->getDouble()); + break; + case BuiltinType::LongDouble: + exec(address, &ret->getLongDouble()); + break; + // + // Language-Specific Types + // + case BuiltinType::NullPtr: + // C++11 nullptr + print_error("exec_with_valref_return", "invalid type nullptr"); + break; + default: + break; + } + return; + } + print_error("exec_with_valref_return", "unrecognized return type"); + QT->dump(); } From noreply at buildbot.pypy.org Wed Jul 16 21:17:00 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 16 Jul 2014 21:17:00 +0200 (CEST) Subject: [pypy-commit] pypy reflex-support: updates for handling of null-ptrs and enums Message-ID: <20140716191700.3341F1C33B9@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r72457:6fd647e28232 Date: 2014-07-16 12:16 -0700 http://bitbucket.org/pypy/pypy/changeset/6fd647e28232/ Log: updates for handling of null-ptrs and enums diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -391,6 +391,15 @@ CppyyTestData.s_ldouble = math.pi assert c.s_ldouble == math.pi + # enum types + assert raises(AttributeError, getattr, CppyyTestData, 'kBanana') + if self.capi_identity == 'Cling': # detailed enum support only in Cling + assert raises(TypeError, setattr, CppyyTestData, 'kLots', 42) + c.s_enum = CppyyTestData.kLots + assert CppyyTestData.s_enum == CppyyTestData.kLots + CppyyTestData.s_enum = CppyyTestData.kNothing + assert c.s_enum == CppyyTestData.kNothing + c.__destruct__() def test07_range_access(self): @@ -772,8 +781,13 @@ def address_equality_test(a, b): assert cppyy.addressof(a) == cppyy.addressof(b) b2 = cppyy.bind_object(a, CppyyTestData) + b.m_int = 888 + assert b.m_int == 888 + assert b == b2 and b.m_int == b2.m_int assert b is 
b2 # memory regulator recycles b3 = cppyy.bind_object(cppyy.addressof(a), CppyyTestData) + assert b3.m_int == 888 + assert b == b3 and b.m_int == b3.m_int assert b is b3 # likewise address_equality_test(c.m_voidp, c2) diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/cppyy/test/test_fragile.py @@ -70,7 +70,7 @@ assert fragile.D().check() == ord('D') d = fragile.D() - raises(TypeError, d.overload, None) + raises(TypeError, d.overload, 1.) raises(TypeError, d.overload, None, None, None) d.overload('a') @@ -88,8 +88,10 @@ assert fragile.E().check() == ord('E') e = fragile.E() - raises(TypeError, e.overload, None) - raises(TypeError, getattr, e, 'm_pp_no_such') + # TODO: figure out the desired behavior here; right now, an opaque + # pointer is returned to allow passing back to C++ code + #raises(TypeError, e.overload, None) + #raises(TypeError, getattr, e, 'm_pp_no_such') def test05_wrong_arg_addressof(self): """Test addressof() error reporting""" @@ -184,17 +186,17 @@ assert "TypeError: wrong number of arguments" in str(e) try: - d.overload(None) # raises TypeError + d.overload(1.) 
# raises TypeError assert 0 except TypeError, e: assert "fragile::D::overload()" in str(e) assert "TypeError: wrong number of arguments" in str(e) assert "fragile::D::overload(fragile::no_such_class*)" in str(e) - assert "TypeError: no converter available for 'fragile::no_such_class*'" in str(e) + # assert "TypeError: no converter available for 'fragile::no_such_class*'" in str(e) assert "fragile::D::overload(char, int)" in str(e) - assert "TypeError: expected string, got NoneType object" in str(e) + assert "TypeError: expected string, got float object" in str(e) assert "fragile::D::overload(int, fragile::no_such_class*)" in str(e) - assert "TypeError: expected integer, got NoneType object" in str(e) + assert "TypeError: expected integer, got float object" in str(e) j = fragile.J() assert fragile.J.method1.__doc__ == j.method1.__doc__ From noreply at buildbot.pypy.org Wed Jul 16 22:55:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 16 Jul 2014 22:55:29 +0200 (CEST) Subject: [pypy-commit] pypy default: prefer space from inter2app func args Message-ID: <20140716205529.243A51C0091@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r72458:5b3b60ba4e33 Date: 2014-07-16 13:54 -0700 http://bitbucket.org/pypy/pypy/changeset/5b3b60ba4e33/ Log: prefer space from inter2app func args diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -133,11 +133,11 @@ self._issuer[0] = '\0' self.shutdown_seen_zero = False - def server(self): - return self.space.wrap(rffi.charp2str(self._server)) + def server(self, space): + return space.wrap(rffi.charp2str(self._server)) - def issuer(self): - return self.space.wrap(rffi.charp2str(self._issuer)) + def issuer(self, space): + return space.wrap(rffi.charp2str(self._issuer)) def __del__(self): self.enqueue_for_destruction(self.space, SSLObject.destructor, @@ -155,21 +155,21 @@ lltype.free(self._issuer, 
flavor='raw') @unwrap_spec(data='bufferstr') - def write(self, data): + def write(self, space, data): """write(s) -> len Writes the string s into the SSL object. Returns the number of bytes written.""" - self._refresh_nonblocking(self.space) + self._refresh_nonblocking(space) - sockstate = check_socket_and_wait_for_timeout(self.space, + sockstate = check_socket_and_wait_for_timeout(space, self.w_socket, True) if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The write operation timed out") + raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_HAS_BEEN_CLOSED: - raise ssl_error(self.space, "Underlying socket has been closed.") + raise ssl_error(space, "Underlying socket has been closed.") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(self.space, "Underlying socket too large for select().") + raise ssl_error(space, "Underlying socket too large for select().") num_bytes = 0 while True: @@ -179,18 +179,18 @@ err = libssl_SSL_get_error(self.ssl, num_bytes) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout(self.space, + sockstate = check_socket_and_wait_for_timeout(space, self.w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout(self.space, + sockstate = check_socket_and_wait_for_timeout(space, self.w_socket, True) else: sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The write operation timed out") + raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_HAS_BEEN_CLOSED: - raise ssl_error(self.space, "Underlying socket has been closed.") + raise ssl_error(space, "Underlying socket has been closed.") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -200,38 +200,38 @@ break if num_bytes > 0: - return self.space.wrap(num_bytes) + return space.wrap(num_bytes) else: - raise _ssl_seterror(self.space, self, num_bytes) + raise _ssl_seterror(space, self, 
num_bytes) - def pending(self): + def pending(self, space): """pending() -> count Returns the number of already decrypted bytes available for read, pending on the connection.""" count = libssl_SSL_pending(self.ssl) if count < 0: - raise _ssl_seterror(self.space, self, count) - return self.space.wrap(count) + raise _ssl_seterror(space, self, count) + return space.wrap(count) @unwrap_spec(num_bytes=int) - def read(self, num_bytes=1024): + def read(self, space, num_bytes=1024): """read([len]) -> string Read up to len bytes from the SSL socket.""" count = libssl_SSL_pending(self.ssl) if not count: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, False) + sockstate = check_socket_and_wait_for_timeout(space, self.w_socket, + False) if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The read operation timed out") + raise ssl_error(space, "The read operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(self.space, "Underlying socket too large for select().") + raise ssl_error(space, "Underlying socket too large for select().") elif sockstate == SOCKET_HAS_BEEN_CLOSED: if libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN: - return self.space.wrap('') - raise ssl_error(self.space, "Socket closed without SSL shutdown handshake") + return space.wrap('') + raise ssl_error(space, "Socket closed without SSL shutdown handshake") with rffi.scoped_alloc_buffer(num_bytes) as buf: while True: @@ -241,19 +241,19 @@ err = libssl_SSL_get_error(self.ssl, count) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout(self.space, + sockstate = check_socket_and_wait_for_timeout(space, self.w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout(self.space, + sockstate = check_socket_and_wait_for_timeout(space, self.w_socket, True) elif (err == SSL_ERROR_ZERO_RETURN and libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): - return 
self.space.wrap("") + return space.wrap("") else: sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The read operation timed out") + raise ssl_error(space, "The read operation timed out") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -263,11 +263,11 @@ break if count <= 0: - raise _ssl_seterror(self.space, self, count) + raise _ssl_seterror(space, self, count) result = buf.str(count) - return self.space.wrap(result) + return space.wrap(result) def _refresh_nonblocking(self, space): # just in case the blocking state of the socket has been changed @@ -361,18 +361,18 @@ ssl_err = libssl_SSL_get_error(self.ssl, ret) if ssl_err == SSL_ERROR_WANT_READ: sockstate = check_socket_and_wait_for_timeout( - self.space, self.w_socket, False) + space, self.w_socket, False) elif ssl_err == SSL_ERROR_WANT_WRITE: sockstate = check_socket_and_wait_for_timeout( - self.space, self.w_socket, True) + space, self.w_socket, True) else: break if sockstate == SOCKET_HAS_TIMED_OUT: if ssl_err == SSL_ERROR_WANT_READ: - raise ssl_error(self.space, "The read operation timed out") + raise ssl_error(space, "The read operation timed out") else: - raise ssl_error(self.space, "The write operation timed out") + raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: raise ssl_error(space, "Underlying socket too large for select().") elif sockstate != SOCKET_OPERATION_OK: @@ -410,7 +410,7 @@ return space.newtuple([w_name, w_proto, w_bits]) @unwrap_spec(der=bool) - def peer_certificate(self, der=False): + def peer_certificate(self, space, der=False): """peer_certificate([der=False]) -> certificate Returns the certificate for the peer. If no certificate was provided, @@ -422,7 +422,7 @@ peer certificate, or None if no certificate was provided. 
This will return the certificate even if it wasn't validated.""" if not self.peer_cert: - return self.space.w_None + return space.w_None if der: # return cert in DER-encoded format @@ -430,20 +430,19 @@ buf_ptr[0] = lltype.nullptr(rffi.CCHARP.TO) length = libssl_i2d_X509(self.peer_cert, buf_ptr) if length < 0: - raise _ssl_seterror(self.space, self, length) + raise _ssl_seterror(space, self, length) try: # this is actually an immutable bytes sequence - return self.space.wrap(rffi.charpsize2str(buf_ptr[0], - length)) + return space.wrap(rffi.charpsize2str(buf_ptr[0], length)) finally: libssl_OPENSSL_free(buf_ptr[0]) else: verification = libssl_SSL_CTX_get_verify_mode( libssl_SSL_get_SSL_CTX(self.ssl)) if not verification & SSL_VERIFY_PEER: - return self.space.newdict() + return space.newdict() else: - return _decode_certificate(self.space, self.peer_cert) + return _decode_certificate(space, self.peer_cert) def _decode_certificate(space, certificate, verbose=False): w_retval = space.newdict() From noreply at buildbot.pypy.org Wed Jul 16 22:55:30 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 16 Jul 2014 22:55:30 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140716205530.642A21C0091@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r72459:1fa3f31534b0 Date: 2014-07-16 13:54 -0700 http://bitbucket.org/pypy/pypy/changeset/1fa3f31534b0/ Log: cleanup diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1,14 +1,12 @@ -from __future__ import with_statement -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib import rpoll, rsocket +from rpython.rlib.rarithmetic import intmask +from rpython.rlib.ropenssl import * +from rpython.rtyper.lltypesystem import lltype, rffi + +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError -from 
pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app, unwrap_spec - -from rpython.rlib.rarithmetic import intmask -from rpython.rlib import rpoll, rsocket -from rpython.rlib.ropenssl import * - from pypy.module._socket import interp_socket @@ -83,19 +81,15 @@ Mix string into the OpenSSL PRNG state. entropy (a float) is a lower bound on the entropy contained in string.""" - - buf = rffi.str2charp(string) - try: + with rffi.scoped_str2charp(string) as buf: libssl_RAND_add(buf, len(string), entropy) - finally: - rffi.free_charp(buf) def RAND_status(space): """RAND_status() -> 0 or 1 - Returns 1 if the OpenSSL PRNG has been seeded with enough data and 0 if not. - It is necessary to seed the PRNG with RAND_add() on some platforms before - using the ssl() function.""" + Returns 1 if the OpenSSL PRNG has been seeded with enough data + and 0 if not. It is necessary to seed the PRNG with RAND_add() + on some platforms before using the ssl() function.""" res = libssl_RAND_status() return space.wrap(res) @@ -107,16 +101,12 @@ Queries the entropy gather daemon (EGD) on socket path. Returns number of bytes read. 
Raises socket.sslerror if connection to EGD fails or if it does provide enough data to seed PRNG.""" - - socket_path = rffi.str2charp(path) - try: + with rffi.scoped_str2charp(path) as socket_path: bytes = libssl_RAND_egd(socket_path) - finally: - rffi.free_charp(socket_path) if bytes == -1: - msg = "EGD connection failed or EGD did not return" - msg += " enough data to seed the PRNG" - raise ssl_error(space, msg) + raise ssl_error(space, + "EGD connection failed or EGD did not return " + "enough data to seed the PRNG") return space.wrap(bytes) @@ -127,9 +117,11 @@ self.ctx = lltype.nullptr(SSL_CTX.TO) self.ssl = lltype.nullptr(SSL.TO) self.peer_cert = lltype.nullptr(X509.TO) - self._server = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') + self._server = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, + flavor='raw') self._server[0] = '\0' - self._issuer = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') + self._issuer = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, + flavor='raw') self._issuer[0] = '\0' self.shutdown_seen_zero = False @@ -162,8 +154,7 @@ of bytes written.""" self._refresh_nonblocking(space) - sockstate = check_socket_and_wait_for_timeout(space, - self.w_socket, True) + sockstate = checkwait(space, self.w_socket, True) if sockstate == SOCKET_HAS_TIMED_OUT: raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_HAS_BEEN_CLOSED: @@ -179,11 +170,9 @@ err = libssl_SSL_get_error(self.ssl, num_bytes) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout(space, - self.w_socket, False) + sockstate = checkwait(space, self.w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout(space, - self.w_socket, True) + sockstate = checkwait(space, self.w_socket, True) else: sockstate = SOCKET_OPERATION_OK @@ -219,19 +208,19 @@ """read([len]) -> string Read up to len bytes from the SSL socket.""" - count = libssl_SSL_pending(self.ssl) if not 
count: - sockstate = check_socket_and_wait_for_timeout(space, self.w_socket, - False) + sockstate = checkwait(space, self.w_socket, False) if sockstate == SOCKET_HAS_TIMED_OUT: raise ssl_error(space, "The read operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(space, "Underlying socket too large for select().") + raise ssl_error(space, + "Underlying socket too large for select().") elif sockstate == SOCKET_HAS_BEEN_CLOSED: if libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN: return space.wrap('') - raise ssl_error(space, "Socket closed without SSL shutdown handshake") + raise ssl_error(space, + "Socket closed without SSL shutdown handshake") with rffi.scoped_alloc_buffer(num_bytes) as buf: while True: @@ -241,11 +230,9 @@ err = libssl_SSL_get_error(self.ssl, count) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout(space, - self.w_socket, False) + sockstate = checkwait(space, self.w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout(space, - self.w_socket, True) + sockstate = checkwait(space, self.w_socket, True) elif (err == SSL_ERROR_ZERO_RETURN and libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): return space.wrap("") @@ -286,11 +273,9 @@ err = libssl_SSL_get_error(self.ssl, ret) # XXX PyErr_CheckSignals() if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout( - space, self.w_socket, False) + sockstate = checkwait(space, self.w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout( - space, self.w_socket, True) + sockstate = checkwait(space, self.w_socket, True) else: sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: @@ -298,7 +283,8 @@ elif sockstate == SOCKET_HAS_BEEN_CLOSED: raise ssl_error(space, "Underlying socket has been closed.") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(space, "Underlying socket too large for 
select().") + raise ssl_error(space, + "Underlying socket too large for select().") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -330,7 +316,6 @@ self._refresh_nonblocking(space) zeros = 0 - while True: # Disable read-ahead so that unwrap can work correctly. # Otherwise OpenSSL might read in too much data, @@ -360,11 +345,9 @@ # Possibly retry shutdown until timeout or failure ssl_err = libssl_SSL_get_error(self.ssl, ret) if ssl_err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout( - space, self.w_socket, False) + sockstate = checkwait(space, self.w_socket, False) elif ssl_err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout( - space, self.w_socket, True) + sockstate = checkwait(space, self.w_socket, True) else: break @@ -374,7 +357,8 @@ else: raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(space, "Underlying socket too large for select().") + raise ssl_error(space, + "Underlying socket too large for select().") elif sockstate != SOCKET_OPERATION_OK: # Retain the SSL error code break @@ -392,35 +376,29 @@ return space.w_None name = libssl_SSL_CIPHER_get_name(current) - if name: - w_name = space.wrap(rffi.charp2str(name)) - else: - w_name = space.w_None + w_name = space.wrap(rffi.charp2str(name)) if name else space.w_None proto = libssl_SSL_CIPHER_get_version(current) - if proto: - w_proto = space.wrap(rffi.charp2str(proto)) - else: - w_proto = space.w_None + w_proto = space.wrap(rffi.charp2str(proto)) if proto else space.w_None bits = libssl_SSL_CIPHER_get_bits(current, lltype.nullptr(rffi.INTP.TO)) w_bits = space.newint(bits) - return space.newtuple([w_name, w_proto, w_bits]) @unwrap_spec(der=bool) def peer_certificate(self, space, der=False): """peer_certificate([der=False]) -> certificate - Returns the certificate for the peer. If no certificate was provided, - returns None. 
If a certificate was provided, but not validated, returns - an empty dictionary. Otherwise returns a dict containing information - about the peer certificate. + Returns the certificate for the peer. If no certificate was + provided, returns None. If a certificate was provided, but not + validated, returns an empty dictionary. Otherwise returns a + dict containing information about the peer certificate. - If the optional argument is True, returns a DER-encoded copy of the - peer certificate, or None if no certificate was provided. This will - return the certificate even if it wasn't validated.""" + If the optional argument is True, returns a DER-encoded copy of + the peer certificate, or None if no certificate was provided. + This will return the certificate even if it wasn't validated. + """ if not self.peer_cert: return space.w_None @@ -579,15 +557,16 @@ name = libssl_sk_GENERAL_NAME_value(names, j) gntype = intmask(name[0].c_type) if gntype == GEN_DIRNAME: - # we special-case DirName as a tuple of tuples of attributes + # we special-case DirName as a tuple of tuples of + # attributes dirname = libssl_pypy_GENERAL_NAME_dirn(name) w_t = space.newtuple([ space.wrap("DirName"), _create_tuple_for_X509_NAME(space, dirname) ]) elif gntype in (GEN_EMAIL, GEN_DNS, GEN_URI): - # GENERAL_NAME_print() doesn't handle NULL bytes in ASN1_string - # correctly, CVE-2013-4238 + # GENERAL_NAME_print() doesn't handle NULL bytes in + # ASN1_string correctly, CVE-2013-4238 if gntype == GEN_EMAIL: v = space.wrap("email") elif gntype == GEN_DNS: @@ -664,26 +643,11 @@ sock_fd = space.int_w(space.call_method(w_sock, "fileno")) w_timeout = space.call_method(w_sock, "gettimeout") - if space.is_none(w_timeout): - has_timeout = False - else: - has_timeout = True - if space.is_none(w_key_file): - key_file = None - else: - key_file = space.str_w(w_key_file) - if space.is_none(w_cert_file): - cert_file = None - else: - cert_file = space.str_w(w_cert_file) - if space.is_none(w_cacerts_file): - 
cacerts_file = None - else: - cacerts_file = space.str_w(w_cacerts_file) - if space.is_none(w_ciphers): - ciphers = None - else: - ciphers = space.str_w(w_ciphers) + has_timeout = not space.is_none(w_timeout) + key_file = space.str_or_None_w(w_key_file) + cert_file = space.str_or_None_w(w_cert_file) + cacerts_file = space.str_or_None_w(w_cacerts_file) + ciphers = space.str_or_None_w(w_ciphers) if side == PY_SSL_SERVER and (not key_file or not cert_file): raise ssl_error(space, "Both the key & certificate files " @@ -746,8 +710,8 @@ libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address # of a str object may be changed by the garbage collector. - libssl_SSL_set_mode(ss.ssl, - SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) + libssl_SSL_set_mode( + ss.ssl, SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO # to non-blocking mode (blocking is the default) @@ -764,7 +728,7 @@ ss.w_socket = w_sock return ss -def check_socket_and_wait_for_timeout(space, w_sock, writing): +def checkwait(space, w_sock, writing): """If the socket has a timeout, do a select()/poll() on the socket. The argument writing indicates the direction. 
Returns one of the possibilities in the timeout_state enum (above).""" From noreply at buildbot.pypy.org Wed Jul 16 22:55:31 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 16 Jul 2014 22:55:31 +0200 (CEST) Subject: [pypy-commit] pypy default: might as well use is_none here Message-ID: <20140716205531.B65E41C0091@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r72460:cb8e34d4eaf3 Date: 2014-07-16 13:54 -0700 http://bitbucket.org/pypy/pypy/changeset/cb8e34d4eaf3/ Log: might as well use is_none here diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1501,9 +1501,7 @@ return buf.as_str() def str_or_None_w(self, w_obj): - if self.is_w(w_obj, self.w_None): - return None - return self.str_w(w_obj) + return None if self.is_none(w_obj) else self.str_w(w_obj) def str_w(self, w_obj): return w_obj.str_w(self) diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -667,11 +667,6 @@ space.wrap(nValues[0]), space.wrap(l)]) -def str_or_None_w(space, w_obj): - if space.is_w(w_obj, space.w_None): - return None - return space.str_w(w_obj) - def ConnectRegistry(space, w_machine, w_hkey): """key = ConnectRegistry(computer_name, key) @@ -683,7 +678,7 @@ The return value is the handle of the opened key. 
If the function fails, an EnvironmentError exception is raised.""" - machine = str_or_None_w(space, w_machine) + machine = space.str_or_None_w(w_machine) hkey = hkey_w(w_hkey, space) with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey: ret = rwinreg.RegConnectRegistry(machine, hkey, rethkey) From noreply at buildbot.pypy.org Wed Jul 16 23:07:18 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 16 Jul 2014 23:07:18 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140716210718.D49591D2A23@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72461:cd0604d4ee65 Date: 2014-07-16 14:05 -0700 http://bitbucket.org/pypy/pypy/changeset/cd0604d4ee65/ Log: merge default diff --git a/lib-python/2.7/xml/sax/saxutils.py b/lib-python/2.7/xml/sax/saxutils.py --- a/lib-python/2.7/xml/sax/saxutils.py +++ b/lib-python/2.7/xml/sax/saxutils.py @@ -98,13 +98,14 @@ except AttributeError: pass # wrap a binary writer with TextIOWrapper - class UnbufferedTextIOWrapper(io.TextIOWrapper): - def write(self, s): - super(UnbufferedTextIOWrapper, self).write(s) - self.flush() - return UnbufferedTextIOWrapper(buffer, encoding=encoding, + return _UnbufferedTextIOWrapper(buffer, encoding=encoding, errors='xmlcharrefreplace', newline='\n') +# PyPy: moved this class outside the function above +class _UnbufferedTextIOWrapper(io.TextIOWrapper): + def write(self, s): + super(_UnbufferedTextIOWrapper, self).write(s) + self.flush() class XMLGenerator(handler.ContentHandler): diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1459,9 +1459,7 @@ return buf.as_str() def str_or_None_w(self, w_obj): - if self.is_w(w_obj, self.w_None): - return None - return self.str_w(w_obj) + return None if self.is_none(w_obj) else self.str_w(w_obj) def str_w(self, w_obj): """ diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py 
--- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1,17 +1,16 @@ -from __future__ import with_statement -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.error import OperationError, oefmt, wrap_oserror -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.gateway import interp2app, unwrap_spec +import weakref +from rpython.rlib import rpoll, rsocket from rpython.rlib.rarithmetic import intmask -from rpython.rlib import rpoll, rsocket from rpython.rlib.ropenssl import * from rpython.rlib.rposix import get_errno, set_errno +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.typedef import GetSetProperty, TypeDef from pypy.module._socket import interp_socket -import weakref ## user defined constants @@ -296,19 +295,15 @@ Mix string into the OpenSSL PRNG state. entropy (a float) is a lower bound on the entropy contained in string.""" - - buf = rffi.str2charp(string) - try: + with rffi.scoped_str2charp(string) as buf: libssl_RAND_add(buf, len(string), entropy) - finally: - rffi.free_charp(buf) def RAND_status(space): """RAND_status() -> 0 or 1 - Returns 1 if the OpenSSL PRNG has been seeded with enough data and 0 if not. - It is necessary to seed the PRNG with RAND_add() on some platforms before - using the ssl() function.""" + Returns 1 if the OpenSSL PRNG has been seeded with enough data + and 0 if not. It is necessary to seed the PRNG with RAND_add() + on some platforms before using the ssl() function.""" res = libssl_RAND_status() return space.wrap(res) @@ -320,16 +315,12 @@ Queries the entropy gather daemon (EGD) on socket path. Returns number of bytes read. 
Raises socket.sslerror if connection to EGD fails or if it does provide enough data to seed PRNG.""" - - socket_path = rffi.str2charp(path) - try: + with rffi.scoped_str2charp(path) as socket_path: bytes = libssl_RAND_egd(socket_path) - finally: - rffi.free_charp(socket_path) if bytes == -1: - msg = "EGD connection failed or EGD did not return" - msg += " enough data to seed the PRNG" - raise ssl_error(space, msg) + raise ssl_error(space, + "EGD connection failed or EGD did not return " + "enough data to seed the PRNG") return space.wrap(bytes) @@ -354,7 +345,7 @@ of bytes written.""" w_socket = self._get_socket(space) - sockstate = check_socket_and_wait_for_timeout(space, w_socket, True) + sockstate = checkwait(space, w_socket, True) if sockstate == SOCKET_HAS_TIMED_OUT: raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_HAS_BEEN_CLOSED: @@ -370,11 +361,9 @@ err = libssl_SSL_get_error(self.ssl, num_bytes) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, False) + sockstate = checkwait(space, w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, True) + sockstate = checkwait(space, w_socket, True) else: sockstate = SOCKET_OPERATION_OK @@ -414,19 +403,20 @@ count = libssl_SSL_pending(self.ssl) if not count: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, False) + sockstate = checkwait(space, w_socket, False) if sockstate == SOCKET_HAS_TIMED_OUT: raise ssl_error(space, "The read operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(space, "Underlying socket too large for select().") + raise ssl_error(space, + "Underlying socket too large for select().") elif sockstate == SOCKET_HAS_BEEN_CLOSED: if libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN: if space.is_none(w_buf): return space.wrapbytes('') else: return space.wrap(0) - raise ssl_error(space, "Socket closed 
without SSL shutdown handshake") + raise ssl_error(space, + "Socket closed without SSL shutdown handshake") rwbuffer = None if not space.is_none(w_buf): @@ -443,11 +433,9 @@ err = libssl_SSL_get_error(self.ssl, count) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, False) + sockstate = checkwait(space, self.w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, True) + sockstate = checkwait(space, self.w_socket, True) elif (err == SSL_ERROR_ZERO_RETURN and libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): if space.is_none(w_buf): @@ -458,7 +446,7 @@ sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The read operation timed out") + raise ssl_error(space, "The read operation timed out") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -468,7 +456,7 @@ break if count <= 0: - raise _ssl_seterror(self.space, self, count) + raise _ssl_seterror(space, self, count) result = buf.str(count) @@ -501,11 +489,9 @@ err = libssl_SSL_get_error(self.ssl, ret) # XXX PyErr_CheckSignals() if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, False) + sockstate = checkwait(space, w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, True) + sockstate = checkwait(space, w_socket, True) else: sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: @@ -513,7 +499,8 @@ elif sockstate == SOCKET_HAS_BEEN_CLOSED: raise ssl_error(space, "Underlying socket has been closed.") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(space, "Underlying socket too large for select().") + raise ssl_error(space, + "Underlying socket too large for select().") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -538,7 +525,6 @@ raise ssl_error(space, "Underlying socket has been 
closed") zeros = 0 - while True: # Disable read-ahead so that unwrap can work correctly. # Otherwise OpenSSL might read in too much data, @@ -568,11 +554,9 @@ # Possibly retry shutdown until timeout or failure ssl_err = libssl_SSL_get_error(self.ssl, ret) if ssl_err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, False) + sockstate = checkwait(space, w_socket, False) elif ssl_err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, True) + sockstate = checkwait(space, w_socket, True) else: break @@ -582,7 +566,8 @@ else: raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(space, "Underlying socket too large for select().") + raise ssl_error(space, + "Underlying socket too large for select().") elif sockstate != SOCKET_OPERATION_OK: # Retain the SSL error code break @@ -600,35 +585,29 @@ return space.w_None name = libssl_SSL_CIPHER_get_name(current) - if name: - w_name = space.wrap(rffi.charp2str(name)) - else: - w_name = space.w_None + w_name = space.wrap(rffi.charp2str(name)) if name else space.w_None proto = libssl_SSL_CIPHER_get_version(current) - if proto: - w_proto = space.wrap(rffi.charp2str(proto)) - else: - w_proto = space.w_None + w_proto = space.wrap(rffi.charp2str(proto)) if proto else space.w_None bits = libssl_SSL_CIPHER_get_bits(current, lltype.nullptr(rffi.INTP.TO)) w_bits = space.newint(bits) - return space.newtuple([w_name, w_proto, w_bits]) @unwrap_spec(der=bool) def peer_certificate(self, space, der=False): """peer_certificate([der=False]) -> certificate - Returns the certificate for the peer. If no certificate was provided, - returns None. If a certificate was provided, but not validated, returns - an empty dictionary. Otherwise returns a dict containing information - about the peer certificate. + Returns the certificate for the peer. If no certificate was + provided, returns None. 
If a certificate was provided, but not + validated, returns an empty dictionary. Otherwise returns a + dict containing information about the peer certificate. - If the optional argument is True, returns a DER-encoded copy of the - peer certificate, or None if no certificate was provided. This will - return the certificate even if it wasn't validated.""" + If the optional argument is True, returns a DER-encoded copy of + the peer certificate, or None if no certificate was provided. + This will return the certificate even if it wasn't validated. + """ if not self.peer_cert: return space.w_None @@ -641,8 +620,7 @@ raise _ssl_seterror(space, self, length) try: # this is actually an immutable bytes sequence - return space.wrap(rffi.charpsize2str(buf_ptr[0], - length)) + return space.wrap(rffi.charpsize2str(buf_ptr[0], length)) finally: libssl_OPENSSL_free(buf_ptr[0]) else: @@ -788,15 +766,16 @@ name = libssl_sk_GENERAL_NAME_value(names, j) gntype = intmask(name[0].c_type) if gntype == GEN_DIRNAME: - # we special-case DirName as a tuple of tuples of attributes + # we special-case DirName as a tuple of tuples of + # attributes dirname = libssl_pypy_GENERAL_NAME_dirn(name) w_t = space.newtuple([ space.wrap("DirName"), _create_tuple_for_X509_NAME(space, dirname) ]) elif gntype in (GEN_EMAIL, GEN_DNS, GEN_URI): - # GENERAL_NAME_print() doesn't handle NULL bytes in ASN1_string - # correctly, CVE-2013-4238 + # GENERAL_NAME_print() doesn't handle NULL bytes in + # ASN1_string correctly, CVE-2013-4238 if gntype == GEN_EMAIL: v = space.wrap("email") elif gntype == GEN_DNS: @@ -870,17 +849,14 @@ sock_fd = space.int_w(space.call_method(w_sock, "fileno")) w_timeout = space.call_method(w_sock, "gettimeout") - if space.is_none(w_timeout): - has_timeout = False - else: - has_timeout = True + has_timeout = not space.is_none(w_timeout) ss.ssl = libssl_SSL_new(ctx) # new ssl struct libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL # The ACCEPT_MOVING_WRITE_BUFFER flag is 
necessary because the address # of a str object may be changed by the garbage collector. - libssl_SSL_set_mode(ss.ssl, - SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) + libssl_SSL_set_mode( + ss.ssl, SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) if server_hostname: libssl_SSL_set_tlsext_host_name(ss.ssl, server_hostname); @@ -900,7 +876,7 @@ ss.w_socket = weakref.ref(w_sock) return ss -def check_socket_and_wait_for_timeout(space, w_sock, writing): +def checkwait(space, w_sock, writing): """If the socket has a timeout, do a select()/poll() on the socket. The argument writing indicates the direction. Returns one of the possibilities in the timeout_state enum (above).""" diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -667,11 +667,6 @@ space.wrap(nValues[0]), space.wrap(l)]) -def str_or_None_w(space, w_obj): - if space.is_w(w_obj, space.w_None): - return None - return space.str_w(w_obj) - def ConnectRegistry(space, w_machine, w_hkey): """key = ConnectRegistry(computer_name, key) @@ -683,7 +678,7 @@ The return value is the handle of the opened key. 
If the function fails, an EnvironmentError exception is raised.""" - machine = str_or_None_w(space, w_machine) + machine = space.str_or_None_w(w_machine) hkey = hkey_w(w_hkey, space) with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey: ret = rwinreg.RegConnectRegistry(machine, hkey, rethkey) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1377,49 +1377,53 @@ def do_residual_call(self, funcbox, argboxes, descr, pc, assembler_call=False, assembler_call_jd=None): - # First build allboxes: it may need some reordering from the - # list provided in argboxes, depending on the order in which - # the arguments are expected by the function - # - allboxes = self._build_allboxes(funcbox, argboxes, descr) - effectinfo = descr.get_extra_info() - if (assembler_call or - effectinfo.check_forces_virtual_or_virtualizable()): - # residual calls require attention to keep virtualizables in-sync - self.metainterp.clear_exception() - if effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL: - resbox = self._do_jit_force_virtual(allboxes, descr, pc) + debug_start("jit-residual-call") + try: + # First build allboxes: it may need some reordering from the + # list provided in argboxes, depending on the order in which + # the arguments are expected by the function + # + allboxes = self._build_allboxes(funcbox, argboxes, descr) + effectinfo = descr.get_extra_info() + if (assembler_call or + effectinfo.check_forces_virtual_or_virtualizable()): + # residual calls require attention to keep virtualizables in-sync + self.metainterp.clear_exception() + if effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL: + resbox = self._do_jit_force_virtual(allboxes, descr, pc) + if resbox is not None: + return resbox + self.metainterp.vable_and_vrefs_before_residual_call() + resbox = self.metainterp.execute_and_record_varargs( + rop.CALL_MAY_FORCE, allboxes, descr=descr) + if 
effectinfo.is_call_release_gil(): + self.metainterp.direct_call_release_gil() + self.metainterp.vrefs_after_residual_call() + vablebox = None + if assembler_call: + vablebox = self.metainterp.direct_assembler_call( + assembler_call_jd) if resbox is not None: - return resbox - self.metainterp.vable_and_vrefs_before_residual_call() - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE, allboxes, descr=descr) - if effectinfo.is_call_release_gil(): - self.metainterp.direct_call_release_gil() - self.metainterp.vrefs_after_residual_call() - vablebox = None - if assembler_call: - vablebox = self.metainterp.direct_assembler_call( - assembler_call_jd) - if resbox is not None: - self.make_result_of_lastop(resbox) - self.metainterp.vable_after_residual_call(funcbox) - self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) - if vablebox is not None: - self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) - self.metainterp.handle_possible_exception() - # XXX refactor: direct_libffi_call() is a hack - if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: - self.metainterp.direct_libffi_call() - return resbox - else: - effect = effectinfo.extraeffect - if effect == effectinfo.EF_LOOPINVARIANT: - return self.execute_varargs(rop.CALL_LOOPINVARIANT, allboxes, - descr, False, False) - exc = effectinfo.check_can_raise() - pure = effectinfo.check_is_elidable() - return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) + self.make_result_of_lastop(resbox) + self.metainterp.vable_after_residual_call(funcbox) + self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) + if vablebox is not None: + self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) + self.metainterp.handle_possible_exception() + # XXX refactor: direct_libffi_call() is a hack + if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: + self.metainterp.direct_libffi_call() + return resbox + else: + effect = effectinfo.extraeffect + if effect == 
effectinfo.EF_LOOPINVARIANT: + return self.execute_varargs(rop.CALL_LOOPINVARIANT, allboxes, + descr, False, False) + exc = effectinfo.check_can_raise() + pure = effectinfo.check_is_elidable() + return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) + finally: + debug_stop("jit-residual-call") def do_conditional_call(self, condbox, funcbox, argboxes, descr, pc): if isinstance(condbox, ConstInt) and condbox.value == 0: diff --git a/rpython/jit/tool/oparser_model.py b/rpython/jit/tool/oparser_model.py --- a/rpython/jit/tool/oparser_model.py +++ b/rpython/jit/tool/oparser_model.py @@ -124,6 +124,15 @@ class ExtendedTreeLoop(model.TreeLoop): + def as_json(self): + return { + 'comment': self.comment, + 'name': self.name, + 'operations': [op.as_json() for op in self.operations], + 'inputargs': self.inputargs, + 'last_offset': self.last_offset + } + def getboxes(self): def opboxes(operations): for op in operations: diff --git a/rpython/tool/jitlogparser/logparser2json.py b/rpython/tool/jitlogparser/logparser2json.py new file mode 100755 --- /dev/null +++ b/rpython/tool/jitlogparser/logparser2json.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +""" Convert logfile (from jit-log-opt and jit-backend) to json format. 
+Usage: + +logparser2json.py +""" + +import os +import sys +import json +from rpython.tool.jitlogparser.parser import import_log, parse_log_counts +from rpython.tool.logparser import extract_category +from rpython.tool.jitlogparser.storage import LoopStorage + +def mangle_descr(descr): + if descr.startswith('TargetToken('): + return descr[len('TargetToken('):-1] + if descr.startswith(' Author: Philip Jenvey Branch: py3k Changeset: r72462:bab7e4800a77 Date: 2014-07-16 14:49 -0700 http://bitbucket.org/pypy/pypy/changeset/bab7e4800a77/ Log: fix from previous merge diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -433,9 +433,9 @@ err = libssl_SSL_get_error(self.ssl, count) if err == SSL_ERROR_WANT_READ: - sockstate = checkwait(space, self.w_socket, False) + sockstate = checkwait(space, w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = checkwait(space, self.w_socket, True) + sockstate = checkwait(space, w_socket, True) elif (err == SSL_ERROR_ZERO_RETURN and libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): if space.is_none(w_buf): @@ -462,7 +462,7 @@ if rwbuffer is not None: rwbuffer.setslice(0, result) - return self.space.wrap(count) + return space.wrap(count) else: return space.wrapbytes(result) From noreply at buildbot.pypy.org Thu Jul 17 00:07:09 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 17 Jul 2014 00:07:09 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140716220709.61E321D2857@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72463:7536507ca6e5 Date: 2014-07-16 14:50 -0700 http://bitbucket.org/pypy/pypy/changeset/7536507ca6e5/ Log: merge py3k diff --git a/lib-python/2.7/xml/sax/saxutils.py b/lib-python/2.7/xml/sax/saxutils.py --- a/lib-python/2.7/xml/sax/saxutils.py +++ b/lib-python/2.7/xml/sax/saxutils.py @@ -98,13 +98,14 @@ except AttributeError: pass # wrap a 
binary writer with TextIOWrapper - class UnbufferedTextIOWrapper(io.TextIOWrapper): - def write(self, s): - super(UnbufferedTextIOWrapper, self).write(s) - self.flush() - return UnbufferedTextIOWrapper(buffer, encoding=encoding, + return _UnbufferedTextIOWrapper(buffer, encoding=encoding, errors='xmlcharrefreplace', newline='\n') +# PyPy: moved this class outside the function above +class _UnbufferedTextIOWrapper(io.TextIOWrapper): + def write(self, s): + super(_UnbufferedTextIOWrapper, self).write(s) + self.flush() class XMLGenerator(handler.ContentHandler): diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1461,9 +1461,7 @@ return buf.as_str() def str_or_None_w(self, w_obj): - if self.is_w(w_obj, self.w_None): - return None - return self.str_w(w_obj) + return None if self.is_none(w_obj) else self.str_w(w_obj) def str_w(self, w_obj): """ diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1,18 +1,17 @@ -from __future__ import with_statement -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.error import OperationError, oefmt, wrap_oserror -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.gateway import interp2app, unwrap_spec +import weakref +from rpython.rlib import rpoll, rsocket from rpython.rlib.rarithmetic import intmask -from rpython.rlib import rpoll, rsocket from rpython.rlib.ropenssl import * from rpython.rlib.rposix import get_errno, set_errno +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.typedef import GetSetProperty, 
TypeDef from pypy.module._socket import interp_socket from pypy.module.exceptions import interp_exceptions -import weakref ## user defined constants @@ -303,19 +302,15 @@ Mix string into the OpenSSL PRNG state. entropy (a float) is a lower bound on the entropy contained in string.""" - - buf = rffi.str2charp(string) - try: + with rffi.scoped_str2charp(string) as buf: libssl_RAND_add(buf, len(string), entropy) - finally: - rffi.free_charp(buf) def RAND_status(space): """RAND_status() -> 0 or 1 - Returns 1 if the OpenSSL PRNG has been seeded with enough data and 0 if not. - It is necessary to seed the PRNG with RAND_add() on some platforms before - using the ssl() function.""" + Returns 1 if the OpenSSL PRNG has been seeded with enough data + and 0 if not. It is necessary to seed the PRNG with RAND_add() + on some platforms before using the ssl() function.""" res = libssl_RAND_status() return space.wrap(res) @@ -327,16 +322,12 @@ Queries the entropy gather daemon (EGD) on socket path. Returns number of bytes read. 
Raises socket.sslerror if connection to EGD fails or if it does provide enough data to seed PRNG.""" - - socket_path = rffi.str2charp(path) - try: + with rffi.scoped_str2charp(path) as socket_path: bytes = libssl_RAND_egd(socket_path) - finally: - rffi.free_charp(socket_path) if bytes == -1: - msg = "EGD connection failed or EGD did not return" - msg += " enough data to seed the PRNG" - raise ssl_error(space, msg) + raise ssl_error(space, + "EGD connection failed or EGD did not return " + "enough data to seed the PRNG") return space.wrap(bytes) @@ -361,7 +352,7 @@ of bytes written.""" w_socket = self._get_socket(space) - sockstate = check_socket_and_wait_for_timeout(space, w_socket, True) + sockstate = checkwait(space, w_socket, True) if sockstate == SOCKET_HAS_TIMED_OUT: raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_HAS_BEEN_CLOSED: @@ -377,11 +368,9 @@ err = libssl_SSL_get_error(self.ssl, num_bytes) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, False) + sockstate = checkwait(space, w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, True) + sockstate = checkwait(space, w_socket, True) else: sockstate = SOCKET_OPERATION_OK @@ -421,19 +410,20 @@ count = libssl_SSL_pending(self.ssl) if not count: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, False) + sockstate = checkwait(space, w_socket, False) if sockstate == SOCKET_HAS_TIMED_OUT: raise ssl_error(space, "The read operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(space, "Underlying socket too large for select().") + raise ssl_error(space, + "Underlying socket too large for select().") elif sockstate == SOCKET_HAS_BEEN_CLOSED: if libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN: if space.is_none(w_buf): return space.wrapbytes('') else: return space.wrap(0) - raise ssl_error(space, "Socket closed 
without SSL shutdown handshake") + raise ssl_error(space, + "Socket closed without SSL shutdown handshake") rwbuffer = None if not space.is_none(w_buf): @@ -450,11 +440,9 @@ err = libssl_SSL_get_error(self.ssl, count) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, False) + sockstate = checkwait(space, w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, True) + sockstate = checkwait(space, w_socket, True) elif (err == SSL_ERROR_ZERO_RETURN and libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): if space.is_none(w_buf): @@ -465,7 +453,7 @@ sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The read operation timed out") + raise ssl_error(space, "The read operation timed out") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -475,13 +463,13 @@ break if count <= 0: - raise _ssl_seterror(self.space, self, count) + raise _ssl_seterror(space, self, count) result = buf.str(count) if rwbuffer is not None: rwbuffer.setslice(0, result) - return self.space.wrap(count) + return space.wrap(count) else: return space.wrapbytes(result) @@ -508,11 +496,9 @@ err = libssl_SSL_get_error(self.ssl, ret) # XXX PyErr_CheckSignals() if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, False) + sockstate = checkwait(space, w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, True) + sockstate = checkwait(space, w_socket, True) else: sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: @@ -520,7 +506,8 @@ elif sockstate == SOCKET_HAS_BEEN_CLOSED: raise ssl_error(space, "Underlying socket has been closed.") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(space, "Underlying socket too large for select().") + raise ssl_error(space, + "Underlying socket too 
large for select().") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -545,7 +532,6 @@ raise ssl_error(space, "Underlying socket has been closed") zeros = 0 - while True: # Disable read-ahead so that unwrap can work correctly. # Otherwise OpenSSL might read in too much data, @@ -575,11 +561,9 @@ # Possibly retry shutdown until timeout or failure ssl_err = libssl_SSL_get_error(self.ssl, ret) if ssl_err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, False) + sockstate = checkwait(space, w_socket, False) elif ssl_err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout( - space, w_socket, True) + sockstate = checkwait(space, w_socket, True) else: break @@ -589,7 +573,8 @@ else: raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(space, "Underlying socket too large for select().") + raise ssl_error(space, + "Underlying socket too large for select().") elif sockstate != SOCKET_OPERATION_OK: # Retain the SSL error code break @@ -607,35 +592,29 @@ return space.w_None name = libssl_SSL_CIPHER_get_name(current) - if name: - w_name = space.wrap(rffi.charp2str(name)) - else: - w_name = space.w_None + w_name = space.wrap(rffi.charp2str(name)) if name else space.w_None proto = libssl_SSL_CIPHER_get_version(current) - if proto: - w_proto = space.wrap(rffi.charp2str(proto)) - else: - w_proto = space.w_None + w_proto = space.wrap(rffi.charp2str(proto)) if proto else space.w_None bits = libssl_SSL_CIPHER_get_bits(current, lltype.nullptr(rffi.INTP.TO)) w_bits = space.newint(bits) - return space.newtuple([w_name, w_proto, w_bits]) @unwrap_spec(der=bool) def peer_certificate(self, space, der=False): """peer_certificate([der=False]) -> certificate - Returns the certificate for the peer. If no certificate was provided, - returns None. If a certificate was provided, but not validated, returns - an empty dictionary. 
Otherwise returns a dict containing information - about the peer certificate. + Returns the certificate for the peer. If no certificate was + provided, returns None. If a certificate was provided, but not + validated, returns an empty dictionary. Otherwise returns a + dict containing information about the peer certificate. - If the optional argument is True, returns a DER-encoded copy of the - peer certificate, or None if no certificate was provided. This will - return the certificate even if it wasn't validated.""" + If the optional argument is True, returns a DER-encoded copy of + the peer certificate, or None if no certificate was provided. + This will return the certificate even if it wasn't validated. + """ if not self.peer_cert: return space.w_None @@ -648,8 +627,7 @@ raise _ssl_seterror(space, self, length) try: # this is actually an immutable bytes sequence - return space.wrap(rffi.charpsize2str(buf_ptr[0], - length)) + return space.wrap(rffi.charpsize2str(buf_ptr[0], length)) finally: libssl_OPENSSL_free(buf_ptr[0]) else: @@ -795,15 +773,16 @@ name = libssl_sk_GENERAL_NAME_value(names, j) gntype = intmask(name[0].c_type) if gntype == GEN_DIRNAME: - # we special-case DirName as a tuple of tuples of attributes + # we special-case DirName as a tuple of tuples of + # attributes dirname = libssl_pypy_GENERAL_NAME_dirn(name) w_t = space.newtuple([ space.wrap("DirName"), _create_tuple_for_X509_NAME(space, dirname) ]) elif gntype in (GEN_EMAIL, GEN_DNS, GEN_URI): - # GENERAL_NAME_print() doesn't handle NULL bytes in ASN1_string - # correctly, CVE-2013-4238 + # GENERAL_NAME_print() doesn't handle NULL bytes in + # ASN1_string correctly, CVE-2013-4238 if gntype == GEN_EMAIL: v = space.wrap("email") elif gntype == GEN_DNS: @@ -877,17 +856,14 @@ sock_fd = space.int_w(space.call_method(w_sock, "fileno")) w_timeout = space.call_method(w_sock, "gettimeout") - if space.is_none(w_timeout): - has_timeout = False - else: - has_timeout = True + has_timeout = not 
space.is_none(w_timeout) ss.ssl = libssl_SSL_new(ctx) # new ssl struct libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address # of a str object may be changed by the garbage collector. - libssl_SSL_set_mode(ss.ssl, - SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) + libssl_SSL_set_mode( + ss.ssl, SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) if server_hostname: libssl_SSL_set_tlsext_host_name(ss.ssl, server_hostname); @@ -907,7 +883,7 @@ ss.w_socket = weakref.ref(w_sock) return ss -def check_socket_and_wait_for_timeout(space, w_sock, writing): +def checkwait(space, w_sock, writing): """If the socket has a timeout, do a select()/poll() on the socket. The argument writing indicates the direction. Returns one of the possibilities in the timeout_state enum (above).""" diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -667,11 +667,6 @@ space.wrap(nValues[0]), space.wrap(l)]) -def str_or_None_w(space, w_obj): - if space.is_w(w_obj, space.w_None): - return None - return space.str_w(w_obj) - def ConnectRegistry(space, w_machine, w_hkey): """key = ConnectRegistry(computer_name, key) @@ -683,7 +678,7 @@ The return value is the handle of the opened key. 
If the function fails, an EnvironmentError exception is raised.""" - machine = str_or_None_w(space, w_machine) + machine = space.str_or_None_w(w_machine) hkey = hkey_w(w_hkey, space) with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey: ret = rwinreg.RegConnectRegistry(machine, hkey, rethkey) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1377,49 +1377,53 @@ def do_residual_call(self, funcbox, argboxes, descr, pc, assembler_call=False, assembler_call_jd=None): - # First build allboxes: it may need some reordering from the - # list provided in argboxes, depending on the order in which - # the arguments are expected by the function - # - allboxes = self._build_allboxes(funcbox, argboxes, descr) - effectinfo = descr.get_extra_info() - if (assembler_call or - effectinfo.check_forces_virtual_or_virtualizable()): - # residual calls require attention to keep virtualizables in-sync - self.metainterp.clear_exception() - if effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL: - resbox = self._do_jit_force_virtual(allboxes, descr, pc) + debug_start("jit-residual-call") + try: + # First build allboxes: it may need some reordering from the + # list provided in argboxes, depending on the order in which + # the arguments are expected by the function + # + allboxes = self._build_allboxes(funcbox, argboxes, descr) + effectinfo = descr.get_extra_info() + if (assembler_call or + effectinfo.check_forces_virtual_or_virtualizable()): + # residual calls require attention to keep virtualizables in-sync + self.metainterp.clear_exception() + if effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL: + resbox = self._do_jit_force_virtual(allboxes, descr, pc) + if resbox is not None: + return resbox + self.metainterp.vable_and_vrefs_before_residual_call() + resbox = self.metainterp.execute_and_record_varargs( + rop.CALL_MAY_FORCE, allboxes, descr=descr) + if 
effectinfo.is_call_release_gil(): + self.metainterp.direct_call_release_gil() + self.metainterp.vrefs_after_residual_call() + vablebox = None + if assembler_call: + vablebox = self.metainterp.direct_assembler_call( + assembler_call_jd) if resbox is not None: - return resbox - self.metainterp.vable_and_vrefs_before_residual_call() - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE, allboxes, descr=descr) - if effectinfo.is_call_release_gil(): - self.metainterp.direct_call_release_gil() - self.metainterp.vrefs_after_residual_call() - vablebox = None - if assembler_call: - vablebox = self.metainterp.direct_assembler_call( - assembler_call_jd) - if resbox is not None: - self.make_result_of_lastop(resbox) - self.metainterp.vable_after_residual_call(funcbox) - self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) - if vablebox is not None: - self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) - self.metainterp.handle_possible_exception() - # XXX refactor: direct_libffi_call() is a hack - if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: - self.metainterp.direct_libffi_call() - return resbox - else: - effect = effectinfo.extraeffect - if effect == effectinfo.EF_LOOPINVARIANT: - return self.execute_varargs(rop.CALL_LOOPINVARIANT, allboxes, - descr, False, False) - exc = effectinfo.check_can_raise() - pure = effectinfo.check_is_elidable() - return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) + self.make_result_of_lastop(resbox) + self.metainterp.vable_after_residual_call(funcbox) + self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) + if vablebox is not None: + self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) + self.metainterp.handle_possible_exception() + # XXX refactor: direct_libffi_call() is a hack + if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: + self.metainterp.direct_libffi_call() + return resbox + else: + effect = effectinfo.extraeffect + if effect == 
effectinfo.EF_LOOPINVARIANT: + return self.execute_varargs(rop.CALL_LOOPINVARIANT, allboxes, + descr, False, False) + exc = effectinfo.check_can_raise() + pure = effectinfo.check_is_elidable() + return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) + finally: + debug_stop("jit-residual-call") def do_conditional_call(self, condbox, funcbox, argboxes, descr, pc): if isinstance(condbox, ConstInt) and condbox.value == 0: diff --git a/rpython/jit/tool/oparser_model.py b/rpython/jit/tool/oparser_model.py --- a/rpython/jit/tool/oparser_model.py +++ b/rpython/jit/tool/oparser_model.py @@ -124,6 +124,15 @@ class ExtendedTreeLoop(model.TreeLoop): + def as_json(self): + return { + 'comment': self.comment, + 'name': self.name, + 'operations': [op.as_json() for op in self.operations], + 'inputargs': self.inputargs, + 'last_offset': self.last_offset + } + def getboxes(self): def opboxes(operations): for op in operations: diff --git a/rpython/tool/jitlogparser/logparser2json.py b/rpython/tool/jitlogparser/logparser2json.py new file mode 100755 --- /dev/null +++ b/rpython/tool/jitlogparser/logparser2json.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +""" Convert logfile (from jit-log-opt and jit-backend) to json format. 
+Usage: + +logparser2json.py +""" + +import os +import sys +import json +from rpython.tool.jitlogparser.parser import import_log, parse_log_counts +from rpython.tool.logparser import extract_category +from rpython.tool.jitlogparser.storage import LoopStorage + +def mangle_descr(descr): + if descr.startswith('TargetToken('): + return descr[len('TargetToken('):-1] + if descr.startswith(' Author: Philip Jenvey Branch: Changeset: r72464:24db6697b691 Date: 2014-07-16 14:51 -0700 http://bitbucket.org/pypy/pypy/changeset/24db6697b691/ Log: py3k compat diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -17,7 +17,7 @@ yield 1 assert g.gi_running g = f() - assert g.gi_code is f.func_code + assert g.gi_code is f.__code__ assert g.__name__ == 'f' assert g.gi_frame is not None assert not g.gi_running @@ -26,7 +26,7 @@ raises(StopIteration, g.next) assert not g.gi_running assert g.gi_frame is None - assert g.gi_code is f.func_code + assert g.gi_code is f.__code__ assert g.__name__ == 'f' def test_generator3(self): @@ -286,13 +286,13 @@ w_co = space.appexec([], '''(): def g(x): yield x + 5 - return g.func_code + return g.__code__ ''') assert should_not_inline(w_co) == False w_co = space.appexec([], '''(): def g(x): yield x + 5 yield x + 6 - return g.func_code + return g.__code__ ''') assert should_not_inline(w_co) == True From noreply at buildbot.pypy.org Thu Jul 17 10:59:04 2014 From: noreply at buildbot.pypy.org (Conrad Calmez) Date: Thu, 17 Jul 2014 10:59:04 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: changed argument parsing again for easier usage of smalltalk args Message-ID: <20140717085904.3126E1D2ADB@cobra.cs.uni-duesseldorf.de> Author: Conrad Calmez Branch: stmgc-c7 Changeset: r896:a0e39d69fbe0 Date: 2014-07-17 10:57 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a0e39d69fbe0/ Log: changed argument parsing 
again for easier usage of smalltalk args now you can stop regular argument parsing with "--" and all further arguments are for the smalltalk image diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -15094,4 +15094,4 @@ ifFalse: [ self organizeMeetingWith: chameleon. first := nil. ] ] atomic value. " tmpString := 'A Chameneos was in the meeting place and first is now: ' . SPyVM print: tmpString. "! ! !CPBChameneos methodsFor: 'as yet unclassified' stamp: 'hh 7/10/2014 15:12' prior: 34771049! run: meetingPlace [ color == #faded ] whileFalse: [ "SPyVM print: 'Chameneos goes to meeting place' , self color." meetingPlace reachedBy: self. - waitingForPair lock. "SPyVM print: 'Chameneos met another one and releases lock' , self color." ]! ! ----QUIT----{10 July 2014 . 3:13 pm} Squeak4.5-benchmarks.image priorSource: 1252802! ----STARTUP----{10 July 2014 . 3:16:52 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 3:17:21 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----QUIT/NOSAVE----{10 July 2014 . 1:19:47 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----STARTUP----{10 July 2014 . 5:00:09 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 7/10/2014 17:01'! OSLockTest | lock | lock := OSLock new. lock release.! ! ----QUIT----{10 July 2014 . 5:01:29 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----STARTUP----{10 July 2014 . 5:04:01 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 5:05:51 pm} Squeak4.5-benchmarks.image priorSource: 1254571! ----STARTUP----{10 July 2014 . 5:35:54 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 
5:36:06 pm} Squeak4.5-benchmarks.image priorSource: 1254571! ----STARTUP----{10 July 2014 . 5:44:18 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{10 July 2014 . 5:44:25 pm} Squeak4.5-12568.image priorSource: 1254571! ----QUIT/NOSAVE----{10 July 2014 . 6:11:07 pm} Squeak4.5-12568.image priorSource: 1254571! ----STARTUP----{10 July 2014 . 6:11:32 pm} as /Users/conrad/Repositories/master-project/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{10 July 2014 . 6:22:16 pm} Squeak4.5-12568.image priorSource: 1254571! \ No newline at end of file + waitingForPair lock. "SPyVM print: 'Chameneos met another one and releases lock' , self color." ]! ! ----QUIT----{10 July 2014 . 3:13 pm} Squeak4.5-benchmarks.image priorSource: 1252802! ----STARTUP----{10 July 2014 . 3:16:52 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 3:17:21 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----QUIT/NOSAVE----{10 July 2014 . 1:19:47 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----STARTUP----{10 July 2014 . 5:00:09 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'hh 7/10/2014 17:01'! OSLockTest | lock | lock := OSLock new. lock release.! ! ----QUIT----{10 July 2014 . 5:01:29 pm} Squeak4.5-benchmarks.image priorSource: 1253908! ----STARTUP----{10 July 2014 . 5:04:01 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 5:05:51 pm} Squeak4.5-benchmarks.image priorSource: 1254571! ----STARTUP----{10 July 2014 . 5:35:54 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/images/Squeak4.5-benchmarks.image! ----QUIT/NOSAVE----{10 July 2014 . 5:36:06 pm} Squeak4.5-benchmarks.image priorSource: 1254571! ----STARTUP----{10 July 2014 . 
5:44:18 pm} as /home/hub/hpi/stm/src/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{10 July 2014 . 5:44:25 pm} Squeak4.5-12568.image priorSource: 1254571! ----QUIT/NOSAVE----{10 July 2014 . 6:11:07 pm} Squeak4.5-12568.image priorSource: 1254571! ----STARTUP----{10 July 2014 . 6:11:32 pm} as /Users/conrad/Repositories/master-project/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{10 July 2014 . 6:22:16 pm} Squeak4.5-12568.image priorSource: 1254571! ----STARTUP----{17 July 2014 . 10:51:14 am} as /Users/conrad/Repositories/master-project/lang-smalltalk/images/Squeak4.5-12568.image! ----QUIT/NOSAVE----{17 July 2014 . 10:51:23 am} Squeak4.5-12568.image priorSource: 1254571! ----STARTUP----{17 July 2014 . 10:51:30 am} as /Users/conrad/Repositories/master-project/lang-smalltalk/images/Squeak4.5-12568.image! Smalltalk arguments! !Integer methodsFor: '*SPy-Benchmarks' stamp: 'cc 7/17/2014 10:52'! argTest | | SPyVM print: 'Arguments were', Smalltalk arguments.! ! ----SNAPSHOT----{17 July 2014 . 10:54:14 am} Squeak4.5-12568.image priorSource: 1254571! 
\ No newline at end of file diff --git a/images/Squeak4.5-12568.image b/images/Squeak4.5-12568.image index 72cb111b874a0858232e56cb11d5c95ddf8337eb..421974741f7d894e680a98bf8e91400220498d7a GIT binary patch [cut] diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -125,8 +125,8 @@ -r|--run [code string] -b|--benchmark [code string] -p|--poll_events - -s|--smalltalk-args [argument to pass] [image path, default: Squeak.image] + [-- [SMALLTALK_ARG [SMALLTALK_ARG ...]]] """ % argv[0] @@ -146,10 +146,13 @@ code = None as_benchmark = False smalltalk_args = [] + parse_smalltalk_args = False while idx < len(argv): arg = argv[idx] - if arg in ["-h", "--help"]: + if parse_smalltalk_args: + smalltalk_args.append(argv[idx]) + elif arg in ["-h", "--help"]: _usage(argv) return 0 elif arg in ["-j", "--jit"]: @@ -183,9 +186,8 @@ code = argv[idx + 1] as_benchmark = True idx += 1 - elif arg in ["-s", "--smalltalk-args"]: - smalltalk_args.append(argv[idx + 1]) - idx += 1 + elif arg in ["--"]: + parse_smalltalk_args = True elif path is None: path = argv[idx] else: From noreply at buildbot.pypy.org Thu Jul 17 15:44:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 17 Jul 2014 15:44:43 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Tweaks Message-ID: <20140717134443.21D961C0091@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5361:49a0972a00d9 Date: 2014-07-17 15:44 +0200 http://bitbucket.org/pypy/extradoc/changeset/49a0972a00d9/ Log: Tweaks diff --git a/talk/ep2014/stm/talk.html b/talk/ep2014/stm/talk.html --- a/talk/ep2014/stm/talk.html +++ b/talk/ep2014/stm/talk.html @@ -383,6 +383,18 @@

          Part 1 - Intro and Current Status

          +
          +

          Introduction

          +
            +
          • PyPy-STM: Software Transactional Memory
          • +
          • On-going research project:
              +
            • by Remi Meier and myself
            • +
            • helped by crowdfunding, thanks to all donors
            • +
            +
          • +
          • Started as a EuroPython 2011 lightning talk
          • +
          +

          Why is there a GIL?

            @@ -495,10 +507,8 @@
            • application-level locks still needed...
            • but can be very coarse:
                -
              • the idea is to make sure, internally, that one transaction -covers the whole time during which the lock was acquired
              • even two big transactions can hopefully run in parallel
              • -
              • even if they both acquire and release the same lock
              • +
              • even if they both acquire and release the same lock
            @@ -511,6 +521,7 @@

            Demo 1

              +
            • "Twisted apps made parallel out of the box"
            • Bottle web server
            @@ -534,6 +545,7 @@
            • current status:
              • basics work
              • +
              • best case 25-40% overhead (much better than originally planned)
              • tons of things to improve
              • tons of things to improve
              • tons of things to improve
              • @@ -561,6 +573,10 @@
              • as long as they do multiple things that are "often independent"
            • +
            • Keep locks coarse-grained:
                +
              • need to track and fix issues in case of systematic conflicts
              • +
              +
          diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst --- a/talk/ep2014/stm/talk.rst +++ b/talk/ep2014/stm/talk.rst @@ -17,6 +17,19 @@ --------------------------------- +Introduction +------------ + +* PyPy-STM: Software Transactional Memory + +* On-going research project: + + - by Remi Meier and myself + - helped by crowdfunding, thanks to all donors + +* Started as a EuroPython 2011 lightning talk + + Why is there a GIL? ------------------- @@ -132,12 +145,9 @@ * but *can be very coarse:* - - the idea is to make sure, internally, that one transaction - covers the whole time during which the lock was acquired - - even two big transactions can hopefully run in parallel - - even if they both acquire and release the *same* lock + - even if they both *acquire and release the same lock* Big Point @@ -149,6 +159,8 @@ Demo 1 ------ +* "Twisted apps made parallel out of the box" + * Bottle web server @@ -173,6 +185,7 @@ * current status: - basics work + - best case 25-40% overhead (much better than originally planned) - tons of things to improve - tons of things to improve - tons of things to improve @@ -201,6 +214,9 @@ - as long as they do multiple things that are "often independent" +* Keep locks coarse-grained: + + - need to track and fix issues in case of systematic conflicts Part 2 - Under The Hood From noreply at buildbot.pypy.org Thu Jul 17 22:14:26 2014 From: noreply at buildbot.pypy.org (yuyichao) Date: Thu, 17 Jul 2014 22:14:26 +0200 (CEST) Subject: [pypy-commit] pypy py3k-posix-decode: fix ascii decoding error in the posix module Message-ID: <20140717201426.B3E901C12F0@cobra.cs.uni-duesseldorf.de> Author: Yichao Yu Branch: py3k-posix-decode Changeset: r72465:9014f04de7c5 Date: 2014-07-09 12:41 +0800 http://bitbucket.org/pypy/pypy/changeset/9014f04de7c5/ Log: fix ascii decoding error in the posix module diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ 
b/pypy/module/posix/interp_posix.py @@ -488,8 +488,7 @@ cur = os.getlogin() except OSError, e: raise wrap_oserror(space, e) - else: - return space.wrap(cur) + return space.fsdecode(space.wrapbytes(cur)) # ____________________________________________________________ @@ -693,14 +692,21 @@ except OSError, e: raise wrap_oserror(space, e) - at unwrap_spec(path='fsencode') -def readlink(space, path): +def readlink(space, w_path): "Return a string representing the path to which the symbolic link points." + is_unicode = space.isinstance_w(w_path, space.w_unicode) + if is_unicode: + path = space.fsencode_w(w_path) + else: + path = space.bytes0_w(w_path) try: result = os.readlink(path) except OSError, e: - raise wrap_oserror(space, e, path) - return space.wrap(result) + raise wrap_oserror2(space, e, w_path) + w_result = space.wrapbytes(result) + if is_unicode: + return space.fsdecode(w_result) + return w_result before_fork_hooks = [] after_fork_child_hooks = [] @@ -890,7 +896,8 @@ r = os.uname() except OSError, e: raise wrap_oserror(space, e) - l_w = [space.wrap(i) for i in [r[0], r[1], r[2], r[3], r[4]]] + l_w = [space.fsdecode(space.wrapbytes(i)) + for i in [r[0], r[1], r[2], r[3], r[4]]] return space.newtuple(l_w) def getuid(space): @@ -1217,7 +1224,7 @@ @unwrap_spec(fd=c_int) def ttyname(space, fd): try: - return space.wrap(os.ttyname(fd)) + return space.fsdecode(space.wrapbytes(os.ttyname(fd))) except OSError, e: raise wrap_oserror(space, e) @@ -1352,7 +1359,7 @@ Return the name of the controlling terminal for this process. 
""" - return space.wrap(os.ctermid()) + return space.fsdecode(space.wrapbytes(os.ctermid())) @unwrap_spec(fd=c_int) def device_encoding(space, fd): From noreply at buildbot.pypy.org Thu Jul 17 22:14:28 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 17 Jul 2014 22:14:28 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Merged in yuyichao/pypy/py3k-posix-decode (pull request #247) Message-ID: <20140717201428.1F1991C12F0@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72466:706409dc5979 Date: 2014-07-17 13:14 -0700 http://bitbucket.org/pypy/pypy/changeset/706409dc5979/ Log: Merged in yuyichao/pypy/py3k-posix-decode (pull request #247) fix ascii decoding error in the posix module diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -488,8 +488,7 @@ cur = os.getlogin() except OSError, e: raise wrap_oserror(space, e) - else: - return space.wrap(cur) + return space.fsdecode(space.wrapbytes(cur)) # ____________________________________________________________ @@ -695,14 +694,21 @@ except OSError, e: raise wrap_oserror(space, e) - at unwrap_spec(path='fsencode') -def readlink(space, path): +def readlink(space, w_path): "Return a string representing the path to which the symbolic link points." 
+ is_unicode = space.isinstance_w(w_path, space.w_unicode) + if is_unicode: + path = space.fsencode_w(w_path) + else: + path = space.bytes0_w(w_path) try: result = os.readlink(path) except OSError, e: - raise wrap_oserror(space, e, path) - return space.wrap(result) + raise wrap_oserror2(space, e, w_path) + w_result = space.wrapbytes(result) + if is_unicode: + return space.fsdecode(w_result) + return w_result before_fork_hooks = [] after_fork_child_hooks = [] @@ -892,7 +898,8 @@ r = os.uname() except OSError, e: raise wrap_oserror(space, e) - l_w = [space.wrap(i) for i in [r[0], r[1], r[2], r[3], r[4]]] + l_w = [space.fsdecode(space.wrapbytes(i)) + for i in [r[0], r[1], r[2], r[3], r[4]]] return space.newtuple(l_w) def getuid(space): @@ -1219,7 +1226,7 @@ @unwrap_spec(fd=c_int) def ttyname(space, fd): try: - return space.wrap(os.ttyname(fd)) + return space.fsdecode(space.wrapbytes(os.ttyname(fd))) except OSError, e: raise wrap_oserror(space, e) @@ -1354,7 +1361,7 @@ Return the name of the controlling terminal for this process. """ - return space.wrap(os.ctermid()) + return space.fsdecode(space.wrapbytes(os.ctermid())) @unwrap_spec(fd=c_int) def device_encoding(space, fd): From noreply at buildbot.pypy.org Thu Jul 17 22:14:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 17 Jul 2014 22:14:45 +0200 (CEST) Subject: [pypy-commit] pypy py3k-posix-decode: Close branch py3k-posix-decode Message-ID: <20140717201445.70AE41C12F0@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-posix-decode Changeset: r72467:42120ecaf5ae Date: 2014-07-17 13:14 -0700 http://bitbucket.org/pypy/pypy/changeset/42120ecaf5ae/ Log: Close branch py3k-posix-decode From noreply at buildbot.pypy.org Fri Jul 18 14:08:22 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:22 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Filed in BalloonEngine class form the image on the vref branch. 
Message-ID: <20140718120822.581AC1C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r897:f60624a57139 Date: 2014-07-10 15:50 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f60624a57139/ Log: Filed in BalloonEngine class form the image on the vref branch. This removes some errors at startup, but also adds some new errors. Image feels smoother, fps is up. diff too long, truncating to 2000 out of 2375 lines diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes --- a/images/Squeak4.5-noBitBlt.changes +++ b/images/Squeak4.5-noBitBlt.changes @@ -12208,4 +12208,144 @@ ]. "self footer." - ^ self! ! ----QUIT----{2 April 2014 . 11:59:41 am} Squeak4.5-noBitBlt.image priorSource: 15812182! ----STARTUP----{3 July 2014 . 11:14:14 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! SystemOrganization addCategory: #Anton! Object subclass: #AntonMatrix instanceVariableNames: 'fields columns rows' classVariableNames: '' poolDictionaries: '' category: 'Anton'! Object subclass: #AntonMatrix instanceVariableNames: 'fields columns rows' classVariableNames: '' poolDictionaries: '' category: 'Anton'! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! at: point ^ self x: point x y: point y! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:16'! at: point put: number ^ self x: point x y: point y put: number! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:12'! columns ^ columns! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:55'! fieldsDo: block (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | block value: row value: column ] ].! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:56'! fill: block self fieldsDo: [ :x :y | self x: x y: y put: (block value: x value: y) ].! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:12'! rows ^ rows! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! 
x: x y: y ^ fields at: (self offsetX: x y: y)! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! x: x y: y put: number fields at: (self offsetX: x y: y) put: number! ! !AntonMatrix methodsFor: 'private' stamp: 'ag 7/3/2014 10:44'! offsetX: x y: y ^ (y-1) * columns + x! ! !AntonMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 10:43'! initializeFields: f rows: r rows := r. (f size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. columns := f size / r. fields := f.! ! !AntonMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 10:18'! initializeRows: r columns: c rows := r. columns := c. fields := Array new: rows * columns.! ! !AntonMatrix methodsFor: 'math' stamp: 'ag 7/3/2014 10:30'! * other | result | (self columns = other rows and: [ self rows = other columns ]) ifFalse: [ ^ self error: 'Cannot multiply, wrong dimensions.' ]. result := AntonMatrix rows: self rows columns: other columns. (1 to: self rows) do: [ :row | (1 to: other columns) do: [ :column | | value | value := 0. (1 to: self columns) do: [ :i | value := value + ((self x: i y: row) * (other x: column y: i)) ]. result x: column y: row put: value ] ]. ^ result! ! !AntonMatrix methodsFor: 'printing' stamp: 'ag 7/3/2014 10:47'! printOn: s (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | s nextPutAll: (self x: column y: row) asString. s nextPutAll: ' ' ]. s nextPutAll: String cr ].! ! !AntonMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:01'! fillRandomFloats: generator | max | max := SmallInteger maxVal sqrt asInteger. self fill: [ :x :y | max atRandom: generator ].! ! !AntonMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:02'! fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." self fill: [ :x :y | generator next * 100 ].! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! AntonMatrix class instanceVariableNames: ''! 
!AntonMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:13'! benchFloats: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := AntonMatrix rows: r columns: c. b := AntonMatrix rows: r columns: c. a fillRandomFloats: generator. b fillRandomFloats: generator. mults timesRepeat: [ a * b ] ].! ! !AntonMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:03'! benchInts: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := AntonMatrix rows: r columns: c. b := AntonMatrix rows: r columns: c. a fillRandomInts: generator. b fillRandomInts: generator. mults timesRepeat: [ a * b ] ].! ! !AntonMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 10:35'! fields: fields rows: r ^ self basicNew initializeFields: fields rows: r! ! !AntonMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 10:19'! rows: r columns: c ^ self basicNew initializeRows: r columns: c; yourself! ! Object subclass: #AntonMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Anton'! Object subclass: #AntonMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Anton'! !AntonMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:19'! benchFloats AntonMatrix benchFloats: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! !AntonMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:18'! benchInts AntonMatrix benchInts: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! AntonMatrixBenchmark class instanceVariableNames: ''! !AntonMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:24'! 
config: spec | tokens nextInt | tokens := spec findTokens: ' '. nextInt := [ :default | (tokens ifEmpty: [ nil ] ifNotEmptyDo: #removeFirst) asInteger ifNil: [ default ] ]. NumOfRuns := nextInt value: 10. Mults := nextInt value: 100. Rows := nextInt value: 100. Cols := nextInt value: 100.! ! !AntonMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:20'! initialize super initialize. NumOfRuns := 10. Mults := 100. Cols := 100. Rows := 100.! ! AntonMatrixBenchmark initialize! ----End fileIn of C:\Dev\lang-smalltalk\Anton.st----! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:27'! benchMatrixInt: spec AntonMatrixBenchmark config: spec. ! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:28' prior: 49374034! benchMatrixInt: spec AntonMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'AntonMatrix' iterations: self benchmarkIterations! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:28'! benchMatrix: spec AntonMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'AntonMatrix' iterations: self benchmarkIterations! ! SystemOrganization renameCategory: #Anton toBe: #'Matrix-Benchmarks'! Smalltalk renameClassNamed: #AntonMatrix as: #BenchMatrix! Object subclass: #SimpleMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Matrix-Benchmarks'! Smalltalk removeClassNamed: #SimpleMatrixBenchmark! Smalltalk renameClassNamed: #AntonMatrixBenchmark as: #SimpleMatrixBenchmark! SmallInteger removeSelector: #benchMatrixInt:! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:30' prior: 49374406! benchMatrix: spec SimpleMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'SimpleMatrixBenchmark' iterations: self benchmarkIterations! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/3/2014 11:31' prior: 49367383! allBenchmarks ^ { CPBAStarBenchmark. CPBBinaryTreeBenchmark. 
CPBBlowfishSuite. CPBChameneosBenchmark. CPBDeltaBlueBenchmark. CPBMandelbrotBenchmarkSuite. CPBNBodyBenchmark. "CPBPolymorphyBenchmark." "Commented out because it compiled code in setup." CPBRichardsBenchmark. CPBSplayTreeBenchmark. SimpleMatrixBenchmark. }! ! ----QUIT----{3 July 2014 . 11:32:10 am} Squeak4.5-noBitBlt.image priorSource: 15813551! ----STARTUP----{3 July 2014 . 11:34:49 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! SMarkSuite subclass: #SimpleMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Matrix-Benchmarks'! !SimpleMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:37' prior: 49372902! benchFloats BenchMatrix benchFloats: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! !SimpleMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:37' prior: 49373080! benchInts BenchMatrix benchInts: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! SimpleMatrixBenchmark config: '5 5 5 5'! Benchmarks runMatching: 'SimpleMatrixBenchmark' iterations: 1! !SimpleMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:38' prior: 49373773! initialize super initialize. NumOfRuns := 10. Mults := 10. Cols := 10. Rows := 10.! ! self initialize! !SimpleMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:39' prior: 49376651! initialize "self initialize" super initialize. NumOfRuns := 10. Mults := 10. Cols := 10. Rows := 10.! ! ----QUIT----{3 July 2014 . 11:39:08 am} Squeak4.5-noBitBlt.image priorSource: 15821257! ----STARTUP----{3 July 2014 . 11:48:06 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !BenchMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:49' prior: 49371447! benchFloats: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := BenchMatrix rows: r columns: c. 
b := BenchMatrix rows: c columns: r. a fillRandomFloats: generator. b fillRandomFloats: generator. mults timesRepeat: [ a * b ] ].! ! !BenchMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:49' prior: 49371861! benchInts: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := BenchMatrix rows: r columns: c. b := BenchMatrix rows: c columns: r. a fillRandomInts: generator. b fillRandomInts: generator. mults timesRepeat: [ a * b ] ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 11:51' prior: 49368902! fieldsDo: block (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | block value: column value: row ] ].! ! 1 benchMatrix: '1 10 100 10'! 1 benchMatrix: '1 10 100 10'! 1 benchMatrix: '1 10 10 100'! 1 benchMatrix: '1 10 10 1000'! ----QUIT----{3 July 2014 . 11:51:44 am} Squeak4.5-noBitBlt.image priorSource: 15822543! ----STARTUP----{3 July 2014 . 12:30:20 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! Array variableSubclass: #BenchMatrix instanceVariableNames: 'columns rows' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! BenchMatrix removeSelector: #at:! BenchMatrix removeSelector: #at:put:! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:31' prior: 49368813! columns ^ self size / rows! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:31' prior: 49379008! columns ^ self size / rows! ! 11/2! 11//2! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:33' prior: 49378103! fieldsDo: block (1 to: self size) do: [ :i | block value: i \\ rows value: i // rows ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49379251! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows value: i // rows ].! ! 
!BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49369340! x: x y: y ^ self at: (self offsetX: x y: y)! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49369457! x: x y: y put: number self at: (self offsetX: x y: y) put: number! ! a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2! b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3! a! a! a rows! a columns! BenchMatrix removeSelector: #initializeRows:columns:! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:36'! initializeRows: r rows := r.! ! BenchMatrix removeSelector: #initializeFields:rows:! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:37' prior: 49372274! fields: fields rows: r | columns f rows | rows := r. (f size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. columns := f size / r. " fields := f." ^ self basicNew initializeFields: fields rows: r! ! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:37'! rows: r rows := r.! ! BenchMatrix removeSelector: #initializeRows:! Array withAll: #(1 2 3)! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:39' prior: 49380248! fields: fields rows: r (fields size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. ^ (self withAll: fields) rows: r; yourself! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:39' prior: 49379122! columns ^ self size // rows! ! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:40' prior: 49372433! rows: r columns: c ^ (self new: r * c) rows: r; fillZeros; yourself! ! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:40'! fillZeros self fill: [ :x :y | 0 ].! ! i! i \\ rows! i //rows! rows! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:42' prior: 49379428! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new.! 
x fieldsDo: [ :x :y | o add: x -> y ].! o! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:43' prior: 49381404! fieldsDo: block 0 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! x := BenchMatrix rows: 4 columns: 3.! x! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:44' prior: 49381705! fieldsDo: block 0 to: self size + 1 do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! o size! x size! o size! o asSet size! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:46' prior: 49382006! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! o size! o ! 1 \\ 4! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:46' prior: 49382353! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows value: i // rows + 1 ].! ! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o size! o ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:50' prior: 49382634! fieldsDo: block | columns | columns := self columns. 1 to: self size do: [ :i | block value: i \\ columns value: i // columns + 1 ].! ! Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows columns' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows columns' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:51' prior: 49382898! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ columns value: i // columns + 1 ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:51' prior: 49380969! columns ^ columns! ! 
!BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:51' prior: 49380543! rows: r rows := r. columns := self size // r.! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new. x fieldsDo: [ :x :y | o add: x -> y ]. ! ox! o! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:52' prior: 49381247! fillZeros self atAllPut: 0.! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:53' prior: 49383432! fieldsDo: block 0 to: self size - 1 do: [ :i | block value: i \\ columns + 1 value: i // columns + 1 ].! ! o := OrderedCollection new. x fieldsDo: [ :x :y | o add: x -> y ].! o size! o! a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2! b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3! !BenchMatrix methodsFor: 'math' stamp: 'ag 7/3/2014 12:55' prior: 49370092! * other | result | (self columns = other rows and: [ self rows = other columns ]) ifFalse: [ ^ self error: 'Cannot multiply, wrong dimensions.' ]. result := BenchMatrix rows: self rows columns: other columns. (1 to: self rows) do: [ :row | (1 to: other columns) do: [ :column | | value | value := 0. (1 to: self columns) do: [ :i | value := value + ((self x: i y: row) * (other x: column y: i)) ]. result x: column y: row put: value ] ]. ^ result! ! a * b! self assert: (Array withAll: (a * b)) = #(7 8 9 2)! BenchMatrix class organization addCategory: #test! !BenchMatrix class methodsFor: 'test' stamp: 'ag 7/3/2014 12:57'! tinyTest "self tinyTest" | a b | a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2. b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3. self assert: (Array withAll: (a * b)) = #(7 8 9 2).! ! self tinyTest! 1 benchMatrix: '1 3 5 5'! 1 benchMatrix: '1 10 5 5'! ----QUIT----{3 July 2014 . 12:58:52 pm} Squeak4.5-noBitBlt.image priorSource: 15823926! ----STARTUP----{3 July 2014 . 1:05:04 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. 
a fillRandomInts: generator. b fillRandomInts: generator.! (a collect: #class) asSet! (b collect: #class) asSet! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator.! (b collect: #class) asSet! (a collect: #class) asSet! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:08' prior: 49371100! fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." | max | max := SmallInteger maxVal sqrt asInteger. self fill: [ :x :y | max atRandom: generator ]. ! ! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:08' prior: 49370897! fillRandomFloats: generator self fill: [ :x :y | generator next * 100 ].! ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator. (a collect: #class) asSet ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator. (b collect: #class) asSet ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator. ! c := a * b! (c collect: #class) asSet! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:09' prior: 49386143! fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." | max | max := 1000. self fill: [ :x :y | max atRandom: generator ]. ! ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator. c := a * b. (c collect: #class) asSet! ----QUIT----{3 July 2014 . 1:09:37 pm} Squeak4.5-noBitBlt.image priorSource: 15830973! 
----STARTUP----{3 July 2014 . 8:26:43 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator.! c := a * b.! (c collect: #class) asSet! c! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 20:27'! testMatrix ! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 20:28' prior: 49388134! testMatrix | a b c generator | a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator. c := a * b. ^ (c collect: #class) asSet asString! ! 5 testMatrix! ----SNAPSHOT----{3 July 2014 . 8:28:40 pm} Squeak4.5-noBitBlt.1.image priorSource: 15833215! ----QUIT----{3 July 2014 . 8:28:49 pm} Squeak4.5-noBitBlt.1.image priorSource: 15834093! ----STARTUP----{3 July 2014 . 9:02:43 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 21:03' prior: 49383727! rows: r rows := r asFloat. columns := (self size // r) asFloat.! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 21:03' prior: 49369257! rows ^ rows asInteger! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 21:03' prior: 49383617! columns ^ columns asInteger! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 21:04' prior: 49384103! fieldsDo: block 0 to: self size - 1 do: [ :i | block value: i \\ self columns + 1 value: i // self columns + 1 ].! ! ----QUIT----{3 July 2014 . 9:04:33 pm} Squeak4.5-noBitBlt.image priorSource: 15834187! \ No newline at end of file + ^ self! ! ----QUIT----{2 April 2014 . 11:59:41 am} Squeak4.5-noBitBlt.image priorSource: 15812182! ----STARTUP----{3 July 2014 . 11:14:14 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! SystemOrganization addCategory: #Anton! 
Object subclass: #AntonMatrix instanceVariableNames: 'fields columns rows' classVariableNames: '' poolDictionaries: '' category: 'Anton'! Object subclass: #AntonMatrix instanceVariableNames: 'fields columns rows' classVariableNames: '' poolDictionaries: '' category: 'Anton'! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! at: point ^ self x: point x y: point y! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:16'! at: point put: number ^ self x: point x y: point y put: number! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:12'! columns ^ columns! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:55'! fieldsDo: block (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | block value: row value: column ] ].! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:56'! fill: block self fieldsDo: [ :x :y | self x: x y: y put: (block value: x value: y) ].! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:12'! rows ^ rows! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! x: x y: y ^ fields at: (self offsetX: x y: y)! ! !AntonMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 10:17'! x: x y: y put: number fields at: (self offsetX: x y: y) put: number! ! !AntonMatrix methodsFor: 'private' stamp: 'ag 7/3/2014 10:44'! offsetX: x y: y ^ (y-1) * columns + x! ! !AntonMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 10:43'! initializeFields: f rows: r rows := r. (f size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. columns := f size / r. fields := f.! ! !AntonMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 10:18'! initializeRows: r columns: c rows := r. columns := c. fields := Array new: rows * columns.! ! !AntonMatrix methodsFor: 'math' stamp: 'ag 7/3/2014 10:30'! * other | result | (self columns = other rows and: [ self rows = other columns ]) ifFalse: [ ^ self error: 'Cannot multiply, wrong dimensions.' ]. 
result := AntonMatrix rows: self rows columns: other columns. (1 to: self rows) do: [ :row | (1 to: other columns) do: [ :column | | value | value := 0. (1 to: self columns) do: [ :i | value := value + ((self x: i y: row) * (other x: column y: i)) ]. result x: column y: row put: value ] ]. ^ result! ! !AntonMatrix methodsFor: 'printing' stamp: 'ag 7/3/2014 10:47'! printOn: s (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | s nextPutAll: (self x: column y: row) asString. s nextPutAll: ' ' ]. s nextPutAll: String cr ].! ! !AntonMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:01'! fillRandomFloats: generator | max | max := SmallInteger maxVal sqrt asInteger. self fill: [ :x :y | max atRandom: generator ].! ! !AntonMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:02'! fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." self fill: [ :x :y | generator next * 100 ].! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! AntonMatrix class instanceVariableNames: ''! !AntonMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:13'! benchFloats: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := AntonMatrix rows: r columns: c. b := AntonMatrix rows: r columns: c. a fillRandomFloats: generator. b fillRandomFloats: generator. mults timesRepeat: [ a * b ] ].! ! !AntonMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:03'! benchInts: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := AntonMatrix rows: r columns: c. b := AntonMatrix rows: r columns: c. a fillRandomInts: generator. b fillRandomInts: generator. mults timesRepeat: [ a * b ] ].! ! !AntonMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 10:35'! 
fields: fields rows: r ^ self basicNew initializeFields: fields rows: r! ! !AntonMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 10:19'! rows: r columns: c ^ self basicNew initializeRows: r columns: c; yourself! ! Object subclass: #AntonMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Anton'! Object subclass: #AntonMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Anton'! !AntonMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:19'! benchFloats AntonMatrix benchFloats: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! !AntonMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:18'! benchInts AntonMatrix benchInts: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! AntonMatrixBenchmark class instanceVariableNames: ''! !AntonMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:24'! config: spec | tokens nextInt | tokens := spec findTokens: ' '. nextInt := [ :default | (tokens ifEmpty: [ nil ] ifNotEmptyDo: #removeFirst) asInteger ifNil: [ default ] ]. NumOfRuns := nextInt value: 10. Mults := nextInt value: 100. Rows := nextInt value: 100. Cols := nextInt value: 100.! ! !AntonMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:20'! initialize super initialize. NumOfRuns := 10. Mults := 100. Cols := 100. Rows := 100.! ! AntonMatrixBenchmark initialize! ----End fileIn of C:\Dev\lang-smalltalk\Anton.st----! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:27'! benchMatrixInt: spec AntonMatrixBenchmark config: spec. ! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:28' prior: 49374034! benchMatrixInt: spec AntonMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'AntonMatrix' iterations: self benchmarkIterations! ! 
!SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:28'! benchMatrix: spec AntonMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'AntonMatrix' iterations: self benchmarkIterations! ! SystemOrganization renameCategory: #Anton toBe: #'Matrix-Benchmarks'! Smalltalk renameClassNamed: #AntonMatrix as: #BenchMatrix! Object subclass: #SimpleMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Matrix-Benchmarks'! Smalltalk removeClassNamed: #SimpleMatrixBenchmark! Smalltalk renameClassNamed: #AntonMatrixBenchmark as: #SimpleMatrixBenchmark! SmallInteger removeSelector: #benchMatrixInt:! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 11:30' prior: 49374406! benchMatrix: spec SimpleMatrixBenchmark config: spec. ^ Benchmarks runMatching: 'SimpleMatrixBenchmark' iterations: self benchmarkIterations! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/3/2014 11:31' prior: 49367383! allBenchmarks ^ { CPBAStarBenchmark. CPBBinaryTreeBenchmark. CPBBlowfishSuite. CPBChameneosBenchmark. CPBDeltaBlueBenchmark. CPBMandelbrotBenchmarkSuite. CPBNBodyBenchmark. "CPBPolymorphyBenchmark." "Commented out because it compiled code in setup." CPBRichardsBenchmark. CPBSplayTreeBenchmark. SimpleMatrixBenchmark. }! ! ----QUIT----{3 July 2014 . 11:32:10 am} Squeak4.5-noBitBlt.image priorSource: 15813551! ----STARTUP----{3 July 2014 . 11:34:49 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! SMarkSuite subclass: #SimpleMatrixBenchmark instanceVariableNames: '' classVariableNames: 'Cols Mults NumOfRuns Rows' poolDictionaries: '' category: 'Matrix-Benchmarks'! !SimpleMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:37' prior: 49372902! benchFloats BenchMatrix benchFloats: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! !SimpleMatrixBenchmark methodsFor: 'bench' stamp: 'ag 7/3/2014 11:37' prior: 49373080! 
benchInts BenchMatrix benchInts: NumOfRuns multiplicationsPerRun: Mults rows: Rows columns: Cols.! ! SimpleMatrixBenchmark config: '5 5 5 5'! Benchmarks runMatching: 'SimpleMatrixBenchmark' iterations: 1! !SimpleMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:38' prior: 49373773! initialize super initialize. NumOfRuns := 10. Mults := 10. Cols := 10. Rows := 10.! ! self initialize! !SimpleMatrixBenchmark class methodsFor: 'initialization' stamp: 'ag 7/3/2014 11:39' prior: 49376651! initialize "self initialize" super initialize. NumOfRuns := 10. Mults := 10. Cols := 10. Rows := 10.! ! ----QUIT----{3 July 2014 . 11:39:08 am} Squeak4.5-noBitBlt.image priorSource: 15821257! ----STARTUP----{3 July 2014 . 11:48:06 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !BenchMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:49' prior: 49371447! benchFloats: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := BenchMatrix rows: r columns: c. b := BenchMatrix rows: c columns: r. a fillRandomFloats: generator. b fillRandomFloats: generator. mults timesRepeat: [ a * b ] ].! ! !BenchMatrix class methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 11:49' prior: 49371861! benchInts: numOfRuns multiplicationsPerRun: mults rows: r columns: c | generator | generator := Random seed: 23456432. numOfRuns timesRepeat: [ | a b | a := BenchMatrix rows: r columns: c. b := BenchMatrix rows: c columns: r. a fillRandomInts: generator. b fillRandomInts: generator. mults timesRepeat: [ a * b ] ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 11:51' prior: 49368902! fieldsDo: block (1 to: self rows) do: [ :row | (1 to: self columns) do: [ :column | block value: column value: row ] ].! ! 1 benchMatrix: '1 10 100 10'! 1 benchMatrix: '1 10 100 10'! 1 benchMatrix: '1 10 10 100'! 1 benchMatrix: '1 10 10 1000'! ----QUIT----{3 July 2014 . 
11:51:44 am} Squeak4.5-noBitBlt.image priorSource: 15822543! ----STARTUP----{3 July 2014 . 12:30:20 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! Array variableSubclass: #BenchMatrix instanceVariableNames: 'columns rows' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! BenchMatrix removeSelector: #at:! BenchMatrix removeSelector: #at:put:! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:31' prior: 49368813! columns ^ self size / rows! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:31' prior: 49379008! columns ^ self size / rows! ! 11/2! 11//2! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:33' prior: 49378103! fieldsDo: block (1 to: self size) do: [ :i | block value: i \\ rows value: i // rows ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49379251! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows value: i // rows ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49369340! x: x y: y ^ self at: (self offsetX: x y: y)! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:34' prior: 49369457! x: x y: y put: number self at: (self offsetX: x y: y) put: number! ! a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2! b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3! a! a! a rows! a columns! BenchMatrix removeSelector: #initializeRows:columns:! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:36'! initializeRows: r rows := r.! ! BenchMatrix removeSelector: #initializeFields:rows:! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:37' prior: 49372274! fields: fields rows: r | columns f rows | rows := r. (f size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. columns := f size / r. " fields := f." 
^ self basicNew initializeFields: fields rows: r! ! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:37'! rows: r rows := r.! ! BenchMatrix removeSelector: #initializeRows:! Array withAll: #(1 2 3)! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:39' prior: 49380248! fields: fields rows: r (fields size \\ r) = 0 ifFalse: [ self error: 'Illegal initialization.' ]. ^ (self withAll: fields) rows: r; yourself! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:39' prior: 49379122! columns ^ self size // rows! ! !BenchMatrix class methodsFor: 'instance creation' stamp: 'ag 7/3/2014 12:40' prior: 49372433! rows: r columns: c ^ (self new: r * c) rows: r; fillZeros; yourself! ! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:40'! fillZeros self fill: [ :x :y | 0 ].! ! i! i \\ rows! i //rows! rows! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:42' prior: 49379428! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:43' prior: 49381404! fieldsDo: block 0 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! x := BenchMatrix rows: 4 columns: 3.! x! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:44' prior: 49381705! fieldsDo: block 0 to: self size + 1 do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o! o size! x size! o size! o asSet size! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:46' prior: 49382006! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows + 1 value: i // rows + 1 ].! ! o := OrderedCollection new.! 
x fieldsDo: [ :x :y | o add: x -> y ].! o! o size! o ! 1 \\ 4! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:46' prior: 49382353! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ rows value: i // rows + 1 ].! ! o := OrderedCollection new.! x fieldsDo: [ :x :y | o add: x -> y ].! o size! o ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:50' prior: 49382634! fieldsDo: block | columns | columns := self columns. 1 to: self size do: [ :i | block value: i \\ columns value: i // columns + 1 ].! ! Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows columns' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! Array variableSubclass: #BenchMatrix instanceVariableNames: 'rows columns' classVariableNames: '' poolDictionaries: '' category: 'Matrix-Benchmarks'! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:51' prior: 49382898! fieldsDo: block 1 to: self size do: [ :i | block value: i \\ columns value: i // columns + 1 ].! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:51' prior: 49380969! columns ^ columns! ! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:51' prior: 49380543! rows: r rows := r. columns := self size // r.! ! x := BenchMatrix rows: 4 columns: 3.! x! o := OrderedCollection new. x fieldsDo: [ :x :y | o add: x -> y ]. ! ox! o! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 12:52' prior: 49381247! fillZeros self atAllPut: 0.! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 12:53' prior: 49383432! fieldsDo: block 0 to: self size - 1 do: [ :i | block value: i \\ columns + 1 value: i // columns + 1 ].! ! o := OrderedCollection new. x fieldsDo: [ :x :y | o add: x -> y ].! o size! o! a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2! b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3! !BenchMatrix methodsFor: 'math' stamp: 'ag 7/3/2014 12:55' prior: 49370092! 
* other | result | (self columns = other rows and: [ self rows = other columns ]) ifFalse: [ ^ self error: 'Cannot multiply, wrong dimensions.' ]. result := BenchMatrix rows: self rows columns: other columns. (1 to: self rows) do: [ :row | (1 to: other columns) do: [ :column | | value | value := 0. (1 to: self columns) do: [ :i | value := value + ((self x: i y: row) * (other x: column y: i)) ]. result x: column y: row put: value ] ]. ^ result! ! a * b! self assert: (Array withAll: (a * b)) = #(7 8 9 2)! BenchMatrix class organization addCategory: #test! !BenchMatrix class methodsFor: 'test' stamp: 'ag 7/3/2014 12:57'! tinyTest "self tinyTest" | a b | a := BenchMatrix fields: #( 3 2 1 1 0 2 ) rows: 2. b := BenchMatrix fields: #( 1 2 0 1 4 0 ) rows: 3. self assert: (Array withAll: (a * b)) = #(7 8 9 2).! ! self tinyTest! 1 benchMatrix: '1 3 5 5'! 1 benchMatrix: '1 10 5 5'! ----QUIT----{3 July 2014 . 12:58:52 pm} Squeak4.5-noBitBlt.image priorSource: 15823926! ----STARTUP----{3 July 2014 . 1:05:04 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator.! (a collect: #class) asSet! (b collect: #class) asSet! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator.! (b collect: #class) asSet! (a collect: #class) asSet! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:08' prior: 49371100! fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." | max | max := SmallInteger maxVal sqrt asInteger. self fill: [ :x :y | max atRandom: generator ]. ! ! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:08' prior: 49370897! fillRandomFloats: generator self fill: [ :x :y | generator next * 100 ].! ! 
a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator. (a collect: #class) asSet ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomFloats: generator. b fillRandomInts: generator. (b collect: #class) asSet ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator. ! c := a * b! (c collect: #class) asSet! !BenchMatrix methodsFor: 'benchmarking' stamp: 'ag 7/3/2014 13:09' prior: 49386143! fillRandomInts: generator "Fill with SmallInteger values small enough to stay SmallIntegers after multiplication." | max | max := 1000. self fill: [ :x :y | max atRandom: generator ]. ! ! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator. c := a * b. (c collect: #class) asSet! ----QUIT----{3 July 2014 . 1:09:37 pm} Squeak4.5-noBitBlt.image priorSource: 15830973! ----STARTUP----{3 July 2014 . 8:26:43 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator.! c := a * b.! (c collect: #class) asSet! c! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 20:27'! testMatrix ! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/3/2014 20:28' prior: 49388134! testMatrix | a b c generator | a := BenchMatrix rows: 20 columns: 20. b := BenchMatrix rows: 20 columns: 20. generator := Random seed: 13243456. a fillRandomInts: generator. b fillRandomInts: generator. c := a * b. ^ (c collect: #class) asSet asString! ! 5 testMatrix! ----SNAPSHOT----{3 July 2014 . 
8:28:40 pm} Squeak4.5-noBitBlt.1.image priorSource: 15833215! ----QUIT----{3 July 2014 . 8:28:49 pm} Squeak4.5-noBitBlt.1.image priorSource: 15834093! ----STARTUP----{3 July 2014 . 9:02:43 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !BenchMatrix methodsFor: 'initialization' stamp: 'ag 7/3/2014 21:03' prior: 49383727! rows: r rows := r asFloat. columns := (self size // r) asFloat.! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 21:03' prior: 49369257! rows ^ rows asInteger! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 21:03' prior: 49383617! columns ^ columns asInteger! ! !BenchMatrix methodsFor: 'accessing' stamp: 'ag 7/3/2014 21:04' prior: 49384103! fieldsDo: block 0 to: self size - 1 do: [ :i | block value: i \\ self columns + 1 value: i // self columns + 1 ].! ! ----QUIT----{3 July 2014 . 9:04:33 pm} Squeak4.5-noBitBlt.image priorSource: 15834187! ----STARTUP----{10 July 2014 . 3:46:58 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! ----SNAPSHOT----{10 July 2014 . 3:47:56 pm} Squeak4.5-noBitBlt.image priorSource: 15834963! 'From Squeak4.4 of 28 April 2013 [latest update: #12627] on 10 July 2014 at 3:45:28 pm'! Object subclass: #BalloonEngine instanceVariableNames: 'workBuffer span bitBlt forms clipRect destOffset externals aaLevel edgeTransform colorTransform deferred postFlushNeeded' classVariableNames: 'BezierStats BufferCache CacheProtect Counts Debug Times' poolDictionaries: 'BalloonEngineConstants' category: 'Balloon-Engine'! Object subclass: #BalloonEngine instanceVariableNames: 'workBuffer span bitBlt forms clipRect destOffset externals aaLevel edgeTransform colorTransform deferred postFlushNeeded' classVariableNames: 'BezierStats BufferCache CacheProtect Counts Debug Times' poolDictionaries: 'BalloonEngineConstants' category: 'Balloon-Engine'! !BalloonEngine commentStamp: '' prior: 17214650! BalloonEngine is the representative for the Balloon engine inside Squeak. 
For most purposes it should not be used directly but via BalloonCanvas since this ensures proper initialization and is polymorphic with other canvas uses.! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 11/11/1998 23:04' prior: 17214951! aaLevel ^aaLevel ifNil:[1]! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 11/11/1998 23:04' prior: 17215052! aaLevel: anInteger aaLevel := (anInteger min: 4) max: 1.! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 10/29/1998 01:51' prior: 17215183! aaTransform "Return a transformation for the current anti-aliasing level" | matrix | matrix := MatrixTransform2x3 withScale: (self aaLevel) asFloat asPoint. matrix offset: (self aaLevel // 2) asFloat asPoint. ^matrix composedWith:(MatrixTransform2x3 withOffset: destOffset asFloatPoint)! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 10/13/1998 03:04' prior: 17215548! bitBlt ^bitBlt! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 5/28/2000 15:02' prior: 17215636! bitBlt: aBitBlt bitBlt := aBitBlt. bitBlt isNil ifTrue:[^self]. self class primitiveSetBitBltPlugin: bitBlt getPluginName. self clipRect: bitBlt clipRect. bitBlt sourceForm: (Form extent: span size @ 1 depth: 32 bits: span); sourceRect: (0 at 0 extent: 1 at span size); colorMap: (Color colorMapIfNeededFrom: 32 to: bitBlt destForm depth); combinationRule: (bitBlt destForm depth >= 8 ifTrue:[34] ifFalse:[Form paint]).! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 11/1/1998 02:57' prior: 17216136! clipRect ^clipRect! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 10/13/1998 02:44' prior: 17216229! clipRect: aRect clipRect := aRect truncated! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 11/24/1998 15:04' prior: 17216347! colorTransform ^colorTransform! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 11/24/1998 15:04' prior: 17216452! colorTransform: aColorTransform colorTransform := aColorTransform! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 12/30/1998 11:24' prior: 17216592! 
deferred ^deferred! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 12/30/1998 11:24' prior: 17216685! deferred: aBoolean deferred := aBoolean.! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 11/1/1998 02:56' prior: 17216799! destOffset ^destOffset! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 11/12/1998 00:22' prior: 17216896! destOffset: aPoint destOffset := aPoint asIntegerPoint. bitBlt destX: aPoint x; destY: aPoint y.! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 11/25/1998 22:34' prior: 17217068! edgeTransform ^edgeTransform! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 11/25/1998 22:34' prior: 17217171! edgeTransform: aTransform edgeTransform := aTransform.! ! !BalloonEngine methodsFor: 'accessing' stamp: 'ar 10/29/1998 01:51' prior: 17217300! fullTransformFrom: aMatrix | m | m := self aaTransform composedWith: aMatrix. "m offset: m offset + destOffset." ^m! ! !BalloonEngine methodsFor: 'copying' stamp: 'ar 11/25/1998 00:45' prior: 17217492! canProceedAfter: failureReason "Check if we can proceed after the failureReason indicated." | newBuffer | failureReason = GErrorNeedFlush ifTrue:[ "Need to flush engine before proceeding" self copyBits. self reset. ^true]. failureReason = GErrorNoMoreSpace ifTrue:[ "Work buffer is too small" newBuffer := workBuffer species new: workBuffer size * 2. self primCopyBufferFrom: workBuffer to: newBuffer. workBuffer := newBuffer. ^true]. "Not handled" ^false! ! !BalloonEngine methodsFor: 'copying' stamp: 'ar 3/6/2001 12:06' prior: 17218041! copyBits (bitBlt notNil and:[bitBlt destForm notNil]) ifTrue:[bitBlt destForm unhibernate]. self copyLoopFaster.! ! !BalloonEngine methodsFor: 'copying' stamp: 'ar 11/14/1998 19:32' prior: 17218227! copyLoop "This is the basic rendering loop using as little primitive support as possible." | finished edge fill | edge := BalloonEdgeData new. fill := BalloonFillData new. self primInitializeProcessing. 
"Initialize the GE for processing" [self primFinishedProcessing] whileFalse:[ "Step 1: Process the edges in the global edge table that will be added in this step" [finished := self primNextGlobalEdgeEntryInto: edge. finished] whileFalse:[ edge source: (externals at: edge index). edge stepToFirstScanLine. self primAddActiveEdgeTableEntryFrom: edge]. "Step 2: Scan the active edge table" [finished := self primNextFillEntryInto: fill. finished] whileFalse:[ fill source: (externals at: fill index). "Compute the new fill" fill computeFill. "And mix it in the out buffer" self primMergeFill: fill destForm bits from: fill]. "Step 3: Display the current span buffer if necessary" self primDisplaySpanBuffer. "Step 4: Advance and resort the active edge table" [finished := self primNextActiveEdgeEntryInto: edge. finished] whileFalse:[ "If the index is zero then the edge has been handled by the GE" edge source: (externals at: edge index). edge stepToNextScanLine. self primChangeActiveEdgeTableEntryFrom: edge]. ]. self primGetTimes: Times. self primGetCounts: Counts. self primGetBezierStats: BezierStats.! ! !BalloonEngine methodsFor: 'copying' stamp: 'ar 11/14/1998 19:32' prior: 17219678! copyLoopFaster "This is a copy loop drawing one scan line at a time" | edge fill reason | edge := BalloonEdgeData new. fill := BalloonFillData new. [self primFinishedProcessing] whileFalse:[ reason := self primRenderScanline: edge with: fill. "reason ~= 0 means there has been a problem" reason = 0 ifFalse:[ self processStopReason: reason edge: edge fill: fill. ]. ]. self primGetTimes: Times. self primGetCounts: Counts. self primGetBezierStats: BezierStats.! ! !BalloonEngine methodsFor: 'copying' stamp: 'ar 11/14/1998 19:33' prior: 17220230! copyLoopFastest "This is a copy loop drawing the entire image" | edge fill reason | edge := BalloonEdgeData new. fill := BalloonFillData new. [self primFinishedProcessing] whileFalse:[ reason := self primRenderImage: edge with: fill. 
"reason ~= 0 means there has been a problem" reason = 0 ifFalse:[ self processStopReason: reason edge: edge fill: fill. ]. ]. self primGetTimes: Times. self primGetCounts: Counts. self primGetBezierStats: BezierStats.! ! !BalloonEngine methodsFor: 'copying' stamp: 'ar 11/11/1998 21:19' prior: 17220773! processStopReason: reason edge: edge fill: fill "The engine has stopped because of some reason. Try to figure out how to respond and do the necessary actions." "Note: The order of operations below can affect the speed" "Process unknown fills first" reason = GErrorFillEntry ifTrue:[ fill source: (externals at: fill index). "Compute the new fill" fill computeFill. "And mix it in the out buffer" ^self primMergeFill: fill destForm bits from: fill]. "Process unknown steppings in the AET second" reason = GErrorAETEntry ifTrue:[ edge source: (externals at: edge index). edge stepToNextScanLine. ^self primChangeActiveEdgeTableEntryFrom: edge]. "Process unknown entries in the GET third" reason = GErrorGETEntry ifTrue:[ edge source: (externals at: edge index). edge stepToFirstScanLine. ^self primAddActiveEdgeTableEntryFrom: edge]. "Process generic problems last" (self canProceedAfter: reason) ifTrue:[^self]. "Okay." ^self error:'Unkown stop reason in graphics engine' ! ! !BalloonEngine methodsFor: 'drawing' stamp: 'ar 10/11/1999 16:49' prior: 17221856! drawBezierShape: points fill: fillStyle borderWidth: borderWidth borderColor: borderFill transform: aTransform | fills | self edgeTransform: aTransform. self resetIfNeeded. fills := self registerFill: fillStyle and: borderFill. self primAddBezierShape: points segments: (points size) // 3 fill: (fills at: 1) lineWidth: borderWidth lineFill: (fills at: 2). self postFlushIfNeeded.! ! !BalloonEngine methodsFor: 'drawing' stamp: 'ar 11/26/1998 19:44' prior: 17222322! drawCompressedShape: shape transform: aTransform | fillIndexList | self edgeTransform: aTransform. self resetIfNeeded. 
fillIndexList := self registerFills: shape fillStyles. self primAddCompressedShape: shape points segments: shape numSegments leftFills: shape leftFills rightFills: shape rightFills lineWidths: shape lineWidths lineFills: shape lineFills fillIndexList: fillIndexList. self postFlushIfNeeded.! ! !BalloonEngine methodsFor: 'drawing' stamp: 'ar 1/15/1999 03:02' prior: 17222822! drawGeneralBezierShape: contours fill: fillStyle borderWidth: borderWidth borderColor: borderFill transform: aTransform | fills | self edgeTransform: aTransform. self resetIfNeeded. fills := self registerFill: fillStyle and: borderFill. contours do:[:points| self primAddBezierShape: points segments: (points size // 3) fill: (fills at: 1) lineWidth: borderWidth lineFill: (fills at: 2). "Note: To avoid premature flushing of the pipeline we need to reset the flush bit within the engine." self primFlushNeeded: false. ]. "And set the flush bit afterwards" self primFlushNeeded: true. self postFlushIfNeeded.! ! !BalloonEngine methodsFor: 'drawing' stamp: 'ar 1/15/1999 03:02' prior: 17223531! drawGeneralPolygon: contours fill: fillStyle borderWidth: borderWidth borderColor: borderFill transform: aTransform | fills | self edgeTransform: aTransform. self resetIfNeeded. fills := self registerFill: fillStyle and: borderFill. contours do:[:points| self primAddPolygon: points segments: points size fill: (fills at: 1) lineWidth: borderWidth lineFill: (fills at: 2). "Note: To avoid premature flushing of the pipeline we need to reset the flush bit within the engine." self primFlushNeeded: false. ]. "And set the flush bit afterwards" self primFlushNeeded: true. self postFlushIfNeeded.! ! !BalloonEngine methodsFor: 'drawing' stamp: 'ar 11/26/1998 19:45' prior: 17224226! drawOval: rect fill: fillStyle borderWidth: borderWidth borderColor: borderColor transform: aMatrix | fills | self edgeTransform: aMatrix. self resetIfNeeded. fills := self registerFill: fillStyle and: borderColor. 
self primAddOvalFrom: rect origin to: rect corner fillIndex: (fills at: 1) borderWidth: borderWidth borderColor: (fills at: 2). self postFlushIfNeeded.! ! !BalloonEngine methodsFor: 'drawing' stamp: 'ar 11/26/1998 19:45' prior: 17224684! drawPolygon: points fill: fillStyle borderWidth: borderWidth borderColor: borderFill transform: aTransform | fills | self edgeTransform: aTransform. self resetIfNeeded. fills := self registerFill: fillStyle and: borderFill. self primAddPolygon: points segments: points size fill: (fills at: 1) lineWidth: borderWidth lineFill: (fills at: 2). self postFlushIfNeeded.! ! !BalloonEngine methodsFor: 'drawing' stamp: 'ar 11/26/1998 19:45' prior: 17225136! drawRectangle: rect fill: fillStyle borderWidth: borderWidth borderColor: borderColor transform: aMatrix | fills | self edgeTransform: aMatrix. self resetIfNeeded. fills := self registerFill: fillStyle and: borderColor. self primAddRectFrom: rect origin to: rect corner fillIndex: (fills at: 1) borderWidth: borderWidth borderColor: (fills at: 2). self postFlushIfNeeded.! ! !BalloonEngine methodsFor: 'drawing' stamp: 'bf 4/3/2004 01:36' prior: 17225597! registerFill: aFillStyle "Register the given fill style." | theForm | aFillStyle ifNil:[^0]. aFillStyle isSolidFill ifTrue:[^aFillStyle scaledPixelValue32]. aFillStyle isGradientFill ifTrue:[ ^self primAddGradientFill: aFillStyle pixelRamp from: aFillStyle origin along: aFillStyle direction normal: aFillStyle normal radial: aFillStyle isRadialFill ]. aFillStyle isBitmapFill ifTrue:[ theForm := aFillStyle form asSourceForm. theForm unhibernate. forms := forms copyWith: theForm. ^self primAddBitmapFill: theForm colormap: (theForm colormapIfNeededForDepth: 32) tile: aFillStyle isTiled from: aFillStyle origin along: aFillStyle direction normal: aFillStyle normal xIndex: forms size]. ^0! ! !BalloonEngine methodsFor: 'drawing' stamp: 'ar 11/26/1998 19:45' prior: 17226418! 
registerFill: fill1 and: fill2 ^self registerFills: (Array with: fill1 with: fill2)! ! !BalloonEngine methodsFor: 'drawing' stamp: 'ar 1/14/1999 15:24' prior: 17226573! registerFill: aFillStyle transform: aTransform aFillStyle ifNil:[^0]. aFillStyle isSolidFill ifTrue:[^aFillStyle scaledPixelValue32]. aFillStyle isGradientFill ifTrue:[ ^self primAddGradientFill: aFillStyle pixelRamp from: aFillStyle origin along: aFillStyle direction normal: aFillStyle normal radial: aFillStyle isRadialFill matrix: aTransform. ]. ^0! ! !BalloonEngine methodsFor: 'drawing' stamp: 'di 11/21/1999 20:15' prior: 17227025! registerFills: fills | fillIndexList index fillIndex | ((colorTransform notNil and:[colorTransform isAlphaTransform]) or:[ fills anySatisfy: [:any| any notNil and:[any isTranslucent]]]) ifTrue:[ self flush. self reset. postFlushNeeded := true]. fillIndexList := WordArray new: fills size. index := 1. [index <= fills size] whileTrue:[ fillIndex := self registerFill: (fills at: index). fillIndex == nil ifTrue:[index := 1] "Need to start over" ifFalse:[fillIndexList at: index put: fillIndex. index := index+1] ]. ^fillIndexList! ! !BalloonEngine methodsFor: 'experimental' stamp: 'ar 11/12/1998 19:53' prior: 17227670! registerBezier: aCurve transformation: aMatrix self primAddBezierFrom: aCurve start to: aCurve end via: aCurve via leftFillIndex: (self registerFill: aCurve leftFill transform: aMatrix) rightFillIndex: (self registerFill: aCurve rightFill transform: aMatrix) matrix: aMatrix! ! !BalloonEngine methodsFor: 'experimental' stamp: 'ar 11/11/1998 21:15' prior: 17228032! registerBoundary: boundaryObject transformation: aMatrix | external | external := boundaryObject asEdgeRepresentation: (self fullTransformFrom: aMatrix). self subdivideExternalEdge: external from: boundaryObject. ! ! !BalloonEngine methodsFor: 'experimental' stamp: 'ar 11/12/1998 19:54' prior: 17228325! registerExternalEdge: externalEdge from: boundaryObject externals addLast: externalEdge. 
self primAddExternalEdge: externals size initialX: externalEdge initialX initialY: externalEdge initialY initialZ: externalEdge initialZ leftFillIndex: (self registerFill: boundaryObject leftFill transform: nil) rightFillIndex: (self registerFill: boundaryObject rightFill transform: nil)! ! !BalloonEngine methodsFor: 'experimental' stamp: 'ar 11/12/1998 19:54' prior: 17228791! registerLine: aLine transformation: aMatrix self primAddLineFrom: aLine start to: aLine end leftFillIndex: (self registerFill: aLine leftFill transform: aMatrix) rightFillIndex: (self registerFill: aLine rightFill transform: aMatrix) matrix: aMatrix! ! !BalloonEngine methodsFor: 'experimental' stamp: 'ar 11/11/1998 21:15' prior: 17229124! subdivideExternalEdge: external from: boundaryObject | external2 | external2 := external subdivide. external2 notNil ifTrue:[ self subdivideExternalEdge: external from: boundaryObject. self subdivideExternalEdge: external2 from: boundaryObject. ] ifFalse:[ self registerExternalEdge: external from: boundaryObject. ].! ! !BalloonEngine methodsFor: 'initialize' stamp: 'ar 11/25/1998 22:29' prior: 17229528! flush "Force all pending primitives onscreen" workBuffer ifNil:[^self]. self copyBits. self release.! ! !BalloonEngine methodsFor: 'initialize' stamp: 'nk 9/26/2003 10:52' prior: 17229706! initialize | w | w := Display width > 2048 ifTrue: [ 4096 ] ifFalse: [ 2048 ]. externals := OrderedCollection new: 100. span := Bitmap new: w. bitBlt := nil. self bitBlt: ((BitBlt toForm: Display) destRect: Display boundingBox; yourself). forms := #(). deferred := false.! ! !BalloonEngine methodsFor: 'initialize' stamp: 'ar 11/25/1998 22:42' prior: 17230060! postFlushIfNeeded From noreply at buildbot.pypy.org Fri Jul 18 14:08:23 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:23 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed error-reporting and other outputs after parsing arguments. 
Message-ID: <20140718120823.8477F1C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r898:3146c682cb30 Date: 2014-07-10 16:39 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3146c682cb30/ Log: Fixed error-reporting and other outputs after parsing arguments. Added flag to disable specialized storage strategies. diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -229,7 +229,7 @@ return elif c_type == RSDL.QUIT: from spyvm.error import Exit - raise Exit("Window closed..") + raise Exit("Window closed") finally: lltype.free(event, flavor='raw') diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -8,6 +8,10 @@ class ObjSpace(object): def __init__(self): + # If this is True, then no optimizing storage strategies will be used. + # Intended for performance comparisons. Breaks tests. + self.no_specialized_storage = [False] + self.classtable = {} self.objtable = {} self._executable_path = [""] # XXX: we cannot set the attribute diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -374,7 +374,6 @@ @expose_primitive(FAIL) def func(interp, s_frame, argcount): - from spyvm.error import Exit if s_frame.w_method().lookup_selector == 'doesNotUnderstand:': print '' print s_frame.print_stack() @@ -837,7 +836,7 @@ @expose_primitive(QUIT, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): from spyvm.error import Exit - raise Exit('Quit-Primitive called..') + raise Exit('Quit-Primitive called') @expose_primitive(EXIT_TO_DEBUGGER, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -8,10 +8,6 @@ from rpython.rlib.rstruct.runpack import runpack from rpython.rtyper.lltypesystem import rffi, lltype -# If this is True, then no optimizing storage strategies will be used. 
-# Intended for performance comparisons. Breaks tests. -no_specialized_storage = False - class AbstractShadow(object): """A shadow is an optional extra bit of information that can be attached at run-time to any Smalltalk object. @@ -170,14 +166,14 @@ def empty_storage(space, w_self, size, weak=False): if weak: return WeakListStorageShadow(space, w_self, size) - if no_specialized_storage: + if space.no_specialized_storage[0]: return ListStorageShadow(space, w_self, size) return AllNilStorageShadow(space, w_self, size) def find_storage_for_objects(space, vars, weak=False): if weak: return WeakListStorageShadow - if no_specialized_storage: + if space.no_specialized_storage[0]: return ListStorageShadow specialized_strategies = 3 all_nil_can_handle = True diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -38,6 +38,7 @@ -s - After num stack frames, the entire stack will be dumped to the heap. This breaks performance, but protects agains stack overflow. num <= 0 disables stack protection (default: %d) + -S - Disable specialized storage strategies; always use generic ListStorage Logging parameters: -t|--trace - Output a trace of each message, primitive, return value and process switch. 
@@ -49,13 +50,25 @@ def get_parameter(argv, idx, arg): if len(argv) < idx + 1: - raise RuntimeError("Error: missing argument after %s" % arg) + raise error.Exit("Missing argument after %s" % arg) return argv[idx], idx + 1 +def get_int_parameter(argv, idx, arg): + param, idx = get_parameter(argv, idx, arg) + try: + result = int(param) + except ValueError, e: + raise error.Exit("Non-int argument after %s" % arg) + return result, idx + +def print_error(str): + os.write(2, str + os.linesep) + prebuilt_space = objspace.ObjSpace() def entry_point(argv): # == Main execution parameters + path = None selector = None code = "" number = 0 @@ -68,54 +81,61 @@ max_stack_depth = constants.MAX_LOOP_DEPTH trace = False - path = argv[1] if len(argv) > 1 else "Squeak.image" - idx = 2 - - while idx < len(argv): - arg = argv[idx] - idx += 1 - if arg in ["-h", "--help"]: - _usage(argv) - return 0 - elif arg in ["-j", "--jit"]: - jitarg, idx = get_parameter(argv, idx, arg) - jit.set_user_param(interpreter.Interpreter.jit_driver, jitarg) - elif arg in ["-n", "--number"]: - numarg, idx = get_parameter(argv, idx, arg) - number = int(numarg) - have_number = True - elif arg in ["-m", "--method"]: - selector, idx = get_parameter(argv, idx, arg) - elif arg in ["-t", "--trace"]: - trace = True - elif arg in ["-p", "--poll"]: - poll = True - elif arg in ["-a", "--arg"]: - stringarg, idx = get_parameter(argv, idx, arg) - elif arg in ["-r", "--run"]: - code, idx = get_parameter(argv, idx, arg) - elif arg in ["-i", "--no-interrupts"]: - interrupts = False - elif arg in ["-s"]: - arg, idx = get_parameter(argv, idx, arg) - max_stack_depth = int(arg) - elif arg in ["-P", "--process"]: - headless = False - elif arg in ["-u"]: - from spyvm.plugins.vmdebugging import stop_ui_process - stop_ui_process() - elif arg in ["-l", "--storage-log"]: - storage_logger.activate() - elif arg in ["-L", "--storage-log-aggregate"]: - storage_logger.activate(aggregate=True) - elif arg in ["-E", "--storage-log-elements"]: 
- storage_logger.activate(elements=True) - else: - _usage(argv) - return -1 - - if code and selector: - raise RuntimeError("Cannot handle both -r and -m.") + space = prebuilt_space + idx = 1 + try: + while idx < len(argv): + arg = argv[idx] + idx += 1 + if arg in ["-h", "--help"]: + _usage(argv) + return 0 + elif arg in ["-j", "--jit"]: + jitarg, idx = get_parameter(argv, idx, arg) + jit.set_user_param(interpreter.Interpreter.jit_driver, jitarg) + elif arg in ["-n", "--number"]: + number, idx = get_int_parameter(argv, idx, arg) + have_number = True + elif arg in ["-m", "--method"]: + selector, idx = get_parameter(argv, idx, arg) + elif arg in ["-t", "--trace"]: + trace = True + elif arg in ["-p", "--poll"]: + poll = True + elif arg in ["-a", "--arg"]: + stringarg, idx = get_parameter(argv, idx, arg) + elif arg in ["-r", "--run"]: + code, idx = get_parameter(argv, idx, arg) + elif arg in ["-i", "--no-interrupts"]: + interrupts = False + elif arg in ["-s"]: + max_stack_depth, idx = get_int_parameter(argv, idx, arg) + elif arg in ["-P", "--process"]: + headless = False + elif arg in ["-S"]: + space.no_specialized_storage[0] = True + elif arg in ["-u"]: + from spyvm.plugins.vmdebugging import stop_ui_process + stop_ui_process() + elif arg in ["-l", "--storage-log"]: + storage_logger.activate() + elif arg in ["-L", "--storage-log-aggregate"]: + storage_logger.activate(aggregate=True) + elif arg in ["-E", "--storage-log-elements"]: + storage_logger.activate(elements=True) + elif path is None: + path = arg + else: + _usage(argv) + return -1 + + if path is None: + path = "Squeak.image" + if code and selector: + raise error.Exit("Cannot handle both -r and -m.") + except error.Exit as e: + print_error("Parameter error: %s" % e.msg) + return 1 path = rpath.rabspath(path) try: @@ -125,17 +145,17 @@ finally: f.close() except OSError as e: - os.write(2, "%s -- %s (LoadError)\n" % (os.strerror(e.errno), path)) + print_error("%s -- %s (LoadError)" % (os.strerror(e.errno), path)) 
return 1 # Load & prepare image and environment - space = prebuilt_space image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) interp = interpreter.Interpreter(space, image, image_name=path, trace=trace, evented=not poll, interrupts=interrupts, max_stack_depth=max_stack_depth) space.runtime_setup(argv[0]) + print_error("") # Line break after image-loading characters # Create context to be executed if code or selector: @@ -172,30 +192,31 @@ selector = "DoIt%d" % int(time.time()) space = interp.space w_receiver_class = w_receiver.getclass(space) + + # The suppress_process_switch flag is a hack/workaround to enable compiling code + # before having initialized the image cleanly. The problem is that the TimingSemaphore is not yet + # registered (primitive 136 not called), so the idle process will never be left once it is entered. + # TODO - Find a way to cleanly initialize the image, without executing the active_context of the image. + # Instead, we want to execute our own context. Then remove this flag (and all references to it) + interp.space.suppress_process_switch[0] = True try: - try: - # The suppress_process_switch flag is a hack/workaround to enable compiling code - # before having initialized the image cleanly. The problem is that the TimingSemaphore is not yet - # registered (primitive 136 not called), so the idle process will never be left once it is entered. - # TODO - Find a way to cleanly initialize the image, without executing the active_context of the image. - # Instead, we want to execute our own context. 
Then remove this flag (and all references to it) - interp.space.suppress_process_switch[0] = True - w_result = interp.perform( - w_receiver_class, - "compile:classified:notifying:", - w_arguments = [space.wrap_string("%s\r\n%s" % (selector, code)), - space.wrap_string("spy-run-code"), - space.w_nil] - ) - finally: - interp.space.suppress_process_switch[0] = False + w_result = interp.perform( + w_receiver_class, + "compile:classified:notifying:", + w_arguments = [space.wrap_string("%s\r\n%s" % (selector, code)), + space.wrap_string("spy-run-code"), + space.w_nil] + ) + # TODO - is this expected in every image? if not isinstance(w_result, model.W_BytesObject) or w_result.as_string() != selector: - print "Compilation failed, unexpected result: %s" % result_string(w_result) + print_error("Compilation failed, unexpected result: %s" % result_string(w_result)) return None except error.Exit, e: - print "Exited while compiling code: %s" % e.msg + print_error("Exited while compiling code: %s" % e.msg) return None + finally: + interp.space.suppress_process_switch[0] = False w_receiver_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() return selector @@ -234,11 +255,10 @@ return w_active_context.as_context_get_shadow(space) def execute_context(interp, s_frame, measure=False): - print "" # Line break after image-loading-indicator characters try: return interp.interpret_toplevel(s_frame.w_self()) except error.Exit, e: - print "Exited: %s" % e.msg + print_error("Exited: %s" % e.msg) return None # _____ Target and Main _____ From noreply at buildbot.pypy.org Fri Jul 18 14:08:24 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:24 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Optimizing storage strategies. 
Message-ID: <20140718120824.B9F771C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r899:2c3b6f965b1a Date: 2014-07-10 21:29 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2c3b6f965b1a/ Log: Optimizing storage strategies. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -614,7 +614,7 @@ def switch_shadow(self, new_shadow, w_element=None): old_shadow = self.assert_shadow() - new_shadow.copy_from(old_shadow) + old_shadow.copy_into(new_shadow) self.store_shadow(new_shadow) new_shadow.attach_shadow() self.log_storage("Switched", old_shadow, w_element=w_element) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -38,16 +38,27 @@ def size(self): raise NotImplementedError("Abstract class") + # This will invoke an appropriate copy_from_* method. + # Overwriting this allows optimized transitions between certain storage types. + def copy_into(self, other_shadow): + other_shadow.copy_from(self) + def attach_shadow(self): pass def copy_field_from(self, n0, other_shadow): self.store(n0, other_shadow.fetch(n0)) - # This can be overwritten to change the order of initialization. 
def copy_from(self, other_shadow): assert self.size() == other_shadow.size() for i in range(self.size()): self.copy_field_from(i, other_shadow) + + def copy_from_AllNil(self, all_nil_storage): + self.copy_from(all_nil_storage) + def copy_from_SmallIntegerOrNil(self, small_int_storage): + self.copy_from(small_int_storage) + def copy_from_FloatOrNil(self, float_storage): + self.copy_from(float_storage) class AbstractStorageShadow(AbstractShadow): _attrs_ = [] @@ -80,6 +91,8 @@ if n0 >= self._size: raise IndexError return self.space.w_nil + def copy_into(self, other_shadow): + other_shadow.copy_from_AllNil(self) def do_store(self, n0, w_value): pass def size(self): @@ -117,6 +130,9 @@ self.storage[n0] = self.nil_value else: self.storage[n0] = self.unwrap(self.space, w_val) + + def copy_from_AllNil(self, all_nil_storage): + pass # Already initialized # This is to avoid code duplication @objectmodel.specialize.arg(0) @@ -143,6 +159,8 @@ @staticmethod def unwrap(space, w_val): return space.unwrap_int(w_val) + def copy_into(self, other_shadow): + other_shadow.copy_from_SmallIntegerOrNil(self) class FloatOrNilStorageShadow(AbstractStorageShadow): repr_classname = "FloatOrNilStorageShadow" @@ -162,6 +180,8 @@ @staticmethod def unwrap(space, w_val): return space.unwrap_float(w_val) + def copy_into(self, other_shadow): + other_shadow.copy_from_FloatOrNil(self) def empty_storage(space, w_self, size, weak=False): if weak: @@ -170,6 +190,7 @@ return ListStorageShadow(space, w_self, size) return AllNilStorageShadow(space, w_self, size) + at jit.unroll_safe def find_storage_for_objects(space, vars, weak=False): if weak: return WeakListStorageShadow @@ -212,7 +233,10 @@ def copy_from(self, other_shadow): if self.size() != other_shadow.size(): self.initialize_storage(other_shadow.size()) - AbstractShadow.copy_from(self, other_shadow) + for i in range(self.size()): + w_val = other_shadow.fetch(i) + if not w_val.is_nil(self.space): + self.store(i, w_val) class 
ListStorageShadow(AbstractStorageShadow): _attrs_ = ['storage'] From noreply at buildbot.pypy.org Fri Jul 18 14:08:25 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:25 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Merged. Message-ID: <20140718120825.EDE171C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r900:f71c6de0dab8 Date: 2014-07-10 21:31 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f71c6de0dab8/ Log: Merged. diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -190,6 +190,5 @@ # Interpreter constants # -MAX_LOOP_DEPTH = 100 INTERRUPT_COUNTER_SIZE = 10000 CompileTime = time.time() diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -4,7 +4,7 @@ from spyvm import model, constants, primitives, conftest, wrapper from spyvm.tool.bitmanipulation import splitter -from rpython.rlib import jit +from rpython.rlib import jit, rstackovf from rpython.rlib import objectmodel, unroll class MissingBytecode(Exception): @@ -24,7 +24,7 @@ class Interpreter(object): _immutable_fields_ = ["space", "image", "image_name", - "max_stack_depth", "interrupt_counter_size", + "interrupt_counter_size", "startup_time", "evented", "interrupts"] jit_driver = jit.JitDriver( @@ -35,8 +35,7 @@ ) def __init__(self, space, image=None, image_name="", - trace=False, evented=True, interrupts=True, - max_stack_depth=constants.MAX_LOOP_DEPTH): + trace=False, evented=True, interrupts=True): import time # === Initialize immutable variables @@ -47,7 +46,6 @@ self.startup_time = image.startup_time else: self.startup_time = constants.CompileTime - self.max_stack_depth = max_stack_depth self.evented = evented self.interrupts = interrupts try: @@ -57,7 +55,6 @@ # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size - self.current_stack_depth = 0 
self.next_wakeup_tick = 0 self.trace = trace self.trace_proxy = False @@ -66,7 +63,6 @@ # This is the top-level loop and is not invoked recursively. s_new_context = w_active_context.as_context_get_shadow(self.space) while True: - assert self.current_stack_depth == 0 s_sender = s_new_context.s_sender() try: self.loop_bytecodes(s_new_context) @@ -76,11 +72,13 @@ print "====== StackOverflow, contexts forced to heap at: %s" % e.s_new_context.short_str() s_new_context = e.s_new_context except Return, nlr: + assert nlr.s_target_context or nlr.is_local s_new_context = s_sender - while s_new_context is not nlr.s_target_context: - s_sender = s_new_context.s_sender() - s_new_context._activate_unwind_context(self) - s_new_context = s_sender + if not nlr.is_local: + while s_new_context is not nlr.s_target_context: + s_sender = s_new_context.s_sender() + s_new_context._activate_unwind_context(self) + s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: assert not self.space.suppress_process_switch[0], "ProcessSwitch should be disabled..." @@ -110,11 +108,16 @@ try: self.step(s_context) except Return, nlr: - if nlr.s_target_context is not s_context: + if nlr.s_target_context is s_context or nlr.is_local: + s_context.push(nlr.value) + else: + if nlr.s_target_context is None: + # This is the case where we are returning to our sender. + # Mark the return as local, so our sender will take it + nlr.is_local = True s_context._activate_unwind_context(self) raise nlr - else: - s_context.push(nlr.value) + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame # and handles the stack overflow protection mechanism. 
@@ -122,16 +125,11 @@ try: if s_frame._s_sender is None and s_sender is not None: s_frame.store_s_sender(s_sender, raise_error=False) - - self.current_stack_depth += 1 - if self.max_stack_depth > 0: - if self.current_stack_depth >= self.max_stack_depth: - raise StackOverflow(s_frame) - # Now (continue to) execute the context bytecodes self.loop_bytecodes(s_frame, may_context_switch) - finally: - self.current_stack_depth -= 1 + except rstackovf.StackOverflow: + rstackovf.check_stack_overflow() + raise StackOverflow(s_frame) def step(self, context): bytecode = context.fetch_next_bytecode() @@ -205,7 +203,7 @@ s_frame = self.create_toplevel_context(w_receiver, selector, w_selector, w_arguments) self.interrupt_check_counter = self.interrupt_counter_size return self.interpret_toplevel(s_frame.w_self()) - + def create_toplevel_context(self, w_receiver, selector="", w_selector=None, w_arguments=[]): if w_selector is None: assert selector, "Need either string or W_Object selector" @@ -213,7 +211,7 @@ w_selector = self.image.w_asSymbol else: w_selector = self.perform(self.space.wrap_string(selector), "asSymbol") - + w_method = model.W_CompiledMethod(self.space, header=512) w_method.literalatput0(self.space, 1, w_selector) assert len(w_arguments) <= 7 @@ -225,7 +223,7 @@ return s_frame def padding(self, symbol=' '): - return symbol * self.current_stack_depth + return symbol class ReturnFromTopLevel(Exception): _attrs_ = ["object"] @@ -233,10 +231,11 @@ self.object = object class Return(Exception): - _attrs_ = ["value", "s_target_context"] + _attrs_ = ["value", "s_target_context", "is_local"] def __init__(self, s_target_context, w_result): self.value = w_result self.s_target_context = s_target_context + self.is_local = False class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave @@ -632,7 +631,7 @@ interp.padding(), code, w_method.safe_identifier_string(), w_selector.str_content()) raise e - def _return(self, return_value, interp, 
s_return_to): + def _return(self, return_value, interp, local_return=False): # unfortunately, this assert is not true for some tests. TODO fix this. # assert self._stack_ptr == self.tempsize() @@ -640,36 +639,47 @@ if interp.trace: print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) - if s_return_to is None: - # This should never happen while executing a normal image. - raise ReturnFromTopLevel(return_value) + if self.home_is_self() or local_return: + # a local return just needs to go up the stack once. there + # it will find the sender as a local, and we don't have to + # force the reference + s_return_to = None + if self.s_sender() is None: + # This should never happen while executing a normal image. + raise ReturnFromTopLevel(return_value) + else: + s_return_to = self.s_home().s_sender() + if s_return_to is None: + # This should never happen while executing a normal image. + raise ReturnFromTopLevel(return_value) + raise Return(s_return_to, return_value) # ====== Send/Return bytecodes ====== @bytecode_implementation() def returnReceiverBytecode(self, interp, current_bytecode): - return self._return(self.w_receiver(), interp, self.s_home().s_sender()) + return self._return(self.w_receiver(), interp) @bytecode_implementation() def returnTrueBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_true, interp, self.s_home().s_sender()) + return self._return(interp.space.w_true, interp) @bytecode_implementation() def returnFalseBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_false, interp, self.s_home().s_sender()) + return self._return(interp.space.w_false, interp) @bytecode_implementation() def returnNilBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_nil, interp, self.s_home().s_sender()) + return self._return(interp.space.w_nil, interp) @bytecode_implementation() def returnTopFromMethodBytecode(self, interp, current_bytecode): - return self._return(self.pop(), 
interp, self.s_home().s_sender()) + return self._return(self.pop(), interp) @bytecode_implementation() def returnTopFromBlockBytecode(self, interp, current_bytecode): - return self._return(self.pop(), interp, self.s_sender()) + return self._return(self.pop(), interp, local_return=True) @bytecode_implementation() def sendLiteralSelectorBytecode(self, interp, current_bytecode): @@ -738,9 +748,7 @@ # ====== Misc ====== def _activate_unwind_context(self, interp): - # TODO put the constant somewhere else. - # Primitive 198 is used in BlockClosure >> ensure: - if self.is_closure_context() or self.w_method().primitive() != 198: + if self.is_closure_context() or not self.is_BlockClosure_ensure(): self.mark_returned() return # The first temp is executed flag for both #ensure: and #ifCurtailed: @@ -750,7 +758,8 @@ try: self.bytecodePrimValue(interp, 0) except Return, nlr: - if self is not nlr.s_target_context: + assert nlr.s_target_context or nlr.is_local + if self is not nlr.s_target_context and not nlr.is_local: raise nlr finally: self.mark_returned() @@ -964,11 +973,9 @@ # in order to enable tracing/jumping for message sends etc. 
def debugging(): def stepping_debugger_init(original): - def meth(self, space, image=None, image_name="", trace=False, - max_stack_depth=constants.MAX_LOOP_DEPTH): + def meth(self, space, image=None, image_name="", trace=False): return_value = original(self, space, image=image, - image_name=image_name, trace=trace, - max_stack_depth=max_stack_depth) + image_name=image_name, trace=trace) # ############################################################## self.message_stepping = False diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -797,6 +797,12 @@ def is_closure_context(self): raise NotImplementedError() + def is_BlockClosure_ensure(self): + raise NotImplementedError() + + def home_is_self(self): + raise NotImplementedError() + # === Other properties of Contexts === def mark_returned(self): @@ -1015,6 +1021,12 @@ def is_closure_context(self): return True + def is_BlockClosure_ensure(self): + return False + + def home_is_self(self): + return False + # === Temporary variables === def gettemp(self, index): @@ -1094,7 +1106,7 @@ return '[] in %s' % self.w_method().get_identifier_string() class MethodContextShadow(ContextPartShadow): - _attrs_ = ['closure', '_w_receiver', '_w_method'] + _attrs_ = ['closure', '_w_receiver', '_w_method', '_is_BlockClosure_ensure'] repr_classname = "MethodContextShadow" # === Initialization === @@ -1116,6 +1128,7 @@ self.init_stack_and_temps() else: self._w_method = None + self._is_BlockClosure_ensure = False argc = len(arguments) for i0 in range(argc): @@ -1188,6 +1201,9 @@ def store_w_method(self, w_method): assert isinstance(w_method, model.W_CompiledMethod) self._w_method = w_method + if w_method: + # Primitive 198 is used in BlockClosure >> ensure: + self._is_BlockClosure_ensure = (w_method.primitive() == 198) def w_receiver(self): return self._w_receiver @@ -1206,6 +1222,12 @@ def is_closure_context(self): return self.closure is not None + def is_BlockClosure_ensure(self): + return 
self._is_BlockClosure_ensure + + def home_is_self(self): + return not self.is_closure_context() + # ______________________________________________________________________ # Marriage of MethodContextShadows with PointerObjects only when required diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py --- a/spyvm/test/jittest/base.py +++ b/spyvm/test/jittest/base.py @@ -18,7 +18,7 @@ def run(self, spy, tmpdir, code): logfile = str(tmpdir.join("x.pypylog")) proc = subprocess.Popen( - [str(spy), "-r", code.replace("\n", "\r\n"), BenchmarkImage], + [str(spy), BenchmarkImage, "-r", code.replace("\n", "\r\n")], cwd=str(tmpdir), env={"PYPYLOG": "jit-log-opt:%s" % logfile, "SDL_VIDEODRIVER": "dummy"} diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -14,13 +14,13 @@ print """ Usage: %s [-r|-m] [-naHu] [-jpis] [-tlLE] - image path (default: Squeak.image) - + Execution mode: (no flags) - Image will be normally opened. -r|--run - Code will be compiled and executed, result printed. -m|--method - Selector will be sent to a SmallInteger, result printed. -h|--help - Output this and exit. - + Execution parameters: -n|--num - Only with -m or -r, SmallInteger to be used as receiver (default: nil). -a|--arg - Only with -m, will be used as single String argument. @@ -30,7 +30,7 @@ in the image and execute the context directly. The image window will probably not open. Good for benchmarking. -u - Only with -m or -r, try to stop UI-process at startup. Can help benchmarking. - + Other parameters: -j|--jit - jitargs will be passed to the jit configuration. -p|--poll - Actively poll for events. Try this if the image is not responding well. @@ -45,8 +45,8 @@ -l|--storage-log - Output a log of storage operations. -L|--storage-log-aggregate - Output an aggregated storage log at the end of execution. -E|--storage-log-elements - Include classnames of elements into the storage log. 
- - """ % (argv[0], constants.MAX_LOOP_DEPTH) + + """ % argv[0] def get_parameter(argv, idx, arg): if len(argv) < idx + 1: @@ -78,7 +78,6 @@ # == Other parameters poll = False interrupts = True - max_stack_depth = constants.MAX_LOOP_DEPTH trace = False space = prebuilt_space @@ -147,13 +146,13 @@ except OSError as e: print_error("%s -- %s (LoadError)" % (os.strerror(e.errno), path)) return 1 - + # Load & prepare image and environment image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) interp = interpreter.Interpreter(space, image, image_name=path, trace=trace, evented=not poll, - interrupts=interrupts, max_stack_depth=max_stack_depth) + interrupts=interrupts) space.runtime_setup(argv[0]) print_error("") # Line break after image-loading characters @@ -175,7 +174,7 @@ context = active_context(interp.space) else: context = active_context(interp.space) - + w_result = execute_context(interp, context) print result_string(w_result) storage_logger.print_aggregated_log() @@ -219,13 +218,13 @@ interp.space.suppress_process_switch[0] = False w_receiver_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() return selector - + def create_context(interp, w_receiver, selector, stringarg): args = [] if stringarg: args.append(interp.space.wrap_string(stringarg)) return interp.create_toplevel_context(w_receiver, selector, w_arguments = args) - + def create_process(interp, s_frame): space = interp.space w_active_process = wrapper.scheduler(space).active_process() @@ -242,10 +241,10 @@ priority = 7 w_benchmark_proc.store(space, 1, s_frame.w_self()) w_benchmark_proc.store(space, 2, space.wrap_int(priority)) - + # Make process eligible for scheduling wrapper.ProcessWrapper(space, w_benchmark_proc).put_to_sleep() - + def active_context(space): w_active_process = wrapper.scheduler(space).active_process() active_process = wrapper.ProcessWrapper(space, w_active_process) @@ -260,7 +259,7 @@ except 
error.Exit, e: print_error("Exited: %s" % e.msg) return None - + # _____ Target and Main _____ def target(driver, *args): From noreply at buildbot.pypy.org Fri Jul 18 14:08:27 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:27 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed help string. Message-ID: <20140718120827.288781C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r901:37ace0c8899d Date: 2014-07-12 13:16 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/37ace0c8899d/ Log: Fixed help string. diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -12,7 +12,7 @@ def _usage(argv): print """ - Usage: %s [-r|-m] [-naHu] [-jpis] [-tlLE] + Usage: %s [-r|-m|-h] [-naPu] [-jpis] [-tlLE] - image path (default: Squeak.image) Execution mode: @@ -35,9 +35,6 @@ -j|--jit - jitargs will be passed to the jit configuration. -p|--poll - Actively poll for events. Try this if the image is not responding well. -i|--no-interrupts - Disable timer interrupt. Disables non-cooperative scheduling. - -s - After num stack frames, the entire stack will be dumped to the heap. - This breaks performance, but protects agains stack overflow. - num <= 0 disables stack protection (default: %d) -S - Disable specialized storage strategies; always use generic ListStorage Logging parameters: From noreply at buildbot.pypy.org Fri Jul 18 14:08:28 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:28 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added a stack_deth mechanism, but only if interp.trace is set. 
Message-ID: <20140718120828.5FFEA1C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r902:4c9262d36e3b Date: 2014-07-13 15:55 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/4c9262d36e3b/ Log: Added a stack_deth mechanism, but only if interp.trace is set. Added a ConstantFlag class to make CLI flags (and other boolean flags) constant for jit traces (promoted). A few code cleanups. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -1,7 +1,7 @@ import py import os from spyvm.shadow import ContextPartShadow, MethodContextShadow, BlockContextShadow, MethodNotFound -from spyvm import model, constants, primitives, conftest, wrapper +from spyvm import model, constants, primitives, conftest, wrapper, objspace from spyvm.tool.bitmanipulation import splitter from rpython.rlib import jit, rstackovf @@ -36,8 +36,6 @@ def __init__(self, space, image=None, image_name="", trace=False, evented=True, interrupts=True): - import time - # === Initialize immutable variables self.space = space self.image = image @@ -56,8 +54,9 @@ # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size self.next_wakeup_tick = 0 - self.trace = trace - self.trace_proxy = False + self.trace = objspace.ConstantFlag(trace) + self.trace_proxy = objspace.ConstantFlag() + self.stack_depth = 0 def loop(self, w_active_context): # This is the top-level loop and is not invoked recursively. 
@@ -68,7 +67,7 @@ self.loop_bytecodes(s_new_context) raise Exception("loop_bytecodes left without raising...") except StackOverflow, e: - if self.trace: + if self.is_tracing(): print "====== StackOverflow, contexts forced to heap at: %s" % e.s_new_context.short_str() s_new_context = e.s_new_context except Return, nlr: @@ -81,8 +80,8 @@ s_new_context = s_sender s_new_context.push(nlr.value) except ProcessSwitch, p: - assert not self.space.suppress_process_switch[0], "ProcessSwitch should be disabled..." - if self.trace: + assert not self.space.suppress_process_switch.is_set(), "ProcessSwitch should be disabled..." + if self.is_tracing(): print "====== Switched process from: %s" % s_new_context.short_str() print "====== to: %s " % p.s_new_context.short_str() s_new_context = p.s_new_context @@ -118,16 +117,19 @@ s_context._activate_unwind_context(self) raise nlr - # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame # and handles the stack overflow protection mechanism. def stack_frame(self, s_frame, s_sender, may_context_switch=True): try: + if self.is_tracing(): + self.stack_depth += 1 if s_frame._s_sender is None and s_sender is not None: s_frame.store_s_sender(s_sender, raise_error=False) # Now (continue to) execute the context bytecodes self.loop_bytecodes(s_frame, may_context_switch) except rstackovf.StackOverflow: + if self.is_tracing(): + self.stack_depth -= 1 rstackovf.check_stack_overflow() raise StackOverflow(s_frame) @@ -184,7 +186,7 @@ wrapper.SemaphoreWrapper(self.space, semaphore).signal(s_frame) # We have no finalization process, so far. # We do not support external semaphores. - # In cog, the method to add such a semaphore is only called in GC. + # In cog, the method to add such a semaphore is only called in GC. 
def time_now(self): import time @@ -222,8 +224,12 @@ s_frame.push_all(list(w_arguments)) return s_frame + def is_tracing(self): + return self.trace.is_set() + def padding(self, symbol=' '): - return symbol + assert self.is_tracing() + return self.stack_depth * symbol class ReturnFromTopLevel(Exception): _attrs_ = ["object"] @@ -245,9 +251,9 @@ self.s_new_context = s_new_context class StackOverflow(ContextSwitchException): - """This causes the current jit-loop to be left. - This is an experimental mechanism to avoid stack-overflow errors - on OS level, and we suspect it breaks jit performance at least sometimes.""" + """This causes the current jit-loop to be left, thus avoiding stack overflows. + This breaks performance, so it should rarely happen. + In case of severe performance problems, execute with -t and check if this occurrs.""" class ProcessSwitch(ContextSwitchException): """This causes the interpreter to switch the executed context.""" @@ -569,7 +575,7 @@ self.pop() # receiver # ###################################################################### - if interp.trace: + if interp.is_tracing(): print interp.padding() + s_frame.short_str() return interp.stack_frame(s_frame, self) @@ -586,7 +592,7 @@ s_frame = w_method.create_frame(interp.space, receiver, w_args) # ###################################################################### - if interp.trace: + if interp.is_tracing(): print '%s %s %s: #%s' % (interp.padding('#'), special_selector, s_frame.short_str(), w_args) if not objectmodel.we_are_translated(): import pdb; pdb.set_trace() @@ -617,7 +623,7 @@ def _call_primitive(self, code, interp, argcount, w_method, w_selector): # ################################################################## - if interp.trace: + if interp.is_tracing(): print "%s-> primitive %d \t(in %s, named #%s)" % ( interp.padding(), code, self.w_method().get_identifier_string(), w_selector.str_content()) func = primitives.prim_holder.prim_table[code] @@ -626,7 +632,7 @@ # the primitive 
pushes the result (if any) onto the stack itself return func(interp, self, argcount, w_method) except primitives.PrimitiveFailedError, e: - if interp.trace: + if interp.is_tracing(): print "%s primitive %d FAILED\t (in %s, named %s)" % ( interp.padding(), code, w_method.safe_identifier_string(), w_selector.str_content()) raise e @@ -636,7 +642,7 @@ # assert self._stack_ptr == self.tempsize() # ################################################################## - if interp.trace: + if interp.is_tracing(): print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) if self.home_is_self() or local_return: @@ -644,16 +650,16 @@ # it will find the sender as a local, and we don't have to # force the reference s_return_to = None - if self.s_sender() is None: - # This should never happen while executing a normal image. - raise ReturnFromTopLevel(return_value) + return_from_top = self.s_sender() is None else: s_return_to = self.s_home().s_sender() - if s_return_to is None: - # This should never happen while executing a normal image. - raise ReturnFromTopLevel(return_value) - - raise Return(s_return_to, return_value) + return_from_top = s_return_to is None + + if return_from_top: + # This should never happen while executing a normal image. 
+ raise ReturnFromTopLevel(return_value) + else: + raise Return(s_return_to, return_value) # ====== Send/Return bytecodes ====== diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -17,7 +17,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.unroll import unrolling_iterable -from spyvm import error, model +from spyvm import error, model, objspace sqInt = rffi.INT sqLong = rffi.LONG @@ -50,7 +50,7 @@ def wrapped(*c_arguments): assert len_unwrap_spec == len(c_arguments) args = () - if IProxy.trace_proxy: + if IProxy.trace_proxy.is_set(): print 'Called InterpreterProxy >> %s' % func.func_name, assert IProxy.s_frame is not None and IProxy.space is not None and IProxy.interp is not None try: @@ -63,7 +63,7 @@ else: args += (c_arg, ) result = func(*args) - if IProxy.trace_proxy: + if IProxy.trace_proxy.is_set(): print '\t-> %s' % result if result_type is oop: assert isinstance(result, model.W_Object) @@ -80,7 +80,7 @@ else: return result except error.PrimitiveFailedError: - if IProxy.trace_proxy: + if IProxy.trace_proxy.is_set(): print '\t-> failed' IProxy.failed() from rpython.rlib.objectmodel import we_are_translated @@ -999,6 +999,7 @@ self.object_map = {} self.loaded_modules = {} self.remappable_objects = [] + self.trace_proxy = objspace.ConstantFlag() self.reset() def reset(self): @@ -1007,7 +1008,7 @@ self.argcount = 0 self.w_method = None self.fail_reason = 0 - self.trace_proxy = False + self.trace_proxy.unset() def call(self, signature, interp, s_frame, argcount, w_method): self.initialize_from_call(signature, interp, s_frame, argcount, w_method) @@ -1049,7 +1050,7 @@ self.argcount = argcount self.w_method = w_method self.space = interp.space - self.trace_proxy = interp.trace_proxy + self.trace_proxy.set_to(interp.trace_proxy.is_set()) # ensure that space.w_nil gets the first possible oop self.object_to_oop(self.space.w_nil) diff --git 
a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -6,11 +6,33 @@ from rpython.rlib.objectmodel import instantiate, specialize from rpython.rlib.rarithmetic import intmask, r_uint, int_between +class ConstantFlag(object): + """Boolean flag that can be edited, but will be promoted + to a constant when jitting.""" + + def __init__(self, set_initially=False): + self.flag = [set_initially] + + def is_set(self): + flag = jit.promote(self.flag[0]) + return flag + + def set(self): + self.flag[0] = True + + def unset(self): + self.flag[0] = False + + def set_to(self, flag): + self.flag[0] = flag + class ObjSpace(object): def __init__(self): - # If this is True, then no optimizing storage strategies will be used. + # If this flag is set, then no optimizing storage strategies will be used. # Intended for performance comparisons. Breaks tests. - self.no_specialized_storage = [False] + self.no_specialized_storage = ConstantFlag() + # This is a hack; see compile_code() in targetimageloadingsmalltalk.py + self.suppress_process_switch = ConstantFlag() self.classtable = {} self.objtable = {} @@ -25,9 +47,6 @@ self.make_bootstrap_classes() self.make_bootstrap_objects() - - # This is a hack; see compile_code() in targetimageloadingsmalltalk.py - self.suppress_process_switch = [False] def find_executable(self, executable): if os.sep in executable or (os.name == "nt" and ":" in executable): diff --git a/spyvm/plugins/vmdebugging.py b/spyvm/plugins/vmdebugging.py --- a/spyvm/plugins/vmdebugging.py +++ b/spyvm/plugins/vmdebugging.py @@ -10,22 +10,22 @@ @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def trace(interp, s_frame, w_rcvr): - interp.trace = True + interp.trace.set() return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def untrace(interp, s_frame, w_rcvr): - interp.trace = False + interp.trace.unset() return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def trace_proxy(interp, s_frame, w_rcvr): - 
interp.trace_proxy = True + interp.trace_proxy.set() return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def untrace_proxy(interp, s_frame, w_rcvr): - interp.trace_proxy = False + interp.trace_proxy.unset() return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -186,7 +186,7 @@ def empty_storage(space, w_self, size, weak=False): if weak: return WeakListStorageShadow(space, w_self, size) - if space.no_specialized_storage[0]: + if space.no_specialized_storage.is_set(): return ListStorageShadow(space, w_self, size) return AllNilStorageShadow(space, w_self, size) @@ -194,7 +194,7 @@ def find_storage_for_objects(space, vars, weak=False): if weak: return WeakListStorageShadow - if space.no_specialized_storage[0]: + if space.no_specialized_storage.is_set(): return ListStorageShadow specialized_strategies = 3 all_nil_can_handle = True diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -8,7 +8,7 @@ space, interp, _, _ = read_image('bootstrapped.image') w = space.w copy_to_module(locals(), __name__) - interp.trace = False + interp.trace.unset() space.initialize_class(space.w_String, interp) def teardown_module(): @@ -37,8 +37,8 @@ w_selector = space.get_special_selector(selector) except Exception: w_selector = find_symbol_in_methoddict_of(selector, w(intmask(candidates[0])).getclass(space).shadow) - - interp.trace=trace + + interp.trace.set_to(trace) for i, v in enumerate(candidates): x = w_l(v) if j is None: @@ -50,7 +50,7 @@ y = w_l(j) z = perform_primitive(x, w_selector, y) assert r_uint(z.value) == r_uint(operation(v, y.value)) - interp.trace=False + interp.trace.unset() def test_bitAnd(): do_primitive("bitAnd:", operator.and_) diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -93,7 +93,7 @@ 
active_priority = active_process.priority() priority = self.priority() if priority > active_priority: - if not self.space.suppress_process_switch[0]: + if not self.space.suppress_process_switch.is_set(): active_process.deactivate(s_current_frame) self.activate() else: @@ -104,7 +104,7 @@ def suspend(self, s_current_frame): if self.is_active_process(): - if not self.space.suppress_process_switch[0]: + if not self.space.suppress_process_switch.is_set(): assert self.my_list().is_nil(self.space) w_process = scheduler(self.space).pop_highest_priority_process() self.deactivate(s_current_frame, put_to_sleep=False) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -109,7 +109,7 @@ elif arg in ["-P", "--process"]: headless = False elif arg in ["-S"]: - space.no_specialized_storage[0] = True + space.no_specialized_storage.set() elif arg in ["-u"]: from spyvm.plugins.vmdebugging import stop_ui_process stop_ui_process() @@ -156,9 +156,9 @@ # Create context to be executed if code or selector: if not have_number: - w_receiver = interp.space.w_nil + w_receiver = space.w_nil else: - w_receiver = interp.space.wrap_int(number) + w_receiver = space.wrap_int(number) if code: selector = compile_code(interp, w_receiver, code) if selector is None: @@ -168,9 +168,9 @@ context = s_frame else: create_process(interp, s_frame) - context = active_context(interp.space) + context = active_context(space) else: - context = active_context(interp.space) + context = active_context(space) w_result = execute_context(interp, context) print result_string(w_result) @@ -184,7 +184,6 @@ return w_result.as_repr_string().replace('\r', '\n') def compile_code(interp, w_receiver, code): - import time selector = "DoIt%d" % int(time.time()) space = interp.space w_receiver_class = w_receiver.getclass(space) @@ -194,7 +193,7 @@ # registered (primitive 136 not called), so the idle process will never be left once it 
is entered. # TODO - Find a way to cleanly initialize the image, without executing the active_context of the image. # Instead, we want to execute our own context. Then remove this flag (and all references to it) - interp.space.suppress_process_switch[0] = True + space.suppress_process_switch.set() try: w_result = interp.perform( w_receiver_class, @@ -212,7 +211,7 @@ print_error("Exited while compiling code: %s" % e.msg) return None finally: - interp.space.suppress_process_switch[0] = False + space.suppress_process_switch.unset() w_receiver_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() return selector From noreply at buildbot.pypy.org Fri Jul 18 14:08:29 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:29 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Printing better information in case of a doesNotUnderstand: Message-ID: <20140718120829.7F0381C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r903:b73849087f13 Date: 2014-07-13 16:42 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b73849087f13/ Log: Printing better information in case of a doesNotUnderstand: diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -821,9 +821,10 @@ def as_string(self): if self.bytes is not None: - return "".join(self.bytes) + string = "".join(self.bytes) else: - return "".join([self.c_bytes[i] for i in range(self.size())]) + string = "".join([self.c_bytes[i] for i in range(self.size())]) + return string.replace('\r', '\n') def invariant(self): if not W_AbstractObjectWithClassReference.invariant(self): diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -33,6 +33,7 @@ self.no_specialized_storage = ConstantFlag() # This is a hack; see compile_code() in targetimageloadingsmalltalk.py self.suppress_process_switch = ConstantFlag() + self.headless = ConstantFlag() self.classtable = {} 
self.objtable = {} diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -2,10 +2,8 @@ import inspect import math import operator -from spyvm import model, shadow -from spyvm import constants, display -from spyvm.error import PrimitiveFailedError, \ - PrimitiveNotYetWrittenError +from spyvm import model, shadow, error, constants, display +from spyvm.error import PrimitiveFailedError, PrimitiveNotYetWrittenError from spyvm import wrapper from rpython.rlib import rarithmetic, rfloat, unroll, jit @@ -374,14 +372,26 @@ @expose_primitive(FAIL) def func(interp, s_frame, argcount): - if s_frame.w_method().lookup_selector == 'doesNotUnderstand:': - print '' - print s_frame.print_stack() - w_message = s_frame.peek(0) - print ("%s" % w_message).replace('\r', '\n') - print ("%s" % s_frame.peek(1)).replace('\r', '\n') - if isinstance(w_message, model.W_PointersObject): - print ('%s' % w_message.fetch_all(s_frame.space)).replace('\r', '\n') + if interp.space.headless.is_set() and s_frame.w_method().lookup_selector == 'doesNotUnderstand:': + w_msg = s_frame.peek(1) + if isinstance(w_msg, model.W_BytesObject): + print "== Error message: %s" % w_msg.as_string() + print "== VM Stack:%s" % s_frame.print_stack() + print "== Message:" + for w_argument in s_frame.w_arguments(): + print w_argument.as_repr_string() + if isinstance(w_argument, model.W_PointersObject): + fields = w_argument.fetch_all(interp.space) + for i, w_field in enumerate(fields): + print "\t%s" % w_field.as_repr_string() + if i == 1 and isinstance(w_field, model.W_PointersObject): + # These are the arguments to the not-undersood message + for w_field_field in w_field.fetch_all(interp.space): + print "\t\t%s" % w_field_field.as_repr_string() + w_stack = s_frame.peek(0) + if isinstance(w_stack, model.W_BytesObject): + print "== Squeak stack:\n%s" % w_stack.as_string() + raise error.Exit("Unhandled doesNotUnderstand:") raise PrimitiveFailedError() # 
___________________________________________________________________________ diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -929,6 +929,9 @@ # ______________________________________________________________________ # Printing + def argument_strings(self): + return [ w_arg.as_repr_string() for w_arg in self.w_arguments() ] + def __str__(self): retval = self.short_str() retval += "\n%s" % self.w_method().bytecode_string(markBytecode=self.pc() + 1) @@ -1099,7 +1102,7 @@ # === Printing === - def argument_strings(self): + def w_arguments(self): return [] def method_str(self): @@ -1255,13 +1258,9 @@ # === Printing === - def argument_strings(self): + def w_arguments(self): argcount = self.w_method().argsize - tempsize = self.w_method().tempsize() - args = [] - for i in range(argcount): - args.append(self.peek(tempsize - i - 1).as_repr_string()) - return args + return [ self.stack_get(i) for i in range(argcount) ] def method_str(self): block = '[] in ' if self.is_closure_context() else '' diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -165,6 +165,7 @@ return -1 # Compilation failed, message is printed. 
s_frame = create_context(interp, w_receiver, selector, stringarg) if headless: + space.headless.set() context = s_frame else: create_process(interp, s_frame) From noreply at buildbot.pypy.org Fri Jul 18 14:08:30 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:30 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed tracing and doesNotUnderstand outputs Message-ID: <20140718120830.A910B1C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r904:c5189bb59f35 Date: 2014-07-13 17:17 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/c5189bb59f35/ Log: Fixed tracing and doesNotUnderstand outputs diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -128,10 +128,11 @@ # Now (continue to) execute the context bytecodes self.loop_bytecodes(s_frame, may_context_switch) except rstackovf.StackOverflow: + rstackovf.check_stack_overflow() + raise StackOverflow(s_frame) + finally: if self.is_tracing(): self.stack_depth -= 1 - rstackovf.check_stack_overflow() - raise StackOverflow(s_frame) def step(self, context): bytecode = context.fetch_next_bytecode() @@ -229,7 +230,7 @@ def padding(self, symbol=' '): assert self.is_tracing() - return self.stack_depth * symbol + return symbol * self.stack_depth class ReturnFromTopLevel(Exception): _attrs_ = ["object"] diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -370,24 +370,31 @@ FAIL = 19 +def get_string(w_obj): + if isinstance(w_obj, model.W_BytesObject): + return w_obj.as_string() + return w_obj.as_repr_string() + @expose_primitive(FAIL) def func(interp, s_frame, argcount): if interp.space.headless.is_set() and s_frame.w_method().lookup_selector == 'doesNotUnderstand:': - w_msg = s_frame.peek(1) - if isinstance(w_msg, model.W_BytesObject): - print "== Error message: %s" % w_msg.as_string() + print "== Error message: %s" % 
get_string(s_frame.peek(1)) + print "== Receiver: %s" % s_frame.w_receiver().as_repr_string() + w_arguments = s_frame.w_arguments() + if len(w_arguments) >= 1: + w_message = w_arguments[0] + if isinstance(w_message, model.W_PointersObject): + fields = w_message.fetch_all(interp.space) + if len(fields) >= 1: + print "== Selector: %s" % get_string(fields[0]) + if len(fields) >= 2: + w_args = fields[0] + if isinstance(w_args, model.W_PointersObject): + arg_strings = [ get_string(w_arg) for w_arg in w_args.fetch_all(interp.space) ] + print "== Arguments: %s" % ', '.join(arg_strings) + else: + print "== Message: %s" % w_message print "== VM Stack:%s" % s_frame.print_stack() - print "== Message:" - for w_argument in s_frame.w_arguments(): - print w_argument.as_repr_string() - if isinstance(w_argument, model.W_PointersObject): - fields = w_argument.fetch_all(interp.space) - for i, w_field in enumerate(fields): - print "\t%s" % w_field.as_repr_string() - if i == 1 and isinstance(w_field, model.W_PointersObject): - # These are the arguments to the not-undersood message - for w_field_field in w_field.fetch_all(interp.space): - print "\t\t%s" % w_field_field.as_repr_string() w_stack = s_frame.peek(0) if isinstance(w_stack, model.W_BytesObject): print "== Squeak stack:\n%s" % w_stack.as_string() From noreply at buildbot.pypy.org Fri Jul 18 14:08:31 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:31 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed tests and removed obsolete tests. Message-ID: <20140718120831.BE2EE1C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r905:6656aa03e7b0 Date: 2014-07-14 13:32 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6656aa03e7b0/ Log: Fixed tests and removed obsolete tests. 
diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -26,8 +26,11 @@ if retval is not None: return retval.w_self() except interpreter.Return, nlr: - nlr.s_target_context.push(nlr.value) - return nlr.s_target_context.w_self() + new_context = nlr.s_target_context + if new_context is None: + new_context = ctxt.s_sender() + new_context.push(nlr.value) + return new_context.w_self() def assert_list(list, expected): for i in range(len(list)): @@ -975,81 +978,6 @@ 2, "value:value:"]], test) -def test_stacking_interpreter(): - # | testBlock | - # testBlock := [ :aNumber | - # aNumber = 0 - # ifTrue: [ 0 ] - # ifFalse: [ (testBlock value: aNumber - 1) + aNumber ]]. - # ^ testBlock value: 11 - interp = TestInterpreter(space, max_stack_depth=3) - #create a method with the correct bytecodes and a literal - bytes = reduce(operator.add, map(chr, [0x8a, 0x01, 0x68, 0x10, 0x8f, 0x11, - 0x00, 0x11, 0x10, 0x75, 0xb6, 0x9a, 0x75, 0xa4, 0x09, 0x8c, 0x00, 0x01, - 0x10, 0x76, 0xb1, 0xca, 0x10, 0xb0, 0x7d, 0x8e, 0x00, 0x00, 0x8c, 0x00, - 0x00, 0x20, 0xca, 0x7c])) - w_method = model.W_CompiledMethod(space, len(bytes)) - w_method.islarge = 1 - w_method.bytes = bytes - w_method.argsize=0 - w_method._tempsize=1 - w_method.setliterals([space.wrap_int(11)]) - - #create a frame for that method - w_frame = w_method.create_frame(space, space.wrap_int(0), []).w_self() - try: - interp.loop(w_frame) - except interpreter.ReturnFromTopLevel, e: - assert space.unwrap_int(e.object) == 66 - except interpreter.StackOverflow, e: - assert False - try: - interp = TestInterpreter(space, image_name="", max_stack_depth=10) - interp._loop = True - interp.loop_bytecodes(w_method.create_frame(space, space.wrap_int(0), [])) - except interpreter.StackOverflow, e: - assert isinstance(e.s_new_context, shadow.MethodContextShadow) - except interpreter.ReturnFromTopLevel, e: - assert False - -class 
StackTestInterpreter(TestInterpreter): - def stack_frame(self, s_frame, s_sender, may_interrupt=True): - stack_depth = self.current_stack_depth - for i in range(stack_depth + 1): - assert sys._getframe(5 + i * 7).f_code.co_name == 'loop_bytecodes' - assert sys._getframe(6 + stack_depth * 7).f_code.co_name == 'loop' - return interpreter.Interpreter.stack_frame(self, s_frame, s_sender, may_interrupt) - -def test_actual_stackdepth(): - # | testBlock | - # testBlock := [ :aNumber | - # aNumber = 0 - # ifTrue: [ 2 ] - # ifFalse: [ (testBlock value: aNumber - 1) + aNumber ]]. - # ^ testBlock value: 11 - interp = StackTestInterpreter(space, max_stack_depth=10) - #create a method with the correct bytecodes and a literal - bytes = reduce(operator.add, map(chr, [0x8a, 0x01, 0x68, 0x10, 0x8f, 0x11, - 0x00, 0x11, 0x10, 0x75, 0xb6, 0x9a, 0x77, 0xa4, 0x09, 0x8c, 0x00, 0x01, - 0x10, 0x76, 0xb1, 0xca, 0x10, 0xb0, 0x7d, 0x8e, 0x00, 0x00, 0x8c, 0x00, - 0x00, 0x20, 0xca, 0x7c])) - - w_method = model.W_CompiledMethod(space, len(bytes)) - w_method.islarge = 1 - w_method.bytes = bytes - w_method.argsize=0 - w_method._tempsize=1 - w_method.setliterals([space.wrap_int(11)]) - - #create a frame for that method - w_frame = w_method.create_frame(space, space.wrap_int(0), []).w_self() - try: - interp.loop(w_frame) - except interpreter.ReturnFromTopLevel, e: - assert space.unwrap_int(e.object) == 68 - except interpreter.StackOverflow, e: - assert False - def test_c_stack_reset_on_sender_chain_manipulation(): bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) w_frame, s_frame = new_frame(bytes) From noreply at buildbot.pypy.org Fri Jul 18 14:08:32 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:32 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed test. 
Message-ID: <20140718120832.D83981C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r906:84e2ca8cd10e Date: 2014-07-14 13:35 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/84e2ca8cd10e/ Log: Fixed test. diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -195,11 +195,12 @@ assert s_object.fetch(1).value == 13 def test_cached_object_shadow(): - w_o = space.wrap_list([0, 1, 2, 3, 4, 5, 6, 7]) + l = map(space.w, [0, 1, 2, 3, 4, 5, 6, 7]) + w_o = space.wrap_list(l) s_o = w_o.as_cached_object_get_shadow(space) version = s_o.version for i in range(w_o.size()): - assert w_o.at0(space, i) == i + assert w_o.at0(space, i) == l[i] w_o.atput0(space, 0, 8) assert version is not s_o.version assert w_o.at0(space, 0) == 8 From noreply at buildbot.pypy.org Fri Jul 18 14:08:34 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:34 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Removed obsolete CLI flag. Message-ID: <20140718120834.0A9E61C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r907:0b36d560ebf1 Date: 2014-07-14 13:35 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/0b36d560ebf1/ Log: Removed obsolete CLI flag. 
diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -12,7 +12,7 @@ def _usage(argv): print """ - Usage: %s [-r|-m|-h] [-naPu] [-jpis] [-tlLE] + Usage: %s [-r|-m|-h] [-naPu] [-jpiS] [-tlLE] - image path (default: Squeak.image) Execution mode: @@ -104,8 +104,6 @@ code, idx = get_parameter(argv, idx, arg) elif arg in ["-i", "--no-interrupts"]: interrupts = False - elif arg in ["-s"]: - max_stack_depth, idx = get_int_parameter(argv, idx, arg) elif arg in ["-P", "--process"]: headless = False elif arg in ["-S"]: @@ -172,7 +170,7 @@ context = active_context(space) else: context = active_context(space) - + w_result = execute_context(interp, context) print result_string(w_result) storage_logger.print_aggregated_log() From noreply at buildbot.pypy.org Fri Jul 18 14:08:35 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:35 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: - Added a parameterized --trace option to control the stack depth up to which the trace is performed. Message-ID: <20140718120835.3EB7E1C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r908:cedc9509b5c7 Date: 2014-07-14 18:35 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/cedc9509b5c7/ Log: - Added a parameterized --trace option to control the stack depth up to which the trace is performed. - Added helper to step through bytecode executions. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -35,7 +35,7 @@ ) def __init__(self, space, image=None, image_name="", - trace=False, evented=True, interrupts=True): + trace_depth=-1, evented=True, interrupts=True): # === Initialize immutable variables self.space = space self.image = image @@ -54,7 +54,7 @@ # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size self.next_wakeup_tick = 0 - self.trace = objspace.ConstantFlag(trace) + self.trace_depth = trace_depth self.trace_proxy = objspace.ConstantFlag() self.stack_depth = 0 @@ -225,12 +225,36 @@ s_frame.push_all(list(w_arguments)) return s_frame + # ============== Methods for tracing, printing and debugging ============== + + def activate_trace(self, trace_depth=0): + self.trace_depth = trace_depth + + def deactivate_trace(self): + self.trace_depth = -1 + def is_tracing(self): - return self.trace.is_set() + return jit.promote(self.trace_depth) >= 0 - def padding(self, symbol=' '): - assert self.is_tracing() - return symbol * self.stack_depth + def print_padded(self, str): + depth = jit.promote(self.trace_depth) + assert depth >= 0 + if self.stack_depth <= depth: + print (' ' * self.stack_depth) + str + + def activate_debug_bytecode(self): + "NOT_RPYTHON" + def do_break(self): + import pdb + if self.break_on_bytecodes: + pdb.set_trace() + Interpreter.debug_bytecode = do_break + self.break_on_bytecodes = True + + def debug_bytecode(self): + # This is for debugging. In a pdb console, execute the following: + # self.activate_debug_bytecode() + pass class ReturnFromTopLevel(Exception): _attrs_ = ["object"] @@ -287,7 +311,7 @@ parameters += (self.fetch_next_bytecode(), ) i = i + 1 # This is a good place to step through bytecodes. 
- # import pdb; pdb.set_trace() + interp.debug_bytecode() return actual_implementation_method(self, interp, current_bytecode, *parameters) bytecode_implementation_wrapper.func_name = actual_implementation_method.func_name return bytecode_implementation_wrapper @@ -577,7 +601,7 @@ # ###################################################################### if interp.is_tracing(): - print interp.padding() + s_frame.short_str() + interp.print_padded('-> ' + s_frame.short_str()) return interp.stack_frame(s_frame, self) @@ -594,7 +618,7 @@ # ###################################################################### if interp.is_tracing(): - print '%s %s %s: #%s' % (interp.padding('#'), special_selector, s_frame.short_str(), w_args) + interp.print_padded('-> %s %s' % (special_selector, s_frame.short_str())) if not objectmodel.we_are_translated(): import pdb; pdb.set_trace() @@ -625,8 +649,8 @@ def _call_primitive(self, code, interp, argcount, w_method, w_selector): # ################################################################## if interp.is_tracing(): - print "%s-> primitive %d \t(in %s, named #%s)" % ( - interp.padding(), code, self.w_method().get_identifier_string(), w_selector.str_content()) + interp.print_padded("-> primitive %d \t(in %s, named #%s)" % ( + code, self.w_method().get_identifier_string(), w_selector.str_content())) func = primitives.prim_holder.prim_table[code] try: # note: argcount does not include rcvr @@ -634,8 +658,8 @@ return func(interp, self, argcount, w_method) except primitives.PrimitiveFailedError, e: if interp.is_tracing(): - print "%s primitive %d FAILED\t (in %s, named %s)" % ( - interp.padding(), code, w_method.safe_identifier_string(), w_selector.str_content()) + interp.print_padded("-- primitive %d FAILED\t (in %s, named %s)" % ( + code, w_method.safe_identifier_string(), w_selector.str_content())) raise e def _return(self, return_value, interp, local_return=False): @@ -644,7 +668,7 @@ # 
################################################################## if interp.is_tracing(): - print '%s<- %s' % (interp.padding(), return_value.as_repr_string()) + interp.print_padded('<- ' + return_value.as_repr_string()) if self.home_is_self() or local_return: # a local return just needs to go up the stack once. there diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -1016,8 +1016,8 @@ # eventual errors are caught by the calling function (EXTERNAL_CALL) external_function = rffi.cast(func_bool_void, self.loadFunctionFrom(signature[0], signature[1])) - if interp.trace: - print "%sCalling %s >> %s" % (interp.padding(), signature[0], signature[1]) + if interp.is_tracing(): + interp.print_padded("Calling %s >> %s" % (signature[0], signature[1])) external_function() if not self.fail_reason == 0: diff --git a/spyvm/plugins/vmdebugging.py b/spyvm/plugins/vmdebugging.py --- a/spyvm/plugins/vmdebugging.py +++ b/spyvm/plugins/vmdebugging.py @@ -10,12 +10,12 @@ @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def trace(interp, s_frame, w_rcvr): - interp.trace.set() + interp.activate_trace() return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def untrace(interp, s_frame, w_rcvr): - interp.trace.unset() + interp.deactivate_trace() return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -8,7 +8,7 @@ space, interp, _, _ = read_image('bootstrapped.image') w = space.w copy_to_module(locals(), __name__) - interp.trace.unset() + interp.deactivate_trace() space.initialize_class(space.w_String, interp) def teardown_module(): @@ -38,7 +38,10 @@ except Exception: w_selector = find_symbol_in_methoddict_of(selector, w(intmask(candidates[0])).getclass(space).shadow) - interp.trace.set_to(trace) + if trace: + 
interp.activate_trace() + else: + interp.deactivate_trace() for i, v in enumerate(candidates): x = w_l(v) if j is None: @@ -50,7 +53,7 @@ y = w_l(j) z = perform_primitive(x, w_selector, y) assert r_uint(z.value) == r_uint(operation(v, y.value)) - interp.trace.unset() + interp.deactivate_trace() def test_bitAnd(): do_primitive("bitAnd:", operator.and_) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -12,7 +12,7 @@ def _usage(argv): print """ - Usage: %s [-r|-m|-h] [-naPu] [-jpiS] [-tlLE] + Usage: %s [-r|-m|-h] [-naPu] [-jpiS] [-tTlLE] - image path (default: Squeak.image) Execution mode: @@ -39,6 +39,7 @@ Logging parameters: -t|--trace - Output a trace of each message, primitive, return value and process switch. + -T - Like -t, but limit the stack depth for the trace to . -l|--storage-log - Output a log of storage operations. -L|--storage-log-aggregate - Output an aggregated storage log at the end of execution. -E|--storage-log-elements - Include classnames of elements into the storage log. 
@@ -75,7 +76,7 @@ # == Other parameters poll = False interrupts = True - trace = False + trace_depth = -1 space = prebuilt_space idx = 1 @@ -95,7 +96,11 @@ elif arg in ["-m", "--method"]: selector, idx = get_parameter(argv, idx, arg) elif arg in ["-t", "--trace"]: - trace = True + trace_depth = sys.maxint + elif arg in ["-T"]: + trace_depth, idx = get_int_parameter(argv, idx, arg) + if trace_depth < 0: + raise error.Exit("Need argument >= 0 for -T.") elif arg in ["-p", "--poll"]: poll = True elif arg in ["-a", "--arg"]: @@ -146,7 +151,7 @@ image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) interp = interpreter.Interpreter(space, image, image_name=path, - trace=trace, evented=not poll, + trace_depth=trace_depth, evented=not poll, interrupts=interrupts) space.runtime_setup(argv[0]) print_error("") # Line break after image-loading characters From noreply at buildbot.pypy.org Fri Jul 18 14:08:36 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:36 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed bug in CompiledMethod: literals were set to nil when the header was changed. Caused incorrectly compiled code. Message-ID: <20140718120836.66AA11C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r909:c063fbae6f06 Date: 2014-07-14 18:39 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/c063fbae6f06/ Log: Fixed bug in CompiledMethod: literals were set to nil when the header was changed. Caused incorrectly compiled code. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1154,7 +1154,7 @@ def compute_pos(self, n): return n * (NATIVE_DEPTH / self._depth) - + # XXX Shouldn't compiledmethod have class reference for subclassed compiled # methods? 
class W_CompiledMethod(W_AbstractObjectWithIdentityHash): @@ -1185,20 +1185,22 @@ def __init__(self, space, bytecount=0, header=0): self.bytes = ["\x00"] * bytecount - self.setheader(space, header) + self.setheader(space, header, initializing=True) def fillin(self, space, g_self): # Implicitely sets the header, including self.literalsize for i, w_object in enumerate(g_self.get_pointers()): - self.literalatput0(space, i, w_object) + self.literalatput0(space, i, w_object, initializing=True) self.setbytes(g_self.get_bytes()[self.bytecodeoffset():]) # === Setters === - def setheader(self, space, header): + def setheader(self, space, header, initializing=False): _primitive, literalsize, islarge, tempsize, argsize = constants.decode_compiled_method_header(header) - self.literalsize = literalsize - self.literals = [space.w_nil] * self.literalsize + if initializing or self.literalsize != literalsize: + # Keep the literals if possible. + self.literalsize = literalsize + self.literals = [space.w_nil] * self.literalsize self.header = header self.argsize = argsize self._tempsize = tempsize @@ -1314,11 +1316,11 @@ return space.wrap_int(self.getheader()) else: return self.getliteral(index0 - 1) - - def literalatput0(self, space, index0, w_value): + + def literalatput0(self, space, index0, w_value, initializing=False): if index0 == 0: header = space.unwrap_int(w_value) - self.setheader(space, header) + self.setheader(space, header, initializing=initializing) else: self.setliteral(index0 - 1, w_value) From noreply at buildbot.pypy.org Fri Jul 18 14:08:37 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:37 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Improved crash report when running in headless mode. 
Message-ID: <20140718120837.7F17F1C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r910:9a0c4bf44ab6 Date: 2014-07-14 18:42 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/9a0c4bf44ab6/ Log: Improved crash report when running in headless mode. Refactored flushCache primitive to walk the GC roots with less memory overhead. Not allocating huge lists containing all objects, instead walking recursively. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -224,7 +224,7 @@ s_frame.push(w_receiver) s_frame.push_all(list(w_arguments)) return s_frame - + # ============== Methods for tracing, printing and debugging ============== def activate_trace(self, trace_depth=0): @@ -585,7 +585,7 @@ w_method = receiverclassshadow.lookup(w_selector) except MethodNotFound: return self._doesNotUnderstand(w_selector, argcount, interp, receiver) - + code = w_method.primitive() if code: if w_arguments: @@ -615,7 +615,7 @@ s_class = receiver.class_shadow(self.space) w_method = s_class.lookup(w_special_selector) s_frame = w_method.create_frame(interp.space, receiver, w_args) - + # ###################################################################### if interp.is_tracing(): interp.print_padded('-> %s %s' % (special_selector, s_frame.short_str())) @@ -635,13 +635,15 @@ self.pop() # The receiver, already known. 
try: + if interp.space.headless.is_set(): + primitives.exitFromHeadlessExecution(self, "doesNotUnderstand:", w_message) return self._sendSpecialSelector(interp, receiver, "doesNotUnderstand", [w_message]) except MethodNotFound: from spyvm.shadow import ClassShadow s_class = receiver.class_shadow(self.space) assert isinstance(s_class, ClassShadow) - print "Missing doesNotUnderstand in hierarchy of %s" % s_class.getname() - raise + from spyvm import error + raise error.Exit("Missing doesNotUnderstand in hierarchy of %s" % s_class.getname()) def _mustBeBoolean(self, interp, receiver): return self._sendSpecialSelector(interp, receiver, "mustBeBoolean") @@ -665,11 +667,11 @@ def _return(self, return_value, interp, local_return=False): # unfortunately, this assert is not true for some tests. TODO fix this. # assert self._stack_ptr == self.tempsize() - + # ################################################################## if interp.is_tracing(): interp.print_padded('<- ' + return_value.as_repr_string()) - + if self.home_is_self() or local_return: # a local return just needs to go up the stack once. 
there # it will find the sender as a local, and we don't have to diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -6,7 +6,7 @@ from spyvm.error import PrimitiveFailedError, PrimitiveNotYetWrittenError from spyvm import wrapper -from rpython.rlib import rarithmetic, rfloat, unroll, jit +from rpython.rlib import rarithmetic, rfloat, unroll, jit, objectmodel def assert_class(interp, w_obj, w_class): if not w_obj.getclass(interp.space).is_same_object(w_class): @@ -375,30 +375,34 @@ return w_obj.as_string() return w_obj.as_repr_string() +def exitFromHeadlessExecution(s_frame, selector="", w_message=None): + if not objectmodel.we_are_translated(): + import pdb; pdb.set_trace() + print "== Receiver: %s" % s_frame.w_receiver().as_repr_string() + if isinstance(w_message, model.W_PointersObject): + fields = w_message.fetch_all(s_frame.space) + if len(fields) >= 1: + print "== Selector: %s" % get_string(fields[0]) + if len(fields) >= 2: + w_args = fields[1] + if isinstance(w_args, model.W_PointersObject): + arg_strings = [ get_string(w_arg) for w_arg in w_args.fetch_all(s_frame.space) ] + if len(arg_strings) > 0: + print "== Arguments: %s" % ', '.join(arg_strings) + print "== Smalltalk Stack:%s" % s_frame.print_stack() + if selector == "": + selector = s_frame.w_method().lookup_selector + raise error.Exit("Unhandled %s in headless mode." 
% selector) + @expose_primitive(FAIL) def func(interp, s_frame, argcount): - if interp.space.headless.is_set() and s_frame.w_method().lookup_selector == 'doesNotUnderstand:': - print "== Error message: %s" % get_string(s_frame.peek(1)) - print "== Receiver: %s" % s_frame.w_receiver().as_repr_string() - w_arguments = s_frame.w_arguments() - if len(w_arguments) >= 1: - w_message = w_arguments[0] - if isinstance(w_message, model.W_PointersObject): - fields = w_message.fetch_all(interp.space) - if len(fields) >= 1: - print "== Selector: %s" % get_string(fields[0]) - if len(fields) >= 2: - w_args = fields[0] - if isinstance(w_args, model.W_PointersObject): - arg_strings = [ get_string(w_arg) for w_arg in w_args.fetch_all(interp.space) ] - print "== Arguments: %s" % ', '.join(arg_strings) - else: - print "== Message: %s" % w_message - print "== VM Stack:%s" % s_frame.print_stack() - w_stack = s_frame.peek(0) - if isinstance(w_stack, model.W_BytesObject): - print "== Squeak stack:\n%s" % w_stack.as_string() - raise error.Exit("Unhandled doesNotUnderstand:") + if interp.space.headless.is_set(): + w_message = None + if s_frame.w_method().lookup_selector == 'doesNotUnderstand:': + w_arguments = s_frame.w_arguments() + if len(w_arguments) >= 1: + w_message = w_arguments[0] + exitFromHeadlessExecution(s_frame, w_message=w_message) raise PrimitiveFailedError() # ___________________________________________________________________________ @@ -725,6 +729,9 @@ @expose_primitive(BE_DISPLAY, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): + if interp.space.headless.is_set(): + exitFromHeadlessExecution(s_frame) + if not isinstance(w_rcvr, model.W_PointersObject) or w_rcvr.size() < 4: raise PrimitiveFailedError # the fields required are bits (a pointer to a Bitmap), width, height, depth @@ -857,7 +864,6 @@ @expose_primitive(EXIT_TO_DEBUGGER, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - from rpython.rlib import objectmodel if not objectmodel.we_are_translated(): 
import pdb; pdb.set_trace() raise PrimitiveNotYetWrittenError() @@ -930,36 +936,38 @@ w_class.as_class_get_shadow(interp.space).flush_method_caches() return w_rcvr + at objectmodel.specialize.arg(0) +def walk_gc_references(func, gcrefs): + from rpython.rlib import rgc + for gcref in gcrefs: + if gcref and not rgc.get_gcflag_extra(gcref): + try: + rgc.toggle_gcflag_extra(gcref) + func(gcref) + walk_gc_references(func, rgc.get_rpy_referents(gcref)) + finally: + rgc.toggle_gcflag_extra(gcref) + at objectmodel.specialize.arg(0) +def walk_gc_objects(func): + from rpython.rlib import rgc + walk_gc_references(func, rgc.get_rpy_roots()) + + at objectmodel.specialize.arg(0, 1) +def walk_gc_objects_of_type(type, func): + from rpython.rlib import rgc + def check_type(gcref): + w_obj = rgc.try_cast_gcref_to_instance(type, gcref) + if w_obj: + func(w_obj) + walk_gc_objects(check_type) + if not stm_enabled(): - # XXX: We don't have a global symbol cache. Instead, we get all - # method dictionary shadows (those exists for all methodDicts that - # have been modified) and flush them + # XXX: We don't have a global symbol cache. Instead, we walk all + # MethodDictionaryShadow objects and flush them. 
@expose_primitive(SYMBOL_FLUSH_CACHE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - dicts_s = [] - from rpython.rlib import rgc - - roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] - pending = roots[:] - while pending: - gcref = pending.pop() - if not rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - w_obj = rgc.try_cast_gcref_to_instance(shadow.MethodDictionaryShadow, gcref) - if w_obj is not None: - dicts_s.append(w_obj) - pending.extend(rgc.get_rpy_referents(gcref)) - - while roots: - gcref = roots.pop() - if rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - roots.extend(rgc.get_rpy_referents(gcref)) - - for s_dict in dicts_s: - if s_dict.invalid: - s_dict.sync_method_cache() + walk_gc_objects_of_type(shadow.MethodDictionaryShadow, lambda s_dict: s_dict.flush_method_cache()) return w_rcvr # ___________________________________________________________________________ From noreply at buildbot.pypy.org Fri Jul 18 14:08:38 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:38 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Commented out flushCache primitive. We should not need it, since we monitor changes to MethodDictionary objects directly. This primitive takes too long and prevents running the VM in interpreted mode. Message-ID: <20140718120838.9CBA71C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r911:63016d515218 Date: 2014-07-14 18:44 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/63016d515218/ Log: Commented out flushCache primitive. We should not need it, since we monitor changes to MethodDictionary objects directly. This primitive takes too long and prevents running the VM in interpreted mode. diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -967,7 +967,9 @@ # MethodDictionaryShadow objects and flush them. 
@expose_primitive(SYMBOL_FLUSH_CACHE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - walk_gc_objects_of_type(shadow.MethodDictionaryShadow, lambda s_dict: s_dict.flush_method_cache()) + # This takes a long time (at least in interpreted mode), and is not really necessary. + # We are monitoring changes to MethodDictionaries, so there is no need for the image to tell us. + #walk_gc_objects_of_type(shadow.MethodDictionaryShadow, lambda s_dict: s_dict.flush_method_cache()) return w_rcvr # ___________________________________________________________________________ From noreply at buildbot.pypy.org Fri Jul 18 14:08:39 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:39 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Some fixes to storage strategies: Message-ID: <20140718120839.CCD4D1C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r912:541b01ff2814 Date: 2014-07-14 20:22 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/541b01ff2814/ Log: Some fixes to storage strategies: - Added size parameter to each Shadow constructor - Optimized transitions between certain strategies. 
diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -693,7 +693,7 @@ old_shadow = self._get_shadow() shadow = old_shadow if not isinstance(old_shadow, TheClass): - shadow = TheClass(space, self) + shadow = TheClass(space, self, old_shadow.size()) self.switch_shadow(shadow) return shadow diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -727,8 +727,8 @@ _attrs_ = ["w_bits", "width", "height", "depth", "offsetX", "offsetY", "msb", "pixPerWord", "pitch", "invalid"] - def __init__(self, space, w_self): - AbstractCachingShadow.__init__(self, space, w_self) + def __init__(self, space, w_self, size): + AbstractCachingShadow.__init__(self, space, w_self, size) self.invalid = False def intOrIfNil(self, w_int, i): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1314,7 +1314,7 @@ # The block bytecodes are stored inline: so we skip past the # byteodes to invoke this primitive to find them (hence +2) initialip = s_frame.pc() + 2 - s_new_context = shadow.BlockContextShadow(interp.space, None, w_method_context, argcnt, initialip) + s_new_context = shadow.BlockContextShadow(interp.space, None, 0, w_method_context, argcnt, initialip) return s_new_context.w_self() @expose_primitive(VALUE, result_is_new_frame=True) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -17,7 +17,7 @@ provides_getname = False repr_classname = "AbstractShadow" - def __init__(self, space, w_self): + def __init__(self, space, w_self, size): self.space = space assert w_self is None or isinstance(w_self, model.W_PointersObject) self._w_self = w_self @@ -64,7 +64,7 @@ _attrs_ = [] repr_classname = "AbstractStorageShadow" def __init__(self, space, w_self, size): - AbstractShadow.__init__(self, space, w_self) + AbstractShadow.__init__(self, space, w_self, size) def store(self, n0, w_val): if 
self.can_contain(w_val): return self.do_store(n0, w_val) @@ -79,6 +79,15 @@ raise NotImplementedError() def generalized_strategy_for(self, w_val): raise NotImplementedError() + + def copy_from_AllNil(self, all_nil_storage): + pass # Already initialized + def copy_from(self, other_shadow): + assert self.size() == other_shadow.size() + for i in range(self.size()): + w_val = other_shadow.fetch(i) + if not w_val.is_nil(self.space): # nil fields already initialized + self.store(i, w_val) class AllNilStorageShadow(AbstractStorageShadow): repr_classname = "AllNilStorageShadow" @@ -130,9 +139,6 @@ self.storage[n0] = self.nil_value else: self.storage[n0] = self.unwrap(self.space, w_val) - - def copy_from_AllNil(self, all_nil_storage): - pass # Already initialized # This is to avoid code duplication @objectmodel.specialize.arg(0) @@ -230,13 +236,6 @@ self.initialize_storage(size) def size(self): return len(self.storage) - def copy_from(self, other_shadow): - if self.size() != other_shadow.size(): - self.initialize_storage(other_shadow.size()) - for i in range(self.size()): - w_val = other_shadow.fetch(i) - if not w_val.is_nil(self.space): - self.store(i, w_val) class ListStorageShadow(AbstractStorageShadow): _attrs_ = ['storage'] @@ -273,8 +272,8 @@ import_from_mixin(version.VersionMixin) version = None - def __init__(self, space, w_self): - ListStorageShadow.__init__(self, space, w_self, 0) + def __init__(self, space, w_self, size): + ListStorageShadow.__init__(self, space, w_self, size) self.changed() # ____________________________________________________________ @@ -305,9 +304,9 @@ provides_getname = True repr_classname = "ClassShadow" - def __init__(self, space, w_self): + def __init__(self, space, w_self, size): self.subclass_s = {} - AbstractCachingShadow.__init__(self, space, w_self) + AbstractCachingShadow.__init__(self, space, w_self, size) def store(self, n0, w_val): AbstractCachingShadow.store(self, n0, w_val) @@ -425,7 +424,7 @@ def flush_method_caches(self): 
look_in_shadow = self while look_in_shadow is not None: - look_in_shadow.s_methoddict().sync_method_cache() + look_in_shadow.s_methoddict().flush_method_cache() look_in_shadow = look_in_shadow._s_superclass def new(self, extrasize=0): @@ -535,7 +534,7 @@ "NOT_RPYTHON" # this is only for testing. if self._s_methoddict is None: w_methoddict = model.W_PointersObject(self.space, None, 2) - w_methoddict.store(self.space, 1, model.W_PointersObject(self.space, None, 0)) + w_methoddict.store(self.space, constants.METHODDICT_VALUES_INDEX, model.W_PointersObject(self.space, None, 0)) self.store_s_methoddict(w_methoddict.as_methoddict_get_shadow(self.space)) self.s_methoddict().invalid = False @@ -553,11 +552,11 @@ _attrs_ = ['methoddict', 'invalid', 's_class'] repr_classname = "MethodDictionaryShadow" - def __init__(self, space, w_self): + def __init__(self, space, w_self, size): self.invalid = True self.s_class = None self.methoddict = {} - ListStorageShadow.__init__(self, space, w_self, 0) + ListStorageShadow.__init__(self, space, w_self, size) def update(self): self.sync_method_cache() @@ -567,7 +566,7 @@ return None # we may be invalid if Smalltalk code did not call flushCache return self.methoddict.get(w_selector, None) - # Remove update call for changes to ourselves: + # We do not call update() after changes to ourselves: # Whenever a method is added, it's keyword is added to w_self, then the # w_compiled_method is added to our observee. # sync_method_cache at this point would not have the desired effect, because in @@ -575,17 +574,30 @@ # its contents array is filled with the value belonging to the new key. 
def store(self, n0, w_value): ListStorageShadow.store(self, n0, w_value) - self.invalid = True - + if n0 == constants.METHODDICT_VALUES_INDEX: + self.setup_notification() + if n0 >= constants.METHODDICT_NAMES_INDEX: + self.invalid = True + + def setup_notification(self): + self.w_values().as_observed_get_shadow(self.space).notify(self) + + def w_values(self): + w_values = self.fetch(constants.METHODDICT_VALUES_INDEX) + assert isinstance(w_values, model.W_PointersObject) + return w_values + + def flush_method_cache(self): + # Lazy synchronization: Only flush the cache, if we are already synchronized. + if self.invalid: + self.sync_method_cache() + def sync_method_cache(self): if self.size() == 0: return - w_values = self.fetch(constants.METHODDICT_VALUES_INDEX) - assert isinstance(w_values, model.W_PointersObject) - s_values = w_values.as_observed_get_shadow(self.space) - s_values.notify(self) + self.methoddict = {} size = self.size() - constants.METHODDICT_NAMES_INDEX - self.methoddict = {} + w_values = self.w_values() for i in range(size): w_selector = self.w_self().fetch(self.space, constants.METHODDICT_NAMES_INDEX+i) if not w_selector.is_nil(self.space): @@ -614,12 +626,12 @@ _attrs_ = ['_w_self_size'] repr_classname = "AbstractRedirectingShadow" - def __init__(self, space, w_self): - AbstractShadow.__init__(self, space, w_self) + def __init__(self, space, w_self, size): if w_self is not None: self._w_self_size = w_self.size() else: - self._w_self_size = 0 + self._w_self_size = size + AbstractShadow.__init__(self, space, w_self, self._w_self_size) def size(self): return self._w_self_size @@ -641,9 +653,9 @@ # ______________________________________________________________________ # Initialization - def __init__(self, space, w_self): + def __init__(self, space, w_self, size=0): self._s_sender = None - AbstractRedirectingShadow.__init__(self, space, w_self) + AbstractRedirectingShadow.__init__(self, space, w_self, size) self.instances_w = {} def 
copy_field_from(self, n0, other_shadow): @@ -981,14 +993,14 @@ # === Initialization === - def __init__(self, space, w_self=None, w_home=None, argcnt=0, initialip=0): + def __init__(self, space, w_self=None, size=0, w_home=None, argcnt=0, initialip=0): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) creating_w_self = w_self is None if creating_w_self: s_home = w_home.as_methodcontext_get_shadow(space) contextsize = s_home.size() - s_home.tempsize() w_self = model.W_PointersObject(space, space.w_BlockContext, contextsize) - ContextPartShadow.__init__(self, space, w_self) + ContextPartShadow.__init__(self, space, w_self, size) if creating_w_self: w_self.store_shadow(self) self.store_expected_argument_count(argcnt) @@ -1115,10 +1127,10 @@ # === Initialization === @jit.unroll_safe - def __init__(self, space, w_self=None, w_method=None, w_receiver=None, + def __init__(self, space, w_self=None, size=0, w_method=None, w_receiver=None, arguments=[], closure=None, pc=0): self = jit.hint(self, access_directly=True, fresh_virtualizable=True) - ContextPartShadow.__init__(self, space, w_self) + ContextPartShadow.__init__(self, space, w_self, size) self.store_w_receiver(w_receiver) self.store_pc(pc) self.closure = closure @@ -1280,8 +1292,8 @@ class ObserveeShadow(ListStorageShadow): _attrs_ = ['dependent'] repr_classname = "ObserveeShadow" - def __init__(self, space, w_self): - ListStorageShadow.__init__(self, space, w_self, 0) + def __init__(self, space, w_self, size): + ListStorageShadow.__init__(self, space, w_self, size) self.dependent = None def store(self, n0, w_value): From noreply at buildbot.pypy.org Fri Jul 18 14:08:41 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:41 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added safety-net try-except block to print Exception messages (not printed by itself in translated mode). 
Message-ID: <20140718120841.07FF11C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r913:0f59f7f54a2c Date: 2014-07-15 22:54 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/0f59f7f54a2c/ Log: Added safety-net try-except block to print Exception messages (not printed by itself in translated mode). diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -64,6 +64,16 @@ prebuilt_space = objspace.ObjSpace() +def safe_entry_point(argv): + try: + return entry_point(argv) + except error.Exit, e: + print_error("Exited: %s" % e.msg) + return -1 + except Exception, e: + print_error("Exception: %s" % str(e)) + return -1 + def entry_point(argv): # == Main execution parameters path = None @@ -146,7 +156,7 @@ except OSError as e: print_error("%s -- %s (LoadError)" % (os.strerror(e.errno), path)) return 1 - + # Load & prepare image and environment image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) @@ -164,8 +174,6 @@ w_receiver = space.wrap_int(number) if code: selector = compile_code(interp, w_receiver, code) - if selector is None: - return -1 # Compilation failed, message is printed. s_frame = create_context(interp, w_receiver, selector, stringarg) if headless: space.headless.set() @@ -198,24 +206,19 @@ # TODO - Find a way to cleanly initialize the image, without executing the active_context of the image. # Instead, we want to execute our own context. Then remove this flag (and all references to it) space.suppress_process_switch.set() - try: - w_result = interp.perform( - w_receiver_class, - "compile:classified:notifying:", - w_arguments = [space.wrap_string("%s\r\n%s" % (selector, code)), - space.wrap_string("spy-run-code"), - space.w_nil] - ) - - # TODO - is this expected in every image? 
- if not isinstance(w_result, model.W_BytesObject) or w_result.as_string() != selector: - print_error("Compilation failed, unexpected result: %s" % result_string(w_result)) - return None - except error.Exit, e: - print_error("Exited while compiling code: %s" % e.msg) - return None - finally: - space.suppress_process_switch.unset() + + w_result = interp.perform( + w_receiver_class, + "compile:classified:notifying:", + w_arguments = [space.wrap_string("%s\r\n%s" % (selector, code)), + space.wrap_string("spy-run-code"), + space.w_nil] + ) + # TODO - is this expected in every image? + if not isinstance(w_result, model.W_BytesObject) or w_result.as_string() != selector: + raise error.Exit("Unexpected compilation result (probably failed to compile): %s" % result_string(w_result)) + space.suppress_process_switch.unset() + w_receiver_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() return selector @@ -253,12 +256,8 @@ active_process.store_suspended_context(space.w_nil) return w_active_context.as_context_get_shadow(space) -def execute_context(interp, s_frame, measure=False): - try: - return interp.interpret_toplevel(s_frame.w_self()) - except error.Exit, e: - print_error("Exited: %s" % e.msg) - return None +def execute_context(interp, s_frame): + return interp.interpret_toplevel(s_frame.w_self()) # _____ Target and Main _____ @@ -269,11 +268,11 @@ if hasattr(rgc, "stm_is_enabled"): driver.config.translation.stm = True driver.config.translation.thread = True - return entry_point, None + return safe_entry_point, None def jitpolicy(self): from rpython.jit.codewriter.policy import JitPolicy return JitPolicy() if __name__ == "__main__": - entry_point(sys.argv) + safe_entry_point(sys.argv) From noreply at buildbot.pypy.org Fri Jul 18 14:08:42 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:42 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed an initialization issue Message-ID: 
<20140718120842.2B4951C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r914:a742cab321c8 Date: 2014-07-15 23:51 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a742cab321c8/ Log: Fixed an initialization issue diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -614,8 +614,8 @@ def switch_shadow(self, new_shadow, w_element=None): old_shadow = self.assert_shadow() + self.store_shadow(new_shadow) old_shadow.copy_into(new_shadow) - self.store_shadow(new_shadow) new_shadow.attach_shadow() self.log_storage("Switched", old_shadow, w_element=w_element) From noreply at buildbot.pypy.org Fri Jul 18 14:08:43 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:43 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added a name for the executable Message-ID: <20140718120843.516C61C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r915:c52509373e77 Date: 2014-07-16 00:19 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/c52509373e77/ Log: Added a name for the executable diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -268,6 +268,7 @@ if hasattr(rgc, "stm_is_enabled"): driver.config.translation.stm = True driver.config.translation.thread = True + driver.exe_name = "rsqueak" return safe_entry_point, None def jitpolicy(self): From noreply at buildbot.pypy.org Fri Jul 18 14:08:59 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:08:59 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Changes to the benchmarks. We really should put this on Monticello or something. 
Message-ID: <20140718120859.0BD2B1C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r916:30a053f8596e Date: 2014-07-16 11:07 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/30a053f8596e/ Log: Changes to the benchmarks. We really should put this on Monticello or something. diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes --- a/images/Squeak4.5-noBitBlt.changes +++ b/images/Squeak4.5-noBitBlt.changes @@ -12348,4 +12348,276 @@ ^ 0! ! !BalloonEngine methodsFor: 'primitives-misc' stamp: 'tfel 7/2/2014 17:13' prior: 17242774! primInitializeBuffer: buffer - ^ 0! ! !BalloonEngine methodsFor: 'profiling' stamp: 'ar 11/11/1998 21:16' prior: 17242962! doAddCompressedShape: points segments: nSegments leftFills: leftFills rightFills: rightFills lineWidths: lineWidths lineFills: lineFills fillIndexList: fillIndexList matrix: aMatrix "Note: This method is for profiling the overhead of loading a compressed shape into the engine." ^self primAddCompressedShape: points segments: nSegments leftFills: leftFills rightFills: rightFills lineWidths: lineWidths lineFills: lineFills fillIndexList: fillIndexList matrix: aMatrix! ! !BalloonEngine methodsFor: 'as yet unclassified' stamp: 'tfel 1/7/2014 18:07' prior: 47363563! error: aString ^self! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! BalloonEngine class instanceVariableNames: ''! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 10/25/1998 17:37' prior: 17243620! debug: aBoolean "BalloonEngine debug: true" "BalloonEngine debug: false" Debug := aBoolean! ! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 2/2/2001 15:47' prior: 17243791! doProfileStats: aBool "Note: On Macintosh systems turning on profiling can significantly degrade the performance of Balloon since we're using the high accuracy timer for measuring." "BalloonEngine doProfileStats: true" "BalloonEngine doProfileStats: false" ^false! ! 
!BalloonEngine class methodsFor: 'accessing' stamp: 'ar 10/30/1998 23:57' prior: 17244200! printBezierStats "BalloonEngine printBezierStats" "BalloonEngine resetBezierStats" Transcript cr; nextPutAll:'Bezier statistics:'; crtab; print: (BezierStats at: 1); tab; nextPutAll:' non-monoton curves splitted'; crtab; print: (BezierStats at: 2); tab; nextPutAll:' curves splitted for numerical accuracy'; crtab; print: (BezierStats at: 3); tab; nextPutAll:' curves splitted to avoid integer overflow'; crtab; print: (BezierStats at: 4); tab; nextPutAll:' curves internally converted to lines'; endEntry.! ! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 10/28/1998 23:59' prior: 17244801! printStat: time count: n string: aString Transcript cr; print: time; tab; nextPutAll:' mSecs -- '; print: n; tab; nextPutAll:' ops -- '; print: ((time asFloat / (n max: 1) asFloat) roundTo: 0.01); tab; nextPutAll: ' avg. mSecs/op -- '; nextPutAll: aString.! ! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 1/12/1999 10:52' prior: 17245153! printStats "BalloonEngine doProfileStats: true" "BalloonEngine printStats" "BalloonEngine resetStats" Transcript cr; nextPutAll:'/************** BalloonEngine statistics ****************/'. self printStat: (Times at: 1) count: (Counts at: 1) string: 'Initialization'. self printStat: (Times at: 2) count: (Counts at: 2) string: 'Finish test'. self printStat: (Times at: 3) count: (Counts at: 3) string: 'Fetching/Adding GET entries'. self printStat: (Times at: 4) count: (Counts at: 4) string: 'Adding AET entries'. self printStat: (Times at: 5) count: (Counts at: 5) string: 'Fetching/Computing fills'. self printStat: (Times at: 6) count: (Counts at: 6) string: 'Merging fills'. self printStat: (Times at: 7) count: (Counts at: 7) string: 'Displaying span buffer'. self printStat: (Times at: 8) count: (Counts at: 8) string: 'Fetching/Updating AET entries'. self printStat: (Times at: 9) count: (Counts at: 9) string: 'Changing AET entries'. 
Transcript cr; print: Times sum; nextPutAll:' mSecs for all operations'. Transcript cr; print: Counts sum; nextPutAll: ' overall operations'. Transcript endEntry.! ! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 10/30/1998 23:57' prior: 17246355! resetBezierStats BezierStats := WordArray new: 4.! ! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 10/28/1998 23:38' prior: 17246485! resetStats Times := WordArray new: 10. Counts := WordArray new: 10.! ! !BalloonEngine class methodsFor: 'class initialization' stamp: 'ar 11/11/1998 22:49' prior: 17246646! initialize "BalloonEngine initialize" BufferCache := WeakArray new: 1. Smalltalk garbageCollect. "Make the cache old" CacheProtect := Semaphore forMutualExclusion. Times := WordArray new: 10. Counts := WordArray new: 10. BezierStats := WordArray new: 4. Debug ifNil:[Debug := false].! ! !BalloonEngine class methodsFor: 'private' stamp: 'ar 11/11/1998 22:50' prior: 17247016! allocateOrRecycleBuffer: initialSize "Try to recycly a buffer. If this is not possibly, create a new one." | buffer | CacheProtect critical:[ buffer := BufferCache at: 1. BufferCache at: 1 put: nil. ]. ^buffer ifNil:[BalloonBuffer new: initialSize]! ! !BalloonEngine class methodsFor: 'private' stamp: 'ar 5/28/2000 22:17' prior: 17247350! primitiveSetBitBltPlugin: pluginName ^nil! ! !BalloonEngine class methodsFor: 'private' stamp: 'eem 6/11/2008 13:00' prior: 55068329! recycleBuffer: balloonBuffer "Try to keep the buffer for later drawing operations." CacheProtect critical:[ | buffer | buffer := BufferCache at: 1. (buffer isNil or:[buffer size < balloonBuffer size] ) ifTrue:[BufferCache at: 1 put: balloonBuffer]. ].! ! BalloonEngine initialize! ----End fileIn of C:\Dev\lang-smalltalk\images\BalloonEngine.st----! ----SNAPSHOT----{10 July 2014 . 3:48:10 pm} Squeak4.5-noBitBlt.image priorSource: 15835154! ----SNAPSHOT----{10 July 2014 . 1:49:32 pm} Squeak4.5-noBitBlt.image priorSource: 15870393! \ No newline at end of file + ^ 0! ! 
!BalloonEngine methodsFor: 'profiling' stamp: 'ar 11/11/1998 21:16' prior: 17242962! doAddCompressedShape: points segments: nSegments leftFills: leftFills rightFills: rightFills lineWidths: lineWidths lineFills: lineFills fillIndexList: fillIndexList matrix: aMatrix "Note: This method is for profiling the overhead of loading a compressed shape into the engine." ^self primAddCompressedShape: points segments: nSegments leftFills: leftFills rightFills: rightFills lineWidths: lineWidths lineFills: lineFills fillIndexList: fillIndexList matrix: aMatrix! ! !BalloonEngine methodsFor: 'as yet unclassified' stamp: 'tfel 1/7/2014 18:07' prior: 47363563! error: aString ^self! ! "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- "! BalloonEngine class instanceVariableNames: ''! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 10/25/1998 17:37' prior: 17243620! debug: aBoolean "BalloonEngine debug: true" "BalloonEngine debug: false" Debug := aBoolean! ! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 2/2/2001 15:47' prior: 17243791! doProfileStats: aBool "Note: On Macintosh systems turning on profiling can significantly degrade the performance of Balloon since we're using the high accuracy timer for measuring." "BalloonEngine doProfileStats: true" "BalloonEngine doProfileStats: false" ^false! ! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 10/30/1998 23:57' prior: 17244200! printBezierStats "BalloonEngine printBezierStats" "BalloonEngine resetBezierStats" Transcript cr; nextPutAll:'Bezier statistics:'; crtab; print: (BezierStats at: 1); tab; nextPutAll:' non-monoton curves splitted'; crtab; print: (BezierStats at: 2); tab; nextPutAll:' curves splitted for numerical accuracy'; crtab; print: (BezierStats at: 3); tab; nextPutAll:' curves splitted to avoid integer overflow'; crtab; print: (BezierStats at: 4); tab; nextPutAll:' curves internally converted to lines'; endEntry.! ! 
!BalloonEngine class methodsFor: 'accessing' stamp: 'ar 10/28/1998 23:59' prior: 17244801! printStat: time count: n string: aString Transcript cr; print: time; tab; nextPutAll:' mSecs -- '; print: n; tab; nextPutAll:' ops -- '; print: ((time asFloat / (n max: 1) asFloat) roundTo: 0.01); tab; nextPutAll: ' avg. mSecs/op -- '; nextPutAll: aString.! ! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 1/12/1999 10:52' prior: 17245153! printStats "BalloonEngine doProfileStats: true" "BalloonEngine printStats" "BalloonEngine resetStats" Transcript cr; nextPutAll:'/************** BalloonEngine statistics ****************/'. self printStat: (Times at: 1) count: (Counts at: 1) string: 'Initialization'. self printStat: (Times at: 2) count: (Counts at: 2) string: 'Finish test'. self printStat: (Times at: 3) count: (Counts at: 3) string: 'Fetching/Adding GET entries'. self printStat: (Times at: 4) count: (Counts at: 4) string: 'Adding AET entries'. self printStat: (Times at: 5) count: (Counts at: 5) string: 'Fetching/Computing fills'. self printStat: (Times at: 6) count: (Counts at: 6) string: 'Merging fills'. self printStat: (Times at: 7) count: (Counts at: 7) string: 'Displaying span buffer'. self printStat: (Times at: 8) count: (Counts at: 8) string: 'Fetching/Updating AET entries'. self printStat: (Times at: 9) count: (Counts at: 9) string: 'Changing AET entries'. Transcript cr; print: Times sum; nextPutAll:' mSecs for all operations'. Transcript cr; print: Counts sum; nextPutAll: ' overall operations'. Transcript endEntry.! ! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 10/30/1998 23:57' prior: 17246355! resetBezierStats BezierStats := WordArray new: 4.! ! !BalloonEngine class methodsFor: 'accessing' stamp: 'ar 10/28/1998 23:38' prior: 17246485! resetStats Times := WordArray new: 10. Counts := WordArray new: 10.! ! !BalloonEngine class methodsFor: 'class initialization' stamp: 'ar 11/11/1998 22:49' prior: 17246646! 
initialize "BalloonEngine initialize" BufferCache := WeakArray new: 1. Smalltalk garbageCollect. "Make the cache old" CacheProtect := Semaphore forMutualExclusion. Times := WordArray new: 10. Counts := WordArray new: 10. BezierStats := WordArray new: 4. Debug ifNil:[Debug := false].! ! !BalloonEngine class methodsFor: 'private' stamp: 'ar 11/11/1998 22:50' prior: 17247016! allocateOrRecycleBuffer: initialSize "Try to recycly a buffer. If this is not possibly, create a new one." | buffer | CacheProtect critical:[ buffer := BufferCache at: 1. BufferCache at: 1 put: nil. ]. ^buffer ifNil:[BalloonBuffer new: initialSize]! ! !BalloonEngine class methodsFor: 'private' stamp: 'ar 5/28/2000 22:17' prior: 17247350! primitiveSetBitBltPlugin: pluginName ^nil! ! !BalloonEngine class methodsFor: 'private' stamp: 'eem 6/11/2008 13:00' prior: 55068329! recycleBuffer: balloonBuffer "Try to keep the buffer for later drawing operations." CacheProtect critical:[ | buffer | buffer := BufferCache at: 1. (buffer isNil or:[buffer size < balloonBuffer size] ) ifTrue:[BufferCache at: 1 put: balloonBuffer]. ].! ! BalloonEngine initialize! ----End fileIn of C:\Dev\lang-smalltalk\images\BalloonEngine.st----! ----SNAPSHOT----{10 July 2014 . 3:48:10 pm} Squeak4.5-noBitBlt.image priorSource: 15835154! ----SNAPSHOT----{10 July 2014 . 1:49:32 pm} Squeak4.5-noBitBlt.image priorSource: 15870393! ----STARTUP----{15 July 2014 . 11:59:16 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 11:59' prior: 49367231! runBenchmarks ^ Benchmarks printAllResults: self benchmarkIterations! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:00'! printBenchmarks ^ Benchmarks printAllResults: self benchmarkIterations! ! !SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:00'! printBenchmarks: substring ^ Benchmarks printMatching: substring iterations: self benchmarkIterations! ! 
!SmallInteger methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:00' prior: 49425209! runBenchmarks ^ Benchmarks printAll: self benchmarkIterations! ! SmallInteger removeSelector: #testMatrix! SmallInteger removeSelector: #withArgsFoo! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 12:01'! printAll ^ self printAll: 5! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 12:02'! printAll: iterations ^ self print: self allBenchmarks iterations: iterations! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 12:02' prior: 49426046! printAll: iterations ^ self printMatching: self allBenchmarks iterations: iterations! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 12:02'! printMatching: substring iterations: iterations | benchmarks | benchmarks := self selectBenchmarks: substring. benchmarks ifEmpty: [ ^ String streamContents: [ :str | str nextPutAll: 'No benchmarks matched "'; nextPutAll: substring; nextPutAll: '"'; cr; nextPutAll: 'Available benchmarks:'. self allBenchmarkNames do: [ :name | str cr; nextPutAll: name ] ] ]. ^ self run: benchmarks iterations: iterations! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 12:03' prior: 49426380! printMatching: substring iterations: iterations | benchmarks | benchmarks := self selectBenchmarksOrString: substring. benchmarks isString ifTrue: [ ^ benchmarks ]. ^ self run: benchmarks iterations: iterations! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 12:04'! selectBenchmarksOrString: substring | benchmarks | benchmarks := self selectBenchmarks: substring. benchmarks ifEmpty: [ ^ String streamContents: [ :str | str nextPutAll: 'No benchmarks matched "'; nextPutAll: substring; nextPutAll: '"'; cr; nextPutAll: 'Available benchmarks:'. self allBenchmarkNames do: [ :name | str cr; nextPutAll: name ] ] ]. ^ benchmarks! ! !Benchmarks class methodsFor: 'private' stamp: 'ag 7/15/2014 12:04' prior: 49427211! 
selectBenchmarksOrString: substring | benchmarks | benchmarks := self selectBenchmarks: substring. benchmarks ifEmpty: [ ^ String streamContents: [ :str | str nextPutAll: 'No benchmarks matched "'; nextPutAll: substring; nextPutAll: '"'; cr; nextPutAll: 'Available benchmarks:'. self allBenchmarkNames do: [ :name | str cr; nextPutAll: name ] ] ]. ^ benchmarks! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 12:04' prior: 49286631! runMatching: substring iterations: iterations | benchmarks | benchmarks := self selectBenchmarksOrString: substring. benchmarks isString ifTrue: [ ^ benchmarks ]. ^ self run: benchmarks iterations: iterations! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 12:05' prior: 49426919! printMatching: substring iterations: iterations | benchmarks | benchmarks := self selectBenchmarksOrString: substring. benchmarks isString ifTrue: [ ^ benchmarks ]. ^ self print: benchmarks iterations: iterations! ! {'a'. 'b'}. ','. 'a,b'! {'a'. 'b'}. ','. 'a,b'! !Benchmarks class methodsFor: 'private' stamp: 'ag 7/15/2014 12:12'! print: benchmarks iterations: iterations ^ String streamContents: [ :str | benchmarks do: [ :bench | | instance | instance := bench new. str nextPutAll: String cr, instance name. str nextPutAll: ':', String cr. (SMarkRunner getResults: instance with: iterations) do: [ :result | str nextPutAll: result asString] separatedBy: [ str nextPut: $, ] ] ]! ! !Benchmarks class methodsFor: 'private' stamp: 'ag 7/15/2014 12:12' prior: 49428843! print: benchmarks iterations: iterations ^ String streamContents: [ :str | benchmarks do: [ :bench | | instance | instance := bench new. str nextPutAll: String cr, instance name. str nextPutAll: ':', String cr. (SMarkRunner getResults: instance with: iterations) do: [ :result | str nextPutAll: result asString ] separatedBy: [ str nextPut: $, ] ] ]! ! !SMarkRunner class methodsFor: 'benchmarking' stamp: 'ag 7/15/2014 12:14'! 
getResults: aSuite with: nIterations + + ^ (self execute: aSuite with: nIterations) results! ! ----QUIT----{15 July 2014 . 12:14:32 pm} Squeak4.5-noBitBlt.image priorSource: 15870393! ----STARTUP----{15 July 2014 . 12:15:33 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! SMarkRunner getResults: CPBBinaryTreeBenchmark new with: 5! results := SMarkRunner getResults: CPBBinaryTreeBenchmark new with: 5! results! results values anyOne! results values anyOne first! results values anyOne first! results values anyOne first total! a := nil.! results do: [ :key | a := key ]! a! results keysDo: [ :key | a := key ]! results keysDo: [ :key | a := key ]! a! b := nil.! results keysAndValuesDo: [ :key :value | a := key. b:= value ]! a! b! String streamContents: [ :str | str cr ]! !Benchmarks class methodsFor: 'private' stamp: 'ag 7/15/2014 12:21' prior: 49429305! print: benchmarks iterations: iterations ^ String streamContents: [ :str | benchmarks do: [ :bench | (SMarkRunner getResults: bench new with: iterations) keysAndValuesDo: [ :name :results | str cr; nextPutAll: name; nextPut: $:; cr. results do: [ :result | str nextPutAll: result total asString ] separatedBy: [ str nextPut: $, ] ] ] ]! ! ----QUIT----{15 July 2014 . 12:22:06 pm} Squeak4.5-noBitBlt.image priorSource: 15875419! ----STARTUP----{15 July 2014 . 12:22:48 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! Smalltalk garbageCollect! !SMarkRunner methodsFor: 'benchmarking' stamp: 'ag 7/15/2014 12:23' prior: 49235421! performBenchmark: aSelector + currentBenchmark := aSelector. + + 1 to: numIterations do: [:i| + "self timedBenchmarkExecution: aSelector." + Smalltalk garbageCollect. suite runBenchmark: aSelector. + ]. + + currentBenchmark := nil. + + ^ results at: (suite benchmarkNameForSelector: aSelector)! ! ----QUIT----{15 July 2014 . 12:24:01 pm} Squeak4.5-noBitBlt.image priorSource: 15876594! ----STARTUP----{15 July 2014 . 
12:25:13 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! Smalltalk garbageCollectMost.! !SMarkRunner methodsFor: 'benchmarking' stamp: 'ag 7/15/2014 12:25' prior: 49431333! performBenchmark: aSelector + currentBenchmark := aSelector. + + 1 to: numIterations do: [:i| + "self timedBenchmarkExecution: aSelector." + Smalltalk garbageCollectMost. suite runBenchmark: aSelector. + ]. + + currentBenchmark := nil. + + ^ results at: (suite benchmarkNameForSelector: aSelector)! ! ----QUIT----{15 July 2014 . 12:25:31 pm} Squeak4.5-noBitBlt.image priorSource: 15877197! ----STARTUP----{15 July 2014 . 12:30:53 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !CPBAStarBenchmark methodsFor: 'initialize-release' stamp: 'ag 7/15/2014 12:31' prior: 48592510! benchAStar astar graph: graph1; findPath; reset . astar graph: graph2; findPath; reset.! ! !CPBAStarBenchmark methodsFor: 'initialize-release' stamp: 'ag 7/15/2014 12:32'! benchAStarGraph1 astar graph: graph1; findPath; reset .! ! !CPBAStarBenchmark methodsFor: 'initialize-release' stamp: 'ag 7/15/2014 12:32'! benchAStarGraph2 astar graph: graph2; findPath; reset .! ! CPBAStarBenchmark removeSelector: #benchAStar! !CPBBinaryTreeBenchmark methodsFor: 'testing' stamp: 'ag 7/15/2014 12:33' prior: 48678079! benchBinaryTree + "starts the binary tree benchmark" + + self binarytrees: 30 .! ! ----QUIT----{15 July 2014 . 12:33:52 pm} Squeak4.5-noBitBlt.image priorSource: 15877811! ----STARTUP----{15 July 2014 . 12:34:45 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !CPBBinaryTreeBenchmark methodsFor: 'testing' stamp: 'ag 7/15/2014 12:34' prior: 49433076! benchBinaryTree + "starts the binary tree benchmark" + + self binarytrees: 20 .! ! ----SNAPSHOT----{15 July 2014 . 12:34:56 pm} Squeak4.5-noBitBlt.image priorSource: 15878724! !CPBBinaryTreeBenchmark methodsFor: 'testing' stamp: 'ag 7/15/2014 12:36' prior: 49433442! benchBinaryTree + "starts the binary tree benchmark" + + self binarytrees: 12 .! ! 
----QUIT----{15 July 2014 . 12:36:45 pm} Squeak4.5-noBitBlt.image priorSource: 15879090! ----STARTUP----{15 July 2014 . 12:37 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !CPBBinaryTreeBenchmark methodsFor: 'testing' stamp: 'ag 7/15/2014 12:37' prior: 49433708! benchBinaryTree + "starts the binary tree benchmark" + + self binarytrees: 11 .! ! ----SNAPSHOT----{15 July 2014 . 12:37:16 pm} Squeak4.5-noBitBlt.image priorSource: 15879356! !CPBBinaryTreeBenchmark methodsFor: 'testing' stamp: 'ag 7/15/2014 12:37' prior: 49434071! benchBinaryTree + "starts the binary tree benchmark" + + self binarytrees: 12 .! ! ----SNAPSHOT----{15 July 2014 . 12:37:42 pm} Squeak4.5-noBitBlt.image priorSource: 15879719! #('0000000000000000' 'FFFFFFFFFFFFFFFF' '3000000000000000' '1111111111111111' '0123456789ABCDEF' '1111111111111111' '0000000000000000' 'FEDCBA9876543210' '7CA110454A1A6E57' '0131D9619DC1376E' '07A1133E4A0B2686' '3849674C2602319E' '04B915BA43FEB5B6' '0113B970FD34F2CE' '0170F175468FB5E6' '43297FAD38E373FE' '07A7137045DA2A16' '04689104C2FD3B2F' '37D06BB516CB7546' '1F08260D1AC2465E' '584023641ABA6176' '025816164629B007' '49793EBC79B3258F' '4FB05E1515AB73A7' '49E95D6D4CA229BF' '018310DC409B26D6' '1C587F1C13924FEF' '0101010101010101' '1F1F1F1F0E0E0E0E' 'E0FEE0FEF1FEF1FE' '0000000000000000' 'FFFFFFFFFFFFFFFF' '0123456789ABCDEF' 'FEDCBA9876543210') size! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:44' prior: 48762750! longDecryptionTest + "(1 to: keys size)" + (27 to: keys size) + do: [ :each | + | key clearText cipherText enc | + key := keys at: each. + clearText := clear at: each. + cipherText := encrypted at: each. + enc := CPBBlowfish decrypt: cipherText with: key ]! ! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:45' prior: 48763107! longEncryptionTest + "(1 to: keys size)" + (27 to: keys size) + do: [ :each | + | key clearText cipherText enc | + key := keys at: each. 
+ clearText := clear at: each. + cipherText := encrypted at: each. + enc := CPBBlowfish encrypt: clearText with: key ]! ! ----SNAPSHOT----{15 July 2014 . 12:45:24 pm} Squeak4.5-noBitBlt.image priorSource: 15879985! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:46' prior: 49435642! longEncryptionTest + "(1 to: keys size)" + (31 to: keys size) + do: [ :each | + | key clearText cipherText enc | + key := keys at: each. + clearText := clear at: each. + cipherText := encrypted at: each. + enc := CPBBlowfish encrypt: clearText with: key ]! ! !CPBBlowfishSuite methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:46'! benchBlowfishDecryption + blowfish longDecryptionTest.! ! !CPBBlowfishSuite methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:46'! benchBlowfishEncryption + blowfish longEncryptionTest. ! ! CPBBlowfishSuite removeSelector: #benchBlowfish! !CPBBlowfishSuite methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:46' prior: 49436460! benchBlowfishDecryption + blowfish longDecryptionTest: 5.! ! !CPBBlowfishSuite methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:46' prior: 49436805! benchBlowfishDecryption + blowfish decryptionTest: 5.! ! !CPBBlowfishSuite methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:47' prior: 49436599! benchBlowfishEncryption + blowfish encryptionTest: 5. ! ! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:47'! encryptionTest: numKeys + (1 to: (numKeys max: keys size)) + do: [ :each | + | key clearText cipherText enc | + key := keys at: each. + clearText := clear at: each. + cipherText := encrypted at: each. + enc := CPBBlowfish encrypt: clearText with: key ]! ! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:48'! decryptionTest: numKeys + (1 to: (numKeys max: keys size)) + do: [ :each | + | key clearText cipherText enc | + key := keys at: each. + clearText := clear at: each. + cipherText := encrypted at: each. 
+ enc := CPBBlowfish decrypt: cipherText with: key ]! ! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:48' prior: 49435269! longDecryptionTest + (1 to: keys size) + do: [ :each | + | key clearText cipherText enc | + key := keys at: each. + clearText := clear at: each. + cipherText := encrypted at: each. + enc := CPBBlowfish decrypt: cipherText with: key ]! ! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:48' prior: 49436108! longEncryptionTest + self decryptionTest: keys size! ! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:48' prior: 49438329! longEncryptionTest + self encryptionTest: keys size! ! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:48' prior: 49437979! longDecryptionTest + self decryptionTest: keys size! ! ----SNAPSHOT----{15 July 2014 . 12:49 pm} Squeak4.5-noBitBlt.image priorSource: 15881480! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:53' prior: 48759322! initialize + super initialize. + self + initializeClear; + initializeEncrypted; + initializeKeys. clear removeFirst: 5. encrypted removeFirst: 5. keys removeFirst: 5.! ! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:53' prior: 49438888! initialize | removedKeys | + super initialize. + self + initializeClear; + initializeEncrypted; + initializeKeys. removedKeys := 8. clear removeFirst: removedKeys. encrypted removeFirst: removedKeys. keys removeFirst: removedKeys.! ! ----SNAPSHOT----{15 July 2014 . 12:53:59 pm} Squeak4.5-noBitBlt.image priorSource: 15884263! !CPBBlowfishSuite methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:55' prior: 49436963! benchBlowfishDecryption + blowfish decryptionTest: 1.! ! !CPBBlowfishSuite methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:55' prior: 49437117! benchBlowfishEncryption + blowfish encryptionTest: 1. ! ! ----SNAPSHOT----{15 July 2014 . 
12:55:59 pm} Squeak4.5-noBitBlt.image priorSource: 15884974! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:58' prior: 49437611! decryptionTest: numKeys + (1 to: (numKeys min: keys size)) + do: [ :each | + | key clearText cipherText enc | + key := keys at: each. + clearText := clear at: each. + cipherText := encrypted at: each. + enc := CPBBlowfish decrypt: cipherText with: key ]! ! !CPBBlowfishProfiling methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 12:58' prior: 49437260! encryptionTest: numKeys + (1 to: (numKeys min: keys size)) + do: [ :each | + | key clearText cipherText enc | + key := keys at: each. + clearText := clear at: each. + cipherText := encrypted at: each. + enc := CPBBlowfish encrypt: clearText with: key ]! ! ----SNAPSHOT----{15 July 2014 . 12:59:56 pm} Squeak4.5-noBitBlt.image priorSource: 15885377! !CPBBlowfishSuite methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 13:00' prior: 49439598! benchBlowfishDecryption + blowfish decryptionTest: 3.! ! !CPBBlowfishSuite methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 13:00' prior: 49439752! benchBlowfishEncryption + blowfish encryptionTest: 3. ! ! ----SNAPSHOT----{15 July 2014 . 1:01:31 pm} Squeak4.5-noBitBlt.image priorSource: 15886206! ----STARTUP----{15 July 2014 . 5:30:13 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !CPBPlanner class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 17:32' prior: 48911172! standardBenchmark + "This the combined benchmark." + "Planner standardBenchmark" + + self chainTest: 100. + self projectionTest: 100! ! ----SNAPSHOT----{15 July 2014 . 5:32:20 pm} Squeak4.5-noBitBlt.image priorSource: 15886609! !CPBPlanner class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 17:33' prior: 48908366! chainTest: n + "Do chain-of-equality-constraints performance tests." + + | vars editConstraint plan planner | + planner := CPBPlanner new. + vars := (1 to: n+1) collect: [ :i | CPBVariable new]. 
+ + "thread a chain of equality constraints through the variables" + 1 to: n do: + [ :i || v1 v2 | + v1 := vars at: i. + v2 := vars at: i + 1. + CPBEqualityConstraint var: v1 var: v2 strength: #required]. + + CPBStayConstraint var: vars last strength: #strongDefault. + editConstraint := CPBEditConstraint var: (vars first) strength: #preferred. + plan := planner extractPlanFromConstraints: (Array with: editConstraint). + 1 to: n do: [ :v | + vars first value: v. + plan execute. + vars last value ~= v ifTrue: [self error: 'Chain test failed!!']]. + editConstraint destroyConstraint! ! !CPBPlanner class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 17:33' prior: 49441326! standardBenchmark + "This the combined benchmark." + "Planner standardBenchmark" + + self chainTest: 400. + self projectionTest: 400! ! ----SNAPSHOT----{15 July 2014 . 5:33:57 pm} Squeak4.5-noBitBlt.image priorSource: 15887059! ----SNAPSHOT----{15 July 2014 . 5:34 pm} Squeak4.5-noBitBlt.image priorSource: 15888463! !CPBPlanner class methodsFor: 'benchmarks' stamp: 'ag 7/15/2014 17:34' prior: 49442730! standardBenchmark + "This the combined benchmark." + "Planner standardBenchmark" + + self chainTest: 1000. + self projectionTest: 1000! ! ----SNAPSHOT----{15 July 2014 . 5:34:58 pm} Squeak4.5-noBitBlt.image priorSource: 15888556! !CPBNBodyBenchmark methodsFor: 'nbody' stamp: 'ag 7/15/2014 17:51' prior: 49054097! benchNBody + "helper method to run the n body benchmark" + + self runBenchmarkFor: 2500 withStep: 0.01d0 .! ! ----SNAPSHOT----{15 July 2014 . 5:51:16 pm} Squeak4.5-noBitBlt.image priorSource: 15888902! !CPBNBodyBenchmark methodsFor: 'nbody' stamp: 'ag 7/15/2014 17:52' prior: 49443512! benchNBody + "helper method to run the n body benchmark" + + self runBenchmarkFor: 3500 withStep: 0.01d0 .! ! ----SNAPSHOT----{15 July 2014 . 5:52:12 pm} Squeak4.5-noBitBlt.image priorSource: 15889190! !CPBRichardsBenchmark methodsFor: 'testing' stamp: 'ag 7/15/2014 17:57'! 
executeExampleSchedule + | scheduler queue | + scheduler := CPBScheduler new . + scheduler addIdleTask: (CPBScheduler idIdle) withPriority: 0 withQueue: nil times: (CPBRichardsBenchmark iterations) . + + queue := CPBPacket link: nil id: (CPBScheduler idWorker) kind: (CPBPacket kindWork) . + queue := CPBPacket link: queue id: (CPBScheduler idWorker) kind: (CPBPacket kindWork) . + scheduler addWorkerTask: (CPBScheduler idWorker) withPriority: 1000 withQueue: queue . + + queue := CPBPacket link: nil id: (CPBScheduler idDeviceA) kind: (CPBPacket kindDevice) . + queue := CPBPacket link: queue id: (CPBScheduler idDeviceA) kind: (CPBPacket kindDevice) . + queue := CPBPacket link: queue id: (CPBScheduler idDeviceA) kind: (CPBPacket kindDevice) . + scheduler addHandlerTask: (CPBScheduler idHandlerA) withPriority: 2000 withQueue: queue . + + queue := CPBPacket link: nil id: (CPBScheduler idDeviceB) kind: (CPBPacket kindDevice) . + queue := CPBPacket link: queue id: (CPBScheduler idDeviceB) kind: (CPBPacket kindDevice) . + queue := CPBPacket link: queue id: (CPBScheduler idDeviceB) kind: (CPBPacket kindDevice) . + scheduler addHandlerTask: (CPBScheduler idHandlerB) withPriority: 3000 withQueue: queue . + + scheduler addDeviceTask: (CPBScheduler idDeviceA) withPriority: 4000 withQueue: nil . + scheduler addDeviceTask: (CPBScheduler idDeviceB) withPriority: 5000 withQueue: nil . + + scheduler schedule . + + ((scheduler queueCount ~~ (CPBRichardsBenchmark expectedQueueCount)) or: + (scheduler holdCount ~~ (CPBRichardsBenchmark expectedHoldCount))) ifTrue: [ + Transcript + show: 'Error during execution: queueCount= '; + show: (scheduler queueCount); + show: ', holdCount='; + show: (scheduler holdCount); + show: '.'; + cr . + ] . +! ! !CPBRichardsBenchmark methodsFor: 'testing' stamp: 'ag 7/15/2014 17:58' prior: 49197044! benchRichards 50 timesRepeat: [ self executeExampleSchedule ].! ! !CPBRichardsBenchmark methodsFor: 'testing' stamp: 'ag 7/15/2014 17:58' prior: 49445901! 
benchRichards 100 timesRepeat: [ self executeExampleSchedule ].! ! ----SNAPSHOT----{15 July 2014 . 5:58:28 pm} Squeak4.5-noBitBlt.image priorSource: 15889478! !CPBRichardsBenchmark methodsFor: 'testing' stamp: 'ag 7/15/2014 17:59' prior: 49446059! benchRichards 200 timesRepeat: [ self executeExampleSchedule ].! ! ----SNAPSHOT----{15 July 2014 . 5:59:34 pm} Squeak4.5-noBitBlt.image priorSource: 15891696! !CPBSplayTreeBenchmark methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 18:05' prior: 49268315! setUp + splayTree := nil. + kSplayTreeSize := 1000. "8000" + kSplayTreeModifications := 80. "80." + kSplayTreePayloadDepth := 5. + keyCounter := 0. + + Transcript showln: 'entering SplayTree setup'. + + self splayTree: (CPBSplaytree new). + + 1 to: self splayTreeSize do: [:i | + self insertNewNode. + ]! ! ----SNAPSHOT----{15 July 2014 . 6:05:51 pm} Squeak4.5-noBitBlt.image priorSource: 15891948! !CPBSplayTreeBenchmark methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 18:06' prior: 49446576! setUp + splayTree := nil. + kSplayTreeSize := 1000. "8000" + kSplayTreeModifications := 300. "80." + kSplayTreePayloadDepth := 5. + keyCounter := 0. + + Transcript showln: 'entering SplayTree setup'. + + self splayTree: (CPBSplaytree new). + + 1 to: self splayTreeSize do: [:i | + self insertNewNode. + ]! ! ----SNAPSHOT----{15 July 2014 . 6:06:41 pm} Squeak4.5-noBitBlt.image priorSource: 15892442! !CPBSplayTreeBenchmark methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 18:07' prior: 49447070! setUp + splayTree := nil. + kSplayTreeSize := 1000. "8000" + kSplayTreeModifications := 600. "80." + kSplayTreePayloadDepth := 5. + keyCounter := 0. + + Transcript showln: 'entering SplayTree setup'. + + self splayTree: (CPBSplaytree new). + + 1 to: self splayTreeSize do: [:i | + self insertNewNode. + ]! ! !CPBSplayTreeBenchmark methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 18:08' prior: 49447565! setUp + splayTree := nil. + kSplayTreeSize := 1000. 
"8000" + kSplayTreeModifications := 500. "80." + kSplayTreePayloadDepth := 10. + keyCounter := 0. + + Transcript showln: 'entering SplayTree setup'. + + self splayTree: (CPBSplaytree new). + + 1 to: self splayTreeSize do: [:i | + self insertNewNode. + ]! ! ----SNAPSHOT----{15 July 2014 . 6:08:20 pm} Squeak4.5-noBitBlt.image priorSource: 15892937! !CPBSplayTreeBenchmark methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 18:09' prior: 49447967! setUp + splayTree := nil. + kSplayTreeSize := 1000. "8000" + kSplayTreeModifications := 500. "80." + kSplayTreePayloadDepth := 7. + keyCounter := 0. + + Transcript showln: 'entering SplayTree setup'. + + self splayTree: (CPBSplaytree new). + + 1 to: self splayTreeSize do: [:i | + self insertNewNode. + ]! ! ----SNAPSHOT----{15 July 2014 . 6:09:41 pm} Squeak4.5-noBitBlt.image priorSource: 15893835! !CPBSplayTreeBenchmark methodsFor: 'as yet unclassified' stamp: 'ag 7/15/2014 18:10' prior: 49448463! setUp + splayTree := nil. + kSplayTreeSize := 1000. "8000" + kSplayTreeModifications := 500. "80." + kSplayTreePayloadDepth := 6. + keyCounter := 0. + + Transcript showln: 'entering SplayTree setup'. + + self splayTree: (CPBSplaytree new). + + 1 to: self splayTreeSize do: [:i | + self insertNewNode. + ]! ! ----SNAPSHOT----{15 July 2014 . 6:10:56 pm} Squeak4.5-noBitBlt.image priorSource: 15894330! \ No newline at end of file diff --git a/images/Squeak4.5-noBitBlt.image b/images/Squeak4.5-noBitBlt.image index 175e9df2180e298a4db4f8f60698e45e1b468425..ed92c78c940799d91bb94a8ed8527076db6816c7 GIT binary patch [cut] From noreply at buildbot.pypy.org Fri Jul 18 14:09:00 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:09:00 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Moved SenderChainManipulation exception to interpreter.py, as subclass of ContextSwitchException. 
Message-ID: <20140718120900.46CAE1C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r917:25b23527e013 Date: 2014-07-18 13:34 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/25b23527e013/ Log: Moved SenderChainManipulation exception to interpreter.py, as subclass of ContextSwitchException. Catching SenderChainManipulation explicitely in toplevel loop. Added "sender is not new_sender" condition to raising SenderChainManipulation. Fixed test. diff --git a/spyvm/error.py b/spyvm/error.py --- a/spyvm/error.py +++ b/spyvm/error.py @@ -30,7 +30,3 @@ _attrs_ = ["msg"] def __init__(self, msg): self.msg = msg - -class SenderChainManipulation(Exception): - def __init__(self, manipulated_context): - self.s_context = manipulated_context diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -70,6 +70,10 @@ if self.is_tracing(): print "====== StackOverflow, contexts forced to heap at: %s" % e.s_new_context.short_str() s_new_context = e.s_new_context + except SenderChainManipulation, e: + if self.is_tracing(): + print "====== SenderChainManipulation, contexts forced to heap at: %s" % e.s_new_context.short_str() + s_new_context = e.s_new_context except Return, nlr: assert nlr.s_target_context or nlr.is_local s_new_context = s_sender @@ -198,13 +202,13 @@ def interpret_toplevel(self, w_frame): try: + self.interrupt_check_counter = self.interrupt_counter_size self.loop(w_frame) except ReturnFromTopLevel, e: return e.object def perform(self, w_receiver, selector="", w_selector=None, w_arguments=[]): s_frame = self.create_toplevel_context(w_receiver, selector, w_selector, w_arguments) - self.interrupt_check_counter = self.interrupt_counter_size return self.interpret_toplevel(s_frame.w_self()) def create_toplevel_context(self, w_receiver, selector="", w_selector=None, w_arguments=[]): @@ -276,13 +280,18 @@ self.s_new_context = s_new_context class StackOverflow(ContextSwitchException): 
- """This causes the current jit-loop to be left, thus avoiding stack overflows. + """This causes the current jit-loop to be left, dumping all virtualized objects to the heap. This breaks performance, so it should rarely happen. In case of severe performance problems, execute with -t and check if this occurrs.""" class ProcessSwitch(ContextSwitchException): - """This causes the interpreter to switch the executed context.""" + """This causes the interpreter to switch the executed context. + Triggered when switching the process.""" +class SenderChainManipulation(ContextSwitchException): + """Manipulation of the sender chain can invalidate the jitted C stack. + We have to dump all virtual objects and rebuild the stack. + We try to raise this as rarely as possible and as late as possible.""" import rpython.rlib.unroll if hasattr(unroll, "unrolling_zero"): @@ -753,15 +762,16 @@ association = wrapper.AssociationWrapper(self.space, w_association) self.push(association.value()) elif opType == 5: + # TODO - the following two special cases should not be necessary try: self.w_receiver().store(self.space, third, self.top()) - except error.SenderChainManipulation, e: - raise StackOverflow(self) + except SenderChainManipulation, e: + raise SenderChainManipulation(self) elif opType == 6: try: self.w_receiver().store(self.space, third, self.pop()) - except error.SenderChainManipulation, e: - raise StackOverflow(self) + except SenderChainManipulation, e: + raise SenderChainManipulation(self) elif opType == 7: w_association = self.w_method().getliteral(third) association = wrapper.AssociationWrapper(self.space, w_association) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -659,10 +659,11 @@ self.instances_w = {} def copy_field_from(self, n0, other_shadow): + from spyvm.interpreter import SenderChainManipulation try: AbstractRedirectingShadow.copy_field_from(self, n0, other_shadow) - except error.SenderChainManipulation, e: - assert 
e.s_context == self + except SenderChainManipulation, e: + assert e.s_new_context == self def copy_from(self, other_shadow): # Some fields have to be initialized before the rest, to ensure correct initialization. @@ -725,9 +726,11 @@ # === Sender === def store_s_sender(self, s_sender, raise_error=True): - self._s_sender = s_sender - if raise_error: - raise error.SenderChainManipulation(self) + if s_sender is not self._s_sender: + self._s_sender = s_sender + if raise_error: + from spyvm.interpreter import SenderChainManipulation + raise SenderChainManipulation(self) def w_sender(self): sender = self.s_sender() diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -983,4 +983,4 @@ w_frame, s_frame = new_frame(bytes) s_frame.store_w_receiver(w_frame) s_frame.push(w_frame) - py.test.raises(interpreter.StackOverflow, step_in_interp, s_frame) + py.test.raises(interpreter.SenderChainManipulation, step_in_interp, s_frame) From noreply at buildbot.pypy.org Fri Jul 18 14:09:01 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:09:01 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Printing stack trace in non-translated mode Message-ID: <20140718120901.794501C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r918:2a1d5616e87f Date: 2014-07-18 13:38 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2a1d5616e87f/ Log: Printing stack trace in non-translated mode diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -3,7 +3,7 @@ import os from rpython.rlib.streamio import open_file_as_stream -from rpython.rlib import jit, rpath +from rpython.rlib import jit, rpath, objectmodel from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ error, shadow, storage_logger, constants @@ -72,6 +72,9 
@@ return -1 except Exception, e: print_error("Exception: %s" % str(e)) + if not objectmodel.we_are_translated(): + import traceback + traceback.print_exc() return -1 def entry_point(argv): From noreply at buildbot.pypy.org Fri Jul 18 14:09:02 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Fri, 18 Jul 2014 14:09:02 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Merged. Message-ID: <20140718120902.C6CFC1C01D2@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r919:8c1727512fd2 Date: 2014-07-18 13:50 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/8c1727512fd2/ Log: Merged. Removed configurable trace depth (unnecessary). diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -35,7 +35,7 @@ ) def __init__(self, space, image=None, image_name="", - trace_depth=-1, evented=True, interrupts=True): + trace=False, evented=True, interrupts=True): # === Initialize immutable variables self.space = space self.image = image @@ -54,7 +54,7 @@ # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size self.next_wakeup_tick = 0 - self.trace_depth = trace_depth + self.trace = trace self.trace_proxy = objspace.ConstantFlag() self.stack_depth = 0 @@ -230,21 +230,13 @@ return s_frame # ============== Methods for tracing, printing and debugging ============== - - def activate_trace(self, trace_depth=0): - self.trace_depth = trace_depth - - def deactivate_trace(self): - self.trace_depth = -1 - + def is_tracing(self): - return jit.promote(self.trace_depth) >= 0 - + return jit.promote(self.trace) + def print_padded(self, str): - depth = jit.promote(self.trace_depth) - assert depth >= 0 - if self.stack_depth <= depth: - print (' ' * self.stack_depth) + str + assert self.is_tracing() + print (' ' * self.stack_depth) + str def activate_debug_bytecode(self): "NOT_RPYTHON" diff --git a/spyvm/plugins/vmdebugging.py 
b/spyvm/plugins/vmdebugging.py --- a/spyvm/plugins/vmdebugging.py +++ b/spyvm/plugins/vmdebugging.py @@ -10,12 +10,12 @@ @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def trace(interp, s_frame, w_rcvr): - interp.activate_trace() + interp.trace = True return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def untrace(interp, s_frame, w_rcvr): - interp.deactivate_trace() + interp.trace = False return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -8,7 +8,7 @@ space, interp, _, _ = read_image('bootstrapped.image') w = space.w copy_to_module(locals(), __name__) - interp.deactivate_trace() + interp.trace = False space.initialize_class(space.w_String, interp) def teardown_module(): @@ -38,10 +38,7 @@ except Exception: w_selector = find_symbol_in_methoddict_of(selector, w(intmask(candidates[0])).getclass(space).shadow) - if trace: - interp.activate_trace() - else: - interp.deactivate_trace() + interp.trace = trace for i, v in enumerate(candidates): x = w_l(v) if j is None: @@ -53,7 +50,7 @@ y = w_l(j) z = perform_primitive(x, w_selector, y) assert r_uint(z.value) == r_uint(operation(v, y.value)) - interp.deactivate_trace() + interp.trace = False def test_bitAnd(): do_primitive("bitAnd:", operator.and_) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -12,7 +12,7 @@ def _usage(argv): print """ - Usage: %s [-r|-m|-h] [-naPu] [-jpiS] [-tTlLE] + Usage: %s [-r|-m|-h] [-naPu] [-jpiS] [-tlLE] - image path (default: Squeak.image) Execution mode: @@ -39,7 +39,6 @@ Logging parameters: -t|--trace - Output a trace of each message, primitive, return value and process switch. - -T - Like -t, but limit the stack depth for the trace to . 
-l|--storage-log - Output a log of storage operations. -L|--storage-log-aggregate - Output an aggregated storage log at the end of execution. -E|--storage-log-elements - Include classnames of elements into the storage log. @@ -89,7 +88,7 @@ # == Other parameters poll = False interrupts = True - trace_depth = -1 + trace = False space = prebuilt_space idx = 1 @@ -109,11 +108,7 @@ elif arg in ["-m", "--method"]: selector, idx = get_parameter(argv, idx, arg) elif arg in ["-t", "--trace"]: - trace_depth = sys.maxint - elif arg in ["-T"]: - trace_depth, idx = get_int_parameter(argv, idx, arg) - if trace_depth < 0: - raise error.Exit("Need argument >= 0 for -T.") + trace = True elif arg in ["-p", "--poll"]: poll = True elif arg in ["-a", "--arg"]: @@ -164,7 +159,7 @@ image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) interp = interpreter.Interpreter(space, image, image_name=path, - trace_depth=trace_depth, evented=not poll, + trace=trace, evented=not poll, interrupts=interrupts) space.runtime_setup(argv[0]) print_error("") # Line break after image-loading characters From noreply at buildbot.pypy.org Sat Jul 19 13:57:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 19 Jul 2014 13:57:30 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Copy and adapt the talk from pycon-italy-2014 (thanks antocuni) Message-ID: <20140719115730.1810B1D2317@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5362:3dd14da26db0 Date: 2014-07-19 13:57 +0200 http://bitbucket.org/pypy/extradoc/changeset/3dd14da26db0/ Log: Copy and adapt the talk from pycon-italy-2014 (thanks antocuni) diff --git a/talk/pycon-italy-2014/Makefile b/talk/ep2014/status/Makefile copy from talk/pycon-italy-2014/Makefile copy to talk/ep2014/status/Makefile --- a/talk/pycon-italy-2014/Makefile +++ b/talk/ep2014/status/Makefile @@ -1,10 +1,10 @@ # you can find rst2beamer.py here: -# 
http://codespeak.net/svn/user/antocuni/bin/rst2beamer.py +# https://bitbucket.org/antocuni/env/raw/default/bin/rst2beamer.py # WARNING: to work, it needs this patch for docutils # https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 -talk.pdf: talk.rst author.latex title.latex stylesheet.latex +talk.pdf: talk.rst author.latex stylesheet.latex python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit #/home/antocuni/.virtualenvs/rst2beamer/bin/python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit diff --git a/talk/pycon-italy-2014/author.latex b/talk/ep2014/status/author.latex copy from talk/pycon-italy-2014/author.latex copy to talk/ep2014/status/author.latex --- a/talk/pycon-italy-2014/author.latex +++ b/talk/ep2014/status/author.latex @@ -1,8 +1,10 @@ \definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} -\title[PyPy Status]{PyPy Status\\\small{(no, PyPy is not dead)}} -\author[antocuni] -{Antonio Cuni} +\title[PyPy Status Talk]{PyPy Status Talk\\\small{(no no, PyPy is not dead)}} +\author[arigo, rguillebert] +{Armin Rigo, Romain Guillebert\\ +based on a PyCon Italia talk by Antonio Cuni\\ +\includegraphics[width=80px]{../../img/py-web-new.png}} -\institute{PyCon Cinque} -\date{May 24, 2014} +\institute{EuroPython} +\date{July 22, 2014} diff --git a/talk/pycon-italy-2014/beamerdefs.txt b/talk/ep2014/status/beamerdefs.txt copy from talk/pycon-italy-2014/beamerdefs.txt copy to talk/ep2014/status/beamerdefs.txt diff --git a/talk/ep2014/status/speed.png b/talk/ep2014/status/speed.png new file mode 100644 index 0000000000000000000000000000000000000000..33fe20ac9d81ddbd3ced48f52f9717693dc15518 GIT binary patch [cut] diff --git a/talk/pycon-italy-2014/stylesheet.latex b/talk/ep2014/status/stylesheet.latex copy from talk/pycon-italy-2014/stylesheet.latex copy to 
talk/ep2014/status/stylesheet.latex diff --git a/talk/ep2014/status/talk.pdf b/talk/ep2014/status/talk.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f9532b263b7bf1aa53ba0d807a875af9991c6a39 GIT binary patch [cut] diff --git a/talk/pycon-italy-2014/talk.pdf.info b/talk/ep2014/status/talk.pdf.info copy from talk/pycon-italy-2014/talk.pdf.info copy to talk/ep2014/status/talk.pdf.info diff --git a/talk/pycon-italy-2014/talk.rst b/talk/ep2014/status/talk.rst copy from talk/pycon-italy-2014/talk.rst copy to talk/ep2014/status/talk.rst --- a/talk/pycon-italy-2014/talk.rst +++ b/talk/ep2014/status/talk.rst @@ -4,18 +4,6 @@ PyPy Status ================================ -About me ---------- - -- PyPy core dev - -- ``pdb++``, ``fancycompleter``, ... - -- Consultant, trainer - -- http://antocuni.eu - - PyPy is not dead ---------------- @@ -114,6 +102,8 @@ * ~7x faster than standard PHP + * comparable speed as HHVM + * http://hippyvm.com/ @@ -121,18 +111,27 @@ Fundraising campaign --------------------- -- py3k: 50'852 $ of 105'000 $ (48.4%) +- py3k: 52'000 $ of 105'000 $ (50%) -- numpy: 48'121 $ of 60'000 $ (80.2%) +- numpy: 48'000 $ of 60'000 $ (80%) - STM, 1st call: 25'000 $ -- STM, 2nd call: 2'097 $ of 80'000 $ (2.6%) - - * more on STM later +- STM, 2nd call: 3'000 $ of 80'000 $ (4%) - thank to all donors! +Commercial support +------------------ + +- We offer commercial support for PyPy + +- Consultancy and training + +- Performance issues for open- or closed-source programs, porting, + improving support in parts of the Python or non-Python interpreters, + etc. + Current status --------------- @@ -155,11 +154,11 @@ - numpy: in-progress (more later) -Speed: 6.3x faster than CPython +Speed: 6.5x faster than CPython -------------------------------- .. 
image:: speed.png - :scale: 47% + :scale: 44% ARM @@ -252,92 +251,19 @@ * by Armin Rigo and Remi Meier -STM semantics -------------- - -- N threads - -- Each thread split into atomic blocks - -- Sequential execution in some arbitrary order - -- In practice: - -- Parallel execution, conflicts solved by STM - - -Unit of execution (1) ---------------------- - -- Atomic blocks == 1 Python bytecode - -- Threads are executed in arbitrary order, but bytecodes are atomic - -- ==> Same semantics as GIL - -- "and this will solve the GIL problem" (A. Rigo, EuroPython 2011 lighting talk) - -Unit of execution (2) +Current status for STM ---------------------- -- Larger atomic blocks - -- ``with atomic:`` - -- Much easier to use than explicit locks - -- Can be hidden by libraries to provide even higher level paradigms - - * e.g.: Twisted apps made parallel out of the box - -Race conditions ---------------- - -- They don't magically disappear - -- With explicit locks - - * ==> BOOM - - * you fix bugs by preventing race conditions - -- With atomic blocks - - * ==> Rollaback - - * Performance penalty - - * You optimize by preventing race conditions - -- Fast&broken vs. Slower&correct - - -Implementation ---------------- - -- Conflicts detection, commit and rollaback is costly - -- Original goal (2011): 2x-5x slower than PyPy without STM - - * But parallelizable! 
- -|pause| - -- Current goal (2014): 25% slower than PyPy without STM - -- Yes, that's 10x less overhead than original goal - -- mmap black magic - -Current status ---------------- - - Preliminary versions of pypy-jit-stm available -- The JIT overhead is still a bit too high +- The overhead is still a bit too high and hard to precict - Lots of polishing needed +- More fundamentally, how to best use it is still unknown + +- See talk tomorrow + Contacts, Q&A -------------- @@ -346,13 +272,4 @@ - http://morepypy.blogspot.com/ -- twitter: @antocuni - -- Available for consultancy & training: - - * http://antocuni.eu - - * info at antocuni.eu - - Any question? - From noreply at buildbot.pypy.org Sat Jul 19 14:13:38 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sat, 19 Jul 2014 14:13:38 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Refactoring, reduced code duplication. Message-ID: <20140719121338.9FDEE1C104D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r920:2dd2b3555772 Date: 2014-07-18 14:21 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2dd2b3555772/ Log: Refactoring, reduced code duplication. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -66,13 +66,9 @@ try: self.loop_bytecodes(s_new_context) raise Exception("loop_bytecodes left without raising...") - except StackOverflow, e: + except ContextSwitchException, e: if self.is_tracing(): - print "====== StackOverflow, contexts forced to heap at: %s" % e.s_new_context.short_str() - s_new_context = e.s_new_context - except SenderChainManipulation, e: - if self.is_tracing(): - print "====== SenderChainManipulation, contexts forced to heap at: %s" % e.s_new_context.short_str() + e.print_trace(s_new_context) s_new_context = e.s_new_context except Return, nlr: assert nlr.s_target_context or nlr.is_local @@ -83,13 +79,7 @@ s_new_context._activate_unwind_context(self) s_new_context = s_sender s_new_context.push(nlr.value) - except ProcessSwitch, p: - assert not self.space.suppress_process_switch.is_set(), "ProcessSwitch should be disabled..." - if self.is_tracing(): - print "====== Switched process from: %s" % s_new_context.short_str() - print "====== to: %s " % p.s_new_context.short_str() - s_new_context = p.s_new_context - + def loop_bytecodes(self, s_context, may_context_switch=True): old_pc = 0 if not jit.we_are_jitted() and may_context_switch: @@ -267,23 +257,34 @@ class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave the current context.""" + _attrs_ = ["s_new_context"] + type = "ContextSwitch" def __init__(self, s_new_context): self.s_new_context = s_new_context - + + def print_trace(self, old_context): + print "====== %s, contexts forced to heap at: %s" % (self.type, self.s_new_context.short_str()) + class StackOverflow(ContextSwitchException): """This causes the current jit-loop to be left, dumping all virtualized objects to the heap. This breaks performance, so it should rarely happen. 
In case of severe performance problems, execute with -t and check if this occurrs.""" - + type = "Stack Overflow" + class ProcessSwitch(ContextSwitchException): """This causes the interpreter to switch the executed context. Triggered when switching the process.""" - + + def print_trace(self, old_context): + print "====== Switched process from: %s" % old_context.short_str() + print "====== to: %s " % self.s_new_context.short_str() + class SenderChainManipulation(ContextSwitchException): """Manipulation of the sender chain can invalidate the jitted C stack. We have to dump all virtual objects and rebuild the stack. We try to raise this as rarely as possible and as late as possible.""" + type = "Sender Manipulation" import rpython.rlib.unroll if hasattr(unroll, "unrolling_zero"): From noreply at buildbot.pypy.org Sat Jul 19 14:13:40 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sat, 19 Jul 2014 14:13:40 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state: Delaying SenderChainManipulation as much as possible. Message-ID: <20140719121340.006541C104D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state Changeset: r921:76c81645836d Date: 2014-07-18 15:29 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/76c81645836d/ Log: Delaying SenderChainManipulation as much as possible. Added a 'state' field to Context objects, can be Inactive, Active or Dirty. Setting the sender of an Active context makes it Dirty. When a Dirty context is left, SenderChainManipulation will be raised, forcing all remaining contexts from the stack to the heap. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -1,6 +1,6 @@ import py import os -from spyvm.shadow import ContextPartShadow, MethodContextShadow, BlockContextShadow, MethodNotFound +from spyvm.shadow import ContextPartShadow, MethodContextShadow, BlockContextShadow, MethodNotFound, ActiveContext, InactiveContext, DirtyContext from spyvm import model, constants, primitives, conftest, wrapper, objspace from spyvm.tool.bitmanipulation import splitter @@ -118,8 +118,10 @@ if self.is_tracing(): self.stack_depth += 1 if s_frame._s_sender is None and s_sender is not None: - s_frame.store_s_sender(s_sender, raise_error=False) + s_frame.store_s_sender(s_sender) # Now (continue to) execute the context bytecodes + assert s_frame.state is InactiveContext + s_frame.state = ActiveContext self.loop_bytecodes(s_frame, may_context_switch) except rstackovf.StackOverflow: rstackovf.check_stack_overflow() @@ -127,7 +129,11 @@ finally: if self.is_tracing(): self.stack_depth -= 1 - + dirty_frame = s_frame.state is DirtyContext + s_frame.state = InactiveContext + if dirty_frame: + raise SenderChainManipulation(s_frame) + def step(self, context): bytecode = context.fetch_next_bytecode() for entry in UNROLLING_BYTECODE_RANGES: @@ -755,16 +761,9 @@ association = wrapper.AssociationWrapper(self.space, w_association) self.push(association.value()) elif opType == 5: - # TODO - the following two special cases should not be necessary - try: - self.w_receiver().store(self.space, third, self.top()) - except SenderChainManipulation, e: - raise SenderChainManipulation(self) + self.w_receiver().store(self.space, third, self.top()) elif opType == 6: - try: - self.w_receiver().store(self.space, third, self.pop()) - except SenderChainManipulation, e: - raise SenderChainManipulation(self) + self.w_receiver().store(self.space, third, self.pop()) elif opType == 7: w_association = self.w_method().getliteral(third) association = 
wrapper.AssociationWrapper(self.space, w_association) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -636,18 +636,29 @@ def size(self): return self._w_self_size +class ContextState(object): + def __init__(self, name): + self.name = name + def __str__(self): + return self.name + def __repr__(self): + return self.name +InactiveContext = ContextState("InactiveContext") +ActiveContext = ContextState("ActiveContext") +DirtyContext = ContextState("DirtyContext") + class ContextPartShadow(AbstractRedirectingShadow): __metaclass__ = extendabletype _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', - '_stack_ptr', 'instances_w'] + '_stack_ptr', 'instances_w', 'state'] repr_classname = "ContextPartShadow" _virtualizable_ = [ '_s_sender', "_pc", "_temps_and_stack[*]", "_stack_ptr", - "_w_self", "_w_self_size" + "_w_self", "_w_self_size", 'state' ] # ______________________________________________________________________ @@ -657,13 +668,7 @@ self._s_sender = None AbstractRedirectingShadow.__init__(self, space, w_self, size) self.instances_w = {} - - def copy_field_from(self, n0, other_shadow): - from spyvm.interpreter import SenderChainManipulation - try: - AbstractRedirectingShadow.copy_field_from(self, n0, other_shadow) - except SenderChainManipulation, e: - assert e.s_new_context == self + self.state = InactiveContext def copy_from(self, other_shadow): # Some fields have to be initialized before the rest, to ensure correct initialization. 
@@ -705,7 +710,7 @@ if n0 == constants.CTXPART_SENDER_INDEX: assert isinstance(w_value, model.W_PointersObject) if w_value.is_nil(self.space): - self.store_s_sender(None, raise_error=False) + self.store_s_sender(None) else: self.store_s_sender(w_value.as_context_get_shadow(self.space)) return @@ -725,12 +730,12 @@ # === Sender === - def store_s_sender(self, s_sender, raise_error=True): + def store_s_sender(self, s_sender): if s_sender is not self._s_sender: self._s_sender = s_sender - if raise_error: - from spyvm.interpreter import SenderChainManipulation - raise SenderChainManipulation(self) + # If new sender is None, we are just being marked as returned. + if s_sender is not None and self.state is ActiveContext: + self.state = DirtyContext def w_sender(self): sender = self.s_sender() @@ -822,7 +827,7 @@ def mark_returned(self): self.store_pc(-1) - self.store_s_sender(None, raise_error=False) + self.store_s_sender(None) def is_returned(self): return self.pc() == -1 and self.w_sender().is_nil(self.space) From noreply at buildbot.pypy.org Sat Jul 19 14:13:41 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sat, 19 Jul 2014 14:13:41 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state: Fixed tests, added tests. Message-ID: <20140719121341.2E00F1C104D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state Changeset: r922:2d1854d40231 Date: 2014-07-18 15:29 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2d1854d40231/ Log: Fixed tests, added tests. 
diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -978,9 +978,32 @@ 2, "value:value:"]], test) -def test_c_stack_reset_on_sender_chain_manipulation(): - bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) +def test_frame_dirty_if_active(): + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + returnReceiverBytecode w_frame, s_frame = new_frame(bytes) s_frame.store_w_receiver(w_frame) s_frame.push(w_frame) - py.test.raises(interpreter.SenderChainManipulation, step_in_interp, s_frame) + s_frame.state = shadow.ActiveContext + step_in_interp(s_frame) + assert s_frame.state is shadow.DirtyContext + +def test_frame_not_dirty_if_inactive(): + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + returnReceiverBytecode + w_frame, s_frame = new_frame(bytes) + w_other_frame, s_other_frame = new_frame("") + s_frame.store_w_receiver(w_other_frame) + s_frame.push(w_frame) + s_frame.state = shadow.ActiveContext + step_in_interp(s_frame) + assert s_frame.state is shadow.ActiveContext + assert s_other_frame.state is shadow.InactiveContext + +def test_raise_SenderManipulation_on_dirty_frame(): + w_frame, s_frame = new_frame(returnReceiverBytecode) + s_frame.state = shadow.DirtyContext + def run_frame(): + #import pdb; pdb.set_trace() + interp._loop = True + interp.stack_frame(s_frame, None) + py.test.raises(interpreter.SenderChainManipulation, run_frame) + \ No newline at end of file diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -43,7 +43,7 @@ # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0), []) s_frame = w_method.create_frame(space, w(0)) - s_frame.store_s_sender(s_initial_frame, 
raise_error=False) + s_frame.store_s_sender(s_initial_frame) try: interp.loop(s_frame.w_self()) @@ -70,7 +70,7 @@ # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0)) s_frame = w_method.create_frame(space, w(0)) - s_frame.store_s_sender(s_initial_frame, raise_error=False) + s_frame.store_s_sender(s_initial_frame) try: interp.loop(s_frame.w_self()) diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -85,7 +85,7 @@ if not self._loop: # this test is done to not loop in test, but rather step just once where wanted # Unfortunately, we have to mimick some of the original behaviour. - s_new_frame.store_s_sender(s_sender, raise_error=False) + s_new_frame.store_s_sender(s_sender) return s_new_frame return interpreter.Interpreter.stack_frame(self, s_new_frame, s_sender, may_context_switch) From noreply at buildbot.pypy.org Sat Jul 19 14:13:42 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sat, 19 Jul 2014 14:13:42 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state: Refactored Return-mechanism to make the context state and late sender-chain-manipulation refactoring work. Message-ID: <20140719121342.6434F1C104D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state Changeset: r923:a27bd7d3d458 Date: 2014-07-19 13:15 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a27bd7d3d458/ Log: Refactored Return-mechanism to make the context state and late sender-chain-manipulation refactoring work. Added LocalReturn class. stack_frame() handles dispatching of Return types, always invoked together with loop_bytecodes now. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -60,25 +60,58 @@ def loop(self, w_active_context): # This is the top-level loop and is not invoked recursively. 
- s_new_context = w_active_context.as_context_get_shadow(self.space) + s_context = w_active_context.as_context_get_shadow(self.space) while True: - s_sender = s_new_context.s_sender() + s_sender = s_context.s_sender() try: - self.loop_bytecodes(s_new_context) + self.stack_frame(s_context, None) raise Exception("loop_bytecodes left without raising...") except ContextSwitchException, e: if self.is_tracing(): - e.print_trace(s_new_context) - s_new_context = e.s_new_context - except Return, nlr: - assert nlr.s_target_context or nlr.is_local - s_new_context = s_sender - if not nlr.is_local: - while s_new_context is not nlr.s_target_context: - s_sender = s_new_context.s_sender() - s_new_context._activate_unwind_context(self) - s_new_context = s_sender - s_new_context.push(nlr.value) + e.print_trace() + s_context = e.s_new_context + except LocalReturn, ret: + s_context = self.unwind_context_chain(s_sender, s_sender, ret.value) + except Return, ret: + s_context = self.unwind_context_chain(s_sender, ret.s_target_context, ret.value) + except NonVirtualReturn, ret: + if self.is_tracing(): + ret.print_trace() + s_context = self.unwind_context_chain(ret.s_current_context, ret.s_target_context, ret.value) + + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame, + # handles the stack overflow protection mechanism and handles/dispatches Returns. + def stack_frame(self, s_frame, s_sender, may_context_switch=True): + try: + if self.is_tracing(): + self.stack_depth += 1 + if s_frame._s_sender is None and s_sender is not None: + s_frame.store_s_sender(s_sender) + # Now (continue to) execute the context bytecodes + assert s_frame.state is InactiveContext + s_frame.state = ActiveContext + self.loop_bytecodes(s_frame, may_context_switch) + except rstackovf.StackOverflow: + rstackovf.check_stack_overflow() + raise StackOverflow(s_frame) + except Return, e: + # Do not catch NonVirtualReturn. 
If there are multiple dirty contexts + # on the stack, the inner-most one will count. + if s_frame.state is DirtyContext: + s_sender = s_frame.s_sender() + s_frame._activate_unwind_context(self) + target_context = s_sender if e.is_local else e.s_target_context + raise NonVirtualReturn(target_context, e.value, s_sender) + else: + s_frame._activate_unwind_context(self) + if e.s_target_context is s_sender or e.is_local: + raise LocalReturn(e.value) + else: + raise e + finally: + if self.is_tracing(): + self.stack_depth -= 1 + s_frame.state = InactiveContext def loop_bytecodes(self, s_context, may_context_switch=True): old_pc = 0 @@ -100,39 +133,22 @@ s_context=s_context) try: self.step(s_context) - except Return, nlr: - if nlr.s_target_context is s_context or nlr.is_local: - s_context.push(nlr.value) - else: - if nlr.s_target_context is None: - # This is the case where we are returning to our sender. - # Mark the return as local, so our sender will take it - nlr.is_local = True - s_context._activate_unwind_context(self) - raise nlr - - # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame - # and handles the stack overflow protection mechanism. 
- def stack_frame(self, s_frame, s_sender, may_context_switch=True): - try: - if self.is_tracing(): - self.stack_depth += 1 - if s_frame._s_sender is None and s_sender is not None: - s_frame.store_s_sender(s_sender) - # Now (continue to) execute the context bytecodes - assert s_frame.state is InactiveContext - s_frame.state = ActiveContext - self.loop_bytecodes(s_frame, may_context_switch) - except rstackovf.StackOverflow: - rstackovf.check_stack_overflow() - raise StackOverflow(s_frame) - finally: - if self.is_tracing(): - self.stack_depth -= 1 - dirty_frame = s_frame.state is DirtyContext - s_frame.state = InactiveContext - if dirty_frame: - raise SenderChainManipulation(s_frame) + except LocalReturn, ret: + s_context.push(ret.value) + + def unwind_context_chain(self, start_context, target_context, return_value): + if start_context is None: + # This is the toplevel frame. Execution ended. + raise ReturnFromTopLevel(return_value) + assert target_context + context = start_context + while context is not target_context: + assert context, "Sender chain ended without finding return-context." 
+ s_sender = context.s_sender() + context._activate_unwind_context(self) + context = s_sender + context.push(return_value) + return context def step(self, context): bytecode = context.fetch_next_bytecode() @@ -253,44 +269,60 @@ def __init__(self, object): self.object = object -class Return(Exception): - _attrs_ = ["value", "s_target_context", "is_local"] +class LocalReturn(Exception): + _attrs_ = ["value"] + def __init__(self, value): + self.value = value + +class AbstractReturn(Exception): + _attrs_ = ["value", "s_target_context"] def __init__(self, s_target_context, w_result): self.value = w_result self.s_target_context = s_target_context - self.is_local = False + +class Return(AbstractReturn): + """This is the basic Return, handled on the C-stack, + without forcing all contexts to the heap.""" + _attrs_ = ["is_local"] + def __init__(self, s_target_context, w_result, is_local): + AbstractReturn.__init__(self, s_target_context, w_result) + self.is_local = is_local + +class NonVirtualReturn(AbstractReturn): + """This Return will be passed through the entire C-stack built from stack_frame() + invokations. Used when the sender-chain has been manipulated. 
+ s_current_context is the context where the outermost loop() + will start unrolling the context-chain and looking for s_target_context.""" + + _attrs_ = ["s_current_context"] + def __init__(self, s_target_context, w_result, s_current_context): + AbstractReturn.__init__(self, s_target_context, w_result) + self.s_current_context = s_current_context + + def print_trace(self): + print "====== Sender Chain Manipulation, contexts forced to heap at: %s" % self.s_current_context.short_str() class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave the current context.""" - _attrs_ = ["s_new_context"] - type = "ContextSwitch" def __init__(self, s_new_context): self.s_new_context = s_new_context - - def print_trace(self, old_context): - print "====== %s, contexts forced to heap at: %s" % (self.type, self.s_new_context.short_str()) - + class StackOverflow(ContextSwitchException): """This causes the current jit-loop to be left, dumping all virtualized objects to the heap. This breaks performance, so it should rarely happen. In case of severe performance problems, execute with -t and check if this occurrs.""" - type = "Stack Overflow" + def print_trace(self): + print "====== Stack Overflow, contexts forced to heap at: %s" % self.s_new_context.short_str() + class ProcessSwitch(ContextSwitchException): """This causes the interpreter to switch the executed context. Triggered when switching the process.""" - def print_trace(self, old_context): - print "====== Switched process from: %s" % old_context.short_str() - print "====== to: %s " % self.s_new_context.short_str() - -class SenderChainManipulation(ContextSwitchException): - """Manipulation of the sender chain can invalidate the jitted C stack. - We have to dump all virtual objects and rebuild the stack. 
- We try to raise this as rarely as possible and as late as possible.""" - type = "Sender Manipulation" + def print_trace(self): + print "====== Switched Process to: %s" % self.s_new_context.short_str() import rpython.rlib.unroll if hasattr(unroll, "unrolling_zero"): @@ -306,7 +338,6 @@ return unrolling_int(int.__rsub__(self, other)) unrolling_zero = unrolling_int(0) - # This is a decorator for bytecode implementation methods. # parameter_bytes=N means N additional bytes are fetched as parameters. def bytecode_implementation(parameter_bytes=0): @@ -685,16 +716,12 @@ # it will find the sender as a local, and we don't have to # force the reference s_return_to = None - return_from_top = self.s_sender() is None + is_local = True else: s_return_to = self.s_home().s_sender() - return_from_top = s_return_to is None + is_local = False - if return_from_top: - # This should never happen while executing a normal image. - raise ReturnFromTopLevel(return_value) - else: - raise Return(s_return_to, return_value) + raise Return(s_return_to, return_value, is_local) # ====== Send/Return bytecodes ====== From noreply at buildbot.pypy.org Sat Jul 19 14:13:43 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sat, 19 Jul 2014 14:13:43 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state: Fixed ensure: mechanism. Fixed test. Message-ID: <20140719121343.94F481C104D@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state Changeset: r924:f5b3945a1fdc Date: 2014-07-19 13:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f5b3945a1fdc/ Log: Fixed ensure: mechanism. Fixed test. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -819,10 +819,8 @@ self.push(self.gettemp(0)) # push the first argument try: self.bytecodePrimValue(interp, 0) - except Return, nlr: - assert nlr.s_target_context or nlr.is_local - if self is not nlr.s_target_context and not nlr.is_local: - raise nlr + except LocalReturn, ret: + pass # Local return value of ensure: block is ignored finally: self.mark_returned() diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -979,7 +979,7 @@ test) def test_frame_dirty_if_active(): - bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + returnReceiverBytecode + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) w_frame, s_frame = new_frame(bytes) s_frame.store_w_receiver(w_frame) s_frame.push(w_frame) @@ -988,7 +988,7 @@ assert s_frame.state is shadow.DirtyContext def test_frame_not_dirty_if_inactive(): - bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + returnReceiverBytecode + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) w_frame, s_frame = new_frame(bytes) w_other_frame, s_other_frame = new_frame("") s_frame.store_w_receiver(w_other_frame) @@ -998,12 +998,14 @@ assert s_frame.state is shadow.ActiveContext assert s_other_frame.state is shadow.InactiveContext -def test_raise_SenderManipulation_on_dirty_frame(): - w_frame, s_frame = new_frame(returnReceiverBytecode) - s_frame.state = shadow.DirtyContext - def run_frame(): - #import pdb; pdb.set_trace() - interp._loop = True +def test_raise_NonVirtualReturn_on_dirty_frame(): + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + returnTopFromMethodBytecode + w_frame, s_frame = new_frame(bytes) + s_frame.store_w_receiver(w_frame) + s_frame.push(w_frame) + + interp._loop = True + def do_test(): interp.stack_frame(s_frame, None) - 
py.test.raises(interpreter.SenderChainManipulation, run_frame) + py.test.raises(interpreter.NonVirtualReturn, do_test) \ No newline at end of file From noreply at buildbot.pypy.org Sat Jul 19 14:41:58 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 19 Jul 2014 14:41:58 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix _multibytecodec Message-ID: <20140719124158.EF4F71C0DCA@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72468:e70f582fd5dc Date: 2014-07-17 01:43 -0500 http://bitbucket.org/pypy/pypy/changeset/e70f582fd5dc/ Log: Fix _multibytecodec diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -2,9 +2,8 @@ from rpython.rlib.objectmodel import specialize from rpython.rlib.runicode import utf8_code_length from rpython.rlib.unicodedata import unicodedb_5_2_0 as unicodedb -from rpython.rlib.rarithmetic import r_uint -from rpython.rtyper.lltypesystem import rffi -from rpython.rtyper.lltypesystem import lltype +from rpython.rlib.rarithmetic import r_uint, intmask +from rpython.rtyper.lltypesystem import rffi, lltype wchar_rint = rffi.r_uint WCHAR_INTP = rffi.UINTP @@ -464,7 +463,7 @@ if rffi.sizeof(rffi.WCHAR_T) == 2: if 0xD800 <= c <= 0xDBFF: i += 1 - c2 = int(array[i]) + c2 = intmask(array[i]) if c2 == 0: builder.append(c) break @@ -485,7 +484,7 @@ builder = Utf8Builder() i = 0; while i < size: - c = int(array[i]) + c = intmask(array[i]) if c == 0: break @@ -513,7 +512,7 @@ builder = Utf8Builder() i = 0; while i < size: - c = int(array[i]) + c = intmask(array[i]) if rffi.sizeof(rffi.WCHAR_T) == 2: if i != size - 1 and 0xD800 <= c <= 0xDBFF: diff --git a/pypy/module/_multibytecodec/c_codecs.py b/pypy/module/_multibytecodec/c_codecs.py --- a/pypy/module/_multibytecodec/c_codecs.py +++ b/pypy/module/_multibytecodec/c_codecs.py @@ -1,8 +1,9 @@ import py from rpython.rtyper.lltypesystem import lltype, rffi from 
rpython.translator.tool.cbuild import ExternalCompilationInfo +from pypy.interpreter.utf8 import Utf8Str -UNICODE_REPLACEMENT_CHARACTER = u'\uFFFD' +UNICODE_REPLACEMENT_CHARACTER = Utf8Str.from_unicode(u'\uFFFD') class EncodeDecodeError(Exception): @@ -139,7 +140,7 @@ errorcb, namecb, stringdata) src = pypy_cjk_dec_outbuf(decodebuf) length = pypy_cjk_dec_outlen(decodebuf) - return rffi.wcharpsize2unicode(src, length) + return Utf8Str.from_wcharpsize(src, length) # finally: rffi.free_nonmovingbuffer(stringdata, inbuf) @@ -164,18 +165,18 @@ if errors == "strict": raise EncodeDecodeError(start, end, reason) elif errors == "ignore": - replace = u"" + replace = Utf8Str("") elif errors == "replace": replace = UNICODE_REPLACEMENT_CHARACTER else: assert errorcb replace, end = errorcb(errors, namecb, reason, stringdata, start, end) - inbuf = rffi.get_nonmoving_unicodebuffer(replace) + inbuf = replace.copy_to_wcharp() try: r = pypy_cjk_dec_replace_on_error(decodebuf, inbuf, len(replace), end) finally: - rffi.free_nonmoving_unicodebuffer(replace, inbuf) + rffi.free_wcharp(inbuf) if r == MBERR_NOMEMORY: raise MemoryError @@ -222,7 +223,7 @@ def encodeex(encodebuf, unicodedata, errors="strict", errorcb=None, namecb=None, ignore_error=0): inleft = len(unicodedata) - inbuf = rffi.get_nonmoving_unicodebuffer(unicodedata) + inbuf = unicodedata.copy_to_wcharp() try: if pypy_cjk_enc_init(encodebuf, inbuf, inleft) < 0: raise MemoryError @@ -247,7 +248,7 @@ return rffi.charpsize2str(src, length) # finally: - rffi.free_nonmoving_unicodebuffer(unicodedata, inbuf) + rffi.free_wcharp(inbuf) def multibytecodec_encerror(encodebuf, e, errors, errorcb, namecb, unicodedata): @@ -273,7 +274,7 @@ elif errors == "replace": codec = pypy_cjk_enc_getcodec(encodebuf) try: - replace = encode(codec, u"?") + replace = encode(codec, Utf8Str("?")) except EncodeDecodeError: replace = "?" 
else: diff --git a/pypy/module/_multibytecodec/test/test_c_codecs.py b/pypy/module/_multibytecodec/test/test_c_codecs.py --- a/pypy/module/_multibytecodec/test/test_c_codecs.py +++ b/pypy/module/_multibytecodec/test/test_c_codecs.py @@ -1,4 +1,5 @@ import py +from pypy.interpreter.utf8 import Utf8Str from pypy.module._multibytecodec.c_codecs import getcodec, codecs from pypy.module._multibytecodec.c_codecs import decode, encode from pypy.module._multibytecodec.c_codecs import EncodeDecodeError @@ -95,37 +96,38 @@ def test_encode_hz(): c = getcodec("hz") - s = encode(c, u'foobar') + s = encode(c, Utf8Str('foobar')) assert s == 'foobar' and type(s) is str - s = encode(c, u'\u5f95\u6cef') + s = encode(c, Utf8Str.from_unicode(u'\u5f95\u6cef')) assert s == '~{abc}~}' def test_encode_hz_error(): # error c = getcodec("hz") - e = py.test.raises(EncodeDecodeError, encode, c, u'abc\u1234def').value + e = py.test.raises(EncodeDecodeError, encode, c, + Utf8Str.from_unicode(u'abc\u1234def')).value assert e.start == 3 assert e.end == 4 assert e.reason == "illegal multibyte sequence" def test_encode_hz_ignore(): c = getcodec("hz") - s = encode(c, u'abc\u1234def', 'ignore') + s = encode(c, Utf8Str.from_unicode(u'abc\u1234def'), 'ignore') assert s == 'abcdef' def test_encode_hz_replace(): c = getcodec("hz") - s = encode(c, u'abc\u1234def', 'replace') + s = encode(c, Utf8Str.from_unicode(u'abc\u1234def'), 'replace') assert s == 'abc?def' def test_encode_jisx0208(): c = getcodec('iso2022_jp') - s = encode(c, u'\u83ca\u5730\u6642\u592b') + s = encode(c, Utf8Str.from_unicode(u'\u83ca\u5730\u6642\u592b')) assert s == '\x1b$B5FCO;~IW\x1b(B' and type(s) is str def test_encode_custom_error_handler_bytes(): c = getcodec("hz") def errorhandler(errors, enc, msg, t, startingpos, endingpos): return None, '\xc3', endingpos - s = encode(c, u'abc\u1234def', 'foo', errorhandler) + s = encode(c, Utf8Str.from_unicode(u'abc\u1234def'), 'foo', errorhandler) assert '\xc3' in s diff --git 
a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -3,11 +3,11 @@ """ from rpython.rlib import jit -from rpython.rlib.runicode import MAXUNICODE from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.utf8_codecs import MAXUNICODE # ____________________________________________________________ From noreply at buildbot.pypy.org Sat Jul 19 14:42:00 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 19 Jul 2014 14:42:00 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix _cffi_backend Message-ID: <20140719124200.534BF1C0DCA@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72469:fbbabe9aebd1 Date: 2014-07-17 05:23 -0500 http://bitbucket.org/pypy/pypy/changeset/fbbabe9aebd1/ Log: Fix _cffi_backend diff --git a/pypy/interpreter/test/test_utf8.py b/pypy/interpreter/test/test_utf8.py --- a/pypy/interpreter/test/test_utf8.py +++ b/pypy/interpreter/test/test_utf8.py @@ -195,18 +195,21 @@ assert s.rsplit(' ', 2) == u.rsplit(' ', 2) assert s.rsplit('\n') == [s] -def test_copy_to_wcharp(): +def test_copy_to_new_wcharp(): s = build_utf8str() if sys.maxunicode < 0x10000 and rffi.sizeof(rffi.WCHAR_T) == 4: # The last character requires a surrogate pair on narrow builds and # so won't be converted correctly by rffi.wcharp2unicode s = s[:-1] - wcharp = s.copy_to_wcharp() + wcharp = s.copy_to_new_wcharp() u = rffi.wcharp2unicode(wcharp) rffi.free_wcharp(wcharp) assert s == u + with s.scoped_wcharp_copy(): + assert s == u + def test_from_wcharp(): def check(u): wcharp = rffi.unicode2wcharp(u) diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -422,7 +422,7 @@ byte_pos -= 1 return byte_pos - def copy_to_wcharp(self, track_allocation=True): + def copy_to_new_wcharp(self, track_allocation=True): length = 
len(self) + 1 if rffi.sizeof(rffi.WCHAR_T) == 2: for c in self.codepoint_iter(): @@ -431,24 +431,34 @@ array = lltype.malloc(WCHAR_INTP.TO, length, flavor='raw', track_allocation=track_allocation) + + self.copy_to_wcharp(array, 0, length) + array[length - 1] = wchar_rint(0) + + array = rffi.cast(rffi.CWCHARP, array) + return array + + def copy_to_wcharp(self, dst, dststart, length): from pypy.interpreter.utf8_codecs import create_surrogate_pair i = 0; for c in self.codepoint_iter(): + if i == length: + break + if rffi.sizeof(rffi.WCHAR_T) == 2: c1, c2 = create_surrogate_pair(c) - array[i] = wchar_rint(c1) + dst[i + dststart] = wchar_rint(c1) if c2: i += 1 - array[i] = wchar_rint(c2) + dst[i + dststart] = wchar_rint(c2) else: - array[i] = wchar_rint(c) + dst[i + dststart] = wchar_rint(c) i += 1 - array[i] = wchar_rint(0) - array = rffi.cast(rffi.CWCHARP, array) - return array + def scoped_wcharp_copy(self): + return WCharContextManager(self) @staticmethod def from_wcharp(wcharp): @@ -600,6 +610,15 @@ def build(self): return Utf8Str(self._builder.build(), self._is_ascii) +class WCharContextManager(object): + def __init__(self, str): + self.str = str + def __enter__(self): + self.data = self.str.copy_to_new_wcharp() + return self.data + def __exit__(self, *args): + rffi.free_wcharp(self.data) + # _______________________________________________ # iter.current is the current (ie the last returned) element diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py --- a/pypy/module/__pypy__/interp_builders.py +++ b/pypy/module/__pypy__/interp_builders.py @@ -2,7 +2,8 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from rpython.rlib.rstring import UnicodeBuilder, StringBuilder +from pypy.interpreter.utf8 import Utf8Builder +from rpython.rlib.rstring import StringBuilder from rpython.tool.sourcetools import func_with_new_name 
@@ -62,4 +63,4 @@ return W_Builder W_StringBuilder = create_builder("StringBuilder", str, StringBuilder) -W_UnicodeBuilder = create_builder("UnicodeBuilder", unicode, UnicodeBuilder) +W_UnicodeBuilder = create_builder("UnicodeBuilder", unicode, Utf8Builder) diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -9,7 +9,9 @@ from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.interpreter import utf8 from pypy.interpreter.error import oefmt +from pypy.interpreter.utf8 import Utf8Str, utf8ord from pypy.module._cffi_backend import cdataobj, misc from pypy.module._cffi_backend.ctypeobj import W_CType @@ -46,7 +48,7 @@ raise oefmt(space.w_TypeError, "cannot cast unicode string of length %d to ctype '%s'", len(s), self.name) - return ord(s[0]) + return utf8ord(s) def cast(self, w_ob): from pypy.module._cffi_backend import ctypeptr @@ -128,12 +130,12 @@ _attrs_ = [] def cast_to_int(self, cdata): - unichardata = rffi.cast(rffi.CWCHARP, cdata) - return self.space.wrap(ord(unichardata[0])) + unichardata = rffi.cast(utf8.WCHAR_INTP, cdata) + return self.space.wrap(intmask(unichardata[0])) def convert_to_object(self, cdata): unichardata = rffi.cast(rffi.CWCHARP, cdata) - s = rffi.wcharpsize2unicode(unichardata, 1) + s = Utf8Str.from_wcharpsize(unichardata, 1) return self.space.wrap(s) def string(self, cdataobj, maxlen): @@ -154,7 +156,7 @@ def convert_from_object(self, cdata, w_ob): value = self._convert_to_unichar(w_ob) - rffi.cast(rffi.CWCHARP, cdata)[0] = value + rffi.cast(utf8.WCHAR_INTP, cdata)[0] = utf8.wchar_rint(utf8ord(value)) class W_CTypePrimitiveSigned(W_CTypePrimitive): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -9,6 +9,8 @@ from rpython.rtyper.lltypesystem 
import lltype, rffi from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw, copy_unicode_to_raw +from pypy.interpreter import utf8 +from pypy.interpreter.utf8 import Utf8Str from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.module._cffi_backend import cdataobj, misc, ctypeprim, ctypevoid from pypy.module._cffi_backend.ctypeobj import W_CType @@ -98,10 +100,11 @@ raise oefmt(space.w_IndexError, "initializer unicode string is too long for '%s' " "(got %d characters)", self.name, n) - unichardata = rffi.cast(rffi.CWCHARP, cdata) - copy_unicode_to_raw(llunicode(s), unichardata, 0, n) + + unichardata = rffi.cast(utf8.WCHAR_INTP, cdata) + s.copy_to_wcharp(unichardata, 0, n) if n != self.length: - unichardata[n] = u'\x00' + unichardata[n] = utf8.wchar_rint(0) else: raise self._convert_error("list or tuple", w_ob) @@ -131,9 +134,9 @@ if self.is_unichar_ptr_or_array(): cdata = rffi.cast(rffi.CWCHARP, cdata) if length < 0: - u = rffi.wcharp2unicode(cdata) + u = Utf8Str.from_wcharp(cdata) else: - u = rffi.wcharp2unicoden(cdata, length) + u = Utf8Str.from_wcharpn(cdata, length) keepalive_until_here(cdataobj) return space.wrap(u) # diff --git a/pypy/module/_multibytecodec/c_codecs.py b/pypy/module/_multibytecodec/c_codecs.py --- a/pypy/module/_multibytecodec/c_codecs.py +++ b/pypy/module/_multibytecodec/c_codecs.py @@ -172,7 +172,7 @@ assert errorcb replace, end = errorcb(errors, namecb, reason, stringdata, start, end) - inbuf = replace.copy_to_wcharp() + inbuf = replace.copy_to_new_wcharp() try: r = pypy_cjk_dec_replace_on_error(decodebuf, inbuf, len(replace), end) finally: @@ -223,7 +223,7 @@ def encodeex(encodebuf, unicodedata, errors="strict", errorcb=None, namecb=None, ignore_error=0): inleft = len(unicodedata) - inbuf = unicodedata.copy_to_wcharp() + inbuf = unicodedata.copy_to_new_wcharp() try: if pypy_cjk_enc_init(encodebuf, inbuf, inleft) < 0: raise MemoryError diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py 
b/pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_rawffi/alt/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -168,7 +168,7 @@ self.argchain.arg(addr) def handle_unichar_p(self, w_ffitype, w_obj, unicodeval): - buf = unicodeval.copy_to_wcharp() + buf = unicodeval.copy_to_new_wcharp() self.w_func.to_free.append(rffi.cast(rffi.VOIDP, buf)) addr = rffi.cast(rffi.ULONG, buf) self.argchain.arg(addr) diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -210,7 +210,7 @@ # Copy unicode buffer w_unicode = from_ref(space, ref) u = space.unicode_w(w_unicode) - ref_unicode.c_buffer = u.copy_to_wcharp() + ref_unicode.c_buffer = u.copy_to_new_wcharp() return ref_unicode.c_buffer @cpython_api([PyObject], rffi.CWCHARP) From noreply at buildbot.pypy.org Sat Jul 19 14:42:01 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 19 Jul 2014 14:42:01 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix _io Message-ID: <20140719124201.ACC021C0DCA@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72470:ed2146bad83c Date: 2014-07-17 23:18 -0500 http://bitbucket.org/pypy/pypy/changeset/ed2146bad83c/ Log: Fix _io diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -1,6 +1,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import ( TypeDef, generic_new_descr, GetSetProperty) +from pypy.interpreter.utf8 import Utf8Str, utf8ord from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.module._io.interp_textio import W_TextIOBase, W_IncrementalNewlineDecoder from pypy.module._io.interp_iobase import convert_size @@ -26,8 +27,8 @@ else: newline = space.unicode_w(w_newline) - if (newline is not None and newline != u"" 
and newline != u"\n" and - newline != u"\r" and newline != u"\r\n"): + if (newline is not None and len(newline) != 0 and + newline not in (Utf8Str('\n'), Utf8Str('\r\n'), Utf8Str('\r'))): # Not using oefmt() because I don't know how to ues it # with unicode raise OperationError(space.w_ValueError, @@ -37,9 +38,9 @@ ) if newline is not None: self.readnl = newline - self.readuniversal = newline is None or newline == u"" + self.readuniversal = newline is None or len(newline) == 0 self.readtranslate = newline is None - if newline and newline[0] == u"\r": + if newline and utf8ord(newline) == ord("\r"): self.writenl = newline if self.readuniversal: self.w_decoder = space.call_function( @@ -112,7 +113,7 @@ if len(self.buf) > newlength: self.buf = self.buf[:newlength] if len(self.buf) < newlength: - self.buf.extend([u'\0'] * (newlength - len(self.buf))) + self.buf.extend([Utf8Str('\0')] * (newlength - len(self.buf))) def write(self, string): length = len(string) @@ -156,21 +157,21 @@ start = self.pos available = len(self.buf) - start if available <= 0: - return space.wrap(u"") + return space.wrap(Utf8Str("")) if size >= 0 and size <= available: end = start + size else: end = len(self.buf) assert 0 <= start <= end self.pos = end - return space.wrap(u''.join(self.buf[start:end])) + return space.wrap(Utf8Str('').join(self.buf[start:end])) def readline_w(self, space, w_limit=None): self._check_closed(space) limit = convert_size(space, w_limit) if self.pos >= len(self.buf): - return space.wrap(u"") + return space.wrap(Utf8Str("")) start = self.pos if limit < 0 or limit > len(self.buf) - self.pos: @@ -181,7 +182,7 @@ endpos, consumed = self._find_line_ending( # XXX: super inefficient, makes a copy of the entire contents. 
- u"".join(self.buf), + Utf8Str("").join(self.buf), start, end ) @@ -191,7 +192,7 @@ endpos = end assert endpos >= 0 self.pos = endpos - return space.wrap(u"".join(self.buf[start:endpos])) + return space.wrap(Utf8Str("").join(self.buf[start:endpos])) @unwrap_spec(pos=int, mode=int) def seek_w(self, space, pos, mode=0): @@ -234,7 +235,7 @@ def getvalue_w(self, space): self._check_closed(space) - return space.wrap(u''.join(self.buf)) + return space.wrap(Utf8Str('').join(self.buf)) def readable_w(self, space): self._check_closed(space) diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -6,11 +6,11 @@ from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, generic_new_descr, interp_attrproperty, interp_attrproperty_w) +from pypy.interpreter.utf8 import Utf8Str, Utf8Builder, utf8ord from pypy.module._codecs import interp_codecs from pypy.module._io.interp_iobase import W_IOBase, convert_size, trap_eintr from rpython.rlib.rarithmetic import intmask, r_uint, r_ulonglong from rpython.rlib.rbigint import rbigint -from rpython.rlib.rstring import UnicodeBuilder STATE_ZERO, STATE_OK, STATE_DETACHED = range(3) @@ -29,17 +29,17 @@ def __init__(self, space): self.w_newlines_dict = { - SEEN_CR: space.wrap(u"\r"), - SEEN_LF: space.wrap(u"\n"), - SEEN_CRLF: space.wrap(u"\r\n"), + SEEN_CR: space.wrap(Utf8Str("\r")), + SEEN_LF: space.wrap(Utf8Str("\n")), + SEEN_CRLF: space.wrap(Utf8Str("\r\n")), SEEN_CR | SEEN_LF: space.newtuple( - [space.wrap(u"\r"), space.wrap(u"\n")]), + [space.wrap(Utf8Str("\r")), space.wrap(Utf8Str("\n"))]), SEEN_CR | SEEN_CRLF: space.newtuple( - [space.wrap(u"\r"), space.wrap(u"\r\n")]), + [space.wrap(Utf8Str("\r")), space.wrap(Utf8Str("\r\n"))]), SEEN_LF | SEEN_CRLF: space.newtuple( - [space.wrap(u"\n"), space.wrap(u"\r\n")]), + [space.wrap(Utf8Str("\n")), space.wrap(Utf8Str("\r\n"))]), SEEN_CR | SEEN_LF | SEEN_CRLF: space.newtuple( - 
[space.wrap(u"\r"), space.wrap(u"\n"), space.wrap(u"\r\n")]), + [space.wrap(Utf8Str("\r")), space.wrap(Utf8Str("\n")), space.wrap(Utf8Str("\r\n"))]), } @unwrap_spec(translate=int) @@ -76,7 +76,7 @@ output = space.unicode_w(w_output) output_len = len(output) if self.pendingcr and (final or output_len): - output = u'\r' + output + output = Utf8Str('\r') + output self.pendingcr = False output_len += 1 @@ -85,13 +85,13 @@ if not final and output_len > 0: last = output_len - 1 assert last >= 0 - if output[last] == u'\r': + if output[last] == Utf8Str('\r'): output = output[:last] self.pendingcr = True output_len -= 1 if output_len == 0: - return space.wrap(u"") + return space.wrap(Utf8Str("")) # Record which newlines are read and do newline translation if # desired, all in one pass. @@ -101,12 +101,12 @@ # for the \r only_lf = False if seennl == SEEN_LF or seennl == 0: - only_lf = (output.find(u'\r') < 0) + only_lf = (output.find(Utf8Str('\r')) < 0) if only_lf: # If not already seen, quick scan for a possible "\n" character. # (there's nothing else to be done, even when in translation mode) - if seennl == 0 and output.find(u'\n') >= 0: + if seennl == 0 and output.find('\n') >= 0: seennl |= SEEN_LF # Finished: we have scanned for newlines, and none of them # need translating. @@ -115,32 +115,32 @@ while i < output_len: if seennl == SEEN_ALL: break - c = output[i] + c = utf8ord(output, i) i += 1 - if c == u'\n': + if c == ord('\n'): seennl |= SEEN_LF - elif c == u'\r': - if i < output_len and output[i] == u'\n': + elif c == ord('\r'): + if i < output_len and utf8ord(output, i) == ord('\n'): seennl |= SEEN_CRLF i += 1 else: seennl |= SEEN_CR - elif output.find(u'\r') >= 0: + elif output.find('\r') >= 0: # Translate! 
- builder = UnicodeBuilder(output_len) + builder = Utf8Builder(output_len) i = 0 while i < output_len: - c = output[i] + c = utf8ord(output, i) i += 1 - if c == u'\n': + if c == ord('\n'): seennl |= SEEN_LF - elif c == u'\r': - if i < output_len and output[i] == u'\n': + elif c == ord('\r'): + if i < output_len and utf8ord(output, i) == ord('\n'): seennl |= SEEN_CRLF i += 1 else: seennl |= SEEN_CR - builder.append(u'\n') + builder.append('\n') continue builder.append(c) output = builder.build() @@ -217,7 +217,7 @@ if self.readtranslate: # Newlines are already translated, only search for \n - pos = line.find(u'\n', start, end) + pos = line.find('\n', start, end) if pos >= 0: return pos - start + 1, 0 else: @@ -229,16 +229,16 @@ while True: # Fast path for non-control chars. The loop always ends # since the Py_UNICODE storage is NUL-terminated. - while i < size and line[start + i] > '\r': + while i < size and utf8ord(line, start + i) > ord('\r'): i += 1 if i >= size: return -1, size - ch = line[start + i] + ch = utf8ord(line, start + i) i += 1 - if ch == '\n': + if ch == ord('\n'): return i, 0 - if ch == '\r': - if line[start + i] == '\n': + if ch == ord('\r'): + if utf8ord(line, start + i) == ord('\n'): return i + 1, 0 else: return i, 0 @@ -371,7 +371,8 @@ newline = None else: newline = space.unicode_w(w_newline) - if newline and newline not in (u'\n', u'\r\n', u'\r'): + if newline and newline not in (Utf8Str('\n'), Utf8Str('\r\n'), + Utf8Str('\r')): r = space.str_w(space.repr(w_newline)) raise OperationError(space.w_ValueError, space.wrap( "illegal newline value: %s" % (r,))) @@ -382,13 +383,13 @@ self.readtranslate = newline is None self.readnl = newline - self.writetranslate = (newline != u'') + self.writetranslate = (newline != Utf8Str('')) if not self.readuniversal: self.writenl = self.readnl - if self.writenl == u'\n': + if self.writenl == Utf8Str('\n'): self.writenl = None elif _WINDOWS: - self.writenl = u"\r\n" + self.writenl = Utf8Str("\r\n") else: 
self.writenl = None @@ -508,7 +509,7 @@ def _get_decoded_chars(self, size): if self.decoded_chars is None: - return u"" + return Utf8Str("") available = len(self.decoded_chars) - self.decoded_chars_used if size < 0 or size > available: @@ -603,7 +604,7 @@ return w_final remaining = size - builder = UnicodeBuilder(size) + builder = Utf8Builder(size) # Keep reading chunks until we have n characters to return while True: @@ -710,12 +711,12 @@ if chunks: if line: chunks.append(line) - line = u''.join(chunks) + line = Utf8Str('').join(chunks) if line: return space.wrap(line) else: - return space.wrap(u'') + return space.wrap(Utf8Str('')) # _____________________________________________________________ # write methods @@ -736,15 +737,16 @@ haslf = False if (self.writetranslate and self.writenl) or self.line_buffering: - if text.find(u'\n') >= 0: + if text.find('\n') >= 0: haslf = True if haslf and self.writetranslate and self.writenl: - w_text = space.call_method(w_text, "replace", space.wrap(u'\n'), + w_text = space.call_method(w_text, "replace", + space.wrap(Utf8Str('\n')), space.wrap(self.writenl)) text = space.unicode_w(w_text) needflush = False - if self.line_buffering and (haslf or text.find(u'\r') >= 0): + if self.line_buffering and (haslf or text.find('\r') >= 0): needflush = True # XXX What if we were just reading? 
From noreply at buildbot.pypy.org Sat Jul 19 14:42:03 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 19 Jul 2014 14:42:03 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix unicodedb Message-ID: <20140719124203.0D6391C0DCA@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72471:6ae77b6a146a Date: 2014-07-19 07:40 -0500 http://bitbucket.org/pypy/pypy/changeset/6ae77b6a146a/ Log: Fix unicodedb diff --git a/pypy/module/unicodedata/interp_ucd.py b/pypy/module/unicodedata/interp_ucd.py --- a/pypy/module/unicodedata/interp_ucd.py +++ b/pypy/module/unicodedata/interp_ucd.py @@ -6,11 +6,11 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.typedef import TypeDef, interp_attrproperty +from pypy.interpreter.utf8 import utf8chr from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.objectmodel import we_are_translated -from rpython.rlib.runicode import MAXUNICODE from rpython.rlib.unicodedata import unicodedb_5_2_0, unicodedb_3_2_0 -from rpython.rlib.runicode import code_to_unichr, ord_accepts_surrogate +from rpython.rlib.runicode import ord_accepts_surrogate import sys @@ -30,47 +30,15 @@ # unicode code point. 
-if MAXUNICODE > 0xFFFF: - # Target is wide build - def unichr_to_code_w(space, w_unichr): - if not space.isinstance_w(w_unichr, space.w_unicode): +def unichr_to_code_w(space, w_unichr): + if not space.isinstance_w(w_unichr, space.w_unicode): + raise OperationError(space.w_TypeError, space.wrap( + 'argument 1 must be unicode')) + + if not space.len_w(w_unichr) == 1: raise OperationError(space.w_TypeError, space.wrap( - 'argument 1 must be unicode')) - - if not we_are_translated() and sys.maxunicode == 0xFFFF: - # Host CPython is narrow build, accept surrogates - try: - return ord_accepts_surrogate(space.unicode_w(w_unichr)) - except TypeError: - raise OperationError(space.w_TypeError, space.wrap( 'need a single Unicode character as parameter')) - else: - if not space.len_w(w_unichr) == 1: - raise OperationError(space.w_TypeError, space.wrap( - 'need a single Unicode character as parameter')) - return space.int_w(space.ord(w_unichr)) - -else: - # Target is narrow build - def unichr_to_code_w(space, w_unichr): - if not space.isinstance_w(w_unichr, space.w_unicode): - raise OperationError(space.w_TypeError, space.wrap( - 'argument 1 must be unicode')) - - if not we_are_translated() and sys.maxunicode > 0xFFFF: - # Host CPython is wide build, forbid surrogates - if not space.len_w(w_unichr) == 1: - raise OperationError(space.w_TypeError, space.wrap( - 'need a single Unicode character as parameter')) - return space.int_w(space.ord(w_unichr)) - - else: - # Accept surrogates - try: - return ord_accepts_surrogate(space.unicode_w(w_unichr)) - except TypeError: - raise OperationError(space.w_TypeError, space.wrap( - 'need a single Unicode character as parameter')) + return space.int_w(space.ord(w_unichr)) class UCD(W_Root): @@ -108,7 +76,7 @@ except KeyError: msg = space.mod(space.wrap("undefined character name '%s'"), space.wrap(name)) raise OperationError(space.w_KeyError, msg) - return space.wrap(code_to_unichr(code)) + return space.wrap(utf8chr(code)) def name(self, 
space, w_unichr, w_default=None): code = unichr_to_code_w(space, w_unichr) From noreply at buildbot.pypy.org Sat Jul 19 14:42:04 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 19 Jul 2014 14:42:04 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Remove some stray unicode literals Message-ID: <20140719124204.566071C0DCA@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72472:821f21a34f0a Date: 2014-07-19 07:41 -0500 http://bitbucket.org/pypy/pypy/changeset/821f21a34f0a/ Log: Remove some stray unicode literals diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1551,7 +1551,7 @@ "Like unicode_w, but rejects strings with NUL bytes." from rpython.rlib import rstring result = w_obj.unicode_w(self) - if u'\x00' in result: + if Utf8Str('\x00') in result: raise OperationError(self.w_TypeError, self.wrap( 'argument must be a unicode string without NUL characters')) return rstring.assert_str0(result) diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -727,7 +727,7 @@ else: bo = 1 if size == 0: - return u'', 0, bo + return Utf8Str(''), 0, bo if bo == -1: # force little endian ihi = 1 @@ -911,7 +911,7 @@ else: bo = 1 if size == 0: - return u'', 0, bo + return Utf8Str(''), 0, bo if bo == -1: # force little endian iorder = [0, 1, 2, 3] @@ -1285,7 +1285,7 @@ if errorhandler is None: errorhandler = default_unicode_error_decode if size == 0: - return u'', 0 + return Utf8Str(''), 0 pos = 0 result = Utf8Builder(size) From noreply at buildbot.pypy.org Sat Jul 19 20:36:24 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 19 Jul 2014 20:36:24 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix bytes result Message-ID: <20140719183624.D95961C0DCA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k 
Changeset: r72473:e2fd970b748f Date: 2014-07-18 16:48 -0700 http://bitbucket.org/pypy/pypy/changeset/e2fd970b748f/ Log: fix bytes result diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -244,7 +244,7 @@ """ size = self.len if size == 0: - return space.wrap('') + return space.wrapbytes('') cbuf = self._charbuf_start() s = rffi.charpsize2str(cbuf, size * self.itemsize) self._charbuf_stop() From noreply at buildbot.pypy.org Sat Jul 19 20:36:26 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 19 Jul 2014 20:36:26 +0200 (CEST) Subject: [pypy-commit] pypy py3k: issue1556: cache json object keys (cpython issue7451) in _pypyjson and Message-ID: <20140719183626.4B7D61C0DCA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72474:ed9ae55d00c8 Date: 2014-07-18 16:48 -0700 http://bitbucket.org/pypy/pypy/changeset/ed9ae55d00c8/ Log: issue1556: cache json object keys (cpython issue7451) in _pypyjson and re-enable it now that it fully passes test_json diff --git a/lib-python/3/json/__init__.py b/lib-python/3/json/__init__.py --- a/lib-python/3/json/__init__.py +++ b/lib-python/3/json/__init__.py @@ -107,6 +107,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -316,7 +322,7 @@ if (cls is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + return _pypyjson.loads(s) if _pypyjson else _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py --- a/pypy/module/_pypyjson/interp_decoder.py +++ 
b/pypy/module/_pypyjson/interp_decoder.py @@ -56,6 +56,7 @@ self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') self.pos = 0 self.last_type = TYPE_UNKNOWN + self.memo = {} def close(self): rffi.free_charp(self.ll_chars) @@ -261,6 +262,8 @@ w_name = self.decode_any(i) if self.last_type != TYPE_STRING: self._raise("Key name must be string for object starting at char %d", start) + w_name = self.memo.setdefault(self.space.unicode_w(w_name), w_name) + i = self.skip_whitespace(self.pos) ch = self.ll_chars[i] if ch != ':': diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py --- a/pypy/module/_pypyjson/test/test__pypyjson.py +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -187,4 +187,12 @@ import _pypyjson # http://json.org/JSON_checker/test/fail25.json s = '["\ttab\tcharacter\tin\tstring\t"]' - raises(ValueError, "_pypyjson.loads(s)") \ No newline at end of file + raises(ValueError, "_pypyjson.loads(s)") + + def test_keys_reuse(self): + import _pypyjson + s = '[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]' + rval = _pypyjson.loads(s) + (a, b), (c, d) = sorted(rval[0]), sorted(rval[1]) + assert a is c + assert b is d From noreply at buildbot.pypy.org Sat Jul 19 20:36:27 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 19 Jul 2014 20:36:27 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140719183627.AD3671C0DCA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72475:8064c75eb975 Date: 2014-07-19 11:35 -0700 http://bitbucket.org/pypy/pypy/changeset/8064c75eb975/ Log: merge default diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -307,13 +307,13 @@ w_co = space.appexec([], '''(): def g(x): yield x + 5 - return g.func_code + return g.__code__ ''') assert should_not_inline(w_co) == False w_co = 
space.appexec([], '''(): def g(x): yield x + 5 yield x + 6 - return g.func_code + return g.__code__ ''') assert should_not_inline(w_co) == True From noreply at buildbot.pypy.org Mon Jul 21 10:30:59 2014 From: noreply at buildbot.pypy.org (timfel) Date: Mon, 21 Jul 2014 10:30:59 +0200 (CEST) Subject: [pypy-commit] pypy default: fix compilation with Visual Studio 2013 Message-ID: <20140721083059.28F461D2A6B@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r72476:dbdcc057562b Date: 2014-07-21 10:24 +0200 http://bitbucket.org/pypy/pypy/changeset/dbdcc057562b/ Log: fix compilation with Visual Studio 2013 diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -375,21 +375,28 @@ for rule in rules: m.rule(*rule) + if len(headers_to_precompile)>0 and self.version >= 80: + # at least from VS2013 onwards we need to include PCH + # objects in the final link command + linkobjs = 'stdafx.obj @<<\n$(OBJECTS)\n<<' + else: + linkobjs = '@<<\n$(OBJECTS)\n<<' + if self.version < 80: m.rule('$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA) /out:$@' +\ - ' $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', + ' $(LIBDIRS) $(LIBS) ' + linkobjs, ]) else: m.rule('$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + \ ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST' + \ - ' /MANIFESTFILE:$*.manifest @<<\n$(OBJECTS)\n<<', + ' /MANIFESTFILE:$*.manifest ' + linkobjs, 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo /DEBUG $(LDFLAGS) $(LDFLAGSEXTRA)' + \ - ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) ' + linkobjs, ]) if shared: From noreply at buildbot.pypy.org Mon Jul 21 11:10:02 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 21 Jul 2014 
11:10:02 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-gcrefs: Extracted walking of gc references in separate module. Message-ID: <20140721091002.6FC8A1C3273@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-gcrefs Changeset: r925:8476f6a16d21 Date: 2014-07-21 11:10 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/8476f6a16d21/ Log: Extracted walking of gc references in separate module. Implemented walking the heap correctly without collecting a huge list of all objects. someInstance primitive still fails in interpreted mode, due to MemoryError in get_rpy_roots(). Don't see a way to fix except to go 64 bit. Translated version segfaults. diff --git a/spyvm/gcrefs.py b/spyvm/gcrefs.py new file mode 100644 --- /dev/null +++ b/spyvm/gcrefs.py @@ -0,0 +1,67 @@ + +from rpython.rlib import rgc, objectmodel, jit + +# ======== Internal functions ======== + +def flag(gcref): + return rgc.get_gcflag_extra(gcref) + +def toggle_flag(gcref): + rgc.toggle_gcflag_extra(gcref) + +def references(gcref): + return rgc.get_rpy_referents(gcref) + +def gc_roots(): + return rgc.get_rpy_roots() + +def _clear_all_flags(gcrefs): + for gcref in gcrefs: + if gcref and flag(gcref): + toggle_flag(gcref) + _clear_all_flags(references(gcref)) + +def _walk_gc_references(func, extra_parameter, collect_into, gcrefs): + for gcref in gcrefs: + if gcref and not flag(gcref): + toggle_flag(gcref) + result = func(gcref, extra_parameter) + if result is not None: + collect_into.append(result) + _walk_gc_references(func, extra_parameter, collect_into, references(gcref)) + return collect_into + +# ======== API of this module ======== +# The extra_parameter is here to avoid creating closures in the function parameters, +# and still be able to pass some context into the functions. It should always be a short tuple, +# so that rpython can autmatically specialize these functions. 
If it fails to do so, annotate +# all functions with extra_parameter with @objectmodel.specialize.argtype(2). + +def try_cast(type, gcref): + return rgc.try_cast_gcref_to_instance(type, gcref) + + at jit.dont_look_inside +def walk_gc_references(func, extra_parameter = None): + roots = gc_roots() + result = _walk_gc_references(func, extra_parameter, [], roots) + _clear_all_flags(roots) + _clear_all_flags(gc_roots()) # Just in case + return result + +def walk_gc_references_of_type(type, func, extra_parameter = None): + def check_type(gcref, extra): + type, func, extra_parameter = extra + w_obj = try_cast(type, gcref) + if w_obj: + func(w_obj, extra_parameter) + return None + walk_gc_references(check_type, (type, func, extra_parameter)) + +def collect_gc_references_of_type(type, filter_func = lambda obj, extra: True, extra_parameter = None): + def check_type(gcref, extra): + type, filter_func, extra_parameter = extra + w_obj = try_cast(type, gcref) + if w_obj and filter_func(w_obj, extra_parameter): + return w_obj + return None + return walk_gc_references(check_type, (type, filter_func, extra_parameter)) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -4,7 +4,7 @@ import operator from spyvm import model, shadow, error, constants, display from spyvm.error import PrimitiveFailedError, PrimitiveNotYetWrittenError -from spyvm import wrapper +from spyvm import wrapper, gcrefs from rpython.rlib import rarithmetic, rfloat, unroll, jit, objectmodel @@ -540,43 +540,25 @@ w_frame.store(interp.space, constants.CTXPART_STACKP_INDEX, interp.space.wrap_int(stackp)) return w_frame - def stm_enabled(): """NOT RPYTHON""" from rpython.rlib import rgc return hasattr(rgc, "stm_is_enabled") and rgc.stm_is_enabled() + if stm_enabled(): def get_instances_array(space, s_frame, w_class): return [] else: def get_instances_array(space, s_frame, w_class): - # This primitive returns some instance of the class on the stack. 
- # Not sure quite how to do this; maintain a weak list of all - # existing instances or something? - match_w = s_frame.instances_array(w_class) - if match_w is None: - match_w = [] - from rpython.rlib import rgc - - roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] - pending = roots[:] - while pending: - gcref = pending.pop() - if not rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - w_obj = rgc.try_cast_gcref_to_instance(model.W_Object, gcref) - if (w_obj is not None and w_obj.has_class() - and w_obj.getclass(space) is w_class): - match_w.append(w_obj) - pending.extend(rgc.get_rpy_referents(gcref)) - - while roots: - gcref = roots.pop() - if rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - roots.extend(rgc.get_rpy_referents(gcref)) - s_frame.store_instances_array(w_class, match_w) - return match_w + # TODO find a better way then always pre-collecting the entire list of objects. + instances = s_frame.instances_array(w_class) + if instances is None: + def filter_w_obj(w_obj, extra): + w_class, space = extra + return w_obj.has_class() and w_obj.getclass(space) is w_class + instances = gcrefs.collect_gc_references_of_type(model.W_Object, filter_w_obj, (w_class, space)) + s_frame.store_instances_array(w_class, instances) + return instances @expose_primitive(SOME_INSTANCE, unwrap_spec=[object]) def func(interp, s_frame, w_class): @@ -935,32 +917,6 @@ w_class = assert_pointers(w_class) w_class.as_class_get_shadow(interp.space).flush_method_caches() return w_rcvr - - at objectmodel.specialize.arg(0) -def walk_gc_references(func, gcrefs): - from rpython.rlib import rgc - for gcref in gcrefs: - if gcref and not rgc.get_gcflag_extra(gcref): - try: - rgc.toggle_gcflag_extra(gcref) - func(gcref) - walk_gc_references(func, rgc.get_rpy_referents(gcref)) - finally: - rgc.toggle_gcflag_extra(gcref) - - at objectmodel.specialize.arg(0) -def walk_gc_objects(func): - from rpython.rlib import rgc - walk_gc_references(func, rgc.get_rpy_roots()) - - 
at objectmodel.specialize.arg(0, 1) -def walk_gc_objects_of_type(type, func): - from rpython.rlib import rgc - def check_type(gcref): - w_obj = rgc.try_cast_gcref_to_instance(type, gcref) - if w_obj: - func(w_obj) - walk_gc_objects(check_type) if not stm_enabled(): # XXX: We don't have a global symbol cache. Instead, we walk all @@ -969,7 +925,7 @@ def func(interp, s_frame, w_rcvr): # This takes a long time (at least in interpreted mode), and is not really necessary. # We are monitoring changes to MethodDictionaries, so there is no need for the image to tell us. - #walk_gc_objects_of_type(shadow.MethodDictionaryShadow, lambda s_dict: s_dict.flush_method_cache()) + # gcrefs.walk_gc_references_of_type(shadow.MethodDictionaryShadow, lambda s_dict: s_dict.flush_method_cache()) return w_rcvr # ___________________________________________________________________________ From noreply at buildbot.pypy.org Mon Jul 21 17:48:53 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 21 Jul 2014 17:48:53 +0200 (CEST) Subject: [pypy-commit] benchmarks default: some quicksort changes Message-ID: <20140721154853.9D6F91C06A7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r267:76d14fcfb50a Date: 2014-07-21 17:48 +0200 http://bitbucket.org/pypy/benchmarks/changeset/76d14fcfb50a/ Log: some quicksort changes diff --git a/multithread/quick_sort/quick_sort.py b/multithread/quick_sort/quick_sort.py --- a/multithread/quick_sort/quick_sort.py +++ b/multithread/quick_sort/quick_sort.py @@ -41,6 +41,7 @@ qsort(xs, l, l0 + n - l) + def qsort_f(xs, l0, n, level): if n < 2: return [] @@ -50,25 +51,35 @@ r = l + n - 1 while l <= r: with atomic: - if xs[l] < pivot: + xl = xs[l] + if xl < pivot: l += 1 continue - if xs[r] > pivot: + xr = xs[r] + if xr > pivot: r -= 1 continue - xs[l], xs[r] = xs[r], xs[l] - l += 1 - r -= 1 + xs[l], xs[r] = xr, xl + l += 1 + r -= 1 fs = [] #right_amount = 1000 > n // 2 > 505 - right_amount = level == 4 - if right_amount: - 
fs.append(Future(qsort_f, xs, l0, r - l0 + 1, level+1)) - fs.append(Future(qsort_f, xs, l, l0 + n - l, level+1)) + do_futures = level == 4 + largs = (xs, l0, r - l0 + 1, level+1) + rargs = (xs, l, l0 + n - l, level+1) + if do_futures: + fs.append(Future(qsort_f, *largs)) + fs.append(Future(qsort_f, *rargs)) else: - fs.extend(qsort_f(xs, l0, r - l0 + 1, level+1)) - fs.extend(qsort_f(xs, l, l0 + n - l, level+1)) + if level > 4 and n < 100: + with atomic: + fs.extend(qsort_f(*largs)) + with atomic: + fs.extend(qsort_f(*rargs)) + else: + fs.extend(qsort_f(*largs)) + fs.extend(qsort_f(*rargs)) #print_abort_info(0.0000001) return fs @@ -79,26 +90,35 @@ f = fs.pop() fs.extend(f()) -def run(threads=2, n=100000): +def run(threads=2, n=20000): threads = int(threads) n = int(n) set_thread_pool(ThreadPool(threads)) + to_sort = range(n) + t = 0 + for i in range(20): + with atomic: + random.seed(i) + random.shuffle(to_sort) + s = deque(to_sort) + # qsort(s, 0, len(s)) + hint_commit_soon() - to_sort = range(n) - random.seed(121) - random.shuffle(to_sort) - s = deque(to_sort) - # qsort(s, 0, len(s)) - - fs = qsort_f(s, 0, len(s), 0) - wait_for_futures(fs) - + t -= time.time() + # start as future, otherwise we get more threads + # than we want (+1 for the main thread) + fs = Future(qsort_f, s, 0, len(s), 0) + wait_for_futures(fs()) + #assert sorted(to_sort) == list(s) + t += time.time() # shutdown current pool set_thread_pool(None) + return t + if __name__ == "__main__": From noreply at buildbot.pypy.org Mon Jul 21 17:48:55 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 21 Jul 2014 17:48:55 +0200 (CEST) Subject: [pypy-commit] benchmarks default: the primes counter Message-ID: <20140721154855.22D1A1C06A7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r268:bd9befb5e179 Date: 2014-07-21 17:48 +0200 http://bitbucket.org/pypy/benchmarks/changeset/bd9befb5e179/ Log: the primes counter diff --git a/multithread/primes/primes.py b/multithread/primes/primes.py 
new file mode 100644 --- /dev/null +++ b/multithread/primes/primes.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- + +# from https://github.com/Tinche/stm-playground + + +import sys +import time, random +from common.abstract_threading import ( + atomic, Future, set_thread_pool, ThreadPool, + hint_commit_soon, print_abort_info) + +from itertools import izip, chain, repeat + +from Queue import Queue +from pyprimes import isprime +import threading + +def check_prime(num): + return isprime(num), num + + +def grouper(n, iterable, padvalue=None): + "grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')" + return izip(*[chain(iterable, repeat(padvalue, n-1))]*n) + + +poison_pill = object() + +def worker(tasks, results): + while True: + batch = tasks.get() + if batch is poison_pill: + tasks.task_done() + return + + result = [] + for task in batch: + with atomic: + result.append(check_prime(task)) + results.put(result) + + tasks.task_done() + + + +def run(threads=2, n=2000000): + threads = int(threads) + n = int(n) + + LIMIT = n + BATCH_SIZE = 1000 + + tasks = Queue() + results = Queue() + print("Starting...") + + with atomic: + for batch in grouper(BATCH_SIZE, xrange(LIMIT), 1): + tasks.put(list(batch)) + for _ in xrange(threads): + tasks.put(poison_pill) + + for _ in xrange(threads): + t = threading.Thread(target=worker, args=(tasks, results)) + t.start() + tasks.join() + + count = 0 + while not results.empty(): + batch_results = results.get() + count += sum(1 for res in batch_results if res[0]) + + return count + + + +if __name__ == "__main__": + run() diff --git a/multithread/primes/pyprimes.py b/multithread/primes/pyprimes.py new file mode 100644 --- /dev/null +++ b/multithread/primes/pyprimes.py @@ -0,0 +1,1193 @@ +#!/usr/bin/env python + +## Module pyprimes.py +## +## Copyright (c) 2012 Steven D'Aprano. 
+## +## Permission is hereby granted, free of charge, to any person obtaining +## a copy of this software and associated documentation files (the +## "Software"), to deal in the Software without restriction, including +## without limitation the rights to use, copy, modify, merge, publish, +## distribute, sublicense, and/or sell copies of the Software, and to +## permit persons to whom the Software is furnished to do so, subject to +## the following conditions: +## +## The above copyright notice and this permission notice shall be +## included in all copies or substantial portions of the Software. +## +## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +## IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +## CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +## TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +## SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +"""Generate and test for small primes using a variety of algorithms +implemented in pure Python. + +This module includes functions for generating prime numbers, primality +testing, and factorising numbers into prime factors. Prime numbers are +positive integers with no factors other than themselves and 1. + + +Generating prime numbers +======================== + +To generate an unending stream of prime numbers, use the ``primes()`` +generator function: + + primes(): + Yield prime numbers 2, 3, 5, 7, 11, ... + + + >>> p = primes() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + +To efficiently generate pairs of (isprime(i), i) for integers i, use the +generator functions ``checked_ints()`` and ``checked_oddints()``: + + checked_ints() + Yield pairs of (isprime(i), i) for i=0,1,2,3,4,5... 
+ + checked_oddints() + Yield pairs of (isprime(i), i) for odd i=1,3,5,7... + + + >>> it = checked_ints() + >>> [next(it) for _ in range(5)] + [(False, 0), (False, 1), (True, 2), (True, 3), (False, 4)] + + +Other convenience functions wrapping ``primes()`` are: + + ------------------ ---------------------------------------------------- + Function Description + ------------------ ---------------------------------------------------- + nprimes(n) Yield the first n primes, then stop. + nth_prime(n) Return the nth prime number. + prime_count(x) Return the number of primes less than or equal to x. + primes_below(x) Yield the primes less than or equal to x. + primes_above(x) Yield the primes strictly greater than x. + primesum(n) Return the sum of the first n primes. + primesums() Yield the partial sums of the prime numbers. + ------------------ ---------------------------------------------------- + + +Primality testing +================= + +These functions test whether numbers are prime or not. Primality tests fall +into two categories: exact tests, and probabilistic tests. + +Exact tests are guaranteed to give the correct result, but may be slow, +particularly for large arguments. Probabilistic tests do not guarantee +correctness, but may be much faster for large arguments. + +To test whether an integer is prime, use the ``isprime`` function: + + isprime(n) + Return True if n is prime, otherwise return False. + + + >>> isprime(101) + True + >>> isprime(102) + False + + +Exact primality tests are: + + isprime_naive(n) + Naive and slow trial division test for n being prime. + + isprime_division(n) + A less naive trial division test for n being prime. + + isprime_regex(n) + Uses a regex to test if n is a prime number. + + .. NOTE:: ``isprime_regex`` should be considered a novelty + rather than a serious test, as it is very slow. + + +Probabilistic tests do not guarantee correctness, but can be faster for +large arguments. 
There are two probabilistic tests: + + fermat(n [, base]) + Fermat primality test, returns True if n is a weak probable + prime to the given base, otherwise False. + + miller_rabin(n [, base]) + Miller-Rabin primality test, returns True if n is a strong + probable prime to the given base, otherwise False. + + +Both guarantee no false negatives: if either function returns False, the +number being tested is certainly composite. However, both are subject to false +positives: if they return True, the number is only possibly prime. + + + >>> fermat(12400013) # composite 23*443*1217 + False + >>> miller_rabin(14008971) # composite 3*947*4931 + False + + +Prime factorisation +=================== + +These functions return or yield the prime factors of an integer. + + factors(n) + Return a list of the prime factors of n. + + factorise(n) + Yield tuples (factor, count) for n. + + +The ``factors(n)`` function lists repeated factors: + + + >>> factors(37*37*109) + [37, 37, 109] + + +The ``factorise(n)`` generator yields a 2-tuple for each unique factor, giving +the factor itself and the number of times it is repeated: + + >>> list(factorise(37*37*109)) + [(37, 2), (109, 1)] + + +Alternative and toy prime number generators +=========================================== + +These functions are alternative methods of generating prime numbers. Unless +otherwise stated, they generate prime numbers lazily on demand. These are +supplied for educational purposes and are generally slower or less efficient +than the preferred ``primes()`` generator. + + -------------- -------------------------------------------------------- + Function Description + -------------- -------------------------------------------------------- + croft() Yield prime numbers using the Croft Spiral sieve. + erat(n) Return primes up to n by the sieve of Eratosthenes. + sieve() Yield primes using the sieve of Eratosthenes. + cookbook() Yield primes using "Python Cookbook" algorithm. 
+ wheel() Yield primes by wheel factorization. + -------------- -------------------------------------------------------- + + .. TIP:: In the current implementation, the fastest of these + generators is aliased as ``primes()``. + + +""" + + +from __future__ import division + + +import functools +import itertools +import random + +from re import match as _re_match + + +# Module metadata. +__version__ = "0.1.2a" +__date__ = "2012-08-25" +__author__ = "Steven D'Aprano" +__author_email__ = "steve+python at pearwood.info" + +__all__ = ['primes', 'checked_ints', 'checked_oddints', 'nprimes', + 'primes_above', 'primes_below', 'nth_prime', 'prime_count', + 'primesum', 'primesums', 'warn_probably', 'isprime', 'factors', + 'factorise', + ] + + +# ============================ +# Python 2.x/3.x compatibility +# ============================ + +# This module should support 2.5+, including Python 3. + +try: + next +except NameError: + # No next() builtin, so we're probably running Python 2.5. + # Use a simplified version (without support for default). + def next(iterator): + return iterator.next() + +try: + range = xrange +except NameError: + # No xrange built-in, so we're almost certainly running Python3 + # and range is already a lazy iterator. + assert type(range(3)) is not list + +try: + from itertools import ifilter as filter, izip as zip +except ImportError: + # Python 3, where filter and zip are already lazy. + assert type(filter(None, [1, 2])) is not list + assert type(zip("ab", [1, 2])) is not list + +try: + from itertools import compress +except ImportError: + # Must be Python 2.x, so we need to roll our own. + def compress(data, selectors): + """compress('ABCDEF', [1,0,1,0,1,1]) --> A C E F""" + return (d for d, s in zip(data, selectors) if s) + +try: + from math import isfinite +except ImportError: + # Python 2.6 or older. + try: + from math import isnan, isinf + except ImportError: + # Python 2.5. Quick and dirty substitutes. 
+ def isnan(x): + return x != x + def isinf(x): + return x - x != 0 + def isfinite(x): + return not (isnan(x) or isinf(x)) + + +# ===================== +# Helpers and utilities +# ===================== + +def _validate_int(obj): + """Raise an exception if obj is not an integer.""" + m = int(obj + 0) # May raise TypeError, or OverflowError. + if obj != m: + raise ValueError('expected an integer but got %r' % obj) + + +def _validate_num(obj): + """Raise an exception if obj is not a finite real number.""" + m = obj + 0 # May raise TypeError. + if not isfinite(m): + raise ValueError('expected a finite real number but got %r' % obj) + + +def _base_to_bases(base, n): + if isinstance(base, tuple): + bases = base + else: + bases = (base,) + for b in bases: + _validate_int(b) + if not 1 <= b < n: + # Note that b=1 is a degenerate case which is always a prime + # witness for both the Fermat and Miller-Rabin tests. I mention + # this for completeness, not because we need to do anything + # about it. + raise ValueError('base %d out of range 1...%d' % (b, n-1)) + return bases + + +# ======================= +# Prime number generators +# ======================= + +# The preferred generator to use is ``primes()``, which will be set to the +# "best" of these generators. (If you disagree with my judgement of best, +# feel free to use the generator of your choice.) + + +def erat(n): + """Return a list of primes up to and including n. + + This is a fixed-size version of the Sieve of Eratosthenes, using an + adaptation of the traditional algorithm. + + >>> erat(30) + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + >>> erat(10000) == list(primes_below(10000)) + True + + """ + _validate_int(n) + # Generate a fixed array of integers. + arr = list(range(n+1)) # A list is faster than an array + # Cross out 0 and 1 since they aren't prime. + arr[0] = arr[1] = None + i = 2 + while i*i <= n: + # Cross out all the multiples of i starting from i**2. 
+ for p in range(i*i, n+1, i): + arr[p] = None + # Advance to the next number not crossed off. + i += 1 + while i <= n and arr[i] is None: + i += 1 + return list(filter(None, arr)) + + +def sieve(): + """Yield prime integers using the Sieve of Eratosthenes. + + This algorithm is modified to generate the primes lazily rather than the + traditional version which operates on a fixed size array of integers. + """ + # This is based on a paper by Melissa E. O'Neill, with an implementation + # given by Gerald Britton: + # http://mail.python.org/pipermail/python-list/2009-January/1188529.html + innersieve = sieve() + prevsq = 1 + table = {} + i = 2 + while True: + # Note: this explicit test is slightly faster than using + # prime = table.pop(i, None) and testing for None. + if i in table: + prime = table[i] + del table[i] + nxt = i + prime + while nxt in table: + nxt += prime + table[nxt] = prime + else: + yield i + if i > prevsq: + j = next(innersieve) + prevsq = j**2 + table[prevsq] = j + i += 1 + + +def cookbook(): + """Yield prime integers lazily using the Sieve of Eratosthenes. + + Another version of the algorithm, based on the Python Cookbook, + 2nd Edition, recipe 18.10, variant erat2. + """ + # http://onlamp.com/pub/a/python/excerpt/pythonckbk_chap1/index1.html?page=2 + table = {} + yield 2 + # Iterate over [3, 5, 7, 9, ...]. The following is equivalent to, but + # faster than, (2*i+1 for i in itertools.count(1)) + for q in itertools.islice(itertools.count(3), 0, None, 2): + # Note: this explicit test is marginally faster than using + # table.pop(i, None) and testing for None. + if q in table: + p = table[q]; del table[q] # Faster than pop. + x = p + q + while x in table or not (x & 1): + x += p + table[x] = p + else: + table[q*q] = q + yield q + + +def croft(): + """Yield prime integers using the Croft Spiral sieve. + + This is a variant of wheel factorisation modulo 30. 
+ """ + # Implementation is based on erat3 from here: + # http://stackoverflow.com/q/2211990 + # and this website: + # http://www.primesdemystified.com/ + # Memory usage increases roughly linearly with the number of primes seen. + # dict ``roots`` stores an entry x:p for every prime p. + for p in (2, 3, 5): + yield p + roots = {9: 3, 25: 5} # Map d**2 -> d. + primeroots = frozenset((1, 7, 11, 13, 17, 19, 23, 29)) + selectors = (1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0) + for q in compress( + # Iterate over prime candidates 7, 9, 11, 13, ... + itertools.islice(itertools.count(7), 0, None, 2), + # Mask out those that can't possibly be prime. + itertools.cycle(selectors) + ): + # Using dict membership testing instead of pop gives a + # 5-10% speedup over the first three million primes. + if q in roots: + p = roots[q] + del roots[q] + x = q + 2*p + while x in roots or (x % 30) not in primeroots: + x += 2*p + roots[x] = p + else: + roots[q*q] = q + yield q + + +def wheel(): + """Generate prime numbers using wheel factorisation modulo 210.""" + for i in (2, 3, 5, 7, 11): + yield i + # The following constants are taken from the paper by O'Neill. + spokes = (2, 4, 2, 4, 6, 2, 6, 4, 2, 4, 6, 6, 2, 6, 4, 2, 6, 4, 6, + 8, 4, 2, 4, 2, 4, 8, 6, 4, 6, 2, 4, 6, 2, 6, 6, 4, 2, 4, 6, 2, + 6, 4, 2, 4, 2, 10, 2, 10) + assert len(spokes) == 48 + # This removes about 77% of the composites that we would otherwise + # need to divide by. + found = [(11, 121)] # Smallest prime we care about, and its square. + for incr in itertools.cycle(spokes): + i += incr + for p, p2 in found: + if p2 > i: # i must be a prime. + found.append((i, i*i)) + yield i + break + elif i % p == 0: # i must be composite. + break + else: # This should never happen. + raise RuntimeError("internal error: ran out of prime divisors") + + +# This is the preferred way of generating prime numbers. Set this to the +# fastest/best generator. 
+primes = croft + + +# === Algorithms to avoid === + +class Awful: + """Awful and naive prime functions namespace. + + A collection of prime-related algorithms which are supplied for + educational purposes, as toys, curios, or as terrible warnings on + what **not** to do. + + None of these methods have acceptable performance; they are barely + tolerable even for the first 100 primes. + """ + + # === Prime number generators === + + @staticmethod + def naive_primes1(): + """Generate prime numbers naively, and REALLY slowly. + + >>> p = Awful.naive_primes1() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + This is about as awful as a straight-forward algorithm to generate + primes can get without deliberate pessimation. This algorithm does + not make even the most trivial optimizations: + + - it tests all numbers as potential primes, whether odd or even, + instead of skipping even numbers apart from 2; + - it checks for primality by dividing against every number less + than the candidate prime itself, instead of stopping at the + square root of the candidate; + - it fails to bail out early when it finds a factor, instead + pointlessly keeps testing. + + The result is that this is horribly slow. + """ + i = 2 + yield i + while True: + i += 1 + composite = False + for p in range(2, i): + if i%p == 0: + composite = True + if not composite: # It must be a prime. + yield i + + @staticmethod + def naive_primes2(): + """Generate prime numbers naively, and very slowly. + + >>> p = Awful.naive_primes2() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + This is a little better than ``naive_primes1``, but still horribly + slow. It makes a single optimization by using a short-circuit test + for primality testing: as soon as a factor is found, the candidate + is rejected immediately. 
+ """ + i = 2 + yield i + while True: + i += 1 + if all(i%p != 0 for p in range(2, i)): + yield i + + @staticmethod + def naive_primes3(): + """Generate prime numbers naively, and very slowly. + + >>> p = Awful.naive_primes3() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + This is an incremental improvement over ``naive_primes2`` by only + testing odd numbers as potential primes and factors. + """ + yield 2 + i = 3 + yield i + while True: + i += 2 + if all(i%p != 0 for p in range(3, i, 2)): + yield i + + @staticmethod + def trial_division(): + """Generate prime numbers using a simple trial division algorithm. + + >>> p = Awful.trial_division() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + This is the first non-naive algorithm. Due to its simplicity, it may + perform acceptably for the first hundred or so primes, if your needs + are not very demanding. However, it does not scale well for large + numbers of primes. + + This uses three optimizations: + + - only test odd numbers for primality; + - only check against the prime factors already seen; + - stop checking at the square root of the number being tested. + + With these three optimizations, we get asymptotic behaviour of + O(N*sqrt(N)/(log N)**2) where N is the number of primes found. + + Despite these , this is still unacceptably slow, especially + as the list of memorised primes grows. + """ + yield 2 + primes = [2] + i = 3 + while True: + it = itertools.takewhile(lambda p, i=i: p*p <= i, primes) + if all(i%p != 0 for p in it): + primes.append(i) + yield i + i += 2 + + @staticmethod + def turner(): + """Generate prime numbers very slowly using Euler's sieve. + + >>> p = Awful.turner() + >>> [next(p) for _ in range(10)] + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + The function is named for David Turner, who developed this implementation + in a paper in 1975. 
Due to its simplicity, it has become very popular, + particularly in Haskell circles where it is usually implemented as some + variation of:: + + primes = sieve [2..] + sieve (p : xs) = p : sieve [x | x <- xs, x `mod` p > 0] + + This algorithm is sometimes wrongly described as the Sieve of + Eratosthenes, but it is not, it is a version of Euler's Sieve. + + Although simple, it is extremely slow and inefficient, with + asymptotic behaviour of O(N**2/(log N)**2) which is even worse than + trial division, and only marginally better than ``naive_primes1``. + O'Neill calls this the "Sleight on Eratosthenes". + """ + # References: + # http://en.wikipedia.org/wiki/Sieve_of_Eratosthenes + # http://en.literateprograms.org/Sieve_of_Eratosthenes_(Haskell) + # http://www.cs.hmc.edu/~oneill/papers/Sieve-JFP.pdf + # http://www.haskell.org/haskellwiki/Prime_numbers + nums = itertools.count(2) + while True: + prime = next(nums) + yield prime + nums = filter(lambda v, p=prime: (v % p) != 0, nums) + + # === Prime number testing === + + @staticmethod + def isprime_naive(n): + """Naive primality test using naive and unoptimized trial division. + + >>> Awful.isprime_naive(17) + True + >>> Awful.isprime_naive(18) + False + + Naive, slow but thorough test for primality using unoptimized trial + division. This function does far too much work, and consequently is very + slow, but it is simple enough to verify by eye. + """ + _validate_int(n) + if n == 2: return True + if n < 2 or n % 2 == 0: return False + for i in range(3, int(n**0.5)+1, 2): + if n % i == 0: + return False + return True + + @staticmethod + def isprime_regex(n): + """Slow primality test using a regular expression. + + >>> Awful.isprime_regex(11) + True + >>> Awful.isprime_regex(15) + False + + Unsurprisingly, this is not efficient, and should be treated as a + novelty rather than a serious implementation. It is O(N^2) in time + and O(N) in memory: in other words, slow and expensive. 
+ """ + _validate_int(n) + return not _re_match(r'^1?$|^(11+?)\1+$', '1'*n) + # For a Perl or Ruby version of this, see here: + # http://montreal.pm.org/tech/neil_kandalgaonkar.shtml + # http://www.noulakaz.net/weblog/2007/03/18/a-regular-expression-to-check-for-prime-numbers/ + + + +# ===================== +# Convenience functions +# ===================== + +def checked_ints(): + """Yield tuples (isprime(i), i) for integers i=0, 1, 2, 3, 4, ... + + >>> it = checked_ints() + >>> [next(it) for _ in range(6)] + [(False, 0), (False, 1), (True, 2), (True, 3), (False, 4), (True, 5)] + + """ + oddnums = checked_oddints() + yield (False, 0) + yield next(oddnums) + yield (True, 2) + for t in oddnums: + yield t + yield (False, t[1]+1) + + +def checked_oddints(): + """Yield tuples (isprime(i), i) for odd integers i=1, 3, 5, 7, 9, ... + + >>> it = checked_oddints() + >>> [next(it) for _ in range(6)] + [(False, 1), (True, 3), (True, 5), (True, 7), (False, 9), (True, 11)] + >>> [next(it) for _ in range(6)] + [(True, 13), (False, 15), (True, 17), (True, 19), (False, 21), (True, 23)] + + """ + yield (False, 1) + odd_primes = primes() + _ = next(odd_primes) # Skip 2. + prev = 1 + for p in odd_primes: + # Yield the non-primes between the previous prime and + # the current one. + for i in itertools.islice(itertools.count(prev + 2), 0, None, 2): + if i >= p: break + yield (False, i) + # And yield the current prime. + yield (True, p) + prev = p + + +def nprimes(n): + """Convenience function that yields the first n primes. + + >>> list(nprimes(10)) + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + """ + _validate_int(n) + return itertools.islice(primes(), n) + + +def primes_above(x): + """Convenience function that yields primes strictly greater than x. + + >>> next(primes_above(200)) + 211 + + """ + _validate_num(x) + it = primes() + # Consume the primes below x as fast as possible, then yield the rest. 
+ p = next(it) + while p <= x: + p = next(it) + yield p + for p in it: + yield p + + +def primes_below(x): + """Convenience function yielding primes less than or equal to x. + + >>> list(primes_below(20)) + [2, 3, 5, 7, 11, 13, 17, 19] + + """ + _validate_num(x) + for p in primes(): + if p > x: + return + yield p + + +def nth_prime(n): + """nth_prime(n) -> int + + Return the nth prime number, starting counting from 1. Equivalent to + p-subscript-n in standard maths notation. + + >>> nth_prime(1) # First prime is 2. + 2 + >>> nth_prime(5) + 11 + >>> nth_prime(50) + 229 + + """ + # http://www.research.att.com/~njas/sequences/A000040 + _validate_int(n) + if n < 1: + raise ValueError('argument must be a positive integer') + return next(itertools.islice(primes(), n-1, None)) + + +def prime_count(x): + """prime_count(x) -> int + + Returns the number of prime numbers less than or equal to x. + It is also known as the Prime Counting Function, or pi(x). + (Not to be confused with the constant pi = 3.1415....) + + >>> prime_count(20) + 8 + >>> prime_count(10000) + 1229 + + The number of primes less than x is approximately x/(ln x - 1). + """ + # See also: http://primes.utm.edu/howmany.shtml + # http://mathworld.wolfram.com/PrimeCountingFunction.html + _validate_num(x) + return sum(1 for p in primes_below(x)) + + +def primesum(n): + """primesum(n) -> int + + primesum(n) returns the sum of the first n primes. + + >>> primesum(9) + 100 + >>> primesum(49) + 4888 + + The sum of the first n primes is approximately n**2*ln(n)/2. + """ + # See: http://mathworld.wolfram.com/PrimeSums.html + # http://www.research.att.com/~njas/sequences/A007504 + _validate_int(n) + return sum(nprimes(n)) + + +def primesums(): + """Yield the partial sums of the prime numbers. + + >>> p = primesums() + >>> [next(p) for _ in range(5)] # primes 2, 3, 5, 7, 11, ... 
+ [2, 5, 10, 17, 28] + + """ + n = 0 + for p in primes(): + n += p + yield n + + +# ================= +# Primality testing +# ================= + +def isprime(n, trials=25, warn=False): + """Return True if n is a prime number, and False if it is not. + + >>> isprime(101) + True + >>> isprime(102) + False + + ========== ======================================================= + Argument Description + ========== ======================================================= + n Number being tested for primality. + trials Count of primality tests to perform (default 25). + warn If true, warn on inexact results. (Default is false.) + ========== ======================================================= + + For values of ``n`` under approximately 341 trillion, this function is + exact and the arguments ``trials`` and ``warn`` are ignored. + + Above this cut-off value, this function may be probabilistic with a small + chance of wrongly reporting a composite (non-prime) number as prime. Such + composite numbers wrongly reported as prime are "false positive" errors. + + The argument ``trials`` controls the risk of a false positive error. The + larger number of trials, the less the chance of an error (and the slower + the function). With the default value of 25, you can expect roughly one + such error every million trillion tests, which in practical terms is + essentially "never". + + ``isprime`` cannot give a false negative error: if it reports a number is + composite, it is certainly composite, but if it reports a number is prime, + it may be only probably prime. If you pass a true value for argument + ``warn``, then a warning will be raised if the result is probabilistic. + """ + _validate_int(n) + # Deal with trivial cases first. 
+ if n < 2: + return False + elif n == 2: + return True + elif n%2 == 0: + return False + elif n <= 7: # 3, 5, 7 + return True + is_probabilistic, bases = _choose_bases(n, trials) + is_prime = miller_rabin(n, bases) + if is_prime and is_probabilistic and warn: + import warnings + warnings.warn("number is only probably prime not certainly prime") + return is_prime + + +def _choose_bases(n, count): + """Choose appropriate bases for the Miller-Rabin primality test. + + If n is small enough, returns a tuple of bases which are provably + deterministic for that n. If n is too large, return a selection of + possibly random bases. + + With k distinct Miller-Rabin tests, the probability of a false + positive result is no more than 1/(4**k). + """ + # The Miller-Rabin test is deterministic and completely accurate for + # moderate sizes of n using a surprisingly tiny number of tests. + # See: Pomerance, Selfridge and Wagstaff (1980), and Jaeschke (1993) + # http://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test + prob = False + if n < 1373653: # ~1.3 million + bases = (2, 3) + elif n < 9080191: # ~9.0 million + bases = (31, 73) + elif n < 4759123141: # ~4.7 billion + # Note to self: checked up to approximately 394 million in 9 hours. + bases = (2, 7, 61) + elif n < 2152302898747: # ~2.1 trillion + bases = (2, 3, 5, 7, 11) + elif n < 3474749660383: # ~3.4 trillion + bases = (2, 3, 5, 7, 11, 13) + elif n < 341550071728321: # ~341 trillion + bases = (2, 3, 5, 7, 11, 13, 17) + else: + # n is sufficiently large that we have to use a probabilistic test. + prob = True + bases = tuple([random.randint(2, n-1) for _ in range(count)]) + # FIXME Because bases are chosen at random, there may be duplicates + # although with extremely small probability given the size of n. + # FIXME Is it worthwhile to special case some of the lower, easier + # bases? bases = [2, 3, 5, 7, 11, 13, 17] + [random... ]? 
+ # Note: we can always be deterministic, no matter how large N is, by + # exhaustive testing against each i in the inclusive range + # 1 ... min(n-1, floor(2*(ln N)**2)). We don't do this, because it is + # expensive for large N, and of no real practical benefit. + return prob, bases + + +def isprime_division(n): + """isprime_division(integer) -> True|False + + Exact primality test returning True if the argument is a prime number, + otherwise False. + + >>> isprime_division(11) + True + >>> isprime_division(12) + False + + This function uses trial division by the primes, skipping non-primes. + """ + _validate_int(n) + if n < 2: + return False + limit = n**0.5 + for divisor in primes(): + if divisor > limit: break + if n % divisor == 0: return False + return True + + +# === Probabilistic primality tests === + +def fermat(n, base=2): + """fermat(n [, base]) -> True|False + + ``fermat(n, base)`` is a probabilistic test for primality which returns + True if integer n is a weak probable prime to the given integer base, + otherwise n is definitely composite and False is returned. + + ``base`` must be a positive integer between 1 and n-1 inclusive, or a + tuple of such bases. By default, base=2. + + If ``fermat`` returns False, that is definite proof that n is composite: + there are no false negatives. However, if it returns True, that is only + provisional evidence that n is prime. For example: + + >>> fermat(99, 7) + False + >>> fermat(29, 7) + True + + We can conclude that 99 is definitely composite, and state that 7 is a + witness that 29 may be prime. + + As the Fermat test is probabilistic, composite numbers will sometimes + pass a test, or even repeated tests: + + >>> fermat(3*11*17, 7) # A pseudoprime to base 7. + True + + You can perform multiple tests with a single call by passing a tuple of + ints as ``base``. The number must pass the Fermat test for all the bases + in order to return True. If any test fails, ``fermat`` will return False. 
+ + >>> fermat(41041, (17, 23, 356, 359)) # 41041 = 7*11*13*41 + True + >>> fermat(41041, (17, 23, 356, 359, 363)) + False + + If a number passes ``k`` Fermat tests, we can conclude that the + probability that it is either a prime number, or a particular type of + pseudoprime known as a Carmichael number, is at least ``1 - (1/2**k)``. + """ + # http://en.wikipedia.org/wiki/Fermat_primality_test + _validate_int(n) + bases = _base_to_bases(base, n) + # Deal with the simple deterministic cases first. + if n < 2: + return False + elif n == 2: + return True + elif n % 2 == 0: + return False + # Now the Fermat test proper. + for a in bases: + if pow(a, n-1, n) != 1: + return False # n is certainly composite. + return True # All of the bases are witnesses for n being prime. + + +def miller_rabin(n, base=2): + """miller_rabin(integer [, base]) -> True|False + + ``miller_rabin(n, base)`` is a probabilistic test for primality which + returns True if integer n is a strong probable prime to the given integer + base, otherwise n is definitely composite and False is returned. + + ``base`` must be a positive integer between 1 and n-1 inclusive, or a + tuple of such bases. By default, base=2. + + If ``miller_rabin`` returns False, that is definite proof that n is + composite: there are no false negatives. However, if it returns True, + that is only provisional evidence that n is prime: + + >>> miller_rabin(99, 7) + False + >>> miller_rabin(29, 7) + True + + We can conclude from this that 99 is definitely composite, and that 29 is + possibly prime. + + As the Miller-Rabin test is probabilistic, composite numbers will + sometimes pass one or more tests: + + >>> miller_rabin(3*11*17, 103) # 3*11*17=561, the 1st Carmichael number. + True + + You can perform multiple tests with a single call by passing a tuple of + ints as ``base``. The number must pass the Miller-Rabin test for each of + the bases before it will return True. If any test fails, ``miller_rabin`` + will return False. 
+ + >>> miller_rabin(41041, (16, 92, 100, 256)) # 41041 = 7*11*13*41 + True + >>> miller_rabin(41041, (16, 92, 100, 256, 288)) + False + + If a number passes ``k`` Miller-Rabin tests, we can conclude that the + probability that it is a prime number is at least ``1 - (1/4**k)``. + """ + # http://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test + _validate_int(n) + bases = _base_to_bases(base, n) + # Deal with the trivial cases. + if n < 2: + return False + if n == 2: + return True + elif n % 2 == 0: + return False + # Now perform the Miller-Rabin test proper. + # Start by writing n-1 as 2**s * d. + d, s = _factor2(n-1) + for a in bases: + if _is_composite(a, d, s, n): + return False # n is definitely composite. + # If we get here, all of the bases are witnesses for n being prime. + return True + + +def _factor2(n): + """Factorise positive integer n as d*2**i, and return (d, i). + + >>> _factor2(768) + (3, 8) + >>> _factor2(18432) + (9, 11) + + Private function used internally by ``miller_rabin``. + """ + assert n > 0 and int(n) == n + i = 0 + d = n + while 1: + q, r = divmod(d, 2) + if r == 1: + break + i += 1 + d = q + assert d%2 == 1 + assert d*2**i == n + return (d, i) + + +def _is_composite(b, d, s, n): + """_is_composite(b, d, s, n) -> True|False + + Tests base b to see if it is a witness for n being composite. Returns + True if n is definitely composite, otherwise False if it *may* be prime. + + >>> _is_composite(4, 3, 7, 385) + True + >>> _is_composite(221, 3, 7, 385) + False + + Private function used internally by ``miller_rabin``. + """ + assert d*2**s == n-1 + if pow(b, d, n) == 1: + return False + for i in range(s): + if pow(b, 2**i * d, n) == n-1: + return False + return True + + +# =================== +# Prime factorisation +# =================== + +if __debug__: + # Set _EXTRA_CHECKS to True to enable potentially expensive assertions + # in the factors() and factorise() functions. 
This is only defined or + # checked when assertions are enabled. + _EXTRA_CHECKS = False + + +def factors(n): + """factors(integer) -> [list of factors] + + Returns a list of the (mostly) prime factors of integer n. For negative + integers, -1 is included as a factor. If n is 0 or 1, [n] is returned as + the only factor. Otherwise all the factors will be prime. + + >>> factors(-693) + [-1, 3, 3, 7, 11] + >>> factors(55614) + [2, 3, 13, 23, 31] + + """ + _validate_int(n) + result = [] + for p, count in factorise(n): + result.extend([p]*count) + if __debug__: + # The following test only occurs if assertions are on. + if _EXTRA_CHECKS: + prod = 1 + for x in result: + prod *= x + assert prod == n, ('factors(%d) failed multiplication test' % n) + return result + + +def factorise(n): + """factorise(integer) -> yield factors of integer lazily + + >>> list(factorise(3*7*7*7*11)) + [(3, 1), (7, 3), (11, 1)] + + Yields tuples of (factor, count) where each factor is unique and usually + prime, and count is an integer 1 or larger. + + The factors are prime, except under the following circumstances: if the + argument n is negative, -1 is included as a factor; if n is 0 or 1, it + is given as the only factor. For all other integer n, all of the factors + returned are prime. + """ + _validate_int(n) + if n in (0, 1, -1): + yield (n, 1) + return + elif n < 0: + yield (-1, 1) + n = -n + assert n >= 2 + for p in primes(): + if p*p > n: break + count = 0 + while n % p == 0: + count += 1 + n //= p + if count: + yield (p, count) + if n != 1: + if __debug__: + # The following test only occurs if assertions are on. 
+ if _EXTRA_CHECKS: + assert isprime(n), ('failed isprime test for %d' % n) + yield (n, 1) + + + +if __name__ == '__main__': + import doctest + doctest.testmod() + From noreply at buildbot.pypy.org Mon Jul 21 18:39:11 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 21 Jul 2014 18:39:11 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed WeakMessageSend class. Message-ID: <20140721163911.05C0D1C06A7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r926:64d91d5de341 Date: 2014-07-21 18:35 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/64d91d5de341/ Log: Fixed WeakMessageSend class. Errors at startup are gone, and print- its in workspace work!! diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes --- a/images/Squeak4.5-noBitBlt.changes +++ b/images/Squeak4.5-noBitBlt.changes @@ -12620,4 +12620,15 @@ 1 to: self splayTreeSize do: [:i | self insertNewNode. - ]! ! ----SNAPSHOT----{15 July 2014 . 6:10:56 pm} Squeak4.5-noBitBlt.image priorSource: 15894330! \ No newline at end of file + ]! ! ----SNAPSHOT----{15 July 2014 . 6:10:56 pm} Squeak4.5-noBitBlt.image priorSource: 15894330! + +----QUIT/NOSAVE----{21 July 2014 . 4:18:39 pm} Squeak4.5-noBitBlt.image priorSource: 15894825! ----STARTUP----{21 July 2014 . 6:19:06 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:20' prior: 34321504! withEnsuredReceiverAndArgumentsDo: aBlock otherwise: altBlock "Grab real references to receiver and arguments. If they still exist, evaluate aBlock." "Return if my receiver has gone away" | r a | r := self receiver. r ifNil: [ ^altBlock value ]. "Make sure that my arguments haven't gone away" arguments ifNil: [ ^ altBlock value ]. a := Array withAll: arguments. a with: shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. ^aBlock value: r value: a! ! ----QUIT----{21 July 2014 . 
6:20:43 pm} Squeak4.5-noBitBlt.image priorSource: 15894825! + +----QUIT/NOSAVE----{21 July 2014 . 4:21:36 pm} Squeak4.5-noBitBlt.image priorSource: 15895702! ----STARTUP----{21 July 2014 . 6:21:54 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !WeakMessageSend methodsFor: 'comparing' stamp: 'ag 7/21/2014 18:22' prior: 33144463! = anObject "Compare equal to equivalent MessageSend" ^ anObject isMessageSend and: [self receiver == anObject receiver and: [selector == anObject selector and: [(Array withAll: self arguments) = (Array withAll: anObject arguments)]]] ! ! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:23' prior: 49449636! withEnsuredReceiverAndArgumentsDo: aBlock otherwise: altBlock "Grab real references to receiver and arguments. If they still exist, evaluate aBlock." "Return if my receiver has gone away" | r a | r := self receiver. r ifNil: [ ^altBlock value ]. "Make sure that my arguments haven't gone away" a := Array withAll: self arguments. a with: shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. ^aBlock value: r value: a! ! ----QUIT----{21 July 2014 . 6:23:49 pm} Squeak4.5-noBitBlt.image priorSource: 15895702! ----STARTUP----{21 July 2014 . 6:31:05 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !WeakMessageSend methodsFor: 'accessing' stamp: 'ag 7/21/2014 18:31'! shouldBeNil ^ shouldBeNil ifNil: [ Array new ]! ! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:31' prior: 33148869! isAnyArgumentGarbage "Make sure that my arguments haven't gone away" arguments ifNotNil: [ arguments with: self shouldBeNil do: [ :arg :flag | (flag not and: [arg isNil]) ifTrue: [^true] ] ]. ^false ! ! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:31' prior: 49450841! withEnsuredReceiverAndArgumentsDo: aBlock otherwise: altBlock "Grab real references to receiver and arguments. If they still exist, evaluate aBlock." 
"Return if my receiver has gone away" | r a | r := self receiver. r ifNil: [ ^altBlock value ]. "Make sure that my arguments haven't gone away" a := Array withAll: self arguments. a with: self shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. ^aBlock value: r value: a! ! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:32' prior: 34360552! withEnsuredReceiverAndArgumentsDo: aBlock withEnoughArguments: anArray otherwise: altBlock "call the selector with enough arguments from arguments and anArray" | r selfArgs enoughArgs | r := self receiver. r ifNil: [ ^altBlock value ]. selfArgs := self arguments. selfArgs with: self shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. enoughArgs := Array new: selector numArgs. enoughArgs replaceFrom: 1 to: ( selfArgs size min: enoughArgs size) with: selfArgs startingAt: 1. enoughArgs size > selfArgs size ifTrue: [ enoughArgs replaceFrom: selfArgs size + 1 to: (selfArgs size + anArray size min: enoughArgs size) with: anArray startingAt: 1. ]. ^aBlock value: r value: enoughArgs! ! ----QUIT----{21 July 2014 . 6:32:32 pm} Squeak4.5-noBitBlt.image priorSource: 15896872! + +----STARTUP----{21 July 2014 . 4:32:52 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! + + +1+1! + +----QUIT/NOSAVE----{21 July 2014 . 4:33:15 pm} Squeak4.5-noBitBlt.image priorSource: 15898877! 
\ No newline at end of file diff --git a/images/Squeak4.5-noBitBlt.image b/images/Squeak4.5-noBitBlt.image index ed92c78c940799d91bb94a8ed8527076db6816c7..00843c2c83f9c11e5dcfa3b9927bd415d0a22cd8 GIT binary patch [cut] From noreply at buildbot.pypy.org Tue Jul 22 04:34:25 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Tue, 22 Jul 2014 04:34:25 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: merge default Message-ID: <20140722023425.A15CD1C06A7@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72477:733a54b31533 Date: 2014-07-21 15:11 +0000 http://bitbucket.org/pypy/pypy/changeset/733a54b31533/ Log: merge default diff too long, truncating to 2000 out of 9153 lines diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -389,12 +389,13 @@ func.__name__ = name_or_ordinal return func -class PyDLL(CDLL): - """This class represents the Python library itself. It allows to - access Python API functions. The GIL is not released, and - Python exceptions are handled correctly. - """ - _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI +# Not in PyPy +#class PyDLL(CDLL): +# """This class represents the Python library itself. It allows to +# access Python API functions. The GIL is not released, and +# Python exceptions are handled correctly. 
+# """ +# _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI if _os.name in ("nt", "ce"): @@ -447,15 +448,8 @@ return self._dlltype(name) cdll = LibraryLoader(CDLL) -pydll = LibraryLoader(PyDLL) - -if _os.name in ("nt", "ce"): - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - +# not on PyPy +#pydll = LibraryLoader(PyDLL) if _os.name in ("nt", "ce"): windll = LibraryLoader(WinDLL) diff --git a/lib-python/2.7/ctypes/test/test_values.py b/lib-python/2.7/ctypes/test/test_values.py --- a/lib-python/2.7/ctypes/test/test_values.py +++ b/lib-python/2.7/ctypes/test/test_values.py @@ -4,6 +4,7 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test @@ -23,7 +24,8 @@ class Win_ValuesTestCase(unittest.TestCase): """This test only works when python itself is a dll/shared library""" - + + @xfail def test_optimizeflag(self): # This test accesses the Py_OptimizeFlag intger, which is # exported by the Python dll. @@ -40,6 +42,7 @@ else: self.assertEqual(opt, 2) + @xfail def test_frozentable(self): # Python exports a PyImport_FrozenModules symbol. This is a # pointer to an array of struct _frozen entries. 
The end of the @@ -75,6 +78,7 @@ from ctypes import _pointer_type_cache del _pointer_type_cache[struct_frozen] + @xfail def test_undefined(self): self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol") diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -98,6 +98,17 @@ self.assertTrue(key in self.g) self.assertTrue(self.g.has_key(key)) + def test_unicode_key(self): + key = u'ab' + value = u'cd' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) + def test_main(): run_unittest(TestGdbm) diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -55,11 +55,6 @@ import gc import sys import time -try: - import itertools -except ImportError: - # Must be an older Python version (see timeit() below) - itertools = None __all__ = ["Timer"] @@ -81,7 +76,8 @@ def inner(_it, _timer): %(setup)s _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 %(stmt)s _t1 = _timer() return _t1 - _t0 @@ -96,7 +92,8 @@ def inner(_it, _timer, _func=func): setup() _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 _func() _t1 = _timer() return _t1 - _t0 @@ -133,9 +130,19 @@ else: raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec code in globals(), ns - self.inner = ns["inner"] + def make_inner(): + # PyPy tweak: recompile the source code each time before + # calling inner(). 
There are situations like Issue #1776 + # where PyPy tries to reuse the JIT code from before, + # but that's not going to work: the first thing the + # function does is the "-s" statement, which may declare + # new classes (here a namedtuple). We end up with + # bridges from the inner loop; more and more of them + # every time we call inner(). + code = compile(src, dummy_src_name, "exec") + exec code in globals(), ns + return ns["inner"] + self.make_inner = make_inner elif hasattr(stmt, '__call__'): self.src = None if isinstance(setup, basestring): @@ -144,7 +151,8 @@ exec _setup in globals(), ns elif not hasattr(setup, '__call__'): raise ValueError("setup is neither a string nor callable") - self.inner = _template_func(setup, stmt) + inner = _template_func(setup, stmt) + self.make_inner = lambda: inner else: raise ValueError("stmt is neither a string nor callable") @@ -185,15 +193,12 @@ to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. 
""" - if itertools: - it = itertools.repeat(None, number) - else: - it = [None] * number + inner = self.make_inner() gcold = gc.isenabled() if '__pypy__' not in sys.builtin_module_names: gc.disable() # only do that on CPython try: - timing = self.inner(it, self.timer) + timing = inner(number, self.timer) finally: if gcold: gc.enable() diff --git a/lib-python/2.7/xml/sax/saxutils.py b/lib-python/2.7/xml/sax/saxutils.py --- a/lib-python/2.7/xml/sax/saxutils.py +++ b/lib-python/2.7/xml/sax/saxutils.py @@ -98,13 +98,14 @@ except AttributeError: pass # wrap a binary writer with TextIOWrapper - class UnbufferedTextIOWrapper(io.TextIOWrapper): - def write(self, s): - super(UnbufferedTextIOWrapper, self).write(s) - self.flush() - return UnbufferedTextIOWrapper(buffer, encoding=encoding, + return _UnbufferedTextIOWrapper(buffer, encoding=encoding, errors='xmlcharrefreplace', newline='\n') +# PyPy: moved this class outside the function above +class _UnbufferedTextIOWrapper(io.TextIOWrapper): + def write(self, s): + super(_UnbufferedTextIOWrapper, self).write(s) + self.flush() class XMLGenerator(handler.ContentHandler): diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -219,6 +219,8 @@ if restype is None: import ctypes restype = ctypes.c_int + if self._argtypes_ is None: + self._argtypes_ = [] self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.2" -__version_info__ = (0, 8, 2) +__version__ = "0.8.6" +__version_info__ = (0, 8, 6) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -55,8 +55,7 @@ # 
_cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert (backend.__version__ == __version__ or - backend.__version__ == __version__[:3]) + assert backend.__version__ == __version__ # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) @@ -443,6 +442,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + for key, val in ffi._parser._int_constants.items(): + if key not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -24,6 +24,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -99,6 +100,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -128,9 +130,10 @@ finally: if lock is not None: lock.release() - return ast, macros + # csource will be used to find buggy source text + return ast, macros, csource - def convert_pycparser_error(self, e, csource): + def _convert_pycparser_error(self, e, csource): # xxx look for ":NUM:" at the start of str(e) and try to interpret # it as a line number line = None @@ -142,6 +145,12 @@ csourcelines = csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: @@ -160,14 +169,9 @@ 
self._packed = prev_packed def _internal_parse(self, csource): - ast, macros = self._parse(csource) + ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -175,27 +179,61 @@ if decl.name == '__dotdotdot__': break # - for decl in iterator: - if isinstance(decl, pycparser.c_ast.Decl): - self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise api.CDefError("typedef does not declare any name", - decl) - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_type(decl.name) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + try: + for decl in iterator: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) + and decl.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_type(decl.name) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_ptr_type(decl.name) + else: + realtype = 
self._get_type(decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) + self._add_constants(key, pyvalue) + elif value == '...': + self._declare('macro ' + key, value) else: - raise api.CDefError("unrecognized construct", decl) + raise api.CDefError('only supports the syntax "#define ' + '%s ..." 
(literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) def _parse_decl(self, decl): node = decl.type @@ -227,7 +265,7 @@ self._declare('variable ' + decl.name, tp) def parse_type(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): @@ -306,7 +344,8 @@ if ident == 'void': return model.void_type if ident == '__dotdotdot__': - raise api.FFIError('bad usage of "..."') + raise api.FFIError(':%d: bad usage of "..."' % + typenode.coord.line) return resolve_common_type(ident) # if isinstance(type, pycparser.c_ast.Struct): @@ -333,7 +372,8 @@ return self._get_struct_union_enum_type('union', typenode, name, nested=True) # - raise api.FFIError("bad or unsupported type declaration") + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) @@ -499,6 +539,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -506,8 +550,8 @@ self._partial_length = True return '...' 
# - raise api.FFIError("unsupported expression: expected a " - "simple numeric constant") + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) def _build_enum_type(self, explicit_name, decls): if decls is not None: @@ -522,6 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -535,3 +580,5 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + self._add_constants(k, v) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -38,6 +38,7 @@ import distutils.errors # dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() options = dist.get_option_dict('build_ext') options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -89,43 +89,54 @@ # by generate_cpy_function_method(). prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") - prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},') - prnt(' {NULL, NULL} /* Sentinel */') + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # # standard init. 
modname = self.verifier.get_module_name() - if sys.version_info >= (3,): - prnt('static struct PyModuleDef _cffi_module_def = {') - prnt(' PyModuleDef_HEAD_INIT,') - prnt(' "%s",' % modname) - prnt(' NULL,') - prnt(' -1,') - prnt(' _cffi_methods,') - prnt(' NULL, NULL, NULL, NULL') - prnt('};') - prnt() - initname = 'PyInit_%s' % modname - createmod = 'PyModule_Create(&_cffi_module_def)' - errorcase = 'return NULL' - finalreturn = 'return lib' - else: - initname = 'init%s' % modname - createmod = 'Py_InitModule("%s", _cffi_methods)' % modname - errorcase = 'return' - finalreturn = 'return' + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() prnt('PyMODINIT_FUNC') - prnt('%s(void)' % initname) + prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') - prnt(' lib = %s;' % createmod) - prnt(' if (lib == NULL || %s < 0)' % ( - self._chained_list_constants[False],)) - prnt(' %s;' % errorcase) - prnt(' _cffi_init();') - prnt(' %s;' % finalreturn) + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') def load_library(self): # XXX review all usages of 'self' here! 
@@ -394,7 +405,7 @@ meth = 'METH_O' else: meth = 'METH_VARARGS' - self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth)) + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop @@ -481,8 +492,8 @@ if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname, - layoutfuncname)) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: @@ -589,13 +600,7 @@ 'variable type'),)) assert delayed else: - prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name)) - prnt(' o = PyInt_FromLong((long)(%s));' % (name,)) - prnt(' else if ((%s) <= 0)' % (name,)) - prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,)) - prnt(' else') - prnt(' o = PyLong_FromUnsignedLongLong(' - '(unsigned long long)(%s));' % (name,)) + prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: @@ -632,13 +637,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int %s(PyObject *lib)' % funcname) prnt('{') @@ -760,17 +770,30 @@ #include #include -#ifdef MS_WIN32 -#include /* for alloca() */ -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; 
-typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif #if PY_MAJOR_VERSION < 3 @@ -795,6 +818,15 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ @@ -804,14 +836,14 @@ PyLong_FromLongLong(x))) #define _cffi_to_c_int(o, type) \ - (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ - : _cffi_to_c_i8(o)) : \ - sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ - : _cffi_to_c_i16(o)) : \ - sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ - : _cffi_to_c_i32(o)) : \ - sizeof(type) == 8 ? (((type)-1) > 0 ? _cffi_to_c_u64(o) \ - : _cffi_to_c_i64(o)) : \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? 
(type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -885,25 +917,32 @@ return PyBool_FromLong(was_alive); } -static void _cffi_init(void) +static int _cffi_init(void) { - PyObject *module = PyImport_ImportModule("_cffi_backend"); - PyObject *c_api_object; + PyObject *module, *c_api_object = NULL; + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) - return; + goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) - return; + goto failure; if (!PyCapsule_CheckExact(c_api_object)) { - Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); - return; + goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -249,10 +249,10 @@ prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') self.export_symbols.append(layoutfuncname) - prnt('ssize_t %s(ssize_t i)' % (layoutfuncname,)) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) - prnt(' static ssize_t nums[] = {') + prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize in tp.enumfields(): @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] + BFunc = 
self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -410,13 +410,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) @@ -430,14 +435,14 @@ enumerator, enumerator, enumvalue)) prnt(' char buf[64];') prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % enumerator) - prnt(' snprintf(out_error, 255,' + prnt(' sprintf(out_error,' ' "%s has the real value %s, not %s",') prnt(' "%s", buf, "%d");' % ( - enumerator, enumvalue)) + enumerator[:100], enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') @@ -453,7 +458,7 @@ else: BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: @@ -547,20 +552,29 @@ #include #include /* XXX for ssize_t on some platforms */ -#ifdef _WIN32 -# include -# define snprintf _snprintf -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; 
-typedef SSIZE_T ssize_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif #else -# include +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif ''' diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -50,6 +50,8 @@ pass def _fromstr(key): + if isinstance(key, unicode): + key = key.encode("ascii") if not isinstance(key, str): raise TypeError("gdbm mappings have string indices only") return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} @@ -71,8 +73,8 @@ def _raise_from_errno(self): if ffi.errno: - raise error(os.strerror(ffi.errno)) - raise error(lib.gdbm_strerror(lib.gdbm_errno)) + raise error(ffi.errno, os.strerror(ffi.errno)) + raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno)) def __len__(self): if self.size < 0: @@ -141,7 +143,7 @@ def _check_closed(self): if not self.ll_dbm: - raise error("GDBM object has already been closed") + raise error(0, "GDBM object has already been closed") __del__ = close @@ -159,7 +161,7 @@ elif flags[0] == 'n': iflags = lib.GDBM_NEWDB else: - raise error("First flag must be one of 'r', 'w', 'c' or 'n'") + raise error(0, "First flag must be one of 'r', 'w', 'c' or 'n'") for flag in flags[1:]: if flag == 'f': iflags |= lib.GDBM_FAST @@ -168,7 +170,7 @@ elif flag == 'u': iflags |= lib.GDBM_NOLOCK else: - raise error("Flag '%s' not supported" % flag) + raise error(0, "Flag 
'%s' not supported" % flag) return gdbm(filename, iflags, mode) open_flags = "rwcnfsu" diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,13 +72,11 @@ Here is a list of the limitations and missing features of the current implementation: -* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer - of PyPy, at your own risks and without doing anything sensible about - the GIL. Since PyPy 2.3, these functions are also named with an extra - "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, - but it might more or less work in simple cases if you do. (Obviously, - assuming the PyObject pointers you get have any particular fields in - any particular order is just going to crash.) +* ``ctypes.pythonapi`` is missing. In previous versions, it was present + and redirected to the `cpyext` C API emulation layer, but our + implementation did not do anything sensible about the GIL and the + functions were named with an extra "Py", for example + ``PyPyInt_FromLong()``. It was removed for being unhelpful. * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -8,6 +8,9 @@ *Articles about PyPy published so far, most recent first:* (bibtex_ file) +* `A Way Forward in Parallelising Dynamic Languages`_, + R. Meier, A. Rigo + * `Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`_, C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo @@ -71,6 +74,7 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib +.. _`A Way Forward in Parallelising Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2014/position-paper.pdf .. 
_`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf .. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf .. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf @@ -93,6 +97,11 @@ Talks and Presentations ---------------------------------- +*This part is no longer updated.* The complete list is here__ (in +alphabetical order). + +.. __: https://bitbucket.org/pypy/extradoc/src/extradoc/talk/ + Talks in 2010 +++++++++++++ diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -171,16 +171,21 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. -Note that the JIT has a very high warm-up cost, meaning that the -programs are slow at the beginning. If you want to compare the timings -with CPython, even relatively simple programs need to run *at least* one -second, preferrably at least a few seconds. Large, complicated programs -need even more time to warm-up the JIT. +`Your tests are not a benchmark`_: tests tend to be slow under PyPy +because they run exactly once; if they are good tests, they exercise +various corner cases in your code. This is a bad case for JIT +compilers. Note also that our JIT has a very high warm-up cost, meaning +that any program is slow at the beginning. If you want to compare the +timings with CPython, even relatively simple programs need to run *at +least* one second, preferrably at least a few seconds. Large, +complicated programs need even more time to warm-up the JIT. .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +.. 
_`your tests are not a benchmark`: http://alexgaynor.net/2013/jul/15/your-tests-are-not-benchmark/ + --------------------------------------------------------------- Couldn't the JIT dump and reload already-compiled machine code? --------------------------------------------------------------- @@ -465,9 +470,13 @@ This is documented (here__ and here__). It needs 4 GB of RAM to run "rpython targetpypystandalone" on top of PyPy, a bit more when running -on CPython. If you have less than 4 GB it will just swap forever (or -fail if you don't have enough swap). On 32-bit, divide the numbers by -two. +on top of CPython. If you have less than 4 GB free, it will just swap +forever (or fail if you don't have enough swap). And we mean *free:* +if the machine has 4 GB *in total,* then it will swap. + +On 32-bit, divide the numbers by two. (We didn't try recently, but in +the past it was possible to compile a 32-bit version on a 2 GB Linux +machine with nothing else running: no Gnome/KDE, for example.) .. __: http://pypy.org/download.html#building-from-source .. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst --- a/pypy/doc/jit-hooks.rst +++ b/pypy/doc/jit-hooks.rst @@ -34,7 +34,7 @@ aborted due to some reason. The hook will be invoked with the siagnture: - ``hook(jitdriver_name, greenkey, reason)`` + ``hook(jitdriver_name, greenkey, reason, oplist)`` Reason is a string, the meaning of other arguments is the same as attributes on JitLoopInfo object diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -26,6 +26,16 @@ Transparent Proxies ================================ +.. warning:: + + This is a feature that was tried experimentally long ago, and we + found no really good use cases. The basic functionality is still + there, but we don't recommend using it. 
Some of the examples below + might not work any more (e.g. you can't tproxy a list object any + more). The rest can be done by hacking in standard Python. If + anyone is interested in working on tproxy again, he is welcome, but + we don't regard this as an interesting extension. + PyPy's Transparent Proxies allow routing of operations on objects to a callable. Application level code can customize objects without interfering with the type system - ``type(proxied_list) is list`` holds true diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -28,7 +28,8 @@ Introduction ============ -``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ +``pypy-stm`` is a variant of the regular PyPy interpreter. (This +version supports Python 2.7; see below for `Python 3`_.) With caveats_ listed below, it should be in theory within 20%-50% slower than a regular PyPy, comparing the JIT version in both cases (but see below!). It is called @@ -92,9 +93,9 @@ We're busy fixing them as we find them; feel free to `report bugs`_. * It runs with an overhead as low as 20% on examples like "richards". - There are also other examples with higher overheads --up to 10x for - "translate.py"-- which we are still trying to understand. One suspect - is our partial GC implementation, see below. + There are also other examples with higher overheads --currently up to + 2x for "translate.py"-- which we are still trying to understand. + One suspect is our partial GC implementation, see below. * Currently limited to 1.5 GB of RAM (this is just a parameter in `core.h`__). Memory overflows are not correctly handled; they cause @@ -111,9 +112,8 @@ * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. Programs allocating large - numbers of small objects that don't immediately die, as well as - programs that modify large lists or dicts, suffer from these missing - optimizations. 
+ numbers of small objects that don't immediately die (surely a common + situation) suffer from these missing optimizations. * The GC has no support for destructors: the ``__del__`` method is never called (including on file objects, which won't be closed for you). @@ -138,6 +138,25 @@ +Python 3 +======== + +In this document I describe "pypy-stm", which is based on PyPy's Python +2.7 interpreter. Supporting Python 3 should take about half an +afternoon of work. Obviously, what I *don't* mean is that by tomorrow +you can have a finished and polished "pypy3-stm" product. General py3k +work is still missing; and general stm work is also still missing. But +they are rather independent from each other, as usual in PyPy. The +required afternoon of work will certainly be done one of these days now +that the internal interfaces seem to stabilize. + +The same is true for other languages implemented in the RPython +framework, although the amount of work to put there might vary, because +the STM framework within RPython is currently targeting the PyPy +interpreter and other ones might have slightly different needs. + + + User Guide ========== @@ -490,8 +509,6 @@ The last two lines are special; they are an internal marker read by ``transactional_memory.print_abort_info()``. -These statistics are not printed out for the main thread, for now. - Reference to implementation details ----------------------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -22,3 +22,31 @@ conditional_calls). I would expect the net result to be a slight slow-down on some simple benchmarks and a speed-up on bigger programs. + +.. branch: ec-threadlocal +Change the executioncontext's lookup to be done by reading a thread- +local variable (which is implemented in C using '__thread' if +possible, and pthread_getspecific() otherwise). 
On Linux x86 and +x86-64, the JIT backend has a special optimization that lets it emit +directly a single MOV from a %gs- or %fs-based address. It seems +actually to give a good boost in performance. + +.. branch: fast-gil +A faster way to handle the GIL, particularly in JIT code. The GIL is +now a composite of two concepts: a global number (it's just set from +1 to 0 and back around CALL_RELEASE_GIL), and a real mutex. If there +are threads waiting to acquire the GIL, one of them is actively +checking the global number every 0.1 ms to 1 ms. Overall, JIT loops +full of external function calls now run a bit faster (if no thread was +started yet), or a *lot* faster (if threads were started already). + +.. branch: jit-get-errno +Optimize the errno handling in the JIT, notably around external +function calls. Linux-only. + +.. branch: disable_pythonapi +Remove non-functioning ctypes.pyhonapi and ctypes.PyDLL, document this +incompatibility with cpython. Recast sys.dllhandle to an int. + +.. branch: scalar-operations +Fix performance regression on ufunc(, ) in numpy. diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -132,19 +132,23 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download http://www.gzip.org/zlib/zlib-1.2.3.tar.gz and extract it in -the base directory. Then compile:: +the base directory. Then compile as a static library:: cd zlib-1.2.3 nmake -f win32\Makefile.msc - copy zlib1.dll \zlib.dll + copy zlib1.lib + copy zlib.h zconf.h The bz2 compression library ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Get the same version of bz2 used by python and compile as a static library:: svn export http://svn.python.org/projects/external/bzip2-1.0.6 cd bzip2-1.0.6 nmake -f makefile.msc - copy bzip.dll \bzip.dll + copy libbz2.lib + copy bzlib.h + The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -166,7 +170,8 @@ is actually enough for pypy). Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in -your PATH. 
+your PATH, ``win32\bin\release\libexpat.lib`` somewhere in LIB, and +both ``lib\expat.h`` and ``lib\expat_external.h`` somewhere in INCLUDE. The OpenSSL library ~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -30,8 +30,6 @@ if w_dict is not None: # for tests w_entry_point = space.getitem(w_dict, space.wrap('entry_point')) w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel')) - w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish)) - w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup)) withjit = space.config.objspace.usemodules.pypyjit def entry_point(argv): @@ -53,7 +51,7 @@ argv = argv[:1] + argv[3:] try: try: - space.call_function(w_run_toplevel, w_call_startup_gateway) + space.startup() w_executable = space.wrap(argv[0]) w_argv = space.newlist([space.wrap(s) for s in argv[1:]]) w_exitcode = space.call_function(w_entry_point, w_executable, w_argv) @@ -69,7 +67,7 @@ return 1 finally: try: - space.call_function(w_run_toplevel, w_call_finish_gateway) + space.finish() except OperationError, e: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) @@ -184,11 +182,6 @@ 'pypy_thread_attach': pypy_thread_attach, 'pypy_setup_home': pypy_setup_home} -def call_finish(space): - space.finish() - -def call_startup(space): - space.startup() # _____ Define and setup target ___ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -395,6 +395,7 @@ def startup(self): # To be called before using the space + self.threadlocals.enter_thread(self) # Initialize already imported builtin modules from pypy.interpreter.module import Module @@ -639,30 +640,36 @@ """NOT_RPYTHON: Abstract method that should put some minimal content into the w_builtins.""" - @jit.loop_invariant def 
getexecutioncontext(self): "Return what we consider to be the active execution context." # Important: the annotator must not see a prebuilt ExecutionContext: # you should not see frames while you translate # so we make sure that the threadlocals never *have* an # ExecutionContext during translation. - if self.config.translating and not we_are_translated(): - assert self.threadlocals.getvalue() is None, ( - "threadlocals got an ExecutionContext during translation!") - try: - return self._ec_during_translation - except AttributeError: - ec = self.createexecutioncontext() - self._ec_during_translation = ec + if not we_are_translated(): + if self.config.translating: + assert self.threadlocals.get_ec() is None, ( + "threadlocals got an ExecutionContext during translation!") + try: + return self._ec_during_translation + except AttributeError: + ec = self.createexecutioncontext() + self._ec_during_translation = ec + return ec + else: + ec = self.threadlocals.get_ec() + if ec is None: + self.threadlocals.enter_thread(self) + ec = self.threadlocals.get_ec() return ec - # normal case follows. The 'thread' module installs a real - # thread-local object in self.threadlocals, so this builds - # and caches a new ec in each thread. - ec = self.threadlocals.getvalue() - if ec is None: - ec = self.createexecutioncontext() - self.threadlocals.setvalue(ec) - return ec + else: + # translated case follows. self.threadlocals is either from + # 'pypy.interpreter.miscutils' or 'pypy.module.thread.threadlocals'. + # the result is assumed to be non-null: enter_thread() was called + # by space.startup(). + ec = self.threadlocals.get_ec() + assert ec is not None + return ec def _freeze_(self): return True @@ -963,6 +970,13 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_no_unpack(self, w_iterable): + """ Same as listview() if cheap. If 'w_iterable' is something like + a generator, for example, then return None instead. + May return None anyway. 
+ """ + return None + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. @@ -1487,9 +1501,7 @@ return buf.as_str() def str_or_None_w(self, w_obj): - if self.is_w(w_obj, self.w_None): - return None - return self.str_w(w_obj) + return None if self.is_none(w_obj) else self.str_w(w_obj) def str_w(self, w_obj): return w_obj.str_w(self) diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -496,6 +496,13 @@ """ +class UserDelCallback(object): + def __init__(self, w_obj, callback, descrname): + self.w_obj = w_obj + self.callback = callback + self.descrname = descrname + self.next = None + class UserDelAction(AsyncAction): """An action that invokes all pending app-level __del__() method. This is done as an action instead of immediately when the @@ -506,12 +513,18 @@ def __init__(self, space): AsyncAction.__init__(self, space) - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None self.finalizers_lock_count = 0 self.enabled_at_app_level = True def register_callback(self, w_obj, callback, descrname): - self.dying_objects.append((w_obj, callback, descrname)) + cb = UserDelCallback(w_obj, callback, descrname) + if self.dying_objects_last is None: + self.dying_objects = cb + else: + self.dying_objects_last.next = cb + self.dying_objects_last = cb self.fire() def perform(self, executioncontext, frame): @@ -525,13 +538,33 @@ # avoid too deep recursions of the kind of __del__ being called # while in the middle of another __del__ call. 
pending = self.dying_objects - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None space = self.space - for i in range(len(pending)): - w_obj, callback, descrname = pending[i] - pending[i] = (None, None, None) + while pending is not None: try: - callback(w_obj) + pending.callback(pending.w_obj) except OperationError, e: - e.write_unraisable(space, descrname, w_obj) + e.write_unraisable(space, pending.descrname, pending.w_obj) e.clear(space) # break up reference cycles + pending = pending.next + # + # Note: 'dying_objects' used to be just a regular list instead + # of a chained list. This was the cause of "leaks" if we have a + # program that constantly creates new objects with finalizers. + # Here is why: say 'dying_objects' is a long list, and there + # are n instances in it. Then we spend some time in this + # function, possibly triggering more GCs, but keeping the list + # of length n alive. Then the list is suddenly freed at the + # end, and we return to the user program. At this point the + # GC limit is still very high, because just before, there was + # a list of length n alive. Assume that the program continues + # to allocate a lot of instances with finalizers. The high GC + # limit means that it could allocate a lot of instances before + # reaching it --- possibly more than n. So the whole procedure + # repeats with higher and higher values of n. + # + # This does not occur in the current implementation because + # there is no list of length n: if n is large, then the GC + # will run several times while walking the list, but it will + # see lower and lower memory usage, with no lower bound of n. 
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -895,7 +895,7 @@ "use unwrap_spec(...=WrappedDefault(default))" % ( self._code.identifier, name, defaultval)) defs_w.append(None) - else: + elif name != '__args__' and name != 'args_w': defs_w.append(space.wrap(defaultval)) if self._code._unwrap_spec: UNDEFINED = object() diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -61,6 +61,13 @@ return self.send_ex(w_arg) def send_ex(self, w_arg, operr=None): + pycode = self.pycode + if jit.we_are_jitted() and should_not_inline(pycode): + generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg, + operr=operr, pycode=pycode) + return self._send_ex(w_arg, operr) + + def _send_ex(self, w_arg, operr): space = self.space if self.running: raise OperationError(space.w_ValueError, @@ -72,8 +79,7 @@ if operr is None: operr = OperationError(space.w_StopIteration, space.w_None) raise operr - # XXX it's not clear that last_instr should be promoted at all - # but as long as it is necessary for call_assembler, let's do it early + last_instr = jit.promote(frame.last_instr) if last_instr == -1: if w_arg and not space.is_w(w_arg, space.w_None): @@ -214,3 +220,38 @@ "interrupting generator of ") break block = block.previous + + + +def get_printable_location_genentry(bytecode): + return '%s ' % (bytecode.get_repr(),) +generatorentry_driver = jit.JitDriver(greens=['pycode'], + reds=['gen', 'w_arg', 'operr'], + get_printable_location = + get_printable_location_genentry, + name='generatorentry') + +from pypy.tool.stdlib_opcode import HAVE_ARGUMENT, opmap +YIELD_VALUE = opmap['YIELD_VALUE'] + + at jit.elidable_promote() +def should_not_inline(pycode): + # Should not inline generators with more than one "yield", + # as an approximative fix (see issue #1782). 
There are cases + # where it slows things down; for example calls to a simple + # generator that just produces a few simple values with a few + # consecutive "yield" statements. It fixes the near-infinite + # slow-down in issue #1782, though... + count_yields = 0 + code = pycode.co_code + n = len(code) + i = 0 + while i < n: + c = code[i] + op = ord(c) + if op == YIELD_VALUE: + count_yields += 1 + i += 1 + if op >= HAVE_ARGUMENT: + i += 2 + return count_yields >= 2 diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py --- a/pypy/interpreter/miscutils.py +++ b/pypy/interpreter/miscutils.py @@ -11,11 +11,14 @@ """ _value = None - def getvalue(self): + def get_ec(self): return self._value - def setvalue(self, value): - self._value = value + def enter_thread(self, space): + self._value = space.createexecutioncontext() + + def try_enter_thread(self, space): + return False def signals_enabled(self): return True diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -726,6 +726,22 @@ never_called py.test.raises(AssertionError, space.wrap, gateway.interp2app_temp(g)) + def test_unwrap_spec_default_applevel_bug2(self): + space = self.space + def g(space, w_x, w_y=None, __args__=None): + return w_x + w_g = space.wrap(gateway.interp2app_temp(g)) + w_42 = space.call_function(w_g, space.wrap(42)) + assert space.int_w(w_42) == 42 + py.test.raises(gateway.OperationError, space.call_function, w_g) + # + def g(space, w_x, w_y=None, args_w=None): + return w_x + w_g = space.wrap(gateway.interp2app_temp(g)) + w_42 = space.call_function(w_g, space.wrap(42)) + assert space.int_w(w_42) == 42 + py.test.raises(gateway.OperationError, space.call_function, w_g) + def test_interp2app_doc(self): space = self.space def f(space, w_x): diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- 
a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -17,7 +17,7 @@ yield 1 assert g.gi_running g = f() - assert g.gi_code is f.func_code + assert g.gi_code is f.__code__ assert g.__name__ == 'f' assert g.gi_frame is not None assert not g.gi_running @@ -26,7 +26,7 @@ raises(StopIteration, g.next) assert not g.gi_running assert g.gi_frame is None - assert g.gi_code is f.func_code + assert g.gi_code is f.__code__ assert g.__name__ == 'f' def test_generator3(self): @@ -278,4 +278,21 @@ def f(): yield 1 raise StopIteration - assert tuple(f()) == (1,) \ No newline at end of file + assert tuple(f()) == (1,) + + +def test_should_not_inline(space): + from pypy.interpreter.generator import should_not_inline + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + return g.__code__ + ''') + assert should_not_inline(w_co) == False + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + yield x + 6 + return g.__code__ + ''') + assert should_not_inline(w_co) == True diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -73,13 +73,12 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', - 'list_strategy' : 'interp_magic.list_strategy', 'validate_fd' : 'interp_magic.validate_fd', 'resizelist_hint' : 'interp_magic.resizelist_hint', 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', - 'dictstrategy' : 'interp_dict.dictstrategy', + 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', } diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -1,7 
+1,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.objspace.std.dictmultiobject import W_DictMultiObject @unwrap_spec(type=str) def newdict(space, type): @@ -31,13 +30,3 @@ return space.newdict(strdict=True) else: raise oefmt(space.w_TypeError, "unknown type of dict %s", type) - -def dictstrategy(space, w_obj): - """ dictstrategy(dict) - - show the underlaying strategy used by a dict object - """ - if not isinstance(w_obj, W_DictMultiObject): - raise OperationError(space.w_TypeError, - space.wrap("expecting dict object")) - return space.wrap('%r' % (w_obj.strategy,)) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -2,7 +2,9 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pyframe import PyFrame from rpython.rlib.objectmodel import we_are_translated +from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.setobject import W_BaseSetObject from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -70,12 +72,23 @@ def do_what_I_mean(space): return space.wrap(42) -def list_strategy(space, w_list): - if isinstance(w_list, W_ListObject): - return space.wrap(w_list.strategy._applevel_repr) + +def strategy(space, w_obj): + """ strategy(dict or list or set) + + Return the underlying strategy currently used by a dict, list or set object + """ + if isinstance(w_obj, W_DictMultiObject): + name = w_obj.strategy.__class__.__name__ + elif isinstance(w_obj, W_ListObject): + name = w_obj.strategy.__class__.__name__ + elif isinstance(w_obj, W_BaseSetObject): + name = w_obj.strategy.__class__.__name__ else: - w_msg = space.wrap("Can only get the list strategy of a list") - raise 
OperationError(space.w_TypeError, w_msg) + raise OperationError(space.w_TypeError, + space.wrap("expecting dict or list or set object")) + return space.wrap(name) + @unwrap_spec(fd='c_int') def validate_fd(space, fd): diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -46,26 +46,42 @@ assert x == 42 def test_list_strategy(self): - from __pypy__ import list_strategy + from __pypy__ import strategy l = [1, 2, 3] - assert list_strategy(l) == "int" + assert strategy(l) == "IntegerListStrategy" l = ["a", "b", "c"] - assert list_strategy(l) == "bytes" + assert strategy(l) == "BytesListStrategy" l = [u"a", u"b", u"c"] - assert list_strategy(l) == "unicode" + assert strategy(l) == "UnicodeListStrategy" l = [1.1, 2.2, 3.3] - assert list_strategy(l) == "float" + assert strategy(l) == "FloatListStrategy" l = range(3) - assert list_strategy(l) == "simple_range" + assert strategy(l) == "SimpleRangeListStrategy" l = range(1, 2) - assert list_strategy(l) == "range" + assert strategy(l) == "RangeListStrategy" l = [1, "b", 3] - assert list_strategy(l) == "object" + assert strategy(l) == "ObjectListStrategy" l = [] - assert list_strategy(l) == "empty" + assert strategy(l) == "EmptyListStrategy" o = 5 - raises(TypeError, list_strategy, 5) + raises(TypeError, strategy, 5) + + def test_dict_strategy(self): + from __pypy__ import strategy + + d = {} + assert strategy(d) == "EmptyDictStrategy" + d = {1: None, 5: None} + assert strategy(d) == "IntDictStrategy" + + def test_set_strategy(self): + from __pypy__ import strategy + + s = set() + assert strategy(s) == "EmptySetStrategy" + s = set([2, 3, 4]) + assert strategy(s) == "IntegerSetStrategy" class AppTestJitFeatures(object): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ 
b/pypy/module/_cffi_backend/__init__.py @@ -8,7 +8,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.8.2")', + '__version__': 'space.wrap("0.8.6")', 'load_library': 'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -183,9 +183,12 @@ misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) return # + must_leave = False ec = None + space = callback.space try: - ec = cerrno.get_errno_container(callback.space) + must_leave = space.threadlocals.try_enter_thread(space) + ec = cerrno.get_errno_container(space) cerrno.save_errno_into(ec, e) extra_line = '' try: @@ -206,5 +209,7 @@ except OSError: pass callback.write_error_return_value(ll_res) + if must_leave: + space.threadlocals.leave_thread(space) if ec is not None: cerrno.restore_errno_from(ec) diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -4,7 +4,7 @@ import sys -from rpython.rlib import jit, clibffi, jit_libffi +from rpython.rlib import jit, clibffi, jit_libffi, rgc from rpython.rlib.jit_libffi import (CIF_DESCRIPTION, CIF_DESCRIPTION_P, FFI_TYPE, FFI_TYPE_P, FFI_TYPE_PP, SIZE_OF_FFI_ARG) from rpython.rlib.objectmodel import we_are_translated, instantiate @@ -63,6 +63,7 @@ CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) return ctypefunc + @rgc.must_be_light_finalizer def __del__(self): if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') @@ -156,8 +157,8 @@ data = rffi.ptradd(buffer, cif_descr.exchange_args[i]) flag = get_mustfree_flag(data) if flag == 1: - raw_string = rffi.cast(rffi.CCHARPP, data)[0] - lltype.free(raw_string, flavor='raw') + raw_cdata = rffi.cast(rffi.CCHARPP, data)[0] + lltype.free(raw_cdata, flavor='raw') lltype.free(buffer, flavor='raw') return w_res diff --git 
a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3188,4 +3188,4 @@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8.2" + assert __version__ == "0.8.6" diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -458,6 +458,10 @@ self._check_init(space) return space.call_method(self.w_buffer, "seekable") + def isatty_w(self, space): + self._check_init(space) + return space.call_method(self.w_buffer, "isatty") + def fileno_w(self, space): self._check_init(space) return space.call_method(self.w_buffer, "fileno") @@ -1035,6 +1039,7 @@ readable = interp2app(W_TextIOWrapper.readable_w), writable = interp2app(W_TextIOWrapper.writable_w), seekable = interp2app(W_TextIOWrapper.seekable_w), + isatty = interp2app(W_TextIOWrapper.isatty_w), fileno = interp2app(W_TextIOWrapper.fileno_w), name = GetSetProperty(W_TextIOWrapper.name_get_w), buffer = interp_attrproperty_w("w_buffer", cls=W_TextIOWrapper), diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -25,6 +25,12 @@ t = _io.TextIOWrapper(b) assert t.readable() assert t.seekable() + # + class CustomFile(object): + def isatty(self): return 'YES' + readable = writable = seekable = lambda self: False + t = _io.TextIOWrapper(CustomFile()) + assert t.isatty() == 'YES' def test_default_implementations(self): import _io diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -11,7 +11,7 @@ from rpython.rlib.rtimer import read_timestamp, _is_64_bit from rpython.rtyper.lltypesystem import 
rffi, lltype from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.conftest import cdir +from rpython.translator import cdir from rpython.rlib.rarithmetic import r_longlong import time, sys diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -508,7 +508,10 @@ argshapes = unpack_argshapes(space, w_args) resshape = unpack_resshape(space, w_res) ffi_args = [shape.get_basic_ffi_type() for shape in argshapes] - ffi_res = resshape.get_basic_ffi_type() + if resshape is not None: + ffi_res = resshape.get_basic_ffi_type() + else: + ffi_res = ffi_type_void try: ptr = RawFuncPtr('???', ffi_args, ffi_res, rffi.cast(rffi.VOIDP, addr), flags) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -353,6 +353,11 @@ assert ptr[0] == rawcall.buffer ptr.free() + def test_raw_callable_returning_void(self): + import _rawffi + _rawffi.FuncPtr(0, [], None) + # assert did not crash + def test_short_addition(self): import _rawffi lib = _rawffi.CDLL(self.lib_name) diff --git a/pypy/module/_socket/__init__.py b/pypy/module/_socket/__init__.py --- a/pypy/module/_socket/__init__.py +++ b/pypy/module/_socket/__init__.py @@ -6,8 +6,8 @@ } interpleveldefs = { - 'SocketType': 'interp_socket.W_RSocket', - 'socket' : 'interp_socket.W_RSocket', + 'SocketType': 'interp_socket.W_Socket', + 'socket' : 'interp_socket.W_Socket', 'error' : 'interp_socket.get_error(space, "error")', 'herror' : 'interp_socket.get_error(space, "herror")', 'gaierror' : 'interp_socket.get_error(space, "gaierror")', diff --git a/pypy/module/_socket/interp_func.py b/pypy/module/_socket/interp_func.py --- a/pypy/module/_socket/interp_func.py +++ b/pypy/module/_socket/interp_func.py @@ -1,8 +1,12 @@ -from pypy.interpreter.gateway import 
unwrap_spec, WrappedDefault -from pypy.module._socket.interp_socket import converted_error, W_RSocket, addr_as_object, ipaddr_from_object from rpython.rlib import rsocket from rpython.rlib.rsocket import SocketError, INVALID_SOCKET + from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import unwrap_spec, WrappedDefault +from pypy.module._socket.interp_socket import ( + converted_error, W_Socket, addr_as_object, ipaddr_from_object +) + def gethostname(space): """gethostname() -> string @@ -136,10 +140,10 @@ The remaining arguments are the same as for socket(). """ try: - sock = rsocket.fromfd(fd, family, type, proto, W_RSocket) + sock = rsocket.fromfd(fd, family, type, proto) except SocketError, e: raise converted_error(space, e) - return space.wrap(sock) + return space.wrap(W_Socket(sock)) @unwrap_spec(family=int, type=int, proto=int) def socketpair(space, family=rsocket.socketpair_default_family, @@ -153,10 +157,13 @@ AF_UNIX if defined on the platform; otherwise, the default is AF_INET. """ try: - sock1, sock2 = rsocket.socketpair(family, type, proto, W_RSocket) + sock1, sock2 = rsocket.socketpair(family, type, proto) except SocketError, e: raise converted_error(space, e) - return space.newtuple([space.wrap(sock1), space.wrap(sock2)]) + return space.newtuple([ + space.wrap(W_Socket(sock1)), + space.wrap(W_Socket(sock2)) + ]) # The following 4 functions refuse all negative numbers, like CPython 2.6. 
# They could also check that the argument is not too large, but CPython 2.6 diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -1,14 +1,18 @@ +from rpython.rlib import rsocket +from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rsocket import ( + RSocket, AF_INET, SOCK_STREAM, SocketError, SocketErrorWithErrno, + RSocketError +) +from rpython.rtyper.lltypesystem import lltype, rffi + +from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef, make_weakref_descr,\ - interp_attrproperty +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from rpython.rlib.rarithmetic import intmask -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib import rsocket -from rpython.rlib.rsocket import RSocket, AF_INET, SOCK_STREAM -from rpython.rlib.rsocket import SocketError, SocketErrorWithErrno, RSocketError -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter import gateway +from pypy.interpreter.typedef import ( + GetSetProperty, TypeDef, make_weakref_descr +) # XXX Hack to seperate rpython and pypy @@ -124,10 +128,18 @@ return addr -class W_RSocket(W_Root, RSocket): - def __del__(self): - self.clear_all_weakrefs() - RSocket.__del__(self) +class W_Socket(W_Root): + def __init__(self, sock): + self.sock = sock + + def get_type_w(self, space): + return space.wrap(self.sock.type) + + def get_proto_w(self, space): + return space.wrap(self.sock.proto) + + def get_family_w(self, space): + return space.wrap(self.sock.family) def accept_w(self, space): """accept() -> (socket object, address info) @@ -137,22 +149,22 @@ info is a pair (hostaddr, port). 
""" try: - fd, addr = self.accept() + fd, addr = self.sock.accept() sock = rsocket.make_socket( - fd, self.family, self.type, self.proto, W_RSocket) - return space.newtuple([space.wrap(sock), + fd, self.sock.family, self.sock.type, self.sock.proto) + return space.newtuple([space.wrap(W_Socket(sock)), addr_as_object(addr, sock.fd, space)]) - except SocketError, e: + except SocketError as e: raise converted_error(space, e) # convert an Address into an app-level object def addr_as_object(self, space, address): - return addr_as_object(address, self.fd, space) + return addr_as_object(address, self.sock.fd, space) # convert an app-level object into an Address From noreply at buildbot.pypy.org Tue Jul 22 04:34:27 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Tue, 22 Jul 2014 04:34:27 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: add zero_gc_pointers for array of GcPointer Message-ID: <20140722023427.06E431C06A7@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72478:50b677698120 Date: 2014-07-21 17:49 +0000 http://bitbucket.org/pypy/pypy/changeset/50b677698120/ Log: add zero_gc_pointers for array of GcPointer diff --git a/rpython/jit/metainterp/gc.py b/rpython/jit/metainterp/gc.py --- a/rpython/jit/metainterp/gc.py +++ b/rpython/jit/metainterp/gc.py @@ -26,7 +26,7 @@ malloc_zero_filled = True class GC_incminimark(GcDescription): - malloc_zero_filled = True + malloc_zero_filled = False def get_description(config): diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1266,20 +1266,38 @@ def gen_zero_gc_pointers(TYPE, v, llops, previous_steps=None): if previous_steps is None: previous_steps = [] - assert isinstance(TYPE, lltype.Struct) - for name in TYPE._names: - c_name = rmodel.inputconst(lltype.Void, name) - FIELD = getattr(TYPE, name) - if isinstance(FIELD, 
lltype.Ptr) and FIELD._needsgc(): - c_null = rmodel.inputconst(FIELD, lltype.nullptr(FIELD.TO)) - if not previous_steps: - llops.genop('bare_setfield', [v, c_name, c_null]) - else: - llops.genop('bare_setinteriorfield', + assert (isinstance(TYPE, lltype.Struct) or isinstance(TYPE, lltype.Array)) + if isinstance(TYPE, lltype.Struct): + for name in TYPE._names: + c_name = rmodel.inputconst(lltype.Void, name) + FIELD = getattr(TYPE, name) + #handle ptr field in GcStruct + if isinstance(FIELD, lltype.Ptr) and FIELD._needsgc(): + c_null = rmodel.inputconst(FIELD, lltype.nullptr(FIELD.TO)) + if not previous_steps: + llops.genop('bare_setfield', [v, c_name, c_null]) + else: + llops.genop('bare_setinteriorfield', [v] + previous_steps + [c_name, c_null]) - elif isinstance(FIELD, lltype.Struct): - gen_zero_gc_pointers(FIELD, v, llops, previous_steps + [c_name]) - + #handle inside GcStruct field + elif isinstance(FIELD, lltype.Struct): + gen_zero_gc_pointers(FIELD, v, llops, previous_steps + [c_name]) + #handle inside GcArray field + elif isinstance(FIELD, lltype.Array): + gen_zero_gc_pointers(FIELD, v, llops, previous_steps + [c_name]) + if isinstance(TYPE, lltype.Array): + ITEM = TYPE.OF + if previous_steps: + v = llop.genop('getinteriorfield',[v]+previous_steps) + arr_size = llops.genop('getarraysize',[v]) + for i in range(arr_size): + #handle an array of GcPtr + if isinstance(ITEM, lltype.Ptr) and ITEM._needsgc(): + c_null = rmodel.inputconst(ITEM, lltype.nullptr(ITEM.TO)) + llops.genop('bare_setarrayitem',[v, i, c_null]) + if isinstance(ITEM, lltype.Struct) or isinstance(ITEM, lltype.GcArray): + array_item = llops.genop('getarrayitem',[v,i]) + gen_zero_gc_pointers(FIELD, array_item, llops, previous_steps) # ____________________________________________________________ From noreply at buildbot.pypy.org Tue Jul 22 12:14:03 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 22 Jul 2014 12:14:03 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add info 
Message-ID: <20140722101403.CC7EF1D231E@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5363:ae1bfc1320b2 Date: 2014-07-22 12:13 +0200 http://bitbucket.org/pypy/extradoc/changeset/ae1bfc1320b2/ Log: Add info diff --git a/talk/ep2014/status/talk.pdf b/talk/ep2014/status/talk.pdf index f9532b263b7bf1aa53ba0d807a875af9991c6a39..8526e0ddddaa7ea79199e3af9c68b27da1de432d GIT binary patch [cut] diff --git a/talk/ep2014/status/talk.rst b/talk/ep2014/status/talk.rst --- a/talk/ep2014/status/talk.rst +++ b/talk/ep2014/status/talk.rst @@ -4,6 +4,17 @@ PyPy Status ================================ +Who Am I +-------- + +* rguillebert on twitter and irc + +* GSoC student on PyPy/Cython + +* PyPy contributor since 2011 + +* Worked on Py3k and Numpy + PyPy is not dead ---------------- @@ -256,7 +267,7 @@ - Preliminary versions of pypy-jit-stm available -- The overhead is still a bit too high and hard to precict +- The overhead is still a bit too high and hard to predict - Lots of polishing needed @@ -272,4 +283,6 @@ - http://morepypy.blogspot.com/ +- #pypy at freenode.net + - Any question? 
From noreply at buildbot.pypy.org Tue Jul 22 12:22:06 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 22 Jul 2014 12:22:06 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add baroquesoftware Message-ID: <20140722102206.E2A171D24AF@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5364:3334209f26d3 Date: 2014-07-22 12:22 +0200 http://bitbucket.org/pypy/extradoc/changeset/3334209f26d3/ Log: Add baroquesoftware diff --git a/talk/ep2014/status/talk.pdf b/talk/ep2014/status/talk.pdf index 8526e0ddddaa7ea79199e3af9c68b27da1de432d..ddbec1104c2a94e56c7ec21e61c879b534382932 GIT binary patch [cut] diff --git a/talk/ep2014/status/talk.rst b/talk/ep2014/status/talk.rst --- a/talk/ep2014/status/talk.rst +++ b/talk/ep2014/status/talk.rst @@ -143,6 +143,8 @@ improving support in parts of the Python or non-Python interpreters, etc. +- http://baroquesoftware.com + Current status --------------- From noreply at buildbot.pypy.org Tue Jul 22 13:41:15 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 22 Jul 2014 13:41:15 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: add is_atomic() Message-ID: <20140722114115.342A61C101E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r72479:7e8ed49b6d70 Date: 2014-07-22 13:40 +0200 http://bitbucket.org/pypy/pypy/changeset/7e8ed49b6d70/ Log: add is_atomic() diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -42,6 +42,7 @@ 'reset_longest_abort_info':'interp_atomic.reset_longest_abort_info', 'getsegmentlimit': 'interp_atomic.getsegmentlimit', 'hint_commit_soon': 'interp_atomic.hint_commit_soon', + 'is_atomic': 'interp_atomic.is_atomic', 'error': 'space.fromcache(pypy.module.thread.error.Cache).w_error', } def activate(self, space): diff --git a/pypy/module/__pypy__/interp_atomic.py b/pypy/module/__pypy__/interp_atomic.py --- 
a/pypy/module/__pypy__/interp_atomic.py +++ b/pypy/module/__pypy__/interp_atomic.py @@ -60,6 +60,15 @@ else: return space.wrap(1) +def is_atomic(space): + if space.config.translation.stm: + from rpython.rlib.rstm import is_atomic + return space.wrap(is_atomic()) + else: + giltl = space.threadlocals + return space.wrap(giltl.is_atomic) + + @unwrap_spec(mintime=float) def longest_abort_info(space, mintime=0.0): if space.config.translation.stm: diff --git a/pypy/module/__pypy__/test/test_atomic.py b/pypy/module/__pypy__/test/test_atomic.py --- a/pypy/module/__pypy__/test/test_atomic.py +++ b/pypy/module/__pypy__/test/test_atomic.py @@ -1,11 +1,10 @@ from __future__ import with_statement from pypy.module.thread.test.support import GenericTestThread -from pypy.module.__pypy__.interp_atomic import bdecode from rpython.rtyper.lltypesystem import rffi def test_bdecode(space): - + from pypy.module.__pypy__.interp_atomic import bdecode def bdec(s, expected): p = rffi.str2charp(s) w_obj, q = bdecode(space, p) @@ -27,7 +26,7 @@ from __pypy__ import thread for atomic in thread.atomic, thread.exclusive_atomic: with atomic: - pass + assert thread.is_atomic() try: with atomic: raise ValueError @@ -38,22 +37,28 @@ from __pypy__ import thread with thread.atomic: with thread.atomic: - pass + assert thread.is_atomic() + assert thread.is_atomic() + assert not thread.is_atomic() def test_nest_composable_below_exclusive(self): from __pypy__ import thread with thread.exclusive_atomic: with thread.atomic: with thread.atomic: - pass + assert thread.is_atomic() + assert thread.is_atomic() + assert thread.is_atomic() + assert not thread.is_atomic() def test_nest_exclusive_fails(self): from __pypy__ import thread try: with thread.exclusive_atomic: with thread.exclusive_atomic: - pass + assert thread.is_atomic() except thread.error, e: + assert not thread.is_atomic() assert e.message == "exclusive_atomic block can't be entered inside another atomic block" def test_nest_exclusive_fails2(self): 
@@ -61,6 +66,8 @@ try: with thread.atomic: with thread.exclusive_atomic: - pass + assert thread.is_atomic() + assert thread.is_atomic() except thread.error, e: + assert not thread.is_atomic() assert e.message == "exclusive_atomic block can't be entered inside another atomic block" From noreply at buildbot.pypy.org Tue Jul 22 13:58:35 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 22 Jul 2014 13:58:35 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Kill stuff Message-ID: <20140722115835.16D301C101E@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5365:dacc44cdb7af Date: 2014-07-22 13:57 +0200 http://bitbucket.org/pypy/extradoc/changeset/dacc44cdb7af/ Log: Kill stuff diff --git a/talk/ep2014/status/talk.pdf b/talk/ep2014/status/talk.pdf index ddbec1104c2a94e56c7ec21e61c879b534382932..447f7b485ff254fb76dc366dbce2dc84e936e122 GIT binary patch [cut] diff --git a/talk/ep2014/status/talk.rst b/talk/ep2014/status/talk.rst --- a/talk/ep2014/status/talk.rst +++ b/talk/ep2014/status/talk.rst @@ -9,11 +9,11 @@ * rguillebert on twitter and irc +* PyPy contributor since 2011 + * GSoC student on PyPy/Cython -* PyPy contributor since 2011 - -* Worked on Py3k and Numpy +* Worked on Py3k and Numpy support PyPy is not dead ---------------- @@ -26,12 +26,6 @@ - PyPy is healthy and alive -|pause| - -- WARNING: This talk is boring - - * "it just works" - What is PyPy? -------------- @@ -49,11 +43,6 @@ - **FAST** -* Whatever (dynamic) language you want - - - smalltalk, prolog, PHP, javascript, ... - - PyPy: past two years (1) ----------------------------- @@ -63,16 +52,12 @@ * stackless + JIT (eventlet, gevent, ...) 
-|pause| - - PyPy 2.1 (July 2013) * stable ARM * py3k (3.2.3), numpy, general improvements, bugfixes -|pause| - - PyPy 2.2 (November 2013) * incremental GC, faster JSON @@ -87,13 +72,13 @@ - PyPy 2.3 (May 2014) -- Lot of internal refactoring + * Lot of internal refactoring -- C API for embedding + * C API for embedding - * pypy + uWSGI (thanks to Roberto De Ioris) + - pypy + uWSGI (thanks to Roberto De Ioris) -- the usual, boring, general improvements + * the usual, boring, general improvements More PyPy-powered languages @@ -231,26 +216,6 @@ - Fast on CPython, super-fast on PyPy - -cppyy ------- - -- Interface to C++ - -- Based on reflection, no need to write wrappers - -- PyPy-only, similar to PyCintex for CPython - -- Main use case: ROOT - - * http://root.cern.ch - - * "a set of OO frameworks with all the functionality needed to handle and - analyze large amounts of data in a very efficient way" - -- 3x faster than CPython - - The future: STM ---------------- From noreply at buildbot.pypy.org Tue Jul 22 14:07:29 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 22 Jul 2014 14:07:29 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Kill, kill, kill Message-ID: <20140722120729.73E451C3273@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5366:71780b755a05 Date: 2014-07-22 14:07 +0200 http://bitbucket.org/pypy/extradoc/changeset/71780b755a05/ Log: Kill, kill, kill diff --git a/talk/ep2014/status/talk.pdf b/talk/ep2014/status/talk.pdf index 447f7b485ff254fb76dc366dbce2dc84e936e122..dddab2362990645489cffbd5a4fb451b9b8b7751 GIT binary patch [cut] diff --git a/talk/ep2014/status/talk.rst b/talk/ep2014/status/talk.rst --- a/talk/ep2014/status/talk.rst +++ b/talk/ep2014/status/talk.rst @@ -102,8 +102,6 @@ * http://hippyvm.com/ - - Fundraising campaign --------------------- @@ -147,7 +145,7 @@ - Lots of CFFI modules around: - * pygame_cffi, psycopg2_cffi, lxml + * pygame_cffi, psycopg2cffi, lxml - numpy: in-progress 
(more later) @@ -178,9 +176,7 @@ - as usual, in-progress -- ~80% of numpy implemented - - * 2336 passing tests out of 3265 +- 2336 passing tests out of 3265 * http://buildbot.pypy.org/numpy-status/latest.html @@ -214,7 +210,7 @@ - Alternative to C-API, ctypes, Cython, etc. -- Fast on CPython, super-fast on PyPy +- Fast on CPython, super-fast on PyPy, Jython support in the future The future: STM ---------------- @@ -223,26 +219,16 @@ - Strategy to solve race conditions -- "Finger crossed", rollback in case of conflicts - - On-going research project * by Armin Rigo and Remi Meier -Current status for STM ----------------------- - - Preliminary versions of pypy-jit-stm available -- The overhead is still a bit too high and hard to predict - - Lots of polishing needed -- More fundamentally, how to best use it is still unknown - - See talk tomorrow - Contacts, Q&A -------------- From noreply at buildbot.pypy.org Tue Jul 22 14:12:18 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 22 Jul 2014 14:12:18 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Kill and rephrase Message-ID: <20140722121218.F1FC91C3273@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5367:5ab06d210dc6 Date: 2014-07-22 14:12 +0200 http://bitbucket.org/pypy/extradoc/changeset/5ab06d210dc6/ Log: Kill and rephrase diff --git a/talk/ep2014/status/talk.pdf b/talk/ep2014/status/talk.pdf index dddab2362990645489cffbd5a4fb451b9b8b7751..61bb368d0398a0f044b40b08e1e24e1e24ad1fd3 GIT binary patch [cut] diff --git a/talk/ep2014/status/talk.rst b/talk/ep2014/status/talk.rst --- a/talk/ep2014/status/talk.rst +++ b/talk/ep2014/status/talk.rst @@ -217,16 +217,12 @@ - Software Transactional Memory -- Strategy to solve race conditions +- Solving the GIL problem -- On-going research project - - * by Armin Rigo and Remi Meier +- Without bringing the threads and locks mess - Preliminary versions of pypy-jit-stm available -- Lots of polishing needed - - See talk 
tomorrow Contacts, Q&A From noreply at buildbot.pypy.org Tue Jul 22 14:15:10 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 22 Jul 2014 14:15:10 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Commit modified pdf Message-ID: <20140722121510.40AB31C3273@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5368:9d5389abdfea Date: 2014-07-22 14:15 +0200 http://bitbucket.org/pypy/extradoc/changeset/9d5389abdfea/ Log: Commit modified pdf diff --git a/talk/ep2014/status/talk.pdf b/talk/ep2014/status/talk.pdf index 61bb368d0398a0f044b40b08e1e24e1e24ad1fd3..1ad85fa0fe34a4a237ca405797b9dbd596e54ece GIT binary patch [cut] From noreply at buildbot.pypy.org Tue Jul 22 14:28:17 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 22 Jul 2014 14:28:17 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Rephrase Message-ID: <20140722122817.E86531C101E@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5369:3ffd361a4541 Date: 2014-07-22 14:28 +0200 http://bitbucket.org/pypy/extradoc/changeset/3ffd361a4541/ Log: Rephrase diff --git a/talk/ep2014/status/talk.pdf b/talk/ep2014/status/talk.pdf index 1ad85fa0fe34a4a237ca405797b9dbd596e54ece..aa6078c72592f3c1704408ce9759135c24a70c28 GIT binary patch [cut] diff --git a/talk/ep2014/status/talk.rst b/talk/ep2014/status/talk.rst --- a/talk/ep2014/status/talk.rst +++ b/talk/ep2014/status/talk.rst @@ -113,7 +113,7 @@ - STM, 2nd call: 3'000 $ of 80'000 $ (4%) -- thank to all donors! +- Thanks to all donors! 
Commercial support ------------------ @@ -166,23 +166,23 @@ - ~7.5x faster than CPython on ARM -- thanks to Raspberry-Pi foundation +- Thanks to the Raspberry-Pi foundation -- distributed as part of Raspbian OS +- Distributed as part of Raspbian OS numpy ----- -- as usual, in-progress +- As usual, in-progress - 2336 passing tests out of 3265 * http://buildbot.pypy.org/numpy-status/latest.html -- just try it +- Just try it -- no scipy :-/ +- No scipy :-/ py3k @@ -192,9 +192,9 @@ - 3.3: branch started, in-progress -- some missing optimizations +- Some missing optimizations - * getting better + * Getting better CFFI From noreply at buildbot.pypy.org Tue Jul 22 15:01:12 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 22 Jul 2014 15:01:12 +0200 (CEST) Subject: [pypy-commit] benchmarks default: minor optimisation Message-ID: <20140722130112.645011D231E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r269:5ccc3f08f3bd Date: 2014-07-22 14:00 +0200 http://bitbucket.org/pypy/benchmarks/changeset/5ccc3f08f3bd/ Log: minor optimisation diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -156,9 +156,10 @@ def __call__(self): - with self._cond: - while not self._done: - self._cond.wait() + if not self._done: + with self._cond: + while not self._done: + self._cond.wait() if self._exception: raise self._exception From noreply at buildbot.pypy.org Tue Jul 22 15:01:13 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 22 Jul 2014 15:01:13 +0200 (CEST) Subject: [pypy-commit] benchmarks default: some tuning :-/ Message-ID: <20140722130113.E2F6E1D231E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r270:34385976da5c Date: 2014-07-22 14:01 +0200 http://bitbucket.org/pypy/benchmarks/changeset/34385976da5c/ Log: some tuning :-/ diff --git a/multithread/quick_sort/quick_sort.py 
b/multithread/quick_sort/quick_sort.py --- a/multithread/quick_sort/quick_sort.py +++ b/multithread/quick_sort/quick_sort.py @@ -64,21 +64,33 @@ r -= 1 fs = [] - #right_amount = 1000 > n // 2 > 505 + # only start futures on a single level: do_futures = level == 4 largs = (xs, l0, r - l0 + 1, level+1) rargs = (xs, l, l0 + n - l, level+1) + leftf, rightf = False, False + if do_futures: - fs.append(Future(qsort_f, *largs)) - fs.append(Future(qsort_f, *rargs)) - else: - if level > 4 and n < 100: + if largs[2] > 2000: + fs.append(Future(qsort_f, *largs)) + leftf = True + + if rargs[2] > 2000: + fs.append(Future(qsort_f, *rargs)) + rightf = True + + if not leftf: + if level >= 4 and largs[2] < 500: with atomic: fs.extend(qsort_f(*largs)) + else: + fs.extend(qsort_f(*largs)) + + if not rightf: + if level >= 4 and rargs[2] < 500: with atomic: fs.extend(qsort_f(*rargs)) else: - fs.extend(qsort_f(*largs)) fs.extend(qsort_f(*rargs)) #print_abort_info(0.0000001) @@ -104,7 +116,6 @@ random.shuffle(to_sort) s = deque(to_sort) # qsort(s, 0, len(s)) - hint_commit_soon() t -= time.time() # start as future, otherwise we get more threads From noreply at buildbot.pypy.org Tue Jul 22 15:01:15 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 22 Jul 2014 15:01:15 +0200 (CEST) Subject: [pypy-commit] benchmarks default: some other weird benchmark Message-ID: <20140722130115.5EC0A1D231E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r271:a1256c44c37d Date: 2014-07-22 15:01 +0200 http://bitbucket.org/pypy/benchmarks/changeset/a1256c44c37d/ Log: some other weird benchmark diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -172,8 +172,10 @@ def _task(self, func, *args, **kwargs): with self._cond: try: + hint_commit_soon() with atomic: self._result = func(*args, **kwargs) + hint_commit_soon() except Exception as e: 
self._exception = e finally: diff --git a/multithread/perlin_noise/perlin_noise.py b/multithread/perlin_noise/perlin_noise.py new file mode 100644 --- /dev/null +++ b/multithread/perlin_noise/perlin_noise.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# from http://rosettacode.org/wiki/Perlin_noise#Python + +import sys +import time, random +from common.abstract_threading import (AtomicFuture, + atomic, Future, set_thread_pool, ThreadPool, + hint_commit_soon, print_abort_info) + +import itertools +from collections import deque + +p = [None] * 512 +permutation = [151,160,137,91,90,15, + 131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23, + 190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33, + 88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166, + 77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244, + 102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196, + 135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123, + 5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42, + 223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9, + 129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228, + 251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107, + 49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254, + 138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180] + +for i in range(256): + p[256+i] = p[i] = permutation[i] + +def perlin_noise(x, y, z): + X = int(x) & 255 # FIND UNIT CUBE THAT + Y = int(y) & 255 # CONTAINS POINT. + Z = int(z) & 255 + x -= int(x) # FIND RELATIVE X,Y,Z + y -= int(y) # OF POINT IN CUBE. + z -= int(z) + u = fade(x) # COMPUTE FADE CURVES + v = fade(y) # FOR EACH OF X,Y,Z. 
+ w = fade(z) + A = p[X ]+Y; AA = p[A]+Z; AB = p[A+1]+Z # HASH COORDINATES OF + B = p[X+1]+Y; BA = p[B]+Z; BB = p[B+1]+Z # THE 8 CUBE CORNERS, + + return lerp(w, lerp(v, lerp(u, grad(p[AA ], x , y , z ), # AND ADD + grad(p[BA ], x-1, y , z )), # BLENDED + lerp(u, grad(p[AB ], x , y-1, z ), # RESULTS + grad(p[BB ], x-1, y-1, z ))),# FROM 8 + lerp(v, lerp(u, grad(p[AA+1], x , y , z-1 ), # CORNERS + grad(p[BA+1], x-1, y , z-1 )), # OF CUBE + lerp(u, grad(p[AB+1], x , y-1, z-1 ), + grad(p[BB+1], x-1, y-1, z-1 )))) + +def fade(t): + return t ** 3 * (t * (t * 6 - 15) + 10) + +def lerp(t, a, b): + return a + t * (b - a) + +def grad(hash, x, y, z): + h = hash & 15 # CONVERT LO 4 BITS OF HASH CODE + u = x if h<8 else y # INTO 12 GRADIENT DIRECTIONS. + v = y if h<4 else (x if h in (12, 14) else z) + return (u if (h&1) == 0 else -u) + (v if (h&2) == 0 else -v) + + + + +def work(n, x): + res = [] + hint_commit_soon() + for y in range(n): + with atomic: + for z in range(n): + res.append(perlin_noise(x, y, z)) + hint_commit_soon() + return res + + +def run(threads=2, n=60): + threads = int(threads) + n = int(n) + + set_thread_pool(ThreadPool(threads)) + + res = [] + for x in range(n): + res.append(Future(work, 400, x)) + res = [f() for f in res] + + # shutdown current pool + set_thread_pool(None) + + +if __name__ == "__main__": + run() From noreply at buildbot.pypy.org Tue Jul 22 17:03:05 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 22 Jul 2014 17:03:05 +0200 (CEST) Subject: [pypy-commit] benchmarks default: optimise with atomic Message-ID: <20140722150305.6D2191D231E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r272:85cc07891f49 Date: 2014-07-22 17:03 +0200 http://bitbucket.org/pypy/benchmarks/changeset/85cc07891f49/ Log: optimise with atomic diff --git a/multithread/perlin_noise/perlin_noise.py b/multithread/perlin_noise/perlin_noise.py --- a/multithread/perlin_noise/perlin_noise.py +++ b/multithread/perlin_noise/perlin_noise.py @@ -68,15 
+68,15 @@ def work(n, x): res = [] hint_commit_soon() - for y in range(n): - with atomic: - for z in range(n): - res.append(perlin_noise(x, y, z)) + with atomic: + for y in xrange(n): + for z in xrange(n): + res.append(perlin_noise(x/3., y/3., z/3.)) hint_commit_soon() return res -def run(threads=2, n=60): +def run(threads=2, n=50): threads = int(threads) n = int(n) From noreply at buildbot.pypy.org Tue Jul 22 17:26:37 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 22 Jul 2014 17:26:37 +0200 (CEST) Subject: [pypy-commit] benchmarks default: finding mersenne primes. finally some unproblematic microbench Message-ID: <20140722152637.C376D1D24AF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r273:fd2da4da8f33 Date: 2014-07-22 17:26 +0200 http://bitbucket.org/pypy/benchmarks/changeset/fd2da4da8f33/ Log: finding mersenne primes. finally some unproblematic microbench diff --git a/multithread/mersenne/mersenne.py b/multithread/mersenne/mersenne.py new file mode 100644 --- /dev/null +++ b/multithread/mersenne/mersenne.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- + + + +import sys +import time, random +from common.abstract_threading import ( + atomic, Future, set_thread_pool, ThreadPool, + hint_commit_soon, print_abort_info) + +import itertools +from collections import deque + + +from sys import stdout +from math import sqrt, log + + +def chunks(iterable, size): + it = iter(iterable) + item = list(itertools.islice(it, size)) + while item: + yield item + item = list(itertools.islice(it, size)) + + +def is_prime ( p ): + if p == 2: return True # Lucas-Lehmer test only works on odd primes + elif p <= 1 or p % 2 == 0: return False + else: + for i in range(3, int(sqrt(p))+1, 2 ): + if p % i == 0: return False + return True + +def is_mersenne_prime ( p ): + if p == 2: + return True + else: + m_p = ( 1 << p ) - 1 + s = 4 + for i in range(3, p+1): + s = (s ** 2 - 2) % m_p + return s == 0 + + +def work(ps, counter, upb_count): + if counter[0] >= 
upb_count: + return + + for p in ps: + with atomic: + if is_prime(p) and is_mersenne_prime(p): + #print p + counter[0] += 1 + if counter[0] >= upb_count: + break + + +def run(threads=2, n=2000): + threads = int(threads) + n = int(n) + + set_thread_pool(ThreadPool(threads)) + + + precision = n # maximum requested number of decimal places of 2 ** MP-1 # + long_bits_width = precision * log(10, 2) + upb_prime = int( long_bits_width - 1 ) / 2 # no unsigned # + upb_count = 45 # find 45 mprimes if int was given enough bits # + + print " Finding Mersenne primes in M[2..%d]:" % upb_prime + + counter = [0] + fs = [] + for ps in chunks(xrange(2, upb_prime+1), 500): + fs.append(Future(work, ps, counter, upb_count)) + + [f() for f in fs] + print "found", counter[0] + + # shutdown current pool + set_thread_pool(None) + + return + + + +if __name__ == "__main__": + run() From noreply at buildbot.pypy.org Tue Jul 22 18:01:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 22 Jul 2014 18:01:40 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Tweaks Message-ID: <20140722160140.87A2A1D231E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5370:3190704f9cd2 Date: 2014-07-22 18:01 +0200 http://bitbucket.org/pypy/extradoc/changeset/3190704f9cd2/ Log: Tweaks diff --git a/talk/ep2014/stm/talk.html b/talk/ep2014/stm/talk.html --- a/talk/ep2014/stm/talk.html +++ b/talk/ep2014/stm/talk.html @@ -488,7 +488,7 @@

          Transactional Memory

            -
          • like GIL, but instead of locking, each thread runs optimistically
          • +
          • like GIL, but instead of blocking, each thread runs optimistically
          • "easy" to implement:
            • GIL acquire -> transaction start
            • GIL release -> transaction commit
            • @@ -507,7 +507,7 @@
              • application-level locks still needed...
              • but can be very coarse:
                  -
                • even two big transactions can hopefully run in parallel
                • +
                • even two big transactions can optimistically run in parallel
                • even if they both acquire and release the same lock
              • @@ -546,6 +546,7 @@
              • current status:
                • basics work
                • best case 25-40% overhead (much better than originally planned)
                • +
                • parallelizing user locks not done yet
                • tons of things to improve
                • tons of things to improve
                • tons of things to improve
                • @@ -563,18 +564,38 @@
                • counting primes
          +
          +

          Benefits

          +
            +
          • Keep locks coarse-grained
          • +
          • Potential to enable parallelism:
              +
            • in CPU-bound multithreaded programs
            • +
            • or as a replacement of multiprocessing
            • +
            • but also in existing applications not written for that
            • +
            • as long as they do multiple things that are "often independent"
            • +
            +
          • +
          +
          +
          +

          Issues

          +
            +
          • Performance hit: 25-40% everywhere (may be ok)
          • +
          • Keep locks coarse-grained:
              +
            • but in case of systematic conflicts, performance is bad again
            • +
            • need to track and fix them
            • +
            • need tool support (debugger/profiler)
            • +
            +
          • +
          +

          Summary

          • Transactional Memory is still too researchy for production
          • -
          • Potential to enable parallelism:
              -
            • as a replacement of multiprocessing
            • -
            • but also in existing applications not written for that
            • -
            • as long as they do multiple things that are "often independent"
            • -
            -
          • -
          • Keep locks coarse-grained:
              -
            • need to track and fix issues in case of systematic conflicts
            • +
            • But it has the potential to enable "easier parallelism"
            • +
            • Still alpha but slowly getting there!
            diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst --- a/talk/ep2014/stm/talk.rst +++ b/talk/ep2014/stm/talk.rst @@ -122,7 +122,7 @@ Transactional Memory -------------------- -* like GIL, but instead of locking, each thread runs optimistically +* like GIL, but instead of blocking, each thread runs optimistically * "easy" to implement: @@ -145,7 +145,7 @@ * but *can be very coarse:* - - even two big transactions can hopefully run in parallel + - even two big transactions can optimistically run in parallel - even if they both *acquire and release the same lock* @@ -186,6 +186,7 @@ - basics work - best case 25-40% overhead (much better than originally planned) + - parallelizing user locks not done yet - tons of things to improve - tons of things to improve - tons of things to improve @@ -201,22 +202,46 @@ * counting primes +Benefits +-------- + +* Keep locks coarse-grained + +* Potential to enable parallelism: + + - in CPU-bound multithreaded programs + + - or as a replacement of ``multiprocessing`` + + - but also in existing applications not written for that + + - as long as they do multiple things that are "often independent" + + +Issues +------ + +* Performance hit: 25-40% everywhere (may be ok) + +* Keep locks coarse-grained: + + - but in case of systematic conflicts, performance is bad again + + - need to track and fix them + + - need tool support (debugger/profiler) + + Summary ------- * Transactional Memory is still too researchy for production -* Potential to enable parallelism: +* But it has the potential to enable "easier parallelism" - - as a replacement of ``multiprocessing`` +* Still alpha but slowly getting there! 
- - but also in existing applications not written for that - - - as long as they do multiple things that are "often independent" - -* Keep locks coarse-grained: - - - need to track and fix issues in case of systematic conflicts + - see http://morepypy.blogspot.com/ Part 2 - Under The Hood From noreply at buildbot.pypy.org Tue Jul 22 19:47:37 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 19:47:37 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Deleted obsolete Exception class Message-ID: <20140722174737.D1B321C0748@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r927:3b12d3eb24dc Date: 2014-07-21 17:02 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3b12d3eb24dc/ Log: Deleted obsolete Exception class diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1467,8 +1467,3 @@ if isinstance(s_class, ClassShadow): return "%s >> #%s" % (s_class.getname(), self.lookup_selector) return "#%s" % self.lookup_selector - -class DetachingShadowError(Exception): - def __init__(self, old_shadow, new_shadow_class): - self.old_shadow = old_shadow - self.new_shadow_class = new_shadow_class From noreply at buildbot.pypy.org Tue Jul 22 19:47:39 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 19:47:39 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Trying to get AssertionErrors to print their message. Message-ID: <20140722174739.0E6B31C0748@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r928:6a516e497789 Date: 2014-07-21 18:05 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6a516e497789/ Log: Trying to get AssertionErrors to print their message. 
diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -69,7 +69,7 @@ except error.Exit, e: print_error("Exited: %s" % e.msg) return -1 - except Exception, e: + except BaseException, e: print_error("Exception: %s" % str(e)) if not objectmodel.we_are_translated(): import traceback From noreply at buildbot.pypy.org Tue Jul 22 19:47:40 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 19:47:40 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Extracted Display classes from model into a seperate module model_display. Message-ID: <20140722174740.43FC51C0748@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r929:2dac2e226121 Date: 2014-07-21 18:08 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2dac2e226121/ Log: Extracted Display classes from model into a seperate module model_display. 
diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -54,6 +54,11 @@ BLKCLSR_NUMARGS = 2 BLKCLSR_SIZE = 3 +FORM_BITS = 0 +FORM_WIDTH = 1 +FORM_HEIGHT = 2 +FORM_DEPTH = 3 + # ___________________________________________________________________________ # Miscellaneous constants diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -17,7 +17,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.unroll import unrolling_iterable -from spyvm import error, model, objspace +from spyvm import error, model, model_display, objspace sqInt = rffi.INT sqLong = rffi.LONG @@ -242,7 +242,7 @@ return w_object.convert_to_c_layout() elif isinstance(w_object, model.W_BytesObject): return rffi.cast(sqIntArrayPtr, w_object.convert_to_c_layout()) - elif isinstance(w_object, model.W_DisplayBitmap): + elif isinstance(w_object, model_display.W_DisplayBitmap): return rffi.cast(sqIntArrayPtr, w_object.convert_to_c_layout()) else: raise ProxyFunctionFailed @@ -529,7 +529,7 @@ @expose_on_virtual_machine_proxy([], int) def fullDisplayUpdate(): w_display = IProxy.space.objtable['w_display'] - if isinstance(w_display, model.W_DisplayBitmap): + if isinstance(w_display, model_display.W_DisplayBitmap): w_display.update_from_buffer() w_display.flush_to_screen() return 0 @@ -559,16 +559,7 @@ # display memory space = IProxy.space if w_dest_form.is_same_object(space.objtable['w_display']): - w_bitmap = w_dest_form.fetch(space, 0) - if not isinstance(w_bitmap, model.W_DisplayBitmap): - assert isinstance(w_bitmap, model.W_WordsObject) - w_display_bitmap = w_bitmap.as_display_bitmap( - w_dest_form, - IProxy.interp, - sdldisplay=None - ) - else: - w_display_bitmap = w_bitmap + w_display_bitmap = model_display.get_display_bitmap(IProxy.interp, w_dest_form) w_display_bitmap.update_from_buffer() w_display_bitmap.flush_to_screen() return 0 diff --git 
a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -987,26 +987,6 @@ c_words[i] = intmask(old_words[i]) self.words = None return c_words - - def as_display_bitmap(self, w_form, interp, sdldisplay=None): - width = interp.space.unwrap_int(w_form.fetch(interp.space, 1)) - height = interp.space.unwrap_int(w_form.fetch(interp.space, 2)) - depth = interp.space.unwrap_int(w_form.fetch(interp.space, 3)) - if not sdldisplay: - from spyvm import display - sdldisplay = display.SDLDisplay(interp.image_name) - sdldisplay.set_video_mode(width, height, depth) - w_display_bitmap = W_DisplayBitmap.create( - interp.space, - self.getclass(interp.space), - self.size(), - depth, - sdldisplay - ) - for idx in range(self.size()): - w_display_bitmap.setword(idx, self.getword(idx)) - w_form.store(interp.space, 0, w_display_bitmap) - return w_display_bitmap def _become(self, w_other): assert isinstance(w_other, W_WordsObject) @@ -1019,144 +999,6 @@ if self.words is None: lltype.free(self.c_words, flavor='raw') -class W_DisplayBitmap(W_AbstractObjectWithClassReference): - _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display', '_depth'] - _immutable_fields_ = ['_realsize', 'display', '_depth'] - repr_classname = "W_DisplayBitmap" - - pixelbuffer = None - - @staticmethod - def create(space, w_class, size, depth, display): - if depth < 8: - return W_MappingDisplayBitmap(space, w_class, size * (8 / depth), depth, display) - elif depth == 8: - return W_8BitDisplayBitmap(space, w_class, size, depth, display) - elif depth == 16: - return W_16BitDisplayBitmap(space, w_class, size, depth, display) - else: - return W_DisplayBitmap(space, w_class, size, depth, display) - - def repr_content(self): - return "len=%d depth=%d %s" % (self.size(), self._depth, self.str_content()) - - def __init__(self, space, w_class, size, depth, display): - W_AbstractObjectWithClassReference.__init__(self, space, w_class) - self._real_depth_buffer = 
lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') - self._realsize = size - self.display = display - self._depth = depth - - def at0(self, space, index0): - val = self.getword(index0) - return space.wrap_uint(val) - - def atput0(self, space, index0, w_value): - word = space.unwrap_uint(w_value) - self.setword(index0, word) - - def flush_to_screen(self): - self.display.flip() - - def size(self): - return self._realsize - - def invariant(self): - return False - - def clone(self, space): - w_result = W_WordsObject(space, self.getclass(space), self._realsize) - n = 0 - while n < self._realsize: - w_result.words[n] = self.getword(n) - n += 1 - return w_result - - def getword(self, n): - assert self.size() > n >= 0 - return self._real_depth_buffer[n] - - def setword(self, n, word): - self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = word - - def is_array_object(self): - return True - - def update_from_buffer(self): - for i in range(self._realsize): - self.setword(i, self.getword(i)) - - def convert_to_c_layout(self): - return self._real_depth_buffer - - def can_become(self, w_other): - # TODO - implement _become() for this class. Impossible due to _immutable_fields_? 
- return False - - def __del__(self): - lltype.free(self._real_depth_buffer, flavor='raw') - - -class W_16BitDisplayBitmap(W_DisplayBitmap): - repr_classname = "W_16BitDisplayBitmap" - def setword(self, n, word): - self._real_depth_buffer[n] = word - mask = 0b11111 - lsb = (r_uint(word) & r_uint(0xffff0000)) >> 16 - msb = (r_uint(word) & r_uint(0x0000ffff)) - - lsb = ( - ((lsb >> 10) & mask) | - (((lsb >> 5) & mask) << 6) | - ((lsb & mask) << 11) - ) - msb = ( - ((msb >> 10) & mask) | - (((msb >> 5) & mask) << 6) | - ((msb & mask) << 11) - ) - - self.display.get_pixelbuffer()[n] = r_uint(lsb | (msb << 16)) - - -class W_8BitDisplayBitmap(W_DisplayBitmap): - repr_classname = "W_8BitDisplayBitmap" - def setword(self, n, word): - self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = r_uint( - (word >> 24) | - ((word >> 8) & 0x0000ff00) | - ((word << 8) & 0x00ff0000) | - (word << 24) - ) - -NATIVE_DEPTH = 8 -class W_MappingDisplayBitmap(W_DisplayBitmap): - repr_classname = "W_MappingDisplayBitmap" - @jit.unroll_safe - def setword(self, n, word): - self._real_depth_buffer[n] = word - word = r_uint(word) - pos = self.compute_pos(n) - assert self._depth <= 4 - rshift = 32 - self._depth - for i in xrange(8 / self._depth): - if pos >= self.size(): - return - mapword = r_uint(0) - for i in xrange(4): - pixel = r_uint(word) >> rshift - mapword |= (r_uint(pixel) << (i * 8)) - word <<= self._depth - self.display.get_pixelbuffer()[pos] = mapword - pos += 1 - - def compute_pos(self, n): - return n * (NATIVE_DEPTH / self._depth) - -# XXX Shouldn't compiledmethod have class reference for subclassed compiled -# methods? class W_CompiledMethod(W_AbstractObjectWithIdentityHash): """My instances are methods suitable for interpretation by the virtual machine. This is the only class in the system whose instances intermix both indexable pointer fields and indexable integer fields. 
diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,6 +1,6 @@ import os -from spyvm import constants, model, shadow, wrapper, version +from spyvm import constants, model, model_display, shadow, wrapper, version from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError from rpython.rlib import jit, rpath from rpython.rlib.objectmodel import instantiate, specialize @@ -237,7 +237,7 @@ w_display = self.objtable['w_display'] if w_display: w_bitmap = w_display.fetch(self, 0) - if isinstance(w_bitmap, model.W_DisplayBitmap): + if isinstance(w_bitmap, model_display.W_DisplayBitmap): return w_bitmap.display raise PrimitiveFailedError("No display") diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -1,4 +1,4 @@ -from spyvm import model +from spyvm import model_display, model from spyvm.error import PrimitiveFailedError from spyvm.shadow import AbstractCachingShadow from spyvm.plugins.plugin import Plugin @@ -32,7 +32,7 @@ s_frame.push(interp.space.wrap_int(s_bitblt.bitCount)) elif w_dest_form.is_same_object(space.objtable['w_display']): w_bitmap = w_dest_form.fetch(space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) + assert isinstance(w_bitmap, model_display.W_DisplayBitmap) w_bitmap.flush_to_screen() return w_rcvr @@ -741,7 +741,7 @@ self.w_bits = self.fetch(0) if self.w_bits.is_nil(self.space): return - if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): + if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model_display.W_DisplayBitmap)): return self.width = self.intOrIfNil(self.fetch(1), 0) self.height = self.intOrIfNil(self.fetch(2), 0) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -2,7 +2,7 @@ import inspect import math import operator -from spyvm import model, shadow, 
error, constants, display +from spyvm import model, model_display, shadow, error, constants, display from spyvm.error import PrimitiveFailedError, PrimitiveNotYetWrittenError from spyvm import wrapper @@ -676,7 +676,7 @@ w_display = interp.space.objtable['w_display'] if w_dest_form.is_same_object(w_display): w_bitmap = w_display.fetch(interp.space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) + assert isinstance(w_bitmap, model_display.W_DisplayBitmap) w_bitmap.flush_to_screen() return w_rcvr except shadow.MethodNotFound: @@ -748,23 +748,19 @@ w_prev_display = interp.space.objtable['w_display'] if w_prev_display: w_prev_bitmap = w_prev_display.fetch(interp.space, 0) - if isinstance(w_prev_bitmap, model.W_DisplayBitmap): + if isinstance(w_prev_bitmap, model_display.W_DisplayBitmap): sdldisplay = w_prev_bitmap.display sdldisplay.set_video_mode(width, height, depth) - if isinstance(w_bitmap, model.W_DisplayBitmap): + if isinstance(w_bitmap, model_display.W_DisplayBitmap): assert (sdldisplay is None) or (sdldisplay is w_bitmap.display) sdldisplay = w_bitmap.display sdldisplay.set_video_mode(width, height, depth) w_display_bitmap = w_bitmap else: assert isinstance(w_bitmap, model.W_WordsObject) - w_display_bitmap = w_bitmap.as_display_bitmap( - w_rcvr, - interp, - sdldisplay=sdldisplay - ) - + w_display_bitmap = model_display.get_display_bitmap(interp, w_rcvr, sdldisplay=sdldisplay) + w_display_bitmap.flush_to_screen() if interp.image: interp.image.lastWindowSize = (width << 16) + height @@ -1142,7 +1138,7 @@ raise PrimitiveFailedError for i in xrange(w_arg.size()): w_arg.setchar(i, chr(new_value)) - elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, model.W_DisplayBitmap): + elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, model_display.W_DisplayBitmap): for i in xrange(w_arg.size()): w_arg.setword(i, new_value) else: diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ 
b/spyvm/test/test_model.py @@ -1,5 +1,5 @@ import py, math, socket -from spyvm import model, shadow, objspace, error, display +from spyvm import model, model_display, shadow, objspace, error, display from spyvm.shadow import MethodNotFound, WEAK_POINTERS from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rtyper.lltypesystem import lltype, rffi @@ -387,7 +387,7 @@ d = display.SDLDisplay("test") d.set_video_mode(32, 10, 1) - target = model.W_DisplayBitmap.create(space, space.w_Array, 10, 1, d) + target = model_display.W_DisplayBitmap.create(space, space.w_Array, 10, 1, d) target.setword(0, r_uint(0xFF00)) assert bin(target.getword(0)) == bin(0xFF00) target.setword(0, r_uint(0x00FF00FF)) @@ -411,7 +411,7 @@ d = display.SDLDisplay("test") d.set_video_mode(18, 5, 1) - dbitmap = model.W_DisplayBitmap.create(space, space.w_Array, 5, 1, d) + dbitmap = model_display.W_DisplayBitmap.create(space, space.w_Array, 5, 1, d) assert dbitmap.compute_pos(0) == 0 assert dbitmap.compute_pos(1) == 8 diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -1,5 +1,5 @@ import py, os, math, time -from spyvm import model, shadow, interpreter, constants, primitives, objspace, wrapper, display +from spyvm import model, model_display, shadow, interpreter, constants, primitives, objspace, wrapper, display from spyvm.primitives import prim_table, PrimitiveFailedError from spyvm.plugins import bitblt from rpython.rlib.rfloat import INFINITY, NAN, isinf, isnan @@ -721,7 +721,7 @@ assert space.objtable["w_display"] is mock_display w_bitmap = mock_display.fetch(space, 0) assert w_bitmap is not w_wordbmp - assert isinstance(w_bitmap, model.W_DisplayBitmap) + assert isinstance(w_bitmap, model_display.W_DisplayBitmap) sdldisplay = w_bitmap.display assert isinstance(sdldisplay, display.SDLDisplay) @@ -733,7 +733,7 @@ prim(primitives.BE_DISPLAY, [mock_display2]) assert space.objtable["w_display"] 
is mock_display2 w_bitmap2 = mock_display.fetch(space, 0) - assert isinstance(w_bitmap2, model.W_DisplayBitmap) + assert isinstance(w_bitmap2, model_display.W_DisplayBitmap) assert w_bitmap.display is w_bitmap2.display assert sdldisplay.width == 32 assert sdldisplay.height == 10 From noreply at buildbot.pypy.org Tue Jul 22 19:47:41 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 19:47:41 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Added --hacks parameter to enable run_spy_hacks method. Message-ID: <20140722174741.607261C0748@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r930:b5dbc52fd7e3 Date: 2014-07-21 18:35 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b5dbc52fd7e3/ Log: Added --hacks parameter to enable run_spy_hacks method. diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -33,6 +33,7 @@ self.no_specialized_storage = ConstantFlag() # This is a hack; see compile_code() in targetimageloadingsmalltalk.py self.suppress_process_switch = ConstantFlag() + self.run_spy_hacks = ConstantFlag() self.headless = ConstantFlag() self.classtable = {} diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -386,12 +386,13 @@ self.startup_time = time.time() def run_spy_hacks(self, space): - pass - # w_display = space.objtable["w_display"] - # if w_display is not None and not w_display.is_nil(space): - # if space.unwrap_int(w_display.fetch(space, 3)) < 8: - # # non-native indexed color depth not well supported - # w_display.store(space, 3, space.wrap_int(8)) + if not space.run_spy_hacks.is_set(): + return + w_display = space.objtable["w_display"] + if w_display is not None and not w_display.is_nil(space): + if space.unwrap_int(w_display.fetch(space, 3)) < 8: + # non-native indexed color depth not well supported + w_display.store(space, 3, 
space.wrap_int(8)) def find_symbol(self, space, reader, symbol): w_dnu = self.special(constants.SO_DOES_NOT_UNDERSTAND) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -36,6 +36,7 @@ -p|--poll - Actively poll for events. Try this if the image is not responding well. -i|--no-interrupts - Disable timer interrupt. Disables non-cooperative scheduling. -S - Disable specialized storage strategies; always use generic ListStorage + --hacks - Enable Spy hacks. Set display color depth to 8. Logging parameters: -t|--trace - Output a trace of each message, primitive, return value and process switch. @@ -119,6 +120,8 @@ interrupts = False elif arg in ["-P", "--process"]: headless = False + elif arg in ["--hacks"]: + space.run_spy_hacks.set() elif arg in ["-S"]: space.no_specialized_storage.set() elif arg in ["-u"]: From noreply at buildbot.pypy.org Tue Jul 22 19:47:42 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 19:47:42 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Clean error message on EXIT_TO_DEBUGGER primitive. Message-ID: <20140722174742.7C34E1C0748@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r931:ef90a282ad1f Date: 2014-07-21 18:43 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ef90a282ad1f/ Log: Clean error message on EXIT_TO_DEBUGGER primitive. 
diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -860,8 +860,8 @@ @expose_primitive(EXIT_TO_DEBUGGER, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - if not objectmodel.we_are_translated(): - import pdb; pdb.set_trace() + if interp.space.headless.is_set(): + exitFromHeadlessExecution(s_frame, "EXIT_TO_DEBUGGER") raise PrimitiveNotYetWrittenError() @expose_primitive(CHANGE_CLASS, unwrap_spec=[object, object], no_result=True) From noreply at buildbot.pypy.org Tue Jul 22 19:47:43 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 19:47:43 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Added FormWrapper to clean up display handling code. Message-ID: <20140722174743.9FBA81C0748@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r932:f151c3506b41 Date: 2014-07-21 19:16 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f151c3506b41/ Log: Added FormWrapper to clean up display handling code. 
diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -17,7 +17,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.unroll import unrolling_iterable -from spyvm import error, model, model_display, objspace +from spyvm import error, model, model_display, objspace, wrapper sqInt = rffi.INT sqLong = rffi.LONG @@ -559,7 +559,8 @@ # display memory space = IProxy.space if w_dest_form.is_same_object(space.objtable['w_display']): - w_display_bitmap = model_display.get_display_bitmap(IProxy.interp, w_dest_form) + form = wrapper.FormWrapper(space, w_dest_form) + w_display_bitmap = form.get_display_bitmap(IProxy.interp) w_display_bitmap.update_from_buffer() w_display_bitmap.flush_to_screen() return 0 diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -734,14 +734,13 @@ if not isinstance(w_rcvr, model.W_PointersObject) or w_rcvr.size() < 4: raise PrimitiveFailedError - # the fields required are bits (a pointer to a Bitmap), width, height, depth - - # XXX: TODO get the initial image TODO: figure out whether we - # should decide the width an report it in the other SCREEN_SIZE - w_bitmap = w_rcvr.fetch(interp.space, 0) - width = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 1)) - height = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 2)) - depth = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 3)) + + # TODO: figure out whether we should decide the width an report it in the SCREEN_SIZE primitive + form = wrapper.FormWrapper(interp.space, w_rcvr) + w_bitmap = form.bits() + width = form.width() + height = form.height() + depth = form.depth() sdldisplay = None @@ -751,7 +750,7 @@ if isinstance(w_prev_bitmap, model_display.W_DisplayBitmap): sdldisplay = w_prev_bitmap.display sdldisplay.set_video_mode(width, height, depth) - + if isinstance(w_bitmap, model_display.W_DisplayBitmap): assert (sdldisplay is 
None) or (sdldisplay is w_bitmap.display) sdldisplay = w_bitmap.display @@ -759,7 +758,7 @@ w_display_bitmap = w_bitmap else: assert isinstance(w_bitmap, model.W_WordsObject) - w_display_bitmap = model_display.get_display_bitmap(interp, w_rcvr, sdldisplay=sdldisplay) + w_display_bitmap = form.get_display_bitmap(interp, sdldisplay) w_display_bitmap.flush_to_screen() if interp.image: diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -1,4 +1,4 @@ -from spyvm import model, constants +from spyvm import model, model_display, constants from spyvm.error import FatalError, WrapperException, PrimitiveFailedError class Wrapper(object): @@ -263,6 +263,21 @@ def size(self): return self._w_self.size() - constants.BLKCLSR_SIZE +class FormWrapper(Wrapper): + bits, store_bits = make_getter_setter(constants.FORM_BITS) + width, store_width = make_int_getter_setter(constants.FORM_WIDTH) + height, store_height = make_int_getter_setter(constants.FORM_HEIGHT) + depth, store_depth = make_int_getter_setter(constants.FORM_DEPTH) + + def get_display_bitmap(self, interp, sdldisplay=None): + w_bitmap = self.bits() + if not isinstance(w_bitmap, model_display.W_DisplayBitmap): + w_display_bitmap = model_display.from_words_object(interp, w_bitmap, self, sdldisplay) + self.store_bits(w_display_bitmap) + else: + w_display_bitmap = w_bitmap + return w_display_bitmap + # XXX Wrappers below are not used yet. class OffsetWrapper(Wrapper): offset_x = make_int_getter(0) From noreply at buildbot.pypy.org Tue Jul 22 19:47:44 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 19:47:44 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Made image_name available from object space. 
Message-ID: <20140722174744.CFCAF1C0748@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r933:97b7b7828d10 Date: 2014-07-21 19:19 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/97b7b7828d10/ Log: Made image_name available from object space. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -23,7 +23,7 @@ class Interpreter(object): - _immutable_fields_ = ["space", "image", "image_name", + _immutable_fields_ = ["space", "image", "interrupt_counter_size", "startup_time", "evented", "interrupts"] @@ -34,12 +34,11 @@ get_printable_location=get_printable_location ) - def __init__(self, space, image=None, image_name="", + def __init__(self, space, image=None, trace=False, evented=True, interrupts=True): # === Initialize immutable variables self.space = space self.image = image - self.image_name = image_name if image: self.startup_time = image.startup_time else: @@ -1009,9 +1008,8 @@ # in order to enable tracing/jumping for message sends etc. 
def debugging(): def stepping_debugger_init(original): - def meth(self, space, image=None, image_name="", trace=False): - return_value = original(self, space, image=image, - image_name=image_name, trace=trace) + def meth(self, space, image=None, trace=False): + return_value = original(self, space, image=image, trace=trace) # ############################################################## self.message_stepping = False diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -14,8 +14,7 @@ self.flag = [set_initially] def is_set(self): - flag = jit.promote(self.flag[0]) - return flag + return jit.promote(self.flag[0]) def set(self): self.flag[0] = True @@ -26,6 +25,16 @@ def set_to(self, flag): self.flag[0] = flag +class ConstantString(object): + def __init__(self): + self.value = [""] + + def get(self): + return jit.promote(self.value[0]) + + def set(self, value): + self.value[0] = value + class ObjSpace(object): def __init__(self): # If this flag is set, then no optimizing storage strategies will be used. @@ -38,8 +47,8 @@ self.classtable = {} self.objtable = {} - self._executable_path = [""] # XXX: we cannot set the attribute - # directly on the frozen objectspace + self._executable_path = ConstantString() + self._image_name = ConstantString() # Create the nil object. # Circumvent the constructor because nil is already referenced there. 
@@ -62,11 +71,12 @@ break return rpath.rabspath(executable) - def runtime_setup(self, executable): + def runtime_setup(self, executable, image_name): fullpath = rpath.rabspath(self.find_executable(executable)) i = fullpath.rfind(os.path.sep) + 1 assert i > 0 - self._executable_path[0] = fullpath[:i] + self._executable_path.set(fullpath[:i]) + self._image_name.set(image_name) def populate_special_objects(self, specials): for name, idx in constants.objects_in_special_object_table.items(): @@ -77,7 +87,10 @@ self.classtable["w_Metaclass"] = self.w_SmallInteger.w_class.w_class def executable_path(self): - return self._executable_path[0] + return self._executable_path.get() + + def image_name(self): + return self._image_name.get() def add_bootstrap_class(self, name, cls): self.classtable[name] = cls diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -982,7 +982,7 @@ def func(interp, s_frame, argument_count): if argument_count == 0: s_frame.pop() - return interp.space.wrap_string(interp.image_name) + return interp.space.wrap_string(interp.space.image_name()) elif argument_count == 1: pass # XXX raise PrimitiveFailedError diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -47,7 +47,8 @@ frame = context for i in range(len(stack)): frame.as_context_get_shadow(space).push(stack[i]) - interp = TestInterpreter(space, image_name=IMAGENAME) + interp = TestInterpreter(space) + interp.space._image_name.set(IMAGENAME) return interp, frame, len(stack) def _prim(space, code, stack, context = None): @@ -680,7 +681,7 @@ closure = space.newClosure(w_frame, 4, 0, []) s_frame = w_frame.as_methodcontext_get_shadow(space) - interp = TestInterpreter(space, image_name=IMAGENAME) + interp = TestInterpreter(space) interp._loop = True try: diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- 
a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -161,10 +161,10 @@ # Load & prepare image and environment image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) - interp = interpreter.Interpreter(space, image, image_name=path, + interp = interpreter.Interpreter(space, image, trace=trace, evented=not poll, interrupts=interrupts) - space.runtime_setup(argv[0]) + space.runtime_setup(argv[0], path) print_error("") # Line break after image-loading characters # Create context to be executed From noreply at buildbot.pypy.org Tue Jul 22 19:47:47 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 19:47:47 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Refactoring. Message-ID: <20140722174747.05A001C0748@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r934:2d4fa0b3f3b7 Date: 2014-07-22 15:51 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2d4fa0b3f3b7/ Log: Refactoring. The SDLDisplay instance is globally accessible in the space. Cleaned up the process for a form to become the display. Added model_display.py (forgotten in previous commit). Fixed MappingDisplay: handling the case that width is not dividable by 32. diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes --- a/images/Squeak4.5-noBitBlt.changes +++ b/images/Squeak4.5-noBitBlt.changes @@ -12620,4 +12620,8 @@ 1 to: self splayTreeSize do: [:i | self insertNewNode. - ]! ! ----SNAPSHOT----{15 July 2014 . 6:10:56 pm} Squeak4.5-noBitBlt.image priorSource: 15894330! \ No newline at end of file + ]! ! ----SNAPSHOT----{15 July 2014 . 6:10:56 pm} Squeak4.5-noBitBlt.image priorSource: 15894330! + +----QUIT/NOSAVE----{21 July 2014 . 7:09:22 pm} Squeak4.5-noBitBlt.image priorSource: 15894825! + +----QUIT----{21 July 2014 . 7:10:12 pm} Squeak4.5-noBitBlt.image priorSource: 15894825! 
----STARTUP----{22 July 2014 . 10:55:07 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! Smalltalk specialObjectsArray at:15! (Smalltalk specialObjectsArray at:15) depth! \ No newline at end of file diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -35,12 +35,13 @@ WindowEventPaint = 5 WindowEventStinks = 6 +MINIMUM_DEPTH = 8 class SDLDisplay(object): _attrs_ = ["screen", "width", "height", "depth", "surface", "has_surface", "mouse_position", "button", "key", "interrupt_key", "_defer_updates", - "_deferred_event", "pixelbuffer"] - _immutable_fields_ = ["pixelbuffer?"] + "_deferred_event", "bpp", "pitch"] + #_immutable_fields_ = ["pixelbuffer?"] def __init__(self, title): assert RSDL.Init(RSDL.INIT_VIDEO) >= 0 @@ -58,26 +59,31 @@ def set_video_mode(self, w, h, d): assert w > 0 and h > 0 assert d in [1, 2, 4, 8, 16, 32] + if d < MINIMUM_DEPTH: + d = MINIMUM_DEPTH self.width = w self.height = h self.depth = d flags = RSDL.HWPALETTE | RSDL.RESIZABLE | RSDL.ASYNCBLIT | RSDL.DOUBLEBUF - if d < 8: - d = 8 self.screen = RSDL.SetVideoMode(w, h, d, flags) if not self.screen: print "Could not open display at depth %d" % d raise RuntimeError - elif d == 8: + elif d == MINIMUM_DEPTH: self.set_squeak_colormap(self.screen) - self.pixelbuffer = rffi.cast(rffi.UINTP, self.screen.c_pixels) - - def get_pixelbuffer(self): - return jit.promote(self.pixelbuffer) - + self.bpp = rffi.getintfield(self.screen.c_format, 'c_BytesPerPixel') + self.pitch = rffi.getintfield(self.screen, 'c_pitch') + + def get_pixelbuffer_UCHAR(self): + # return jit.promote(rffi.cast(RSDL.Uint8P, self.screen.c_pixels)) + return jit.promote(self.screen.c_pixels) + + def get_pixelbuffer_UINT(self): + return jit.promote(rffi.cast(RSDL.Uint32P, self.screen.c_pixels)) + def defer_updates(self, flag): self._defer_updates = flag - + def flip(self, force=False): if (not self._defer_updates) or force: RSDL.Flip(self.screen) diff --git 
a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -560,7 +560,7 @@ space = IProxy.space if w_dest_form.is_same_object(space.objtable['w_display']): form = wrapper.FormWrapper(space, w_dest_form) - w_display_bitmap = form.get_display_bitmap(IProxy.interp) + w_display_bitmap = form.get_display_bitmap() w_display_bitmap.update_from_buffer() w_display_bitmap.flush_to_screen() return 0 @@ -1000,7 +1000,7 @@ self.argcount = 0 self.w_method = None self.fail_reason = 0 - self.trace_proxy.unset() + self.trace_proxy.deactivate() def call(self, signature, interp, s_frame, argcount, w_method): self.initialize_from_call(signature, interp, s_frame, argcount, w_method) @@ -1042,7 +1042,7 @@ self.argcount = argcount self.w_method = w_method self.space = interp.space - self.trace_proxy.set_to(interp.trace_proxy.is_set()) + self.trace_proxy.set(interp.trace_proxy.is_set()) # ensure that space.w_nil gets the first possible oop self.object_to_oop(self.space.w_nil) diff --git a/spyvm/model_display.py b/spyvm/model_display.py new file mode 100644 --- /dev/null +++ b/spyvm/model_display.py @@ -0,0 +1,203 @@ + +from spyvm import model, constants, display +from rpython.rlib import jit +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.rarithmetic import r_uint + +def from_words_object(w_obj, form): + depth = form.depth() + space = form.space + size = w_obj.size() + w_class = w_obj.getclass(space) + + if depth < 8: + w_display_bitmap = W_MappingDisplayBitmap(space, w_class, size, depth, form.width()) + elif depth == 8: + w_display_bitmap = W_8BitDisplayBitmap(space, w_class, size, depth) + elif depth == 16: + w_display_bitmap = W_16BitDisplayBitmap(space, w_class, size, depth) + else: + w_display_bitmap = W_DisplayBitmap(space, w_class, size, depth) + + for idx in range(size): + w_display_bitmap.setword(idx, w_obj.getword(idx)) + + return w_display_bitmap + +invert_byte_order = [False] + +def 
invert(): + inv = invert_byte_order[0] + return jit.promote(inv) + +class W_DisplayBitmap(model.W_AbstractObjectWithClassReference): + _attrs_ = ['pixelbuffer_words', '_real_depth_buffer', '_realsize', 'display', '_depth'] + _immutable_fields_ = ['pixelbuffer_words?', '_real_depth_buffer', '_realsize', 'display', '_depth'] + repr_classname = "W_DisplayBitmap" + + def __init__(self, space, w_class, size, depth): + model.W_AbstractObjectWithClassReference.__init__(self, space, w_class) + self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') + self._realsize = size + self._depth = depth + self.display = space.display() + self.relinquish_display() + + # === Object access + + def at0(self, space, index0): + val = self.getword(index0) + return space.wrap_uint(val) + + def atput0(self, space, index0, w_value): + word = space.unwrap_uint(w_value) + self.setword(index0, word) + + def getword(self, n): + assert self.size() > n >= 0 + return self._real_depth_buffer[n] + + def setword(self, n, word): + self._real_depth_buffer[n] = word + if self.pixelbuffer_words > 0: + self.set_pixelbuffer_word(n, word) + + def size(self): + return self._realsize + + # === Graphics + + def pixelbuffer_UINT(self): + return self.display.get_pixelbuffer_UINT() + + def pixelbuffer_UCHAR(self): + return self.display.get_pixelbuffer_UCHAR() + + def set_pixelbuffer_word(self, n, word): + self.pixelbuffer_UINT()[n] = word + + def take_over_display(self): + # Make sure FrameWrapper.take_over_display() is called first for the correct Frame object. 
+ pixel_per_word = constants.BYTES_PER_WORD / (self.display.depth / 8) + self.pixelbuffer_words = self.display.width * self.display.height / pixel_per_word + self.update_from_buffer() + + def relinquish_display(self): + self.pixelbuffer_words = 0 + + def flush_to_screen(self): + self.display.flip() + + def update_from_buffer(self): + if self.pixelbuffer_words > 0: + for i in range(self.size()): + self.set_pixelbuffer_word(i, self.getword(i)) + + # === Misc + + def invariant(self): + return False + + def clone(self, space): + w_result = model.W_WordsObject(space, self.getclass(space), self.size()) + for n in range(self.size()): + w_result.setword(n, self.getword(n)) + return w_result + + def is_array_object(self): + return True + + def convert_to_c_layout(self): + return self._real_depth_buffer + + def can_become(self, w_other): + # TODO - implement _become() for this class. Impossible due to _immutable_fields_? + return False + + def __del__(self): + lltype.free(self._real_depth_buffer, flavor='raw') + + def repr_content(self): + return "len=%d depth=%d %s" % (self.size(), self._depth, self.str_content()) + +class W_16BitDisplayBitmap(W_DisplayBitmap): + + repr_classname = "W_16BitDisplayBitmap" + + def set_pixelbuffer_word(self, n, word): + mask = 0b11111 + lsb = (r_uint(word) & r_uint(0xffff0000)) >> 16 + msb = (r_uint(word) & r_uint(0x0000ffff)) + + # Invert order of rgb-components + lsb = ( + ((lsb >> 10) & mask) | + (((lsb >> 5) & mask) << 6) | + ((lsb & mask) << 11) + ) + msb = ( + ((msb >> 10) & mask) | + (((msb >> 5) & mask) << 6) | + ((msb & mask) << 11) + ) + + self.pixelbuffer_UINT()[n] = r_uint(lsb | (msb << 16)) + +class W_8BitDisplayBitmap(W_DisplayBitmap): + + repr_classname = "W_8BitDisplayBitmap" + + def set_pixelbuffer_word(self, n, word): + if invert(): + # Invert the byte-order. 
+ self.pixelbuffer_UINT()[n] = r_uint( + (word >> 24) | + ((word >> 8) & 0x0000ff00) | + ((word << 8) & 0x00ff0000) | + (word << 24) + ) + else: + self.pixelbuffer_UINT()[n] = r_uint(word) + +class W_MappingDisplayBitmap(W_DisplayBitmap): + + repr_classname = "W_MappingDisplayBitmap" + _attrs_ = ['mapping_factor', 'words_per_line', 'bits_in_last_word', 'width'] + _immutable_fields_ = ['mapping_factor', 'words_per_line', 'bits_in_last_word', 'width'] + + pixel_per_word = constants.BYTES_PER_WORD + + def __init__(self, space, w_class, size, depth, width): + assert depth in [1, 2, 4] + width = r_uint(width) + self.width = width + self.mapping_factor = display.MINIMUM_DEPTH / depth + self.words_per_line = r_uint(width / 32 + 1) + self.bits_in_last_word = width % 32 + W_DisplayBitmap.__init__(self, space, w_class, size, depth) + + @jit.unroll_safe + def set_pixelbuffer_word(self, n, word): + n = r_uint(n) + word = r_uint(word) + pos = self.compute_pos(n) + buf = self.display.screen.c_pixels + + if (n+1) % self.words_per_line == 0: + # This is the last word on the line. A few bits are cut off. 
+ bits = self.bits_in_last_word + else: + bits = 32 + + depth = r_uint(self._depth) + rshift = 32 - depth + for i in range(r_uint(bits) / depth): + pixel = word >> rshift + buf[pos] = rffi.cast(rffi.UCHAR, pixel) + word <<= self._depth + pos += 1 + + def compute_pos(self, n): + word_on_line = n % self.words_per_line + complete_lines = r_uint((n - word_on_line) / self.words_per_line) + return complete_lines * self.width + 32*word_on_line diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,39 +1,47 @@ import os -from spyvm import constants, model, model_display, shadow, wrapper, version +from spyvm import constants, model, model_display, shadow, wrapper, version, display from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError from rpython.rlib import jit, rpath -from rpython.rlib.objectmodel import instantiate, specialize +from rpython.rlib.objectmodel import instantiate, specialize, import_from_mixin from rpython.rlib.rarithmetic import intmask, r_uint, int_between -class ConstantFlag(object): - """Boolean flag that can be edited, but will be promoted +class ConstantMixin(object): + """Mixin for constant values that can be edited, but will be promoted to a constant when jitting.""" - def __init__(self, set_initially=False): - self.flag = [set_initially] - - def is_set(self): - return jit.promote(self.flag[0]) - - def set(self): - self.flag[0] = True - - def unset(self): - self.flag[0] = False - - def set_to(self, flag): - self.flag[0] = flag - -class ConstantString(object): - def __init__(self): - self.value = [""] - - def get(self): - return jit.promote(self.value[0]) + def __init__(self, initial_value = None): + if initial_value is None: + initial_value = self.default_value + self.value = [initial_value] def set(self, value): self.value[0] = value + + def get(self): + value = jit.promote(self.value[0]) + return value + +class ConstantFlag(object): + import_from_mixin(ConstantMixin) + 
default_value = False + def is_set(self): + return self.get() + def activate(self): + self.set(True) + def deactivate(self): + self.set(False) + +class ConstantString(object): + import_from_mixin(ConstantMixin) + default_value = "" + def get(self): + # Promoting does not work on strings... + return self.value[0] + +class ConstantObject(object): + import_from_mixin(ConstantMixin) + default_value = None class ObjSpace(object): def __init__(self): @@ -49,6 +57,7 @@ self.objtable = {} self._executable_path = ConstantString() self._image_name = ConstantString() + self._display = ConstantObject() # Create the nil object. # Circumvent the constructor because nil is already referenced there. @@ -85,12 +94,6 @@ self.objtable[name] = specials[idx] # XXX this is kind of hacky, but I don't know where else to get Metaclass self.classtable["w_Metaclass"] = self.w_SmallInteger.w_class.w_class - - def executable_path(self): - return self._executable_path.get() - - def image_name(self): - return self._image_name.get() def add_bootstrap_class(self, name, cls): self.classtable[name] = cls @@ -138,14 +141,8 @@ name = "w_" + name if not name in self.objtable: self.add_bootstrap_object(name, None) - - @specialize.arg(1) - def get_special_selector(self, selector): - i0 = constants.find_selectorindex(selector) - self.w_special_selectors.as_cached_object_get_shadow(self) - return self.w_special_selectors.fetch(self, i0) - # methods for wrapping and unwrapping stuff + # ============= Methods for wrapping and unwrapping stuff ============= def wrap_int(self, val): from spyvm import constants @@ -247,14 +244,30 @@ return [w_array.at0(self, i) for i in range(w_array.size())] - def get_display(self): - w_display = self.objtable['w_display'] - if w_display: - w_bitmap = w_display.fetch(self, 0) - if isinstance(w_bitmap, model_display.W_DisplayBitmap): - return w_bitmap.display - raise PrimitiveFailedError("No display") - + # ============= Access to static information ============= + + 
@specialize.arg(1) + def get_special_selector(self, selector): + i0 = constants.find_selectorindex(selector) + self.w_special_selectors.as_cached_object_get_shadow(self) + return self.w_special_selectors.fetch(self, i0) + + def executable_path(self): + return self._executable_path.get() + + def image_name(self): + return self._image_name.get() + + def display(self): + disp = self._display.get() + if disp is None: + # Create lazy to allow headless execution. + disp = display.SDLDisplay(self.image_name()) + self._display.set(disp) + return disp + + # ============= Other Methods ============= + def _freeze_(self): return True diff --git a/spyvm/plugins/vmdebugging.py b/spyvm/plugins/vmdebugging.py --- a/spyvm/plugins/vmdebugging.py +++ b/spyvm/plugins/vmdebugging.py @@ -20,12 +20,12 @@ @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def trace_proxy(interp, s_frame, w_rcvr): - interp.trace_proxy.set() + interp.trace_proxy.activate() return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def untrace_proxy(interp, s_frame, w_rcvr): - interp.trace_proxy.unset() + interp.trace_proxy.deactivate() return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -644,7 +644,7 @@ @expose_primitive(MOUSE_POINT, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - x, y = interp.space.get_display().mouse_point() + x, y = interp.space.display().mouse_point() w_point = model.W_PointersObject(interp.space, interp.space.w_Point, 2) w_point.store(interp.space, 0, interp.space.wrap_int(x)) w_point.store(interp.space, 1, interp.space.wrap_int(y)) @@ -656,7 +656,7 @@ def func(interp, s_frame, w_rcvr, w_into): if not interp.evented: raise PrimitiveFailedError() - ary = interp.space.get_display().get_next_event(time=interp.time_now()) + ary = interp.space.display().get_next_event(time=interp.time_now()) for i in range(8): w_into.store(interp.space, 
i, interp.space.wrap_int(ary[i])) # XXX - hack @@ -731,40 +731,23 @@ def func(interp, s_frame, w_rcvr): if interp.space.headless.is_set(): exitFromHeadlessExecution(s_frame) - if not isinstance(w_rcvr, model.W_PointersObject) or w_rcvr.size() < 4: raise PrimitiveFailedError + old_display = interp.space.objtable['w_display'] + if isinstance(old_display, model_display.W_DisplayBitmap): + old_display.relinquish_display() + interp.space.objtable['w_display'] = w_rcvr + # TODO: figure out whether we should decide the width an report it in the SCREEN_SIZE primitive form = wrapper.FormWrapper(interp.space, w_rcvr) - w_bitmap = form.bits() - width = form.width() - height = form.height() - depth = form.depth() - - sdldisplay = None - - w_prev_display = interp.space.objtable['w_display'] - if w_prev_display: - w_prev_bitmap = w_prev_display.fetch(interp.space, 0) - if isinstance(w_prev_bitmap, model_display.W_DisplayBitmap): - sdldisplay = w_prev_bitmap.display - sdldisplay.set_video_mode(width, height, depth) + form.take_over_display() + w_display_bitmap = form.get_display_bitmap() + w_display_bitmap.take_over_display() + w_display_bitmap.flush_to_screen() - if isinstance(w_bitmap, model_display.W_DisplayBitmap): - assert (sdldisplay is None) or (sdldisplay is w_bitmap.display) - sdldisplay = w_bitmap.display - sdldisplay.set_video_mode(width, height, depth) - w_display_bitmap = w_bitmap - else: - assert isinstance(w_bitmap, model.W_WordsObject) - w_display_bitmap = form.get_display_bitmap(interp, sdldisplay) - - w_display_bitmap.flush_to_screen() if interp.image: - interp.image.lastWindowSize = (width << 16) + height - interp.space.objtable['w_display'] = w_rcvr - + interp.image.lastWindowSize = (form.width() << 16) + form.height() return w_rcvr @expose_primitive(STRING_REPLACE, unwrap_spec=[object, index1_0, index1_0, object, index1_0]) @@ -807,12 +790,12 @@ @expose_primitive(MOUSE_BUTTONS, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - btn = 
interp.space.get_display().mouse_button() + btn = interp.space.display().mouse_button() return interp.space.wrap_int(btn) @expose_primitive(KBD_NEXT, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - code = interp.space.get_display().next_keycode() + code = interp.space.display().next_keycode() if code & 0xFF == 0: return interp.space.w_nil else: @@ -820,7 +803,7 @@ @expose_primitive(KBD_PEEK, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - code = interp.space.get_display().peek_keycode() + code = interp.space.display().peek_keycode() if code & 0xFF == 0: return interp.space.w_nil else: @@ -999,7 +982,7 @@ @expose_primitive(DEFER_UPDATES, unwrap_spec=[object, bool]) def func(interp, s_frame, w_receiver, flag): - sdldisplay = interp.space.get_display() + sdldisplay = interp.space.display() sdldisplay.defer_updates(flag) return w_receiver @@ -1053,7 +1036,7 @@ @expose_primitive(SET_INTERRUPT_KEY, unwrap_spec=[object, int]) def func(interp, s_frame, w_rcvr, encoded_key): - interp.space.get_display().set_interrupt_key(interp.space, encoded_key) + interp.space.display().set_interrupt_key(interp.space, encoded_key) return w_rcvr @expose_primitive(INTERRUPT_SEMAPHORE, unwrap_spec=[object, object]) @@ -1526,7 +1509,7 @@ @expose_primitive(FORCE_DISPLAY_UPDATE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - interp.space.get_display().flip(force=True) + interp.space.display().flip(force=True) return w_rcvr # ___________________________________________________________________________ diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -404,15 +404,7 @@ assert target.pixelbuffer[i] == 0x0 def test_display_offset_computation(): - - def get_pixelbuffer(self): - return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') - display.SDLDisplay.get_pixelbuffer = get_pixelbuffer - d = display.SDLDisplay("test") - d.set_video_mode(18, 5, 1) - - dbitmap = 
model_display.W_DisplayBitmap.create(space, space.w_Array, 5, 1, d) - + dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 5, 1) assert dbitmap.compute_pos(0) == 0 assert dbitmap.compute_pos(1) == 8 assert dbitmap.size() == 5 * 8 diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -765,7 +765,7 @@ raise DisplayFlush try: - monkeypatch.setattr(space.get_display().__class__, "flip", flush_to_screen_mock) + monkeypatch.setattr(space.display().__class__, "flip", flush_to_screen_mock) with py.test.raises(DisplayFlush): prim(primitives.FORCE_DISPLAY_UPDATE, [mock_display]) finally: diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -269,15 +269,24 @@ height, store_height = make_int_getter_setter(constants.FORM_HEIGHT) depth, store_depth = make_int_getter_setter(constants.FORM_DEPTH) - def get_display_bitmap(self, interp, sdldisplay=None): + def create_display_bitmap(self): + w_display_bitmap = model_display.from_words_object(self.bits(), self) + self.store_bits(w_display_bitmap) + return w_display_bitmap + + def get_display_bitmap(self): w_bitmap = self.bits() if not isinstance(w_bitmap, model_display.W_DisplayBitmap): - w_display_bitmap = model_display.from_words_object(interp, w_bitmap, self, sdldisplay) - self.store_bits(w_display_bitmap) + w_display_bitmap = self.create_display_bitmap() else: w_display_bitmap = w_bitmap + if w_display_bitmap._depth != self.depth(): + w_display_bitmap = self.create_display_bitmap() return w_display_bitmap - + + def take_over_display(self): + self.space.display().set_video_mode(self.width(), self.height(), self.depth()) + # XXX Wrappers below are not used yet. 
class OffsetWrapper(Wrapper): offset_x = make_int_getter(0) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -121,9 +121,12 @@ elif arg in ["-P", "--process"]: headless = False elif arg in ["--hacks"]: - space.run_spy_hacks.set() + space.run_spy_hacks.activate() + elif arg in ["--invert"]: + from spyvm import model_display + model_display.invert_byte_order[0] = True elif arg in ["-S"]: - space.no_specialized_storage.set() + space.no_specialized_storage.activate() elif arg in ["-u"]: from spyvm.plugins.vmdebugging import stop_ui_process stop_ui_process() @@ -177,7 +180,7 @@ selector = compile_code(interp, w_receiver, code) s_frame = create_context(interp, w_receiver, selector, stringarg) if headless: - space.headless.set() + space.headless.activate() context = s_frame else: create_process(interp, s_frame) @@ -206,7 +209,7 @@ # registered (primitive 136 not called), so the idle process will never be left once it is entered. # TODO - Find a way to cleanly initialize the image, without executing the active_context of the image. # Instead, we want to execute our own context. Then remove this flag (and all references to it) - space.suppress_process_switch.set() + space.suppress_process_switch.activate() w_result = interp.perform( w_receiver_class, @@ -218,7 +221,7 @@ # TODO - is this expected in every image? 
if not isinstance(w_result, model.W_BytesObject) or w_result.as_string() != selector: raise error.Exit("Unexpected compilation result (probably failed to compile): %s" % result_string(w_result)) - space.suppress_process_switch.unset() + space.suppress_process_switch.deactivate() w_receiver_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() return selector From noreply at buildbot.pypy.org Tue Jul 22 19:47:48 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 19:47:48 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Finally got the 1-bit drawing correctly when width is no multiple of 32. Message-ID: <20140722174748.2AC151C0748@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r935:5dc629a29736 Date: 2014-07-22 19:47 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/5dc629a29736/ Log: Finally got the 1-bit drawing correctly when width is no multiple of 32. diff --git a/spyvm/model_display.py b/spyvm/model_display.py --- a/spyvm/model_display.py +++ b/spyvm/model_display.py @@ -1,6 +1,6 @@ from spyvm import model, constants, display -from rpython.rlib import jit +from rpython.rlib import jit, objectmodel from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rarithmetic import r_uint @@ -11,7 +11,7 @@ w_class = w_obj.getclass(space) if depth < 8: - w_display_bitmap = W_MappingDisplayBitmap(space, w_class, size, depth, form.width()) + w_display_bitmap = W_MappingDisplayBitmap(space, w_class, size, depth) elif depth == 8: w_display_bitmap = W_8BitDisplayBitmap(space, w_class, size, depth) elif depth == 16: @@ -159,45 +159,47 @@ else: self.pixelbuffer_UINT()[n] = r_uint(word) +BITS = r_uint(32) class W_MappingDisplayBitmap(W_DisplayBitmap): repr_classname = "W_MappingDisplayBitmap" - _attrs_ = ['mapping_factor', 'words_per_line', 'bits_in_last_word', 'width'] - _immutable_fields_ = ['mapping_factor', 'words_per_line', 
'bits_in_last_word', 'width'] + _attrs_ = ['words_per_line', 'bits_in_last_word', 'pitch'] + _immutable_fields_ = ['words_per_line?', 'bits_in_last_word?', 'pitch?'] - pixel_per_word = constants.BYTES_PER_WORD + def __init__(self, space, w_class, size, depth): + assert depth in [1, 2, 4] + W_DisplayBitmap.__init__(self, space, w_class, size, depth) - def __init__(self, space, w_class, size, depth, width): - assert depth in [1, 2, 4] - width = r_uint(width) - self.width = width - self.mapping_factor = display.MINIMUM_DEPTH / depth - self.words_per_line = r_uint(width / 32 + 1) - self.bits_in_last_word = width % 32 - W_DisplayBitmap.__init__(self, space, w_class, size, depth) + def take_over_display(self): + pitch = r_uint(self.display.pitch) + self.pitch = pitch + self.bits_in_last_word = pitch % BITS + self.words_per_line = r_uint((pitch - self.bits_in_last_word) / BITS) + if self.bits_in_last_word > 0: + self.words_per_line += 1 + W_DisplayBitmap.take_over_display(self) @jit.unroll_safe def set_pixelbuffer_word(self, n, word): n = r_uint(n) - word = r_uint(word) - pos = self.compute_pos(n) - buf = self.display.screen.c_pixels - - if (n+1) % self.words_per_line == 0: + if ((n+1) % self.words_per_line) == 0 and self.bits_in_last_word > 0: # This is the last word on the line. A few bits are cut off. 
bits = self.bits_in_last_word else: - bits = 32 + bits = BITS + word = r_uint(word) + pos = self.compute_pos(n) + buf = rffi.ptradd(self.display.screen.c_pixels, pos) depth = r_uint(self._depth) - rshift = 32 - depth - for i in range(r_uint(bits) / depth): + rshift = BITS - depth + for i in range(bits / depth): pixel = word >> rshift - buf[pos] = rffi.cast(rffi.UCHAR, pixel) - word <<= self._depth - pos += 1 - + buf[i] = rffi.cast(rffi.UCHAR, pixel) + word <<= depth + def compute_pos(self, n): word_on_line = n % self.words_per_line - complete_lines = r_uint((n - word_on_line) / self.words_per_line) - return complete_lines * self.width + 32*word_on_line + y = r_uint((n - word_on_line) / self.words_per_line) + x = word_on_line * BITS / r_uint(self._depth) + return y * r_uint(self.pitch) + x From noreply at buildbot.pypy.org Tue Jul 22 23:27:03 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 23:27:03 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Added comment. Message-ID: <20140722212703.53D301D2875@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r936:6f3f768d7ce8 Date: 2014-07-22 21:01 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6f3f768d7ce8/ Log: Added comment. diff --git a/spyvm/model_display.py b/spyvm/model_display.py --- a/spyvm/model_display.py +++ b/spyvm/model_display.py @@ -171,7 +171,7 @@ W_DisplayBitmap.__init__(self, space, w_class, size, depth) def take_over_display(self): - pitch = r_uint(self.display.pitch) + pitch = r_uint(self.display.pitch) # The pitch is different from the width input to SDL! 
self.pitch = pitch self.bits_in_last_word = pitch % BITS self.words_per_line = r_uint((pitch - self.bits_in_last_word) / BITS) @@ -202,4 +202,4 @@ word_on_line = n % self.words_per_line y = r_uint((n - word_on_line) / self.words_per_line) x = word_on_line * BITS / r_uint(self._depth) - return y * r_uint(self.pitch) + x + return y * self.pitch + x From noreply at buildbot.pypy.org Tue Jul 22 23:27:05 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 23:27:05 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Reverted changes file back to storage version. Accidentally committed. Message-ID: <20140722212705.C5BC91D2875@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r937:123f68a9dffe Date: 2014-07-22 21:37 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/123f68a9dffe/ Log: Reverted changes file back to storage version. Accidentally committed. diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes --- a/images/Squeak4.5-noBitBlt.changes +++ b/images/Squeak4.5-noBitBlt.changes @@ -12622,6 +12622,13 @@ self insertNewNode. ]! ! ----SNAPSHOT----{15 July 2014 . 6:10:56 pm} Squeak4.5-noBitBlt.image priorSource: 15894330! -----QUIT/NOSAVE----{21 July 2014 . 7:09:22 pm} Squeak4.5-noBitBlt.image priorSource: 15894825! - -----QUIT----{21 July 2014 . 7:10:12 pm} Squeak4.5-noBitBlt.image priorSource: 15894825! ----STARTUP----{22 July 2014 . 10:55:07 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! Smalltalk specialObjectsArray at:15! (Smalltalk specialObjectsArray at:15) depth! \ No newline at end of file +----QUIT/NOSAVE----{21 July 2014 . 4:18:39 pm} Squeak4.5-noBitBlt.image priorSource: 15894825! ----STARTUP----{21 July 2014 . 6:19:06 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:20' prior: 34321504! 
withEnsuredReceiverAndArgumentsDo: aBlock otherwise: altBlock "Grab real references to receiver and arguments. If they still exist, evaluate aBlock." "Return if my receiver has gone away" | r a | r := self receiver. r ifNil: [ ^altBlock value ]. "Make sure that my arguments haven't gone away" arguments ifNil: [ ^ altBlock value ]. a := Array withAll: arguments. a with: shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. ^aBlock value: r value: a! ! ----QUIT----{21 July 2014 . 6:20:43 pm} Squeak4.5-noBitBlt.image priorSource: 15894825! + +----QUIT/NOSAVE----{21 July 2014 . 4:21:36 pm} Squeak4.5-noBitBlt.image priorSource: 15895702! ----STARTUP----{21 July 2014 . 6:21:54 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !WeakMessageSend methodsFor: 'comparing' stamp: 'ag 7/21/2014 18:22' prior: 33144463! = anObject "Compare equal to equivalent MessageSend" ^ anObject isMessageSend and: [self receiver == anObject receiver and: [selector == anObject selector and: [(Array withAll: self arguments) = (Array withAll: anObject arguments)]]] ! ! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:23' prior: 49449636! withEnsuredReceiverAndArgumentsDo: aBlock otherwise: altBlock "Grab real references to receiver and arguments. If they still exist, evaluate aBlock." "Return if my receiver has gone away" | r a | r := self receiver. r ifNil: [ ^altBlock value ]. "Make sure that my arguments haven't gone away" a := Array withAll: self arguments. a with: shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. ^aBlock value: r value: a! ! ----QUIT----{21 July 2014 . 6:23:49 pm} Squeak4.5-noBitBlt.image priorSource: 15895702! ----STARTUP----{21 July 2014 . 6:31:05 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !WeakMessageSend methodsFor: 'accessing' stamp: 'ag 7/21/2014 18:31'! shouldBeNil ^ shouldBeNil ifNil: [ Array new ]! ! 
!WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:31' prior: 33148869! isAnyArgumentGarbage "Make sure that my arguments haven't gone away" arguments ifNotNil: [ arguments with: self shouldBeNil do: [ :arg :flag | (flag not and: [arg isNil]) ifTrue: [^true] ] ]. ^false ! ! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:31' prior: 49450841! withEnsuredReceiverAndArgumentsDo: aBlock otherwise: altBlock "Grab real references to receiver and arguments. If they still exist, evaluate aBlock." "Return if my receiver has gone away" | r a | r := self receiver. r ifNil: [ ^altBlock value ]. "Make sure that my arguments haven't gone away" a := Array withAll: self arguments. a with: self shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. ^aBlock value: r value: a! ! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:32' prior: 34360552! withEnsuredReceiverAndArgumentsDo: aBlock withEnoughArguments: anArray otherwise: altBlock "call the selector with enough arguments from arguments and anArray" | r selfArgs enoughArgs | r := self receiver. r ifNil: [ ^altBlock value ]. selfArgs := self arguments. selfArgs with: self shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. enoughArgs := Array new: selector numArgs. enoughArgs replaceFrom: 1 to: ( selfArgs size min: enoughArgs size) with: selfArgs startingAt: 1. enoughArgs size > selfArgs size ifTrue: [ enoughArgs replaceFrom: selfArgs size + 1 to: (selfArgs size + anArray size min: enoughArgs size) with: anArray startingAt: 1. ]. ^aBlock value: r value: enoughArgs! ! ----QUIT----{21 July 2014 . 6:32:32 pm} Squeak4.5-noBitBlt.image priorSource: 15896872! + +----STARTUP----{21 July 2014 . 4:32:52 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! + + +1+1! + +----QUIT/NOSAVE----{21 July 2014 . 4:33:15 pm} Squeak4.5-noBitBlt.image priorSource: 15898877! 
\ No newline at end of file From noreply at buildbot.pypy.org Tue Jul 22 23:27:06 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 23:27:06 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Removed --invert flag. Message-ID: <20140722212706.F09B21D2875@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r938:39973aaf497d Date: 2014-07-22 21:54 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/39973aaf497d/ Log: Removed --invert flag. diff --git a/spyvm/model_display.py b/spyvm/model_display.py --- a/spyvm/model_display.py +++ b/spyvm/model_display.py @@ -24,12 +24,6 @@ return w_display_bitmap -invert_byte_order = [False] - -def invert(): - inv = invert_byte_order[0] - return jit.promote(inv) - class W_DisplayBitmap(model.W_AbstractObjectWithClassReference): _attrs_ = ['pixelbuffer_words', '_real_depth_buffer', '_realsize', 'display', '_depth'] _immutable_fields_ = ['pixelbuffer_words?', '_real_depth_buffer', '_realsize', 'display', '_depth'] @@ -148,16 +142,13 @@ repr_classname = "W_8BitDisplayBitmap" def set_pixelbuffer_word(self, n, word): - if invert(): - # Invert the byte-order. - self.pixelbuffer_UINT()[n] = r_uint( - (word >> 24) | - ((word >> 8) & 0x0000ff00) | - ((word << 8) & 0x00ff0000) | - (word << 24) - ) - else: - self.pixelbuffer_UINT()[n] = r_uint(word) + # Invert the byte-order. 
+ self.pixelbuffer_UINT()[n] = r_uint( + (word >> 24) | + ((word >> 8) & 0x0000ff00) | + ((word << 8) & 0x00ff0000) | + (word << 24) + ) BITS = r_uint(32) class W_MappingDisplayBitmap(W_DisplayBitmap): diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -122,9 +122,6 @@ headless = False elif arg in ["--hacks"]: space.run_spy_hacks.activate() - elif arg in ["--invert"]: - from spyvm import model_display - model_display.invert_byte_order[0] = True elif arg in ["-S"]: space.no_specialized_storage.activate() elif arg in ["-u"]: From noreply at buildbot.pypy.org Tue Jul 22 23:27:08 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 23:27:08 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Cleanups. Message-ID: <20140722212708.263171D2875@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r939:e363f2952c1e Date: 2014-07-22 21:58 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/e363f2952c1e/ Log: Cleanups. 
diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -74,11 +74,7 @@ self.bpp = rffi.getintfield(self.screen.c_format, 'c_BytesPerPixel') self.pitch = rffi.getintfield(self.screen, 'c_pitch') - def get_pixelbuffer_UCHAR(self): - # return jit.promote(rffi.cast(RSDL.Uint8P, self.screen.c_pixels)) - return jit.promote(self.screen.c_pixels) - - def get_pixelbuffer_UINT(self): + def get_pixelbuffer(self): return jit.promote(rffi.cast(RSDL.Uint32P, self.screen.c_pixels)) def defer_updates(self, flag): diff --git a/spyvm/model_display.py b/spyvm/model_display.py --- a/spyvm/model_display.py +++ b/spyvm/model_display.py @@ -61,14 +61,14 @@ # === Graphics - def pixelbuffer_UINT(self): - return self.display.get_pixelbuffer_UINT() + def pixelbuffer(self): + return self.display.get_pixelbuffer() def pixelbuffer_UCHAR(self): return self.display.get_pixelbuffer_UCHAR() def set_pixelbuffer_word(self, n, word): - self.pixelbuffer_UINT()[n] = word + self.pixelbuffer()[n] = word def take_over_display(self): # Make sure FrameWrapper.take_over_display() is called first for the correct Frame object. @@ -135,7 +135,7 @@ ((msb & mask) << 11) ) - self.pixelbuffer_UINT()[n] = r_uint(lsb | (msb << 16)) + self.pixelbuffer()[n] = r_uint(lsb | (msb << 16)) class W_8BitDisplayBitmap(W_DisplayBitmap): @@ -143,7 +143,7 @@ def set_pixelbuffer_word(self, n, word): # Invert the byte-order. - self.pixelbuffer_UINT()[n] = r_uint( + self.pixelbuffer()[n] = r_uint( (word >> 24) | ((word >> 8) & 0x0000ff00) | ((word << 8) & 0x00ff0000) | From noreply at buildbot.pypy.org Tue Jul 22 23:27:10 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 23:27:10 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Merged. 
Message-ID: <20140722212710.B48211D2875@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r940:ad963bc84269 Date: 2014-07-22 22:36 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ad963bc84269/ Log: Merged. diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -54,6 +54,11 @@ BLKCLSR_NUMARGS = 2 BLKCLSR_SIZE = 3 +FORM_BITS = 0 +FORM_WIDTH = 1 +FORM_HEIGHT = 2 +FORM_DEPTH = 3 + # ___________________________________________________________________________ # Miscellaneous constants diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -35,12 +35,13 @@ WindowEventPaint = 5 WindowEventStinks = 6 +MINIMUM_DEPTH = 8 class SDLDisplay(object): _attrs_ = ["screen", "width", "height", "depth", "surface", "has_surface", "mouse_position", "button", "key", "interrupt_key", "_defer_updates", - "_deferred_event", "pixelbuffer"] - _immutable_fields_ = ["pixelbuffer?"] + "_deferred_event", "bpp", "pitch"] + #_immutable_fields_ = ["pixelbuffer?"] def __init__(self, title): assert RSDL.Init(RSDL.INIT_VIDEO) >= 0 @@ -58,26 +59,27 @@ def set_video_mode(self, w, h, d): assert w > 0 and h > 0 assert d in [1, 2, 4, 8, 16, 32] + if d < MINIMUM_DEPTH: + d = MINIMUM_DEPTH self.width = w self.height = h self.depth = d flags = RSDL.HWPALETTE | RSDL.RESIZABLE | RSDL.ASYNCBLIT | RSDL.DOUBLEBUF - if d < 8: - d = 8 self.screen = RSDL.SetVideoMode(w, h, d, flags) if not self.screen: print "Could not open display at depth %d" % d raise RuntimeError - elif d == 8: + elif d == MINIMUM_DEPTH: self.set_squeak_colormap(self.screen) - self.pixelbuffer = rffi.cast(rffi.UINTP, self.screen.c_pixels) - + self.bpp = rffi.getintfield(self.screen.c_format, 'c_BytesPerPixel') + self.pitch = rffi.getintfield(self.screen, 'c_pitch') + def get_pixelbuffer(self): - return jit.promote(self.pixelbuffer) - + return jit.promote(rffi.cast(RSDL.Uint32P, self.screen.c_pixels)) + def 
defer_updates(self, flag): self._defer_updates = flag - + def flip(self, force=False): if (not self._defer_updates) or force: RSDL.Flip(self.screen) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -23,7 +23,7 @@ class Interpreter(object): - _immutable_fields_ = ["space", "image", "image_name", + _immutable_fields_ = ["space", "image", "interrupt_counter_size", "startup_time", "evented", "interrupts"] @@ -34,12 +34,11 @@ get_printable_location=get_printable_location ) - def __init__(self, space, image=None, image_name="", + def __init__(self, space, image=None, trace=False, evented=True, interrupts=True): # === Initialize immutable variables self.space = space self.image = image - self.image_name = image_name if image: self.startup_time = image.startup_time else: @@ -1009,9 +1008,8 @@ # in order to enable tracing/jumping for message sends etc. def debugging(): def stepping_debugger_init(original): - def meth(self, space, image=None, image_name="", trace=False): - return_value = original(self, space, image=image, - image_name=image_name, trace=trace) + def meth(self, space, image=None, trace=False): + return_value = original(self, space, image=image, trace=trace) # ############################################################## self.message_stepping = False diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -17,7 +17,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.unroll import unrolling_iterable -from spyvm import error, model, objspace +from spyvm import error, model, model_display, objspace, wrapper sqInt = rffi.INT sqLong = rffi.LONG @@ -242,7 +242,7 @@ return w_object.convert_to_c_layout() elif isinstance(w_object, model.W_BytesObject): return rffi.cast(sqIntArrayPtr, w_object.convert_to_c_layout()) - elif isinstance(w_object, model.W_DisplayBitmap): + elif isinstance(w_object, 
model_display.W_DisplayBitmap): return rffi.cast(sqIntArrayPtr, w_object.convert_to_c_layout()) else: raise ProxyFunctionFailed @@ -529,7 +529,7 @@ @expose_on_virtual_machine_proxy([], int) def fullDisplayUpdate(): w_display = IProxy.space.objtable['w_display'] - if isinstance(w_display, model.W_DisplayBitmap): + if isinstance(w_display, model_display.W_DisplayBitmap): w_display.update_from_buffer() w_display.flush_to_screen() return 0 @@ -559,16 +559,8 @@ # display memory space = IProxy.space if w_dest_form.is_same_object(space.objtable['w_display']): - w_bitmap = w_dest_form.fetch(space, 0) - if not isinstance(w_bitmap, model.W_DisplayBitmap): - assert isinstance(w_bitmap, model.W_WordsObject) - w_display_bitmap = w_bitmap.as_display_bitmap( - w_dest_form, - IProxy.interp, - sdldisplay=None - ) - else: - w_display_bitmap = w_bitmap + form = wrapper.FormWrapper(space, w_dest_form) + w_display_bitmap = form.get_display_bitmap() w_display_bitmap.update_from_buffer() w_display_bitmap.flush_to_screen() return 0 @@ -1008,7 +1000,7 @@ self.argcount = 0 self.w_method = None self.fail_reason = 0 - self.trace_proxy.unset() + self.trace_proxy.deactivate() def call(self, signature, interp, s_frame, argcount, w_method): self.initialize_from_call(signature, interp, s_frame, argcount, w_method) @@ -1050,7 +1042,7 @@ self.argcount = argcount self.w_method = w_method self.space = interp.space - self.trace_proxy.set_to(interp.trace_proxy.is_set()) + self.trace_proxy.set(interp.trace_proxy.is_set()) # ensure that space.w_nil gets the first possible oop self.object_to_oop(self.space.w_nil) diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -987,26 +987,6 @@ c_words[i] = intmask(old_words[i]) self.words = None return c_words - - def as_display_bitmap(self, w_form, interp, sdldisplay=None): - width = interp.space.unwrap_int(w_form.fetch(interp.space, 1)) - height = interp.space.unwrap_int(w_form.fetch(interp.space, 2)) - depth = 
interp.space.unwrap_int(w_form.fetch(interp.space, 3)) - if not sdldisplay: - from spyvm import display - sdldisplay = display.SDLDisplay(interp.image_name) - sdldisplay.set_video_mode(width, height, depth) - w_display_bitmap = W_DisplayBitmap.create( - interp.space, - self.getclass(interp.space), - self.size(), - depth, - sdldisplay - ) - for idx in range(self.size()): - w_display_bitmap.setword(idx, self.getword(idx)) - w_form.store(interp.space, 0, w_display_bitmap) - return w_display_bitmap def _become(self, w_other): assert isinstance(w_other, W_WordsObject) @@ -1019,144 +999,6 @@ if self.words is None: lltype.free(self.c_words, flavor='raw') -class W_DisplayBitmap(W_AbstractObjectWithClassReference): - _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display', '_depth'] - _immutable_fields_ = ['_realsize', 'display', '_depth'] - repr_classname = "W_DisplayBitmap" - - pixelbuffer = None - - @staticmethod - def create(space, w_class, size, depth, display): - if depth < 8: - return W_MappingDisplayBitmap(space, w_class, size * (8 / depth), depth, display) - elif depth == 8: - return W_8BitDisplayBitmap(space, w_class, size, depth, display) - elif depth == 16: - return W_16BitDisplayBitmap(space, w_class, size, depth, display) - else: - return W_DisplayBitmap(space, w_class, size, depth, display) - - def repr_content(self): - return "len=%d depth=%d %s" % (self.size(), self._depth, self.str_content()) - - def __init__(self, space, w_class, size, depth, display): - W_AbstractObjectWithClassReference.__init__(self, space, w_class) - self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') - self._realsize = size - self.display = display - self._depth = depth - - def at0(self, space, index0): - val = self.getword(index0) - return space.wrap_uint(val) - - def atput0(self, space, index0, w_value): - word = space.unwrap_uint(w_value) - self.setword(index0, word) - - def flush_to_screen(self): - self.display.flip() - - def 
size(self): - return self._realsize - - def invariant(self): - return False - - def clone(self, space): - w_result = W_WordsObject(space, self.getclass(space), self._realsize) - n = 0 - while n < self._realsize: - w_result.words[n] = self.getword(n) - n += 1 - return w_result - - def getword(self, n): - assert self.size() > n >= 0 - return self._real_depth_buffer[n] - - def setword(self, n, word): - self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = word - - def is_array_object(self): - return True - - def update_from_buffer(self): - for i in range(self._realsize): - self.setword(i, self.getword(i)) - - def convert_to_c_layout(self): - return self._real_depth_buffer - - def can_become(self, w_other): - # TODO - implement _become() for this class. Impossible due to _immutable_fields_? - return False - - def __del__(self): - lltype.free(self._real_depth_buffer, flavor='raw') - - -class W_16BitDisplayBitmap(W_DisplayBitmap): - repr_classname = "W_16BitDisplayBitmap" - def setword(self, n, word): - self._real_depth_buffer[n] = word - mask = 0b11111 - lsb = (r_uint(word) & r_uint(0xffff0000)) >> 16 - msb = (r_uint(word) & r_uint(0x0000ffff)) - - lsb = ( - ((lsb >> 10) & mask) | - (((lsb >> 5) & mask) << 6) | - ((lsb & mask) << 11) - ) - msb = ( - ((msb >> 10) & mask) | - (((msb >> 5) & mask) << 6) | - ((msb & mask) << 11) - ) - - self.display.get_pixelbuffer()[n] = r_uint(lsb | (msb << 16)) - - -class W_8BitDisplayBitmap(W_DisplayBitmap): - repr_classname = "W_8BitDisplayBitmap" - def setword(self, n, word): - self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = r_uint( - (word >> 24) | - ((word >> 8) & 0x0000ff00) | - ((word << 8) & 0x00ff0000) | - (word << 24) - ) - -NATIVE_DEPTH = 8 -class W_MappingDisplayBitmap(W_DisplayBitmap): - repr_classname = "W_MappingDisplayBitmap" - @jit.unroll_safe - def setword(self, n, word): - self._real_depth_buffer[n] = word - word = r_uint(word) - pos = self.compute_pos(n) - assert self._depth 
<= 4 - rshift = 32 - self._depth - for i in xrange(8 / self._depth): - if pos >= self.size(): - return - mapword = r_uint(0) - for i in xrange(4): - pixel = r_uint(word) >> rshift - mapword |= (r_uint(pixel) << (i * 8)) - word <<= self._depth - self.display.get_pixelbuffer()[pos] = mapword - pos += 1 - - def compute_pos(self, n): - return n * (NATIVE_DEPTH / self._depth) - -# XXX Shouldn't compiledmethod have class reference for subclassed compiled -# methods? class W_CompiledMethod(W_AbstractObjectWithIdentityHash): """My instances are methods suitable for interpretation by the virtual machine. This is the only class in the system whose instances intermix both indexable pointer fields and indexable integer fields. @@ -1467,8 +1309,3 @@ if isinstance(s_class, ClassShadow): return "%s >> #%s" % (s_class.getname(), self.lookup_selector) return "#%s" % self.lookup_selector - -class DetachingShadowError(Exception): - def __init__(self, old_shadow, new_shadow_class): - self.old_shadow = old_shadow - self.new_shadow_class = new_shadow_class diff --git a/spyvm/model_display.py b/spyvm/model_display.py new file mode 100644 --- /dev/null +++ b/spyvm/model_display.py @@ -0,0 +1,196 @@ + +from spyvm import model, constants, display +from rpython.rlib import jit, objectmodel +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.rarithmetic import r_uint + +def from_words_object(w_obj, form): + depth = form.depth() + space = form.space + size = w_obj.size() + w_class = w_obj.getclass(space) + + if depth < 8: + w_display_bitmap = W_MappingDisplayBitmap(space, w_class, size, depth) + elif depth == 8: + w_display_bitmap = W_8BitDisplayBitmap(space, w_class, size, depth) + elif depth == 16: + w_display_bitmap = W_16BitDisplayBitmap(space, w_class, size, depth) + else: + w_display_bitmap = W_DisplayBitmap(space, w_class, size, depth) + + for idx in range(size): + w_display_bitmap.setword(idx, w_obj.getword(idx)) + + return w_display_bitmap + +class 
W_DisplayBitmap(model.W_AbstractObjectWithClassReference): + _attrs_ = ['pixelbuffer_words', '_real_depth_buffer', '_realsize', 'display', '_depth'] + _immutable_fields_ = ['pixelbuffer_words?', '_real_depth_buffer', '_realsize', 'display', '_depth'] + repr_classname = "W_DisplayBitmap" + + def __init__(self, space, w_class, size, depth): + model.W_AbstractObjectWithClassReference.__init__(self, space, w_class) + self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') + self._realsize = size + self._depth = depth + self.display = space.display() + self.relinquish_display() + + # === Object access + + def at0(self, space, index0): + val = self.getword(index0) + return space.wrap_uint(val) + + def atput0(self, space, index0, w_value): + word = space.unwrap_uint(w_value) + self.setword(index0, word) + + def getword(self, n): + assert self.size() > n >= 0 + return self._real_depth_buffer[n] + + def setword(self, n, word): + self._real_depth_buffer[n] = word + if self.pixelbuffer_words > 0: + self.set_pixelbuffer_word(n, word) + + def size(self): + return self._realsize + + # === Graphics + + def pixelbuffer(self): + return self.display.get_pixelbuffer() + + def pixelbuffer_UCHAR(self): + return self.display.get_pixelbuffer_UCHAR() + + def set_pixelbuffer_word(self, n, word): + self.pixelbuffer()[n] = word + + def take_over_display(self): + # Make sure FrameWrapper.take_over_display() is called first for the correct Frame object. 
+ pixel_per_word = constants.BYTES_PER_WORD / (self.display.depth / 8) + self.pixelbuffer_words = self.display.width * self.display.height / pixel_per_word + self.update_from_buffer() + + def relinquish_display(self): + self.pixelbuffer_words = 0 + + def flush_to_screen(self): + self.display.flip() + + def update_from_buffer(self): + if self.pixelbuffer_words > 0: + for i in range(self.size()): + self.set_pixelbuffer_word(i, self.getword(i)) + + # === Misc + + def invariant(self): + return False + + def clone(self, space): + w_result = model.W_WordsObject(space, self.getclass(space), self.size()) + for n in range(self.size()): + w_result.setword(n, self.getword(n)) + return w_result + + def is_array_object(self): + return True + + def convert_to_c_layout(self): + return self._real_depth_buffer + + def can_become(self, w_other): + # TODO - implement _become() for this class. Impossible due to _immutable_fields_? + return False + + def __del__(self): + lltype.free(self._real_depth_buffer, flavor='raw') + + def repr_content(self): + return "len=%d depth=%d %s" % (self.size(), self._depth, self.str_content()) + +class W_16BitDisplayBitmap(W_DisplayBitmap): + + repr_classname = "W_16BitDisplayBitmap" + + def set_pixelbuffer_word(self, n, word): + mask = 0b11111 + lsb = (r_uint(word) & r_uint(0xffff0000)) >> 16 + msb = (r_uint(word) & r_uint(0x0000ffff)) + + # Invert order of rgb-components + lsb = ( + ((lsb >> 10) & mask) | + (((lsb >> 5) & mask) << 6) | + ((lsb & mask) << 11) + ) + msb = ( + ((msb >> 10) & mask) | + (((msb >> 5) & mask) << 6) | + ((msb & mask) << 11) + ) + + self.pixelbuffer()[n] = r_uint(lsb | (msb << 16)) + +class W_8BitDisplayBitmap(W_DisplayBitmap): + + repr_classname = "W_8BitDisplayBitmap" + + def set_pixelbuffer_word(self, n, word): + # Invert the byte-order. 
+ self.pixelbuffer()[n] = r_uint( + (word >> 24) | + ((word >> 8) & 0x0000ff00) | + ((word << 8) & 0x00ff0000) | + (word << 24) + ) + +BITS = r_uint(32) +class W_MappingDisplayBitmap(W_DisplayBitmap): + + repr_classname = "W_MappingDisplayBitmap" + _attrs_ = ['words_per_line', 'bits_in_last_word', 'pitch'] + _immutable_fields_ = ['words_per_line?', 'bits_in_last_word?', 'pitch?'] + + def __init__(self, space, w_class, size, depth): + assert depth in [1, 2, 4] + W_DisplayBitmap.__init__(self, space, w_class, size, depth) + + def take_over_display(self): + pitch = r_uint(self.display.pitch) # The pitch is different from the width input to SDL! + self.pitch = pitch + self.bits_in_last_word = pitch % BITS + self.words_per_line = r_uint((pitch - self.bits_in_last_word) / BITS) + if self.bits_in_last_word > 0: + self.words_per_line += 1 + W_DisplayBitmap.take_over_display(self) + + @jit.unroll_safe + def set_pixelbuffer_word(self, n, word): + n = r_uint(n) + if ((n+1) % self.words_per_line) == 0 and self.bits_in_last_word > 0: + # This is the last word on the line. A few bits are cut off. 
+ bits = self.bits_in_last_word + else: + bits = BITS + + word = r_uint(word) + pos = self.compute_pos(n) + buf = rffi.ptradd(self.display.screen.c_pixels, pos) + depth = r_uint(self._depth) + rshift = BITS - depth + for i in range(bits / depth): + pixel = word >> rshift + buf[i] = rffi.cast(rffi.UCHAR, pixel) + word <<= depth + + def compute_pos(self, n): + word_on_line = n % self.words_per_line + y = r_uint((n - word_on_line) / self.words_per_line) + x = word_on_line * BITS / r_uint(self._depth) + return y * self.pitch + x diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,30 +1,47 @@ import os -from spyvm import constants, model, shadow, wrapper, version +from spyvm import constants, model, model_display, shadow, wrapper, version, display from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError from rpython.rlib import jit, rpath -from rpython.rlib.objectmodel import instantiate, specialize +from rpython.rlib.objectmodel import instantiate, specialize, import_from_mixin from rpython.rlib.rarithmetic import intmask, r_uint, int_between -class ConstantFlag(object): - """Boolean flag that can be edited, but will be promoted +class ConstantMixin(object): + """Mixin for constant values that can be edited, but will be promoted to a constant when jitting.""" - def __init__(self, set_initially=False): - self.flag = [set_initially] + def __init__(self, initial_value = None): + if initial_value is None: + initial_value = self.default_value + self.value = [initial_value] + def set(self, value): + self.value[0] = value + + def get(self): + value = jit.promote(self.value[0]) + return value + +class ConstantFlag(object): + import_from_mixin(ConstantMixin) + default_value = False def is_set(self): - flag = jit.promote(self.flag[0]) - return flag + return self.get() + def activate(self): + self.set(True) + def deactivate(self): + self.set(False) + +class ConstantString(object): + 
import_from_mixin(ConstantMixin) + default_value = "" + def get(self): + # Promoting does not work on strings... + return self.value[0] - def set(self): - self.flag[0] = True - - def unset(self): - self.flag[0] = False - - def set_to(self, flag): - self.flag[0] = flag +class ConstantObject(object): + import_from_mixin(ConstantMixin) + default_value = None class ObjSpace(object): def __init__(self): @@ -33,12 +50,14 @@ self.no_specialized_storage = ConstantFlag() # This is a hack; see compile_code() in targetimageloadingsmalltalk.py self.suppress_process_switch = ConstantFlag() + self.run_spy_hacks = ConstantFlag() self.headless = ConstantFlag() self.classtable = {} self.objtable = {} - self._executable_path = [""] # XXX: we cannot set the attribute - # directly on the frozen objectspace + self._executable_path = ConstantString() + self._image_name = ConstantString() + self._display = ConstantObject() # Create the nil object. # Circumvent the constructor because nil is already referenced there. 
@@ -61,11 +80,12 @@ break return rpath.rabspath(executable) - def runtime_setup(self, executable): + def runtime_setup(self, executable, image_name): fullpath = rpath.rabspath(self.find_executable(executable)) i = fullpath.rfind(os.path.sep) + 1 assert i > 0 - self._executable_path[0] = fullpath[:i] + self._executable_path.set(fullpath[:i]) + self._image_name.set(image_name) def populate_special_objects(self, specials): for name, idx in constants.objects_in_special_object_table.items(): @@ -74,9 +94,6 @@ self.objtable[name] = specials[idx] # XXX this is kind of hacky, but I don't know where else to get Metaclass self.classtable["w_Metaclass"] = self.w_SmallInteger.w_class.w_class - - def executable_path(self): - return self._executable_path[0] def add_bootstrap_class(self, name, cls): self.classtable[name] = cls @@ -124,14 +141,8 @@ name = "w_" + name if not name in self.objtable: self.add_bootstrap_object(name, None) - - @specialize.arg(1) - def get_special_selector(self, selector): - i0 = constants.find_selectorindex(selector) - self.w_special_selectors.as_cached_object_get_shadow(self) - return self.w_special_selectors.fetch(self, i0) - # methods for wrapping and unwrapping stuff + # ============= Methods for wrapping and unwrapping stuff ============= def wrap_int(self, val): from spyvm import constants @@ -233,14 +244,30 @@ return [w_array.at0(self, i) for i in range(w_array.size())] - def get_display(self): - w_display = self.objtable['w_display'] - if w_display: - w_bitmap = w_display.fetch(self, 0) - if isinstance(w_bitmap, model.W_DisplayBitmap): - return w_bitmap.display - raise PrimitiveFailedError("No display") - + # ============= Access to static information ============= + + @specialize.arg(1) + def get_special_selector(self, selector): + i0 = constants.find_selectorindex(selector) + self.w_special_selectors.as_cached_object_get_shadow(self) + return self.w_special_selectors.fetch(self, i0) + + def executable_path(self): + return 
self._executable_path.get() + + def image_name(self): + return self._image_name.get() + + def display(self): + disp = self._display.get() + if disp is None: + # Create lazy to allow headless execution. + disp = display.SDLDisplay(self.image_name()) + self._display.set(disp) + return disp + + # ============= Other Methods ============= + def _freeze_(self): return True diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -1,4 +1,4 @@ -from spyvm import model +from spyvm import model_display, model from spyvm.error import PrimitiveFailedError from spyvm.shadow import AbstractCachingShadow from spyvm.plugins.plugin import Plugin @@ -32,7 +32,7 @@ s_frame.push(interp.space.wrap_int(s_bitblt.bitCount)) elif w_dest_form.is_same_object(space.objtable['w_display']): w_bitmap = w_dest_form.fetch(space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) + assert isinstance(w_bitmap, model_display.W_DisplayBitmap) w_bitmap.flush_to_screen() return w_rcvr @@ -741,7 +741,7 @@ self.w_bits = self.fetch(0) if self.w_bits.is_nil(self.space): return - if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): + if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model_display.W_DisplayBitmap)): return self.width = self.intOrIfNil(self.fetch(1), 0) self.height = self.intOrIfNil(self.fetch(2), 0) diff --git a/spyvm/plugins/vmdebugging.py b/spyvm/plugins/vmdebugging.py --- a/spyvm/plugins/vmdebugging.py +++ b/spyvm/plugins/vmdebugging.py @@ -20,12 +20,12 @@ @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def trace_proxy(interp, s_frame, w_rcvr): - interp.trace_proxy.set() + interp.trace_proxy.activate() return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def untrace_proxy(interp, s_frame, w_rcvr): - interp.trace_proxy.unset() + interp.trace_proxy.deactivate() return w_rcvr 
@DebuggingPlugin.expose_primitive(unwrap_spec=[object]) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -2,7 +2,7 @@ import inspect import math import operator -from spyvm import model, shadow, error, constants, display +from spyvm import model, model_display, shadow, error, constants, display from spyvm.error import PrimitiveFailedError, PrimitiveNotYetWrittenError from spyvm import wrapper @@ -644,7 +644,7 @@ @expose_primitive(MOUSE_POINT, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - x, y = interp.space.get_display().mouse_point() + x, y = interp.space.display().mouse_point() w_point = model.W_PointersObject(interp.space, interp.space.w_Point, 2) w_point.store(interp.space, 0, interp.space.wrap_int(x)) w_point.store(interp.space, 1, interp.space.wrap_int(y)) @@ -656,7 +656,7 @@ def func(interp, s_frame, w_rcvr, w_into): if not interp.evented: raise PrimitiveFailedError() - ary = interp.space.get_display().get_next_event(time=interp.time_now()) + ary = interp.space.display().get_next_event(time=interp.time_now()) for i in range(8): w_into.store(interp.space, i, interp.space.wrap_int(ary[i])) # XXX - hack @@ -676,7 +676,7 @@ w_display = interp.space.objtable['w_display'] if w_dest_form.is_same_object(w_display): w_bitmap = w_display.fetch(interp.space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) + assert isinstance(w_bitmap, model_display.W_DisplayBitmap) w_bitmap.flush_to_screen() return w_rcvr except shadow.MethodNotFound: @@ -731,45 +731,23 @@ def func(interp, s_frame, w_rcvr): if interp.space.headless.is_set(): exitFromHeadlessExecution(s_frame) - if not isinstance(w_rcvr, model.W_PointersObject) or w_rcvr.size() < 4: raise PrimitiveFailedError - # the fields required are bits (a pointer to a Bitmap), width, height, depth - - # XXX: TODO get the initial image TODO: figure out whether we - # should decide the width an report it in the other SCREEN_SIZE - w_bitmap = 
w_rcvr.fetch(interp.space, 0) - width = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 1)) - height = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 2)) - depth = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 3)) - - sdldisplay = None - - w_prev_display = interp.space.objtable['w_display'] - if w_prev_display: - w_prev_bitmap = w_prev_display.fetch(interp.space, 0) - if isinstance(w_prev_bitmap, model.W_DisplayBitmap): - sdldisplay = w_prev_bitmap.display - sdldisplay.set_video_mode(width, height, depth) - - if isinstance(w_bitmap, model.W_DisplayBitmap): - assert (sdldisplay is None) or (sdldisplay is w_bitmap.display) - sdldisplay = w_bitmap.display - sdldisplay.set_video_mode(width, height, depth) - w_display_bitmap = w_bitmap - else: - assert isinstance(w_bitmap, model.W_WordsObject) - w_display_bitmap = w_bitmap.as_display_bitmap( - w_rcvr, - interp, - sdldisplay=sdldisplay - ) - + + old_display = interp.space.objtable['w_display'] + if isinstance(old_display, model_display.W_DisplayBitmap): + old_display.relinquish_display() + interp.space.objtable['w_display'] = w_rcvr + + # TODO: figure out whether we should decide the width an report it in the SCREEN_SIZE primitive + form = wrapper.FormWrapper(interp.space, w_rcvr) + form.take_over_display() + w_display_bitmap = form.get_display_bitmap() + w_display_bitmap.take_over_display() w_display_bitmap.flush_to_screen() + if interp.image: - interp.image.lastWindowSize = (width << 16) + height - interp.space.objtable['w_display'] = w_rcvr - + interp.image.lastWindowSize = (form.width() << 16) + form.height() return w_rcvr @expose_primitive(STRING_REPLACE, unwrap_spec=[object, index1_0, index1_0, object, index1_0]) @@ -812,12 +790,12 @@ @expose_primitive(MOUSE_BUTTONS, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - btn = interp.space.get_display().mouse_button() + btn = interp.space.display().mouse_button() return interp.space.wrap_int(btn) @expose_primitive(KBD_NEXT, unwrap_spec=[object]) def 
func(interp, s_frame, w_rcvr): - code = interp.space.get_display().next_keycode() + code = interp.space.display().next_keycode() if code & 0xFF == 0: return interp.space.w_nil else: @@ -825,7 +803,7 @@ @expose_primitive(KBD_PEEK, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - code = interp.space.get_display().peek_keycode() + code = interp.space.display().peek_keycode() if code & 0xFF == 0: return interp.space.w_nil else: @@ -864,8 +842,8 @@ @expose_primitive(EXIT_TO_DEBUGGER, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - if not objectmodel.we_are_translated(): - import pdb; pdb.set_trace() + if interp.space.headless.is_set(): + exitFromHeadlessExecution(s_frame, "EXIT_TO_DEBUGGER") raise PrimitiveNotYetWrittenError() @expose_primitive(CHANGE_CLASS, unwrap_spec=[object, object], no_result=True) @@ -987,7 +965,7 @@ def func(interp, s_frame, argument_count): if argument_count == 0: s_frame.pop() - return interp.space.wrap_string(interp.image_name) + return interp.space.wrap_string(interp.space.image_name()) elif argument_count == 1: pass # XXX raise PrimitiveFailedError @@ -1004,7 +982,7 @@ @expose_primitive(DEFER_UPDATES, unwrap_spec=[object, bool]) def func(interp, s_frame, w_receiver, flag): - sdldisplay = interp.space.get_display() + sdldisplay = interp.space.display() sdldisplay.defer_updates(flag) return w_receiver @@ -1058,7 +1036,7 @@ @expose_primitive(SET_INTERRUPT_KEY, unwrap_spec=[object, int]) def func(interp, s_frame, w_rcvr, encoded_key): - interp.space.get_display().set_interrupt_key(interp.space, encoded_key) + interp.space.display().set_interrupt_key(interp.space, encoded_key) return w_rcvr @expose_primitive(INTERRUPT_SEMAPHORE, unwrap_spec=[object, object]) @@ -1142,7 +1120,7 @@ raise PrimitiveFailedError for i in xrange(w_arg.size()): w_arg.setchar(i, chr(new_value)) - elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, model.W_DisplayBitmap): + elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, 
model_display.W_DisplayBitmap): for i in xrange(w_arg.size()): w_arg.setword(i, new_value) else: @@ -1531,7 +1509,7 @@ @expose_primitive(FORCE_DISPLAY_UPDATE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - interp.space.get_display().flip(force=True) + interp.space.display().flip(force=True) return w_rcvr # ___________________________________________________________________________ diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -386,12 +386,13 @@ self.startup_time = time.time() def run_spy_hacks(self, space): - pass - # w_display = space.objtable["w_display"] - # if w_display is not None and not w_display.is_nil(space): - # if space.unwrap_int(w_display.fetch(space, 3)) < 8: - # # non-native indexed color depth not well supported - # w_display.store(space, 3, space.wrap_int(8)) + if not space.run_spy_hacks.is_set(): + return + w_display = space.objtable["w_display"] + if w_display is not None and not w_display.is_nil(space): + if space.unwrap_int(w_display.fetch(space, 3)) < 8: + # non-native indexed color depth not well supported + w_display.store(space, 3, space.wrap_int(8)) def find_symbol(self, space, reader, symbol): w_dnu = self.special(constants.SO_DOES_NOT_UNDERSTAND) diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -1,5 +1,5 @@ import py, math, socket -from spyvm import model, shadow, objspace, error, display +from spyvm import model, model_display, shadow, objspace, error, display from spyvm.shadow import MethodNotFound, WEAK_POINTERS from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rtyper.lltypesystem import lltype, rffi @@ -387,7 +387,7 @@ d = display.SDLDisplay("test") d.set_video_mode(32, 10, 1) - target = model.W_DisplayBitmap.create(space, space.w_Array, 10, 1, d) + target = model_display.W_DisplayBitmap.create(space, space.w_Array, 10, 1, d) target.setword(0, r_uint(0xFF00)) 
assert bin(target.getword(0)) == bin(0xFF00) target.setword(0, r_uint(0x00FF00FF)) @@ -404,15 +404,7 @@ assert target.pixelbuffer[i] == 0x0 def test_display_offset_computation(): - - def get_pixelbuffer(self): - return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') - display.SDLDisplay.get_pixelbuffer = get_pixelbuffer - d = display.SDLDisplay("test") - d.set_video_mode(18, 5, 1) - - dbitmap = model.W_DisplayBitmap.create(space, space.w_Array, 5, 1, d) - + dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 5, 1) assert dbitmap.compute_pos(0) == 0 assert dbitmap.compute_pos(1) == 8 assert dbitmap.size() == 5 * 8 diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -1,5 +1,5 @@ import py, os, math, time -from spyvm import model, shadow, interpreter, constants, primitives, objspace, wrapper, display +from spyvm import model, model_display, shadow, interpreter, constants, primitives, objspace, wrapper, display from spyvm.primitives import prim_table, PrimitiveFailedError from spyvm.plugins import bitblt from rpython.rlib.rfloat import INFINITY, NAN, isinf, isnan @@ -47,7 +47,8 @@ frame = context for i in range(len(stack)): frame.as_context_get_shadow(space).push(stack[i]) - interp = TestInterpreter(space, image_name=IMAGENAME) + interp = TestInterpreter(space) + interp.space._image_name.set(IMAGENAME) return interp, frame, len(stack) def _prim(space, code, stack, context = None): @@ -680,7 +681,7 @@ closure = space.newClosure(w_frame, 4, 0, []) s_frame = w_frame.as_methodcontext_get_shadow(space) - interp = TestInterpreter(space, image_name=IMAGENAME) + interp = TestInterpreter(space) interp._loop = True try: @@ -721,7 +722,7 @@ assert space.objtable["w_display"] is mock_display w_bitmap = mock_display.fetch(space, 0) assert w_bitmap is not w_wordbmp - assert isinstance(w_bitmap, model.W_DisplayBitmap) + assert 
isinstance(w_bitmap, model_display.W_DisplayBitmap) sdldisplay = w_bitmap.display assert isinstance(sdldisplay, display.SDLDisplay) @@ -733,7 +734,7 @@ prim(primitives.BE_DISPLAY, [mock_display2]) assert space.objtable["w_display"] is mock_display2 w_bitmap2 = mock_display.fetch(space, 0) - assert isinstance(w_bitmap2, model.W_DisplayBitmap) + assert isinstance(w_bitmap2, model_display.W_DisplayBitmap) assert w_bitmap.display is w_bitmap2.display assert sdldisplay.width == 32 assert sdldisplay.height == 10 @@ -764,7 +765,7 @@ raise DisplayFlush try: - monkeypatch.setattr(space.get_display().__class__, "flip", flush_to_screen_mock) + monkeypatch.setattr(space.display().__class__, "flip", flush_to_screen_mock) with py.test.raises(DisplayFlush): prim(primitives.FORCE_DISPLAY_UPDATE, [mock_display]) finally: diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -1,4 +1,4 @@ -from spyvm import model, constants +from spyvm import model, model_display, constants from spyvm.error import FatalError, WrapperException, PrimitiveFailedError class Wrapper(object): @@ -263,6 +263,30 @@ def size(self): return self._w_self.size() - constants.BLKCLSR_SIZE +class FormWrapper(Wrapper): + bits, store_bits = make_getter_setter(constants.FORM_BITS) + width, store_width = make_int_getter_setter(constants.FORM_WIDTH) + height, store_height = make_int_getter_setter(constants.FORM_HEIGHT) + depth, store_depth = make_int_getter_setter(constants.FORM_DEPTH) + + def create_display_bitmap(self): + w_display_bitmap = model_display.from_words_object(self.bits(), self) + self.store_bits(w_display_bitmap) + return w_display_bitmap + + def get_display_bitmap(self): + w_bitmap = self.bits() + if not isinstance(w_bitmap, model_display.W_DisplayBitmap): + w_display_bitmap = self.create_display_bitmap() + else: + w_display_bitmap = w_bitmap + if w_display_bitmap._depth != self.depth(): + w_display_bitmap = self.create_display_bitmap() + return w_display_bitmap 
+ + def take_over_display(self): + self.space.display().set_video_mode(self.width(), self.height(), self.depth()) + # XXX Wrappers below are not used yet. class OffsetWrapper(Wrapper): offset_x = make_int_getter(0) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -36,6 +36,7 @@ -p|--poll - Actively poll for events. Try this if the image is not responding well. -i|--no-interrupts - Disable timer interrupt. Disables non-cooperative scheduling. -S - Disable specialized storage strategies; always use generic ListStorage + --hacks - Enable Spy hacks. Set display color depth to 8. Logging parameters: -t|--trace - Output a trace of each message, primitive, return value and process switch. @@ -69,7 +70,7 @@ except error.Exit, e: print_error("Exited: %s" % e.msg) return -1 - except Exception, e: + except BaseException, e: print_error("Exception: %s" % str(e)) if not objectmodel.we_are_translated(): import traceback @@ -119,8 +120,10 @@ interrupts = False elif arg in ["-P", "--process"]: headless = False + elif arg in ["--hacks"]: + space.run_spy_hacks.activate() elif arg in ["-S"]: - space.no_specialized_storage.set() + space.no_specialized_storage.activate() elif arg in ["-u"]: from spyvm.plugins.vmdebugging import stop_ui_process stop_ui_process() @@ -158,10 +161,10 @@ # Load & prepare image and environment image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) - interp = interpreter.Interpreter(space, image, image_name=path, + interp = interpreter.Interpreter(space, image, trace=trace, evented=not poll, interrupts=interrupts) - space.runtime_setup(argv[0]) + space.runtime_setup(argv[0], path) print_error("") # Line break after image-loading characters # Create context to be executed @@ -174,7 +177,7 @@ selector = compile_code(interp, w_receiver, code) s_frame = create_context(interp, w_receiver, 
selector, stringarg) if headless: - space.headless.set() + space.headless.activate() context = s_frame else: create_process(interp, s_frame) @@ -203,7 +206,7 @@ # registered (primitive 136 not called), so the idle process will never be left once it is entered. # TODO - Find a way to cleanly initialize the image, without executing the active_context of the image. # Instead, we want to execute our own context. Then remove this flag (and all references to it) - space.suppress_process_switch.set() + space.suppress_process_switch.activate() w_result = interp.perform( w_receiver_class, @@ -215,7 +218,7 @@ # TODO - is this expected in every image? if not isinstance(w_result, model.W_BytesObject) or w_result.as_string() != selector: raise error.Exit("Unexpected compilation result (probably failed to compile): %s" % result_string(w_result)) - space.suppress_process_switch.unset() + space.suppress_process_switch.deactivate() w_receiver_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() return selector From noreply at buildbot.pypy.org Tue Jul 22 23:27:24 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 22 Jul 2014 23:27:24 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state: Merged. Message-ID: <20140722212724.9FFF61D2875@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state Changeset: r941:4e7e88455fcc Date: 2014-07-22 23:12 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/4e7e88455fcc/ Log: Merged. diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes --- a/images/Squeak4.5-noBitBlt.changes +++ b/images/Squeak4.5-noBitBlt.changes @@ -12620,4 +12620,15 @@ 1 to: self splayTreeSize do: [:i | self insertNewNode. - ]! ! ----SNAPSHOT----{15 July 2014 . 6:10:56 pm} Squeak4.5-noBitBlt.image priorSource: 15894330! \ No newline at end of file + ]! ! ----SNAPSHOT----{15 July 2014 . 6:10:56 pm} Squeak4.5-noBitBlt.image priorSource: 15894330! 
+ +----QUIT/NOSAVE----{21 July 2014 . 4:18:39 pm} Squeak4.5-noBitBlt.image priorSource: 15894825! ----STARTUP----{21 July 2014 . 6:19:06 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:20' prior: 34321504! withEnsuredReceiverAndArgumentsDo: aBlock otherwise: altBlock "Grab real references to receiver and arguments. If they still exist, evaluate aBlock." "Return if my receiver has gone away" | r a | r := self receiver. r ifNil: [ ^altBlock value ]. "Make sure that my arguments haven't gone away" arguments ifNil: [ ^ altBlock value ]. a := Array withAll: arguments. a with: shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. ^aBlock value: r value: a! ! ----QUIT----{21 July 2014 . 6:20:43 pm} Squeak4.5-noBitBlt.image priorSource: 15894825! + +----QUIT/NOSAVE----{21 July 2014 . 4:21:36 pm} Squeak4.5-noBitBlt.image priorSource: 15895702! ----STARTUP----{21 July 2014 . 6:21:54 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !WeakMessageSend methodsFor: 'comparing' stamp: 'ag 7/21/2014 18:22' prior: 33144463! = anObject "Compare equal to equivalent MessageSend" ^ anObject isMessageSend and: [self receiver == anObject receiver and: [selector == anObject selector and: [(Array withAll: self arguments) = (Array withAll: anObject arguments)]]] ! ! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:23' prior: 49449636! withEnsuredReceiverAndArgumentsDo: aBlock otherwise: altBlock "Grab real references to receiver and arguments. If they still exist, evaluate aBlock." "Return if my receiver has gone away" | r a | r := self receiver. r ifNil: [ ^altBlock value ]. "Make sure that my arguments haven't gone away" a := Array withAll: self arguments. a with: shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. ^aBlock value: r value: a! ! ----QUIT----{21 July 2014 . 6:23:49 pm} Squeak4.5-noBitBlt.image priorSource: 15895702! 
----STARTUP----{21 July 2014 . 6:31:05 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !WeakMessageSend methodsFor: 'accessing' stamp: 'ag 7/21/2014 18:31'! shouldBeNil ^ shouldBeNil ifNil: [ Array new ]! ! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:31' prior: 33148869! isAnyArgumentGarbage "Make sure that my arguments haven't gone away" arguments ifNotNil: [ arguments with: self shouldBeNil do: [ :arg :flag | (flag not and: [arg isNil]) ifTrue: [^true] ] ]. ^false ! ! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:31' prior: 49450841! withEnsuredReceiverAndArgumentsDo: aBlock otherwise: altBlock "Grab real references to receiver and arguments. If they still exist, evaluate aBlock." "Return if my receiver has gone away" | r a | r := self receiver. r ifNil: [ ^altBlock value ]. "Make sure that my arguments haven't gone away" a := Array withAll: self arguments. a with: self shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. ^aBlock value: r value: a! ! !WeakMessageSend methodsFor: 'private' stamp: 'ag 7/21/2014 18:32' prior: 34360552! withEnsuredReceiverAndArgumentsDo: aBlock withEnoughArguments: anArray otherwise: altBlock "call the selector with enough arguments from arguments and anArray" | r selfArgs enoughArgs | r := self receiver. r ifNil: [ ^altBlock value ]. selfArgs := self arguments. selfArgs with: self shouldBeNil do: [ :arg :flag | arg ifNil: [ flag ifFalse: [ ^altBlock value ]] ]. enoughArgs := Array new: selector numArgs. enoughArgs replaceFrom: 1 to: ( selfArgs size min: enoughArgs size) with: selfArgs startingAt: 1. enoughArgs size > selfArgs size ifTrue: [ enoughArgs replaceFrom: selfArgs size + 1 to: (selfArgs size + anArray size min: enoughArgs size) with: anArray startingAt: 1. ]. ^aBlock value: r value: enoughArgs! ! ----QUIT----{21 July 2014 . 6:32:32 pm} Squeak4.5-noBitBlt.image priorSource: 15896872! + +----STARTUP----{21 July 2014 . 
4:32:52 pm} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! + + +1+1! + +----QUIT/NOSAVE----{21 July 2014 . 4:33:15 pm} Squeak4.5-noBitBlt.image priorSource: 15898877! \ No newline at end of file diff --git a/images/Squeak4.5-noBitBlt.image b/images/Squeak4.5-noBitBlt.image index ed92c78c940799d91bb94a8ed8527076db6816c7..00843c2c83f9c11e5dcfa3b9927bd415d0a22cd8 GIT binary patch [cut] diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -54,6 +54,11 @@ BLKCLSR_NUMARGS = 2 BLKCLSR_SIZE = 3 +FORM_BITS = 0 +FORM_WIDTH = 1 +FORM_HEIGHT = 2 +FORM_DEPTH = 3 + # ___________________________________________________________________________ # Miscellaneous constants diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -35,12 +35,13 @@ WindowEventPaint = 5 WindowEventStinks = 6 +MINIMUM_DEPTH = 8 class SDLDisplay(object): _attrs_ = ["screen", "width", "height", "depth", "surface", "has_surface", "mouse_position", "button", "key", "interrupt_key", "_defer_updates", - "_deferred_event", "pixelbuffer"] - _immutable_fields_ = ["pixelbuffer?"] + "_deferred_event", "bpp", "pitch"] + #_immutable_fields_ = ["pixelbuffer?"] def __init__(self, title): assert RSDL.Init(RSDL.INIT_VIDEO) >= 0 @@ -58,26 +59,27 @@ def set_video_mode(self, w, h, d): assert w > 0 and h > 0 assert d in [1, 2, 4, 8, 16, 32] + if d < MINIMUM_DEPTH: + d = MINIMUM_DEPTH self.width = w self.height = h self.depth = d flags = RSDL.HWPALETTE | RSDL.RESIZABLE | RSDL.ASYNCBLIT | RSDL.DOUBLEBUF - if d < 8: - d = 8 self.screen = RSDL.SetVideoMode(w, h, d, flags) if not self.screen: print "Could not open display at depth %d" % d raise RuntimeError - elif d == 8: + elif d == MINIMUM_DEPTH: self.set_squeak_colormap(self.screen) - self.pixelbuffer = rffi.cast(rffi.UINTP, self.screen.c_pixels) - + self.bpp = rffi.getintfield(self.screen.c_format, 'c_BytesPerPixel') + self.pitch = rffi.getintfield(self.screen, 'c_pitch') + 
def get_pixelbuffer(self): - return jit.promote(self.pixelbuffer) - + return jit.promote(rffi.cast(RSDL.Uint32P, self.screen.c_pixels)) + def defer_updates(self, flag): self._defer_updates = flag - + def flip(self, force=False): if (not self._defer_updates) or force: RSDL.Flip(self.screen) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -23,7 +23,7 @@ class Interpreter(object): - _immutable_fields_ = ["space", "image", "image_name", + _immutable_fields_ = ["space", "image", "interrupt_counter_size", "startup_time", "evented", "interrupts"] @@ -34,12 +34,11 @@ get_printable_location=get_printable_location ) - def __init__(self, space, image=None, image_name="", + def __init__(self, space, image=None, trace=False, evented=True, interrupts=True): # === Initialize immutable variables self.space = space self.image = image - self.image_name = image_name if image: self.startup_time = image.startup_time else: @@ -1033,9 +1032,8 @@ # in order to enable tracing/jumping for message sends etc. 
def debugging(): def stepping_debugger_init(original): - def meth(self, space, image=None, image_name="", trace=False): - return_value = original(self, space, image=image, - image_name=image_name, trace=trace) + def meth(self, space, image=None, trace=False): + return_value = original(self, space, image=image, trace=trace) # ############################################################## self.message_stepping = False diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -17,7 +17,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.unroll import unrolling_iterable -from spyvm import error, model, objspace +from spyvm import error, model, model_display, objspace, wrapper sqInt = rffi.INT sqLong = rffi.LONG @@ -242,7 +242,7 @@ return w_object.convert_to_c_layout() elif isinstance(w_object, model.W_BytesObject): return rffi.cast(sqIntArrayPtr, w_object.convert_to_c_layout()) - elif isinstance(w_object, model.W_DisplayBitmap): + elif isinstance(w_object, model_display.W_DisplayBitmap): return rffi.cast(sqIntArrayPtr, w_object.convert_to_c_layout()) else: raise ProxyFunctionFailed @@ -529,7 +529,7 @@ @expose_on_virtual_machine_proxy([], int) def fullDisplayUpdate(): w_display = IProxy.space.objtable['w_display'] - if isinstance(w_display, model.W_DisplayBitmap): + if isinstance(w_display, model_display.W_DisplayBitmap): w_display.update_from_buffer() w_display.flush_to_screen() return 0 @@ -559,16 +559,8 @@ # display memory space = IProxy.space if w_dest_form.is_same_object(space.objtable['w_display']): - w_bitmap = w_dest_form.fetch(space, 0) - if not isinstance(w_bitmap, model.W_DisplayBitmap): - assert isinstance(w_bitmap, model.W_WordsObject) - w_display_bitmap = w_bitmap.as_display_bitmap( - w_dest_form, - IProxy.interp, - sdldisplay=None - ) - else: - w_display_bitmap = w_bitmap + form = wrapper.FormWrapper(space, w_dest_form) + w_display_bitmap = 
form.get_display_bitmap() w_display_bitmap.update_from_buffer() w_display_bitmap.flush_to_screen() return 0 @@ -1008,7 +1000,7 @@ self.argcount = 0 self.w_method = None self.fail_reason = 0 - self.trace_proxy.unset() + self.trace_proxy.deactivate() def call(self, signature, interp, s_frame, argcount, w_method): self.initialize_from_call(signature, interp, s_frame, argcount, w_method) @@ -1050,7 +1042,7 @@ self.argcount = argcount self.w_method = w_method self.space = interp.space - self.trace_proxy.set_to(interp.trace_proxy.is_set()) + self.trace_proxy.set(interp.trace_proxy.is_set()) # ensure that space.w_nil gets the first possible oop self.object_to_oop(self.space.w_nil) diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -987,26 +987,6 @@ c_words[i] = intmask(old_words[i]) self.words = None return c_words - - def as_display_bitmap(self, w_form, interp, sdldisplay=None): - width = interp.space.unwrap_int(w_form.fetch(interp.space, 1)) - height = interp.space.unwrap_int(w_form.fetch(interp.space, 2)) - depth = interp.space.unwrap_int(w_form.fetch(interp.space, 3)) - if not sdldisplay: - from spyvm import display - sdldisplay = display.SDLDisplay(interp.image_name) - sdldisplay.set_video_mode(width, height, depth) - w_display_bitmap = W_DisplayBitmap.create( - interp.space, - self.getclass(interp.space), - self.size(), - depth, - sdldisplay - ) - for idx in range(self.size()): - w_display_bitmap.setword(idx, self.getword(idx)) - w_form.store(interp.space, 0, w_display_bitmap) - return w_display_bitmap def _become(self, w_other): assert isinstance(w_other, W_WordsObject) @@ -1019,144 +999,6 @@ if self.words is None: lltype.free(self.c_words, flavor='raw') -class W_DisplayBitmap(W_AbstractObjectWithClassReference): - _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display', '_depth'] - _immutable_fields_ = ['_realsize', 'display', '_depth'] - repr_classname = "W_DisplayBitmap" - - pixelbuffer = None - - 
@staticmethod - def create(space, w_class, size, depth, display): - if depth < 8: - return W_MappingDisplayBitmap(space, w_class, size * (8 / depth), depth, display) - elif depth == 8: - return W_8BitDisplayBitmap(space, w_class, size, depth, display) - elif depth == 16: - return W_16BitDisplayBitmap(space, w_class, size, depth, display) - else: - return W_DisplayBitmap(space, w_class, size, depth, display) - - def repr_content(self): - return "len=%d depth=%d %s" % (self.size(), self._depth, self.str_content()) - - def __init__(self, space, w_class, size, depth, display): - W_AbstractObjectWithClassReference.__init__(self, space, w_class) - self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') - self._realsize = size - self.display = display - self._depth = depth - - def at0(self, space, index0): - val = self.getword(index0) - return space.wrap_uint(val) - - def atput0(self, space, index0, w_value): - word = space.unwrap_uint(w_value) - self.setword(index0, word) - - def flush_to_screen(self): - self.display.flip() - - def size(self): - return self._realsize - - def invariant(self): - return False - - def clone(self, space): - w_result = W_WordsObject(space, self.getclass(space), self._realsize) - n = 0 - while n < self._realsize: - w_result.words[n] = self.getword(n) - n += 1 - return w_result - - def getword(self, n): - assert self.size() > n >= 0 - return self._real_depth_buffer[n] - - def setword(self, n, word): - self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = word - - def is_array_object(self): - return True - - def update_from_buffer(self): - for i in range(self._realsize): - self.setword(i, self.getword(i)) - - def convert_to_c_layout(self): - return self._real_depth_buffer - - def can_become(self, w_other): - # TODO - implement _become() for this class. Impossible due to _immutable_fields_? 
- return False - - def __del__(self): - lltype.free(self._real_depth_buffer, flavor='raw') - - -class W_16BitDisplayBitmap(W_DisplayBitmap): - repr_classname = "W_16BitDisplayBitmap" - def setword(self, n, word): - self._real_depth_buffer[n] = word - mask = 0b11111 - lsb = (r_uint(word) & r_uint(0xffff0000)) >> 16 - msb = (r_uint(word) & r_uint(0x0000ffff)) - - lsb = ( - ((lsb >> 10) & mask) | - (((lsb >> 5) & mask) << 6) | - ((lsb & mask) << 11) - ) - msb = ( - ((msb >> 10) & mask) | - (((msb >> 5) & mask) << 6) | - ((msb & mask) << 11) - ) - - self.display.get_pixelbuffer()[n] = r_uint(lsb | (msb << 16)) - - -class W_8BitDisplayBitmap(W_DisplayBitmap): - repr_classname = "W_8BitDisplayBitmap" - def setword(self, n, word): - self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = r_uint( - (word >> 24) | - ((word >> 8) & 0x0000ff00) | - ((word << 8) & 0x00ff0000) | - (word << 24) - ) - -NATIVE_DEPTH = 8 -class W_MappingDisplayBitmap(W_DisplayBitmap): - repr_classname = "W_MappingDisplayBitmap" - @jit.unroll_safe - def setword(self, n, word): - self._real_depth_buffer[n] = word - word = r_uint(word) - pos = self.compute_pos(n) - assert self._depth <= 4 - rshift = 32 - self._depth - for i in xrange(8 / self._depth): - if pos >= self.size(): - return - mapword = r_uint(0) - for i in xrange(4): - pixel = r_uint(word) >> rshift - mapword |= (r_uint(pixel) << (i * 8)) - word <<= self._depth - self.display.get_pixelbuffer()[pos] = mapword - pos += 1 - - def compute_pos(self, n): - return n * (NATIVE_DEPTH / self._depth) - -# XXX Shouldn't compiledmethod have class reference for subclassed compiled -# methods? class W_CompiledMethod(W_AbstractObjectWithIdentityHash): """My instances are methods suitable for interpretation by the virtual machine. This is the only class in the system whose instances intermix both indexable pointer fields and indexable integer fields. 
@@ -1467,8 +1309,3 @@ if isinstance(s_class, ClassShadow): return "%s >> #%s" % (s_class.getname(), self.lookup_selector) return "#%s" % self.lookup_selector - -class DetachingShadowError(Exception): - def __init__(self, old_shadow, new_shadow_class): - self.old_shadow = old_shadow - self.new_shadow_class = new_shadow_class diff --git a/spyvm/model_display.py b/spyvm/model_display.py new file mode 100644 --- /dev/null +++ b/spyvm/model_display.py @@ -0,0 +1,196 @@ + +from spyvm import model, constants, display +from rpython.rlib import jit, objectmodel +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.rarithmetic import r_uint + +def from_words_object(w_obj, form): + depth = form.depth() + space = form.space + size = w_obj.size() + w_class = w_obj.getclass(space) + + if depth < 8: + w_display_bitmap = W_MappingDisplayBitmap(space, w_class, size, depth) + elif depth == 8: + w_display_bitmap = W_8BitDisplayBitmap(space, w_class, size, depth) + elif depth == 16: + w_display_bitmap = W_16BitDisplayBitmap(space, w_class, size, depth) + else: + w_display_bitmap = W_DisplayBitmap(space, w_class, size, depth) + + for idx in range(size): + w_display_bitmap.setword(idx, w_obj.getword(idx)) + + return w_display_bitmap + +class W_DisplayBitmap(model.W_AbstractObjectWithClassReference): + _attrs_ = ['pixelbuffer_words', '_real_depth_buffer', '_realsize', 'display', '_depth'] + _immutable_fields_ = ['pixelbuffer_words?', '_real_depth_buffer', '_realsize', 'display', '_depth'] + repr_classname = "W_DisplayBitmap" + + def __init__(self, space, w_class, size, depth): + model.W_AbstractObjectWithClassReference.__init__(self, space, w_class) + self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') + self._realsize = size + self._depth = depth + self.display = space.display() + self.relinquish_display() + + # === Object access + + def at0(self, space, index0): + val = self.getword(index0) + return space.wrap_uint(val) + + def 
atput0(self, space, index0, w_value): + word = space.unwrap_uint(w_value) + self.setword(index0, word) + + def getword(self, n): + assert self.size() > n >= 0 + return self._real_depth_buffer[n] + + def setword(self, n, word): + self._real_depth_buffer[n] = word + if self.pixelbuffer_words > 0: + self.set_pixelbuffer_word(n, word) + + def size(self): + return self._realsize + + # === Graphics + + def pixelbuffer(self): + return self.display.get_pixelbuffer() + + def pixelbuffer_UCHAR(self): + return self.display.get_pixelbuffer_UCHAR() + + def set_pixelbuffer_word(self, n, word): + self.pixelbuffer()[n] = word + + def take_over_display(self): + # Make sure FrameWrapper.take_over_display() is called first for the correct Frame object. + pixel_per_word = constants.BYTES_PER_WORD / (self.display.depth / 8) + self.pixelbuffer_words = self.display.width * self.display.height / pixel_per_word + self.update_from_buffer() + + def relinquish_display(self): + self.pixelbuffer_words = 0 + + def flush_to_screen(self): + self.display.flip() + + def update_from_buffer(self): + if self.pixelbuffer_words > 0: + for i in range(self.size()): + self.set_pixelbuffer_word(i, self.getword(i)) + + # === Misc + + def invariant(self): + return False + + def clone(self, space): + w_result = model.W_WordsObject(space, self.getclass(space), self.size()) + for n in range(self.size()): + w_result.setword(n, self.getword(n)) + return w_result + + def is_array_object(self): + return True + + def convert_to_c_layout(self): + return self._real_depth_buffer + + def can_become(self, w_other): + # TODO - implement _become() for this class. Impossible due to _immutable_fields_? 
+ return False + + def __del__(self): + lltype.free(self._real_depth_buffer, flavor='raw') + + def repr_content(self): + return "len=%d depth=%d %s" % (self.size(), self._depth, self.str_content()) + +class W_16BitDisplayBitmap(W_DisplayBitmap): + + repr_classname = "W_16BitDisplayBitmap" + + def set_pixelbuffer_word(self, n, word): + mask = 0b11111 + lsb = (r_uint(word) & r_uint(0xffff0000)) >> 16 + msb = (r_uint(word) & r_uint(0x0000ffff)) + + # Invert order of rgb-components + lsb = ( + ((lsb >> 10) & mask) | + (((lsb >> 5) & mask) << 6) | + ((lsb & mask) << 11) + ) + msb = ( + ((msb >> 10) & mask) | + (((msb >> 5) & mask) << 6) | + ((msb & mask) << 11) + ) + + self.pixelbuffer()[n] = r_uint(lsb | (msb << 16)) + +class W_8BitDisplayBitmap(W_DisplayBitmap): + + repr_classname = "W_8BitDisplayBitmap" + + def set_pixelbuffer_word(self, n, word): + # Invert the byte-order. + self.pixelbuffer()[n] = r_uint( + (word >> 24) | + ((word >> 8) & 0x0000ff00) | + ((word << 8) & 0x00ff0000) | + (word << 24) + ) + +BITS = r_uint(32) +class W_MappingDisplayBitmap(W_DisplayBitmap): + + repr_classname = "W_MappingDisplayBitmap" + _attrs_ = ['words_per_line', 'bits_in_last_word', 'pitch'] + _immutable_fields_ = ['words_per_line?', 'bits_in_last_word?', 'pitch?'] + + def __init__(self, space, w_class, size, depth): + assert depth in [1, 2, 4] + W_DisplayBitmap.__init__(self, space, w_class, size, depth) + + def take_over_display(self): + pitch = r_uint(self.display.pitch) # The pitch is different from the width input to SDL! + self.pitch = pitch + self.bits_in_last_word = pitch % BITS + self.words_per_line = r_uint((pitch - self.bits_in_last_word) / BITS) + if self.bits_in_last_word > 0: + self.words_per_line += 1 + W_DisplayBitmap.take_over_display(self) + + @jit.unroll_safe + def set_pixelbuffer_word(self, n, word): + n = r_uint(n) + if ((n+1) % self.words_per_line) == 0 and self.bits_in_last_word > 0: + # This is the last word on the line. A few bits are cut off. 
+ bits = self.bits_in_last_word + else: + bits = BITS + + word = r_uint(word) + pos = self.compute_pos(n) + buf = rffi.ptradd(self.display.screen.c_pixels, pos) + depth = r_uint(self._depth) + rshift = BITS - depth + for i in range(bits / depth): + pixel = word >> rshift + buf[i] = rffi.cast(rffi.UCHAR, pixel) + word <<= depth + + def compute_pos(self, n): + word_on_line = n % self.words_per_line + y = r_uint((n - word_on_line) / self.words_per_line) + x = word_on_line * BITS / r_uint(self._depth) + return y * self.pitch + x diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,30 +1,47 @@ import os -from spyvm import constants, model, shadow, wrapper, version +from spyvm import constants, model, model_display, shadow, wrapper, version, display from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError from rpython.rlib import jit, rpath -from rpython.rlib.objectmodel import instantiate, specialize +from rpython.rlib.objectmodel import instantiate, specialize, import_from_mixin from rpython.rlib.rarithmetic import intmask, r_uint, int_between -class ConstantFlag(object): - """Boolean flag that can be edited, but will be promoted +class ConstantMixin(object): + """Mixin for constant values that can be edited, but will be promoted to a constant when jitting.""" - def __init__(self, set_initially=False): - self.flag = [set_initially] + def __init__(self, initial_value = None): + if initial_value is None: + initial_value = self.default_value + self.value = [initial_value] + def set(self, value): + self.value[0] = value + + def get(self): + value = jit.promote(self.value[0]) + return value + +class ConstantFlag(object): + import_from_mixin(ConstantMixin) + default_value = False def is_set(self): - flag = jit.promote(self.flag[0]) - return flag + return self.get() + def activate(self): + self.set(True) + def deactivate(self): + self.set(False) + +class ConstantString(object): + 
import_from_mixin(ConstantMixin) + default_value = "" + def get(self): + # Promoting does not work on strings... + return self.value[0] - def set(self): - self.flag[0] = True - - def unset(self): - self.flag[0] = False - - def set_to(self, flag): - self.flag[0] = flag +class ConstantObject(object): + import_from_mixin(ConstantMixin) + default_value = None class ObjSpace(object): def __init__(self): @@ -33,12 +50,14 @@ self.no_specialized_storage = ConstantFlag() # This is a hack; see compile_code() in targetimageloadingsmalltalk.py self.suppress_process_switch = ConstantFlag() + self.run_spy_hacks = ConstantFlag() self.headless = ConstantFlag() self.classtable = {} self.objtable = {} - self._executable_path = [""] # XXX: we cannot set the attribute - # directly on the frozen objectspace + self._executable_path = ConstantString() + self._image_name = ConstantString() + self._display = ConstantObject() # Create the nil object. # Circumvent the constructor because nil is already referenced there. 
@@ -61,11 +80,12 @@ break return rpath.rabspath(executable) - def runtime_setup(self, executable): + def runtime_setup(self, executable, image_name): fullpath = rpath.rabspath(self.find_executable(executable)) i = fullpath.rfind(os.path.sep) + 1 assert i > 0 - self._executable_path[0] = fullpath[:i] + self._executable_path.set(fullpath[:i]) + self._image_name.set(image_name) def populate_special_objects(self, specials): for name, idx in constants.objects_in_special_object_table.items(): @@ -74,9 +94,6 @@ self.objtable[name] = specials[idx] # XXX this is kind of hacky, but I don't know where else to get Metaclass self.classtable["w_Metaclass"] = self.w_SmallInteger.w_class.w_class - - def executable_path(self): - return self._executable_path[0] def add_bootstrap_class(self, name, cls): self.classtable[name] = cls @@ -124,14 +141,8 @@ name = "w_" + name if not name in self.objtable: self.add_bootstrap_object(name, None) - - @specialize.arg(1) - def get_special_selector(self, selector): - i0 = constants.find_selectorindex(selector) - self.w_special_selectors.as_cached_object_get_shadow(self) - return self.w_special_selectors.fetch(self, i0) - # methods for wrapping and unwrapping stuff + # ============= Methods for wrapping and unwrapping stuff ============= def wrap_int(self, val): from spyvm import constants @@ -233,14 +244,30 @@ return [w_array.at0(self, i) for i in range(w_array.size())] - def get_display(self): - w_display = self.objtable['w_display'] - if w_display: - w_bitmap = w_display.fetch(self, 0) - if isinstance(w_bitmap, model.W_DisplayBitmap): - return w_bitmap.display - raise PrimitiveFailedError("No display") - + # ============= Access to static information ============= + + @specialize.arg(1) + def get_special_selector(self, selector): + i0 = constants.find_selectorindex(selector) + self.w_special_selectors.as_cached_object_get_shadow(self) + return self.w_special_selectors.fetch(self, i0) + + def executable_path(self): + return 
self._executable_path.get() + + def image_name(self): + return self._image_name.get() + + def display(self): + disp = self._display.get() + if disp is None: + # Create lazy to allow headless execution. + disp = display.SDLDisplay(self.image_name()) + self._display.set(disp) + return disp + + # ============= Other Methods ============= + def _freeze_(self): return True diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -1,4 +1,4 @@ -from spyvm import model +from spyvm import model_display, model from spyvm.error import PrimitiveFailedError from spyvm.shadow import AbstractCachingShadow from spyvm.plugins.plugin import Plugin @@ -32,7 +32,7 @@ s_frame.push(interp.space.wrap_int(s_bitblt.bitCount)) elif w_dest_form.is_same_object(space.objtable['w_display']): w_bitmap = w_dest_form.fetch(space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) + assert isinstance(w_bitmap, model_display.W_DisplayBitmap) w_bitmap.flush_to_screen() return w_rcvr @@ -741,7 +741,7 @@ self.w_bits = self.fetch(0) if self.w_bits.is_nil(self.space): return - if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): + if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model_display.W_DisplayBitmap)): return self.width = self.intOrIfNil(self.fetch(1), 0) self.height = self.intOrIfNil(self.fetch(2), 0) diff --git a/spyvm/plugins/vmdebugging.py b/spyvm/plugins/vmdebugging.py --- a/spyvm/plugins/vmdebugging.py +++ b/spyvm/plugins/vmdebugging.py @@ -20,12 +20,12 @@ @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def trace_proxy(interp, s_frame, w_rcvr): - interp.trace_proxy.set() + interp.trace_proxy.activate() return w_rcvr @DebuggingPlugin.expose_primitive(unwrap_spec=[object]) def untrace_proxy(interp, s_frame, w_rcvr): - interp.trace_proxy.unset() + interp.trace_proxy.deactivate() return w_rcvr 
@DebuggingPlugin.expose_primitive(unwrap_spec=[object]) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -2,7 +2,7 @@ import inspect import math import operator -from spyvm import model, shadow, error, constants, display +from spyvm import model, model_display, shadow, error, constants, display from spyvm.error import PrimitiveFailedError, PrimitiveNotYetWrittenError from spyvm import wrapper @@ -644,7 +644,7 @@ @expose_primitive(MOUSE_POINT, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - x, y = interp.space.get_display().mouse_point() + x, y = interp.space.display().mouse_point() w_point = model.W_PointersObject(interp.space, interp.space.w_Point, 2) w_point.store(interp.space, 0, interp.space.wrap_int(x)) w_point.store(interp.space, 1, interp.space.wrap_int(y)) @@ -656,7 +656,7 @@ def func(interp, s_frame, w_rcvr, w_into): if not interp.evented: raise PrimitiveFailedError() - ary = interp.space.get_display().get_next_event(time=interp.time_now()) + ary = interp.space.display().get_next_event(time=interp.time_now()) for i in range(8): w_into.store(interp.space, i, interp.space.wrap_int(ary[i])) # XXX - hack @@ -676,7 +676,7 @@ w_display = interp.space.objtable['w_display'] if w_dest_form.is_same_object(w_display): w_bitmap = w_display.fetch(interp.space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) + assert isinstance(w_bitmap, model_display.W_DisplayBitmap) w_bitmap.flush_to_screen() return w_rcvr except shadow.MethodNotFound: @@ -731,45 +731,23 @@ def func(interp, s_frame, w_rcvr): if interp.space.headless.is_set(): exitFromHeadlessExecution(s_frame) - if not isinstance(w_rcvr, model.W_PointersObject) or w_rcvr.size() < 4: raise PrimitiveFailedError - # the fields required are bits (a pointer to a Bitmap), width, height, depth - - # XXX: TODO get the initial image TODO: figure out whether we - # should decide the width an report it in the other SCREEN_SIZE - w_bitmap = 
w_rcvr.fetch(interp.space, 0) - width = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 1)) - height = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 2)) - depth = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 3)) - - sdldisplay = None - - w_prev_display = interp.space.objtable['w_display'] - if w_prev_display: - w_prev_bitmap = w_prev_display.fetch(interp.space, 0) - if isinstance(w_prev_bitmap, model.W_DisplayBitmap): - sdldisplay = w_prev_bitmap.display - sdldisplay.set_video_mode(width, height, depth) - - if isinstance(w_bitmap, model.W_DisplayBitmap): - assert (sdldisplay is None) or (sdldisplay is w_bitmap.display) - sdldisplay = w_bitmap.display - sdldisplay.set_video_mode(width, height, depth) - w_display_bitmap = w_bitmap - else: - assert isinstance(w_bitmap, model.W_WordsObject) - w_display_bitmap = w_bitmap.as_display_bitmap( - w_rcvr, - interp, - sdldisplay=sdldisplay - ) - + + old_display = interp.space.objtable['w_display'] + if isinstance(old_display, model_display.W_DisplayBitmap): + old_display.relinquish_display() + interp.space.objtable['w_display'] = w_rcvr + + # TODO: figure out whether we should decide the width an report it in the SCREEN_SIZE primitive + form = wrapper.FormWrapper(interp.space, w_rcvr) + form.take_over_display() + w_display_bitmap = form.get_display_bitmap() + w_display_bitmap.take_over_display() w_display_bitmap.flush_to_screen() + if interp.image: - interp.image.lastWindowSize = (width << 16) + height - interp.space.objtable['w_display'] = w_rcvr - + interp.image.lastWindowSize = (form.width() << 16) + form.height() return w_rcvr @expose_primitive(STRING_REPLACE, unwrap_spec=[object, index1_0, index1_0, object, index1_0]) @@ -812,12 +790,12 @@ @expose_primitive(MOUSE_BUTTONS, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - btn = interp.space.get_display().mouse_button() + btn = interp.space.display().mouse_button() return interp.space.wrap_int(btn) @expose_primitive(KBD_NEXT, unwrap_spec=[object]) def 
func(interp, s_frame, w_rcvr): - code = interp.space.get_display().next_keycode() + code = interp.space.display().next_keycode() if code & 0xFF == 0: return interp.space.w_nil else: @@ -825,7 +803,7 @@ @expose_primitive(KBD_PEEK, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - code = interp.space.get_display().peek_keycode() + code = interp.space.display().peek_keycode() if code & 0xFF == 0: return interp.space.w_nil else: @@ -864,8 +842,8 @@ @expose_primitive(EXIT_TO_DEBUGGER, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - if not objectmodel.we_are_translated(): - import pdb; pdb.set_trace() + if interp.space.headless.is_set(): + exitFromHeadlessExecution(s_frame, "EXIT_TO_DEBUGGER") raise PrimitiveNotYetWrittenError() @expose_primitive(CHANGE_CLASS, unwrap_spec=[object, object], no_result=True) @@ -987,7 +965,7 @@ def func(interp, s_frame, argument_count): if argument_count == 0: s_frame.pop() - return interp.space.wrap_string(interp.image_name) + return interp.space.wrap_string(interp.space.image_name()) elif argument_count == 1: pass # XXX raise PrimitiveFailedError @@ -1004,7 +982,7 @@ @expose_primitive(DEFER_UPDATES, unwrap_spec=[object, bool]) def func(interp, s_frame, w_receiver, flag): - sdldisplay = interp.space.get_display() + sdldisplay = interp.space.display() sdldisplay.defer_updates(flag) return w_receiver @@ -1058,7 +1036,7 @@ @expose_primitive(SET_INTERRUPT_KEY, unwrap_spec=[object, int]) def func(interp, s_frame, w_rcvr, encoded_key): - interp.space.get_display().set_interrupt_key(interp.space, encoded_key) + interp.space.display().set_interrupt_key(interp.space, encoded_key) return w_rcvr @expose_primitive(INTERRUPT_SEMAPHORE, unwrap_spec=[object, object]) @@ -1142,7 +1120,7 @@ raise PrimitiveFailedError for i in xrange(w_arg.size()): w_arg.setchar(i, chr(new_value)) - elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, model.W_DisplayBitmap): + elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, 
model_display.W_DisplayBitmap): for i in xrange(w_arg.size()): w_arg.setword(i, new_value) else: @@ -1531,7 +1509,7 @@ @expose_primitive(FORCE_DISPLAY_UPDATE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - interp.space.get_display().flip(force=True) + interp.space.display().flip(force=True) return w_rcvr # ___________________________________________________________________________ diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -386,12 +386,13 @@ self.startup_time = time.time() def run_spy_hacks(self, space): - pass - # w_display = space.objtable["w_display"] - # if w_display is not None and not w_display.is_nil(space): - # if space.unwrap_int(w_display.fetch(space, 3)) < 8: - # # non-native indexed color depth not well supported - # w_display.store(space, 3, space.wrap_int(8)) + if not space.run_spy_hacks.is_set(): + return + w_display = space.objtable["w_display"] + if w_display is not None and not w_display.is_nil(space): + if space.unwrap_int(w_display.fetch(space, 3)) < 8: + # non-native indexed color depth not well supported + w_display.store(space, 3, space.wrap_int(8)) def find_symbol(self, space, reader, symbol): w_dnu = self.special(constants.SO_DOES_NOT_UNDERSTAND) diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -1,5 +1,5 @@ import py, math, socket -from spyvm import model, shadow, objspace, error, display +from spyvm import model, model_display, shadow, objspace, error, display from spyvm.shadow import MethodNotFound, WEAK_POINTERS from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rtyper.lltypesystem import lltype, rffi @@ -387,7 +387,7 @@ d = display.SDLDisplay("test") d.set_video_mode(32, 10, 1) - target = model.W_DisplayBitmap.create(space, space.w_Array, 10, 1, d) + target = model_display.W_DisplayBitmap.create(space, space.w_Array, 10, 1, d) target.setword(0, r_uint(0xFF00)) 
assert bin(target.getword(0)) == bin(0xFF00) target.setword(0, r_uint(0x00FF00FF)) @@ -404,15 +404,7 @@ assert target.pixelbuffer[i] == 0x0 def test_display_offset_computation(): - - def get_pixelbuffer(self): - return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') - display.SDLDisplay.get_pixelbuffer = get_pixelbuffer - d = display.SDLDisplay("test") - d.set_video_mode(18, 5, 1) - - dbitmap = model.W_DisplayBitmap.create(space, space.w_Array, 5, 1, d) - + dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 5, 1) assert dbitmap.compute_pos(0) == 0 assert dbitmap.compute_pos(1) == 8 assert dbitmap.size() == 5 * 8 diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -1,5 +1,5 @@ import py, os, math, time -from spyvm import model, shadow, interpreter, constants, primitives, objspace, wrapper, display +from spyvm import model, model_display, shadow, interpreter, constants, primitives, objspace, wrapper, display from spyvm.primitives import prim_table, PrimitiveFailedError from spyvm.plugins import bitblt from rpython.rlib.rfloat import INFINITY, NAN, isinf, isnan @@ -47,7 +47,8 @@ frame = context for i in range(len(stack)): frame.as_context_get_shadow(space).push(stack[i]) - interp = TestInterpreter(space, image_name=IMAGENAME) + interp = TestInterpreter(space) + interp.space._image_name.set(IMAGENAME) return interp, frame, len(stack) def _prim(space, code, stack, context = None): @@ -680,7 +681,7 @@ closure = space.newClosure(w_frame, 4, 0, []) s_frame = w_frame.as_methodcontext_get_shadow(space) - interp = TestInterpreter(space, image_name=IMAGENAME) + interp = TestInterpreter(space) interp._loop = True try: @@ -721,7 +722,7 @@ assert space.objtable["w_display"] is mock_display w_bitmap = mock_display.fetch(space, 0) assert w_bitmap is not w_wordbmp - assert isinstance(w_bitmap, model.W_DisplayBitmap) + assert 
isinstance(w_bitmap, model_display.W_DisplayBitmap) sdldisplay = w_bitmap.display assert isinstance(sdldisplay, display.SDLDisplay) @@ -733,7 +734,7 @@ prim(primitives.BE_DISPLAY, [mock_display2]) assert space.objtable["w_display"] is mock_display2 w_bitmap2 = mock_display.fetch(space, 0) - assert isinstance(w_bitmap2, model.W_DisplayBitmap) + assert isinstance(w_bitmap2, model_display.W_DisplayBitmap) assert w_bitmap.display is w_bitmap2.display assert sdldisplay.width == 32 assert sdldisplay.height == 10 @@ -764,7 +765,7 @@ raise DisplayFlush try: - monkeypatch.setattr(space.get_display().__class__, "flip", flush_to_screen_mock) + monkeypatch.setattr(space.display().__class__, "flip", flush_to_screen_mock) with py.test.raises(DisplayFlush): prim(primitives.FORCE_DISPLAY_UPDATE, [mock_display]) finally: diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -1,4 +1,4 @@ -from spyvm import model, constants +from spyvm import model, model_display, constants from spyvm.error import FatalError, WrapperException, PrimitiveFailedError class Wrapper(object): @@ -263,6 +263,30 @@ def size(self): return self._w_self.size() - constants.BLKCLSR_SIZE +class FormWrapper(Wrapper): + bits, store_bits = make_getter_setter(constants.FORM_BITS) + width, store_width = make_int_getter_setter(constants.FORM_WIDTH) + height, store_height = make_int_getter_setter(constants.FORM_HEIGHT) + depth, store_depth = make_int_getter_setter(constants.FORM_DEPTH) + + def create_display_bitmap(self): + w_display_bitmap = model_display.from_words_object(self.bits(), self) + self.store_bits(w_display_bitmap) + return w_display_bitmap + + def get_display_bitmap(self): + w_bitmap = self.bits() + if not isinstance(w_bitmap, model_display.W_DisplayBitmap): + w_display_bitmap = self.create_display_bitmap() + else: + w_display_bitmap = w_bitmap + if w_display_bitmap._depth != self.depth(): + w_display_bitmap = self.create_display_bitmap() + return w_display_bitmap 
+ + def take_over_display(self): + self.space.display().set_video_mode(self.width(), self.height(), self.depth()) + # XXX Wrappers below are not used yet. class OffsetWrapper(Wrapper): offset_x = make_int_getter(0) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -36,6 +36,7 @@ -p|--poll - Actively poll for events. Try this if the image is not responding well. -i|--no-interrupts - Disable timer interrupt. Disables non-cooperative scheduling. -S - Disable specialized storage strategies; always use generic ListStorage + --hacks - Enable Spy hacks. Set display color depth to 8. Logging parameters: -t|--trace - Output a trace of each message, primitive, return value and process switch. @@ -69,7 +70,7 @@ except error.Exit, e: print_error("Exited: %s" % e.msg) return -1 - except Exception, e: + except BaseException, e: print_error("Exception: %s" % str(e)) if not objectmodel.we_are_translated(): import traceback @@ -119,8 +120,10 @@ interrupts = False elif arg in ["-P", "--process"]: headless = False + elif arg in ["--hacks"]: + space.run_spy_hacks.activate() elif arg in ["-S"]: - space.no_specialized_storage.set() + space.no_specialized_storage.activate() elif arg in ["-u"]: from spyvm.plugins.vmdebugging import stop_ui_process stop_ui_process() @@ -158,10 +161,10 @@ # Load & prepare image and environment image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) - interp = interpreter.Interpreter(space, image, image_name=path, + interp = interpreter.Interpreter(space, image, trace=trace, evented=not poll, interrupts=interrupts) - space.runtime_setup(argv[0]) + space.runtime_setup(argv[0], path) print_error("") # Line break after image-loading characters # Create context to be executed @@ -174,7 +177,7 @@ selector = compile_code(interp, w_receiver, code) s_frame = create_context(interp, w_receiver, 
selector, stringarg) if headless: - space.headless.set() + space.headless.activate() context = s_frame else: create_process(interp, s_frame) @@ -203,7 +206,7 @@ # registered (primitive 136 not called), so the idle process will never be left once it is entered. # TODO - Find a way to cleanly initialize the image, without executing the active_context of the image. # Instead, we want to execute our own context. Then remove this flag (and all references to it) - space.suppress_process_switch.set() + space.suppress_process_switch.activate() w_result = interp.perform( w_receiver_class, @@ -215,7 +218,7 @@ # TODO - is this expected in every image? if not isinstance(w_result, model.W_BytesObject) or w_result.as_string() != selector: raise error.Exit("Unexpected compilation result (probably failed to compile): %s" % result_string(w_result)) - space.suppress_process_switch.unset() + space.suppress_process_switch.deactivate() w_receiver_class.as_class_get_shadow(space).s_methoddict().sync_method_cache() return selector From noreply at buildbot.pypy.org Wed Jul 23 07:31:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 23 Jul 2014 07:31:52 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: in-progress Message-ID: <20140723053152.1B8401D3562@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5371:59cb8a939bcc Date: 2014-07-23 07:31 +0200 http://bitbucket.org/pypy/extradoc/changeset/59cb8a939bcc/ Log: in-progress diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst --- a/talk/ep2014/stm/talk.rst +++ b/talk/ep2014/stm/talk.rst @@ -138,6 +138,33 @@ - but mostly still research-only +PyPy-STM +-------- + +* implementation of a specially-tailored STM ("hard" part): + + - a reusable C library + - called STMGC-C7 + +* used in PyPy to replace the GIL ("easy" part) + +* could also be used in CPython + + - but refcounting needs replacing + + +Commits +--------- + +.. 
image:: fig4.svg + + +Demo +------ + +* counting primes + + Big Point ---------------------------- @@ -150,33 +177,31 @@ - even if they both *acquire and release the same lock* -Big Point ---------- +Long Transactions +----------------- .. image:: fig4.svg -Demo 1 +Demo ------ -* "Twisted apps made parallel out of the box" - * Bottle web server -PyPy-STM --------- +PyPy-STM Programming Model +--------------------------- -* implementation of a specially-tailored STM: - - - a reusable C library - - called STMGC-C7 +* threads-and-locks, fully compatible with the GIL -* used in PyPy to replace the GIL +* this is not "everybody should use careful explicit threading + with all the locking issues" -* could also be used in CPython +* instead, PyPy-STM pushes forward: - - but refcounting needs replacing + - use a thread pool library + + - coarse locking, inside that library only PyPy-STM status @@ -186,7 +211,7 @@ - basics work - best case 25-40% overhead (much better than originally planned) - - parallelizing user locks not done yet + - parallelizing user locks not done yet (see ``with atomic``) - tons of things to improve - tons of things to improve - tons of things to improve @@ -196,20 +221,14 @@ - tons of things to improve -Demo 2 ------- - -* counting primes - - -Benefits --------- +Summary: Benefits +----------------- * Keep locks coarse-grained * Potential to enable parallelism: - - in CPU-bound multithreaded programs + - in any CPU-bound multithreaded program - or as a replacement of ``multiprocessing`` @@ -218,10 +237,8 @@ - as long as they do multiple things that are "often independent" -Issues ------- - -* Performance hit: 25-40% everywhere (may be ok) +Summary: Issues +--------------- * Keep locks coarse-grained: @@ -229,13 +246,15 @@ - need to track and fix them - - need tool support (debugger/profiler) + - need tool to support this (debugger/profiler) +* Performance hit: 25-40% everywhere (may be ok) -Summary -------- -* Transactional Memory is still too 
researchy for production +Summary: PyPy-STM +----------------- + +* Not production-ready * But it has the potential to enable "easier parallelism" @@ -248,3 +267,39 @@ ----------------------- **STMGC-C7** + + +Overview +-------- + +* Say we want to run two threads + +* We reserve twice the memory + +* Thread 1 reads/writes "memory segment" 1 + +* Thread 2 reads/writes "memory segment" 2 + +* Upon commit, we (try to) copy the changes to the other segment + + +Trick #1 +-------- + +* Objects contain pointers to each other + +* These pointers are relative instead of absolute: + + - + + +Trick #1 +-------- + +* Most objects are the same in all segments: + + - so we share the memory + + - ``mmap() MAP_SHARED`` trickery + + From noreply at buildbot.pypy.org Wed Jul 23 08:02:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 23 Jul 2014 08:02:34 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Finish the slides Message-ID: <20140723060234.C5F031D3633@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5372:3c5ba3e46ec5 Date: 2014-07-23 08:02 +0200 http://bitbucket.org/pypy/extradoc/changeset/3c5ba3e46ec5/ Log: Finish the slides diff --git a/talk/ep2014/stm/talk.html b/talk/ep2014/stm/talk.html --- a/talk/ep2014/stm/talk.html +++ b/talk/ep2014/stm/talk.html @@ -502,40 +502,64 @@
          -
          -

          Big Point

          +
          +

          PyPy-STM

            -
          • application-level locks still needed...
          • +
          • implementation of a specially-tailored STM ("hard" part):
              +
            • a reusable C library
            • +
            • called STMGC-C7
            • +
            +
          • +
          • used in PyPy to replace the GIL ("easy" part)
          • +
          • could also be used in CPython
              +
            • but refcounting needs replacing
            • +
            +
          • +
          +
          +
          +

          How does it work?

          + +fig4.svg +
          +
          +

          Demo

          +
            +
          • counting primes
          • +
          +
          +
          +

          Long Transactions

          +
            +
          • threads and application-level locks still needed...
          • but can be very coarse:
              -
            • even two big transactions can optimistically run in parallel
            • +
            • two transactions can optimistically run in parallel
            • even if they both acquire and release the same lock
            • +
            • internally, drive the transaction lengths by the locks we acquire
          -

          Big Point

          +

          Long Transactions

          fig4.svg
          -
          -

          Demo 1

          +
          +

          Demo

            -
          • "Twisted apps made parallel out of the box"
          • Bottle web server
          -
          -

          PyPy-STM

          +
          +

          PyPy-STM Programming Model

            -
          • implementation of a specially-tailored STM:
              -
            • a reusable C library
            • -
            • called STMGC-C7
            • -
            -
          • -
          • used in PyPy to replace the GIL
          • -
          • could also be used in CPython
              -
            • but refcounting needs replacing
            • +
            • threads-and-locks, fully compatible with the GIL
            • +
            • this is not "everybody should use careful explicit threading +with all the locking issues"
            • +
            • instead, PyPy-STM pushes forward:
                +
              • use a thread pool library
              • +
              • coarse locking, inside that library only
            @@ -546,7 +570,7 @@
          • current status:
            • basics work
            • best case 25-40% overhead (much better than originally planned)
            • -
            • parallelizing user locks not done yet
            • +
            • parallelizing user locks not done yet (see "with atomic")
            • tons of things to improve
            • tons of things to improve
            • tons of things to improve
            • @@ -558,52 +582,113 @@
          -
          -

          Demo 2

          +
          +

          Summary: Benefits

            -
          • counting primes
          • -
          -
          -
          -

          Benefits

          -
            -
          • Keep locks coarse-grained
          • Potential to enable parallelism:
              -
            • in CPU-bound multithreaded programs
            • +
            • in any CPU-bound multithreaded program
            • or as a replacement of multiprocessing
            • but also in existing applications not written for that
            • as long as they do multiple things that are "often independent"
          • +
          • Keep locks coarse-grained
          -
          -

          Issues

          +
          +

          Summary: Issues

            -
          • Performance hit: 25-40% everywhere (may be ok)
          • Keep locks coarse-grained:
            • but in case of systematic conflicts, performance is bad again
            • need to track and fix them
            • -
            • need tool support (debugger/profiler)
            • +
            • need tool to support this (debugger/profiler)
          • +
          • Performance hit: 25-40% over a plain PyPy-JIT (may be ok)
          -
          -

          Summary

          +
          +

          Summary: PyPy-STM

            -
          • Transactional Memory is still too researchy for production
          • -
          • But it has the potential to enable "easier parallelism"
          • +
          • Not production-ready
          • +
          • But it has the potential to enable "easier parallelism for everybody"
          • Still alpha but slowly getting there!
          • +
          • Crowdfunding! +

          Part 2 - Under The Hood

          STMGC-C7

          +
          +

          Overview

          +
            +
          • Say we want to run N = 2 threads
          • +
          • We reserve twice the memory
          • +
          • Thread 1 reads/writes "memory segment" 1
          • +
          • Thread 2 reads/writes "memory segment" 2
          • +
          • Upon commit, we (try to) copy the changes to the other segment
          • +
          +
          +
          +

          Trick #1

          +
            +
          • Objects contain pointers to each other
          • +
          • These pointers are relative instead of absolute:
              +
            • accessed as if they were "thread-local data"
            • +
            • the x86 has a zero-cost way to do that (%fs, %gs)
            • +
            • supported in clang (not gcc so far)
            • +
            +
          • +
          +
          +
          +

          Trick #2

          +
            +
          • With Trick #1, most objects are exactly identical in all N segments:
              +
            • so we share the memory
            • +
            • mmap() MAP_SHARED
            • +
            • actual memory usage is multiplied by much less than N
            • +
            +
          • +
          • Newly allocated objects are directly in shared pages:
              +
            • we don't actually need to copy all new objects at commit, +but only the few old objects modified
            • +
            +
          • +
          +
          +
          +

          Barriers

          +
            +
          • Need to record all reads and writes done by a transaction
          • +
          • Extremely cheap way to do that:
              +
            • Read: set a flag in thread-local memory (one byte)
            • +
            • Write into a newly allocated object: nothing to do
            • +
            • Write into an old object: add the object to a list
            • +
            +
          • +
          • Commit: check if each object from that list conflicts with +a read flag set in some other thread
          • +
          +
          +
          +

          ...

          +
          +
          +

          Thank You

          + +
          diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst --- a/talk/ep2014/stm/talk.rst +++ b/talk/ep2014/stm/talk.rst @@ -153,8 +153,8 @@ - but refcounting needs replacing -Commits ---------- +How does it work? +----------------- .. image:: fig4.svg @@ -165,17 +165,19 @@ * counting primes -Big Point +Long Transactions ---------------------------- -* application-level locks still needed... +* threads and application-level locks still needed... * but *can be very coarse:* - - even two big transactions can optimistically run in parallel + - two transactions can optimistically run in parallel - even if they both *acquire and release the same lock* + - internally, drive the transaction lengths by the locks we acquire + Long Transactions ----------------- @@ -211,7 +213,7 @@ - basics work - best case 25-40% overhead (much better than originally planned) - - parallelizing user locks not done yet (see ``with atomic``) + - parallelizing user locks not done yet (see "with atomic") - tons of things to improve - tons of things to improve - tons of things to improve @@ -224,8 +226,6 @@ Summary: Benefits ----------------- -* Keep locks coarse-grained - * Potential to enable parallelism: - in any CPU-bound multithreaded program @@ -236,6 +236,8 @@ - as long as they do multiple things that are "often independent" +* Keep locks coarse-grained + Summary: Issues --------------- @@ -248,7 +250,7 @@ - need tool to support this (debugger/profiler) -* Performance hit: 25-40% everywhere (may be ok) +* Performance hit: 25-40% over a plain PyPy-JIT (may be ok) Summary: PyPy-STM @@ -256,12 +258,16 @@ * Not production-ready -* But it has the potential to enable "easier parallelism" +* But it has the potential to enable "easier parallelism for everybody" * Still alpha but slowly getting there! - see http://morepypy.blogspot.com/ +* Crowdfunding! 
+ + - see http://pypy.org/ + Part 2 - Under The Hood ----------------------- @@ -272,7 +278,7 @@ Overview -------- -* Say we want to run two threads +* Say we want to run N = 2 threads * We reserve twice the memory @@ -290,16 +296,56 @@ * These pointers are relative instead of absolute: - - + - accessed as if they were "thread-local data" + - the x86 has a zero-cost way to do that (``%fs``, ``%gs``) -Trick #1 + - supported in clang (not gcc so far) + + +Trick #2 -------- -* Most objects are the same in all segments: +* With Trick #1, most objects are exactly identical in all N segments: - so we share the memory - - ``mmap() MAP_SHARED`` trickery + - ``mmap() MAP_SHARED`` + - actual memory usage is multiplied by much less than N +* Newly allocated objects are directly in shared pages: + + - we don't actually need to copy *all new objects* at commit, + but only the few *old objects* modified + + +Barriers +-------- + +* Need to record all reads and writes done by a transaction + +* Extremely cheap way to do that: + + - *Read:* set a flag in thread-local memory (one byte) + + - *Write* into a newly allocated object: nothing to do + + - *Write* into an old object: add the object to a list + +* Commit: check if each object from that list conflicts with + a read flag set in some other thread + + +... 
+------------------- + + +Thank You +--------- + +* http://morepypy.blogspot.com/ + +* http://pypy.org/ + +* irc: ``#pypy`` on freenode.net From noreply at buildbot.pypy.org Wed Jul 23 10:51:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 23 Jul 2014 10:51:58 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: tweaks Message-ID: <20140723085158.172551C0325@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5373:8fe97fa6f212 Date: 2014-07-23 10:51 +0200 http://bitbucket.org/pypy/extradoc/changeset/8fe97fa6f212/ Log: tweaks diff --git a/talk/ep2014/stm/demo/bench-multiprocessing.py b/talk/ep2014/stm/demo/bench-multiprocessing.py --- a/talk/ep2014/stm/demo/bench-multiprocessing.py +++ b/talk/ep2014/stm/demo/bench-multiprocessing.py @@ -8,7 +8,7 @@ subtotal += 1 return subtotal -pool = Pool(4) +pool = Pool(2) results = pool.map(process, xrange(0, 5000000, 20000)) total = sum(results) diff --git a/talk/ep2014/stm/talk.html b/talk/ep2014/stm/talk.html --- a/talk/ep2014/stm/talk.html +++ b/talk/ep2014/stm/talk.html @@ -535,7 +535,6 @@
        • but can be very coarse:
          • two transactions can optimistically run in parallel
          • even if they both acquire and release the same lock
          • -
          • internally, drive the transaction lengths by the locks we acquire
        @@ -558,19 +557,30 @@
      • this is not "everybody should use careful explicit threading with all the locking issues"
      • instead, PyPy-STM pushes forward:
          -
        • use a thread pool library
        • +
        • make or use a thread pool library
        • coarse locking, inside that library only
      • +
        +

        PyPy-STM Programming Model

        +
          +
        • e.g.:
            +
          • multiprocessing-like thread pool
          • +
          • Twisted/Tornado/Bottle extension
          • +
          • Stackless/greenlet/gevent extension
          • +
          +
        • +
        +

        PyPy-STM status

        • current status:
          • basics work
          • best case 25-40% overhead (much better than originally planned)
          • -
          • parallelizing user locks not done yet (see "with atomic")
          • +
          • app locks not done yet ("with atomic" workaround)
          • tons of things to improve
          • tons of things to improve
          • tons of things to improve
          • @@ -604,7 +614,7 @@
          • need tool to support this (debugger/profiler)
        • -
        • Performance hit: 25-40% over a plain PyPy-JIT (may be ok)
        • +
        • Performance hit: 25-40% slower than a plain PyPy-JIT (may be ok)
        @@ -678,7 +688,7 @@ a read flag set in some other thread
        -
        +

        ...

        diff --git a/talk/ep2014/stm/talk.rst b/talk/ep2014/stm/talk.rst --- a/talk/ep2014/stm/talk.rst +++ b/talk/ep2014/stm/talk.rst @@ -176,8 +176,6 @@ - even if they both *acquire and release the same lock* - - internally, drive the transaction lengths by the locks we acquire - Long Transactions ----------------- @@ -201,11 +199,23 @@ * instead, PyPy-STM pushes forward: - - use a thread pool library + - make or use a thread pool library - coarse locking, inside that library only +PyPy-STM Programming Model +-------------------------- + +* e.g.: + + - ``multiprocessing``-like thread pool + + - Twisted/Tornado/Bottle extension + + - Stackless/greenlet/gevent extension + + PyPy-STM status --------------- @@ -213,7 +223,7 @@ - basics work - best case 25-40% overhead (much better than originally planned) - - parallelizing user locks not done yet (see "with atomic") + - app locks not done yet ("with atomic" workaround) - tons of things to improve - tons of things to improve - tons of things to improve @@ -250,7 +260,7 @@ - need tool to support this (debugger/profiler) -* Performance hit: 25-40% over a plain PyPy-JIT (may be ok) +* Performance hit: 25-40% slower than a plain PyPy-JIT (may be ok) Summary: PyPy-STM From noreply at buildbot.pypy.org Thu Jul 24 10:48:11 2014 From: noreply at buildbot.pypy.org (stefanor) Date: Thu, 24 Jul 2014 10:48:11 +0200 (CEST) Subject: [pypy-commit] cffi default: ARM64 support. Tests pass under qemu. Message-ID: <20140724084811.E2EB11C022A@cobra.cs.uni-duesseldorf.de> Author: Stefano Rivera Branch: Changeset: r1550:af4e381b5e99 Date: 2014-07-24 10:47 +0200 http://bitbucket.org/cffi/cffi/changeset/af4e381b5e99/ Log: ARM64 support. Tests pass under qemu. Fixes #136. Thanks Will Newton . 
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3622,7 +3622,7 @@ #ifdef MS_WIN32 sflags |= SF_MSVC_BITFIELDS; #else -# ifdef __arm__ +# if defined(__arm__) || defined(__aarch64__) sflags |= SF_GCC_ARM_BITFIELDS; # else sflags |= SF_GCC_X86_BITFIELDS; diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -122,7 +122,7 @@ self.check("int a:2; short b:15; char c:2; char y;", 5, 4, 8) self.check("int a:2; char b:1; char c:1; char y;", 1, 4, 4) - @pytest.mark.skipif("platform.machine().startswith('arm')") + @pytest.mark.skipif("platform.machine().startswith(('arm', 'aarch64'))") def test_bitfield_anonymous_no_align(self): L = FFI().alignof("long long") self.check("char y; int :1;", 0, 1, 2) @@ -135,7 +135,8 @@ self.check("char x; long long z:57; char y;", L + 8, L, L + 8 + L) self.check("char x; long long :57; char y;", L + 8, 1, L + 9) - @pytest.mark.skipif("not platform.machine().startswith('arm')") + @pytest.mark.skipif( + "not platform.machine().startswith(('arm', 'aarch64'))") def test_bitfield_anonymous_align_arm(self): L = FFI().alignof("long long") self.check("char y; int :1;", 0, 4, 4) @@ -148,7 +149,7 @@ self.check("char x; long long z:57; char y;", L + 8, L, L + 8 + L) self.check("char x; long long :57; char y;", L + 8, L, L + 8 + L) - @pytest.mark.skipif("platform.machine().startswith('arm')") + @pytest.mark.skipif("platform.machine().startswith(('arm', 'aarch64'))") def test_bitfield_zero(self): L = FFI().alignof("long long") self.check("char y; int :0;", 0, 1, 4) @@ -159,7 +160,8 @@ self.check("char x; int :0; short b:1; char y;", 5, 2, 6) self.check("int a:1; int :0; int b:1; char y;", 5, 4, 8) - @pytest.mark.skipif("not platform.machine().startswith('arm')") + @pytest.mark.skipif( + "not platform.machine().startswith(('arm', 'aarch64'))") def test_bitfield_zero_arm(self): L = FFI().alignof("long long") 
self.check("char y; int :0;", 0, 4, 4) From noreply at buildbot.pypy.org Thu Jul 24 11:27:12 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 24 Jul 2014 11:27:12 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added --safe-trace flag which omits printing contents of byte objects. Message-ID: <20140724092712.C2DDD1C3273@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r942:adcfa00d78d1 Date: 2014-07-23 19:27 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/adcfa00d78d1/ Log: Added --safe-trace flag which omits printing contents of byte objects. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -648,12 +648,13 @@ def _mustBeBoolean(self, interp, receiver): return self._sendSpecialSelector(interp, receiver, "mustBeBoolean") - + def _call_primitive(self, code, interp, argcount, w_method, w_selector): # ################################################################## if interp.is_tracing(): - interp.print_padded("-> primitive %d \t(in %s, named #%s)" % ( - code, self.w_method().get_identifier_string(), w_selector.str_content())) + interp.print_padded("-> primitive %d \t(in %s, named %s)" % ( + code, self.w_method().get_identifier_string(), + w_selector.selector_string())) func = primitives.prim_holder.prim_table[code] try: # note: argcount does not include rcvr @@ -662,7 +663,7 @@ except primitives.PrimitiveFailedError, e: if interp.is_tracing(): interp.print_padded("-- primitive %d FAILED\t (in %s, named %s)" % ( - code, w_method.safe_identifier_string(), w_selector.str_content())) + code, w_method.safe_identifier_string(), w_selector.selector_string())) raise e def _return(self, return_value, interp, local_return=False): diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -194,6 +194,9 @@ def repr_content(self): return self.str_content() + + def selector_string(self): + return self.as_repr_string() 
class W_SmallInteger(W_Object): """Boxed integer value""" @@ -817,15 +820,22 @@ return self._size def str_content(self): - return "'%s'" % self.as_string() + if self.has_class() and self.w_class.has_space(): + if self.w_class.space().omit_printing_raw_bytes.is_set(): + return "" + else: + return "'%s'" % self.as_string().replace('\r', '\n') def as_string(self): if self.bytes is not None: string = "".join(self.bytes) else: string = "".join([self.c_bytes[i] for i in range(self.size())]) - return string.replace('\r', '\n') - + return string + + def selector_string(self): + return "#" + self.as_string() + def invariant(self): if not W_AbstractObjectWithClassReference.invariant(self): return False diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -52,6 +52,7 @@ self.suppress_process_switch = ConstantFlag() self.run_spy_hacks = ConstantFlag() self.headless = ConstantFlag() + self.omit_printing_raw_bytes = ConstantFlag() self.classtable = {} self.objtable = {} diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -12,7 +12,7 @@ def _usage(argv): print """ - Usage: %s [-r|-m|-h] [-naPu] [-jpiS] [-tlLE] + Usage: %s [-r|-m|-h] [-naPu] [-jpiS] [-tslLE] - image path (default: Squeak.image) Execution mode: @@ -40,6 +40,7 @@ Logging parameters: -t|--trace - Output a trace of each message, primitive, return value and process switch. + -s|--safe-trace - Like -t, but without printing contents of BytesObjects -l|--storage-log - Output a log of storage operations. -L|--storage-log-aggregate - Output an aggregated storage log at the end of execution. -E|--storage-log-elements - Include classnames of elements into the storage log. 
@@ -110,6 +111,9 @@ selector, idx = get_parameter(argv, idx, arg) elif arg in ["-t", "--trace"]: trace = True + elif arg in ["-s", "--safe-trace"]: + trace = True + space.omit_printing_raw_bytes.activate() elif arg in ["-p", "--poll"]: poll = True elif arg in ["-a", "--arg"]: From noreply at buildbot.pypy.org Thu Jul 24 11:27:25 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 24 Jul 2014 11:27:25 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state: Updated test. Message-ID: <20140724092725.60D351C3273@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state Changeset: r943:369091d66f5d Date: 2014-07-23 15:30 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/369091d66f5d/ Log: Updated test. diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -403,12 +403,23 @@ for i in xrange(6, 8): assert target.pixelbuffer[i] == 0x0 -def test_display_offset_computation(): - dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 5, 1) +def test_display_offset_computation_even(): + dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 200, 1) + dbitmap.pitch = 64 + dbitmap.words_per_line = 2 assert dbitmap.compute_pos(0) == 0 - assert dbitmap.compute_pos(1) == 8 - assert dbitmap.size() == 5 * 8 + assert dbitmap.compute_pos(1) == 32 + assert dbitmap.compute_pos(2) == 64 +def test_display_offset_computation_uneven(): + dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 200, 1) + dbitmap.pitch = 67 + dbitmap.words_per_line = 2 + assert dbitmap.compute_pos(0) == 0 + assert dbitmap.compute_pos(1) == 32 + assert dbitmap.compute_pos(2) == 67 + assert dbitmap.compute_pos(3) == 67 + 32 + @py.test.mark.skipif("socket.gethostname() == 'precise32'") def test_weak_pointers(): w_cls = bootstrap_class(2) From noreply at buildbot.pypy.org Thu Jul 24 11:27:26 2014 From: noreply at buildbot.pypy.org 
(anton_gulenko) Date: Thu, 24 Jul 2014 11:27:26 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state: Added --safe-trace flag which omits printing contents of byte objects. Message-ID: <20140724092726.860B51C3273@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state Changeset: r944:eca2665b8824 Date: 2014-07-23 19:27 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/eca2665b8824/ Log: Added --safe-trace flag which omits printing contents of byte objects. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -685,12 +685,13 @@ def _mustBeBoolean(self, interp, receiver): return self._sendSpecialSelector(interp, receiver, "mustBeBoolean") - + def _call_primitive(self, code, interp, argcount, w_method, w_selector): # ################################################################## if interp.is_tracing(): - interp.print_padded("-> primitive %d \t(in %s, named #%s)" % ( - code, self.w_method().get_identifier_string(), w_selector.str_content())) + interp.print_padded("-> primitive %d \t(in %s, named %s)" % ( + code, self.w_method().get_identifier_string(), + w_selector.selector_string())) func = primitives.prim_holder.prim_table[code] try: # note: argcount does not include rcvr @@ -699,7 +700,7 @@ except primitives.PrimitiveFailedError, e: if interp.is_tracing(): interp.print_padded("-- primitive %d FAILED\t (in %s, named %s)" % ( - code, w_method.safe_identifier_string(), w_selector.str_content())) + code, w_method.safe_identifier_string(), w_selector.selector_string())) raise e def _return(self, return_value, interp, local_return=False): diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -194,6 +194,9 @@ def repr_content(self): return self.str_content() + + def selector_string(self): + return self.as_repr_string() class W_SmallInteger(W_Object): """Boxed integer value""" @@ -817,15 +820,22 @@ return self._size def 
str_content(self): - return "'%s'" % self.as_string() + if self.has_class() and self.w_class.has_space(): + if self.w_class.space().omit_printing_raw_bytes.is_set(): + return "" + else: + return "'%s'" % self.as_string().replace('\r', '\n') def as_string(self): if self.bytes is not None: string = "".join(self.bytes) else: string = "".join([self.c_bytes[i] for i in range(self.size())]) - return string.replace('\r', '\n') - + return string + + def selector_string(self): + return "#" + self.as_string() + def invariant(self): if not W_AbstractObjectWithClassReference.invariant(self): return False diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -52,6 +52,7 @@ self.suppress_process_switch = ConstantFlag() self.run_spy_hacks = ConstantFlag() self.headless = ConstantFlag() + self.omit_printing_raw_bytes = ConstantFlag() self.classtable = {} self.objtable = {} diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -12,7 +12,7 @@ def _usage(argv): print """ - Usage: %s [-r|-m|-h] [-naPu] [-jpiS] [-tlLE] + Usage: %s [-r|-m|-h] [-naPu] [-jpiS] [-tslLE] - image path (default: Squeak.image) Execution mode: @@ -40,6 +40,7 @@ Logging parameters: -t|--trace - Output a trace of each message, primitive, return value and process switch. + -s|--safe-trace - Like -t, but without printing contents of BytesObjects -l|--storage-log - Output a log of storage operations. -L|--storage-log-aggregate - Output an aggregated storage log at the end of execution. -E|--storage-log-elements - Include classnames of elements into the storage log. 
@@ -110,6 +111,9 @@ selector, idx = get_parameter(argv, idx, arg) elif arg in ["-t", "--trace"]: trace = True + elif arg in ["-s", "--safe-trace"]: + trace = True + space.omit_printing_raw_bytes.activate() elif arg in ["-p", "--poll"]: poll = True elif arg in ["-a", "--arg"]: From noreply at buildbot.pypy.org Thu Jul 24 11:27:27 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 24 Jul 2014 11:27:27 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state: Merged. Message-ID: <20140724092727.93CF81C3273@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state Changeset: r945:6f021854ceab Date: 2014-07-23 19:28 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6f021854ceab/ Log: Merged. From noreply at buildbot.pypy.org Thu Jul 24 14:12:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 24 Jul 2014 14:12:15 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #1817: missing support for virtual_refs pointing to several different types in the same RPython program Message-ID: <20140724121215.052931C1106@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72480:35fdf446e439 Date: 2014-07-24 14:11 +0200 http://bitbucket.org/pypy/pypy/changeset/35fdf446e439/ Log: Issue #1817: missing support for virtual_refs pointing to several different types in the same RPython program diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -34,7 +34,7 @@ # def check_call(op, fname): assert op.opname == 'direct_call' - assert op.args[0].value._obj._name == fname + assert op.args[0].value._obj._name.startswith(fname) # ops = [op for block, op in graph.iterblockops()] check_call(ops[-3], 'virtual_ref') diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -340,6 +340,7 @@ # 
____________________________________________________________ # VRefs + at specialize.argtype(0) def virtual_ref(x): """Creates a 'vref' object that contains a reference to 'x'. Calls to virtual_ref/virtual_ref_finish must be properly nested. The idea @@ -351,6 +352,7 @@ return DirectJitVRef(x) virtual_ref.oopspec = 'virtual_ref(x)' + at specialize.argtype(1) def virtual_ref_finish(vref, x): """See docstring in virtual_ref(x)""" keepalive_until_here(x) # otherwise the whole function call is removed From noreply at buildbot.pypy.org Fri Jul 25 11:19:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 25 Jul 2014 11:19:03 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #1823: fix _m_ispad(), which always returned 0 Message-ID: <20140725091903.281091C024A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72481:71ab3dcb1309 Date: 2014-07-25 11:18 +0200 http://bitbucket.org/pypy/pypy/changeset/71ab3dcb1309/ Log: Issue #1823: fix _m_ispad(), which always returned 0 diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -309,11 +309,9 @@ #endif int _m_ispad(WINDOW *win) { -#if defined WINDOW_HAS_FLAGS + // may not have _flags (and possibly _ISPAD), + // but for now let's assume that always has it return (win->_flags & _ISPAD); -#else - return 0; -#endif } void _m_getsyx(int *yx) { From noreply at buildbot.pypy.org Fri Jul 25 21:17:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Jul 2014 21:17:11 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140725191711.554351C04B7@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72482:fc86a76771f4 Date: 2014-07-20 11:06 -0700 http://bitbucket.org/pypy/pypy/changeset/fc86a76771f4/ Log: merge py3k diff --git a/lib-python/3/json/__init__.py b/lib-python/3/json/__init__.py --- a/lib-python/3/json/__init__.py +++ b/lib-python/3/json/__init__.py @@ -104,6 +104,12 @@ __author__ = 
'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -313,7 +319,7 @@ if (cls is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + return _pypyjson.loads(s) if _pypyjson else _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -307,13 +307,13 @@ w_co = space.appexec([], '''(): def g(x): yield x + 5 - return g.func_code + return g.__code__ ''') assert should_not_inline(w_co) == False w_co = space.appexec([], '''(): def g(x): yield x + 5 yield x + 6 - return g.func_code + return g.__code__ ''') assert should_not_inline(w_co) == True diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py --- a/pypy/module/_pypyjson/interp_decoder.py +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -56,6 +56,7 @@ self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') self.pos = 0 self.last_type = TYPE_UNKNOWN + self.memo = {} def close(self): rffi.free_charp(self.ll_chars) @@ -261,6 +262,8 @@ w_name = self.decode_any(i) if self.last_type != TYPE_STRING: self._raise("Key name must be string for object starting at char %d", start) + w_name = self.memo.setdefault(self.space.unicode_w(w_name), w_name) + i = self.skip_whitespace(self.pos) ch = self.ll_chars[i] if ch != ':': diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py --- a/pypy/module/_pypyjson/test/test__pypyjson.py +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -187,4 +187,12 @@ import _pypyjson # 
http://json.org/JSON_checker/test/fail25.json s = '["\ttab\tcharacter\tin\tstring\t"]' - raises(ValueError, "_pypyjson.loads(s)") \ No newline at end of file + raises(ValueError, "_pypyjson.loads(s)") + + def test_keys_reuse(self): + import _pypyjson + s = '[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]' + rval = _pypyjson.loads(s) + (a, b), (c, d) = sorted(rval[0]), sorted(rval[1]) + assert a is c + assert b is d diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -244,7 +244,7 @@ """ size = self.len if size == 0: - return space.wrap('') + return space.wrapbytes('') cbuf = self._charbuf_start() s = rffi.charpsize2str(cbuf, size * self.itemsize) self._charbuf_stop() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -488,8 +488,7 @@ cur = os.getlogin() except OSError, e: raise wrap_oserror(space, e) - else: - return space.wrap(cur) + return space.fsdecode(space.wrapbytes(cur)) # ____________________________________________________________ @@ -702,14 +701,21 @@ except OSError, e: raise wrap_oserror(space, e) - at unwrap_spec(path='fsencode') -def readlink(space, path): +def readlink(space, w_path): "Return a string representing the path to which the symbolic link points." 
+ is_unicode = space.isinstance_w(w_path, space.w_unicode) + if is_unicode: + path = space.fsencode_w(w_path) + else: + path = space.bytes0_w(w_path) try: result = os.readlink(path) except OSError, e: - raise wrap_oserror(space, e, path) - return space.wrap(result) + raise wrap_oserror2(space, e, w_path) + w_result = space.wrapbytes(result) + if is_unicode: + return space.fsdecode(w_result) + return w_result before_fork_hooks = [] after_fork_child_hooks = [] @@ -899,7 +905,8 @@ r = os.uname() except OSError, e: raise wrap_oserror(space, e) - l_w = [space.wrap(i) for i in [r[0], r[1], r[2], r[3], r[4]]] + l_w = [space.fsdecode(space.wrapbytes(i)) + for i in [r[0], r[1], r[2], r[3], r[4]]] w_tuple = space.newtuple(l_w) w_uname_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('uname_result')) @@ -1229,7 +1236,7 @@ @unwrap_spec(fd=c_int) def ttyname(space, fd): try: - return space.wrap(os.ttyname(fd)) + return space.fsdecode(space.wrapbytes(os.ttyname(fd))) except OSError, e: raise wrap_oserror(space, e) @@ -1364,7 +1371,7 @@ Return the name of the controlling terminal for this process. """ - return space.wrap(os.ctermid()) + return space.fsdecode(space.wrapbytes(os.ctermid())) @unwrap_spec(fd=c_int) def device_encoding(space, fd): From noreply at buildbot.pypy.org Fri Jul 25 21:17:12 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Jul 2014 21:17:12 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fix OSError aliases (Environment/IOError) causing an obscure clash of names w/ Message-ID: <20140725191712.9D4361C04B7@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72483:2fd763f1298c Date: 2014-07-25 12:15 -0700 http://bitbucket.org/pypy/pypy/changeset/2fd763f1298c/ Log: fix OSError aliases (Environment/IOError) causing an obscure clash of names w/ export_struct as used by cpyext. 
fixes translation but breaks their cpyext API names for now (+ a failing test for this) diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -4,7 +4,6 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import ( TypeDef, interp_attrproperty, generic_new_descr) -from pypy.module.exceptions.interp_exceptions import W_IOError from pypy.module._io.interp_fileio import W_FileIO from pypy.module._io.interp_textio import W_TextIOWrapper from rpython.rtyper.module.ll_os_stat import STAT_FIELD_TYPES diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -454,6 +454,10 @@ # PyExc_NameError, PyExc_MemoryError, PyExc_RuntimeError, # PyExc_UnicodeEncodeError, PyExc_UnicodeDecodeError, ... for exc_name in exceptions.Module.interpleveldefs.keys(): + if exc_name in ('EnvironmentError', 'IOError'): + # FIXME: aliases of OSError cause a clash of names via + # export_struct + continue GLOBALS['PyExc_' + exc_name] = ( 'PyTypeObject*', 'space.gettypeobject(interp_exceptions.W_%s.typedef)'% (exc_name, )) diff --git a/pypy/module/cpyext/test/test_exception.py b/pypy/module/cpyext/test/test_exception.py --- a/pypy/module/cpyext/test/test_exception.py +++ b/pypy/module/cpyext/test/test_exception.py @@ -1,4 +1,5 @@ from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.pyobject import make_ref class TestExceptions(BaseApiTest): @@ -27,3 +28,16 @@ api.PyException_SetCause(w_exc, make_ref(space, w_cause)) assert space.is_w(api.PyException_GetCause(w_exc), w_cause) + +class AppTestExceptions(AppTestCpythonExtensionBase): + + def test_OSError_aliases(self): + module = self.import_extension('foo', [ + ("get_aliases", "METH_NOARGS", + """ + return PyTuple_Pack(2, + 
PyExc_EnvironmentError, + PyExc_IOError); + """), + ]) + assert module.get_aliases() == (OSError, OSError) diff --git a/pypy/module/exceptions/__init__.py b/pypy/module/exceptions/__init__.py --- a/pypy/module/exceptions/__init__.py +++ b/pypy/module/exceptions/__init__.py @@ -21,14 +21,14 @@ 'ConnectionResetError': 'interp_exceptions.W_ConnectionResetError', 'DeprecationWarning' : 'interp_exceptions.W_DeprecationWarning', 'EOFError' : 'interp_exceptions.W_EOFError', - 'EnvironmentError' : 'interp_exceptions.W_EnvironmentError', + 'EnvironmentError' : 'interp_exceptions.W_OSError', 'Exception' : 'interp_exceptions.W_Exception', 'FileExistsError': 'interp_exceptions.W_FileExistsError', 'FileNotFoundError': 'interp_exceptions.W_FileNotFoundError', 'FloatingPointError' : 'interp_exceptions.W_FloatingPointError', 'FutureWarning' : 'interp_exceptions.W_FutureWarning', 'GeneratorExit' : 'interp_exceptions.W_GeneratorExit', - 'IOError' : 'interp_exceptions.W_IOError', + 'IOError' : 'interp_exceptions.W_OSError', 'ImportError' : 'interp_exceptions.W_ImportError', 'ImportWarning' : 'interp_exceptions.W_ImportWarning', 'IndentationError' : 'interp_exceptions.W_IndentationError', diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -497,9 +497,6 @@ filename = readwrite_attrproperty_w('w_filename', W_OSError), ) -W_EnvironmentError = W_OSError -W_IOError = W_OSError - class W_WindowsError(W_OSError): """MS-Windows OS system call failed.""" From noreply at buildbot.pypy.org Fri Jul 25 22:12:43 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Jul 2014 22:12:43 +0200 (CEST) Subject: [pypy-commit] pypy vendor/stdlib-3.3.5: fix the version Message-ID: <20140725201243.624951C0588@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: vendor/stdlib-3.3.5 Changeset: r72484:80b492d79663 Date: 2014-07-25 13:07 -0700 
http://bitbucket.org/pypy/pypy/changeset/80b492d79663/ Log: fix the version diff --git a/lib-python/stdlib-version.txt b/lib-python/stdlib-version.txt --- a/lib-python/stdlib-version.txt +++ b/lib-python/stdlib-version.txt @@ -6,4 +6,4 @@ 2.7:: 3a1db0d2747e (2.7) v2.7.6 3:: - cef745775b65 (3.2) v3.2.5 + 62cf4e77f785 (3.3) v3.3.5 From noreply at buildbot.pypy.org Fri Jul 25 22:12:44 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Jul 2014 22:12:44 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge vendor/stdlib-3.3.5 Message-ID: <20140725201244.AFDF21C0588@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72485:78836101ec63 Date: 2014-07-25 13:08 -0700 http://bitbucket.org/pypy/pypy/changeset/78836101ec63/ Log: merge vendor/stdlib-3.3.5 diff --git a/lib-python/stdlib-version.txt b/lib-python/stdlib-version.txt --- a/lib-python/stdlib-version.txt +++ b/lib-python/stdlib-version.txt @@ -6,4 +6,4 @@ 2.7:: 3a1db0d2747e (2.7) v2.7.6 3:: - cef745775b65 (3.2) v3.2.5 + 62cf4e77f785 (3.3) v3.3.5 From noreply at buildbot.pypy.org Fri Jul 25 22:12:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 25 Jul 2014 22:12:45 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fix the version Message-ID: <20140725201245.E6F9F1C0588@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72486:ff5661747625 Date: 2014-07-25 13:11 -0700 http://bitbucket.org/pypy/pypy/changeset/ff5661747625/ Log: fix the version diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -20,13 +20,13 @@ /* Version parsed out into numeric values */ #define PY_MAJOR_VERSION 3 -#define PY_MINOR_VERSION 2 +#define PY_MINOR_VERSION 3 #define PY_MICRO_VERSION 5 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "3.2.5" +#define PY_VERSION 
"3.3.5" /* PyPy version as a string */ #define PYPY_VERSION "2.4.0-alpha0" diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -6,7 +6,7 @@ from pypy.interpreter import gateway #XXX # the release serial 42 is not in range(16) -CPYTHON_VERSION = (3, 2, 5, "final", 0) +CPYTHON_VERSION = (3, 3, 5, "final", 0) #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h From noreply at buildbot.pypy.org Sat Jul 26 11:04:11 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sat, 26 Jul 2014 11:04:11 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: NoneType can be constructed. Message-ID: <20140726090411.62F051C085C@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72487:032296946827 Date: 2014-07-26 11:00 +0200 http://bitbucket.org/pypy/pypy/changeset/032296946827/ Log: NoneType can be constructed. diff --git a/pypy/module/__builtin__/test/test_construct_singletons.py b/pypy/module/__builtin__/test/test_construct_singletons.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_construct_singletons.py @@ -0,0 +1,7 @@ +class AppTestConstructSingletons: + + def test_construct_singletons(self): + none_type = type(None) + assert none_type() is None + raises(TypeError, none_type, 1, 2) + raises(TypeError, none_type, a=1, b=2) diff --git a/pypy/objspace/std/nonetype.py b/pypy/objspace/std/nonetype.py --- a/pypy/objspace/std/nonetype.py +++ b/pypy/objspace/std/nonetype.py @@ -1,8 +1,15 @@ from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.interpreter import gateway +def descr__new__(space, w_type): + return space.w_None + # ____________________________________________________________ none_typedef = StdTypeDef("NoneType", + __new__ = gateway.interp2app(descr__new__) ) none_typedef.acceptable_as_base_class = False + + From noreply at buildbot.pypy.org Sat Jul 26 
12:32:29 2014 From: noreply at buildbot.pypy.org (Arjun Naik) Date: Sat, 26 Jul 2014 12:32:29 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Added the missing log2 in the math module. Message-ID: <20140726103229.36EE01D358B@cobra.cs.uni-duesseldorf.de> Author: Arjun Naik Branch: py3.3 Changeset: r72488:e54b7fe58ecf Date: 2014-07-26 10:41 +0200 http://bitbucket.org/pypy/pypy/changeset/e54b7fe58ecf/ Log: Added the missing log2 in the math module. diff --git a/pypy/module/math/__init__.py b/pypy/module/math/__init__.py --- a/pypy/module/math/__init__.py +++ b/pypy/module/math/__init__.py @@ -23,6 +23,7 @@ 'frexp' : 'interp_math.frexp', 'degrees' : 'interp_math.degrees', 'log' : 'interp_math.log', + 'log2' : 'interp_math.log2', 'log10' : 'interp_math.log10', 'fmod' : 'interp_math.fmod', 'atan' : 'interp_math.atan', diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py --- a/pypy/module/math/interp_math.py +++ b/pypy/module/math/interp_math.py @@ -228,6 +228,11 @@ return math1(space, math.log, w_base) return _log_any(space, w_x, base) +def log2(space, w_x): + """log2(x) -> the base 2 logarithm of x. + """ + return _log_any(space, w_x, 2.0) + def log10(space, w_x): """log10(x) -> the base 10 logarithm of x. 
""" diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -148,6 +148,19 @@ raises(ValueError, math.log1p, -1) raises(ValueError, math.log1p, -100) + def test_log2(self): + import math + self.ftest(math.log2(0.125), -3) + self.ftest(math.log2(0.5), -1) + self.ftest(math.log2(4), 2) + + def test_log10(self): + import math + self.ftest(math.log10(0.1), -1) + self.ftest(math.log10(10), 1) + self.ftest(math.log10(100), 2) + self.ftest(math.log10(0.01), -2) + def test_acosh(self): import math self.ftest(math.acosh(1), 0) From noreply at buildbot.pypy.org Sat Jul 26 12:32:30 2014 From: noreply at buildbot.pypy.org (Arjun Naik) Date: Sat, 26 Jul 2014 12:32:30 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Added missing log2 in math module. Message-ID: <20140726103230.8E7FF1D358B@cobra.cs.uni-duesseldorf.de> Author: Arjun Naik Branch: py3.3 Changeset: r72489:342ebe4c4d7f Date: 2014-07-26 11:16 +0200 http://bitbucket.org/pypy/pypy/changeset/342ebe4c4d7f/ Log: Added missing log2 in math module. diff --git a/pypy/module/math/__init__.py b/pypy/module/math/__init__.py --- a/pypy/module/math/__init__.py +++ b/pypy/module/math/__init__.py @@ -23,6 +23,7 @@ 'frexp' : 'interp_math.frexp', 'degrees' : 'interp_math.degrees', 'log' : 'interp_math.log', + 'log2' : 'interp_math.log2', 'log10' : 'interp_math.log10', 'fmod' : 'interp_math.fmod', 'atan' : 'interp_math.atan', diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py --- a/pypy/module/math/interp_math.py +++ b/pypy/module/math/interp_math.py @@ -228,6 +228,11 @@ return math1(space, math.log, w_base) return _log_any(space, w_x, base) +def log2(space, w_x): + """log2(x) -> the base 2 logarithm of x. + """ + return _log_any(space, w_x, 2.0) + def log10(space, w_x): """log10(x) -> the base 10 logarithm of x. 
""" diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -148,6 +148,19 @@ raises(ValueError, math.log1p, -1) raises(ValueError, math.log1p, -100) + def test_log2(self): + import math + self.ftest(math.log2(0.125), -3) + self.ftest(math.log2(0.5), -1) + self.ftest(math.log2(4), 2) + + def test_log10(self): + import math + self.ftest(math.log10(0.1), -1) + self.ftest(math.log10(10), 1) + self.ftest(math.log10(100), 2) + self.ftest(math.log10(0.01), -2) + def test_acosh(self): import math self.ftest(math.acosh(1), 0) From noreply at buildbot.pypy.org Sat Jul 26 12:32:31 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sat, 26 Jul 2014 12:32:31 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: __dir__ is allowed to return a tuple (it's converted to list like in cpython3). Message-ID: <20140726103231.BD3E91D358B@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72490:71501209fc12 Date: 2014-07-26 11:22 +0200 http://bitbucket.org/pypy/pypy/changeset/71501209fc12/ Log: __dir__ is allowed to return a tuple (it's converted to list like in cpython3). 
diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -52,6 +52,8 @@ dir_meth = lookup_special(obj, "__dir__") if dir_meth is not None: result = dir_meth() + if isinstance(result, tuple): + result = list(result) if not isinstance(result, list): raise TypeError("__dir__() must return a list, not %r" % ( type(result),)) diff --git a/pypy/module/__builtin__/test/test_dir.py b/pypy/module/__builtin__/test/test_dir.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_dir.py @@ -0,0 +1,12 @@ +class AppTestDir: + + def test_dir_obj__dir__tuple(self): + """When __dir__ method returns a tuple, python3 converts it to list.""" + + class Foo(object): + def __dir__(self): + return ("b", "c", "a") + + res = dir(Foo()) + assert isinstance(res, list) + assert res == ["a", "b", "c"] From noreply at buildbot.pypy.org Sat Jul 26 12:32:33 2014 From: noreply at buildbot.pypy.org (Boglarka Vezer) Date: Sat, 26 Jul 2014 12:32:33 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fixing typo in range hash function Message-ID: <20140726103233.01ED61D358B@cobra.cs.uni-duesseldorf.de> Author: Boglarka Vezer Branch: py3.3 Changeset: r72491:b8a56b701025 Date: 2014-07-26 11:26 +0200 http://bitbucket.org/pypy/pypy/changeset/b8a56b701025/ Log: fixing typo in range hash function diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -482,7 +482,7 @@ def descr_hash(self, space): if space.eq_w(self.w_length, space.wrap(0)): w_tup = space.newtuple([self.w_length, space.w_None, space.w_None]) - elif space.eq_w(self.w_length, space.wrap(0)): + elif space.eq_w(self.w_length, space.wrap(1)): w_tup = space.newtuple([self.w_length, self.w_start, space.w_None]) else: w_tup = space.newtuple([self.w_length, self.w_start, 
self.w_step]) diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -484,7 +484,7 @@ for a in test_ranges: for b in test_ranges: if a == b: - assert (hash(a), hash(b)) + assert hash(a) == hash(b) # Ranges are unequal to other types (even sequence types) assert (range(0) == ()) is False From noreply at buildbot.pypy.org Sat Jul 26 12:32:34 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sat, 26 Jul 2014 12:32:34 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: __dir__ is allowed to return any iterable like in cpython3. Message-ID: <20140726103234.29F091D358B@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72492:72a3a7810d5c Date: 2014-07-26 12:05 +0200 http://bitbucket.org/pypy/pypy/changeset/72a3a7810d5c/ Log: __dir__ is allowed to return any iterable like in cpython3. diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -52,11 +52,8 @@ dir_meth = lookup_special(obj, "__dir__") if dir_meth is not None: result = dir_meth() - if isinstance(result, tuple): - result = list(result) if not isinstance(result, list): - raise TypeError("__dir__() must return a list, not %r" % ( - type(result),)) + result = list(result) # Will throw TypeError if not iterable result.sort() return result elif isinstance(obj, types.ModuleType): diff --git a/pypy/module/__builtin__/test/test_dir.py b/pypy/module/__builtin__/test/test_dir.py --- a/pypy/module/__builtin__/test/test_dir.py +++ b/pypy/module/__builtin__/test/test_dir.py @@ -1,12 +1,26 @@ class AppTestDir: def test_dir_obj__dir__tuple(self): - """When __dir__ method returns a tuple, python3 converts it to list.""" - + """If __dir__ method returns a tuple, cpython3 converts it to list.""" class 
Foo(object): def __dir__(self): return ("b", "c", "a") - res = dir(Foo()) assert isinstance(res, list) assert res == ["a", "b", "c"] + + def test_dir_obj__dir__genexp(self): + """Generator expression is also converted to list by cpython3.""" + class Foo(object): + def __dir__(self): + return (i for i in ["b", "c", "a"]) + res = dir(Foo()) + assert isinstance(res, list) + assert res == ["a", "b", "c"] + + def test_dir_obj__dir__noniter(self): + """If result of __dir__ is not iterable, it's an error.""" + class Foo(object): + def __dir__(self): + return 42 + raises(TypeError, dir, Foo()) From noreply at buildbot.pypy.org Sat Jul 26 13:02:24 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 26 Jul 2014 13:02:24 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: patching cpython3 csv test as the exception message has bad grammar Message-ID: <20140726110224.970001D36A6@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72493:c94a608f5616 Date: 2014-07-26 11:11 +0200 http://bitbucket.org/pypy/pypy/changeset/c94a608f5616/ Log: patching cpython3 csv test as the exception message has bad grammar diff --git a/lib-python/3/test/test_csv.py b/lib-python/3/test/test_csv.py --- a/lib-python/3/test/test_csv.py +++ b/lib-python/3/test/test_csv.py @@ -766,8 +766,9 @@ mydialect.quotechar = "''" with self.assertRaises(csv.Error) as cm: mydialect() + # NOTE: Patched exception message since cpython uses bad grammar (cpython issue22076) self.assertEqual(str(cm.exception), - '"quotechar" must be an 1-character string') + '"quotechar" must be a 1-character string') mydialect.quotechar = 4 with self.assertRaises(csv.Error) as cm: @@ -789,14 +790,16 @@ mydialect.delimiter = ":::" with self.assertRaises(csv.Error) as cm: mydialect() + # NOTE: Patched exception message since cpython uses bad grammar (cpython issue22076) self.assertEqual(str(cm.exception), - '"delimiter" must be an 1-character string') + '"delimiter" must be a 1-character string') 
mydialect.delimiter = "" with self.assertRaises(csv.Error) as cm: mydialect() + # NOTE: Patched exception message since cpython uses bad grammar (cpython issue22076) self.assertEqual(str(cm.exception), - '"delimiter" must be an 1-character string') + '"delimiter" must be a 1-character string') mydialect.delimiter = b"," with self.assertRaises(csv.Error) as cm: From noreply at buildbot.pypy.org Sat Jul 26 13:02:25 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 26 Jul 2014 13:02:25 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fixing _csv lineterminator exception message Message-ID: <20140726110225.BF46C1D36A6@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72494:cbfe89ead8c2 Date: 2014-07-26 12:12 +0200 http://bitbucket.org/pypy/pypy/changeset/cbfe89ead8c2/ Log: fixing _csv lineterminator exception message diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -34,10 +34,15 @@ return default return space.int_w(w_src) -def _get_str(space, w_src, default): +def _get_str(space, w_src, default, attrname): if w_src is None: return default - return space.unicode_w(w_src) + try: + return space.unicode_w(w_src) + except OperationError as e: + if e.match(space, space.w_TypeError): + raise oefmt(space.w_TypeError, '"%s" must be a string', attrname) + raise def _get_char(space, w_src, default, name): if w_src is None: @@ -91,7 +96,7 @@ dialect.delimiter = _get_char(space, w_delimiter, u',', 'delimiter') dialect.doublequote = _get_bool(space, w_doublequote, True) dialect.escapechar = _get_char(space, w_escapechar, u'\0', 'escapechar') - dialect.lineterminator = _get_str(space, w_lineterminator, u'\r\n') + dialect.lineterminator = _get_str(space, w_lineterminator, u'\r\n', 'lineterminator') dialect.quotechar = _get_char(space, w_quotechar, u'"', 'quotechar') tmp_quoting = _get_int(space, w_quoting, QUOTE_MINIMAL) 
dialect.skipinitialspace = _get_bool(space, w_skipinitialspace, False) diff --git a/pypy/module/_csv/test/test_dialect.py b/pypy/module/_csv/test/test_dialect.py --- a/pypy/module/_csv/test/test_dialect.py +++ b/pypy/module/_csv/test/test_dialect.py @@ -67,6 +67,9 @@ kwargs = {name: value} raises(TypeError, _csv.register_dialect, 'foo1', **kwargs) + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', lineterminator=4) + assert exc_info.value.args[0] == '"lineterminator" must be a string' + def test_bool_arg(self): # boolean arguments take *any* object and use its truth-value import _csv From noreply at buildbot.pypy.org Sat Jul 26 13:02:26 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 26 Jul 2014 13:02:26 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged pypy/pypy/py3.3 into py3.3 Message-ID: <20140726110226.E76B61D36A6@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72495:61fd7b56f3ed Date: 2014-07-26 12:41 +0200 http://bitbucket.org/pypy/pypy/changeset/61fd7b56f3ed/ Log: Merged pypy/pypy/py3.3 into py3.3 diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -53,8 +53,7 @@ if dir_meth is not None: result = dir_meth() if not isinstance(result, list): - raise TypeError("__dir__() must return a list, not %r" % ( - type(result),)) + result = list(result) # Will throw TypeError if not iterable result.sort() return result elif isinstance(obj, types.ModuleType): diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -482,7 +482,7 @@ def descr_hash(self, space): if space.eq_w(self.w_length, space.wrap(0)): w_tup = space.newtuple([self.w_length, space.w_None, space.w_None]) - elif space.eq_w(self.w_length, space.wrap(0)): + elif space.eq_w(self.w_length, 
space.wrap(1)): w_tup = space.newtuple([self.w_length, self.w_start, space.w_None]) else: w_tup = space.newtuple([self.w_length, self.w_start, self.w_step]) diff --git a/pypy/module/__builtin__/test/test_construct_singletons.py b/pypy/module/__builtin__/test/test_construct_singletons.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_construct_singletons.py @@ -0,0 +1,7 @@ +class AppTestConstructSingletons: + + def test_construct_singletons(self): + none_type = type(None) + assert none_type() is None + raises(TypeError, none_type, 1, 2) + raises(TypeError, none_type, a=1, b=2) diff --git a/pypy/module/__builtin__/test/test_dir.py b/pypy/module/__builtin__/test/test_dir.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_dir.py @@ -0,0 +1,26 @@ +class AppTestDir: + + def test_dir_obj__dir__tuple(self): + """If __dir__ method returns a tuple, cpython3 converts it to list.""" + class Foo(object): + def __dir__(self): + return ("b", "c", "a") + res = dir(Foo()) + assert isinstance(res, list) + assert res == ["a", "b", "c"] + + def test_dir_obj__dir__genexp(self): + """Generator expression is also converted to list by cpython3.""" + class Foo(object): + def __dir__(self): + return (i for i in ["b", "c", "a"]) + res = dir(Foo()) + assert isinstance(res, list) + assert res == ["a", "b", "c"] + + def test_dir_obj__dir__noniter(self): + """If result of __dir__ is not iterable, it's an error.""" + class Foo(object): + def __dir__(self): + return 42 + raises(TypeError, dir, Foo()) diff --git a/pypy/module/__builtin__/test/test_functional.py b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -484,7 +484,7 @@ for a in test_ranges: for b in test_ranges: if a == b: - assert (hash(a), hash(b)) + assert hash(a) == hash(b) # Ranges are unequal to other types (even sequence types) assert (range(0) == ()) is False diff --git 
a/pypy/module/math/__init__.py b/pypy/module/math/__init__.py --- a/pypy/module/math/__init__.py +++ b/pypy/module/math/__init__.py @@ -23,6 +23,7 @@ 'frexp' : 'interp_math.frexp', 'degrees' : 'interp_math.degrees', 'log' : 'interp_math.log', + 'log2' : 'interp_math.log2', 'log10' : 'interp_math.log10', 'fmod' : 'interp_math.fmod', 'atan' : 'interp_math.atan', diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py --- a/pypy/module/math/interp_math.py +++ b/pypy/module/math/interp_math.py @@ -228,6 +228,11 @@ return math1(space, math.log, w_base) return _log_any(space, w_x, base) +def log2(space, w_x): + """log2(x) -> the base 2 logarithm of x. + """ + return _log_any(space, w_x, 2.0) + def log10(space, w_x): """log10(x) -> the base 10 logarithm of x. """ diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -148,6 +148,19 @@ raises(ValueError, math.log1p, -1) raises(ValueError, math.log1p, -100) + def test_log2(self): + import math + self.ftest(math.log2(0.125), -3) + self.ftest(math.log2(0.5), -1) + self.ftest(math.log2(4), 2) + + def test_log10(self): + import math + self.ftest(math.log10(0.1), -1) + self.ftest(math.log10(10), 1) + self.ftest(math.log10(100), 2) + self.ftest(math.log10(0.01), -2) + def test_acosh(self): import math self.ftest(math.acosh(1), 0) diff --git a/pypy/objspace/std/nonetype.py b/pypy/objspace/std/nonetype.py --- a/pypy/objspace/std/nonetype.py +++ b/pypy/objspace/std/nonetype.py @@ -1,8 +1,15 @@ from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.interpreter import gateway +def descr__new__(space, w_type): + return space.w_None + # ____________________________________________________________ none_typedef = StdTypeDef("NoneType", + __new__ = gateway.interp2app(descr__new__) ) none_typedef.acceptable_as_base_class = False + + From noreply at buildbot.pypy.org Sat Jul 26 13:05:44 2014 From: 
noreply at buildbot.pypy.org (mswart) Date: Sat, 26 Jul 2014 13:05:44 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: End image loading with \n Message-ID: <20140726110544.BD4F31D36A6@cobra.cs.uni-duesseldorf.de> Author: Malte Swart Branch: stmgc-c7 Changeset: r946:a772ee2447d9 Date: 2014-07-26 13:05 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a772ee2447d9/ Log: End image loading with \n For better reading of further output, end image loading with \n. By this the output is separated by a new line. diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -231,6 +231,7 @@ self.init_w_objects() self.fillin_w_objects() self.synchronize_shadows() + os.write(2, "\n") def read_version(self): # 1 word version From noreply at buildbot.pypy.org Sat Jul 26 13:25:47 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 26 Jul 2014 13:25:47 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: _posixsubprocess is not optional anymore. Message-ID: <20140726112547.C3D881C04B7@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72496:2e4e787ecfa6 Date: 2014-07-26 13:22 +0200 http://bitbucket.org/pypy/pypy/changeset/2e4e787ecfa6/ Log: _posixsubprocess is not optional anymore. 
diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -17,7 +17,7 @@ usemodules = ['binascii', 'posix', 'signal', 'struct', 'rctime'] # py3k os.open uses subprocess, requiring the following per platform if os.name != 'nt': - usemodules += ['fcntl', 'select'] + usemodules += ['fcntl', 'select', '_posixsubprocess'] else: usemodules += ['_rawffi', 'thread'] mod.space = gettestobjspace(usemodules=usemodules) From noreply at buildbot.pypy.org Sat Jul 26 13:55:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Jul 2014 13:55:40 +0200 (CEST) Subject: [pypy-commit] pypy default: Oops. If, during translation, we execute app-level code that contains a Message-ID: <20140726115540.B8B211C06AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72497:c97bfecbfc61 Date: 2014-07-26 13:54 +0200 http://bitbucket.org/pypy/pypy/changeset/c97bfecbfc61/ Log: Oops. If, during translation, we execute app-level code that contains a "continue" in a "try:" block, and if we're translating with "-Ojit", then crash. This case occurs in the py3.3 branch. diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -200,7 +200,7 @@ elif opcode == opcodedesc.BREAK_LOOP.index: next_instr = self.BREAK_LOOP(oparg, next_instr) elif opcode == opcodedesc.CONTINUE_LOOP.index: - next_instr = self.CONTINUE_LOOP(oparg, next_instr) + return self.CONTINUE_LOOP(oparg, next_instr) elif opcode == opcodedesc.FOR_ITER.index: next_instr = self.FOR_ITER(oparg, next_instr) elif opcode == opcodedesc.JUMP_FORWARD.index: From noreply at buildbot.pypy.org Sat Jul 26 13:57:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Jul 2014 13:57:46 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Oops. 
If, during translation, we execute app-level code that contains a Message-ID: <20140726115746.9C0BD1C085C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3.3 Changeset: r72498:5fbb8de72550 Date: 2014-07-26 13:54 +0200 http://bitbucket.org/pypy/pypy/changeset/5fbb8de72550/ Log: Oops. If, during translation, we execute app-level code that contains a "continue" in a "try:" block, and if we're translating with "-Ojit", then crash. This case occurs in the py3.3 branch. diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -204,7 +204,7 @@ elif opcode == opcodedesc.BREAK_LOOP.index: next_instr = self.BREAK_LOOP(oparg, next_instr) elif opcode == opcodedesc.CONTINUE_LOOP.index: - next_instr = self.CONTINUE_LOOP(oparg, next_instr) + return self.CONTINUE_LOOP(oparg, next_instr) elif opcode == opcodedesc.FOR_ITER.index: next_instr = self.FOR_ITER(oparg, next_instr) elif opcode == opcodedesc.JUMP_FORWARD.index: From noreply at buildbot.pypy.org Sat Jul 26 13:57:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Jul 2014 13:57:47 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Remove unused return value Message-ID: <20140726115747.C5B3C1C085C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3.3 Changeset: r72499:900ace194cd5 Date: 2014-07-26 13:56 +0200 http://bitbucket.org/pypy/pypy/changeset/900ace194cd5/ Log: Remove unused return value diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1022,7 +1022,6 @@ raise w_value = space.w_None self.pushvalue(w_value) - return next_instr else: # iter remains on stack, w_retval is value to be yielded. 
self.pushvalue(w_retval) From noreply at buildbot.pypy.org Sat Jul 26 13:57:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Jul 2014 13:57:48 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge heads Message-ID: <20140726115748.EB3921C085C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3.3 Changeset: r72500:38d2f98adca7 Date: 2014-07-26 13:57 +0200 http://bitbucket.org/pypy/pypy/changeset/38d2f98adca7/ Log: merge heads diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -17,7 +17,7 @@ usemodules = ['binascii', 'posix', 'signal', 'struct', 'rctime'] # py3k os.open uses subprocess, requiring the following per platform if os.name != 'nt': - usemodules += ['fcntl', 'select'] + usemodules += ['fcntl', 'select', '_posixsubprocess'] else: usemodules += ['_rawffi', 'thread'] mod.space = gettestobjspace(usemodules=usemodules) From noreply at buildbot.pypy.org Sat Jul 26 13:59:58 2014 From: noreply at buildbot.pypy.org (agobi) Date: Sat, 26 Jul 2014 13:59:58 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: creating the RTLD_* constants in the module posix Message-ID: <20140726115958.1DC2B1C085C@cobra.cs.uni-duesseldorf.de> Author: Attila Gobi Branch: py3.3 Changeset: r72501:2e121b41a4c3 Date: 2014-07-26 13:47 +0200 http://bitbucket.org/pypy/pypy/changeset/2e121b41a4c3/ Log: creating the RTLD_* constants in the module posix diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -1,5 +1,6 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.module.ll_os import RegisterOs +from rpython.rlib import rdynload import os exec 'import %s as posix' % os.name @@ -173,6 +174,12 @@ if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' 
+ name + for _name in ["RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL", + "RTLD_NODELETE", "RTLD_NOLOAD", "RTLD_DEEPBIND"]: + if getattr(rdynload.cConfig, _name) is not None: + interpleveldefs[_name] = 'space.wrap(%d)' % ( + getattr(rdynload.cConfig, _name),) + # os.py uses this list to build os.supports_dir_fd() and os.supports_fd(). # Fill with e.g. HAVE_FCHDIR, when os.chdir() supports file descriptors. interpleveldefs['_have_functions'] = 'space.newlist([])' diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -1042,6 +1042,12 @@ # just ensure it returns something reasonable assert encoding is None or type(encoding) is str + def test_rtld_constants(self): + # check presence of major RTLD_* constants + self.posix.RTLD_LAZY + self.posix.RTLD_NOW + self.posix.RTLD_GLOBAL + self.posix.RTLD_LOCAL class AppTestEnvironment(object): def setup_class(cls): From noreply at buildbot.pypy.org Sat Jul 26 13:59:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Jul 2014 13:59:59 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge heads Message-ID: <20140726115959.796191C085C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3.3 Changeset: r72502:2ff4e3b542b6 Date: 2014-07-26 13:59 +0200 http://bitbucket.org/pypy/pypy/changeset/2ff4e3b542b6/ Log: merge heads diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -204,7 +204,7 @@ elif opcode == opcodedesc.BREAK_LOOP.index: next_instr = self.BREAK_LOOP(oparg, next_instr) elif opcode == opcodedesc.CONTINUE_LOOP.index: - next_instr = self.CONTINUE_LOOP(oparg, next_instr) + return self.CONTINUE_LOOP(oparg, next_instr) elif opcode == opcodedesc.FOR_ITER.index: next_instr = self.FOR_ITER(oparg, next_instr) elif opcode == opcodedesc.JUMP_FORWARD.index: @@ -1022,7 +1022,6 @@ raise w_value = 
space.w_None self.pushvalue(w_value) - return next_instr else: # iter remains on stack, w_retval is value to be yielded. self.pushvalue(w_retval) From noreply at buildbot.pypy.org Sat Jul 26 14:49:54 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 26 Jul 2014 14:49:54 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: hg merge default Message-ID: <20140726124954.EC2AC1C050D@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72503:9ca2657fe663 Date: 2014-07-26 14:49 +0200 http://bitbucket.org/pypy/pypy/changeset/9ca2657fe663/ Log: hg merge default diff too long, truncating to 2000 out of 33446 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -10,3 +10,7 @@ 20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 0000000000000000000000000000000000000000 release-2.3.0 394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +0000000000000000000000000000000000000000 release-2.2=3.1 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -128,6 +128,7 @@ Stian Andreassen Laurence Tratt Wanja Saatkamp + Ivan Sichmann Freitas Gerald Klix Mike Blume Oscar Nierstrasz @@ -212,7 +213,9 @@ Alejandro J. 
Cura Jacob Oscarson Travis Francis Athougies + Ryan Gonzalez Kristjan Valur Jonsson + Sebastian Pawluś Neil Blakey-Milner anatoly techtonik Lutz Paelike @@ -245,6 +248,7 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + rafalgalczynski at gmail.com Floris Bruynooghe Laurens Van Houtven Akira Li @@ -274,6 +278,8 @@ Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + Asmo Soinio + Stefan Marr jiaaro opassembler.py Antony Lee diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -389,12 +389,13 @@ func.__name__ = name_or_ordinal return func -class PyDLL(CDLL): - """This class represents the Python library itself. It allows to - access Python API functions. The GIL is not released, and - Python exceptions are handled correctly. - """ - _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI +# Not in PyPy +#class PyDLL(CDLL): +# """This class represents the Python library itself. It allows to +# access Python API functions. The GIL is not released, and +# Python exceptions are handled correctly. 
+# """ +# _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI if _os.name in ("nt", "ce"): @@ -447,15 +448,8 @@ return self._dlltype(name) cdll = LibraryLoader(CDLL) -pydll = LibraryLoader(PyDLL) - -if _os.name in ("nt", "ce"): - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - +# not on PyPy +#pydll = LibraryLoader(PyDLL) if _os.name in ("nt", "ce"): windll = LibraryLoader(WinDLL) diff --git a/lib-python/2.7/ctypes/test/test_values.py b/lib-python/2.7/ctypes/test/test_values.py --- a/lib-python/2.7/ctypes/test/test_values.py +++ b/lib-python/2.7/ctypes/test/test_values.py @@ -4,6 +4,7 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test @@ -23,7 +24,8 @@ class Win_ValuesTestCase(unittest.TestCase): """This test only works when python itself is a dll/shared library""" - + + @xfail def test_optimizeflag(self): # This test accesses the Py_OptimizeFlag intger, which is # exported by the Python dll. @@ -40,6 +42,7 @@ else: self.assertEqual(opt, 2) + @xfail def test_frozentable(self): # Python exports a PyImport_FrozenModules symbol. This is a # pointer to an array of struct _frozen entries. The end of the @@ -75,6 +78,7 @@ from ctypes import _pointer_type_cache del _pointer_type_cache[struct_frozen] + @xfail def test_undefined(self): self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol") diff --git a/lib-python/2.7/imputil.py b/lib-python/2.7/imputil.py --- a/lib-python/2.7/imputil.py +++ b/lib-python/2.7/imputil.py @@ -422,7 +422,8 @@ saved back to the filesystem for future imports. The source file's modification timestamp must be provided as a Long value. 
""" - codestring = open(pathname, 'rU').read() + with open(pathname, 'rU') as fp: + codestring = fp.read() if codestring and codestring[-1] != '\n': codestring = codestring + '\n' code = __builtin__.compile(codestring, pathname, 'exec') @@ -603,8 +604,8 @@ self.desc = desc def import_file(self, filename, finfo, fqname): - fp = open(filename, self.desc[1]) - module = imp.load_module(fqname, fp, filename, self.desc) + with open(filename, self.desc[1]) as fp: + module = imp.load_module(fqname, fp, filename, self.desc) module.__file__ = filename return 0, module, { } diff --git a/lib-python/2.7/modulefinder.py b/lib-python/2.7/modulefinder.py --- a/lib-python/2.7/modulefinder.py +++ b/lib-python/2.7/modulefinder.py @@ -109,16 +109,16 @@ def run_script(self, pathname): self.msg(2, "run_script", pathname) - fp = open(pathname, READ_MODE) - stuff = ("", "r", imp.PY_SOURCE) - self.load_module('__main__', fp, pathname, stuff) + with open(pathname, READ_MODE) as fp: + stuff = ("", "r", imp.PY_SOURCE) + self.load_module('__main__', fp, pathname, stuff) def load_file(self, pathname): dir, name = os.path.split(pathname) name, ext = os.path.splitext(name) - fp = open(pathname, READ_MODE) - stuff = (ext, "r", imp.PY_SOURCE) - self.load_module(name, fp, pathname, stuff) + with open(pathname, READ_MODE) as fp: + stuff = (ext, "r", imp.PY_SOURCE) + self.load_module(name, fp, pathname, stuff) def import_hook(self, name, caller=None, fromlist=None, level=-1): self.msg(3, "import_hook", name, caller, fromlist, level) @@ -461,6 +461,8 @@ fp, buf, stuff = self.find_module("__init__", m.__path__) self.load_module(fqname, fp, buf, stuff) self.msgout(2, "load_package ->", m) + if fp: + fp.close() return m def add_module(self, fqname): diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -48,6 +48,9 @@ def tearDown(self): os.chdir(self.old_dir) + import gc + # Force a 
collection which should close FileType() options + gc.collect() for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -98,6 +98,17 @@ self.assertTrue(key in self.g) self.assertTrue(self.g.has_key(key)) + def test_unicode_key(self): + key = u'ab' + value = u'cd' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) + def test_main(): run_unittest(TestGdbm) diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -55,11 +55,6 @@ import gc import sys import time -try: - import itertools -except ImportError: - # Must be an older Python version (see timeit() below) - itertools = None __all__ = ["Timer"] @@ -81,7 +76,8 @@ def inner(_it, _timer): %(setup)s _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 %(stmt)s _t1 = _timer() return _t1 - _t0 @@ -96,7 +92,8 @@ def inner(_it, _timer, _func=func): setup() _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 _func() _t1 = _timer() return _t1 - _t0 @@ -133,9 +130,19 @@ else: raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec code in globals(), ns - self.inner = ns["inner"] + def make_inner(): + # PyPy tweak: recompile the source code each time before + # calling inner(). There are situations like Issue #1776 + # where PyPy tries to reuse the JIT code from before, + # but that's not going to work: the first thing the + # function does is the "-s" statement, which may declare + # new classes (here a namedtuple). 
We end up with + # bridges from the inner loop; more and more of them + # every time we call inner(). + code = compile(src, dummy_src_name, "exec") + exec code in globals(), ns + return ns["inner"] + self.make_inner = make_inner elif hasattr(stmt, '__call__'): self.src = None if isinstance(setup, basestring): @@ -144,7 +151,8 @@ exec _setup in globals(), ns elif not hasattr(setup, '__call__'): raise ValueError("setup is neither a string nor callable") - self.inner = _template_func(setup, stmt) + inner = _template_func(setup, stmt) + self.make_inner = lambda: inner else: raise ValueError("stmt is neither a string nor callable") @@ -185,15 +193,12 @@ to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. """ - if itertools: - it = itertools.repeat(None, number) - else: - it = [None] * number + inner = self.make_inner() gcold = gc.isenabled() if '__pypy__' not in sys.builtin_module_names: gc.disable() # only do that on CPython try: - timing = self.inner(it, self.timer) + timing = inner(number, self.timer) finally: if gcold: gc.enable() diff --git a/lib-python/2.7/xml/sax/saxutils.py b/lib-python/2.7/xml/sax/saxutils.py --- a/lib-python/2.7/xml/sax/saxutils.py +++ b/lib-python/2.7/xml/sax/saxutils.py @@ -98,13 +98,14 @@ except AttributeError: pass # wrap a binary writer with TextIOWrapper - class UnbufferedTextIOWrapper(io.TextIOWrapper): - def write(self, s): - super(UnbufferedTextIOWrapper, self).write(s) - self.flush() - return UnbufferedTextIOWrapper(buffer, encoding=encoding, + return _UnbufferedTextIOWrapper(buffer, encoding=encoding, errors='xmlcharrefreplace', newline='\n') +# PyPy: moved this class outside the function above +class _UnbufferedTextIOWrapper(io.TextIOWrapper): + def write(self, s): + super(_UnbufferedTextIOWrapper, self).write(s) + self.flush() class XMLGenerator(handler.ContentHandler): diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- 
a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -219,6 +219,8 @@ if restype is None: import ctypes restype = ctypes.c_int + if self._argtypes_ is None: + self._argtypes_ = [] self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -309,11 +309,9 @@ #endif int _m_ispad(WINDOW *win) { -#if defined WINDOW_HAS_FLAGS + // may not have _flags (and possibly _ISPAD), + // but for now let's assume that always has it return (win->_flags & _ISPAD); -#else - return 0; -#endif } void _m_getsyx(int *yx) { diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -13,7 +13,15 @@ k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') - output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = os.getuid() + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s_%s%s' % ( + username, k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) return output_dir diff --git a/lib_pypy/_tkinter/license.terms b/lib_pypy/_tkinter/license.terms new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/license.terms @@ -0,0 +1,39 @@ +This software is copyrighted by the Regents of the University of +California, Sun Microsystems, Inc., and other parties. The following +terms apply to all files associated with the software unless explicitly +disclaimed in individual files. 
+ +The authors hereby grant permission to use, copy, modify, distribute, +and license this software and its documentation for any purpose, provided +that existing copyright notices are retained in all copies and that this +notice is included verbatim in any distributions. No written agreement, +license, or royalty fee is required for any of the authorized uses. +Modifications to this software may be copyrighted by their authors +and need not follow the licensing terms described here, provided that +the new terms are clearly indicated on the first page of each file where +they apply. + +IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +MODIFICATIONS. + +GOVERNMENT USE: If you are acquiring this software on behalf of the +U.S. government, the Government shall have only "Restricted Rights" +in the software and related documentation as defined in the Federal +Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you +are acquiring the software on behalf of the Department of Defense, the +software shall be classified as "Commercial Computer Software" and the +Government shall have only "Restricted Rights" as defined in Clause +252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the +authors grant the U.S. 
Government and others acting in its behalf +permission to use and distribute the software in accordance with the +terms specified in this license. diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.2" -__version_info__ = (0, 8, 2) +__version__ = "0.8.6" +__version_info__ = (0, 8, 6) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -55,8 +55,7 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert (backend.__version__ == __version__ or - backend.__version__ == __version__[:3]) + assert backend.__version__ == __version__ # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) 
@@ -443,6 +442,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + for key, val in ffi._parser._int_constants.items(): + if key not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -24,6 +24,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -99,6 +100,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -128,9 +130,10 @@ finally: if lock is not None: lock.release() - return ast, macros + # csource will be used to find buggy source text + return ast, macros, csource - def convert_pycparser_error(self, e, csource): + def _convert_pycparser_error(self, e, csource): # xxx look for ":NUM:" at the start of str(e) and try to interpret # it as a line number line = None @@ -142,6 +145,12 @@ csourcelines = csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: @@ -160,14 +169,9 @@ self._packed = prev_packed def _internal_parse(self, csource): - ast, macros = self._parse(csource) + ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." 
for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -175,27 +179,61 @@ if decl.name == '__dotdotdot__': break # - for decl in iterator: - if isinstance(decl, pycparser.c_ast.Decl): - self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise api.CDefError("typedef does not declare any name", - decl) - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_type(decl.name) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + try: + for decl in iterator: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) + and decl.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_type(decl.name) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_ptr_type(decl.name) + else: + realtype = self._get_type(decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = 
self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) + self._add_constants(key, pyvalue) + elif value == '...': + self._declare('macro ' + key, value) else: - raise api.CDefError("unrecognized construct", decl) + raise api.CDefError('only supports the syntax "#define ' + '%s ..." (literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) def _parse_decl(self, decl): node = decl.type @@ -227,7 +265,7 @@ self._declare('variable ' + decl.name, tp) def parse_type(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): @@ -306,7 +344,8 @@ if ident == 'void': return model.void_type if ident == '__dotdotdot__': - raise api.FFIError('bad usage of "..."') + raise api.FFIError(':%d: bad usage of "..."' % + typenode.coord.line) return resolve_common_type(ident) # if isinstance(type, pycparser.c_ast.Struct): @@ -333,7 +372,8 @@ return self._get_struct_union_enum_type('union', typenode, name, nested=True) # - raise api.FFIError("bad or unsupported type declaration") + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) @@ -499,6 
+539,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -506,8 +550,8 @@ self._partial_length = True return '...' # - raise api.FFIError("unsupported expression: expected a " - "simple numeric constant") + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) def _build_enum_type(self, explicit_name, decls): if decls is not None: @@ -522,6 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -535,3 +580,5 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + self._add_constants(k, v) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -38,6 +38,7 @@ import distutils.errors # dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() options = dist.get_option_dict('build_ext') options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -89,43 +89,54 @@ # by generate_cpy_function_method(). 
prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") - prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},') - prnt(' {NULL, NULL} /* Sentinel */') + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # # standard init. modname = self.verifier.get_module_name() - if sys.version_info >= (3,): - prnt('static struct PyModuleDef _cffi_module_def = {') - prnt(' PyModuleDef_HEAD_INIT,') - prnt(' "%s",' % modname) - prnt(' NULL,') - prnt(' -1,') - prnt(' _cffi_methods,') - prnt(' NULL, NULL, NULL, NULL') - prnt('};') - prnt() - initname = 'PyInit_%s' % modname - createmod = 'PyModule_Create(&_cffi_module_def)' - errorcase = 'return NULL' - finalreturn = 'return lib' - else: - initname = 'init%s' % modname - createmod = 'Py_InitModule("%s", _cffi_methods)' % modname - errorcase = 'return' - finalreturn = 'return' + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() prnt('PyMODINIT_FUNC') - prnt('%s(void)' % initname) + prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') - prnt(' lib = %s;' % createmod) - prnt(' if (lib == NULL || %s < 0)' % ( - self._chained_list_constants[False],)) - prnt(' %s;' % errorcase) - prnt(' _cffi_init();') - prnt(' %s;' % finalreturn) + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' 
% modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') def load_library(self): # XXX review all usages of 'self' here! @@ -394,7 +405,7 @@ meth = 'METH_O' else: meth = 'METH_VARARGS' - self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth)) + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop @@ -481,8 +492,8 @@ if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname, - layoutfuncname)) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: @@ -589,13 +600,7 @@ 'variable type'),)) assert delayed else: - prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name)) - prnt(' o = PyInt_FromLong((long)(%s));' % (name,)) - prnt(' else if ((%s) <= 0)' % (name,)) - prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,)) - prnt(' else') - prnt(' o = PyLong_FromUnsignedLongLong(' - '(unsigned long long)(%s));' % (name,)) + prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: @@ -632,13 +637,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int %s(PyObject *lib)' % funcname) prnt('{') @@ -760,17 +770,30 @@ #include #include -#ifdef MS_WIN32 -#include /* for 
alloca() */ -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif #if PY_MAJOR_VERSION < 3 @@ -795,6 +818,15 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ @@ -804,14 +836,14 @@ PyLong_FromLongLong(x))) #define _cffi_to_c_int(o, type) \ - (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ - : _cffi_to_c_i8(o)) : \ - sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ - : _cffi_to_c_i16(o)) : \ - sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ - : _cffi_to_c_i32(o)) : \ - sizeof(type) == 8 ? (((type)-1) > 0 ? 
_cffi_to_c_u64(o) \ - : _cffi_to_c_i64(o)) : \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -885,25 +917,32 @@ return PyBool_FromLong(was_alive); } -static void _cffi_init(void) +static int _cffi_init(void) { - PyObject *module = PyImport_ImportModule("_cffi_backend"); - PyObject *c_api_object; + PyObject *module, *c_api_object = NULL; + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) - return; + goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) - return; + goto failure; if (!PyCapsule_CheckExact(c_api_object)) { - Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); - return; + goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -249,10 +249,10 @@ prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') self.export_symbols.append(layoutfuncname) - prnt('ssize_t %s(ssize_t i)' % (layoutfuncname,)) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) - prnt(' static ssize_t nums[] = {') + prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' 
offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize in tp.enumfields(): @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] + BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -410,13 +410,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) @@ -430,14 +435,14 @@ enumerator, enumerator, enumvalue)) prnt(' char buf[64];') prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % enumerator) - prnt(' snprintf(out_error, 255,' + prnt(' sprintf(out_error,' ' "%s has the real value %s, not %s",') prnt(' "%s", buf, "%d");' % ( - enumerator, enumvalue)) + enumerator[:100], enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') @@ -453,7 +458,7 @@ else: BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: @@ -547,20 +552,29 @@ #include #include /* XXX for ssize_t on some platforms */ -#ifdef _WIN32 -# include 
-# define snprintf _snprintf -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef SSIZE_T ssize_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif #else -# include +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif ''' diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -50,6 +50,8 @@ pass def _fromstr(key): + if isinstance(key, unicode): + key = key.encode("ascii") if not isinstance(key, str): raise TypeError("gdbm mappings have string indices only") return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} @@ -71,8 +73,8 @@ def _raise_from_errno(self): if ffi.errno: - raise error(os.strerror(ffi.errno)) - raise error(lib.gdbm_strerror(lib.gdbm_errno)) + raise error(ffi.errno, os.strerror(ffi.errno)) + raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno)) def __len__(self): if self.size < 0: @@ -141,7 +143,7 @@ def _check_closed(self): if not self.ll_dbm: - raise error("GDBM object has already been closed") + raise error(0, "GDBM object has already been closed") __del__ = close @@ -159,7 +161,7 @@ elif flags[0] == 'n': iflags = lib.GDBM_NEWDB else: - raise error("First flag must be one of 'r', 'w', 'c' or 'n'") + 
raise error(0, "First flag must be one of 'r', 'w', 'c' or 'n'") for flag in flags[1:]: if flag == 'f': iflags |= lib.GDBM_FAST @@ -168,7 +170,7 @@ elif flag == 'u': iflags |= lib.GDBM_NOLOCK else: - raise error("Flag '%s' not supported" % flag) + raise error(0, "Flag '%s' not supported" % flag) return gdbm(filename, iflags, mode) open_flags = "rwcnfsu" diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -113,7 +113,7 @@ try: for name in modlist: __import__(name) - except (ImportError, CompilationError, py.test.skip.Exception), e: + except (ImportError, CompilationError, py.test.skip.Exception) as e: errcls = e.__class__.__name__ raise Exception( "The module %r is disabled\n" % (modname,) + diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -101,7 +101,7 @@ while True: try: w_key = space.next(w_iter) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_StopIteration): raise # re-raise other app-level exceptions break @@ -271,7 +271,7 @@ try: ... - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_XxxError): raise ... 
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -25,13 +25,13 @@ on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally - try: - import sphinx_rtd_theme - html_theme = 'sphinx_rtd_theme' - html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - except ImportError: - print('sphinx_rtd_theme is not installed') - html_theme = 'default' + try: + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + except ImportError: + print('sphinx_rtd_theme is not installed') + html_theme = 'default' # otherwise, readthedocs.org uses their theme by default, so no need to specify it diff --git a/pypy/doc/config/translation.log.txt b/pypy/doc/config/translation.log.txt --- a/pypy/doc/config/translation.log.txt +++ b/pypy/doc/config/translation.log.txt @@ -2,4 +2,4 @@ These must be enabled by setting the PYPYLOG environment variable. The exact set of features supported by PYPYLOG is described in -pypy/translation/c/src/debug_print.h. +rpython/translator/c/src/debug_print.h. diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -98,6 +98,7 @@ Stian Andreassen Laurence Tratt Wanja Saatkamp + Ivan Sichmann Freitas Gerald Klix Mike Blume Oscar Nierstrasz @@ -182,7 +183,9 @@ Alejandro J. 
Cura Jacob Oscarson Travis Francis Athougies + Ryan Gonzalez Kristjan Valur Jonsson + Sebastian Pawluś Neil Blakey-Milner anatoly techtonik Lutz Paelike @@ -215,6 +218,7 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + rafalgalczynski at gmail.com Floris Bruynooghe Laurens Van Houtven Akira Li @@ -244,6 +248,8 @@ Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + Asmo Soinio + Stefan Marr jiaaro opassembler.py Antony Lee diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -74,13 +74,11 @@ Here is a list of the limitations and missing features of the current implementation: -* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer - of PyPy, at your own risks and without doing anything sensible about - the GIL. Since PyPy 2.3, these functions are also named with an extra - "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, - but it might more or less work in simple cases if you do. (Obviously, - assuming the PyObject pointers you get have any particular fields in - any particular order is just going to crash.) +* ``ctypes.pythonapi`` is missing. In previous versions, it was present + and redirected to the `cpyext` C API emulation layer, but our + implementation did not do anything sensible about the GIL and the + functions were named with an extra "Py", for example + ``PyPyInt_FromLong()``. It was removed for being unhelpful. * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -6,6 +6,9 @@ *Articles about PyPy published so far, most recent first:* (bibtex_ file) +* `A Way Forward in Parallelising Dynamic Languages`_, + R. Meier, A. Rigo + * `Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`_, C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. 
Pedroni, A. Rigo @@ -69,6 +72,7 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib +.. _A Way Forward in Parallelising Dynamic Languages: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2014/position-paper.pdf .. _Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf .. _Allocation Removal by Partial Evaluation in a Tracing JIT: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf .. _Towards a Jitting VM for Prolog Execution: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf @@ -91,6 +95,11 @@ Talks and Presentations ----------------------- +*This part is no longer updated.* The complete list is here__ (in +alphabetical order). + +.. __: https://bitbucket.org/pypy/extradoc/src/extradoc/talk/ + Talks in 2010 ~~~~~~~~~~~~~ diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -150,14 +150,18 @@ You might be interested in our `benchmarking site`_ and our :doc:`jit documentation `. -Note that the JIT has a very high warm-up cost, meaning that the -programs are slow at the beginning. If you want to compare the timings -with CPython, even relatively simple programs need to run *at least* one -second, preferrably at least a few seconds. Large, complicated programs -need even more time to warm-up the JIT. +`Your tests are not a benchmark`_: tests tend to be slow under PyPy +because they run exactly once; if they are good tests, they exercise +various corner cases in your code. This is a bad case for JIT +compilers. Note also that our JIT has a very high warm-up cost, meaning +that any program is slow at the beginning. If you want to compare the +timings with CPython, even relatively simple programs need to run *at +least* one second, preferrably at least a few seconds. 
Large, +complicated programs need even more time to warm-up the JIT. .. _benchmarking site: http://speed.pypy.org +.. _your tests are not a benchmark: http://alexgaynor.net/2013/jul/15/your-tests-are-not-benchmark/ Couldn't the JIT dump and reload already-compiled machine code? --------------------------------------------------------------- diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst --- a/pypy/doc/jit-hooks.rst +++ b/pypy/doc/jit-hooks.rst @@ -34,7 +34,7 @@ aborted due to some reason. The hook will be invoked with the siagnture: - ``hook(jitdriver_name, greenkey, reason)`` + ``hook(jitdriver_name, greenkey, reason, oplist)`` Reason is a string, the meaning of other arguments is the same as attributes on JitLoopInfo object diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -95,13 +95,12 @@ ``PYPYLOG`` If set to a non-empty value, enable logging, the format is: - *fname* + *fname* or *+fname* logging for profiling: includes all ``debug_start``/``debug_stop`` but not any nested ``debug_print``. *fname* can be ``-`` to log to *stderr*. - Note that using a : in fname is a bad idea, Windows - users, beware. + The *+fname* form can be used if there is a *:* in fname ``:``\ *fname* Full logging, including ``debug_print``. diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -24,6 +24,16 @@ Transparent Proxies ------------------- +.. warning:: + + This is a feature that was tried experimentally long ago, and we + found no really good use cases. The basic functionality is still + there, but we don't recommend using it. Some of the examples below + might not work any more (e.g. you can't tproxy a list object any + more). The rest can be done by hacking in standard Python. 
If + anyone is interested in working on tproxy again, he is welcome, but + we don't regard this as an interesting extension. + PyPy's Transparent Proxies allow routing of operations on objects to a callable. Application-level code can customize objects without interfering with the type system - ``type(proxied_list) is list`` holds true diff --git a/pypy/doc/release-2.3.1.rst b/pypy/doc/release-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.3.1.rst @@ -0,0 +1,81 @@ +================================================= +PyPy 2.3.1 - Terrestrial Arthropod Trap Revisited +================================================= + +We're pleased to announce PyPy 2.3.1, a feature-and-bugfix improvement over our +recent release last month. + +This release contains several bugfixes and enhancements. + +You can download the PyPy 2.3.1 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project, and for those who donate to our three sub-projects. +We've shown quite a bit of progress +but we're slowly running out of funds. +Please consider donating more, or even better convince your employer to donate, +so we can finish those projects! The three sub-projects are: + +* `Py3k`_ (supporting Python 3.x): the release PyPy3 2.3 is imminent. + +* `STM`_ (software transactional memory): a preview will be released very soon, + once we fix a few bugs + +* `NumPy`_ which requires installation of our fork of upstream numpy, available `on bitbucket`_ + +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _`NumPy`: http://pypy.org/numpydonate.html +.. _`on bitbucket`: https://www.bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. 
It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison; +note that cpython's speed has not changed since 2.7.2) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +Highlights +========== + +Issues with the 2.3 release were resolved after being reported by users to +our new issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at +#pypy. Here is a summary of the user-facing changes; +for more information see `whats-new`_: + +* The built-in ``struct`` module was renamed to ``_struct``, solving issues + with IDLE and other modules. + +* Support for compilation with gcc-4.9 + +* A rewrite of packaging.py which produces our downloadable packages to + modernize command line argument handling and to document third-party + contributions in our LICENSE file + +* A CFFI-based version of the gdbm module is now included in our downloads + +* Many issues were resolved_ since the 2.3 release on May 8 + +.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.1.html +.. _resolved: https://bitbucket.org/pypy/pypy/issues?status=resolved +Please try it out and let us know what you think. We especially welcome +success stories, we know you are using PyPy, please tell us about it! 
+ +Cheers + +The PyPy Team + diff --git a/pypy/doc/release-pypy3-2.3.1.rst b/pypy/doc/release-pypy3-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy3-2.3.1.rst @@ -0,0 +1,69 @@ +===================== +PyPy3 2.3.1 - Fulcrum +===================== + +We're pleased to announce the first stable release of PyPy3. PyPy3 +targets Python 3 (3.2.5) compatibility. + +We would like to thank all of the people who donated_ to the `py3k proposal`_ +for supporting the work that went into this. + +You can download the PyPy3 2.3.1 release here: + + http://pypy.org/download.html#pypy3-2-3-1 + +Highlights +========== + +* The first stable release of PyPy3: support for Python 3! + +* The stdlib has been updated to Python 3.2.5 + +* Additional support for the u'unicode' syntax (`PEP 414`_) from Python 3.3 + +* Updates from the default branch, such as incremental GC and various JIT + improvements + +* Resolved some notable JIT performance regressions from PyPy2: + + - Re-enabled the previously disabled collection (list/dict/set) strategies + + - Resolved performance of iteration over range objects + + - Resolved handling of Python 3's exception __context__ unnecessarily forcing + frame object overhead + +.. _`PEP 414`: http://legacy.python.org/dev/peps/pep-0414/ + +What is PyPy? +============== + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.6 or 3.2.5. It's fast due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +How to use PyPy? 
+================= + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -28,7 +28,8 @@ Introduction ============ -``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ +``pypy-stm`` is a variant of the regular PyPy interpreter. (This +version supports Python 2.7; see below for `Python 3`_.) With caveats_ listed below, it should be in theory within 20%-50% slower than a regular PyPy, comparing the JIT version in both cases (but see below!). It is called @@ -92,9 +93,9 @@ We're busy fixing them as we find them; feel free to `report bugs`_. * It runs with an overhead as low as 20% on examples like "richards". - There are also other examples with higher overheads --up to 10x for - "translate.py"-- which we are still trying to understand. One suspect - is our partial GC implementation, see below. + There are also other examples with higher overheads --currently up to + 2x for "translate.py"-- which we are still trying to understand. + One suspect is our partial GC implementation, see below. * Currently limited to 1.5 GB of RAM (this is just a parameter in `core.h`__). Memory overflows are not correctly handled; they cause @@ -111,9 +112,8 @@ * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. 
Programs allocating large - numbers of small objects that don't immediately die, as well as - programs that modify large lists or dicts, suffer from these missing - optimizations. + numbers of small objects that don't immediately die (surely a common + situation) suffer from these missing optimizations. * The GC has no support for destructors: the ``__del__`` method is never called (including on file objects, which won't be closed for you). @@ -138,6 +138,25 @@ +Python 3 +======== + +In this document I describe "pypy-stm", which is based on PyPy's Python +2.7 interpreter. Supporting Python 3 should take about half an +afternoon of work. Obviously, what I *don't* mean is that by tomorrow +you can have a finished and polished "pypy3-stm" product. General py3k +work is still missing; and general stm work is also still missing. But +they are rather independent from each other, as usual in PyPy. The +required afternoon of work will certainly be done one of these days now +that the internal interfaces seem to stabilize. + +The same is true for other languages implemented in the RPython +framework, although the amount of work to put there might vary, because +the STM framework within RPython is currently targeting the PyPy +interpreter and other ones might have slightly different needs. + + + User Guide ========== @@ -490,8 +509,6 @@ The last two lines are special; they are an internal marker read by ``transactional_memory.print_abort_info()``. -These statistics are not printed out for the main thread, for now. - Reference to implementation details ----------------------------------- diff --git a/pypy/doc/whatsnew-2.3.1.rst b/pypy/doc/whatsnew-2.3.1.rst --- a/pypy/doc/whatsnew-2.3.1.rst +++ b/pypy/doc/whatsnew-2.3.1.rst @@ -9,5 +9,16 @@ Support compilation with gcc-4.9 -Fixes for issues #1769, #1764, #1762, #1752 +Added support for the stdlib gdbm module via cffi +Annotator cleanups + +.. branch: release-2.3.x + +.. branch: unify-call-ops + +.. 
branch packaging +Use argparse for packaging.py, and add third-party components to LICENSE file. +Also mention that gdbm is GPL. +Do not crash the packaging process on failure in CFFI or license-building, +rather complete the build step and return -1. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,11 +3,50 @@ ======================= .. this is a revision shortly after release-2.3.x -.. startrev: b2cc67adbaad +.. startrev: ca9b7cf02cf4 -Added support for the stdlib gdbm module via cffi +.. branch: fix-bytearray-complexity +Bytearray operations no longer copy the bytearray unnecessarily -Annotator cleanups +Added support for ``__getitem__``, ``__setitem__``, ``__getslice__``, +``__setslice__``, and ``__len__`` to RPython -.. branch: release-2.3.x +.. branch: stringbuilder2-perf +Give the StringBuilder a more flexible internal structure, with a +chained list of strings instead of just one string. This make it +more efficient when building large strings, e.g. with cStringIO(). +Also, use systematically jit.conditional_call() instead of regular +branches. This lets the JIT make more linear code, at the cost of +forcing a bit more data (to be passed as arguments to +conditional_calls). I would expect the net result to be a slight +slow-down on some simple benchmarks and a speed-up on bigger +programs. + +.. branch: ec-threadlocal +Change the executioncontext's lookup to be done by reading a thread- +local variable (which is implemented in C using '__thread' if +possible, and pthread_getspecific() otherwise). On Linux x86 and +x86-64, the JIT backend has a special optimization that lets it emit +directly a single MOV from a %gs- or %fs-based address. It seems +actually to give a good boost in performance. + +.. branch: fast-gil +A faster way to handle the GIL, particularly in JIT code. 
The GIL is +now a composite of two concepts: a global number (it's just set from +1 to 0 and back around CALL_RELEASE_GIL), and a real mutex. If there +are threads waiting to acquire the GIL, one of them is actively +checking the global number every 0.1 ms to 1 ms. Overall, JIT loops +full of external function calls now run a bit faster (if no thread was +started yet), or a *lot* faster (if threads were started already). + +.. branch: jit-get-errno +Optimize the errno handling in the JIT, notably around external +function calls. Linux-only. + +.. branch: disable_pythonapi +Remove non-functioning ctypes.pyhonapi and ctypes.PyDLL, document this +incompatibility with cpython. Recast sys.dllhandle to an int. + +.. branch: scalar-operations +Fix performance regression on ufunc(, ) in numpy. diff --git a/pypy/doc/whatsnew-pypy3-2.3.1.rst b/pypy/doc/whatsnew-pypy3-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-pypy3-2.3.1.rst @@ -0,0 +1,6 @@ +========================= +What's new in PyPy3 2.3.1 +========================= + +.. this is a revision shortly after pypy3-release-2.3.x +.. 
startrev: 0137d8e6657d diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -30,8 +30,6 @@ if w_dict is not None: # for tests w_entry_point = space.getitem(w_dict, space.wrap('entry_point')) w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel')) - w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish)) - w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup)) withjit = space.config.objspace.usemodules.pypyjit def entry_point(argv): @@ -53,7 +51,7 @@ argv = argv[:1] + argv[3:] try: try: - space.call_function(w_run_toplevel, w_call_startup_gateway) + space.startup() w_executable = space.wrap(argv[0]) w_argv = space.newlist([space.wrap(s) for s in argv[1:]]) w_exitcode = space.call_function(w_entry_point, w_executable, w_argv) @@ -69,7 +67,7 @@ return 1 finally: try: - space.call_function(w_run_toplevel, w_call_finish_gateway) + space.finish() except OperationError, e: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) @@ -184,11 +182,6 @@ 'pypy_thread_attach': pypy_thread_attach, 'pypy_setup_home': pypy_setup_home} -def call_finish(space): - space.finish() - -def call_startup(space): - space.startup() # _____ Define and setup target ___ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -395,6 +395,7 @@ def startup(self): # To be called before using the space + self.threadlocals.enter_thread(self) # Initialize already imported builtin modules from pypy.interpreter.module import Module @@ -639,30 +640,36 @@ """NOT_RPYTHON: Abstract method that should put some minimal content into the w_builtins.""" - @jit.loop_invariant def getexecutioncontext(self): "Return what we consider to be the active execution context." 
# Important: the annotator must not see a prebuilt ExecutionContext: # you should not see frames while you translate # so we make sure that the threadlocals never *have* an # ExecutionContext during translation. - if self.config.translating and not we_are_translated(): - assert self.threadlocals.getvalue() is None, ( - "threadlocals got an ExecutionContext during translation!") - try: - return self._ec_during_translation - except AttributeError: - ec = self.createexecutioncontext() - self._ec_during_translation = ec + if not we_are_translated(): + if self.config.translating: + assert self.threadlocals.get_ec() is None, ( + "threadlocals got an ExecutionContext during translation!") + try: + return self._ec_during_translation + except AttributeError: + ec = self.createexecutioncontext() + self._ec_during_translation = ec + return ec + else: + ec = self.threadlocals.get_ec() + if ec is None: + self.threadlocals.enter_thread(self) + ec = self.threadlocals.get_ec() return ec - # normal case follows. The 'thread' module installs a real - # thread-local object in self.threadlocals, so this builds - # and caches a new ec in each thread. - ec = self.threadlocals.getvalue() - if ec is None: - ec = self.createexecutioncontext() - self.threadlocals.setvalue(ec) - return ec + else: + # translated case follows. self.threadlocals is either from + # 'pypy.interpreter.miscutils' or 'pypy.module.thread.threadlocals'. + # the result is assumed to be non-null: enter_thread() was called + # by space.startup(). + ec = self.threadlocals.get_ec() + assert ec is not None + return ec def _freeze_(self): return True @@ -963,6 +970,13 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_no_unpack(self, w_iterable): + """ Same as listview() if cheap. If 'w_iterable' is something like + a generator, for example, then return None instead. + May return None anyway. 
+ """ + return None + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. @@ -1487,9 +1501,7 @@ return buf.as_str() def str_or_None_w(self, w_obj): - if self.is_w(w_obj, self.w_None): - return None - return self.str_w(w_obj) + return None if self.is_none(w_obj) else self.str_w(w_obj) def str_w(self, w_obj): return w_obj.str_w(self) diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -496,6 +496,13 @@ """ +class UserDelCallback(object): + def __init__(self, w_obj, callback, descrname): + self.w_obj = w_obj + self.callback = callback + self.descrname = descrname + self.next = None + class UserDelAction(AsyncAction): """An action that invokes all pending app-level __del__() method. This is done as an action instead of immediately when the @@ -506,12 +513,18 @@ def __init__(self, space): AsyncAction.__init__(self, space) - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None self.finalizers_lock_count = 0 self.enabled_at_app_level = True def register_callback(self, w_obj, callback, descrname): - self.dying_objects.append((w_obj, callback, descrname)) + cb = UserDelCallback(w_obj, callback, descrname) + if self.dying_objects_last is None: + self.dying_objects = cb + else: + self.dying_objects_last.next = cb + self.dying_objects_last = cb self.fire() def perform(self, executioncontext, frame): @@ -525,13 +538,33 @@ # avoid too deep recursions of the kind of __del__ being called # while in the middle of another __del__ call. 
pending = self.dying_objects - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None space = self.space - for i in range(len(pending)): - w_obj, callback, descrname = pending[i] - pending[i] = (None, None, None) + while pending is not None: try: - callback(w_obj) + pending.callback(pending.w_obj) except OperationError, e: - e.write_unraisable(space, descrname, w_obj) + e.write_unraisable(space, pending.descrname, pending.w_obj) e.clear(space) # break up reference cycles + pending = pending.next + # + # Note: 'dying_objects' used to be just a regular list instead + # of a chained list. This was the cause of "leaks" if we have a + # program that constantly creates new objects with finalizers. + # Here is why: say 'dying_objects' is a long list, and there + # are n instances in it. Then we spend some time in this + # function, possibly triggering more GCs, but keeping the list + # of length n alive. Then the list is suddenly freed at the + # end, and we return to the user program. At this point the + # GC limit is still very high, because just before, there was + # a list of length n alive. Assume that the program continues + # to allocate a lot of instances with finalizers. The high GC + # limit means that it could allocate a lot of instances before + # reaching it --- possibly more than n. So the whole procedure + # repeats with higher and higher values of n. + # + # This does not occur in the current implementation because + # there is no list of length n: if n is large, then the GC + # will run several times while walking the list, but it will + # see lower and lower memory usage, with no lower bound of n. 
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -895,7 +895,7 @@ "use unwrap_spec(...=WrappedDefault(default))" % ( self._code.identifier, name, defaultval)) defs_w.append(None) - else: + elif name != '__args__' and name != 'args_w': defs_w.append(space.wrap(defaultval)) if self._code._unwrap_spec: UNDEFINED = object() diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -61,6 +61,13 @@ return self.send_ex(w_arg) def send_ex(self, w_arg, operr=None): + pycode = self.pycode + if jit.we_are_jitted() and should_not_inline(pycode): + generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg, + operr=operr, pycode=pycode) + return self._send_ex(w_arg, operr) + + def _send_ex(self, w_arg, operr): space = self.space if self.running: raise OperationError(space.w_ValueError, @@ -72,8 +79,7 @@ if operr is None: operr = OperationError(space.w_StopIteration, space.w_None) raise operr - # XXX it's not clear that last_instr should be promoted at all - # but as long as it is necessary for call_assembler, let's do it early + last_instr = jit.promote(frame.last_instr) if last_instr == -1: if w_arg and not space.is_w(w_arg, space.w_None): @@ -214,3 +220,38 @@ "interrupting generator of ") break block = block.previous + + + +def get_printable_location_genentry(bytecode): + return '%s ' % (bytecode.get_repr(),) +generatorentry_driver = jit.JitDriver(greens=['pycode'], + reds=['gen', 'w_arg', 'operr'], + get_printable_location = + get_printable_location_genentry, + name='generatorentry') + +from pypy.tool.stdlib_opcode import HAVE_ARGUMENT, opmap +YIELD_VALUE = opmap['YIELD_VALUE'] + + at jit.elidable_promote() +def should_not_inline(pycode): + # Should not inline generators with more than one "yield", + # as an approximative fix (see issue #1782). 
There are cases + # where it slows things down; for example calls to a simple + # generator that just produces a few simple values with a few + # consecutive "yield" statements. It fixes the near-infinite + # slow-down in issue #1782, though... + count_yields = 0 + code = pycode.co_code + n = len(code) + i = 0 + while i < n: + c = code[i] + op = ord(c) + if op == YIELD_VALUE: + count_yields += 1 + i += 1 + if op >= HAVE_ARGUMENT: + i += 2 + return count_yields >= 2 diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py --- a/pypy/interpreter/miscutils.py +++ b/pypy/interpreter/miscutils.py @@ -11,11 +11,14 @@ """ _value = None - def getvalue(self): + def get_ec(self): return self._value - def setvalue(self, value): - self._value = value + def enter_thread(self, space): + self._value = space.createexecutioncontext() + + def try_enter_thread(self, space): + return False def signals_enabled(self): return True diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -511,10 +511,10 @@ for i in range(min(len(varnames), self.getcode().co_nlocals)): name = varnames[i] w_value = self.locals_stack_w[i] - w_name = self.space.wrap(name) if w_value is not None: - self.space.setitem(self.w_locals, w_name, w_value) + self.space.setitem_str(self.w_locals, name, w_value) else: + w_name = self.space.wrap(name) try: self.space.delitem(self.w_locals, w_name) except OperationError as e: @@ -534,8 +534,7 @@ except ValueError: pass else: - w_name = self.space.wrap(name) - self.space.setitem(self.w_locals, w_name, w_value) + self.space.setitem_str(self.w_locals, name, w_value) @jit.unroll_safe From noreply at buildbot.pypy.org Sat Jul 26 14:52:48 2014 From: noreply at buildbot.pypy.org (Valentina Mukhamedzhanova) Date: Sat, 26 Jul 2014 14:52:48 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fix zlib's test_decompress_eof. 
Message-ID: <20140726125248.4E4111C0695@cobra.cs.uni-duesseldorf.de> Author: Valentina Mukhamedzhanova Branch: py3.3 Changeset: r72504:c22750fbe7ad Date: 2014-07-26 14:46 +0200 http://bitbucket.org/pypy/pypy/changeset/c22750fbe7ad/ Log: Fix zlib's test_decompress_eof. diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py --- a/pypy/module/zlib/interp_zlib.py +++ b/pypy/module/zlib/interp_zlib.py @@ -1,7 +1,7 @@ import sys from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef, interp_attrproperty_bytes +from pypy.interpreter.typedef import TypeDef, interp_attrproperty_bytes, interp_attrproperty from pypy.interpreter.error import OperationError, oefmt from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.objectmodel import keepalive_until_here @@ -231,6 +231,7 @@ ZLibObject.__init__(self, space) self.unused_data = '' self.unconsumed_tail = '' + self.eof = False try: self.stream = rzlib.inflateInit(wbits) except rzlib.RZlibError, e: @@ -238,7 +239,7 @@ except ValueError: raise OperationError(space.w_ValueError, space.wrap("Invalid initialization option")) - + def __del__(self): """Automatically free the resources used by the stream.""" if self.stream: @@ -280,6 +281,7 @@ raise zlib_error(space, e.msg) string, finished, unused_len = result + self.eof = finished self._save_unconsumed_input(data, finished, unused_len) return space.wrapbytes(string) @@ -327,6 +329,7 @@ flush = interp2app(Decompress.flush), unused_data = interp_attrproperty_bytes('unused_data', Decompress), unconsumed_tail = interp_attrproperty_bytes('unconsumed_tail', Decompress), + eof = interp_attrproperty('eof', Decompress), __doc__ = """decompressobj([wbits]) -- Return a decompressor object. Optional arg wbits is the window buffer size. 
diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -166,6 +166,18 @@ dco = zlib.decompressobj() assert dco.flush() == b"" + def test_decompress_eof(self): + import zlib + x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # 'foo' + dco = zlib.decompressobj() + assert dco.eof == False + dco.decompress(x[:-5]) + assert dco.eof == False + dco.decompress(x[-5:]) + assert dco.eof == True + dco.flush() + assert dco.eof == True + def test_decompress_incomplete_stream(self): import zlib # This is 'foo', deflated From noreply at buildbot.pypy.org Sat Jul 26 14:52:49 2014 From: noreply at buildbot.pypy.org (Valentina Mukhamedzhanova) Date: Sat, 26 Jul 2014 14:52:49 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merge heads. Message-ID: <20140726125249.CD30A1C0695@cobra.cs.uni-duesseldorf.de> Author: Valentina Mukhamedzhanova Branch: py3.3 Changeset: r72505:4bba41190d41 Date: 2014-07-26 14:48 +0200 http://bitbucket.org/pypy/pypy/changeset/4bba41190d41/ Log: Merge heads. 
diff --git a/lib-python/3/json/__init__.py b/lib-python/3/json/__init__.py --- a/lib-python/3/json/__init__.py +++ b/lib-python/3/json/__init__.py @@ -104,6 +104,12 @@ __author__ = 'Bob Ippolito ' +try: + # PyPy speedup, the interface is different than CPython's _json + import _pypyjson +except ImportError: + _pypyjson = None + from .decoder import JSONDecoder from .encoder import JSONEncoder @@ -313,7 +319,7 @@ if (cls is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): - return _default_decoder.decode(s) + return _pypyjson.loads(s) if _pypyjson else _default_decoder.decode(s) if cls is None: cls = JSONDecoder if object_hook is not None: diff --git a/lib-python/3/test/test_csv.py b/lib-python/3/test/test_csv.py --- a/lib-python/3/test/test_csv.py +++ b/lib-python/3/test/test_csv.py @@ -766,8 +766,9 @@ mydialect.quotechar = "''" with self.assertRaises(csv.Error) as cm: mydialect() + # NOTE: Patched exception message since cpython uses bad grammar (cpython issue22076) self.assertEqual(str(cm.exception), - '"quotechar" must be an 1-character string') + '"quotechar" must be a 1-character string') mydialect.quotechar = 4 with self.assertRaises(csv.Error) as cm: @@ -789,14 +790,16 @@ mydialect.delimiter = ":::" with self.assertRaises(csv.Error) as cm: mydialect() + # NOTE: Patched exception message since cpython uses bad grammar (cpython issue22076) self.assertEqual(str(cm.exception), - '"delimiter" must be an 1-character string') + '"delimiter" must be a 1-character string') mydialect.delimiter = "" with self.assertRaises(csv.Error) as cm: mydialect() + # NOTE: Patched exception message since cpython uses bad grammar (cpython issue22076) self.assertEqual(str(cm.exception), - '"delimiter" must be an 1-character string') + '"delimiter" must be a 1-character string') mydialect.delimiter = b"," with self.assertRaises(csv.Error) as cm: diff --git a/lib-python/stdlib-version.txt 
b/lib-python/stdlib-version.txt --- a/lib-python/stdlib-version.txt +++ b/lib-python/stdlib-version.txt @@ -6,4 +6,4 @@ 2.7:: 3a1db0d2747e (2.7) v2.7.6 3:: - cef745775b65 (3.2) v3.2.5 + 62cf4e77f785 (3.3) v3.3.5 diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -204,7 +204,7 @@ elif opcode == opcodedesc.BREAK_LOOP.index: next_instr = self.BREAK_LOOP(oparg, next_instr) elif opcode == opcodedesc.CONTINUE_LOOP.index: - next_instr = self.CONTINUE_LOOP(oparg, next_instr) + return self.CONTINUE_LOOP(oparg, next_instr) elif opcode == opcodedesc.FOR_ITER.index: next_instr = self.FOR_ITER(oparg, next_instr) elif opcode == opcodedesc.JUMP_FORWARD.index: @@ -1022,7 +1022,6 @@ raise w_value = space.w_None self.pushvalue(w_value) - return next_instr else: # iter remains on stack, w_retval is value to be yielded. self.pushvalue(w_retval) diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -307,13 +307,13 @@ w_co = space.appexec([], '''(): def g(x): yield x + 5 - return g.func_code + return g.__code__ ''') assert should_not_inline(w_co) == False w_co = space.appexec([], '''(): def g(x): yield x + 5 yield x + 6 - return g.func_code + return g.__code__ ''') assert should_not_inline(w_co) == True diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -53,8 +53,7 @@ if dir_meth is not None: result = dir_meth() if not isinstance(result, list): - raise TypeError("__dir__() must return a list, not %r" % ( - type(result),)) + result = list(result) # Will throw TypeError if not iterable result.sort() return result elif isinstance(obj, types.ModuleType): diff --git a/pypy/module/__builtin__/functional.py 
b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -482,7 +482,7 @@ def descr_hash(self, space): if space.eq_w(self.w_length, space.wrap(0)): w_tup = space.newtuple([self.w_length, space.w_None, space.w_None]) - elif space.eq_w(self.w_length, space.wrap(0)): + elif space.eq_w(self.w_length, space.wrap(1)): w_tup = space.newtuple([self.w_length, self.w_start, space.w_None]) else: w_tup = space.newtuple([self.w_length, self.w_start, self.w_step]) diff --git a/pypy/module/__builtin__/test/test_construct_singletons.py b/pypy/module/__builtin__/test/test_construct_singletons.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_construct_singletons.py @@ -0,0 +1,7 @@ +class AppTestConstructSingletons: + + def test_construct_singletons(self): + none_type = type(None) + assert none_type() is None + raises(TypeError, none_type, 1, 2) + raises(TypeError, none_type, a=1, b=2) diff --git a/pypy/module/__builtin__/test/test_dir.py b/pypy/module/__builtin__/test/test_dir.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_dir.py @@ -0,0 +1,26 @@ +class AppTestDir: + + def test_dir_obj__dir__tuple(self): + """If __dir__ method returns a tuple, cpython3 converts it to list.""" + class Foo(object): + def __dir__(self): + return ("b", "c", "a") + res = dir(Foo()) + assert isinstance(res, list) + assert res == ["a", "b", "c"] + + def test_dir_obj__dir__genexp(self): + """Generator expression is also converted to list by cpython3.""" + class Foo(object): + def __dir__(self): + return (i for i in ["b", "c", "a"]) + res = dir(Foo()) + assert isinstance(res, list) + assert res == ["a", "b", "c"] + + def test_dir_obj__dir__noniter(self): + """If result of __dir__ is not iterable, it's an error.""" + class Foo(object): + def __dir__(self): + return 42 + raises(TypeError, dir, Foo()) diff --git a/pypy/module/__builtin__/test/test_functional.py 
b/pypy/module/__builtin__/test/test_functional.py --- a/pypy/module/__builtin__/test/test_functional.py +++ b/pypy/module/__builtin__/test/test_functional.py @@ -484,7 +484,7 @@ for a in test_ranges: for b in test_ranges: if a == b: - assert (hash(a), hash(b)) + assert hash(a) == hash(b) # Ranges are unequal to other types (even sequence types) assert (range(0) == ()) is False diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -34,10 +34,15 @@ return default return space.int_w(w_src) -def _get_str(space, w_src, default): +def _get_str(space, w_src, default, attrname): if w_src is None: return default - return space.unicode_w(w_src) + try: + return space.unicode_w(w_src) + except OperationError as e: + if e.match(space, space.w_TypeError): + raise oefmt(space.w_TypeError, '"%s" must be a string', attrname) + raise def _get_char(space, w_src, default, name): if w_src is None: @@ -91,7 +96,7 @@ dialect.delimiter = _get_char(space, w_delimiter, u',', 'delimiter') dialect.doublequote = _get_bool(space, w_doublequote, True) dialect.escapechar = _get_char(space, w_escapechar, u'\0', 'escapechar') - dialect.lineterminator = _get_str(space, w_lineterminator, u'\r\n') + dialect.lineterminator = _get_str(space, w_lineterminator, u'\r\n', 'lineterminator') dialect.quotechar = _get_char(space, w_quotechar, u'"', 'quotechar') tmp_quoting = _get_int(space, w_quoting, QUOTE_MINIMAL) dialect.skipinitialspace = _get_bool(space, w_skipinitialspace, False) diff --git a/pypy/module/_csv/test/test_dialect.py b/pypy/module/_csv/test/test_dialect.py --- a/pypy/module/_csv/test/test_dialect.py +++ b/pypy/module/_csv/test/test_dialect.py @@ -67,6 +67,9 @@ kwargs = {name: value} raises(TypeError, _csv.register_dialect, 'foo1', **kwargs) + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', lineterminator=4) + assert exc_info.value.args[0] == '"lineterminator" must be a string' + def 
test_bool_arg(self): # boolean arguments take *any* object and use its truth-value import _csv diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -4,7 +4,6 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import ( TypeDef, interp_attrproperty, generic_new_descr) -from pypy.module.exceptions.interp_exceptions import W_IOError from pypy.module._io.interp_fileio import W_FileIO from pypy.module._io.interp_textio import W_TextIOWrapper from rpython.rtyper.module.ll_os_stat import STAT_FIELD_TYPES diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py --- a/pypy/module/_pypyjson/interp_decoder.py +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -56,6 +56,7 @@ self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw') self.pos = 0 self.last_type = TYPE_UNKNOWN + self.memo = {} def close(self): rffi.free_charp(self.ll_chars) @@ -261,6 +262,8 @@ w_name = self.decode_any(i) if self.last_type != TYPE_STRING: self._raise("Key name must be string for object starting at char %d", start) + w_name = self.memo.setdefault(self.space.unicode_w(w_name), w_name) + i = self.skip_whitespace(self.pos) ch = self.ll_chars[i] if ch != ':': diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py --- a/pypy/module/_pypyjson/test/test__pypyjson.py +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -187,4 +187,12 @@ import _pypyjson # http://json.org/JSON_checker/test/fail25.json s = '["\ttab\tcharacter\tin\tstring\t"]' - raises(ValueError, "_pypyjson.loads(s)") \ No newline at end of file + raises(ValueError, "_pypyjson.loads(s)") + + def test_keys_reuse(self): + import _pypyjson + s = '[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]' + rval = _pypyjson.loads(s) + (a, b), (c, d) = sorted(rval[0]), sorted(rval[1]) + assert a is c + assert b is d diff --git 
a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -244,7 +244,7 @@ """ size = self.len if size == 0: - return space.wrap('') + return space.wrapbytes('') cbuf = self._charbuf_start() s = rffi.charpsize2str(cbuf, size * self.itemsize) self._charbuf_stop() diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -454,6 +454,10 @@ # PyExc_NameError, PyExc_MemoryError, PyExc_RuntimeError, # PyExc_UnicodeEncodeError, PyExc_UnicodeDecodeError, ... for exc_name in exceptions.Module.interpleveldefs.keys(): + if exc_name in ('EnvironmentError', 'IOError'): + # FIXME: aliases of OSError cause a clash of names via + # export_struct + continue GLOBALS['PyExc_' + exc_name] = ( 'PyTypeObject*', 'space.gettypeobject(interp_exceptions.W_%s.typedef)'% (exc_name, )) diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -20,13 +20,13 @@ /* Version parsed out into numeric values */ #define PY_MAJOR_VERSION 3 -#define PY_MINOR_VERSION 2 +#define PY_MINOR_VERSION 3 #define PY_MICRO_VERSION 5 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "3.2.5" +#define PY_VERSION "3.3.5" /* PyPy version as a string */ #define PYPY_VERSION "2.4.0-alpha0" diff --git a/pypy/module/cpyext/test/test_exception.py b/pypy/module/cpyext/test/test_exception.py --- a/pypy/module/cpyext/test/test_exception.py +++ b/pypy/module/cpyext/test/test_exception.py @@ -1,4 +1,5 @@ from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.pyobject import make_ref class TestExceptions(BaseApiTest): @@ -27,3 +28,16 @@ 
api.PyException_SetCause(w_exc, make_ref(space, w_cause)) assert space.is_w(api.PyException_GetCause(w_exc), w_cause) + +class AppTestExceptions(AppTestCpythonExtensionBase): + + def test_OSError_aliases(self): + module = self.import_extension('foo', [ + ("get_aliases", "METH_NOARGS", + """ + return PyTuple_Pack(2, + PyExc_EnvironmentError, + PyExc_IOError); + """), + ]) + assert module.get_aliases() == (OSError, OSError) diff --git a/pypy/module/exceptions/__init__.py b/pypy/module/exceptions/__init__.py --- a/pypy/module/exceptions/__init__.py +++ b/pypy/module/exceptions/__init__.py @@ -21,14 +21,14 @@ 'ConnectionResetError': 'interp_exceptions.W_ConnectionResetError', 'DeprecationWarning' : 'interp_exceptions.W_DeprecationWarning', 'EOFError' : 'interp_exceptions.W_EOFError', - 'EnvironmentError' : 'interp_exceptions.W_EnvironmentError', + 'EnvironmentError' : 'interp_exceptions.W_OSError', 'Exception' : 'interp_exceptions.W_Exception', 'FileExistsError': 'interp_exceptions.W_FileExistsError', 'FileNotFoundError': 'interp_exceptions.W_FileNotFoundError', 'FloatingPointError' : 'interp_exceptions.W_FloatingPointError', 'FutureWarning' : 'interp_exceptions.W_FutureWarning', 'GeneratorExit' : 'interp_exceptions.W_GeneratorExit', - 'IOError' : 'interp_exceptions.W_IOError', + 'IOError' : 'interp_exceptions.W_OSError', 'ImportError' : 'interp_exceptions.W_ImportError', 'ImportWarning' : 'interp_exceptions.W_ImportWarning', 'IndentationError' : 'interp_exceptions.W_IndentationError', diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -497,9 +497,6 @@ filename = readwrite_attrproperty_w('w_filename', W_OSError), ) -W_EnvironmentError = W_OSError -W_IOError = W_OSError - class W_WindowsError(W_OSError): """MS-Windows OS system call failed.""" diff --git a/pypy/module/math/__init__.py b/pypy/module/math/__init__.py 
--- a/pypy/module/math/__init__.py +++ b/pypy/module/math/__init__.py @@ -23,6 +23,7 @@ 'frexp' : 'interp_math.frexp', 'degrees' : 'interp_math.degrees', 'log' : 'interp_math.log', + 'log2' : 'interp_math.log2', 'log10' : 'interp_math.log10', 'fmod' : 'interp_math.fmod', 'atan' : 'interp_math.atan', diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py --- a/pypy/module/math/interp_math.py +++ b/pypy/module/math/interp_math.py @@ -228,6 +228,11 @@ return math1(space, math.log, w_base) return _log_any(space, w_x, base) +def log2(space, w_x): + """log2(x) -> the base 2 logarithm of x. + """ + return _log_any(space, w_x, 2.0) + def log10(space, w_x): """log10(x) -> the base 10 logarithm of x. """ diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py --- a/pypy/module/math/test/test_math.py +++ b/pypy/module/math/test/test_math.py @@ -148,6 +148,19 @@ raises(ValueError, math.log1p, -1) raises(ValueError, math.log1p, -100) + def test_log2(self): + import math + self.ftest(math.log2(0.125), -3) + self.ftest(math.log2(0.5), -1) + self.ftest(math.log2(4), 2) + + def test_log10(self): + import math + self.ftest(math.log10(0.1), -1) + self.ftest(math.log10(10), 1) + self.ftest(math.log10(100), 2) + self.ftest(math.log10(0.01), -2) + def test_acosh(self): import math self.ftest(math.acosh(1), 0) diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -1,5 +1,6 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.module.ll_os import RegisterOs +from rpython.rlib import rdynload import os exec 'import %s as posix' % os.name @@ -173,6 +174,12 @@ if hasattr(os, name): interpleveldefs[name] = 'interp_posix.' 
+ name + for _name in ["RTLD_LAZY", "RTLD_NOW", "RTLD_GLOBAL", "RTLD_LOCAL", + "RTLD_NODELETE", "RTLD_NOLOAD", "RTLD_DEEPBIND"]: + if getattr(rdynload.cConfig, _name) is not None: + interpleveldefs[_name] = 'space.wrap(%d)' % ( + getattr(rdynload.cConfig, _name),) + # os.py uses this list to build os.supports_dir_fd() and os.supports_fd(). # Fill with e.g. HAVE_FCHDIR, when os.chdir() supports file descriptors. interpleveldefs['_have_functions'] = 'space.newlist([])' diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -488,8 +488,7 @@ cur = os.getlogin() except OSError, e: raise wrap_oserror(space, e) - else: - return space.wrap(cur) + return space.fsdecode(space.wrapbytes(cur)) # ____________________________________________________________ @@ -702,14 +701,21 @@ except OSError, e: raise wrap_oserror(space, e) - at unwrap_spec(path='fsencode') -def readlink(space, path): +def readlink(space, w_path): "Return a string representing the path to which the symbolic link points." 
+ is_unicode = space.isinstance_w(w_path, space.w_unicode) + if is_unicode: + path = space.fsencode_w(w_path) + else: + path = space.bytes0_w(w_path) try: result = os.readlink(path) except OSError, e: - raise wrap_oserror(space, e, path) - return space.wrap(result) + raise wrap_oserror2(space, e, w_path) + w_result = space.wrapbytes(result) + if is_unicode: + return space.fsdecode(w_result) + return w_result before_fork_hooks = [] after_fork_child_hooks = [] @@ -899,7 +905,8 @@ r = os.uname() except OSError, e: raise wrap_oserror(space, e) - l_w = [space.wrap(i) for i in [r[0], r[1], r[2], r[3], r[4]]] + l_w = [space.fsdecode(space.wrapbytes(i)) + for i in [r[0], r[1], r[2], r[3], r[4]]] w_tuple = space.newtuple(l_w) w_uname_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('uname_result')) @@ -1229,7 +1236,7 @@ @unwrap_spec(fd=c_int) def ttyname(space, fd): try: - return space.wrap(os.ttyname(fd)) + return space.fsdecode(space.wrapbytes(os.ttyname(fd))) except OSError, e: raise wrap_oserror(space, e) @@ -1364,7 +1371,7 @@ Return the name of the controlling terminal for this process. 
""" - return space.wrap(os.ctermid()) + return space.fsdecode(space.wrapbytes(os.ctermid())) @unwrap_spec(fd=c_int) def device_encoding(space, fd): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -17,7 +17,7 @@ usemodules = ['binascii', 'posix', 'signal', 'struct', 'rctime'] # py3k os.open uses subprocess, requiring the following per platform if os.name != 'nt': - usemodules += ['fcntl', 'select'] + usemodules += ['fcntl', 'select', '_posixsubprocess'] else: usemodules += ['_rawffi', 'thread'] mod.space = gettestobjspace(usemodules=usemodules) @@ -1042,6 +1042,12 @@ # just ensure it returns something reasonable assert encoding is None or type(encoding) is str + def test_rtld_constants(self): + # check presence of major RTLD_* constants + self.posix.RTLD_LAZY + self.posix.RTLD_NOW + self.posix.RTLD_GLOBAL + self.posix.RTLD_LOCAL class AppTestEnvironment(object): def setup_class(cls): diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -6,7 +6,7 @@ from pypy.interpreter import gateway #XXX # the release serial 42 is not in range(16) -CPYTHON_VERSION = (3, 2, 5, "final", 0) +CPYTHON_VERSION = (3, 3, 5, "final", 0) #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h diff --git a/pypy/objspace/std/nonetype.py b/pypy/objspace/std/nonetype.py --- a/pypy/objspace/std/nonetype.py +++ b/pypy/objspace/std/nonetype.py @@ -1,8 +1,15 @@ from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.interpreter import gateway +def descr__new__(space, w_type): + return space.w_None + # ____________________________________________________________ none_typedef = StdTypeDef("NoneType", + __new__ = gateway.interp2app(descr__new__) ) none_typedef.acceptable_as_base_class = False + + From noreply at 
buildbot.pypy.org Sat Jul 26 15:08:00 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 26 Jul 2014 15:08:00 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Removing sys.flags.division_warning (removed in 3.3) Message-ID: <20140726130800.C84121C0695@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72506:d4642496139c Date: 2014-07-26 15:02 +0200 http://bitbucket.org/pypy/pypy/changeset/d4642496139c/ Log: Removing sys.flags.division_warning (removed in 3.3) diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -317,7 +317,6 @@ # Order is significant! sys_flags = ( "debug", - "division_warning", "inspect", "interactive", "optimize", diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -94,20 +94,19 @@ name = "sys.flags" debug = structseqfield(0) - division_warning = structseqfield(1) - inspect = structseqfield(2) - interactive = structseqfield(3) - optimize = structseqfield(4) - dont_write_bytecode = structseqfield(5) - no_user_site = structseqfield(6) - no_site = structseqfield(7) - ignore_environment = structseqfield(8) - verbose = structseqfield(9) - bytes_warning = structseqfield(10) - quiet = structseqfield(11) - hash_randomization = structseqfield(12) + inspect = structseqfield(1) + interactive = structseqfield(2) + optimize = structseqfield(3) + dont_write_bytecode = structseqfield(4) + no_user_site = structseqfield(5) + no_site = structseqfield(6) + ignore_environment = structseqfield(7) + verbose = structseqfield(8) + bytes_warning = structseqfield(9) + quiet = structseqfield(10) + hash_randomization = structseqfield(11) -null_sysflags = sysflags((0,)*13) +null_sysflags = sysflags((0,)*12) null__xoptions = {} From noreply at buildbot.pypy.org Sat Jul 26 15:08:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Jul 2014 15:08:02 +0200 (CEST) 
Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3 (pull request #249) Message-ID: <20140726130802.1ABBE1C0695@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3.3 Changeset: r72507:a85920372113 Date: 2014-07-26 15:07 +0200 http://bitbucket.org/pypy/pypy/changeset/a85920372113/ Log: Merged in numerodix/pypy/py3.3 (pull request #249) Removing sys.flags.division_warning (removed in 3.3) diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -317,7 +317,6 @@ # Order is significant! sys_flags = ( "debug", - "division_warning", "inspect", "interactive", "optimize", diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -94,20 +94,19 @@ name = "sys.flags" debug = structseqfield(0) - division_warning = structseqfield(1) - inspect = structseqfield(2) - interactive = structseqfield(3) - optimize = structseqfield(4) - dont_write_bytecode = structseqfield(5) - no_user_site = structseqfield(6) - no_site = structseqfield(7) - ignore_environment = structseqfield(8) - verbose = structseqfield(9) - bytes_warning = structseqfield(10) - quiet = structseqfield(11) - hash_randomization = structseqfield(12) + inspect = structseqfield(1) + interactive = structseqfield(2) + optimize = structseqfield(3) + dont_write_bytecode = structseqfield(4) + no_user_site = structseqfield(5) + no_site = structseqfield(6) + ignore_environment = structseqfield(7) + verbose = structseqfield(8) + bytes_warning = structseqfield(9) + quiet = structseqfield(10) + hash_randomization = structseqfield(11) -null_sysflags = sysflags((0,)*13) +null_sysflags = sysflags((0,)*12) null__xoptions = {} From noreply at buildbot.pypy.org Sat Jul 26 15:18:13 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 26 Jul 2014 15:18:13 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: RPython docs: Use read the docs theme locally 
if available (copied from PyPy docs configuration). Message-ID: <20140726131813.129071C046A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72508:8e884ef6d1d1 Date: 2014-07-26 15:17 +0200 http://bitbucket.org/pypy/pypy/changeset/8e884ef6d1d1/ Log: RPython docs: Use read the docs theme locally if available (copied from PyPy docs configuration). diff --git a/rpython/doc/conf.py b/rpython/doc/conf.py --- a/rpython/doc/conf.py +++ b/rpython/doc/conf.py @@ -18,6 +18,24 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) + +# -- Read The Docs theme config ------------------------------------------------ + +# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org +on_rtd = os.environ.get('READTHEDOCS', None) == 'True' + +if not on_rtd: # only import and set the theme if we're building docs locally + try: + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + except ImportError: + print('sphinx_rtd_theme is not installed') + html_theme = 'default' + +# otherwise, readthedocs.org uses their theme by default, so no need to specify it + + # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. @@ -91,7 +109,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'default' +#html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the From noreply at buildbot.pypy.org Sat Jul 26 15:28:11 2014 From: noreply at buildbot.pypy.org (Valentina Mukhamedzhanova) Date: Sat, 26 Jul 2014 15:28:11 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Add test_decompress_eof_incomplete_stream to test_zlib.py. 
Message-ID: <20140726132811.1631E1C0695@cobra.cs.uni-duesseldorf.de> Author: Valentina Mukhamedzhanova Branch: py3.3 Changeset: r72509:2b2441e85ee2 Date: 2014-07-26 15:14 +0200 http://bitbucket.org/pypy/pypy/changeset/2b2441e85ee2/ Log: Add test_decompress_eof_incomplete_stream to test_zlib.py. diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -177,6 +177,16 @@ assert dco.eof == True dco.flush() assert dco.eof == True + + def test_decompress_eof_incomplete_stream(self): + import zlib + x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # 'foo' + dco = zlib.decompressobj() + assert dco.eof == False + dco.decompress(x[:-5]) + assert dco.eof == False + dco.flush() + assert dco.eof == False def test_decompress_incomplete_stream(self): import zlib From noreply at buildbot.pypy.org Sat Jul 26 15:28:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Jul 2014 15:28:12 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge heads Message-ID: <20140726132812.441251C0695@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3.3 Changeset: r72510:4e00d712484e Date: 2014-07-26 15:27 +0200 http://bitbucket.org/pypy/pypy/changeset/4e00d712484e/ Log: merge heads diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -177,6 +177,16 @@ assert dco.eof == True dco.flush() assert dco.eof == True + + def test_decompress_eof_incomplete_stream(self): + import zlib + x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # 'foo' + dco = zlib.decompressobj() + assert dco.eof == False + dco.decompress(x[:-5]) + assert dco.eof == False + dco.flush() + assert dco.eof == False def test_decompress_incomplete_stream(self): import zlib From noreply at buildbot.pypy.org Sat Jul 26 15:34:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Jul 2014 15:34:20 
+0200 (CEST) Subject: [pypy-commit] pypy default: Remove these old comments Message-ID: <20140726133420.9CE021C06AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72511:e65dfce91ce5 Date: 2014-07-26 15:34 +0200 http://bitbucket.org/pypy/pypy/changeset/e65dfce91ce5/ Log: Remove these old comments diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -30,10 +30,6 @@ raise NotImplementedError def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. - """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) @@ -125,10 +121,6 @@ self.index = space.int_w(self.w_len) + index def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. - """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) From noreply at buildbot.pypy.org Sat Jul 26 15:36:13 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sat, 26 Jul 2014 15:36:13 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fixed pickling of iterators produced by filter(func, iter). Message-ID: <20140726133613.C70961C0A1B@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72512:6aecbffc27c8 Date: 2014-07-26 15:32 +0200 http://bitbucket.org/pypy/pypy/changeset/6aecbffc27c8/ Log: Fixed pickling of iterators produced by filter(func, iter). 
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -711,6 +711,13 @@ if pred ^ self.reverse: return w_obj + def descr_reduce(self, space): + w_filter = space.getattr(space.getbuiltinmodule('builtins'), + space.wrap('filter')) + args = [space.w_None if self.no_predicate else self.w_predicate, + self.iterable] + return space.newtuple([w_filter, space.newtuple(args)]) + def W_Filter___new__(space, w_subtype, w_predicate, w_iterable): r = space.allocate_instance(W_Filter, w_subtype) @@ -722,6 +729,7 @@ __new__ = interp2app(W_Filter___new__), __iter__ = interp2app(W_Filter.iter_w), __next__ = interp2app(W_Filter.next_w), + __reduce__ = interp2app(W_Filter.descr_reduce), __doc__ = """\ Return an iterator yielding those items of iterable for which function(item) is true. If function is None, return the items that are true.""") diff --git a/pypy/module/__builtin__/test/test_filter_pickle.py b/pypy/module/__builtin__/test/test_filter_pickle.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_filter_pickle.py @@ -0,0 +1,72 @@ +class AppTestFilterPickle: + + def test_filter_unpickle(self): + """Test just the unpickling.""" + import pickle + + # This is filter(None, 'abc') pickled with cpython + dump = b'\x80\x03cbuiltins\nfilter\nq\x00Ncbuiltins\niter\nq\x01X\x03'\ + b'\x00\x00\x00abcq\x02\x85q\x03Rq\x04K\x00b\x86q\x05Rq\x06.' 
+ t = pickle.loads(dump) + assert list(t) == ['a', 'b', 'c'] + + def test_iterator_pickle(self): + """Pickle and unpickle just a simple iterator.""" + import pickle + + i0 = iter("abc") + i1 = iter("abc") + + d = pickle.dumps(i1) + i1 = pickle.loads(d) + + assert list(i0) == list(i1) + + def test_reduce_ex(self): + """""" + f0 = filter(None, "abc") + f1 = filter(None, "abc") + + print(f0) + r = f1.__reduce_ex__(3) + # __reduce_ex__ doesn't return any arguments to the filter, so the next + # line will fail with TypeError. + f1 = r[0](*r[1]) + + assert list(f0) == list(f1) + + def test_nonfilter_pickle(self): + """Pickle and unpickle a filter with no filtering.""" + import pickle + + f0 = filter(None, "abc") + d = pickle.dumps(f0) + f1 = pickle.loads(d) + + def test_filter_pickle(self): + """Clone of the original test.""" + import pickle + + def check_iter_pickle(it, seq): + itorg = it + d = pickle.dumps(it) + it = pickle.loads(d) + assert type(itorg) == type(it) + assert list(it) == seq + + #test the iterator after dropping one from it + it = pickle.loads(d) + try: + next(it) + except StopIteration: + return + d = pickle.dumps(it) + it = pickle.loads(d) + assert list(it) == seq[1:] + + # We use ord instead of filter_char because the filter function has to + # be defined in the global scope for the picking to work and we can't + # do it from this test. 
+ f1 = filter(ord, "abcdeabcde") + f2 = filter(ord, "abcdeabcde") + check_iter_pickle(f1, list(f2)) From noreply at buildbot.pypy.org Sat Jul 26 15:36:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 26 Jul 2014 15:36:14 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge heads Message-ID: <20140726133614.E9EF31C0A1B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3.3 Changeset: r72513:b14679e8321d Date: 2014-07-26 15:35 +0200 http://bitbucket.org/pypy/pypy/changeset/b14679e8321d/ Log: merge heads diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -711,6 +711,13 @@ if pred ^ self.reverse: return w_obj + def descr_reduce(self, space): + w_filter = space.getattr(space.getbuiltinmodule('builtins'), + space.wrap('filter')) + args = [space.w_None if self.no_predicate else self.w_predicate, + self.iterable] + return space.newtuple([w_filter, space.newtuple(args)]) + def W_Filter___new__(space, w_subtype, w_predicate, w_iterable): r = space.allocate_instance(W_Filter, w_subtype) @@ -722,6 +729,7 @@ __new__ = interp2app(W_Filter___new__), __iter__ = interp2app(W_Filter.iter_w), __next__ = interp2app(W_Filter.next_w), + __reduce__ = interp2app(W_Filter.descr_reduce), __doc__ = """\ Return an iterator yielding those items of iterable for which function(item) is true. 
If function is None, return the items that are true.""") diff --git a/pypy/module/__builtin__/test/test_filter_pickle.py b/pypy/module/__builtin__/test/test_filter_pickle.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_filter_pickle.py @@ -0,0 +1,72 @@ +class AppTestFilterPickle: + + def test_filter_unpickle(self): + """Test just the unpickling.""" + import pickle + + # This is filter(None, 'abc') pickled with cpython + dump = b'\x80\x03cbuiltins\nfilter\nq\x00Ncbuiltins\niter\nq\x01X\x03'\ + b'\x00\x00\x00abcq\x02\x85q\x03Rq\x04K\x00b\x86q\x05Rq\x06.' + t = pickle.loads(dump) + assert list(t) == ['a', 'b', 'c'] + + def test_iterator_pickle(self): + """Pickle and unpickle just a simple iterator.""" + import pickle + + i0 = iter("abc") + i1 = iter("abc") + + d = pickle.dumps(i1) + i1 = pickle.loads(d) + + assert list(i0) == list(i1) + + def test_reduce_ex(self): + """""" + f0 = filter(None, "abc") + f1 = filter(None, "abc") + + print(f0) + r = f1.__reduce_ex__(3) + # __reduce_ex__ doesn't return any arguments to the filter, so the next + # line will fail with TypeError. + f1 = r[0](*r[1]) + + assert list(f0) == list(f1) + + def test_nonfilter_pickle(self): + """Pickle and unpickle a filter with no filtering.""" + import pickle + + f0 = filter(None, "abc") + d = pickle.dumps(f0) + f1 = pickle.loads(d) + + def test_filter_pickle(self): + """Clone of the original test.""" + import pickle + + def check_iter_pickle(it, seq): + itorg = it + d = pickle.dumps(it) + it = pickle.loads(d) + assert type(itorg) == type(it) + assert list(it) == seq + + #test the iterator after dropping one from it + it = pickle.loads(d) + try: + next(it) + except StopIteration: + return + d = pickle.dumps(it) + it = pickle.loads(d) + assert list(it) == seq[1:] + + # We use ord instead of filter_char because the filter function has to + # be defined in the global scope for the picking to work and we can't + # do it from this test. 
+ f1 = filter(ord, "abcdeabcde") + f2 = filter(ord, "abcdeabcde") + check_iter_pickle(f1, list(f2)) From noreply at buildbot.pypy.org Sat Jul 26 15:54:09 2014 From: noreply at buildbot.pypy.org (Arjun Naik) Date: Sat, 26 Jul 2014 15:54:09 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Changed the _get_inttime() to raise OverflowError. Message-ID: <20140726135409.0D7F91C0588@cobra.cs.uni-duesseldorf.de> Author: Arjun Naik Branch: py3.3 Changeset: r72514:8ccff189b82f Date: 2014-07-26 15:11 +0200 http://bitbucket.org/pypy/pypy/changeset/8ccff189b82f/ Log: Changed the _get_inttime() to raise OverflowError. diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -364,7 +364,7 @@ # input doesn't fit in a time_t; call it an error. diff = seconds - rffi.cast(lltype.Float, t) if diff <= -1.0 or diff >= 1.0: - raise OperationError(space.w_ValueError, + raise OperationError(space.w_OverflowError, space.wrap("timestamp out of range for platform time_t")) return t diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -42,7 +42,7 @@ res = rctime.ctime(0) assert isinstance(res, str) rctime.ctime(rctime.time()) - raises(ValueError, rctime.ctime, 1E200) + raises(OverflowError, rctime.ctime, 1E200) raises(OverflowError, rctime.ctime, 10**900) for year in [-100, 100, 1000, 2000, 10000]: try: @@ -68,8 +68,8 @@ assert 0 <= (t1 - t0) < 1.2 t = rctime.time() assert rctime.gmtime(t) == rctime.gmtime(t) - raises(ValueError, rctime.gmtime, 2**64) - raises(ValueError, rctime.gmtime, -2**64) + raises(OverflowError, rctime.gmtime, 2**64) + raises(OverflowError, rctime.gmtime, -2**64) def test_localtime(self): import time as rctime From noreply at buildbot.pypy.org Sat Jul 26 15:54:10 2014 From: noreply at buildbot.pypy.org (Arjun Naik) Date: Sat, 26 Jul 
2014 15:54:10 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merge Message-ID: <20140726135410.4F99D1C0588@cobra.cs.uni-duesseldorf.de> Author: Arjun Naik Branch: py3.3 Changeset: r72515:2c99b4f912ad Date: 2014-07-26 15:52 +0200 http://bitbucket.org/pypy/pypy/changeset/2c99b4f912ad/ Log: Merge diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -317,7 +317,6 @@ # Order is significant! sys_flags = ( "debug", - "division_warning", "inspect", "interactive", "optimize", diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -711,6 +711,13 @@ if pred ^ self.reverse: return w_obj + def descr_reduce(self, space): + w_filter = space.getattr(space.getbuiltinmodule('builtins'), + space.wrap('filter')) + args = [space.w_None if self.no_predicate else self.w_predicate, + self.iterable] + return space.newtuple([w_filter, space.newtuple(args)]) + def W_Filter___new__(space, w_subtype, w_predicate, w_iterable): r = space.allocate_instance(W_Filter, w_subtype) @@ -722,6 +729,7 @@ __new__ = interp2app(W_Filter___new__), __iter__ = interp2app(W_Filter.iter_w), __next__ = interp2app(W_Filter.next_w), + __reduce__ = interp2app(W_Filter.descr_reduce), __doc__ = """\ Return an iterator yielding those items of iterable for which function(item) is true. 
If function is None, return the items that are true.""") diff --git a/pypy/module/__builtin__/test/test_filter_pickle.py b/pypy/module/__builtin__/test/test_filter_pickle.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_filter_pickle.py @@ -0,0 +1,72 @@ +class AppTestFilterPickle: + + def test_filter_unpickle(self): + """Test just the unpickling.""" + import pickle + + # This is filter(None, 'abc') pickled with cpython + dump = b'\x80\x03cbuiltins\nfilter\nq\x00Ncbuiltins\niter\nq\x01X\x03'\ + b'\x00\x00\x00abcq\x02\x85q\x03Rq\x04K\x00b\x86q\x05Rq\x06.' + t = pickle.loads(dump) + assert list(t) == ['a', 'b', 'c'] + + def test_iterator_pickle(self): + """Pickle and unpickle just a simple iterator.""" + import pickle + + i0 = iter("abc") + i1 = iter("abc") + + d = pickle.dumps(i1) + i1 = pickle.loads(d) + + assert list(i0) == list(i1) + + def test_reduce_ex(self): + """""" + f0 = filter(None, "abc") + f1 = filter(None, "abc") + + print(f0) + r = f1.__reduce_ex__(3) + # __reduce_ex__ doesn't return any arguments to the filter, so the next + # line will fail with TypeError. + f1 = r[0](*r[1]) + + assert list(f0) == list(f1) + + def test_nonfilter_pickle(self): + """Pickle and unpickle a filter with no filtering.""" + import pickle + + f0 = filter(None, "abc") + d = pickle.dumps(f0) + f1 = pickle.loads(d) + + def test_filter_pickle(self): + """Clone of the original test.""" + import pickle + + def check_iter_pickle(it, seq): + itorg = it + d = pickle.dumps(it) + it = pickle.loads(d) + assert type(itorg) == type(it) + assert list(it) == seq + + #test the iterator after dropping one from it + it = pickle.loads(d) + try: + next(it) + except StopIteration: + return + d = pickle.dumps(it) + it = pickle.loads(d) + assert list(it) == seq[1:] + + # We use ord instead of filter_char because the filter function has to + # be defined in the global scope for the picking to work and we can't + # do it from this test. 
+ f1 = filter(ord, "abcdeabcde") + f2 = filter(ord, "abcdeabcde") + check_iter_pickle(f1, list(f2)) diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -94,20 +94,19 @@ name = "sys.flags" debug = structseqfield(0) - division_warning = structseqfield(1) - inspect = structseqfield(2) - interactive = structseqfield(3) - optimize = structseqfield(4) - dont_write_bytecode = structseqfield(5) - no_user_site = structseqfield(6) - no_site = structseqfield(7) - ignore_environment = structseqfield(8) - verbose = structseqfield(9) - bytes_warning = structseqfield(10) - quiet = structseqfield(11) - hash_randomization = structseqfield(12) + inspect = structseqfield(1) + interactive = structseqfield(2) + optimize = structseqfield(3) + dont_write_bytecode = structseqfield(4) + no_user_site = structseqfield(5) + no_site = structseqfield(6) + ignore_environment = structseqfield(7) + verbose = structseqfield(8) + bytes_warning = structseqfield(9) + quiet = structseqfield(10) + hash_randomization = structseqfield(11) -null_sysflags = sysflags((0,)*13) +null_sysflags = sysflags((0,)*12) null__xoptions = {} diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -177,6 +177,16 @@ assert dco.eof == True dco.flush() assert dco.eof == True + + def test_decompress_eof_incomplete_stream(self): + import zlib + x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # 'foo' + dco = zlib.decompressobj() + assert dco.eof == False + dco.decompress(x[:-5]) + assert dco.eof == False + dco.flush() + assert dco.eof == False def test_decompress_incomplete_stream(self): import zlib From noreply at buildbot.pypy.org Sat Jul 26 16:00:49 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sat, 26 Jul 2014 16:00:49 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fixed pickling of iterators produced by map(func, iter, ...). 
Message-ID: <20140726140049.309011C1106@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72516:b30c4330c8b3 Date: 2014-07-26 15:57 +0200 http://bitbucket.org/pypy/pypy/changeset/b30c4330c8b3/ Log: Fixed pickling of iterators produced by map(func, iter, ...). diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -667,6 +667,12 @@ # the loop is out of the way of the JIT return [self.space.next(w_elem) for w_elem in self.iterators_w] + def descr_reduce(self, space): + w_map = space.getattr(space.getbuiltinmodule('builtins'), + space.wrap('map')) + args = [self.w_fun] + self.iterators_w + return space.newtuple([w_map, space.newtuple(args)]) + def W_Map___new__(space, w_subtype, w_fun, args_w): if len(args_w) == 0: @@ -681,6 +687,7 @@ __new__ = interp2app(W_Map___new__), __iter__ = interp2app(W_Map.iter_w), __next__ = interp2app(W_Map.next_w), + __reduce__ = interp2app(W_Map.descr_reduce), __doc__ = """\ Make an iterator that computes the function using arguments from each of the iterables. 
Stops when the shortest iterable is exhausted.""") diff --git a/pypy/module/__builtin__/test/test_map_pickle.py b/pypy/module/__builtin__/test/test_map_pickle.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_map_pickle.py @@ -0,0 +1,28 @@ +class AppTestMapPickle: + + def test_map_pickle(self): + """Pickle a map with one sequence.""" + import pickle + + def pickle_unpickle(obj): + d = pickle.dumps(obj) + return pickle.loads(d) + + m1 = map(ord, "Is this the real life?") + m1_ = pickle_unpickle(m1) + + assert list(m1) == list(m1_) + + def test_map2_pickle(self): + """Pickle a map with multiple sequences.""" + import pickle + + def pickle_unpickle(obj): + d = pickle.dumps(obj) + return pickle.loads(d) + + m1 = map(max, "abc", "def") + m1_ = pickle_unpickle(m1) + + assert list(m1) == list(m1_) + From noreply at buildbot.pypy.org Sat Jul 26 16:06:27 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sat, 26 Jul 2014 16:06:27 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Added a test for the situation when one of the iterators given to map has been already advanced (suggested by Armin Rigo). Message-ID: <20140726140627.B35601C1106@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72517:88e3c7e587b4 Date: 2014-07-26 16:05 +0200 http://bitbucket.org/pypy/pypy/changeset/88e3c7e587b4/ Log: Added a test for the situation when one of the iterators given to map has been already advanced (suggested by Armin Rigo). 
diff --git a/pypy/module/__builtin__/test/test_map_pickle.py b/pypy/module/__builtin__/test/test_map_pickle.py --- a/pypy/module/__builtin__/test/test_map_pickle.py +++ b/pypy/module/__builtin__/test/test_map_pickle.py @@ -26,3 +26,19 @@ assert list(m1) == list(m1_) + def test_map2_adv_pickle(self): + """If some iterator was advanced, the pickling preserves it.""" + import pickle + + def pickle_unpickle(obj): + d = pickle.dumps(obj) + return pickle.loads(d) + + s1 = iter("abc") + s2 = iter("defg") + next(s2) + + m1 = map(max, s1, s2) + m1_ = pickle_unpickle(m1) + + assert list(m1) == list(m1_) From noreply at buildbot.pypy.org Sat Jul 26 16:10:40 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 26 Jul 2014 16:10:40 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: RPython docs: Delete contents.rst and move toctree to index.rst. Message-ID: <20140726141040.9C8001C046A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72518:e8bf18c22cdb Date: 2014-07-26 15:23 +0200 http://bitbucket.org/pypy/pypy/changeset/e8bf18c22cdb/ Log: RPython docs: Delete contents.rst and move toctree to index.rst. diff --git a/rpython/doc/contents.rst b/rpython/doc/contents.rst deleted file mode 100644 --- a/rpython/doc/contents.rst +++ /dev/null @@ -1,19 +0,0 @@ -Table of Contents -=================================== - -Contents: - -.. toctree:: - :maxdepth: 2 - - getting-started - faq - rpython - rlib - rffi - translation - rtyper - garbage_collection - cli-backend - windows - diff --git a/rpython/doc/index.rst b/rpython/doc/index.rst --- a/rpython/doc/index.rst +++ b/rpython/doc/index.rst @@ -1,7 +1,23 @@ Welcome to RPython's documentation! =================================== -* :doc:`contents` +Table of Contents +----------------- + +.. 
toctree:: + :maxdepth: 1 + + getting-started + faq + rpython + rlib + rffi + translation + rtyper + garbage_collection + cli-backend + windows + Indices and tables ================== @@ -9,4 +25,3 @@ * :ref:`genindex` * :ref:`modindex` * :ref:`search` - From noreply at buildbot.pypy.org Sat Jul 26 16:10:41 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 26 Jul 2014 16:10:41 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Rename title of rpython.rst from "RPython" to "RPython Language". Message-ID: <20140726141041.CA3C51C046A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72519:79f23ae997e3 Date: 2014-07-26 16:10 +0200 http://bitbucket.org/pypy/pypy/changeset/79f23ae997e3/ Log: Rename title of rpython.rst from "RPython" to "RPython Language". diff --git a/rpython/doc/rpython.rst b/rpython/doc/rpython.rst --- a/rpython/doc/rpython.rst +++ b/rpython/doc/rpython.rst @@ -1,8 +1,8 @@ -RPython -======= +RPython Language +================ -RPython Definition ------------------- +Definition +---------- RPython is a restricted subset of Python that is amenable to static analysis. 
Although there are additions to the language and some things might surprisingly From noreply at buildbot.pypy.org Sat Jul 26 16:47:45 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:45 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: extend and enable test cases for still missing feature Message-ID: <20140726144745.68C551C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72520:e2b83c3965c0 Date: 2014-07-02 14:15 +0200 http://bitbucket.org/pypy/pypy/changeset/e2b83c3965c0/ Log: extend and enable test cases for still missing feature diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -76,25 +76,69 @@ ptr_old = self.stackroots[0] assert ptr_old.someInt == 100 - @py.test.mark.xfail(reason="Not implemented yet", run=False) - def test_pin_referenced_from_stackroot(self): - # XXX most likely somehow connected with `old_objects_pointing_to_young` - # (groggi) + def test_pin_referenced_from_stackroot_young(self): + # + # create both objects and reference the pinned one + # from the one that will be moved out of the + # nursery. root_ptr = self.malloc(S) next_ptr = self.malloc(S) self.write(root_ptr, 'next', next_ptr) self.stackroots.append(root_ptr) + # next_ptr.someInt = 100 - + root_ptr.someInt = 999 + # next_adr = llmemory.cast_ptr_to_adr(next_ptr) assert self.gc.pin(next_adr) + # + # in this step the 'root_ptr' object will be + # outside the nursery, pointing to the still + # young (because it's pinned) 'next_ptr'. 
+ self.gc.collect() + # + root_ptr = self.stackroots[0] + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr)) + assert self.gc.is_in_nursery(next_adr) + assert next_ptr.someInt == 100 + assert root_ptr.next == next_ptr + # + # now we remove the reference to the pinned object and do a collect + # to check if the pinned object was removed from nursery. + self.write(root_ptr, 'next', lltype.nullptr(S)) + self.gc.collect() + try: + # should fail as this was the pinned object that is now collected + next_ptr.someInt = 0 + except RuntimeError as ex: + assert "freed" in str(ex) + def test_old_points_to_pinned(self): + # Test if we handle the case that an old object can point + # to a pinned object and keeps the pinned object alive by + # that. + # + # create the old object that will point to a pinned object + old_ptr = self.malloc(S) + old_ptr.someInt = 999 + self.stackroots.append(old_ptr) self.gc.collect() - - assert self.gc.is_in_nursery(adr) - assert next_ptr.someInt == 100 - root_ptr = self.stackroots[0] - assert root_ptr.next == next_ptr + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(self.stackroots[0])) + # + # create the young pinned object and attach it to the old object + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 6 + assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) + self.write(self.stackroots[0], 'next', pinned_ptr) + # + # let's check if everything stays in place before/after a collection + assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) + self.gc.collect() + assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) + # + self.stackroots[0].next.someInt = 100 + self.gc.collect() + assert self.stackroots[0].next.someInt == 100 def test_pin_old(self): ptr = self.malloc(S) @@ -109,22 +153,24 @@ # ^^^ should not be possible, struct is already old and won't # move. 
- def test_old_points_to_pinned(self): + def test_groggi(self): # Test if we handle the case that an old object can point # to a pinned object and keeps the pinned object alive by # that. # + # create the young pinned object and attach it to the old object + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 6 + assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) + # # create the old object that will point to a pinned object old_ptr = self.malloc(S) + old_ptr.someInt = 999 self.stackroots.append(old_ptr) + self.write(self.stackroots[0], 'next', pinned_ptr) self.gc.collect() assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(self.stackroots[0])) # - # create the young pinned object and attach it to the old object - pinned_ptr = self.malloc(S) - assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) - self.write(self.stackroots[0], 'next', pinned_ptr) - # # let's check if everything stays in place before/after a collection assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) self.gc.collect() From noreply at buildbot.pypy.org Sat Jul 26 16:47:46 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:46 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: ups. remove test used to play around Message-ID: <20140726144746.9D5001C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72521:3fca2532c67d Date: 2014-07-02 14:43 +0200 http://bitbucket.org/pypy/pypy/changeset/3fca2532c67d/ Log: ups. remove test used to play around diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -153,33 +153,6 @@ # ^^^ should not be possible, struct is already old and won't # move. 
- def test_groggi(self): - # Test if we handle the case that an old object can point - # to a pinned object and keeps the pinned object alive by - # that. - # - # create the young pinned object and attach it to the old object - pinned_ptr = self.malloc(S) - pinned_ptr.someInt = 6 - assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) - # - # create the old object that will point to a pinned object - old_ptr = self.malloc(S) - old_ptr.someInt = 999 - self.stackroots.append(old_ptr) - self.write(self.stackroots[0], 'next', pinned_ptr) - self.gc.collect() - assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(self.stackroots[0])) - # - # let's check if everything stays in place before/after a collection - assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) - self.gc.collect() - assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) - # - self.stackroots[0].next.someInt = 100 - self.gc.collect() - assert self.stackroots[0].next.someInt == 100 - def test_pin_malloc_pin(self): first_ptr = self.malloc(S) first_ptr.someInt = 101 From noreply at buildbot.pypy.org Sat Jul 26 16:47:47 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:47 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: wip: implement use case "old object points to pinned object". Message-ID: <20140726144747.BA4B11C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72522:754f9578ee1a Date: 2014-07-02 15:15 +0200 http://bitbucket.org/pypy/pypy/changeset/754f9578ee1a/ Log: wip: implement use case "old object points to pinned object". This is very much a work in progress commit. Still a lot to check, refactor and document. diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -380,6 +380,8 @@ # Counter tracking how many pinned objects currently reside inside # the nursery. 
self.pinned_objects_in_nursery = 0 + # TTT XXX + self.pinned_objects_keep_alive = self.AddressStack() # # Allocate a nursery. In case of auto_nursery_size, start by # allocating a very small nursery, enough to do things like look @@ -1601,6 +1603,15 @@ # them or make them old. if self.young_rawmalloced_objects: self.free_young_rawmalloced_objects() + + # + # In case we have to keep some pinned objects alive, add them + # to 'surviving_pinned_objects'. Such a case comes up if an old + # object references a pinned young one (pinned object inside + # the nursery). See '_trace_drag_out' for more details. + if self.pinned_objects_keep_alive.non_empty(): + self.pinned_objects_keep_alive.foreach( + self._populate_to_surviving_pinned_objects, None) # # All live nursery objects are out of the nursery or pinned inside # the nursery. Create nursery barriers to protect the pinned object, @@ -1620,6 +1631,7 @@ # clear the arena between the last pinned object (or arena start) # and the pinned object pinned_obj_size = llarena.getfakearenaaddress(cur) - prev + debug_print("before A") llarena.arena_reset(prev, pinned_obj_size, 2) # # clean up object's flags @@ -1634,6 +1646,7 @@ (size_gc_header + self.get_size(obj)) # # reset everything after the last pinned object till the end of the arena + debug_print("before B") llarena.arena_reset(prev, self.nursery_real_top - prev, 0) # # We assume that there are only a few pinned objects. Therefore, if there @@ -1644,6 +1657,7 @@ if prev - self.nursery >= self.nursery_cleanup: nursery_barriers.append(prev) else: + debug_print("before C") llarena.arena_reset(prev, self.nursery_cleanup, 2) nursery_barriers.append(prev + self.nursery_cleanup) # @@ -1665,6 +1679,12 @@ # debug_stop("gc-minor") + def _populate_to_surviving_pinned_objects(self, obj, ignored): + self.surviving_pinned_objects.append(obj) + # we have to update the counter each time, because it was set to 0 + # at the start of the *minor* collection. 
The 'obj' survives + # *major* collections and therefore also multiple minor collections. + self.pinned_objects_in_nursery += 1 def collect_roots_in_nursery(self): # we don't need to trace prebuilt GcStructs during a minor collect: @@ -1676,6 +1696,7 @@ callback = IncrementalMiniMarkGC._trace_drag_out1_marking_phase else: callback = IncrementalMiniMarkGC._trace_drag_out1 + # self.root_walker.walk_roots( callback, # stack roots callback, # static in prebuilt non-gc @@ -1772,7 +1793,7 @@ """obj must not be in the nursery. This copies all the young objects it references out of the nursery. """ - self.trace(obj, self._trace_drag_out, None) + self.trace(obj, self._trace_drag_out, obj) def trace_and_drag_out_of_nursery_partial(self, obj, start, stop): """Like trace_and_drag_out_of_nursery(), but limited to the array @@ -1801,13 +1822,18 @@ if not self.header(obj).tid & GCFLAG_VISITED: self.more_objects_to_trace.append(obj) - def _trace_drag_out(self, root, ignored): + def _trace_drag_out(self, root, parent): + # 'parent' is only set if we visit a pinned objects that is referenced + # by an other object. This is used to handle pinned object specially in + # such a case. obj = root.address[0] #print '_trace_drag_out(%x: %r)' % (hash(obj.ptr._obj), obj) + debug_print('_trace_drag_out(%x: %r)' % (hash(obj.ptr._obj), obj)) # # If 'obj' is not in the nursery, nothing to change -- expect # that we must set GCFLAG_VISITED_RMY on young raw-malloced objects. 
if not self.is_in_nursery(obj): + debug_print("\tnot in nursery") # cache usage trade-off: I think that it is a better idea to # check if 'obj' is in young_rawmalloced_objects with an access # to this (small) dictionary, rather than risk a lot of cache @@ -1818,6 +1844,7 @@ self._visit_young_rawmalloced_object(obj) return # + debug_print("\tin nursery") size_gc_header = self.gcheaderbuilder.size_gc_header if self.header(obj).tid & (GCFLAG_HAS_SHADOW | GCFLAG_PINNED) == 0: # @@ -1841,14 +1868,29 @@ if hdr.tid & GCFLAG_VISITED: # already visited and keeping track of the object return + # + if parent: + # pinned object is referenced by an other object. + # We must keep the pinned object alive between + # major collections for the case that an old + # object references the young pinned object as + # the old one is only visited every major collection. + debug_print("-> added to keep alive:", obj) + debug_print("-> ... because of parent:", parent) + self.pinned_objects_keep_alive.append(obj - size_gc_header) + self.write_barrier(parent) + else: + debug_print("-> usual pinned object") + self.surviving_pinned_objects.append( + llarena.getfakearenaaddress(obj - size_gc_header)) + self.pinned_objects_in_nursery += 1 + # hdr.tid |= GCFLAG_VISITED + debug_print("\tstate:", self.gc_state) # XXX add additional checks for unsupported pinned objects (groggi) # XXX implement unsupported object types with pinning ll_assert(not self.header(obj).tid & GCFLAG_HAS_CARDS, "pinned object with GCFLAG_HAS_CARDS not supported") - self.surviving_pinned_objects.append( - llarena.getfakearenaaddress(obj - size_gc_header)) - self.pinned_objects_in_nursery += 1 return else: # First visit to an object that has already a shadow. 
@@ -2017,6 +2059,8 @@ self.more_objects_to_trace = self.AddressStack() #END SCANNING elif self.gc_state == STATE_MARKING: + self.pinned_objects_keep_alive.delete() + self.pinned_objects_keep_alive = self.AddressStack() debug_print("number of objects to mark", self.objects_to_trace.length(), "plus", @@ -2144,6 +2188,8 @@ else: pass #XXX which exception to raise here. Should be unreachable. + debug_print("BLUB:", self.pinned_objects_keep_alive.length()) + debug_print("stopping, now in gc state: ", GC_STATES[self.gc_state]) debug_stop("gc-collect-step") @@ -2210,7 +2256,7 @@ # # Add the roots from the other sources. self.root_walker.walk_roots( - IncrementalMiniMarkGC._collect_ref_stk, # stack roots + IncrementalMiniMarkGC._collect_ref_stk2, # stack roots IncrementalMiniMarkGC._collect_ref_stk, # static in prebuilt non-gc structures None) # we don't need the static in all prebuilt gc objects # @@ -2228,6 +2274,17 @@ def _collect_obj(obj, objects_to_trace): objects_to_trace.append(obj) + def _collect_ref_stk2(self, root): + obj = root.address[0] + llop.debug_nonnull_pointer(lltype.Void, obj) + hdr = self.header(obj) + if hdr.tid & GCFLAG_PINNED: + # XXX think really hard about this (groggi): + # is a pinned object found on stack already + # being traced? + return + self.objects_to_trace.append(obj) + def _collect_ref_stk(self, root): obj = root.address[0] llop.debug_nonnull_pointer(lltype.Void, obj) @@ -2251,6 +2308,7 @@ return size_to_track def visit(self, obj): + debug_print("visit:", obj) # # 'obj' is a live object. Check GCFLAG_VISITED to know if we # have already seen it before. @@ -2261,9 +2319,15 @@ # flag set, then the object should be in 'prebuilt_root_objects', # and the GCFLAG_VISITED will be reset at the end of the # collection. + # + # XXX pinned object case doc (groggi) hdr = self.header(obj) - if hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS | GCFLAG_PINNED): - # XXX ^^^ update doc in any way because of GCFLAG_PINNED addition? 
(groggi) + if hdr.tid & GCFLAG_PINNED: + if self.gc_state == STATE_MARKING: + debug_print("STATE MARKING AND PINNED, ADD") + self.pinned_objects_keep_alive.append(obj - self.gcheaderbuilder.size_gc_header) + return 0 + elif hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS): return 0 # # It's the first time. We set the flag VISITED. The trick is From noreply at buildbot.pypy.org Sat Jul 26 16:47:48 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:48 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: modify tests for new pinning implementation Message-ID: <20140726144748.DD31F1C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72523:0bbc22db2b13 Date: 2014-07-13 12:42 +0200 http://bitbucket.org/pypy/pypy/changeset/0bbc22db2b13/ Log: modify tests for new pinning implementation pinned objects will stay inside the nursery as long as a major collection diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -46,7 +46,8 @@ class TestIncminimark(PinningGCTest): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass - def test_simple_pin(self): + def test_simple_pin_stack(self): + # create object, pin it and point from stackroots to it ptr = self.malloc(S) ptr.someInt = 100 self.stackroots.append(ptr) @@ -59,26 +60,33 @@ assert self.gc.is_in_nursery(adr) assert ptr.someInt == 100 - def test_simple_pin_unpin(self): + def test_simple_pin_unpin_stack(self): ptr = self.malloc(S) ptr.someInt = 100 + self.stackroots.append(ptr) + adr = llmemory.cast_ptr_to_adr(ptr) - # check if pin worked assert self.gc.pin(adr) + self.gc.collect() + assert self.gc.is_in_nursery(adr) assert ptr.someInt == 100 + # unpin and check if object is gone from nursery self.gc.unpin(adr) self.gc.collect() py.test.raises(RuntimeError, 
'ptr.someInt') + + # check if we object is still accessible ptr_old = self.stackroots[0] + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(ptr_old)) assert ptr_old.someInt == 100 def test_pin_referenced_from_stackroot_young(self): # - # create both objects and reference the pinned one + # create two objects and reference the pinned one # from the one that will be moved out of the # nursery. root_ptr = self.malloc(S) @@ -114,7 +122,7 @@ assert "freed" in str(ex) def test_old_points_to_pinned(self): - # Test if we handle the case that an old object can point + # Test if we handle the case that an already old object can point # to a pinned object and keeps the pinned object alive by # that. # @@ -123,7 +131,8 @@ old_ptr.someInt = 999 self.stackroots.append(old_ptr) self.gc.collect() - assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(self.stackroots[0])) + assert not self.gc.is_in_nursery( + llmemory.cast_ptr_to_adr(self.stackroots[0])) # # create the young pinned object and attach it to the old object pinned_ptr = self.malloc(S) @@ -178,11 +187,11 @@ ptr.someInt = 100 assert self.gc.pin(adr) self.gc.id(ptr) # allocate shadow - self.gc.minor_collection() + self.gc.collect() assert self.gc.is_in_nursery(adr) assert ptr.someInt == 100 self.gc.unpin(adr) - self.gc.minor_collection() # move to shadow + self.gc.collect() # move to shadow adr = llmemory.cast_ptr_to_adr(self.stackroots[0]) assert not self.gc.is_in_nursery(adr) @@ -193,11 +202,11 @@ ptr.someInt = 100 assert self.gc.pin(adr) self.gc.identityhash(ptr) # allocate shadow - self.gc.minor_collection() + self.gc.collect() assert self.gc.is_in_nursery(adr) assert ptr.someInt == 100 self.gc.unpin(adr) - self.gc.minor_collection() # move to shadow + self.gc.collect() # move to shadow adr = llmemory.cast_ptr_to_adr(self.stackroots[0]) assert not self.gc.is_in_nursery(adr) @@ -270,7 +279,7 @@ # +- nursery_free # +- nursery_top # - self.gc.minor_collection() + self.gc.collect() assert 
self.gc.nursery_free == self.gc.nursery_top assert self.gc.nursery_top == self.gc.nursery @@ -311,7 +320,7 @@ # +- nursery_free # self.gc.unpin(adr1) - self.gc.minor_collection() + self.gc.collect() assert self.gc.nursery_free == self.gc.nursery assert self.gc.nursery_top > self.gc.nursery_free @@ -353,7 +362,7 @@ # self.gc.unpin(adr1) self.gc.unpin(adr2) - self.gc.minor_collection() + self.gc.collect() assert self.gc.nursery_free == self.gc.nursery assert self.gc.nursery_free < self.gc.nursery_top @@ -413,7 +422,7 @@ self.gc.unpin(adr1) self.gc.unpin(adr2) self.gc.unpin(adr3) - self.gc.minor_collection() + self.gc.collect() assert self.gc.nursery_free == self.gc.nursery # the following assert is important: make sure that @@ -438,7 +447,7 @@ assert self.gc.pin(llmemory.cast_ptr_to_adr(ptr_stackroot_2)) assert self.gc.pinned_objects_in_nursery == 3 - self.gc.minor_collection() + self.gc.collect() # now the one not on the stack should be gone. assert self.gc.pinned_objects_in_nursery == 2 assert ptr_stackroot_1.someInt == 100 @@ -466,7 +475,7 @@ # there were some bugs regarding the 'arena_reset()' calls at # the end of the minor collection. This test brought them to light. 
self.fill_nursery_with_pinned_objects() - self.gc.minor_collection() + self.gc.collect() def test_pinning_limit(self): for instance_nr in xrange(self.gc.max_number_of_pinned_objects): From noreply at buildbot.pypy.org Sat Jul 26 16:47:50 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:50 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: additional test to cover more cases of referencing a pinned object Message-ID: <20140726144750.163361C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72524:513744aa7a90 Date: 2014-07-13 13:05 +0200 http://bitbucket.org/pypy/pypy/changeset/513744aa7a90/ Log: additional test to cover more cases of referencing a pinned object diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -149,6 +149,29 @@ self.gc.collect() assert self.stackroots[0].next.someInt == 100 + def test_old_and_stackroots_point_to_pinned(self): + # In this test case we point to a pinned object from an old object + # *and* from the stackroots + old_ptr = self.malloc(S) + old_ptr.someInt = 999 + self.stackroots.append(old_ptr) + self.gc.collect() # old_ptr is now old + old_ptr = self.stackroots[0] + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr)) + + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 111 + assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) + + self.stackroots.append(pinned_ptr) + self.write(old_ptr, 'next', pinned_ptr) + + self.gc.collect() + # done with preparation. 
do some basic checks + assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) + assert pinned_ptr.someInt == 111 + assert self.stackroots[0].next == pinned_ptr + def test_pin_old(self): ptr = self.malloc(S) ptr.someInt = 100 From noreply at buildbot.pypy.org Sat Jul 26 16:47:51 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:51 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: generalize previously added test and use it to get two tests Message-ID: <20140726144751.280BB1C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72525:4aa9c7ae56dd Date: 2014-07-13 13:13 +0200 http://bitbucket.org/pypy/pypy/changeset/4aa9c7ae56dd/ Log: generalize previously added test and use it to get two tests previouse test added in 513744aa7a905300b7389cd1c3724c0150762ee4 diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -149,22 +149,25 @@ self.gc.collect() assert self.stackroots[0].next.someInt == 100 - def test_old_and_stackroots_point_to_pinned(self): + def not_pinned_and_stackroots_point_to_pinned(self, make_old): # In this test case we point to a pinned object from an old object # *and* from the stackroots - old_ptr = self.malloc(S) - old_ptr.someInt = 999 - self.stackroots.append(old_ptr) - self.gc.collect() # old_ptr is now old - old_ptr = self.stackroots[0] - assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr)) + obj_ptr = self.malloc(S) + obj_ptr.someInt = 999 + self.stackroots.append(obj_ptr) + if make_old: + self.gc.collect() # old_ptr is now old + obj_ptr = self.stackroots[0] + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(obj_ptr)) + else: + assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(obj_ptr)) pinned_ptr = self.malloc(S) pinned_ptr.someInt = 111 assert 
self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) self.stackroots.append(pinned_ptr) - self.write(old_ptr, 'next', pinned_ptr) + self.write(obj_ptr, 'next', pinned_ptr) self.gc.collect() # done with preparation. do some basic checks @@ -172,6 +175,12 @@ assert pinned_ptr.someInt == 111 assert self.stackroots[0].next == pinned_ptr + def test_old_and_stackroots_point_to_pinned(self): + self.not_pinned_and_stackroots_point_to_pinned(make_old=True) + + def test_young_and_stackroots_point_to_pinned(self): + self.not_pinned_and_stackroots_point_to_pinned(make_old=False) + def test_pin_old(self): ptr = self.malloc(S) ptr.someInt = 100 From noreply at buildbot.pypy.org Sat Jul 26 16:47:52 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:52 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: unnecessary comment removed Message-ID: <20140726144752.9A61A1C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72526:5868377b4b39 Date: 2014-07-13 13:48 +0200 http://bitbucket.org/pypy/pypy/changeset/5868377b4b39/ Log: unnecessary comment removed diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -156,7 +156,7 @@ obj_ptr.someInt = 999 self.stackroots.append(obj_ptr) if make_old: - self.gc.collect() # old_ptr is now old + self.gc.collect() obj_ptr = self.stackroots[0] assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(obj_ptr)) else: From noreply at buildbot.pypy.org Sat Jul 26 16:47:53 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:53 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: Backout changeset 754f9578ee1aee71b29f8a4ea6b23a60f89be3c3 Message-ID: <20140726144753.B45231C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: 
r72527:a4e98d2f48ff Date: 2014-07-13 14:23 +0200 http://bitbucket.org/pypy/pypy/changeset/a4e98d2f48ff/ Log: Backout changeset 754f9578ee1aee71b29f8a4ea6b23a60f89be3c3 diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -380,8 +380,6 @@ # Counter tracking how many pinned objects currently reside inside # the nursery. self.pinned_objects_in_nursery = 0 - # TTT XXX - self.pinned_objects_keep_alive = self.AddressStack() # # Allocate a nursery. In case of auto_nursery_size, start by # allocating a very small nursery, enough to do things like look @@ -1603,15 +1601,6 @@ # them or make them old. if self.young_rawmalloced_objects: self.free_young_rawmalloced_objects() - - # - # In case we have to keep some pinned objects alive, add them - # to 'surviving_pinned_objects'. Such a case comes up if an old - # object references a pinned young one (pinned object inside - # the nursery). See '_trace_drag_out' for more details. - if self.pinned_objects_keep_alive.non_empty(): - self.pinned_objects_keep_alive.foreach( - self._populate_to_surviving_pinned_objects, None) # # All live nursery objects are out of the nursery or pinned inside # the nursery. Create nursery barriers to protect the pinned object, @@ -1631,7 +1620,6 @@ # clear the arena between the last pinned object (or arena start) # and the pinned object pinned_obj_size = llarena.getfakearenaaddress(cur) - prev - debug_print("before A") llarena.arena_reset(prev, pinned_obj_size, 2) # # clean up object's flags @@ -1646,7 +1634,6 @@ (size_gc_header + self.get_size(obj)) # # reset everything after the last pinned object till the end of the arena - debug_print("before B") llarena.arena_reset(prev, self.nursery_real_top - prev, 0) # # We assume that there are only a few pinned objects. 
Therefore, if there @@ -1657,7 +1644,6 @@ if prev - self.nursery >= self.nursery_cleanup: nursery_barriers.append(prev) else: - debug_print("before C") llarena.arena_reset(prev, self.nursery_cleanup, 2) nursery_barriers.append(prev + self.nursery_cleanup) # @@ -1679,12 +1665,6 @@ # debug_stop("gc-minor") - def _populate_to_surviving_pinned_objects(self, obj, ignored): - self.surviving_pinned_objects.append(obj) - # we have to update the counter each time, because it was set to 0 - # at the start of the *minor* collection. The 'obj' survives - # *major* collections and therefore also multiple minor collections. - self.pinned_objects_in_nursery += 1 def collect_roots_in_nursery(self): # we don't need to trace prebuilt GcStructs during a minor collect: @@ -1696,7 +1676,6 @@ callback = IncrementalMiniMarkGC._trace_drag_out1_marking_phase else: callback = IncrementalMiniMarkGC._trace_drag_out1 - # self.root_walker.walk_roots( callback, # stack roots callback, # static in prebuilt non-gc @@ -1793,7 +1772,7 @@ """obj must not be in the nursery. This copies all the young objects it references out of the nursery. """ - self.trace(obj, self._trace_drag_out, obj) + self.trace(obj, self._trace_drag_out, None) def trace_and_drag_out_of_nursery_partial(self, obj, start, stop): """Like trace_and_drag_out_of_nursery(), but limited to the array @@ -1822,18 +1801,13 @@ if not self.header(obj).tid & GCFLAG_VISITED: self.more_objects_to_trace.append(obj) - def _trace_drag_out(self, root, parent): - # 'parent' is only set if we visit a pinned objects that is referenced - # by an other object. This is used to handle pinned object specially in - # such a case. + def _trace_drag_out(self, root, ignored): obj = root.address[0] #print '_trace_drag_out(%x: %r)' % (hash(obj.ptr._obj), obj) - debug_print('_trace_drag_out(%x: %r)' % (hash(obj.ptr._obj), obj)) # # If 'obj' is not in the nursery, nothing to change -- expect # that we must set GCFLAG_VISITED_RMY on young raw-malloced objects. 
if not self.is_in_nursery(obj): - debug_print("\tnot in nursery") # cache usage trade-off: I think that it is a better idea to # check if 'obj' is in young_rawmalloced_objects with an access # to this (small) dictionary, rather than risk a lot of cache @@ -1844,7 +1818,6 @@ self._visit_young_rawmalloced_object(obj) return # - debug_print("\tin nursery") size_gc_header = self.gcheaderbuilder.size_gc_header if self.header(obj).tid & (GCFLAG_HAS_SHADOW | GCFLAG_PINNED) == 0: # @@ -1868,29 +1841,14 @@ if hdr.tid & GCFLAG_VISITED: # already visited and keeping track of the object return - # - if parent: - # pinned object is referenced by an other object. - # We must keep the pinned object alive between - # major collections for the case that an old - # object references the young pinned object as - # the old one is only visited every major collection. - debug_print("-> added to keep alive:", obj) - debug_print("-> ... because of parent:", parent) - self.pinned_objects_keep_alive.append(obj - size_gc_header) - self.write_barrier(parent) - else: - debug_print("-> usual pinned object") - self.surviving_pinned_objects.append( - llarena.getfakearenaaddress(obj - size_gc_header)) - self.pinned_objects_in_nursery += 1 - # hdr.tid |= GCFLAG_VISITED - debug_print("\tstate:", self.gc_state) # XXX add additional checks for unsupported pinned objects (groggi) # XXX implement unsupported object types with pinning ll_assert(not self.header(obj).tid & GCFLAG_HAS_CARDS, "pinned object with GCFLAG_HAS_CARDS not supported") + self.surviving_pinned_objects.append( + llarena.getfakearenaaddress(obj - size_gc_header)) + self.pinned_objects_in_nursery += 1 return else: # First visit to an object that has already a shadow. 
@@ -2059,8 +2017,6 @@ self.more_objects_to_trace = self.AddressStack() #END SCANNING elif self.gc_state == STATE_MARKING: - self.pinned_objects_keep_alive.delete() - self.pinned_objects_keep_alive = self.AddressStack() debug_print("number of objects to mark", self.objects_to_trace.length(), "plus", @@ -2188,8 +2144,6 @@ else: pass #XXX which exception to raise here. Should be unreachable. - debug_print("BLUB:", self.pinned_objects_keep_alive.length()) - debug_print("stopping, now in gc state: ", GC_STATES[self.gc_state]) debug_stop("gc-collect-step") @@ -2256,7 +2210,7 @@ # # Add the roots from the other sources. self.root_walker.walk_roots( - IncrementalMiniMarkGC._collect_ref_stk2, # stack roots + IncrementalMiniMarkGC._collect_ref_stk, # stack roots IncrementalMiniMarkGC._collect_ref_stk, # static in prebuilt non-gc structures None) # we don't need the static in all prebuilt gc objects # @@ -2274,17 +2228,6 @@ def _collect_obj(obj, objects_to_trace): objects_to_trace.append(obj) - def _collect_ref_stk2(self, root): - obj = root.address[0] - llop.debug_nonnull_pointer(lltype.Void, obj) - hdr = self.header(obj) - if hdr.tid & GCFLAG_PINNED: - # XXX think really hard about this (groggi): - # is a pinned object found on stack already - # being traced? - return - self.objects_to_trace.append(obj) - def _collect_ref_stk(self, root): obj = root.address[0] llop.debug_nonnull_pointer(lltype.Void, obj) @@ -2308,7 +2251,6 @@ return size_to_track def visit(self, obj): - debug_print("visit:", obj) # # 'obj' is a live object. Check GCFLAG_VISITED to know if we # have already seen it before. @@ -2319,15 +2261,9 @@ # flag set, then the object should be in 'prebuilt_root_objects', # and the GCFLAG_VISITED will be reset at the end of the # collection. 
- # - # XXX pinned object case doc (groggi) hdr = self.header(obj) - if hdr.tid & GCFLAG_PINNED: - if self.gc_state == STATE_MARKING: - debug_print("STATE MARKING AND PINNED, ADD") - self.pinned_objects_keep_alive.append(obj - self.gcheaderbuilder.size_gc_header) - return 0 - elif hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS): + if hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS | GCFLAG_PINNED): + # XXX ^^^ update doc in any way because of GCFLAG_PINNED addition? (groggi) return 0 # # It's the first time. We set the flag VISITED. The trick is From noreply at buildbot.pypy.org Sat Jul 26 16:47:54 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:54 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: revert 754f9578ee1aee71b29f8a4ea6b23a60f89be3c3 Message-ID: <20140726144754.CAD501C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72528:cd9ec650486f Date: 2014-07-13 14:25 +0200 http://bitbucket.org/pypy/pypy/changeset/cd9ec650486f/ Log: revert 754f9578ee1aee71b29f8a4ea6b23a60f89be3c3 time to think first and implement afterwards diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -380,8 +380,6 @@ # Counter tracking how many pinned objects currently reside inside # the nursery. self.pinned_objects_in_nursery = 0 - # TTT XXX - self.pinned_objects_keep_alive = self.AddressStack() # # Allocate a nursery. In case of auto_nursery_size, start by # allocating a very small nursery, enough to do things like look @@ -1603,15 +1601,6 @@ # them or make them old. if self.young_rawmalloced_objects: self.free_young_rawmalloced_objects() - - # - # In case we have to keep some pinned objects alive, add them - # to 'surviving_pinned_objects'. Such a case comes up if an old - # object references a pinned young one (pinned object inside - # the nursery). 
See '_trace_drag_out' for more details. - if self.pinned_objects_keep_alive.non_empty(): - self.pinned_objects_keep_alive.foreach( - self._populate_to_surviving_pinned_objects, None) # # All live nursery objects are out of the nursery or pinned inside # the nursery. Create nursery barriers to protect the pinned object, @@ -1631,7 +1620,6 @@ # clear the arena between the last pinned object (or arena start) # and the pinned object pinned_obj_size = llarena.getfakearenaaddress(cur) - prev - debug_print("before A") llarena.arena_reset(prev, pinned_obj_size, 2) # # clean up object's flags @@ -1646,7 +1634,6 @@ (size_gc_header + self.get_size(obj)) # # reset everything after the last pinned object till the end of the arena - debug_print("before B") llarena.arena_reset(prev, self.nursery_real_top - prev, 0) # # We assume that there are only a few pinned objects. Therefore, if there @@ -1657,7 +1644,6 @@ if prev - self.nursery >= self.nursery_cleanup: nursery_barriers.append(prev) else: - debug_print("before C") llarena.arena_reset(prev, self.nursery_cleanup, 2) nursery_barriers.append(prev + self.nursery_cleanup) # @@ -1679,12 +1665,6 @@ # debug_stop("gc-minor") - def _populate_to_surviving_pinned_objects(self, obj, ignored): - self.surviving_pinned_objects.append(obj) - # we have to update the counter each time, because it was set to 0 - # at the start of the *minor* collection. The 'obj' survives - # *major* collections and therefore also multiple minor collections. - self.pinned_objects_in_nursery += 1 def collect_roots_in_nursery(self): # we don't need to trace prebuilt GcStructs during a minor collect: @@ -1696,7 +1676,6 @@ callback = IncrementalMiniMarkGC._trace_drag_out1_marking_phase else: callback = IncrementalMiniMarkGC._trace_drag_out1 - # self.root_walker.walk_roots( callback, # stack roots callback, # static in prebuilt non-gc @@ -1793,7 +1772,7 @@ """obj must not be in the nursery. This copies all the young objects it references out of the nursery. 
""" - self.trace(obj, self._trace_drag_out, obj) + self.trace(obj, self._trace_drag_out, None) def trace_and_drag_out_of_nursery_partial(self, obj, start, stop): """Like trace_and_drag_out_of_nursery(), but limited to the array @@ -1822,18 +1801,13 @@ if not self.header(obj).tid & GCFLAG_VISITED: self.more_objects_to_trace.append(obj) - def _trace_drag_out(self, root, parent): - # 'parent' is only set if we visit a pinned objects that is referenced - # by an other object. This is used to handle pinned object specially in - # such a case. + def _trace_drag_out(self, root, ignored): obj = root.address[0] #print '_trace_drag_out(%x: %r)' % (hash(obj.ptr._obj), obj) - debug_print('_trace_drag_out(%x: %r)' % (hash(obj.ptr._obj), obj)) # # If 'obj' is not in the nursery, nothing to change -- expect # that we must set GCFLAG_VISITED_RMY on young raw-malloced objects. if not self.is_in_nursery(obj): - debug_print("\tnot in nursery") # cache usage trade-off: I think that it is a better idea to # check if 'obj' is in young_rawmalloced_objects with an access # to this (small) dictionary, rather than risk a lot of cache @@ -1844,7 +1818,6 @@ self._visit_young_rawmalloced_object(obj) return # - debug_print("\tin nursery") size_gc_header = self.gcheaderbuilder.size_gc_header if self.header(obj).tid & (GCFLAG_HAS_SHADOW | GCFLAG_PINNED) == 0: # @@ -1868,29 +1841,14 @@ if hdr.tid & GCFLAG_VISITED: # already visited and keeping track of the object return - # - if parent: - # pinned object is referenced by an other object. - # We must keep the pinned object alive between - # major collections for the case that an old - # object references the young pinned object as - # the old one is only visited every major collection. - debug_print("-> added to keep alive:", obj) - debug_print("-> ... 
because of parent:", parent) - self.pinned_objects_keep_alive.append(obj - size_gc_header) - self.write_barrier(parent) - else: - debug_print("-> usual pinned object") - self.surviving_pinned_objects.append( - llarena.getfakearenaaddress(obj - size_gc_header)) - self.pinned_objects_in_nursery += 1 - # hdr.tid |= GCFLAG_VISITED - debug_print("\tstate:", self.gc_state) # XXX add additional checks for unsupported pinned objects (groggi) # XXX implement unsupported object types with pinning ll_assert(not self.header(obj).tid & GCFLAG_HAS_CARDS, "pinned object with GCFLAG_HAS_CARDS not supported") + self.surviving_pinned_objects.append( + llarena.getfakearenaaddress(obj - size_gc_header)) + self.pinned_objects_in_nursery += 1 return else: # First visit to an object that has already a shadow. @@ -2059,8 +2017,6 @@ self.more_objects_to_trace = self.AddressStack() #END SCANNING elif self.gc_state == STATE_MARKING: - self.pinned_objects_keep_alive.delete() - self.pinned_objects_keep_alive = self.AddressStack() debug_print("number of objects to mark", self.objects_to_trace.length(), "plus", @@ -2188,8 +2144,6 @@ else: pass #XXX which exception to raise here. Should be unreachable. - debug_print("BLUB:", self.pinned_objects_keep_alive.length()) - debug_print("stopping, now in gc state: ", GC_STATES[self.gc_state]) debug_stop("gc-collect-step") @@ -2256,7 +2210,7 @@ # # Add the roots from the other sources. 
self.root_walker.walk_roots( - IncrementalMiniMarkGC._collect_ref_stk2, # stack roots + IncrementalMiniMarkGC._collect_ref_stk, # stack roots IncrementalMiniMarkGC._collect_ref_stk, # static in prebuilt non-gc structures None) # we don't need the static in all prebuilt gc objects # @@ -2274,17 +2228,6 @@ def _collect_obj(obj, objects_to_trace): objects_to_trace.append(obj) - def _collect_ref_stk2(self, root): - obj = root.address[0] - llop.debug_nonnull_pointer(lltype.Void, obj) - hdr = self.header(obj) - if hdr.tid & GCFLAG_PINNED: - # XXX think really hard about this (groggi): - # is a pinned object found on stack already - # being traced? - return - self.objects_to_trace.append(obj) - def _collect_ref_stk(self, root): obj = root.address[0] llop.debug_nonnull_pointer(lltype.Void, obj) @@ -2308,7 +2251,6 @@ return size_to_track def visit(self, obj): - debug_print("visit:", obj) # # 'obj' is a live object. Check GCFLAG_VISITED to know if we # have already seen it before. @@ -2319,15 +2261,9 @@ # flag set, then the object should be in 'prebuilt_root_objects', # and the GCFLAG_VISITED will be reset at the end of the # collection. - # - # XXX pinned object case doc (groggi) hdr = self.header(obj) - if hdr.tid & GCFLAG_PINNED: - if self.gc_state == STATE_MARKING: - debug_print("STATE MARKING AND PINNED, ADD") - self.pinned_objects_keep_alive.append(obj - self.gcheaderbuilder.size_gc_header) - return 0 - elif hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS): + if hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS | GCFLAG_PINNED): + # XXX ^^^ update doc in any way because of GCFLAG_PINNED addition? (groggi) return 0 # # It's the first time. We set the flag VISITED. The trick is From noreply at buildbot.pypy.org Sat Jul 26 16:47:55 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:55 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: generalize tests. 
tests pinning for minor collections and major ones now Message-ID: <20140726144755.DD2201C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72529:daf002afa713 Date: 2014-07-13 14:52 +0200 http://bitbucket.org/pypy/pypy/changeset/daf002afa713/ Log: generalize tests. tests pinning for minor collections and major ones now diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -46,7 +46,7 @@ class TestIncminimark(PinningGCTest): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass - def test_simple_pin_stack(self): + def simple_pin_stack(self, collect_func): # create object, pin it and point from stackroots to it ptr = self.malloc(S) ptr.someInt = 100 @@ -55,12 +55,18 @@ adr = llmemory.cast_ptr_to_adr(ptr) assert self.gc.pin(adr) - self.gc.collect() + collect_func() assert self.gc.is_in_nursery(adr) assert ptr.someInt == 100 - def test_simple_pin_unpin_stack(self): + def test_simple_pin_stack_full_collect(self): + self.simple_pin_stack(self.gc.collect) + + def test_simple_pin_stack_minor_collect(self): + self.simple_pin_stack(self.gc.minor_collection) + + def simple_pin_unpin_stack(self, collect_func): ptr = self.malloc(S) ptr.someInt = 100 @@ -69,14 +75,14 @@ adr = llmemory.cast_ptr_to_adr(ptr) assert self.gc.pin(adr) - self.gc.collect() + collect_func() assert self.gc.is_in_nursery(adr) assert ptr.someInt == 100 # unpin and check if object is gone from nursery self.gc.unpin(adr) - self.gc.collect() + collect_func() py.test.raises(RuntimeError, 'ptr.someInt') # check if we object is still accessible @@ -84,6 +90,12 @@ assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(ptr_old)) assert ptr_old.someInt == 100 + def test_simple_pin_unpin_stack_full_collect(self): + self.simple_pin_unpin_stack(self.gc.collect) + + def 
test_simple_pin_unpin_stack_minor_collect(self): + self.simple_pin_unpin_stack(self.gc.minor_collection) + def test_pin_referenced_from_stackroot_young(self): # # create two objects and reference the pinned one @@ -150,7 +162,7 @@ assert self.stackroots[0].next.someInt == 100 def not_pinned_and_stackroots_point_to_pinned(self, make_old): - # In this test case we point to a pinned object from an old object + # In this test case we point to a pinned object from an (old) object # *and* from the stackroots obj_ptr = self.malloc(S) obj_ptr.someInt = 999 From noreply at buildbot.pypy.org Sat Jul 26 16:47:57 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:57 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: shadow tests now tested with minor and full major collections. Message-ID: <20140726144757.0AFF11C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72530:2b62d7537a85 Date: 2014-07-26 15:46 +0200 http://bitbucket.org/pypy/pypy/changeset/2b62d7537a85/ Log: shadow tests now tested with minor and full major collections. 
diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -224,36 +224,48 @@ assert first_ptr.someInt == 101 assert second_ptr.someInt == 102 - def test_pin_shadow_1(self): + def pin_shadow_1(self, collect_func): ptr = self.malloc(S) adr = llmemory.cast_ptr_to_adr(ptr) self.stackroots.append(ptr) ptr.someInt = 100 assert self.gc.pin(adr) self.gc.id(ptr) # allocate shadow - self.gc.collect() + collect_func() assert self.gc.is_in_nursery(adr) assert ptr.someInt == 100 self.gc.unpin(adr) - self.gc.collect() # move to shadow + collect_func() # move to shadow adr = llmemory.cast_ptr_to_adr(self.stackroots[0]) assert not self.gc.is_in_nursery(adr) - def test_pin_shadow_2(self): + def test_pin_shadow_1_minor(self): + self.pin_shadow_1(self.gc.minor_collection) + + def test_pin_shadow_1_full(self): + self.pin_shadow_1(self.gc.collect) + + def pin_shadow_2(self, collect_func): ptr = self.malloc(S) adr = llmemory.cast_ptr_to_adr(ptr) self.stackroots.append(ptr) ptr.someInt = 100 assert self.gc.pin(adr) self.gc.identityhash(ptr) # allocate shadow - self.gc.collect() + collect_func() assert self.gc.is_in_nursery(adr) assert ptr.someInt == 100 self.gc.unpin(adr) - self.gc.collect() # move to shadow + collect_func() # move to shadow adr = llmemory.cast_ptr_to_adr(self.stackroots[0]) assert not self.gc.is_in_nursery(adr) + def test_pin_shadow_2_minor(self): + self.pin_shadow_2(self.gc.minor_collection) + + def test_pin_shadow_2_full(self): + self.pin_shadow_2(self.gc.collect) + def test_pin_nursery_top_scenario1(self): ptr1 = self.malloc(S) adr1 = llmemory.cast_ptr_to_adr(ptr1) From noreply at buildbot.pypy.org Sat Jul 26 16:47:58 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:58 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: add test to make sure not referenced pinned objects 
are collected Message-ID: <20140726144758.1E4FB1C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72531:1f3c0f2801cb Date: 2014-07-26 15:48 +0200 http://bitbucket.org/pypy/pypy/changeset/1f3c0f2801cb/ Log: add test to make sure not referenced pinned objects are collected Basically we have an old object pointing to a young pinned one. The moment the old object is not used anymore (not referenced from stack) the GC should not only collect the old object but also the still pinned one. diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -96,6 +96,29 @@ def test_simple_pin_unpin_stack_minor_collect(self): self.simple_pin_unpin_stack(self.gc.minor_collection) + def test_pinned_obj_collected_after_old_object_collected(self): + root_ptr = self.malloc(S) + root_ptr.someInt = 999 + self.stackroots.append(root_ptr) + self.gc.collect() + + root_ptr = self.stackroots[0] + next_ptr = self.malloc(S) + next_ptr.someInt = 111 + assert self.gc.pin(llmemory.cast_ptr_to_adr(next_ptr)) + self.write(root_ptr, 'next', next_ptr) + self.gc.collect() + # check still alive + assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr.next)) + self.stackroots.remove(root_ptr) + self.gc.collect() + # root_ptr was collected and therefore also the pinned object should + # be gone + try: + next_ptr.someInt = 101 + except RuntimeError as ex: + assert "freed" in str(ex) + def test_pin_referenced_from_stackroot_young(self): # # create two objects and reference the pinned one From noreply at buildbot.pypy.org Sat Jul 26 16:47:59 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:47:59 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: add XXX todo item reminding to add more object pinning related tests Message-ID: 
<20140726144759.2C6521C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72532:e02894d635ae Date: 2014-07-26 15:49 +0200 http://bitbucket.org/pypy/pypy/changeset/e02894d635ae/ Log: add XXX todo item reminding to add more object pinning related tests diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -119,6 +119,10 @@ except RuntimeError as ex: assert "freed" in str(ex) + # XXX more tests like the one above. Make list of all possible cases and + # write tests for each one. Also: minor/full major collection tests maybe + # needed + def test_pin_referenced_from_stackroot_young(self): # # create two objects and reference the pinned one From noreply at buildbot.pypy.org Sat Jul 26 16:48:00 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 16:48:00 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: (wip) add handling for pinned objects referenced by old ones. Message-ID: <20140726144800.445B41C050D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72533:cb701ab5f828 Date: 2014-07-26 16:46 +0200 http://bitbucket.org/pypy/pypy/changeset/cb701ab5f828/ Log: (wip) add handling for pinned objects referenced by old ones. The added AddressStack keeps track of old objects which point to young pinned objects. This way if a pinned object is only referenced from an old object, it isn't collected as before. This is a work in progress. For now test_object_pinning.py tests pass. However, there is still a lot to be tested and double checked (see XXX). In addition some refactoring & additional comments are in order after we make sure it works as expected. For now this changes stop pypy from being translated. 
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -381,6 +381,14 @@ # the nursery. self.pinned_objects_in_nursery = 0 # + # Keeps track of objects pointing to pinned objects. These objects + # must be revisited every minor collection. Without this list + # any old object inside this list would only be visited in case a + # write barrier was triggered, which would result in not visiting + # the young pinned object and would therefore result in removing + # the pinned object. + self.old_objects_pointing_to_pinned = self.AddressStack() + # # Allocate a nursery. In case of auto_nursery_size, start by # allocating a very small nursery, enough to do things like look # up the env var, which requires the GC; and then really @@ -1502,6 +1510,10 @@ obj = obj + self.gcheaderbuilder.size_gc_header shadow = self.nursery_objects_shadows.get(obj) if shadow != NULL: + # visit shadow to keep it alive + # XXX seems like it is save to set GCFLAG_VISITED, however + # should be double checked + self.header(shadow).tid |= GCFLAG_VISITED new_shadow_object_dict.setitem(obj, shadow) # ---------- @@ -1554,6 +1566,13 @@ self.nursery_surviving_size = 0 self.collect_roots_in_nursery() # + # visit all objects that are known for pointing to pinned + # objects. This way we populate 'surviving_pinned_objects' + # with pinned object that are (only) visible from an old + # object. + self.old_objects_pointing_to_pinned.foreach( + self._visit_old_objects_pointing_to_pinned, None) + # while True: # If we are using card marking, do a partial trace of the arrays # that are flagged with GCFLAG_CARDS_SET. 
@@ -1665,6 +1684,8 @@ # debug_stop("gc-minor") + def _visit_old_objects_pointing_to_pinned(self, obj, ignore): + self.trace(obj, self._trace_drag_out, None) def collect_roots_in_nursery(self): # we don't need to trace prebuilt GcStructs during a minor collect: @@ -1772,7 +1793,7 @@ """obj must not be in the nursery. This copies all the young objects it references out of the nursery. """ - self.trace(obj, self._trace_drag_out, None) + self.trace(obj, self._trace_drag_out, obj) def trace_and_drag_out_of_nursery_partial(self, obj, start, stop): """Like trace_and_drag_out_of_nursery(), but limited to the array @@ -1801,7 +1822,7 @@ if not self.header(obj).tid & GCFLAG_VISITED: self.more_objects_to_trace.append(obj) - def _trace_drag_out(self, root, ignored): + def _trace_drag_out(self, root, parent): obj = root.address[0] #print '_trace_drag_out(%x: %r)' % (hash(obj.ptr._obj), obj) # @@ -1842,8 +1863,11 @@ # already visited and keeping track of the object return hdr.tid |= GCFLAG_VISITED + # + if parent: + self.old_objects_pointing_to_pinned.append(parent) + # # XXX add additional checks for unsupported pinned objects (groggi) - # XXX implement unsupported object types with pinning ll_assert(not self.header(obj).tid & GCFLAG_HAS_CARDS, "pinned object with GCFLAG_HAS_CARDS not supported") self.surviving_pinned_objects.append( @@ -2073,6 +2097,15 @@ #END MARKING elif self.gc_state == STATE_SWEEPING: # + # get rid of objects pointing to pinned objects that were not + # visited + new_old_objects_pointing_to_pinned = self.AddressStack() + self.old_objects_pointing_to_pinned.foreach( + self._sweep_old_objects_pointing_to_pinned, + new_old_objects_pointing_to_pinned) + self.old_objects_pointing_to_pinned.delete() + self.old_objects_pointing_to_pinned = new_old_objects_pointing_to_pinned + # if self.raw_malloc_might_sweep.non_empty(): # Walk all rawmalloced objects and free the ones that don't # have the GCFLAG_VISITED flag. Visit at most 'limit' objects. 
@@ -2147,6 +2180,10 @@ debug_print("stopping, now in gc state: ", GC_STATES[self.gc_state]) debug_stop("gc-collect-step") + def _sweep_old_objects_pointing_to_pinned(self, obj, new_list): + if self.header(obj).tid & GCFLAG_VISITED: + new_list.append(obj) + def _free_if_unvisited(self, hdr): size_gc_header = self.gcheaderbuilder.size_gc_header obj = hdr + size_gc_header From noreply at buildbot.pypy.org Sat Jul 26 17:39:58 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sat, 26 Jul 2014 17:39:58 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fixed test_construct_singletons for Ellipsis and NotImplemented as well, so now ellipsis and NotImplementedType can be constructed. Message-ID: <20140726153958.633CF1C03AC@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72534:1c542d770ddb Date: 2014-07-26 16:25 +0200 http://bitbucket.org/pypy/pypy/changeset/1c542d770ddb/ Log: Fixed test_construct_singletons for Ellipsis and NotImplemented as well, so now ellipsis and NotImplementedType can be constructed. 
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -950,11 +950,13 @@ Cell.typedef.acceptable_as_base_class = False Ellipsis.typedef = TypeDef("Ellipsis", + __new__ = interp2app(lambda space, w_type: space.w_Ellipsis), __repr__ = interp2app(Ellipsis.descr__repr__), ) Ellipsis.typedef.acceptable_as_base_class = False NotImplemented.typedef = TypeDef("NotImplemented", + __new__ = interp2app(lambda space, w_type: space.w_NotImplemented), __repr__ = interp2app(NotImplemented.descr__repr__), ) NotImplemented.typedef.acceptable_as_base_class = False diff --git a/pypy/module/__builtin__/test/test_construct_singletons.py b/pypy/module/__builtin__/test/test_construct_singletons.py --- a/pypy/module/__builtin__/test/test_construct_singletons.py +++ b/pypy/module/__builtin__/test/test_construct_singletons.py @@ -1,7 +1,8 @@ class AppTestConstructSingletons: def test_construct_singletons(self): - none_type = type(None) - assert none_type() is None - raises(TypeError, none_type, 1, 2) - raises(TypeError, none_type, a=1, b=2) + for const in None, Ellipsis, NotImplemented: + const_type = type(const) + assert const_type() is const + raises(TypeError, const_type, 1, 2) + raises(TypeError, const_type, a=1, b=2) From noreply at buildbot.pypy.org Sat Jul 26 17:39:59 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sat, 26 Jul 2014 17:39:59 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Pickling of iterators produced by zip(seq, ...). Message-ID: <20140726153959.8CAE71C03AC@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72535:1f1711d14798 Date: 2014-07-26 16:38 +0200 http://bitbucket.org/pypy/pypy/changeset/1f1711d14798/ Log: Pickling of iterators produced by zip(seq, ...). 
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -753,6 +753,12 @@ raise OperationError(self.space.w_StopIteration, self.space.w_None) return W_Map.next_w(self) + def descr_reduce(self, space): + w_zip = space.getattr(space.getbuiltinmodule('builtins'), + space.wrap('zip')) + return space.newtuple([w_zip, space.newtuple(self.iterators_w)]) + + def W_Zip___new__(space, w_subtype, args_w): r = space.allocate_instance(W_Zip, w_subtype) r.__init__(space, None, args_w) @@ -763,6 +769,7 @@ __new__ = interp2app(W_Zip___new__), __iter__ = interp2app(W_Zip.iter_w), __next__ = interp2app(W_Zip.next_w), + __reduce__ = interp2app(W_Zip.descr_reduce), __doc__ = """\ Return a zip object whose .__next__() method returns a tuple where the i-th element comes from the i-th iterable argument. The .__next__() diff --git a/pypy/module/__builtin__/test/test_zip_pickle.py b/pypy/module/__builtin__/test/test_zip_pickle.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_zip_pickle.py @@ -0,0 +1,14 @@ +class AppTestZipPickle: + + def test_zip_pickle(self): + import pickle + + def pickle_unpickle(obj): + d = pickle.dumps(obj) + return pickle.loads(d) + + z1 = zip([1, 2, 3], [4, 5, 6]) + z1_ = pickle_unpickle(z1) + l1, l1_ = list(z1), list(z1_) + + assert l1 == l1_ From noreply at buildbot.pypy.org Sat Jul 26 17:40:00 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sat, 26 Jul 2014 17:40:00 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Additional checks related to pickling of zip iterators. Message-ID: <20140726154000.B14781C03AC@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72536:d26183fe9a2b Date: 2014-07-26 16:43 +0200 http://bitbucket.org/pypy/pypy/changeset/d26183fe9a2b/ Log: Additional checks related to pickling of zip iterators. 
diff --git a/pypy/module/__builtin__/test/test_zip_pickle.py b/pypy/module/__builtin__/test/test_zip_pickle.py --- a/pypy/module/__builtin__/test/test_zip_pickle.py +++ b/pypy/module/__builtin__/test/test_zip_pickle.py @@ -3,12 +3,16 @@ def test_zip_pickle(self): import pickle - def pickle_unpickle(obj): - d = pickle.dumps(obj) - return pickle.loads(d) + def check_pickle_unpickle(itr): + d = pickle.dumps(itr) + itr_ = pickle.loads(d) + lst, lst_ = list(itr), list(itr_) + assert lst == lst_ - z1 = zip([1, 2, 3], [4, 5, 6]) - z1_ = pickle_unpickle(z1) - l1, l1_ = list(z1), list(z1_) + check_pickle_unpickle(zip([1, 2, 3], [4, 5, 6])) + check_pickle_unpickle(zip()) - assert l1 == l1_ + a = iter("abc") + b = iter("cdef") + next(b) + check_pickle_unpickle(zip(a, b)) From noreply at buildbot.pypy.org Sat Jul 26 18:21:08 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sat, 26 Jul 2014 18:21:08 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Refuse to sum bytes or bytearrays like cpython3 sum. Message-ID: <20140726162108.D6FA51C06AE@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72537:c4be82e0fa27 Date: 2014-07-26 18:08 +0200 http://bitbucket.org/pypy/pypy/changeset/c4be82e0fa27/ Log: Refuse to sum bytes or bytearrays like cpython3 sum. 
diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -37,6 +37,10 @@ empty, returns start.""" if isinstance(start, str): raise TypeError("sum() can't sum strings [use ''.join(seq) instead]") + if isinstance(start, bytes): + raise TypeError("sum() can't sum bytes [use b''.join(seq) instead]") + if isinstance(start, bytearray): + raise TypeError("sum() can't sum bytearray [use b''.join(seq) instead]") last = start for x in sequence: # Very intentionally *not* +=, that would have different semantics if diff --git a/pypy/module/__builtin__/test/test_sum.py b/pypy/module/__builtin__/test/test_sum.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_sum.py @@ -0,0 +1,9 @@ +class AppTestSum: + + def test_sum(self): + raises(TypeError, sum, [b'a', b'c'], b'') + raises(TypeError, sum, [bytearray(b'a'), bytearray(b'b')], + bytearray(b'')) + raises(TypeError, sum, [[1], [2], [3]]) + raises(TypeError, sum, [{2:3}]) + raises(TypeError, sum, [{2:3}]*2, {2:3}) From noreply at buildbot.pypy.org Sat Jul 26 21:43:38 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 26 Jul 2014 21:43:38 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: skip hash randomization test Message-ID: <20140726194338.B096F1C050D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72538:a2f29529053a Date: 2014-07-26 12:27 -0700 http://bitbucket.org/pypy/pypy/changeset/a2f29529053a/ Log: skip hash randomization test diff --git a/lib-python/3/test/test_cmd_line.py b/lib-python/3/test/test_cmd_line.py --- a/lib-python/3/test/test_cmd_line.py +++ b/lib-python/3/test/test_cmd_line.py @@ -341,6 +341,9 @@ def test_hash_randomization(self): # Verify that -R enables hash randomization: self.verify_valid_flag('-R') + if test.support.check_impl_detail(cpython=False): + # PyPy doesn't support hash randomization + return 
hashes = [] for i in range(2): code = 'print(hash("spam"))' From noreply at buildbot.pypy.org Sat Jul 26 21:43:39 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 26 Jul 2014 21:43:39 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: missing import Message-ID: <20140726194339.E051A1C050D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72539:6a34ba3e09b7 Date: 2014-07-26 12:27 -0700 http://bitbucket.org/pypy/pypy/changeset/6a34ba3e09b7/ Log: missing import diff --git a/lib-python/3/test/pickletester.py b/lib-python/3/test/pickletester.py --- a/lib-python/3/test/pickletester.py +++ b/lib-python/3/test/pickletester.py @@ -9,7 +9,7 @@ from test.support import ( TestFailed, TESTFN, run_with_locale, no_tracing, - _2G, _4G, bigmemtest, + _2G, _4G, bigmemtest, check_impl_detail ) from pickle import bytes_types From noreply at buildbot.pypy.org Sat Jul 26 21:43:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 26 Jul 2014 21:43:41 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: testing of __del__ needs a gc_collect Message-ID: <20140726194341.1B9711C050D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72540:48d61dbba1ff Date: 2014-07-26 12:30 -0700 http://bitbucket.org/pypy/pypy/changeset/48d61dbba1ff/ Log: testing of __del__ needs a gc_collect diff --git a/lib-python/3/test/test_scope.py b/lib-python/3/test/test_scope.py --- a/lib-python/3/test/test_scope.py +++ b/lib-python/3/test/test_scope.py @@ -1,7 +1,8 @@ import unittest import weakref -from test.support import check_syntax_error, cpython_only, run_unittest +from test.support import ( + check_syntax_error, cpython_only, run_unittest, gc_collect) class ScopeTests(unittest.TestCase): @@ -422,6 +423,7 @@ for i in range(100): f1() + gc_collect() self.assertEqual(Foo.count, 0) def testClassAndGlobal(self): From noreply at buildbot.pypy.org Sat Jul 26 21:43:42 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 26 Jul 2014 21:43:42 
+0200 (CEST) Subject: [pypy-commit] pypy py3.3: skip new GIL API test Message-ID: <20140726194342.49F151C050D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72541:3224b56f5c8e Date: 2014-07-26 12:36 -0700 http://bitbucket.org/pypy/pypy/changeset/3224b56f5c8e/ Log: skip new GIL API test diff --git a/lib-python/3/test/test_sys.py b/lib-python/3/test/test_sys.py --- a/lib-python/3/test/test_sys.py +++ b/lib-python/3/test/test_sys.py @@ -169,7 +169,8 @@ sys.setcheckinterval(n) self.assertEqual(sys.getcheckinterval(), n) - @unittest.skipUnless(threading, 'Threading required for this test.') + @unittest.skipUnless(hasattr(sys, 'getswitchinterval') and threading, + 'New GIL & threading required for this test.') def test_switchinterval(self): self.assertRaises(TypeError, sys.setswitchinterval) self.assertRaises(TypeError, sys.setswitchinterval, "a") From noreply at buildbot.pypy.org Sat Jul 26 21:43:43 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 26 Jul 2014 21:43:43 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: __basic/itemsize__ are impl details Message-ID: <20140726194343.8BDF41C050D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72542:4f1d9525b86f Date: 2014-07-26 12:40 -0700 http://bitbucket.org/pypy/pypy/changeset/4f1d9525b86f/ Log: __basic/itemsize__ are impl details diff --git a/lib-python/3/test/test_types.py b/lib-python/3/test/test_types.py --- a/lib-python/3/test/test_types.py +++ b/lib-python/3/test/test_types.py @@ -1,6 +1,6 @@ # Python test set -- part 6, built-in types -from test.support import run_unittest, run_with_locale +from test.support import run_unittest, run_with_locale, impl_detail import collections import locale import sys @@ -566,6 +566,7 @@ for code in 'xXobns': self.assertRaises(ValueError, format, 0, ',' + code) + @impl_detail def test_internal_sizes(self): self.assertGreater(object.__basicsize__, 0) self.assertGreater(tuple.__itemsize__, 0) From noreply at 
buildbot.pypy.org Sat Jul 26 21:43:44 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 26 Jul 2014 21:43:44 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: clarify Message-ID: <20140726194344.B17511C050D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72543:d8277de0ae4e Date: 2014-07-26 12:40 -0700 http://bitbucket.org/pypy/pypy/changeset/d8277de0ae4e/ Log: clarify diff --git a/lib-python/3/test/test_cmd_line.py b/lib-python/3/test/test_cmd_line.py --- a/lib-python/3/test/test_cmd_line.py +++ b/lib-python/3/test/test_cmd_line.py @@ -341,7 +341,7 @@ def test_hash_randomization(self): # Verify that -R enables hash randomization: self.verify_valid_flag('-R') - if test.support.check_impl_detail(cpython=False): + if test.support.check_impl_detail(pypy=True): # PyPy doesn't support hash randomization return hashes = [] From noreply at buildbot.pypy.org Sat Jul 26 22:05:29 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 22:05:29 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: incminimark can be translated now Message-ID: <20140726200529.468E91C0945@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72544:f03b71e80cc6 Date: 2014-07-26 20:11 +0200 http://bitbucket.org/pypy/pypy/changeset/f03b71e80cc6/ Log: incminimark can be translated now diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1685,7 +1685,7 @@ debug_stop("gc-minor") def _visit_old_objects_pointing_to_pinned(self, obj, ignore): - self.trace(obj, self._trace_drag_out, None) + self.trace(obj, self._trace_drag_out, NULL) def collect_roots_in_nursery(self): # we don't need to trace prebuilt GcStructs during a minor collect: @@ -1802,14 +1802,14 @@ ll_assert(start < stop, "empty or negative range " "in trace_and_drag_out_of_nursery_partial()") #print 'trace_partial:', start, stop, 
'\t', obj - self.trace_partial(obj, start, stop, self._trace_drag_out, None) + self.trace_partial(obj, start, stop, self._trace_drag_out, NULL) def _trace_drag_out1(self, root): - self._trace_drag_out(root, None) + self._trace_drag_out(root, NULL) def _trace_drag_out1_marking_phase(self, root): - self._trace_drag_out(root, None) + self._trace_drag_out(root, NULL) # # We are in the MARKING state: we must also record this object # if it was young. Don't bother with old objects in general, @@ -1864,7 +1864,7 @@ return hdr.tid |= GCFLAG_VISITED # - if parent: + if parent != NULL: self.old_objects_pointing_to_pinned.append(parent) # # XXX add additional checks for unsupported pinned objects (groggi) From noreply at buildbot.pypy.org Sat Jul 26 22:05:30 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 26 Jul 2014 22:05:30 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: 'NULL' -> 'llmemory.NULL'. Make it consistent. Use explicit version Message-ID: <20140726200530.6C2F21C0945@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72545:4a4fffb737e5 Date: 2014-07-26 20:44 +0200 http://bitbucket.org/pypy/pypy/changeset/4a4fffb737e5/ Log: 'NULL' -> 'llmemory.NULL'. Make it consistent. Use explicit version diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -82,7 +82,6 @@ # XXX update doc string to contain object pinning (groggi) WORD = LONG_BIT // 8 -NULL = llmemory.NULL first_gcflag = 1 << (LONG_BIT//2) @@ -302,10 +301,10 @@ # it gives a lower bound on the allowed size of the nursery. 
self.nonlarge_max = large_object - 1 # - self.nursery = NULL - self.nursery_free = NULL - self.nursery_top = NULL - self.nursery_real_top = NULL + self.nursery = llmemory.NULL + self.nursery_free = llmemory.NULL + self.nursery_top = llmemory.NULL + self.nursery_real_top = llmemory.NULL self.debug_tiny_nursery = -1 self.debug_rotating_nurseries = lltype.nullptr(NURSARRAY) self.extra_threshold = 0 @@ -1509,7 +1508,7 @@ # dict of shadows. obj = obj + self.gcheaderbuilder.size_gc_header shadow = self.nursery_objects_shadows.get(obj) - if shadow != NULL: + if shadow != llmemory.NULL: # visit shadow to keep it alive # XXX seems like it is save to set GCFLAG_VISITED, however # should be double checked @@ -1685,7 +1684,7 @@ debug_stop("gc-minor") def _visit_old_objects_pointing_to_pinned(self, obj, ignore): - self.trace(obj, self._trace_drag_out, NULL) + self.trace(obj, self._trace_drag_out, llmemory.NULL) def collect_roots_in_nursery(self): # we don't need to trace prebuilt GcStructs during a minor collect: @@ -1802,14 +1801,14 @@ ll_assert(start < stop, "empty or negative range " "in trace_and_drag_out_of_nursery_partial()") #print 'trace_partial:', start, stop, '\t', obj - self.trace_partial(obj, start, stop, self._trace_drag_out, NULL) + self.trace_partial(obj, start, stop, self._trace_drag_out, llmemory.NULL) def _trace_drag_out1(self, root): - self._trace_drag_out(root, NULL) + self._trace_drag_out(root, llmemory.NULL) def _trace_drag_out1_marking_phase(self, root): - self._trace_drag_out(root, NULL) + self._trace_drag_out(root, llmemory.NULL) # # We are in the MARKING state: we must also record this object # if it was young. 
Don't bother with old objects in general, @@ -1864,7 +1863,7 @@ return hdr.tid |= GCFLAG_VISITED # - if parent != NULL: + if parent != llmemory.NULL: self.old_objects_pointing_to_pinned.append(parent) # # XXX add additional checks for unsupported pinned objects (groggi) @@ -1877,7 +1876,7 @@ else: # First visit to an object that has already a shadow. newobj = self.nursery_objects_shadows.get(obj) - ll_assert(newobj != NULL, "GCFLAG_HAS_SHADOW but no shadow found") + ll_assert(newobj != llmemory.NULL, "GCFLAG_HAS_SHADOW but no shadow found") newhdr = newobj - size_gc_header # # Remove the flag GCFLAG_HAS_SHADOW, so that it doesn't get @@ -2352,7 +2351,7 @@ # collection if self.header(obj).tid & GCFLAG_HAS_SHADOW: shadow = self.nursery_objects_shadows.get(obj) - ll_assert(shadow != NULL, + ll_assert(shadow != llmemory.NULL, "GCFLAG_HAS_SHADOW but no shadow found") else: shadow = self._allocate_shadow(obj) From noreply at buildbot.pypy.org Sat Jul 26 23:11:16 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 26 Jul 2014 23:11:16 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: reapply workarounds from py3k branch Message-ID: <20140726211116.4A7B21C046A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72546:9b1c5ecf653b Date: 2014-07-26 13:20 -0700 http://bitbucket.org/pypy/pypy/changeset/9b1c5ecf653b/ Log: reapply workarounds from py3k branch diff --git a/lib-python/3/test/test_pydoc.py b/lib-python/3/test/test_pydoc.py --- a/lib-python/3/test/test_pydoc.py +++ b/lib-python/3/test/test_pydoc.py @@ -18,7 +18,7 @@ from collections import namedtuple from test.script_helper import assert_python_ok from test.support import ( - TESTFN, rmtree, + TESTFN, rmtree, check_impl_detail, reap_children, reap_threads, captured_output, captured_stdout, captured_stderr, unlink ) @@ -105,7 +105,23 @@ expected_text_data_docstrings = tuple('\n | ' + s if s else '' for s in expected_data_docstrings) -expected_html_pattern = """ +if 
check_impl_detail(pypy=True): + # pydoc_mod.__builtins__ is always a module on PyPy (but a dict on + # CPython), hence an extra 'Modules' section + module_section = """ +
        + + + + +
         
        +Modules
               
        builtins
        +

        +""" +else: + module_section = "" + +expected_html_pattern = ("""
         
        @@ -113,7 +129,7 @@ >
        index
        %s%s

        This is a test module for test_pydoc

        -

        +

        """ + module_section + """\
         
        @@ -201,7 +217,7 @@ \x20\x20\x20\x20
                Nobody
        -""".strip() # ' <- emacs turd +""").strip() # ' <- emacs turd expected_html_data_docstrings = tuple(s.replace(' ', ' ') for s in expected_data_docstrings) @@ -433,6 +449,8 @@ # What we expect to get back: everything on object... expected = dict(vars(object)) + # __new__'s descriptor can be a staticmethod on PyPy + expected['__new__'] = object.__new__ # ...plus our unbound method... expected['method_returning_true'] = TestClass.method_returning_true # ...but not the non-methods on object. From noreply at buildbot.pypy.org Sat Jul 26 23:11:17 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 26 Jul 2014 23:11:17 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: restore the sysconfig_pypy usage Message-ID: <20140726211117.8CD981C046A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72547:cfa2d06ed493 Date: 2014-07-26 14:05 -0700 http://bitbucket.org/pypy/pypy/changeset/cfa2d06ed493/ Log: restore the sysconfig_pypy usage diff --git a/lib-python/3/distutils/sysconfig.py b/lib-python/3/distutils/sysconfig.py --- a/lib-python/3/distutils/sysconfig.py +++ b/lib-python/3/distutils/sysconfig.py @@ -9,589 +9,17 @@ Email: """ -import os -import re import sys -from .errors import DistutilsPlatformError -# These are needed in a couple of spots, so just compute them once. -PREFIX = os.path.normpath(sys.prefix) -EXEC_PREFIX = os.path.normpath(sys.exec_prefix) -BASE_PREFIX = os.path.normpath(sys.base_prefix) -BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) +# The content of this file is redirected from +# sysconfig_cpython or sysconfig_pypy. -# Path to the base directory of the project. On Windows the binary may -# live in project/PCBuild9. If we're dealing with an x64 Windows build, -# it'll live in project/PCbuild/amd64. 
-# set for cross builds -if "_PYTHON_PROJECT_BASE" in os.environ: - project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"]) +if '__pypy__' in sys.builtin_module_names: + from distutils.sysconfig_pypy import * + from distutils.sysconfig_pypy import _config_vars # needed by setuptools + from distutils.sysconfig_pypy import _variable_rx # read_setup_file() else: - project_base = os.path.dirname(os.path.abspath(sys.executable)) -if os.name == "nt" and "pcbuild" in project_base[-8:].lower(): - project_base = os.path.abspath(os.path.join(project_base, os.path.pardir)) -# PC/VS7.1 -if os.name == "nt" and "\\pc\\v" in project_base[-10:].lower(): - project_base = os.path.abspath(os.path.join(project_base, os.path.pardir, - os.path.pardir)) -# PC/AMD64 -if os.name == "nt" and "\\pcbuild\\amd64" in project_base[-14:].lower(): - project_base = os.path.abspath(os.path.join(project_base, os.path.pardir, - os.path.pardir)) - -# python_build: (Boolean) if true, we're either building Python or -# building an extension with an un-installed Python, so we use -# different (hard-wired) directories. -# Setup.local is available for Makefile builds including VPATH builds, -# Setup.dist is available on Windows -def _is_python_source_dir(d): - for fn in ("Setup.dist", "Setup.local"): - if os.path.isfile(os.path.join(d, "Modules", fn)): - return True - return False -_sys_home = getattr(sys, '_home', None) -if _sys_home and os.name == 'nt' and \ - _sys_home.lower().endswith(('pcbuild', 'pcbuild\\amd64')): - _sys_home = os.path.dirname(_sys_home) - if _sys_home.endswith('pcbuild'): # must be amd64 - _sys_home = os.path.dirname(_sys_home) -def _python_build(): - if _sys_home: - return _is_python_source_dir(_sys_home) - return _is_python_source_dir(project_base) -python_build = _python_build() - -# Calculate the build qualifier flags if they are defined. Adding the flags -# to the include and lib directories only makes sense for an installation, not -# an in-source build. 
-build_flags = '' -try: - if not python_build: - build_flags = sys.abiflags -except AttributeError: - # It's not a configure-based build, so the sys module doesn't have - # this attribute, which is fine. - pass - -def get_python_version(): - """Return a string containing the major and minor Python version, - leaving off the patchlevel. Sample return values could be '1.5' - or '2.2'. - """ - return sys.version[:3] - - -def get_python_inc(plat_specific=0, prefix=None): - """Return the directory containing installed Python header files. - - If 'plat_specific' is false (the default), this is the path to the - non-platform-specific header files, i.e. Python.h and so on; - otherwise, this is the path to platform-specific header files - (namely pyconfig.h). - - If 'prefix' is supplied, use it instead of sys.base_prefix or - sys.base_exec_prefix -- i.e., ignore 'plat_specific'. - """ - if prefix is None: - prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX - if os.name == "posix": - if python_build: - # Assume the executable is in the build directory. The - # pyconfig.h file should be in the same directory. Since - # the build directory may not be the source directory, we - # must use "srcdir" from the makefile to find the "Include" - # directory. 
- base = _sys_home or project_base - if plat_specific: - return base - if _sys_home: - incdir = os.path.join(_sys_home, get_config_var('AST_H_DIR')) - else: - incdir = os.path.join(get_config_var('srcdir'), 'Include') - return os.path.normpath(incdir) - python_dir = 'python' + get_python_version() + build_flags - return os.path.join(prefix, "include", python_dir) - elif os.name == "nt": - return os.path.join(prefix, "include") - elif os.name == "os2": - return os.path.join(prefix, "Include") - else: - raise DistutilsPlatformError( - "I don't know where Python installs its C header files " - "on platform '%s'" % os.name) - - -def get_python_lib(plat_specific=0, standard_lib=0, prefix=None): - """Return the directory containing the Python library (standard or - site additions). - - If 'plat_specific' is true, return the directory containing - platform-specific modules, i.e. any module from a non-pure-Python - module distribution; otherwise, return the platform-shared library - directory. If 'standard_lib' is true, return the directory - containing standard Python library modules; otherwise, return the - directory for site-specific modules. - - If 'prefix' is supplied, use it instead of sys.base_prefix or - sys.base_exec_prefix -- i.e., ignore 'plat_specific'. 
- """ - if prefix is None: - if standard_lib: - prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX - else: - prefix = plat_specific and EXEC_PREFIX or PREFIX - - if os.name == "posix": - libpython = os.path.join(prefix, - "lib", "python" + get_python_version()) - if standard_lib: - return libpython - else: - return os.path.join(libpython, "site-packages") - elif os.name == "nt": - if standard_lib: - return os.path.join(prefix, "Lib") - else: - if get_python_version() < "2.2": - return prefix - else: - return os.path.join(prefix, "Lib", "site-packages") - elif os.name == "os2": - if standard_lib: - return os.path.join(prefix, "Lib") - else: - return os.path.join(prefix, "Lib", "site-packages") - else: - raise DistutilsPlatformError( - "I don't know where Python installs its library " - "on platform '%s'" % os.name) - - - -def customize_compiler(compiler): - """Do any platform-specific customization of a CCompiler instance. - - Mainly needed on Unix, so we can plug in the information that - varies across Unices and is stored in Python's Makefile. - """ - if compiler.compiler_type == "unix": - if sys.platform == "darwin": - # Perform first-time customization of compiler-related - # config vars on OS X now that we know we need a compiler. - # This is primarily to support Pythons from binary - # installers. The kind and paths to build tools on - # the user system may vary significantly from the system - # that Python itself was built on. Also the user OS - # version and build tools may not support the same set - # of CPU architectures for universal builds. 
- global _config_vars - if not _config_vars.get('CUSTOMIZED_OSX_COMPILER', ''): - import _osx_support - _osx_support.customize_compiler(_config_vars) - _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' - - (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ - get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', - 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') - - if 'CC' in os.environ: - newcc = os.environ['CC'] - if (sys.platform == 'darwin' - and 'LDSHARED' not in os.environ - and ldshared.startswith(cc)): - # On OS X, if CC is overridden, use that as the default - # command for LDSHARED as well - ldshared = newcc + ldshared[len(cc):] - cc = newcc - if 'CXX' in os.environ: - cxx = os.environ['CXX'] - if 'LDSHARED' in os.environ: - ldshared = os.environ['LDSHARED'] - if 'CPP' in os.environ: - cpp = os.environ['CPP'] - else: - cpp = cc + " -E" # not always - if 'LDFLAGS' in os.environ: - ldshared = ldshared + ' ' + os.environ['LDFLAGS'] - if 'CFLAGS' in os.environ: - cflags = opt + ' ' + os.environ['CFLAGS'] - ldshared = ldshared + ' ' + os.environ['CFLAGS'] - if 'CPPFLAGS' in os.environ: - cpp = cpp + ' ' + os.environ['CPPFLAGS'] - cflags = cflags + ' ' + os.environ['CPPFLAGS'] - ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] - if 'AR' in os.environ: - ar = os.environ['AR'] - if 'ARFLAGS' in os.environ: - archiver = ar + ' ' + os.environ['ARFLAGS'] - else: - archiver = ar + ' ' + ar_flags - - cc_cmd = cc + ' ' + cflags - compiler.set_executables( - preprocessor=cpp, - compiler=cc_cmd, - compiler_so=cc_cmd + ' ' + ccshared, - compiler_cxx=cxx, - linker_so=ldshared, - linker_exe=cc, - archiver=archiver) - - compiler.shared_lib_extension = shlib_suffix - - -def get_config_h_filename(): - """Return full pathname of installed pyconfig.h file.""" - if python_build: - if os.name == "nt": - inc_dir = os.path.join(_sys_home or project_base, "PC") - else: - inc_dir = _sys_home or project_base - else: - inc_dir = get_python_inc(plat_specific=1) - 
if get_python_version() < '2.2': - config_h = 'config.h' - else: - # The name of the config.h file changed in 2.2 - config_h = 'pyconfig.h' - return os.path.join(inc_dir, config_h) - - -def get_makefile_filename(): - """Return full pathname of installed Makefile from the Python build.""" - if python_build: - return os.path.join(_sys_home or project_base, "Makefile") - lib_dir = get_python_lib(plat_specific=0, standard_lib=1) - config_file = 'config-{}{}'.format(get_python_version(), build_flags) - return os.path.join(lib_dir, config_file, 'Makefile') - - -def parse_config_h(fp, g=None): - """Parse a config.h-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. - """ - if g is None: - g = {} - define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") - undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") - # - while True: - line = fp.readline() - if not line: - break - m = define_rx.match(line) - if m: - n, v = m.group(1, 2) - try: v = int(v) - except ValueError: pass - g[n] = v - else: - m = undef_rx.match(line) - if m: - g[m.group(1)] = 0 - return g - - -# Regexes needed for parsing Makefile (and similar syntaxes, -# like old-style Setup files). -_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") -_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") -_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") - -def parse_makefile(fn, g=None): - """Parse a Makefile-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. 
- """ - from distutils.text_file import TextFile - fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape") - - if g is None: - g = {} - done = {} - notdone = {} - - while True: - line = fp.readline() - if line is None: # eof - break - m = _variable_rx.match(line) - if m: - n, v = m.group(1, 2) - v = v.strip() - # `$$' is a literal `$' in make - tmpv = v.replace('$$', '') - - if "$" in tmpv: - notdone[n] = v - else: - try: - v = int(v) - except ValueError: - # insert literal `$' - done[n] = v.replace('$$', '$') - else: - done[n] = v - - # Variables with a 'PY_' prefix in the makefile. These need to - # be made available without that prefix through sysconfig. - # Special care is needed to ensure that variable expansion works, even - # if the expansion uses the name without a prefix. - renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') - - # do variable interpolation here - while notdone: - for name in list(notdone): - value = notdone[name] - m = _findvar1_rx.search(value) or _findvar2_rx.search(value) - if m: - n = m.group(1) - found = True - if n in done: - item = str(done[n]) - elif n in notdone: - # get it on a subsequent round - found = False - elif n in os.environ: - # do it like make: fall back to environment - item = os.environ[n] - - elif n in renamed_variables: - if name.startswith('PY_') and name[3:] in renamed_variables: - item = "" - - elif 'PY_' + n in notdone: - found = False - - else: - item = str(done['PY_' + n]) - else: - done[n] = item = "" - if found: - after = value[m.end():] - value = value[:m.start()] + item + after - if "$" in after: - notdone[name] = value - else: - try: value = int(value) - except ValueError: - done[name] = value.strip() - else: - done[name] = value - del notdone[name] - - if name.startswith('PY_') \ - and name[3:] in renamed_variables: - - name = name[3:] - if name not in done: - done[name] = value - else: - # bogus variable reference; just drop it since we can't deal - del notdone[name] 
- - fp.close() - - # strip spurious spaces - for k, v in done.items(): - if isinstance(v, str): - done[k] = v.strip() - - # save the results in the global dictionary - g.update(done) - return g - - -def expand_makefile_vars(s, vars): - """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in - 'string' according to 'vars' (a dictionary mapping variable names to - values). Variables not present in 'vars' are silently expanded to the - empty string. The variable values in 'vars' should not contain further - variable expansions; if 'vars' is the output of 'parse_makefile()', - you're fine. Returns a variable-expanded version of 's'. - """ - - # This algorithm does multiple expansion, so if vars['foo'] contains - # "${bar}", it will expand ${foo} to ${bar}, and then expand - # ${bar}... and so forth. This is fine as long as 'vars' comes from - # 'parse_makefile()', which takes care of such expansions eagerly, - # according to make's variable expansion semantics. - - while True: - m = _findvar1_rx.search(s) or _findvar2_rx.search(s) - if m: - (beg, end) = m.span() - s = s[0:beg] + vars.get(m.group(1)) + s[end:] - else: - break - return s - - -_config_vars = None - -def _init_posix(): - """Initialize the module as appropriate for POSIX systems.""" - g = {} - # load the installed Makefile: - try: - filename = get_makefile_filename() - parse_makefile(filename, g) - except IOError as msg: - my_msg = "invalid Python installation: unable to open %s" % filename - if hasattr(msg, "strerror"): - my_msg = my_msg + " (%s)" % msg.strerror - - raise DistutilsPlatformError(my_msg) - - # load the installed pyconfig.h: - try: - filename = get_config_h_filename() - with open(filename) as file: - parse_config_h(file, g) - except IOError as msg: - my_msg = "invalid Python installation: unable to open %s" % filename - if hasattr(msg, "strerror"): - my_msg = my_msg + " (%s)" % msg.strerror - - raise DistutilsPlatformError(my_msg) - - # On AIX, there are wrong paths to the linker 
scripts in the Makefile - # -- these paths are relative to the Python source, but when installed - # the scripts are in another directory. - if python_build: - g['LDSHARED'] = g['BLDSHARED'] - - elif get_python_version() < '2.1': - # The following two branches are for 1.5.2 compatibility. - if sys.platform == 'aix4': # what about AIX 3.x ? - # Linker script is in the config directory, not in Modules as the - # Makefile says. - python_lib = get_python_lib(standard_lib=1) - ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix') - python_exp = os.path.join(python_lib, 'config', 'python.exp') - - g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp) - - global _config_vars - _config_vars = g - - -def _init_nt(): - """Initialize the module as appropriate for NT""" - g = {} - # set basic install directories - g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) - g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) - - # XXX hmmm.. a normal install puts include files here - g['INCLUDEPY'] = get_python_inc(plat_specific=0) - - g['SO'] = '.pyd' - g['EXT_SUFFIX'] = '.pyd' - g['EXE'] = ".exe" - g['VERSION'] = get_python_version().replace(".", "") - g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) - - global _config_vars - _config_vars = g - - -def _init_os2(): - """Initialize the module as appropriate for OS/2""" - g = {} - # set basic install directories - g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) - g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) - - # XXX hmmm.. a normal install puts include files here - g['INCLUDEPY'] = get_python_inc(plat_specific=0) - - g['SO'] = '.pyd' - g['EXT_SUFFIX'] = '.pyd' - g['EXE'] = ".exe" - - global _config_vars - _config_vars = g - - -def get_config_vars(*args): - """With no arguments, return a dictionary of all configuration - variables relevant for the current platform. 
Generally this includes - everything needed to build extensions and install both pure modules and - extensions. On Unix, this means every variable defined in Python's - installed Makefile; on Windows it's a much smaller set. - - With arguments, return a list of values that result from looking up - each argument in the configuration variable dictionary. - """ - global _config_vars - if _config_vars is None: - func = globals().get("_init_" + os.name) - if func: - func() - else: - _config_vars = {} - - # Normalized versions of prefix and exec_prefix are handy to have; - # in fact, these are the standard versions used most places in the - # Distutils. - _config_vars['prefix'] = PREFIX - _config_vars['exec_prefix'] = EXEC_PREFIX - - # Always convert srcdir to an absolute path - srcdir = _config_vars.get('srcdir', project_base) - if os.name == 'posix': - if python_build: - # If srcdir is a relative path (typically '.' or '..') - # then it should be interpreted relative to the directory - # containing Makefile. - base = os.path.dirname(get_makefile_filename()) - srcdir = os.path.join(base, srcdir) - else: - # srcdir is not meaningful since the installation is - # spread about the filesystem. We choose the - # directory containing the Makefile since we know it - # exists. - srcdir = os.path.dirname(get_makefile_filename()) - _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir)) - - # Convert srcdir into an absolute path if it appears necessary. - # Normally it is relative to the build directory. However, during - # testing, for example, we might be running a non-installed python - # from a different directory. - if python_build and os.name == "posix": - base = project_base - if (not os.path.isabs(_config_vars['srcdir']) and - base != os.getcwd()): - # srcdir is relative and we are not in the same directory - # as the executable. Assume executable is in the build - # directory and make srcdir absolute. 
- srcdir = os.path.join(base, _config_vars['srcdir']) - _config_vars['srcdir'] = os.path.normpath(srcdir) - - # OS X platforms require special customization to handle - # multi-architecture, multi-os-version installers - if sys.platform == 'darwin': - import _osx_support - _osx_support.customize_config_vars(_config_vars) - - if args: - vals = [] - for name in args: - vals.append(_config_vars.get(name)) - return vals - else: - return _config_vars - -def get_config_var(name): - """Return the value of a single variable using the dictionary - returned by 'get_config_vars()'. Equivalent to - get_config_vars().get(name) - """ - return get_config_vars().get(name) + from distutils.sysconfig_cpython import * + from distutils.sysconfig_cpython import _config_vars # needed by setuptools + from distutils.sysconfig_cpython import _variable_rx # read_setup_file() diff --git a/lib-python/3/distutils/sysconfig_cpython.py b/lib-python/3/distutils/sysconfig_cpython.py --- a/lib-python/3/distutils/sysconfig_cpython.py +++ b/lib-python/3/distutils/sysconfig_cpython.py @@ -18,11 +18,17 @@ # These are needed in a couple of spots, so just compute them once. PREFIX = os.path.normpath(sys.prefix) EXEC_PREFIX = os.path.normpath(sys.exec_prefix) +BASE_PREFIX = os.path.normpath(sys.base_prefix) +BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) # Path to the base directory of the project. On Windows the binary may # live in project/PCBuild9. If we're dealing with an x64 Windows build, # it'll live in project/PCbuild/amd64. 
-project_base = os.path.dirname(os.path.abspath(sys.executable)) +# set for cross builds +if "_PYTHON_PROJECT_BASE" in os.environ: + project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"]) +else: + project_base = os.path.dirname(os.path.abspath(sys.executable)) if os.name == "nt" and "pcbuild" in project_base[-8:].lower(): project_base = os.path.abspath(os.path.join(project_base, os.path.pardir)) # PC/VS7.1 @@ -39,11 +45,21 @@ # different (hard-wired) directories. # Setup.local is available for Makefile builds including VPATH builds, # Setup.dist is available on Windows -def _python_build(): +def _is_python_source_dir(d): for fn in ("Setup.dist", "Setup.local"): - if os.path.isfile(os.path.join(project_base, "Modules", fn)): + if os.path.isfile(os.path.join(d, "Modules", fn)): return True return False +_sys_home = getattr(sys, '_home', None) +if _sys_home and os.name == 'nt' and \ + _sys_home.lower().endswith(('pcbuild', 'pcbuild\\amd64')): + _sys_home = os.path.dirname(_sys_home) + if _sys_home.endswith('pcbuild'): # must be amd64 + _sys_home = os.path.dirname(_sys_home) +def _python_build(): + if _sys_home: + return _is_python_source_dir(_sys_home) + return _is_python_source_dir(project_base) python_build = _python_build() # Calculate the build qualifier flags if they are defined. Adding the flags @@ -74,11 +90,11 @@ otherwise, this is the path to platform-specific header files (namely pyconfig.h). - If 'prefix' is supplied, use it instead of sys.prefix or - sys.exec_prefix -- i.e., ignore 'plat_specific'. + If 'prefix' is supplied, use it instead of sys.base_prefix or + sys.base_exec_prefix -- i.e., ignore 'plat_specific'. """ if prefix is None: - prefix = plat_specific and EXEC_PREFIX or PREFIX + prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX if os.name == "posix": if python_build: # Assume the executable is in the build directory. 
The @@ -86,12 +102,14 @@ # the build directory may not be the source directory, we # must use "srcdir" from the makefile to find the "Include" # directory. - base = os.path.dirname(os.path.abspath(sys.executable)) + base = _sys_home or project_base if plat_specific: return base + if _sys_home: + incdir = os.path.join(_sys_home, get_config_var('AST_H_DIR')) else: incdir = os.path.join(get_config_var('srcdir'), 'Include') - return os.path.normpath(incdir) + return os.path.normpath(incdir) python_dir = 'python' + get_python_version() + build_flags return os.path.join(prefix, "include", python_dir) elif os.name == "nt": @@ -115,11 +133,14 @@ containing standard Python library modules; otherwise, return the directory for site-specific modules. - If 'prefix' is supplied, use it instead of sys.prefix or - sys.exec_prefix -- i.e., ignore 'plat_specific'. + If 'prefix' is supplied, use it instead of sys.base_prefix or + sys.base_exec_prefix -- i.e., ignore 'plat_specific'. """ if prefix is None: - prefix = plat_specific and EXEC_PREFIX or PREFIX + if standard_lib: + prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX + else: + prefix = plat_specific and EXEC_PREFIX or PREFIX if os.name == "posix": libpython = os.path.join(prefix, @@ -146,7 +167,7 @@ "I don't know where Python installs its library " "on platform '%s'" % os.name) -_USE_CLANG = None + def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. @@ -155,40 +176,32 @@ varies across Unices and is stored in Python's Makefile. """ if compiler.compiler_type == "unix": - (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \ + if sys.platform == "darwin": + # Perform first-time customization of compiler-related + # config vars on OS X now that we know we need a compiler. + # This is primarily to support Pythons from binary + # installers. 
The kind and paths to build tools on + # the user system may vary significantly from the system + # that Python itself was built on. Also the user OS + # version and build tools may not support the same set + # of CPU architectures for universal builds. + global _config_vars + if not _config_vars.get('CUSTOMIZED_OSX_COMPILER', ''): + import _osx_support + _osx_support.customize_compiler(_config_vars) + _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' + + (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', - 'CCSHARED', 'LDSHARED', 'SO', 'AR', 'ARFLAGS') + 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') - newcc = None if 'CC' in os.environ: newcc = os.environ['CC'] - elif sys.platform == 'darwin' and cc == 'gcc-4.2': - # Issue #13590: - # Since Apple removed gcc-4.2 in Xcode 4.2, we can no - # longer assume it is available for extension module builds. - # If Python was built with gcc-4.2, check first to see if - # it is available on this system; if not, try to use clang - # instead unless the caller explicitly set CC. - global _USE_CLANG - if _USE_CLANG is None: - from distutils import log - from subprocess import Popen, PIPE - p = Popen("! 
type gcc-4.2 && type clang && exit 2", - shell=True, stdout=PIPE, stderr=PIPE) - p.wait() - if p.returncode == 2: - _USE_CLANG = True - log.warn("gcc-4.2 not found, using clang instead") - else: - _USE_CLANG = False - if _USE_CLANG: - newcc = 'clang' - if newcc: - # On OS X, if CC is overridden, use that as the default - # command for LDSHARED as well if (sys.platform == 'darwin' and 'LDSHARED' not in os.environ and ldshared.startswith(cc)): + # On OS X, if CC is overridden, use that as the default + # command for LDSHARED as well ldshared = newcc + ldshared[len(cc):] cc = newcc if 'CXX' in os.environ: @@ -225,16 +238,16 @@ linker_exe=cc, archiver=archiver) - compiler.shared_lib_extension = so_ext + compiler.shared_lib_extension = shlib_suffix def get_config_h_filename(): """Return full pathname of installed pyconfig.h file.""" if python_build: if os.name == "nt": - inc_dir = os.path.join(project_base, "PC") + inc_dir = os.path.join(_sys_home or project_base, "PC") else: - inc_dir = project_base + inc_dir = _sys_home or project_base else: inc_dir = get_python_inc(plat_specific=1) if get_python_version() < '2.2': @@ -248,7 +261,7 @@ def get_makefile_filename(): """Return full pathname of installed Makefile from the Python build.""" if python_build: - return os.path.join(os.path.dirname(sys.executable), "Makefile") + return os.path.join(_sys_home or project_base, "Makefile") lib_dir = get_python_lib(plat_specific=0, standard_lib=1) config_file = 'config-{}{}'.format(get_python_version(), build_flags) return os.path.join(lib_dir, config_file, 'Makefile') @@ -480,6 +493,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" g['VERSION'] = get_python_version().replace(".", "") g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) @@ -499,6 +513,7 @@ g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['SO'] = '.pyd' + g['EXT_SUFFIX'] = '.pyd' g['EXE'] = ".exe" global _config_vars @@ -510,7 +525,7 @@ 
variables relevant for the current platform. Generally this includes everything needed to build extensions and install both pure modules and extensions. On Unix, this means every variable defined in Python's - installed Makefile; on Windows and Mac OS it's a much smaller set. + installed Makefile; on Windows it's a much smaller set. With arguments, return a list of values that result from looking up each argument in the configuration variable dictionary. @@ -529,12 +544,29 @@ _config_vars['prefix'] = PREFIX _config_vars['exec_prefix'] = EXEC_PREFIX + # Always convert srcdir to an absolute path + srcdir = _config_vars.get('srcdir', project_base) + if os.name == 'posix': + if python_build: + # If srcdir is a relative path (typically '.' or '..') + # then it should be interpreted relative to the directory + # containing Makefile. + base = os.path.dirname(get_makefile_filename()) + srcdir = os.path.join(base, srcdir) + else: + # srcdir is not meaningful since the installation is + # spread about the filesystem. We choose the + # directory containing the Makefile since we know it + # exists. + srcdir = os.path.dirname(get_makefile_filename()) + _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir)) + # Convert srcdir into an absolute path if it appears necessary. # Normally it is relative to the build directory. However, during # testing, for example, we might be running a non-installed python # from a different directory. 
if python_build and os.name == "posix": - base = os.path.dirname(os.path.abspath(sys.executable)) + base = project_base if (not os.path.isabs(_config_vars['srcdir']) and base != os.getcwd()): # srcdir is relative and we are not in the same directory @@ -543,43 +575,11 @@ srcdir = os.path.join(base, _config_vars['srcdir']) _config_vars['srcdir'] = os.path.normpath(srcdir) + # OS X platforms require special customization to handle + # multi-architecture, multi-os-version installers if sys.platform == 'darwin': - kernel_version = os.uname()[2] # Kernel version (8.4.3) - major_version = int(kernel_version.split('.')[0]) - - if major_version < 8: - # On Mac OS X before 10.4, check if -arch and -isysroot - # are in CFLAGS or LDFLAGS and remove them if they are. - # This is needed when building extensions on a 10.3 system - # using a universal build of python. - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) - flags = re.sub('-isysroot [^ \t]*', ' ', flags) - _config_vars[key] = flags - - else: - - # Allow the user to override the architecture flags using - # an environment variable. - # NOTE: This name was introduced by Apple in OSX 10.5 and - # is used by several scripting languages distributed with - # that OS release. - - if 'ARCHFLAGS' in os.environ: - arch = os.environ['ARCHFLAGS'] - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. 
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _config_vars[key] - flags = re.sub('-arch\s+\w+\s', ' ', flags) - flags = flags + ' ' + arch - _config_vars[key] = flags + import _osx_support + _osx_support.customize_config_vars(_config_vars) if args: vals = [] From noreply at buildbot.pypy.org Sat Jul 26 23:11:18 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 26 Jul 2014 23:11:18 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: reapply PyPy workarounds to fix build_ext Message-ID: <20140726211118.BF1DD1C046A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72548:f43f86638f34 Date: 2014-07-26 14:06 -0700 http://bitbucket.org/pypy/pypy/changeset/f43f86638f34/ Log: reapply PyPy workarounds to fix build_ext diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py --- a/lib-python/3/distutils/command/build_ext.py +++ b/lib-python/3/distutils/command/build_ext.py @@ -4,11 +4,10 @@ modules (currently limited to C extensions, should accommodate C++ extensions ASAP).""" -import sys, os, re +import sys, os, re, imp from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version -from distutils.sysconfig import get_config_h_filename from distutils.dep_util import newer_group from distutils.extension import Extension from distutils.util import get_platform @@ -36,6 +35,11 @@ from distutils.ccompiler import show_compilers show_compilers() +def _get_c_extension_suffix(): + for ext, mod, typ in imp.get_suffixes(): + if typ == imp.C_EXTENSION: + return ext + class build_ext(Command): @@ -204,11 +208,16 @@ # Append the source distribution include and library directories, # this allows distutils on windows to work in the source tree - self.include_dirs.append(os.path.dirname(get_config_h_filename())) + if 0: + # pypy has no config_h_filename directory + self.include_dirs.append(os.path.dirname(get_config_h_filename())) 
_sys_home = getattr(sys, '_home', None) if _sys_home: self.library_dirs.append(_sys_home) - if MSVC_VERSION >= 9: + if 1: + # pypy has no PCBuild directory + pass + elif MSVC_VERSION >= 9: # Use the .lib files for the correct architecture if self.plat_name == 'win32': suffix = '' @@ -675,10 +684,18 @@ # OS/2 has an 8 character module (extension) limit :-( if os.name == "os2": ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8] + # PyPy tweak: first try to get the C extension suffix from + # 'imp'. If it fails we fall back to the 'SO' config var, like + # the previous version of this code did. This should work for + # CPython too. The point is that on PyPy with cpyext, the + # config var 'SO' is just ".so" but we want to return + # ".pypy-VERSION.so" instead. + ext_suffix = _get_c_extension_suffix() + if ext_suffix is None: + ext_suffix = get_config_var('EXT_SUFFIX') # fall-back # extensions in debug_mode are named 'module_d.pyd' under windows - ext_suffix = get_config_var('EXT_SUFFIX') if os.name == 'nt' and self.debug: - return os.path.join(*ext_path) + '_d' + ext_suffix + ext_suffix = '_d.pyd' return os.path.join(*ext_path) + ext_suffix def get_export_symbols(self, ext): From noreply at buildbot.pypy.org Sat Jul 26 23:40:44 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 26 Jul 2014 23:40:44 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fix function identifier clashing breaking translation Message-ID: <20140726214044.B33751C03AC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72549:e6da813670d7 Date: 2014-07-26 14:40 -0700 http://bitbucket.org/pypy/pypy/changeset/e6da813670d7/ Log: fix function identifier clashing breaking translation diff --git a/pypy/interpreter/special.py b/pypy/interpreter/special.py --- a/pypy/interpreter/special.py +++ b/pypy/interpreter/special.py @@ -2,10 +2,20 @@ class Ellipsis(W_Root): + + @staticmethod + def descr_new_ellipsis(space, w_type): + return space.w_Ellipsis + def 
descr__repr__(self, space): return space.wrap('Ellipsis') class NotImplemented(W_Root): + + @staticmethod + def descr_new_notimplemented(space, w_type): + return space.w_NotImplemented + def descr__repr__(self, space): return space.wrap('NotImplemented') diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -950,13 +950,13 @@ Cell.typedef.acceptable_as_base_class = False Ellipsis.typedef = TypeDef("Ellipsis", - __new__ = interp2app(lambda space, w_type: space.w_Ellipsis), + __new__ = interp2app(Ellipsis.descr_new_ellipsis), __repr__ = interp2app(Ellipsis.descr__repr__), ) Ellipsis.typedef.acceptable_as_base_class = False NotImplemented.typedef = TypeDef("NotImplemented", - __new__ = interp2app(lambda space, w_type: space.w_NotImplemented), + __new__ = interp2app(NotImplemented.descr_new_notimplemented), __repr__ = interp2app(NotImplemented.descr__repr__), ) NotImplemented.typedef.acceptable_as_base_class = False From noreply at buildbot.pypy.org Sun Jul 27 10:25:54 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 27 Jul 2014 10:25:54 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: add _csv test_delimiter test and replicate cpython error handling Message-ID: <20140727082554.7EFC91C024A@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72550:44f52293da32 Date: 2014-07-27 10:15 +0200 http://bitbucket.org/pypy/pypy/changeset/44f52293da32/ Log: add _csv test_delimiter test and replicate cpython error handling diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -49,6 +49,8 @@ return default if space.is_w(w_src, space.w_None): return u'\0' + if not space.isinstance_w(w_src, space.w_unicode): + raise oefmt(space.w_TypeError, '"%s" must be string, not %T', name, w_src) src = space.unicode_w(w_src) if len(src) == 1: return src[0] @@ -109,7 +111,7 
@@ if dialect.delimiter == u'\0': raise OperationError(space.w_TypeError, - space.wrap('delimiter must be set')) + space.wrap('"delimiter" must be a 1-character string')) if space.is_w(w_quotechar, space.w_None) and w_quoting is None: tmp_quoting = QUOTE_NONE diff --git a/pypy/module/_csv/test/test_dialect.py b/pypy/module/_csv/test/test_dialect.py --- a/pypy/module/_csv/test/test_dialect.py +++ b/pypy/module/_csv/test/test_dialect.py @@ -80,6 +80,21 @@ _csv.register_dialect('foo1', strict=_csv) # :-/ assert _csv.get_dialect('foo1').strict == True + def test_delimiter(self): + import _csv + + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=":::") + assert exc_info.value.args[0] == '"delimiter" must be a 1-character string' + + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter="") + assert exc_info.value.args[0] == '"delimiter" must be a 1-character string' + + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=b",") + assert exc_info.value.args[0] == '"delimiter" must be string, not bytes' + + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=4) + assert exc_info.value.args[0] == '"delimiter" must be string, not int' + def test_line_terminator(self): # lineterminator can be the empty string import _csv From noreply at buildbot.pypy.org Sun Jul 27 10:25:55 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 27 Jul 2014 10:25:55 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: add _csv test_quotechar Message-ID: <20140727082555.B43B41C024A@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72551:d43ba084a258 Date: 2014-07-27 10:20 +0200 http://bitbucket.org/pypy/pypy/changeset/d43ba084a258/ Log: add _csv test_quotechar diff --git a/pypy/module/_csv/test/test_dialect.py b/pypy/module/_csv/test/test_dialect.py --- a/pypy/module/_csv/test/test_dialect.py +++ b/pypy/module/_csv/test/test_dialect.py @@ -80,6 +80,12 @@ _csv.register_dialect('foo1', 
strict=_csv) # :-/ assert _csv.get_dialect('foo1').strict == True + def test_quotechar(self): + import _csv + + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', quotechar=4) + assert exc_info.value.args[0] == '"quotechar" must be string, not int' + def test_delimiter(self): import _csv From noreply at buildbot.pypy.org Sun Jul 27 11:40:26 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sun, 27 Jul 2014 11:40:26 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: additional tests: old obj points to old obj pointing to pinned one. Message-ID: <20140727094026.D40011C024A@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72552:203c544e7906 Date: 2014-07-27 11:38 +0200 http://bitbucket.org/pypy/pypy/changeset/203c544e7906/ Log: additional tests: old obj points to old obj pointing to pinned one. In addition added some 'assert False' to be sure exceptions are thrown. diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -83,7 +83,11 @@ # unpin and check if object is gone from nursery self.gc.unpin(adr) collect_func() - py.test.raises(RuntimeError, 'ptr.someInt') + try: + assert ptr.someInt == 100 + assert False + except RuntimeError as ex: + assert "freed" in str(ex) # check if we object is still accessible ptr_old = self.stackroots[0] @@ -116,6 +120,7 @@ # be gone try: next_ptr.someInt = 101 + assert False except RuntimeError as ex: assert "freed" in str(ex) @@ -157,6 +162,7 @@ try: # should fail as this was the pinned object that is now collected next_ptr.someInt = 0 + assert False except RuntimeError as ex: assert "freed" in str(ex) @@ -220,6 +226,117 @@ def test_young_and_stackroots_point_to_pinned(self): self.not_pinned_and_stackroots_point_to_pinned(make_old=False) + def test_old_points_to_old_points_to_pinned_1(self): + # + # 
Scenario: + # stackroots points to 'root_ptr'. 'root_ptr' points to 'next_ptr'. + # 'next_ptr' points to the young and pinned 'pinned_ptr'. Here we + # remove the reference to 'next_ptr' from 'root_ptr' and check if it + # behaves as expected. + # + root_ptr = self.malloc(S) + root_ptr.someInt = 100 + self.stackroots.append(root_ptr) + self.gc.collect() + root_ptr = self.stackroots[0] + # + next_ptr = self.malloc(S) + next_ptr.someInt = 200 + self.write(root_ptr, 'next', next_ptr) + self.gc.collect() + next_ptr = root_ptr.next + # + # check if everything is as expected + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr)) + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(next_ptr)) + assert root_ptr.someInt == 100 + assert next_ptr.someInt == 200 + # + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 300 + assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) + self.write(next_ptr, 'next', pinned_ptr) + self.gc.collect() + # + # validate everything is as expected with 3 rounds of GC collecting + for _ in range(3): + self.gc.collect() + assert next_ptr.next == pinned_ptr + assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) + assert pinned_ptr.someInt == 300 + assert root_ptr.someInt == 100 + assert next_ptr.someInt == 200 + # + # remove the reference to the pinned object + self.write(next_ptr, 'next', root_ptr) + self.gc.minor_collection() + # the minor collection visits all old objects pointing to pinned ones. + # therefore the pinned object should be gone + try: + pinned_ptr.someInt == 300 + assert False + except RuntimeError as ex: + assert "freed" in str(ex) + + def test_old_points_to_old_points_to_pinned_2(self): + # + # Scenario: + # stackroots points to 'root_ptr'. 'root_ptr' points to 'next_ptr'. + # 'next_ptr' points to the young and pinned 'pinned_ptr'. Here we + # remove 'root_ptr' from the stackroots and check if it behaves as + # expected. 
+ # + root_ptr = self.malloc(S) + root_ptr.someInt = 100 + self.stackroots.append(root_ptr) + self.gc.collect() + root_ptr = self.stackroots[0] + # + next_ptr = self.malloc(S) + next_ptr.someInt = 200 + self.write(root_ptr, 'next', next_ptr) + self.gc.collect() + next_ptr = root_ptr.next + # + # check if everything is as expected + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr)) + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(next_ptr)) + assert root_ptr.someInt == 100 + assert next_ptr.someInt == 200 + # + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 300 + assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) + self.write(next_ptr, 'next', pinned_ptr) + self.gc.collect() + # + # validate everything is as expected with 3 rounds of GC collecting + for _ in range(3): + self.gc.collect() + assert next_ptr.next == pinned_ptr + assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) + assert pinned_ptr.someInt == 300 + assert root_ptr.someInt == 100 + assert next_ptr.someInt == 200 + # + # remove the root from stackroots + self.stackroots.remove(root_ptr) + self.gc.minor_collection() + # + # the minor collection will still visit 'next_ptr', although + # 'root_ptr' is not part of the stackroots anymore. 
This makes + # sense as 'next_ptr' is removed only in the next major collection + assert next_ptr.next.someInt == 300 + # + # now we do a major collection and everything should be gone + self.gc.collect() + try: + pinned_ptr.someInt == 300 + assert False + except RuntimeError as ex: + assert "freed" in str(ex) + + def test_pin_old(self): ptr = self.malloc(S) ptr.someInt = 100 @@ -535,7 +652,11 @@ assert self.gc.pinned_objects_in_nursery == 2 assert ptr_stackroot_1.someInt == 100 assert ptr_stackroot_2.someInt == 100 - py.test.raises(RuntimeError, 'ptr_not_stackroot.someInt') # should be freed + try: + ptr_not_stackroot.someInt = 100 + assert False + except RuntimeError as ex: + assert "freed" in str(ex) def fill_nursery_with_pinned_objects(self): typeid = self.get_type_id(S) From noreply at buildbot.pypy.org Sun Jul 27 11:54:53 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sun, 27 Jul 2014 11:54:53 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Handle optimize=1 in calls to compile builtin function (remove asserts). Message-ID: <20140727095453.9D3F11C0588@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72553:52aa5e85cfdf Date: 2014-07-27 11:53 +0200 http://bitbucket.org/pypy/pypy/changeset/52aa5e85cfdf/ Log: Handle optimize=1 in calls to compile builtin function (remove asserts). 
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -435,9 +435,12 @@ self.error("illegal expression for augmented assignment", assign) def visit_Assert(self, asrt): + if self.compile_info.optimize >= 1: + return self.update_position(asrt.lineno) end = self.new_block() - self.emit_jump(ops.JUMP_IF_NOT_DEBUG, end) + if self.compile_info.optimize != 0: + self.emit_jump(ops.JUMP_IF_NOT_DEBUG, end) asrt.test.accept_jump_if(self, True, end) self.emit_op_name(ops.LOAD_GLOBAL, self.names, "AssertionError") if asrt.msg: diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -106,7 +106,7 @@ self.additional_rules = {} self.compiler_flags = self.future_flags.allowed_flags - def compile_ast(self, node, filename, mode, flags): + def compile_ast(self, node, filename, mode, flags, optimize=-1): if mode == 'eval': check = isinstance(node, ast.Expression) elif mode == 'exec': @@ -123,7 +123,8 @@ f_flags, f_lineno, f_col = fut future_pos = f_lineno, f_col flags |= f_flags - info = pyparse.CompileInfo(filename, mode, flags, future_pos) + info = pyparse.CompileInfo(filename, mode, flags, future_pos, + optimize=optimize) return self._compile_ast(node, info) def _compile_ast(self, node, info): @@ -163,8 +164,9 @@ e.wrap_info(space)) return mod - def compile(self, source, filename, mode, flags, hidden_applevel=False): + def compile(self, source, filename, mode, flags, hidden_applevel=False, + optimize=-1): info = pyparse.CompileInfo(filename, mode, flags, - hidden_applevel=hidden_applevel) + hidden_applevel=hidden_applevel, optimize=optimize) mod = self._compile_to_ast(source, info) return self._compile_ast(mod, info) diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py --- a/pypy/interpreter/pyparser/pyparse.py +++ 
b/pypy/interpreter/pyparser/pyparse.py @@ -69,15 +69,21 @@ import. * hidden_applevel: Will this code unit and sub units be hidden at the applevel? + * optimize: optimization level: + -1 = same as interpreter, + 0 = no optmiziation, + 1 = remove asserts, + 2 = remove docstrings. """ def __init__(self, filename, mode="exec", flags=0, future_pos=(0, 0), - hidden_applevel=False): + hidden_applevel=False, optimize=-1): rstring.check_str0(filename) self.filename = filename self.mode = mode self.encoding = None self.flags = flags + self.optimize = optimize self.last_future_import = future_pos self.hidden_applevel = hidden_applevel diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -43,13 +43,12 @@ space.w_ValueError, space.wrap("compile() arg 3 must be 'exec', 'eval' or 'single'")) - # XXX: optimize flag is not used - if space.isinstance_w(w_source, space.gettypeobject(ast.AST.typedef)): ast_node = space.interp_w(ast.mod, w_source) ast_node.sync_app_attrs(space) ec.compiler.validate_ast(ast_node) - code = ec.compiler.compile_ast(ast_node, filename, mode, flags) + code = ec.compiler.compile_ast(ast_node, filename, mode, flags, + optimize=optimize) return space.wrap(code) flags |= consts.PyCF_SOURCE_IS_UTF8 @@ -59,7 +58,8 @@ if flags & consts.PyCF_ONLY_AST: code = ec.compiler.compile_to_ast(source, filename, mode, flags) else: - code = ec.compiler.compile(source, filename, mode, flags) + code = ec.compiler.compile(source, filename, mode, flags, + optimize=optimize) return space.wrap(code) diff --git a/pypy/module/__builtin__/test/test_compile.py b/pypy/module/__builtin__/test/test_compile.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_compile.py @@ -0,0 +1,58 @@ +class AppTestCompile: + + # TODO: This test still fails for now because the docstrings are not + # removed with optimize=2. 
+ def untest_compile(self): + import ast + + codestr = '''def f(): + """doc""" + try: + assert False + except AssertionError: + return (True, f.__doc__) + else: + return (False, f.__doc__) + ''' + + def f(): """doc""" + values = [(-1, __debug__, f.__doc__), + (0, True, 'doc'), + (1, False, 'doc'), + (2, False, None)] + + for optval, debugval, docstring in values: + # test both direct compilation and compilation via AST + codeobjs = [] + codeobjs.append( + compile(codestr, "", "exec", optimize=optval)) + tree = ast.parse(codestr) + codeobjs.append(compile(tree, "", "exec", optimize=optval)) + + for i, code in enumerate(codeobjs): + print(optval, debugval, docstring, i) + ns = {} + exec(code, ns) + rv = ns['f']() + assert rv == (debugval, docstring) + + def test_assert_remove(self): + """Test just removal of the asserts with optimize=1.""" + import ast + + code = """def f(): + assert False + """ + tree = ast.parse(code) + for to_compile in [code, tree]: + compiled = compile(to_compile, "", "exec", optimize=1) + ns = {} + exec(compiled, ns) + ns['f']() + + +# TODO: Remove docstrings with optimize=2. +# TODO: Check the value of __debug__ inside of the compiled block! +# According to the documentation, it should follow the optimize flag. +# TODO: It would also be good to test that with the assert is not removed and +# is executed when -O flag is set but optimize=0. 
From noreply at buildbot.pypy.org Sun Jul 27 11:56:15 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 27 Jul 2014 11:56:15 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fix repr test_builtin_function to accept pypy repr for string split method Message-ID: <20140727095615.F39AE1C332E@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72554:f1c6ae47c0d4 Date: 2014-07-27 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/f1c6ae47c0d4/ Log: fix repr test_builtin_function to accept pypy repr for string split method diff --git a/lib-python/3/test/test_reprlib.py b/lib-python/3/test/test_reprlib.py --- a/lib-python/3/test/test_reprlib.py +++ b/lib-python/3/test/test_reprlib.py @@ -140,8 +140,12 @@ # Functions eq(repr(hash), '') # Methods - self.assertTrue(repr(''.split).startswith( - '", + ))) def test_range(self): eq = self.assertEqual From noreply at buildbot.pypy.org Sun Jul 27 11:56:17 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 27 Jul 2014 11:56:17 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fix repr test_descriptors to accept pypy repr for dict.items Message-ID: <20140727095617.4CCDB1C332E@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72555:c0bb7244f113 Date: 2014-07-27 11:08 +0200 http://bitbucket.org/pypy/pypy/changeset/c0bb7244f113/ Log: fix repr test_descriptors to accept pypy repr for dict.items diff --git a/lib-python/3/test/test_reprlib.py b/lib-python/3/test/test_reprlib.py --- a/lib-python/3/test/test_reprlib.py +++ b/lib-python/3/test/test_reprlib.py @@ -182,9 +182,13 @@ self.assertRegex(r(x), r'') def test_descriptors(self): - eq = self.assertEqual # method descriptors - eq(repr(dict.items), "") + self.assertTrue(any(( + # cpython + repr(dict.items) == "", + # pypy + repr(dict.items).startswith(" Author: Martin Matusiak Branch: py3.3 Changeset: r72556:c799b96b7ec5 Date: 2014-07-27 11:45 +0200 http://bitbucket.org/pypy/pypy/changeset/c799b96b7ec5/ Log: 
add custom __repr__ to Cell diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -71,6 +71,14 @@ return "<%s(%s) at 0x%x>" % (self.__class__.__name__, content, uid(self)) + def descr__repr__(self, space): + if self.w_value is None: + content = "empty" + else: + content = "%s object at 0x%x" % (space.type(self.w_value).name, uid(self.w_value)) + s = "" % (uid(self), content) + return space.wrap(s.decode('utf-8')) + def descr__cell_contents(self, space): try: return self.get() diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -59,6 +59,28 @@ def test_lambda_in_genexpr(self): assert [x() for x in (lambda: x for x in range(10))] == list(range(10)) + def test_cell_repr(self): + import re + from reprlib import repr as r # Don't shadow builtin repr + + def get_cell(): + x = 42 + def inner(): + return x + return inner + x = get_cell().__closure__[0] + assert re.match(r'', repr(x)) + assert re.match(r'', r(x)) + + def get_cell(): + if False: + x = 42 + def inner(): + return x + return inner + x = get_cell().__closure__[0] + assert re.match(r'', repr(x)) + def test_cell_contents(self): def f(x): def f(y): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -944,6 +944,7 @@ __eq__ = interp2app(Cell.descr__eq__), __hash__ = None, __reduce__ = interp2app(Cell.descr__reduce__), + __repr__ = interp2app(Cell.descr__repr__), __setstate__ = interp2app(Cell.descr__setstate__), cell_contents= GetSetProperty(Cell.descr__cell_contents, cls=Cell), ) From noreply at buildbot.pypy.org Sun Jul 27 11:56:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Jul 2014 11:56:19 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in 
numerodix/pypy/py3.3 (pull request #252) Message-ID: <20140727095619.A8C881C332E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3.3 Changeset: r72557:d4d93a364ca7 Date: 2014-07-27 11:55 +0200 http://bitbucket.org/pypy/pypy/changeset/d4d93a364ca7/ Log: Merged in numerodix/pypy/py3.3 (pull request #252) py3.3 fixes for reprlib tests diff --git a/lib-python/3/test/test_reprlib.py b/lib-python/3/test/test_reprlib.py --- a/lib-python/3/test/test_reprlib.py +++ b/lib-python/3/test/test_reprlib.py @@ -140,8 +140,12 @@ # Functions eq(repr(hash), '') # Methods - self.assertTrue(repr(''.split).startswith( - '", + ))) def test_range(self): eq = self.assertEqual @@ -178,9 +182,13 @@ self.assertRegex(r(x), r'') def test_descriptors(self): - eq = self.assertEqual # method descriptors - eq(repr(dict.items), "") + self.assertTrue(any(( + # cpython + repr(dict.items) == "", + # pypy + repr(dict.items).startswith("" % (self.__class__.__name__, content, uid(self)) + def descr__repr__(self, space): + if self.w_value is None: + content = "empty" + else: + content = "%s object at 0x%x" % (space.type(self.w_value).name, uid(self.w_value)) + s = "" % (uid(self), content) + return space.wrap(s.decode('utf-8')) + def descr__cell_contents(self, space): try: return self.get() diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -59,6 +59,28 @@ def test_lambda_in_genexpr(self): assert [x() for x in (lambda: x for x in range(10))] == list(range(10)) + def test_cell_repr(self): + import re + from reprlib import repr as r # Don't shadow builtin repr + + def get_cell(): + x = 42 + def inner(): + return x + return inner + x = get_cell().__closure__[0] + assert re.match(r'', repr(x)) + assert re.match(r'', r(x)) + + def get_cell(): + if False: + x = 42 + def inner(): + return x + return inner + x = get_cell().__closure__[0] + assert re.match(r'', 
repr(x)) + def test_cell_contents(self): def f(x): def f(y): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -944,6 +944,7 @@ __eq__ = interp2app(Cell.descr__eq__), __hash__ = None, __reduce__ = interp2app(Cell.descr__reduce__), + __repr__ = interp2app(Cell.descr__repr__), __setstate__ = interp2app(Cell.descr__setstate__), cell_contents= GetSetProperty(Cell.descr__cell_contents, cls=Cell), ) From noreply at buildbot.pypy.org Sun Jul 27 12:22:10 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:10 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Fixed printing of ByteObjects. Message-ID: <20140727102210.C92681C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r947:d8f09e784dd7 Date: 2014-07-24 19:23 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d8f09e784dd7/ Log: Fixed printing of ByteObjects. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -823,8 +823,7 @@ if self.has_class() and self.w_class.has_space(): if self.w_class.space().omit_printing_raw_bytes.is_set(): return "" - else: - return "'%s'" % self.as_string().replace('\r', '\n') + return "'%s'" % self.as_string().replace('\r', '\n') def as_string(self): if self.bytes is not None: From noreply at buildbot.pypy.org Sun Jul 27 12:22:11 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:11 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-interpreter-refactoring: Intermediate commit. Message-ID: <20140727102211.E609D1C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-interpreter-refactoring Changeset: r948:3d889c32a173 Date: 2014-07-25 08:55 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3d889c32a173/ Log: Intermediate commit. Added message to all SmalltalkExceptions, catching and printing at toplevel. 
Extracted interpreter_debugging.py and interpreter_bytecodes.py. diff too long, truncating to 2000 out of 2010 lines diff --git a/spyvm/error.py b/spyvm/error.py --- a/spyvm/error.py +++ b/spyvm/error.py @@ -1,30 +1,42 @@ -# some exception classes for the Smalltalk VM + +# Some exception classes for the Smalltalk VM class SmalltalkException(Exception): """Base class for Smalltalk exception hierarchy""" + exception_type = "SmalltalkException" + _attrs_ = ["msg"] + def __init__(self, msg=""): + self.msg = msg class PrimitiveFailedError(SmalltalkException): - pass + exception_type = "PrimitiveFailedError" class PrimitiveNotYetWrittenError(PrimitiveFailedError): - pass + exception_type = "PrimitiveNotYetWrittenError" class UnwrappingError(PrimitiveFailedError): - pass + exception_type = "UnwrappingError" class WrappingError(PrimitiveFailedError): - pass + exception_type = "WrappingError" class WrapperException(SmalltalkException): - def __init__(self, msg): - self.msg = msg + exception_type = "WrapperException" class FatalError(SmalltalkException): - def __init__(self, msg): - self.msg = msg + exception_type = "FatalError" class BlockCannotReturnError(SmalltalkException): - pass + exception_type = "BlockCannotReturnError" + +class MethodNotFound(SmalltalkException): + exception_type = "MethodNotFound" + +class MissingBytecode(SmalltalkException): + """Bytecode not implemented yet.""" + exception_type = "MissingBytecode" + def __init__(self, bytecodename): + SmalltalkException.__init__(self, "Missing bytecode encountered: %s" % bytecodename) class Exit(Exception): _attrs_ = ["msg"] diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -1,26 +1,60 @@ -import py import os -from spyvm.shadow import ContextPartShadow, MethodContextShadow, BlockContextShadow, MethodNotFound -from spyvm import model, constants, primitives, conftest, wrapper, objspace -from spyvm.tool.bitmanipulation import splitter -from rpython.rlib 
import jit, rstackovf -from rpython.rlib import objectmodel, unroll +from spyvm.shadow import MethodContextShadow +from spyvm import model, constants, wrapper, objspace, interpreter_bytecodes -class MissingBytecode(Exception): - """Bytecode not implemented yet.""" - def __init__(self, bytecodename): - self.bytecodename = bytecodename - print "MissingBytecode:", bytecodename # hack for debugging +from rpython.rlib import jit, rstackovf, unroll -class IllegalStoreError(Exception): - """Illegal Store.""" +class ReturnFromTopLevel(Exception): + _attrs_ = ["object"] + def __init__(self, object): + self.object = object + +class Return(Exception): + _attrs_ = ["value", "s_target_context", "is_local"] + def __init__(self, s_target_context, w_result): + self.value = w_result + self.s_target_context = s_target_context + self.is_local = False + +class ContextSwitchException(Exception): + """General Exception that causes the interpreter to leave + the current context.""" + + _attrs_ = ["s_new_context"] + type = "ContextSwitch" + def __init__(self, s_new_context): + self.s_new_context = s_new_context + + def print_trace(self, old_context): + print "====== %s, contexts forced to heap at: %s" % (self.type, self.s_new_context.short_str()) + +class StackOverflow(ContextSwitchException): + """This causes the current jit-loop to be left, dumping all virtualized objects to the heap. + This breaks performance, so it should rarely happen. + In case of severe performance problems, execute with -t and check if this occurrs.""" + type = "Stack Overflow" + +class ProcessSwitch(ContextSwitchException): + """This causes the interpreter to switch the executed context. 
+ Triggered when switching the process.""" + + def print_trace(self, old_context): + print "====== Switched process from: %s" % old_context.short_str() + print "====== to: %s " % self.s_new_context.short_str() + +class SenderChainManipulation(ContextSwitchException): + """Manipulation of the sender chain can invalidate the jitted C stack. + We have to dump all virtual objects and rebuild the stack. + We try to raise this as rarely as possible and as late as possible.""" + type = "Sender Manipulation" + +UNROLLING_BYTECODE_RANGES = unroll.unrolling_iterable(interpreter_bytecodes.BYTECODE_RANGES) def get_printable_location(pc, self, method): bc = ord(method.bytes[pc]) name = method.safe_identifier_string() - return '(%s) [%d]: <%s>%s' % (name, pc, hex(bc), BYTECODE_NAMES[bc]) - + return '(%s) [%d]: <%s>%s' % (name, pc, hex(bc), interpreter_bytecodes.BYTECODE_NAMES[bc]) class Interpreter(object): _immutable_fields_ = ["space", "image", @@ -218,7 +252,7 @@ s_frame.push_all(list(w_arguments)) return s_frame - # ============== Methods for tracing, printing and debugging ============== + # ============== Methods for tracing and printing ============== def is_tracing(self): return jit.promote(self.trace) @@ -226,875 +260,6 @@ def print_padded(self, str): assert self.is_tracing() print (' ' * self.stack_depth) + str - - def activate_debug_bytecode(self): - "NOT_RPYTHON" - def do_break(self): - import pdb - if self.break_on_bytecodes: - pdb.set_trace() - Interpreter.debug_bytecode = do_break - self.break_on_bytecodes = True - - def debug_bytecode(self): - # This is for debugging. 
In a pdb console, execute the following: - # self.activate_debug_bytecode() - pass -class ReturnFromTopLevel(Exception): - _attrs_ = ["object"] - def __init__(self, object): - self.object = object - -class Return(Exception): - _attrs_ = ["value", "s_target_context", "is_local"] - def __init__(self, s_target_context, w_result): - self.value = w_result - self.s_target_context = s_target_context - self.is_local = False - -class ContextSwitchException(Exception): - """General Exception that causes the interpreter to leave - the current context.""" - - _attrs_ = ["s_new_context"] - type = "ContextSwitch" - def __init__(self, s_new_context): - self.s_new_context = s_new_context - - def print_trace(self, old_context): - print "====== %s, contexts forced to heap at: %s" % (self.type, self.s_new_context.short_str()) - -class StackOverflow(ContextSwitchException): - """This causes the current jit-loop to be left, dumping all virtualized objects to the heap. - This breaks performance, so it should rarely happen. - In case of severe performance problems, execute with -t and check if this occurrs.""" - type = "Stack Overflow" - -class ProcessSwitch(ContextSwitchException): - """This causes the interpreter to switch the executed context. - Triggered when switching the process.""" - - def print_trace(self, old_context): - print "====== Switched process from: %s" % old_context.short_str() - print "====== to: %s " % self.s_new_context.short_str() - -class SenderChainManipulation(ContextSwitchException): - """Manipulation of the sender chain can invalidate the jitted C stack. - We have to dump all virtual objects and rebuild the stack. 
- We try to raise this as rarely as possible and as late as possible.""" - type = "Sender Manipulation" - -import rpython.rlib.unroll -if hasattr(unroll, "unrolling_zero"): - unrolling_zero = unroll.unrolling_zero -else: - class unrolling_int(int, unroll.SpecTag): - def __add__(self, other): - return unrolling_int(int.__add__(self, other)) - __radd__ = __add__ - def __sub__(self, other): - return unrolling_int(int.__sub__(self, other)) - def __rsub__(self, other): - return unrolling_int(int.__rsub__(self, other)) - unrolling_zero = unrolling_int(0) - - -# This is a decorator for bytecode implementation methods. -# parameter_bytes=N means N additional bytes are fetched as parameters. -def bytecode_implementation(parameter_bytes=0): - def bytecode_implementation_decorator(actual_implementation_method): - @jit.unroll_safe - def bytecode_implementation_wrapper(self, interp, current_bytecode): - parameters = () - i = unrolling_zero - while i < parameter_bytes: - parameters += (self.fetch_next_bytecode(), ) - i = i + 1 - # This is a good place to step through bytecodes. - interp.debug_bytecode() - return actual_implementation_method(self, interp, current_bytecode, *parameters) - bytecode_implementation_wrapper.func_name = actual_implementation_method.func_name - return bytecode_implementation_wrapper - return bytecode_implementation_decorator - -def make_call_primitive_bytecode(primitive, selector, argcount, store_pc=False): - func = primitives.prim_table[primitive] - @bytecode_implementation() - def callPrimitive(self, interp, current_bytecode): - # WARNING: this is used for bytecodes for which it is safe to - # directly call the primitive. In general, it is not safe: for - # example, depending on the type of the receiver, bytecodePrimAt - # may invoke primitives.AT, primitives.STRING_AT, or anything - # else that the user put in a class in an 'at:' method. - # The rule of thumb is that primitives with only int and float - # in their unwrap_spec are safe. 
- try: - return func(interp, self, argcount) - except primitives.PrimitiveFailedError: - pass - return self._sendSelfSelectorSpecial(selector, argcount, interp) - callPrimitive.func_name = "callPrimitive_%s" % func.func_name - return callPrimitive - -def make_call_primitive_bytecode_classbased(a_class_name, a_primitive, alternative_class_name, alternative_primitive, selector, argcount): - @bytecode_implementation() - def callClassbasedPrimitive(self, interp, current_bytecode): - rcvr = self.peek(argcount) - receiver_class = rcvr.getclass(self.space) - try: - if receiver_class is getattr(self.space, a_class_name): - func = primitives.prim_table[a_primitive] - return func(interp, self, argcount) - elif receiver_class is getattr(self.space, alternative_class_name): - func = primitives.prim_table[alternative_primitive] - return func(interp, self, argcount) - except primitives.PrimitiveFailedError: - pass - return self._sendSelfSelectorSpecial(selector, argcount, interp) - callClassbasedPrimitive.func_name = "callClassbasedPrimitive_%s" % selector - return callClassbasedPrimitive - -# Some selectors cannot be overwritten, therefore no need to handle PrimitiveFailed. -def make_quick_call_primitive_bytecode(primitive_index, argcount): - func = primitives.prim_table[primitive_index] - @bytecode_implementation() - def quick_call_primitive_bytecode(self, interp, current_bytecode): - return func(interp, self, argcount) - return quick_call_primitive_bytecode - -# This is for bytecodes that actually implement a simple message-send. -# We do not optimize anything for these cases. 
-def make_send_selector_bytecode(selector, argcount): - @bytecode_implementation() - def selector_bytecode(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial(selector, argcount, interp) - selector_bytecode.func_name = "selector_bytecode_%s" % selector - return selector_bytecode - -# ___________________________________________________________________________ -# Bytecode Implementations: -# -# "self" is always a ContextPartShadow instance. - -# __extend__ adds new methods to the ContextPartShadow class -class __extend__(ContextPartShadow): - - # ====== Push/Pop bytecodes ====== - - @bytecode_implementation() - def pushReceiverVariableBytecode(self, interp, current_bytecode): - index = current_bytecode & 15 - self.push(self.w_receiver().fetch(self.space, index)) - - @bytecode_implementation() - def pushTemporaryVariableBytecode(self, interp, current_bytecode): - index = current_bytecode & 15 - self.push(self.gettemp(index)) - - @bytecode_implementation() - def pushLiteralConstantBytecode(self, interp, current_bytecode): - index = current_bytecode & 31 - self.push(self.w_method().getliteral(index)) - - @bytecode_implementation() - def pushLiteralVariableBytecode(self, interp, current_bytecode): - # this bytecode assumes that literals[index] is an Association - # which is an object with two named vars, and fetches the second - # named var (the value). 
- index = current_bytecode & 31 - w_association = self.w_method().getliteral(index) - association = wrapper.AssociationWrapper(self.space, w_association) - self.push(association.value()) - - @bytecode_implementation() - def storeAndPopReceiverVariableBytecode(self, interp, current_bytecode): - index = current_bytecode & 7 - self.w_receiver().store(self.space, index, self.pop()) - - @bytecode_implementation() - def storeAndPopTemporaryVariableBytecode(self, interp, current_bytecode): - index = current_bytecode & 7 - self.settemp(index, self.pop()) - - @bytecode_implementation() - def pushReceiverBytecode(self, interp, current_bytecode): - self.push(self.w_receiver()) - - @bytecode_implementation() - def pushConstantTrueBytecode(self, interp, current_bytecode): - self.push(interp.space.w_true) - - @bytecode_implementation() - def pushConstantFalseBytecode(self, interp, current_bytecode): - self.push(interp.space.w_false) - - @bytecode_implementation() - def pushConstantNilBytecode(self, interp, current_bytecode): - self.push(interp.space.w_nil) - - @bytecode_implementation() - def pushConstantMinusOneBytecode(self, interp, current_bytecode): - self.push(interp.space.w_minus_one) - - @bytecode_implementation() - def pushConstantZeroBytecode(self, interp, current_bytecode): - self.push(interp.space.w_zero) - - @bytecode_implementation() - def pushConstantOneBytecode(self, interp, current_bytecode): - self.push(interp.space.w_one) - - @bytecode_implementation() - def pushConstantTwoBytecode(self, interp, current_bytecode): - self.push(interp.space.w_two) - - @bytecode_implementation() - def pushActiveContextBytecode(self, interp, current_bytecode): - self.push(self.w_self()) - - @bytecode_implementation() - def duplicateTopBytecode(self, interp, current_bytecode): - self.push(self.top()) - - @bytecode_implementation() - def popStackBytecode(self, interp, current_bytecode): - self.pop() - - @bytecode_implementation(parameter_bytes=1) - def pushNewArrayBytecode(self, 
interp, current_bytecode, descriptor): - arraySize, popIntoArray = splitter[7, 1](descriptor) - newArray = None - if popIntoArray == 1: - newArray = interp.space.wrap_list(self.pop_and_return_n(arraySize)) - else: - newArray = interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) - self.push(newArray) - - # ====== Extended Push/Pop bytecodes ====== - - def _extendedVariableTypeAndIndex(self, descriptor): - return ((descriptor >> 6) & 3), (descriptor & 63) - - @bytecode_implementation(parameter_bytes=1) - def extendedPushBytecode(self, interp, current_bytecode, descriptor): - variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) - if variableType == 0: - self.push(self.w_receiver().fetch(self.space, variableIndex)) - elif variableType == 1: - self.push(self.gettemp(variableIndex)) - elif variableType == 2: - self.push(self.w_method().getliteral(variableIndex)) - elif variableType == 3: - w_association = self.w_method().getliteral(variableIndex) - association = wrapper.AssociationWrapper(self.space, w_association) - self.push(association.value()) - else: - assert 0 - - def _extendedStoreBytecode(self, interp, current_bytecode, descriptor): - variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) - if variableType == 0: - self.w_receiver().store(self.space, variableIndex, self.top()) - elif variableType == 1: - self.settemp(variableIndex, self.top()) - elif variableType == 2: - raise IllegalStoreError - elif variableType == 3: - w_association = self.w_method().getliteral(variableIndex) - association = wrapper.AssociationWrapper(self.space, w_association) - association.store_value(self.top()) - - @bytecode_implementation(parameter_bytes=1) - def extendedStoreBytecode(self, interp, current_bytecode, descriptor): - return self._extendedStoreBytecode(interp, current_bytecode, descriptor) - - @bytecode_implementation(parameter_bytes=1) - def extendedStoreAndPopBytecode(self, interp, current_bytecode, descriptor): 
- self._extendedStoreBytecode(interp, current_bytecode, descriptor) - self.pop() - - def _extract_index_and_temps(self, index_in_array, index_of_array): - w_indirectTemps = self.gettemp(index_of_array) - return index_in_array, w_indirectTemps - - @bytecode_implementation(parameter_bytes=2) - def pushRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): - index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) - self.push(w_indirectTemps.at0(self.space, index_in_array)) - - @bytecode_implementation(parameter_bytes=2) - def storeRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): - index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) - w_indirectTemps.atput0(self.space, index_in_array, self.top()) - - @bytecode_implementation(parameter_bytes=2) - def storeAndPopRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): - index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) - w_indirectTemps.atput0(self.space, index_in_array, self.pop()) - - @bytecode_implementation(parameter_bytes=3) - def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode, descriptor, j, i): - """ Copied from Blogpost: http://www.mirandabanda.org/cogblog/2008/07/22/closures-part-ii-the-bytecodes/ - ContextPart>>pushClosureCopyNumCopiedValues: numCopied numArgs: numArgs blockSize: blockSize - "Simulate the action of a 'closure copy' bytecode whose result is the - new BlockClosure for the following code" - | copiedValues | - numCopied > 0 - ifTrue: - [copiedValues := Array new: numCopied. - numCopied to: 1 by: -1 do: - [:i| - copiedValues at: i put: self pop]] - ifFalse: - [copiedValues := nil]. - self push: (BlockClosure new - outerContext: self - startpc: pc - numArgs: numArgs - copiedValues: copiedValues). 
- self jump: blockSize - """ - - space = self.space - numArgs, numCopied = splitter[4, 4](descriptor) - blockSize = (j << 8) | i - # Create new instance of BlockClosure - w_closure = space.newClosure(self.w_self(), self.pc(), numArgs, - self.pop_and_return_n(numCopied)) - self.push(w_closure) - self._jump(blockSize) - - # ====== Helpers for send/return bytecodes ====== - - def _sendSelfSelector(self, w_selector, argcount, interp): - receiver = self.peek(argcount) - return self._sendSelector(w_selector, argcount, interp, - receiver, receiver.class_shadow(self.space)) - - def _sendSuperSelector(self, w_selector, argcount, interp): - compiledin_class = self.w_method().compiled_in() - assert isinstance(compiledin_class, model.W_PointersObject) - s_compiledin = compiledin_class.as_class_get_shadow(self.space) - return self._sendSelector(w_selector, argcount, interp, self.w_receiver(), - s_compiledin.s_superclass()) - - def _sendSelector(self, w_selector, argcount, interp, - receiver, receiverclassshadow, w_arguments=None): - assert argcount >= 0 - try: - w_method = receiverclassshadow.lookup(w_selector) - except MethodNotFound: - return self._doesNotUnderstand(w_selector, argcount, interp, receiver) - - code = w_method.primitive() - if code: - if w_arguments: - self.push_all(w_arguments) - try: - return self._call_primitive(code, interp, argcount, w_method, w_selector) - except primitives.PrimitiveFailedError: - pass # ignore this error and fall back to the Smalltalk version - if not w_arguments: - w_arguments = self.pop_and_return_n(argcount) - s_frame = w_method.create_frame(interp.space, receiver, w_arguments) - self.pop() # receiver - - # ###################################################################### - if interp.is_tracing(): - interp.print_padded('-> ' + s_frame.short_str()) - - return interp.stack_frame(s_frame, self) - - @objectmodel.specialize.arg(1) - def _sendSelfSelectorSpecial(self, selector, numargs, interp): - w_selector = 
self.space.get_special_selector(selector) - return self._sendSelfSelector(w_selector, numargs, interp) - - def _sendSpecialSelector(self, interp, receiver, special_selector, w_args=[]): - w_special_selector = self.space.objtable["w_" + special_selector] - s_class = receiver.class_shadow(self.space) - w_method = s_class.lookup(w_special_selector) - s_frame = w_method.create_frame(interp.space, receiver, w_args) - - # ###################################################################### - if interp.is_tracing(): - interp.print_padded('-> %s %s' % (special_selector, s_frame.short_str())) - if not objectmodel.we_are_translated(): - import pdb; pdb.set_trace() - - return interp.stack_frame(s_frame, self) - - def _doesNotUnderstand(self, w_selector, argcount, interp, receiver): - arguments = self.pop_and_return_n(argcount) - w_message_class = self.space.classtable["w_Message"] - assert isinstance(w_message_class, model.W_PointersObject) - s_message_class = w_message_class.as_class_get_shadow(self.space) - w_message = s_message_class.new() - w_message.store(self.space, 0, w_selector) - w_message.store(self.space, 1, self.space.wrap_list(arguments)) - self.pop() # The receiver, already known. 
- - try: - if interp.space.headless.is_set(): - primitives.exitFromHeadlessExecution(self, "doesNotUnderstand:", w_message) - return self._sendSpecialSelector(interp, receiver, "doesNotUnderstand", [w_message]) - except MethodNotFound: - from spyvm.shadow import ClassShadow - s_class = receiver.class_shadow(self.space) - assert isinstance(s_class, ClassShadow) - from spyvm import error - raise error.Exit("Missing doesNotUnderstand in hierarchy of %s" % s_class.getname()) - - def _mustBeBoolean(self, interp, receiver): - return self._sendSpecialSelector(interp, receiver, "mustBeBoolean") - - def _call_primitive(self, code, interp, argcount, w_method, w_selector): - # ################################################################## - if interp.is_tracing(): - interp.print_padded("-> primitive %d \t(in %s, named %s)" % ( - code, self.w_method().get_identifier_string(), - w_selector.selector_string())) - func = primitives.prim_holder.prim_table[code] - try: - # note: argcount does not include rcvr - # the primitive pushes the result (if any) onto the stack itself - return func(interp, self, argcount, w_method) - except primitives.PrimitiveFailedError, e: - if interp.is_tracing(): - interp.print_padded("-- primitive %d FAILED\t (in %s, named %s)" % ( - code, w_method.safe_identifier_string(), w_selector.selector_string())) - raise e - - def _return(self, return_value, interp, local_return=False): - # unfortunately, this assert is not true for some tests. TODO fix this. - # assert self._stack_ptr == self.tempsize() - - # ################################################################## - if interp.is_tracing(): - interp.print_padded('<- ' + return_value.as_repr_string()) - - if self.home_is_self() or local_return: - # a local return just needs to go up the stack once. 
there - # it will find the sender as a local, and we don't have to - # force the reference - s_return_to = None - return_from_top = self.s_sender() is None - else: - s_return_to = self.s_home().s_sender() - return_from_top = s_return_to is None - - if return_from_top: - # This should never happen while executing a normal image. - raise ReturnFromTopLevel(return_value) - else: - raise Return(s_return_to, return_value) - - # ====== Send/Return bytecodes ====== - - @bytecode_implementation() - def returnReceiverBytecode(self, interp, current_bytecode): - return self._return(self.w_receiver(), interp) - - @bytecode_implementation() - def returnTrueBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_true, interp) - - @bytecode_implementation() - def returnFalseBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_false, interp) - - @bytecode_implementation() - def returnNilBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_nil, interp) - - @bytecode_implementation() - def returnTopFromMethodBytecode(self, interp, current_bytecode): - return self._return(self.pop(), interp) - - @bytecode_implementation() - def returnTopFromBlockBytecode(self, interp, current_bytecode): - return self._return(self.pop(), interp, local_return=True) - - @bytecode_implementation() - def sendLiteralSelectorBytecode(self, interp, current_bytecode): - w_selector = self.w_method().getliteral(current_bytecode & 15) - argcount = ((current_bytecode >> 4) & 3) - 1 - return self._sendSelfSelector(w_selector, argcount, interp) - - def _getExtendedSelectorArgcount(self, descriptor): - return ((self.w_method().getliteral(descriptor & 31)), - (descriptor >> 5)) - - @bytecode_implementation(parameter_bytes=1) - def singleExtendedSendBytecode(self, interp, current_bytecode, descriptor): - w_selector, argcount = self._getExtendedSelectorArgcount(descriptor) - return self._sendSelfSelector(w_selector, argcount, interp) - - 
@bytecode_implementation(parameter_bytes=2) - def doubleExtendedDoAnythingBytecode(self, interp, current_bytecode, second, third): - from spyvm import error - opType = second >> 5 - if opType == 0: - # selfsend - return self._sendSelfSelector(self.w_method().getliteral(third), - second & 31, interp) - elif opType == 1: - # supersend - return self._sendSuperSelector(self.w_method().getliteral(third), - second & 31, interp) - elif opType == 2: - # pushReceiver - self.push(self.w_receiver().fetch(self.space, third)) - elif opType == 3: - # pushLiteralConstant - self.push(self.w_method().getliteral(third)) - elif opType == 4: - # pushLiteralVariable - w_association = self.w_method().getliteral(third) - association = wrapper.AssociationWrapper(self.space, w_association) - self.push(association.value()) - elif opType == 5: - # TODO - the following two special cases should not be necessary - try: - self.w_receiver().store(self.space, third, self.top()) - except SenderChainManipulation, e: - raise SenderChainManipulation(self) - elif opType == 6: - try: - self.w_receiver().store(self.space, third, self.pop()) - except SenderChainManipulation, e: - raise SenderChainManipulation(self) - elif opType == 7: - w_association = self.w_method().getliteral(third) - association = wrapper.AssociationWrapper(self.space, w_association) - association.store_value(self.top()) - - @bytecode_implementation(parameter_bytes=1) - def singleExtendedSuperBytecode(self, interp, current_bytecode, descriptor): - w_selector, argcount = self._getExtendedSelectorArgcount(descriptor) - return self._sendSuperSelector(w_selector, argcount, interp) - - @bytecode_implementation(parameter_bytes=1) - def secondExtendedSendBytecode(self, interp, current_bytecode, descriptor): - w_selector = self.w_method().getliteral(descriptor & 63) - argcount = descriptor >> 6 - return self._sendSelfSelector(w_selector, argcount, interp) - - # ====== Misc ====== - - def _activate_unwind_context(self, interp): - if 
self.is_closure_context() or not self.is_BlockClosure_ensure(): - self.mark_returned() - return - # The first temp is executed flag for both #ensure: and #ifCurtailed: - if self.gettemp(1).is_nil(self.space): - self.settemp(1, self.space.w_true) # mark unwound - self.push(self.gettemp(0)) # push the first argument - try: - self.bytecodePrimValue(interp, 0) - except Return, nlr: - assert nlr.s_target_context or nlr.is_local - if self is not nlr.s_target_context and not nlr.is_local: - raise nlr - finally: - self.mark_returned() - - @bytecode_implementation() - def unknownBytecode(self, interp, current_bytecode): - raise MissingBytecode("unknownBytecode") - - @bytecode_implementation() - def experimentalBytecode(self, interp, current_bytecode): - raise MissingBytecode("experimentalBytecode") - - # ====== Jump bytecodes ====== - - def _jump(self, offset): - self.store_pc(self.pc() + offset) - - def _jumpConditional(self, interp, expecting_true, position): - if expecting_true: - w_expected = interp.space.w_true - w_alternative = interp.space.w_false - else: - w_alternative = interp.space.w_true - w_expected = interp.space.w_false - - # Don't check the class, just compare with only two Boolean instances. 
- w_bool = self.pop() - if w_expected.is_same_object(w_bool): - self._jump(position) - elif not w_alternative.is_same_object(w_bool): - self._mustBeBoolean(interp, w_bool) - - def _shortJumpOffset(self, current_bytecode): - return (current_bytecode & 7) + 1 - - def _longJumpOffset(self, current_bytecode, parameter): - return ((current_bytecode & 3) << 8) + parameter - - @bytecode_implementation() - def shortUnconditionalJumpBytecode(self, interp, current_bytecode): - self._jump(self._shortJumpOffset(current_bytecode)) - - @bytecode_implementation() - def shortConditionalJumpBytecode(self, interp, current_bytecode): - # The conditional jump is "jump on false" - self._jumpConditional(interp, False, self._shortJumpOffset(current_bytecode)) - - @bytecode_implementation(parameter_bytes=1) - def longUnconditionalJumpBytecode(self, interp, current_bytecode, parameter): - offset = (((current_bytecode & 7) - 4) << 8) + parameter - self._jump(offset) - - @bytecode_implementation(parameter_bytes=1) - def longJumpIfTrueBytecode(self, interp, current_bytecode, parameter): - self._jumpConditional(interp, True, self._longJumpOffset(current_bytecode, parameter)) - - @bytecode_implementation(parameter_bytes=1) - def longJumpIfFalseBytecode(self, interp, current_bytecode, parameter): - self._jumpConditional(interp, False, self._longJumpOffset(current_bytecode, parameter)) - - # ====== Bytecodes implemented with primitives and message sends ====== - - bytecodePrimAdd = make_call_primitive_bytecode(primitives.ADD, "+", 1) - bytecodePrimSubtract = make_call_primitive_bytecode(primitives.SUBTRACT, "-", 1) - bytecodePrimLessThan = make_call_primitive_bytecode (primitives.LESSTHAN, "<", 1) - bytecodePrimGreaterThan = make_call_primitive_bytecode(primitives.GREATERTHAN, ">", 1) - bytecodePrimLessOrEqual = make_call_primitive_bytecode(primitives.LESSOREQUAL, "<=", 1) - bytecodePrimGreaterOrEqual = make_call_primitive_bytecode(primitives.GREATEROREQUAL, ">=", 1) - bytecodePrimEqual = 
make_call_primitive_bytecode(primitives.EQUAL, "=", 1) - bytecodePrimNotEqual = make_call_primitive_bytecode(primitives.NOTEQUAL, "~=", 1) - bytecodePrimMultiply = make_call_primitive_bytecode(primitives.MULTIPLY, "*", 1) - bytecodePrimDivide = make_call_primitive_bytecode(primitives.DIVIDE, "/", 1) - bytecodePrimMod = make_call_primitive_bytecode(primitives.MOD, "\\\\", 1) - bytecodePrimMakePoint = make_call_primitive_bytecode(primitives.MAKE_POINT, "@", 1) - bytecodePrimBitShift = make_call_primitive_bytecode(primitives.BIT_SHIFT, "bitShift:", 1) - bytecodePrimDiv = make_call_primitive_bytecode(primitives.DIV, "//", 1) - bytecodePrimBitAnd = make_call_primitive_bytecode(primitives.BIT_AND, "bitAnd:", 1) - bytecodePrimBitOr = make_call_primitive_bytecode(primitives.BIT_OR, "bitOr:", 1) - - bytecodePrimAt = make_send_selector_bytecode("at:", 1) - bytecodePrimAtPut = make_send_selector_bytecode("at:put:", 2) - bytecodePrimSize = make_send_selector_bytecode("size", 0) - bytecodePrimNext = make_send_selector_bytecode("next", 0) - bytecodePrimNextPut = make_send_selector_bytecode("nextPut:", 1) - bytecodePrimAtEnd = make_send_selector_bytecode("atEnd", 0) - - bytecodePrimEquivalent = make_quick_call_primitive_bytecode(primitives.EQUIVALENT, 1) - bytecodePrimClass = make_quick_call_primitive_bytecode(primitives.CLASS, 0) - - bytecodePrimBlockCopy = make_call_primitive_bytecode(primitives.BLOCK_COPY, "blockCopy:", 1) - bytecodePrimValue = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE, "value", 0) - bytecodePrimValueWithArg = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE_, "value:", 1) - - bytecodePrimDo = make_send_selector_bytecode("do:", 1) - bytecodePrimNew = make_send_selector_bytecode("new", 0) - bytecodePrimNewWithArg = make_send_selector_bytecode("new:", 1) - bytecodePrimPointX = make_send_selector_bytecode("x", 0) - 
bytecodePrimPointY = make_send_selector_bytecode("y", 0) - -BYTECODE_RANGES = [ - ( 0, 15, "pushReceiverVariableBytecode"), - ( 16, 31, "pushTemporaryVariableBytecode"), - ( 32, 63, "pushLiteralConstantBytecode"), - ( 64, 95, "pushLiteralVariableBytecode"), - ( 96, 103, "storeAndPopReceiverVariableBytecode"), - (104, 111, "storeAndPopTemporaryVariableBytecode"), - (112, "pushReceiverBytecode"), - (113, "pushConstantTrueBytecode"), - (114, "pushConstantFalseBytecode"), - (115, "pushConstantNilBytecode"), - (116, "pushConstantMinusOneBytecode"), - (117, "pushConstantZeroBytecode"), - (118, "pushConstantOneBytecode"), - (119, "pushConstantTwoBytecode"), - (120, "returnReceiverBytecode"), - (121, "returnTrueBytecode"), - (122, "returnFalseBytecode"), - (123, "returnNilBytecode"), - (124, "returnTopFromMethodBytecode"), - (125, "returnTopFromBlockBytecode"), - (126, "unknownBytecode"), - (127, "unknownBytecode"), - (128, "extendedPushBytecode"), - (129, "extendedStoreBytecode"), - (130, "extendedStoreAndPopBytecode"), - (131, "singleExtendedSendBytecode"), - (132, "doubleExtendedDoAnythingBytecode"), - (133, "singleExtendedSuperBytecode"), - (134, "secondExtendedSendBytecode"), - (135, "popStackBytecode"), - (136, "duplicateTopBytecode"), - (137, "pushActiveContextBytecode"), - (138, "pushNewArrayBytecode"), - (139, "experimentalBytecode"), - (140, "pushRemoteTempLongBytecode"), - (141, "storeRemoteTempLongBytecode"), - (142, "storeAndPopRemoteTempLongBytecode"), - (143, "pushClosureCopyCopiedValuesBytecode"), - (144, 151, "shortUnconditionalJumpBytecode"), - (152, 159, "shortConditionalJumpBytecode"), - (160, 167, "longUnconditionalJumpBytecode"), - (168, 171, "longJumpIfTrueBytecode"), - (172, 175, "longJumpIfFalseBytecode"), - (176, "bytecodePrimAdd"), - (177, "bytecodePrimSubtract"), - (178, "bytecodePrimLessThan"), - (179, "bytecodePrimGreaterThan"), - (180, "bytecodePrimLessOrEqual"), - (181, "bytecodePrimGreaterOrEqual"), - (182, "bytecodePrimEqual"), - (183, 
"bytecodePrimNotEqual"), - (184, "bytecodePrimMultiply"), - (185, "bytecodePrimDivide"), - (186, "bytecodePrimMod"), - (187, "bytecodePrimMakePoint"), - (188, "bytecodePrimBitShift"), - (189, "bytecodePrimDiv"), - (190, "bytecodePrimBitAnd"), - (191, "bytecodePrimBitOr"), - (192, "bytecodePrimAt"), - (193, "bytecodePrimAtPut"), - (194, "bytecodePrimSize"), - (195, "bytecodePrimNext"), - (196, "bytecodePrimNextPut"), - (197, "bytecodePrimAtEnd"), - (198, "bytecodePrimEquivalent"), - (199, "bytecodePrimClass"), - (200, "bytecodePrimBlockCopy"), - (201, "bytecodePrimValue"), - (202, "bytecodePrimValueWithArg"), - (203, "bytecodePrimDo"), - (204, "bytecodePrimNew"), - (205, "bytecodePrimNewWithArg"), - (206, "bytecodePrimPointX"), - (207, "bytecodePrimPointY"), - (208, 255, "sendLiteralSelectorBytecode"), - ] - -from rpython.rlib.unroll import unrolling_iterable -UNROLLING_BYTECODE_RANGES = unrolling_iterable(BYTECODE_RANGES) - -def initialize_bytecode_names(): - result = [None] * 256 - for entry in BYTECODE_RANGES: - if len(entry) == 2: - result[entry[0]] = entry[1] - else: - for arg, pos in enumerate(range(entry[0], entry[1]+1)): - result[pos] = "%s(%s)" % (entry[2], arg) - assert None not in result - return result - -BYTECODE_NAMES = initialize_bytecode_names() - -def initialize_bytecode_table(): - result = [None] * 256 - for entry in BYTECODE_RANGES: - if len(entry) == 2: - positions = [entry[0]] - else: - positions = range(entry[0], entry[1]+1) - for pos in positions: - result[pos] = getattr(ContextPartShadow, entry[-1]) - assert None not in result - return result - -# this table is only used for creating named bytecodes in tests and printing -BYTECODE_TABLE = initialize_bytecode_table() - -# Smalltalk debugging facilities, patching Interpreter and ContextPartShadow -# in order to enable tracing/jumping for message sends etc. 
-def debugging(): - def stepping_debugger_init(original): - def meth(self, space, image=None, trace=False): - return_value = original(self, space, image=image, trace=trace) - # ############################################################## - - self.message_stepping = False - self.halt_on_failing_primitives = False - - # ############################################################## - return return_value - return meth - - Interpreter.__init__ = stepping_debugger_init(Interpreter.__init__) - - def stepping_debugger_send(original): - """When interp.message_stepping is True, we halt on every call of ContextPartShadow._sendSelector. - The method is not called for bytecode message sends (see constants.SPECIAL_SELECTORS)""" - def meth(s_context, w_selector, argcount, interp, - receiver, receiverclassshadow): - options = [False] - def next(): interp.message_stepping = True; print 'Now continue (c).' - def over(): options[0] = True; print 'Skipping #%s. You still need to continue(c).' % w_selector.str_content() - def pstack(): print s_context.print_stack() - if interp.message_stepping: - if argcount == 0: - print "-> %s #%s" % (receiver.as_repr_string(), - w_selector.str_content()) - elif argcount == 1: - print "-> %s #%s %s" % (receiver.as_repr_string(), - w_selector.str_content(), - s_context.peek(0).as_repr_string()) - else: - print "-> %s #%s %r" % (receiver.as_repr_string(), - w_selector.str_content(), - [s_context.peek(argcount-1-i) for i in range(argcount)]) - import pdb; pdb.set_trace() - if options[0]: - m_s = interp.message_stepping - interp.message_stepping = False - try: - return original(s_context, w_selector, argcount, interp, receiver, receiverclassshadow) - finally: - interp.message_stepping = m_s - else: - return original(s_context, w_selector, argcount, interp, receiver, receiverclassshadow) - return meth - - ContextPartShadow._sendSelector = stepping_debugger_send(ContextPartShadow._sendSelector) - - def stepping_debugger_failed_primitive_halt(original): 
- def meth(self, code, interp, argcount, w_method, w_selector): - try: - original(self, code, interp, argcount, w_method, w_selector) - except primitives.PrimitiveFailedError, e: - if interp.halt_on_failing_primitives: - func = primitives.prim_holder.prim_table[code] - if func.func_name != 'raise_failing_default' and code != 83: - import pdb; pdb.set_trace() - try: - func(interp, self, argcount, w_method) # will fail again - except primitives.PrimitiveFailedError: - pass - raise e - return meth - - ContextPartShadow._call_primitive = stepping_debugger_failed_primitive_halt(ContextPartShadow._call_primitive) - - def trace_missing_named_primitives(original): - def meth(interp, s_frame, argcount, w_method=None): - try: - return original(interp, s_frame, argcount, w_method=w_method) - except primitives.PrimitiveFailedError, e: - space = interp.space - w_description = w_method.literalat0(space, 1) - if not isinstance(w_description, model.W_PointersObject) or w_description.size() < 2: - raise e - w_modulename = w_description.at0(space, 0) - w_functionname = w_description.at0(space, 1) - if not (isinstance(w_modulename, model.W_BytesObject) and - isinstance(w_functionname, model.W_BytesObject)): - raise e - signature = (w_modulename.as_string(), w_functionname.as_string()) - debugging.missing_named_primitives.add(signature) - raise e - return meth - - primitives.prim_table[primitives.EXTERNAL_CALL] = trace_missing_named_primitives(primitives.prim_table[primitives.EXTERNAL_CALL]) - debugging.missing_named_primitives = set() - -# debugging() +# Uncomment this to load debugging facilities at startup. 
+#from spyvm import interpreter_debugging; Interpreter.__init__ = interpreter_debugging.activating_init(Interpreter.__init__) diff --git a/spyvm/interpreter_bytecodes.py b/spyvm/interpreter_bytecodes.py new file mode 100644 --- /dev/null +++ b/spyvm/interpreter_bytecodes.py @@ -0,0 +1,725 @@ + +from spyvm.shadow import ContextPartShadow +from spyvm import model, primitives, wrapper, error +from spyvm.tool.bitmanipulation import splitter +from rpython.rlib import objectmodel, unroll, jit + +# unrolling_zero has been removed from rlib at some point. +if hasattr(unroll, "unrolling_zero"): + unrolling_zero = unroll.unrolling_zero +else: + class unrolling_int(int, unroll.SpecTag): + def __add__(self, other): + return unrolling_int(int.__add__(self, other)) + __radd__ = __add__ + def __sub__(self, other): + return unrolling_int(int.__sub__(self, other)) + def __rsub__(self, other): + return unrolling_int(int.__rsub__(self, other)) + unrolling_zero = unrolling_int(0) + +# This is a decorator for bytecode implementation methods. +# parameter_bytes=N means N additional bytes are fetched as parameters. +def bytecode_implementation(parameter_bytes=0): + def bytecode_implementation_decorator(actual_implementation_method): + @jit.unroll_safe + def bytecode_implementation_wrapper(self, interp, current_bytecode): + parameters = () + i = unrolling_zero + while i < parameter_bytes: + parameters += (self.fetch_next_bytecode(), ) + i = i + 1 + # This is a good place to step through bytecodes. 
+ self.debug_bytecode() + return actual_implementation_method(self, interp, current_bytecode, *parameters) + bytecode_implementation_wrapper.func_name = actual_implementation_method.func_name + return bytecode_implementation_wrapper + return bytecode_implementation_decorator + +def make_call_primitive_bytecode(primitive, selector, argcount, store_pc=False): + func = primitives.prim_table[primitive] + @bytecode_implementation() + def callPrimitive(self, interp, current_bytecode): + # WARNING: this is used for bytecodes for which it is safe to + # directly call the primitive. In general, it is not safe: for + # example, depending on the type of the receiver, bytecodePrimAt + # may invoke primitives.AT, primitives.STRING_AT, or anything + # else that the user put in a class in an 'at:' method. + # The rule of thumb is that primitives with only int and float + # in their unwrap_spec are safe. + try: + return func(interp, self, argcount) + except error.PrimitiveFailedError: + pass + return self._sendSelfSelectorSpecial(selector, argcount, interp) + callPrimitive.func_name = "callPrimitive_%s" % func.func_name + return callPrimitive + +def make_call_primitive_bytecode_classbased(a_class_name, a_primitive, alternative_class_name, alternative_primitive, selector, argcount): + @bytecode_implementation() + def callClassbasedPrimitive(self, interp, current_bytecode): + rcvr = self.peek(argcount) + receiver_class = rcvr.getclass(self.space) + try: + if receiver_class is getattr(self.space, a_class_name): + func = primitives.prim_table[a_primitive] + return func(interp, self, argcount) + elif receiver_class is getattr(self.space, alternative_class_name): + func = primitives.prim_table[alternative_primitive] + return func(interp, self, argcount) + except error.PrimitiveFailedError: + pass + return self._sendSelfSelectorSpecial(selector, argcount, interp) + callClassbasedPrimitive.func_name = "callClassbasedPrimitive_%s" % selector + return callClassbasedPrimitive + +# Some 
selectors cannot be overwritten, therefore no need to handle PrimitiveFailed. +def make_quick_call_primitive_bytecode(primitive_index, argcount): + func = primitives.prim_table[primitive_index] + @bytecode_implementation() + def quick_call_primitive_bytecode(self, interp, current_bytecode): + return func(interp, self, argcount) + return quick_call_primitive_bytecode + +# This is for bytecodes that actually implement a simple message-send. +# We do not optimize anything for these cases. +def make_send_selector_bytecode(selector, argcount): + @bytecode_implementation() + def selector_bytecode(self, interp, current_bytecode): + return self._sendSelfSelectorSpecial(selector, argcount, interp) + selector_bytecode.func_name = "selector_bytecode_%s" % selector + return selector_bytecode + +# ___________________________________________________________________________ +# Bytecode Implementations: +# +# "self" is always a ContextPartShadow instance. +# __extend__ adds new methods to the ContextPartShadow class +class __extend__(ContextPartShadow): + + # ====== Push/Pop bytecodes ====== + + @bytecode_implementation() + def pushReceiverVariableBytecode(self, interp, current_bytecode): + index = current_bytecode & 15 + self.push(self.w_receiver().fetch(self.space, index)) + + @bytecode_implementation() + def pushTemporaryVariableBytecode(self, interp, current_bytecode): + index = current_bytecode & 15 + self.push(self.gettemp(index)) + + @bytecode_implementation() + def pushLiteralConstantBytecode(self, interp, current_bytecode): + index = current_bytecode & 31 + self.push(self.w_method().getliteral(index)) + + @bytecode_implementation() + def pushLiteralVariableBytecode(self, interp, current_bytecode): + # this bytecode assumes that literals[index] is an Association + # which is an object with two named vars, and fetches the second + # named var (the value). 
+ index = current_bytecode & 31 + w_association = self.w_method().getliteral(index) + association = wrapper.AssociationWrapper(self.space, w_association) + self.push(association.value()) + + @bytecode_implementation() + def storeAndPopReceiverVariableBytecode(self, interp, current_bytecode): + index = current_bytecode & 7 + self.w_receiver().store(self.space, index, self.pop()) + + @bytecode_implementation() + def storeAndPopTemporaryVariableBytecode(self, interp, current_bytecode): + index = current_bytecode & 7 + self.settemp(index, self.pop()) + + @bytecode_implementation() + def pushReceiverBytecode(self, interp, current_bytecode): + self.push(self.w_receiver()) + + @bytecode_implementation() + def pushConstantTrueBytecode(self, interp, current_bytecode): + self.push(interp.space.w_true) + + @bytecode_implementation() + def pushConstantFalseBytecode(self, interp, current_bytecode): + self.push(interp.space.w_false) + + @bytecode_implementation() + def pushConstantNilBytecode(self, interp, current_bytecode): + self.push(interp.space.w_nil) + + @bytecode_implementation() + def pushConstantMinusOneBytecode(self, interp, current_bytecode): + self.push(interp.space.w_minus_one) + + @bytecode_implementation() + def pushConstantZeroBytecode(self, interp, current_bytecode): + self.push(interp.space.w_zero) + + @bytecode_implementation() + def pushConstantOneBytecode(self, interp, current_bytecode): + self.push(interp.space.w_one) + + @bytecode_implementation() + def pushConstantTwoBytecode(self, interp, current_bytecode): + self.push(interp.space.w_two) + + @bytecode_implementation() + def pushActiveContextBytecode(self, interp, current_bytecode): + self.push(self.w_self()) + + @bytecode_implementation() + def duplicateTopBytecode(self, interp, current_bytecode): + self.push(self.top()) + + @bytecode_implementation() + def popStackBytecode(self, interp, current_bytecode): + self.pop() + + @bytecode_implementation(parameter_bytes=1) + def pushNewArrayBytecode(self, 
interp, current_bytecode, descriptor): + arraySize, popIntoArray = splitter[7, 1](descriptor) + newArray = None + if popIntoArray == 1: + newArray = interp.space.wrap_list(self.pop_and_return_n(arraySize)) + else: + newArray = interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) + self.push(newArray) + + # ====== Extended Push/Pop bytecodes ====== + + def _extendedVariableTypeAndIndex(self, descriptor): + return ((descriptor >> 6) & 3), (descriptor & 63) + + @bytecode_implementation(parameter_bytes=1) + def extendedPushBytecode(self, interp, current_bytecode, descriptor): + variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) + if variableType == 0: + self.push(self.w_receiver().fetch(self.space, variableIndex)) + elif variableType == 1: + self.push(self.gettemp(variableIndex)) + elif variableType == 2: + self.push(self.w_method().getliteral(variableIndex)) + elif variableType == 3: + w_association = self.w_method().getliteral(variableIndex) + association = wrapper.AssociationWrapper(self.space, w_association) + self.push(association.value()) + else: + assert 0 + + def _extendedStoreBytecode(self, interp, current_bytecode, descriptor): + variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) + if variableType == 0: + self.w_receiver().store(self.space, variableIndex, self.top()) + elif variableType == 1: + self.settemp(variableIndex, self.top()) + elif variableType == 2: + raise error.FatalError("Illegal ExtendedStoreBytecode. 
veriableType 2.") + elif variableType == 3: + w_association = self.w_method().getliteral(variableIndex) + association = wrapper.AssociationWrapper(self.space, w_association) + association.store_value(self.top()) + + @bytecode_implementation(parameter_bytes=1) + def extendedStoreBytecode(self, interp, current_bytecode, descriptor): + return self._extendedStoreBytecode(interp, current_bytecode, descriptor) + + @bytecode_implementation(parameter_bytes=1) + def extendedStoreAndPopBytecode(self, interp, current_bytecode, descriptor): + self._extendedStoreBytecode(interp, current_bytecode, descriptor) + self.pop() + + def _extract_index_and_temps(self, index_in_array, index_of_array): + w_indirectTemps = self.gettemp(index_of_array) + return index_in_array, w_indirectTemps + + @bytecode_implementation(parameter_bytes=2) + def pushRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + self.push(w_indirectTemps.at0(self.space, index_in_array)) + + @bytecode_implementation(parameter_bytes=2) + def storeRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + w_indirectTemps.atput0(self.space, index_in_array, self.top()) + + @bytecode_implementation(parameter_bytes=2) + def storeAndPopRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + w_indirectTemps.atput0(self.space, index_in_array, self.pop()) + + @bytecode_implementation(parameter_bytes=3) + def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode, descriptor, j, i): + """ Copied from Blogpost: http://www.mirandabanda.org/cogblog/2008/07/22/closures-part-ii-the-bytecodes/ + 
ContextPart>>pushClosureCopyNumCopiedValues: numCopied numArgs: numArgs blockSize: blockSize + "Simulate the action of a 'closure copy' bytecode whose result is the + new BlockClosure for the following code" + | copiedValues | + numCopied > 0 + ifTrue: + [copiedValues := Array new: numCopied. + numCopied to: 1 by: -1 do: + [:i| + copiedValues at: i put: self pop]] + ifFalse: + [copiedValues := nil]. + self push: (BlockClosure new + outerContext: self + startpc: pc + numArgs: numArgs + copiedValues: copiedValues). + self jump: blockSize + """ + + space = self.space + numArgs, numCopied = splitter[4, 4](descriptor) + blockSize = (j << 8) | i + # Create new instance of BlockClosure + w_closure = space.newClosure(self.w_self(), self.pc(), numArgs, + self.pop_and_return_n(numCopied)) + self.push(w_closure) + self._jump(blockSize) + + # ====== Helpers for send/return bytecodes ====== + + def _sendSelfSelector(self, w_selector, argcount, interp): + receiver = self.peek(argcount) + return self._sendSelector(w_selector, argcount, interp, + receiver, receiver.class_shadow(self.space)) + + def _sendSuperSelector(self, w_selector, argcount, interp): + compiledin_class = self.w_method().compiled_in() + assert isinstance(compiledin_class, model.W_PointersObject) + s_compiledin = compiledin_class.as_class_get_shadow(self.space) + return self._sendSelector(w_selector, argcount, interp, self.w_receiver(), + s_compiledin.s_superclass()) + + def _sendSelector(self, w_selector, argcount, interp, + receiver, receiverclassshadow, w_arguments=None): + assert argcount >= 0 + try: + w_method = receiverclassshadow.lookup(w_selector) + except error.MethodNotFound: + return self._doesNotUnderstand(w_selector, argcount, interp, receiver) + + code = w_method.primitive() + if code: + if w_arguments: + self.push_all(w_arguments) + try: + return self._call_primitive(code, interp, argcount, w_method, w_selector) + except error.PrimitiveFailedError: + pass # ignore this error and fall back to the 
Smalltalk version + if not w_arguments: + w_arguments = self.pop_and_return_n(argcount) + s_frame = w_method.create_frame(interp.space, receiver, w_arguments) + self.pop() # receiver + + # ###################################################################### + if interp.is_tracing(): + interp.print_padded('-> ' + s_frame.short_str()) + + return interp.stack_frame(s_frame, self) + + @objectmodel.specialize.arg(1) + def _sendSelfSelectorSpecial(self, selector, numargs, interp): + w_selector = self.space.get_special_selector(selector) + return self._sendSelfSelector(w_selector, numargs, interp) + + def _sendSpecialSelector(self, interp, receiver, special_selector, w_args=[]): + w_special_selector = self.space.objtable["w_" + special_selector] + s_class = receiver.class_shadow(self.space) + w_method = s_class.lookup(w_special_selector) + s_frame = w_method.create_frame(interp.space, receiver, w_args) + + # ###################################################################### + if interp.is_tracing(): + interp.print_padded('-> %s %s' % (special_selector, s_frame.short_str())) + if not objectmodel.we_are_translated(): + import pdb; pdb.set_trace() + + return interp.stack_frame(s_frame, self) + + def _doesNotUnderstand(self, w_selector, argcount, interp, receiver): + arguments = self.pop_and_return_n(argcount) + w_message_class = self.space.classtable["w_Message"] + assert isinstance(w_message_class, model.W_PointersObject) + s_message_class = w_message_class.as_class_get_shadow(self.space) + w_message = s_message_class.new() + w_message.store(self.space, 0, w_selector) + w_message.store(self.space, 1, self.space.wrap_list(arguments)) + self.pop() # The receiver, already known. 
+ + try: + if interp.space.headless.is_set(): + primitives.exitFromHeadlessExecution(self, "doesNotUnderstand:", w_message) + return self._sendSpecialSelector(interp, receiver, "doesNotUnderstand", [w_message]) + except error.MethodNotFound: + from spyvm.shadow import ClassShadow + s_class = receiver.class_shadow(self.space) + assert isinstance(s_class, ClassShadow) + raise error.Exit("Missing doesNotUnderstand in hierarchy of %s" % s_class.getname()) + + def _mustBeBoolean(self, interp, receiver): + return self._sendSpecialSelector(interp, receiver, "mustBeBoolean") + + def _call_primitive(self, code, interp, argcount, w_method, w_selector): + # ################################################################## + if interp.is_tracing(): + interp.print_padded("-> primitive %d \t(in %s, named %s)" % ( + code, self.w_method().get_identifier_string(), + w_selector.selector_string())) + func = primitives.prim_holder.prim_table[code] + try: + # note: argcount does not include rcvr + # the primitive pushes the result (if any) onto the stack itself + return func(interp, self, argcount, w_method) + except error.PrimitiveFailedError, e: + if interp.is_tracing(): + interp.print_padded("-- primitive %d FAILED\t (in %s, named %s)" % ( + code, w_method.safe_identifier_string(), w_selector.selector_string())) + raise e + + def _return(self, return_value, interp, local_return=False): + # unfortunately, this assert is not true for some tests. TODO fix this. + # assert self._stack_ptr == self.tempsize() + + # ################################################################## + if interp.is_tracing(): + interp.print_padded('<- ' + return_value.as_repr_string()) + + if self.home_is_self() or local_return: + # a local return just needs to go up the stack once. 
there + # it will find the sender as a local, and we don't have to + # force the reference + s_return_to = None + return_from_top = self.s_sender() is None + else: + s_return_to = self.s_home().s_sender() + return_from_top = s_return_to is None + + if return_from_top: + # This should never happen while executing a normal image. + from spyvm.interpreter import ReturnFromTopLevel + raise ReturnFromTopLevel(return_value) + else: + from spyvm.interpreter import Return + raise Return(s_return_to, return_value) + + # ====== Send/Return bytecodes ====== + + @bytecode_implementation() + def returnReceiverBytecode(self, interp, current_bytecode): + return self._return(self.w_receiver(), interp) + + @bytecode_implementation() + def returnTrueBytecode(self, interp, current_bytecode): + return self._return(interp.space.w_true, interp) + + @bytecode_implementation() + def returnFalseBytecode(self, interp, current_bytecode): + return self._return(interp.space.w_false, interp) + + @bytecode_implementation() + def returnNilBytecode(self, interp, current_bytecode): + return self._return(interp.space.w_nil, interp) + + @bytecode_implementation() + def returnTopFromMethodBytecode(self, interp, current_bytecode): + return self._return(self.pop(), interp) + + @bytecode_implementation() + def returnTopFromBlockBytecode(self, interp, current_bytecode): + return self._return(self.pop(), interp, local_return=True) + + @bytecode_implementation() + def sendLiteralSelectorBytecode(self, interp, current_bytecode): + w_selector = self.w_method().getliteral(current_bytecode & 15) + argcount = ((current_bytecode >> 4) & 3) - 1 + return self._sendSelfSelector(w_selector, argcount, interp) + + def _getExtendedSelectorArgcount(self, descriptor): + return ((self.w_method().getliteral(descriptor & 31)), + (descriptor >> 5)) + + @bytecode_implementation(parameter_bytes=1) + def singleExtendedSendBytecode(self, interp, current_bytecode, descriptor): + w_selector, argcount = 
self._getExtendedSelectorArgcount(descriptor) + return self._sendSelfSelector(w_selector, argcount, interp) + + @bytecode_implementation(parameter_bytes=2) + def doubleExtendedDoAnythingBytecode(self, interp, current_bytecode, second, third): + from spyvm.interpreter import SenderChainManipulation + opType = second >> 5 + if opType == 0: + # selfsend + return self._sendSelfSelector(self.w_method().getliteral(third), + second & 31, interp) + elif opType == 1: + # supersend + return self._sendSuperSelector(self.w_method().getliteral(third), + second & 31, interp) + elif opType == 2: + # pushReceiver + self.push(self.w_receiver().fetch(self.space, third)) + elif opType == 3: + # pushLiteralConstant + self.push(self.w_method().getliteral(third)) + elif opType == 4: + # pushLiteralVariable + w_association = self.w_method().getliteral(third) + association = wrapper.AssociationWrapper(self.space, w_association) + self.push(association.value()) + elif opType == 5: + # TODO - the following two special cases should not be necessary + try: + self.w_receiver().store(self.space, third, self.top()) + except SenderChainManipulation, e: + raise SenderChainManipulation(self) + elif opType == 6: + try: + self.w_receiver().store(self.space, third, self.pop()) + except SenderChainManipulation, e: + raise SenderChainManipulation(self) + elif opType == 7: + w_association = self.w_method().getliteral(third) + association = wrapper.AssociationWrapper(self.space, w_association) + association.store_value(self.top()) + + @bytecode_implementation(parameter_bytes=1) + def singleExtendedSuperBytecode(self, interp, current_bytecode, descriptor): + w_selector, argcount = self._getExtendedSelectorArgcount(descriptor) + return self._sendSuperSelector(w_selector, argcount, interp) + + @bytecode_implementation(parameter_bytes=1) + def secondExtendedSendBytecode(self, interp, current_bytecode, descriptor): + w_selector = self.w_method().getliteral(descriptor & 63) + argcount = descriptor >> 6 + return 
self._sendSelfSelector(w_selector, argcount, interp) + + # ====== Misc ====== + + def _activate_unwind_context(self, interp): + if self.is_closure_context() or not self.is_BlockClosure_ensure(): + self.mark_returned() + return + # The first temp is executed flag for both #ensure: and #ifCurtailed: + if self.gettemp(1).is_nil(self.space): + self.settemp(1, self.space.w_true) # mark unwound + self.push(self.gettemp(0)) # push the first argument + try: + self.bytecodePrimValue(interp, 0) + except Return, nlr: + assert nlr.s_target_context or nlr.is_local + if self is not nlr.s_target_context and not nlr.is_local: + raise nlr + finally: + self.mark_returned() + + @bytecode_implementation() + def unknownBytecode(self, interp, current_bytecode): + raise error.MissingBytecode("unknownBytecode") + + @bytecode_implementation() + def experimentalBytecode(self, interp, current_bytecode): + raise error.MissingBytecode("experimentalBytecode") + + # ====== Jump bytecodes ====== + + def _jump(self, offset): + self.store_pc(self.pc() + offset) + + def _jumpConditional(self, interp, expecting_true, position): + if expecting_true: + w_expected = interp.space.w_true + w_alternative = interp.space.w_false + else: + w_alternative = interp.space.w_true + w_expected = interp.space.w_false + + # Don't check the class, just compare with only two Boolean instances. 
+ w_bool = self.pop() + if w_expected.is_same_object(w_bool): + self._jump(position) + elif not w_alternative.is_same_object(w_bool): + self._mustBeBoolean(interp, w_bool) + + def _shortJumpOffset(self, current_bytecode): + return (current_bytecode & 7) + 1 + + def _longJumpOffset(self, current_bytecode, parameter): + return ((current_bytecode & 3) << 8) + parameter + + @bytecode_implementation() + def shortUnconditionalJumpBytecode(self, interp, current_bytecode): + self._jump(self._shortJumpOffset(current_bytecode)) + + @bytecode_implementation() + def shortConditionalJumpBytecode(self, interp, current_bytecode): + # The conditional jump is "jump on false" + self._jumpConditional(interp, False, self._shortJumpOffset(current_bytecode)) + + @bytecode_implementation(parameter_bytes=1) + def longUnconditionalJumpBytecode(self, interp, current_bytecode, parameter): + offset = (((current_bytecode & 7) - 4) << 8) + parameter + self._jump(offset) + + @bytecode_implementation(parameter_bytes=1) + def longJumpIfTrueBytecode(self, interp, current_bytecode, parameter): + self._jumpConditional(interp, True, self._longJumpOffset(current_bytecode, parameter)) + + @bytecode_implementation(parameter_bytes=1) + def longJumpIfFalseBytecode(self, interp, current_bytecode, parameter): + self._jumpConditional(interp, False, self._longJumpOffset(current_bytecode, parameter)) + + # ====== Bytecodes implemented with primitives and message sends ====== + + bytecodePrimAdd = make_call_primitive_bytecode(primitives.ADD, "+", 1) + bytecodePrimSubtract = make_call_primitive_bytecode(primitives.SUBTRACT, "-", 1) + bytecodePrimLessThan = make_call_primitive_bytecode (primitives.LESSTHAN, "<", 1) + bytecodePrimGreaterThan = make_call_primitive_bytecode(primitives.GREATERTHAN, ">", 1) + bytecodePrimLessOrEqual = make_call_primitive_bytecode(primitives.LESSOREQUAL, "<=", 1) + bytecodePrimGreaterOrEqual = make_call_primitive_bytecode(primitives.GREATEROREQUAL, ">=", 1) + bytecodePrimEqual = 
make_call_primitive_bytecode(primitives.EQUAL, "=", 1) + bytecodePrimNotEqual = make_call_primitive_bytecode(primitives.NOTEQUAL, "~=", 1) + bytecodePrimMultiply = make_call_primitive_bytecode(primitives.MULTIPLY, "*", 1) + bytecodePrimDivide = make_call_primitive_bytecode(primitives.DIVIDE, "/", 1) + bytecodePrimMod = make_call_primitive_bytecode(primitives.MOD, "\\\\", 1) + bytecodePrimMakePoint = make_call_primitive_bytecode(primitives.MAKE_POINT, "@", 1) + bytecodePrimBitShift = make_call_primitive_bytecode(primitives.BIT_SHIFT, "bitShift:", 1) + bytecodePrimDiv = make_call_primitive_bytecode(primitives.DIV, "//", 1) + bytecodePrimBitAnd = make_call_primitive_bytecode(primitives.BIT_AND, "bitAnd:", 1) + bytecodePrimBitOr = make_call_primitive_bytecode(primitives.BIT_OR, "bitOr:", 1) + + bytecodePrimAt = make_send_selector_bytecode("at:", 1) + bytecodePrimAtPut = make_send_selector_bytecode("at:put:", 2) + bytecodePrimSize = make_send_selector_bytecode("size", 0) + bytecodePrimNext = make_send_selector_bytecode("next", 0) + bytecodePrimNextPut = make_send_selector_bytecode("nextPut:", 1) + bytecodePrimAtEnd = make_send_selector_bytecode("atEnd", 0) + + bytecodePrimEquivalent = make_quick_call_primitive_bytecode(primitives.EQUIVALENT, 1) + bytecodePrimClass = make_quick_call_primitive_bytecode(primitives.CLASS, 0) + + bytecodePrimBlockCopy = make_call_primitive_bytecode(primitives.BLOCK_COPY, "blockCopy:", 1) + bytecodePrimValue = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE, "value", 0) + bytecodePrimValueWithArg = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE_, "value:", 1) + + bytecodePrimDo = make_send_selector_bytecode("do:", 1) + bytecodePrimNew = make_send_selector_bytecode("new", 0) + bytecodePrimNewWithArg = make_send_selector_bytecode("new:", 1) + bytecodePrimPointX = make_send_selector_bytecode("x", 0) + 
bytecodePrimPointY = make_send_selector_bytecode("y", 0) + + def debug_bytecode(self): + # Hook used in interpreter_debugging + pass + +BYTECODE_RANGES = [ + ( 0, 15, "pushReceiverVariableBytecode"), + ( 16, 31, "pushTemporaryVariableBytecode"), + ( 32, 63, "pushLiteralConstantBytecode"), + ( 64, 95, "pushLiteralVariableBytecode"), + ( 96, 103, "storeAndPopReceiverVariableBytecode"), + (104, 111, "storeAndPopTemporaryVariableBytecode"), + (112, "pushReceiverBytecode"), + (113, "pushConstantTrueBytecode"), + (114, "pushConstantFalseBytecode"), + (115, "pushConstantNilBytecode"), + (116, "pushConstantMinusOneBytecode"), + (117, "pushConstantZeroBytecode"), + (118, "pushConstantOneBytecode"), + (119, "pushConstantTwoBytecode"), + (120, "returnReceiverBytecode"), + (121, "returnTrueBytecode"), + (122, "returnFalseBytecode"), + (123, "returnNilBytecode"), + (124, "returnTopFromMethodBytecode"), + (125, "returnTopFromBlockBytecode"), + (126, "unknownBytecode"), + (127, "unknownBytecode"), + (128, "extendedPushBytecode"), + (129, "extendedStoreBytecode"), + (130, "extendedStoreAndPopBytecode"), + (131, "singleExtendedSendBytecode"), + (132, "doubleExtendedDoAnythingBytecode"), + (133, "singleExtendedSuperBytecode"), + (134, "secondExtendedSendBytecode"), + (135, "popStackBytecode"), + (136, "duplicateTopBytecode"), + (137, "pushActiveContextBytecode"), + (138, "pushNewArrayBytecode"), + (139, "experimentalBytecode"), + (140, "pushRemoteTempLongBytecode"), + (141, "storeRemoteTempLongBytecode"), + (142, "storeAndPopRemoteTempLongBytecode"), + (143, "pushClosureCopyCopiedValuesBytecode"), + (144, 151, "shortUnconditionalJumpBytecode"), + (152, 159, "shortConditionalJumpBytecode"), + (160, 167, "longUnconditionalJumpBytecode"), + (168, 171, "longJumpIfTrueBytecode"), + (172, 175, "longJumpIfFalseBytecode"), + (176, "bytecodePrimAdd"), + (177, "bytecodePrimSubtract"), + (178, "bytecodePrimLessThan"), + (179, "bytecodePrimGreaterThan"), + (180, "bytecodePrimLessOrEqual"), + 
(181, "bytecodePrimGreaterOrEqual"), + (182, "bytecodePrimEqual"), + (183, "bytecodePrimNotEqual"), + (184, "bytecodePrimMultiply"), + (185, "bytecodePrimDivide"), + (186, "bytecodePrimMod"), + (187, "bytecodePrimMakePoint"), + (188, "bytecodePrimBitShift"), + (189, "bytecodePrimDiv"), + (190, "bytecodePrimBitAnd"), + (191, "bytecodePrimBitOr"), + (192, "bytecodePrimAt"), + (193, "bytecodePrimAtPut"), + (194, "bytecodePrimSize"), + (195, "bytecodePrimNext"), + (196, "bytecodePrimNextPut"), + (197, "bytecodePrimAtEnd"), + (198, "bytecodePrimEquivalent"), + (199, "bytecodePrimClass"), + (200, "bytecodePrimBlockCopy"), + (201, "bytecodePrimValue"), + (202, "bytecodePrimValueWithArg"), + (203, "bytecodePrimDo"), + (204, "bytecodePrimNew"), + (205, "bytecodePrimNewWithArg"), + (206, "bytecodePrimPointX"), + (207, "bytecodePrimPointY"), + (208, 255, "sendLiteralSelectorBytecode"), + ] + +def initialize_bytecode_names(): + result = [None] * 256 + for entry in BYTECODE_RANGES: + if len(entry) == 2: + result[entry[0]] = entry[1] + else: + for arg, pos in enumerate(range(entry[0], entry[1]+1)): + result[pos] = "%s(%s)" % (entry[2], arg) + assert None not in result + return result + +BYTECODE_NAMES = initialize_bytecode_names() + +def initialize_bytecode_table(): + result = [None] * 256 + for entry in BYTECODE_RANGES: + if len(entry) == 2: + positions = [entry[0]] + else: + positions = range(entry[0], entry[1]+1) + for pos in positions: + result[pos] = getattr(ContextPartShadow, entry[-1]) + assert None not in result + return result + +# this table is only used for creating named bytecodes in tests and printing +BYTECODE_TABLE = initialize_bytecode_table() diff --git a/spyvm/interpreter_debugging.py b/spyvm/interpreter_debugging.py new file mode 100644 --- /dev/null +++ b/spyvm/interpreter_debugging.py @@ -0,0 +1,109 @@ + +import pdb +from spyvm.shadow import ContextPartShadow +from spyvm import model, constants, primitives + +# This module patches up the interpreter and adds 
breakpoints at certain execution points. +# Only usable in interpreted mode due to pdb. +# To use, execute one of following after interpreter.py is loaded: +# from spyvm import interpreter_debugging; interpreter_debugging.activate_debugging() +# or, before Interpreter instance is created: +# Interpreter.__init__ = interpreter_debugging.activating_init(Interpreter.__init__) + +# After this, following flags control whether the interpreter breaks at the respective locations: +# can be an interpreter instance or the Interpreter class +# interp.step_bytecodes +# interp.step_sends +# interp.step_returns +# interp.step_primitives +# interp.step_failed_primitives +# interp.step_failed_named_primitives + +def activating_init(original): + def meth(*args): + activate_debugging() + return original(*args) + return meth + +def activate_debugging(): + from spyvm.interpreter import Interpreter + Interpreter.step_bytecodes = False + Interpreter.step_sends = False + Interpreter.step_returns = False + Interpreter.step_primitives = False + Interpreter.step_failed_primitives = False + + _break = pdb.set_trace + + def patch(obj): + def do_patch(meth): + name = meth.__name__ + original = getattr(obj, name) + assert original, "Object %r does not have a method named %s" % (obj, name) + replacement = meth(original) + setattr(obj, name, replacement) + return meth + return do_patch + + patch_context = patch(ContextPartShadow) + + @patch_context + def debug_bytecode(original): + def meth(self): + if self.step_bytecodes: + _break() # Continue stepping from here to get to the current bytecode execution + return meth + + @patch_context + def _sendSelector(original): + def meth(self, w_selector, argcount, interp, receiver, receiverclassshadow, w_arguments=None): + if interp.step_sends: + _break() # Continue stepping from here to get to the current message send + return original(self, w_selector, argcount, interp, receiver, receiverclassshadow, w_arguments=w_arguments) + return meth + + 
@patch_context + def _return(original): + def meth(self, return_value, interp, local_return=False): + if interp.step_returns: + _break() # Continue stepping from here to get to the current return + return original(self, return_value, interp, local_return=local_return) + return meth + + @patch_context + def _call_primitive(original): + def meth(self, code, interp, argcount, w_method, w_selector): + if interp.step_primitives: + _break() # Continue stepping from here to get to the current primitive + try: + return original(self, code, interp, argcount, w_method, w_selector) + except error.PrimitiveFailedError, e: + if interp.step_failed_primitives: + _break() # Continue stepping from here to get to the current failed primitive. + + # Should fail again. + original(self, code, interp, argcount, w_method, w_selector) + return meth + + def failed_named_primitive(original): + def meth(interp, s_frame, argcount, w_method=None): + try: + return original(interp, s_frame, argcount, w_method=w_method) + except error.PrimitiveFailedError, e: + if interp.step_failed_named_primitives: + _break() # Continue from here to get to the current failed named primitive. + + space = interp.space + w_description = w_method.literalat0(space, 1) + if isinstance(w_description, model.W_PointersObject) and w_description.size() >= 2: + w_modulename = w_description.at0(space, 0) + w_functionname = w_description.at0(space, 1) + print "Failed named primitive. Module: %s, Function: %s" % (w_modulename, w_functionname) + + # Should fail again. 
+ original(interp, s_frame, argcount, w_method=w_method) + raise e + return meth + + primitives.prim_table[primitives.EXTERNAL_CALL] = failed_named_primitive(primitives.prim_table[primitives.EXTERNAL_CALL]) + \ No newline at end of file diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1280,7 +1280,7 @@ return self.get_identifier_string() def bytecode_string(self, markBytecode=0): - from spyvm.interpreter import BYTECODE_TABLE + from spyvm.interpreter_bytecodes import BYTECODE_TABLE retval = "Bytecode:------------" j = 1 for i in self.bytes: diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -679,7 +679,7 @@ assert isinstance(w_bitmap, model_display.W_DisplayBitmap) w_bitmap.flush_to_screen() return w_rcvr - except shadow.MethodNotFound: + except error.MethodNotFound: from spyvm.plugins.bitblt import BitBltPlugin BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, w_method) return w_rcvr @@ -1358,9 +1358,7 @@ unwrap_spec=[object, object, list], no_result=True, clean_stack=False) def func(interp, s_frame, w_rcvr, w_selector, w_arguments): - from spyvm.shadow import MethodNotFound s_frame.pop_n(2) # removing our arguments - return s_frame._sendSelector(w_selector, len(w_arguments), interp, w_rcvr, w_rcvr.class_shadow(interp.space), w_arguments=w_arguments) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -286,11 +286,8 @@ FLOAT = 5 LARGE_POSITIVE_INTEGER = 6 -class MethodNotFound(error.SmalltalkException): - pass - class ClassShadowError(error.SmalltalkException): - pass + exception_type = "ClassShadowError" class ClassShadow(AbstractCachingShadow): """A shadow for Smalltalk objects that are classes @@ -505,7 +502,7 @@ if w_method is not None: return w_method look_in_shadow = look_in_shadow._s_superclass - raise MethodNotFound(self, w_selector) + raise error.MethodNotFound() def changed(self): 
self.superclass_changed(version.Version()) diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1,5 +1,5 @@ import py, operator, sys -from spyvm import model, interpreter, primitives, shadow, objspace, wrapper, constants +from spyvm import model, interpreter, primitives, shadow, objspace, wrapper, constants, error from .util import create_space_interp, copy_to_module, cleanup_module, import_bytecodes, TestInterpreter from spyvm.wrapper import PointWrapper from spyvm.conftest import option @@ -139,7 +139,7 @@ def test_unknownBytecode(): w_frame, s_frame = new_frame(unknownBytecode) - py.test.raises(interpreter.MissingBytecode, step_in_interp, s_frame) + py.test.raises(error.MissingBytecode, step_in_interp, s_frame) # push bytecodes def test_pushReceiverBytecode(): @@ -579,7 +579,7 @@ test_storeAndPopTemporaryVariableBytecode(lambda index: extendedStoreAndPopBytecode + chr((1<<6) + index)) - py.test.raises(interpreter.IllegalStoreError, + py.test.raises(error.FatalError, test_storeAndPopTemporaryVariableBytecode, lambda index: extendedStoreAndPopBytecode + chr((2<<6) + index)) diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -1,6 +1,7 @@ import py, math, socket from spyvm import model, model_display, shadow, objspace, error, display -from spyvm.shadow import MethodNotFound, WEAK_POINTERS +from spyvm.error import MethodNotFound +from spyvm.shadow import WEAK_POINTERS from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rtyper.lltypesystem import lltype, rffi from .util import create_space, copy_to_module, cleanup_module diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -1,5 +1,5 @@ import sys -from spyvm import model, shadow, objspace, version, constants, squeakimage, interpreter +from spyvm import model, shadow, 
objspace, version, constants, squeakimage, interpreter, interpreter_bytecodes from rpython.rlib.objectmodel import instantiate # Most tests don't need a bootstrapped objspace. Those that do, indicate so explicitely. @@ -65,7 +65,7 @@ assert entry[0] <= opcode <= entry[1] return chr(opcode) setattr(mod, name, get_opcode_chr) - for entry in interpreter.BYTECODE_RANGES: + for entry in interpreter_bytecodes.BYTECODE_RANGES: name = entry[-1] if len(entry) == 2: # no range setattr(mod, name, chr(entry[0])) diff --git a/spyvm/tool/analyseimage.py b/spyvm/tool/analyseimage.py --- a/spyvm/tool/analyseimage.py +++ b/spyvm/tool/analyseimage.py @@ -59,7 +59,7 @@ w_frame = w_method.create_frame(interp.space, w_object) interp.store_w_active_context(w_frame) - from spyvm.interpreter import BYTECODE_TABLE + from spyvm.interpreter_bytecodes import BYTECODE_TABLE while True: try: interp.step() diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py From noreply at buildbot.pypy.org Sun Jul 27 12:22:12 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:12 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-interpreter-refactoring: Updated test. Message-ID: <20140727102212.E8A8D1C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-interpreter-refactoring Changeset: r949:b18685474409 Date: 2014-07-23 15:30 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b18685474409/ Log: Updated test. 
diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -404,12 +404,23 @@ for i in xrange(6, 8): assert target.pixelbuffer[i] == 0x0 -def test_display_offset_computation(): - dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 5, 1) +def test_display_offset_computation_even(): + dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 200, 1) + dbitmap.pitch = 64 + dbitmap.words_per_line = 2 assert dbitmap.compute_pos(0) == 0 - assert dbitmap.compute_pos(1) == 8 - assert dbitmap.size() == 5 * 8 + assert dbitmap.compute_pos(1) == 32 + assert dbitmap.compute_pos(2) == 64 +def test_display_offset_computation_uneven(): + dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 200, 1) + dbitmap.pitch = 67 + dbitmap.words_per_line = 2 + assert dbitmap.compute_pos(0) == 0 + assert dbitmap.compute_pos(1) == 32 + assert dbitmap.compute_pos(2) == 67 + assert dbitmap.compute_pos(3) == 67 + 32 + @py.test.mark.skipif("socket.gethostname() == 'precise32'") def test_weak_pointers(): w_cls = bootstrap_class(2) From noreply at buildbot.pypy.org Sun Jul 27 12:22:13 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:13 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-interpreter-refactoring: Finished refactoring. Tests green and compiling. Message-ID: <20140727102213.E0EF41C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-interpreter-refactoring Changeset: r950:1551fe558ac2 Date: 2014-07-25 09:05 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/1551fe558ac2/ Log: Finished refactoring. Tests green and compiling. 
diff --git a/spyvm/interpreter_bytecodes.py b/spyvm/interpreter_bytecodes.py --- a/spyvm/interpreter_bytecodes.py +++ b/spyvm/interpreter_bytecodes.py @@ -1,5 +1,5 @@ -from spyvm.shadow import ContextPartShadow +from spyvm.shadow import ContextPartShadow, ClassShadow from spyvm import model, primitives, wrapper, error from spyvm.tool.bitmanipulation import splitter from rpython.rlib import objectmodel, unroll, jit @@ -357,7 +357,6 @@ primitives.exitFromHeadlessExecution(self, "doesNotUnderstand:", w_message) return self._sendSpecialSelector(interp, receiver, "doesNotUnderstand", [w_message]) except error.MethodNotFound: - from spyvm.shadow import ClassShadow s_class = receiver.class_shadow(self.space) assert isinstance(s_class, ClassShadow) raise error.Exit("Missing doesNotUnderstand in hierarchy of %s" % s_class.getname()) @@ -509,6 +508,7 @@ if self.gettemp(1).is_nil(self.space): self.settemp(1, self.space.w_true) # mark unwound self.push(self.gettemp(0)) # push the first argument + from spyvm.interpreter import Return try: self.bytecodePrimValue(interp, 0) except Return, nlr: diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -72,7 +72,7 @@ print_error("Exited: %s" % e.msg) return -1 except error.SmalltalkException, e: - print_error("Unhandled Smalltalk Exception type %s. Message: %s" % (e.exception_type, e.msg) + print_error("Unhandled %s. Message: %s" % (e.exception_type, e.msg)) return -1 except BaseException, e: print_error("Exception: %s" % str(e)) From noreply at buildbot.pypy.org Sun Jul 27 12:22:14 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:14 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Merged. 
Message-ID: <20140727102214.EBA371C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r951:c78638460d93 Date: 2014-07-25 09:07 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/c78638460d93/ Log: Merged. diff too long, truncating to 2000 out of 2038 lines diff --git a/spyvm/error.py b/spyvm/error.py --- a/spyvm/error.py +++ b/spyvm/error.py @@ -1,30 +1,42 @@ -# some exception classes for the Smalltalk VM + +# Some exception classes for the Smalltalk VM class SmalltalkException(Exception): """Base class for Smalltalk exception hierarchy""" + exception_type = "SmalltalkException" + _attrs_ = ["msg"] + def __init__(self, msg=""): + self.msg = msg class PrimitiveFailedError(SmalltalkException): - pass + exception_type = "PrimitiveFailedError" class PrimitiveNotYetWrittenError(PrimitiveFailedError): - pass + exception_type = "PrimitiveNotYetWrittenError" class UnwrappingError(PrimitiveFailedError): - pass + exception_type = "UnwrappingError" class WrappingError(PrimitiveFailedError): - pass + exception_type = "WrappingError" class WrapperException(SmalltalkException): - def __init__(self, msg): - self.msg = msg + exception_type = "WrapperException" class FatalError(SmalltalkException): - def __init__(self, msg): - self.msg = msg + exception_type = "FatalError" class BlockCannotReturnError(SmalltalkException): - pass + exception_type = "BlockCannotReturnError" + +class MethodNotFound(SmalltalkException): + exception_type = "MethodNotFound" + +class MissingBytecode(SmalltalkException): + """Bytecode not implemented yet.""" + exception_type = "MissingBytecode" + def __init__(self, bytecodename): + SmalltalkException.__init__(self, "Missing bytecode encountered: %s" % bytecodename) class Exit(Exception): _attrs_ = ["msg"] diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -1,26 +1,60 @@ -import py import os -from spyvm.shadow import ContextPartShadow, MethodContextShadow, 
BlockContextShadow, MethodNotFound -from spyvm import model, constants, primitives, conftest, wrapper, objspace -from spyvm.tool.bitmanipulation import splitter -from rpython.rlib import jit, rstackovf -from rpython.rlib import objectmodel, unroll +from spyvm.shadow import MethodContextShadow +from spyvm import model, constants, wrapper, objspace, interpreter_bytecodes -class MissingBytecode(Exception): - """Bytecode not implemented yet.""" - def __init__(self, bytecodename): - self.bytecodename = bytecodename - print "MissingBytecode:", bytecodename # hack for debugging +from rpython.rlib import jit, rstackovf, unroll -class IllegalStoreError(Exception): - """Illegal Store.""" +class ReturnFromTopLevel(Exception): + _attrs_ = ["object"] + def __init__(self, object): + self.object = object + +class Return(Exception): + _attrs_ = ["value", "s_target_context", "is_local"] + def __init__(self, s_target_context, w_result): + self.value = w_result + self.s_target_context = s_target_context + self.is_local = False + +class ContextSwitchException(Exception): + """General Exception that causes the interpreter to leave + the current context.""" + + _attrs_ = ["s_new_context"] + type = "ContextSwitch" + def __init__(self, s_new_context): + self.s_new_context = s_new_context + + def print_trace(self, old_context): + print "====== %s, contexts forced to heap at: %s" % (self.type, self.s_new_context.short_str()) + +class StackOverflow(ContextSwitchException): + """This causes the current jit-loop to be left, dumping all virtualized objects to the heap. + This breaks performance, so it should rarely happen. + In case of severe performance problems, execute with -t and check if this occurrs.""" + type = "Stack Overflow" + +class ProcessSwitch(ContextSwitchException): + """This causes the interpreter to switch the executed context. 
+ Triggered when switching the process.""" + + def print_trace(self, old_context): + print "====== Switched process from: %s" % old_context.short_str() + print "====== to: %s " % self.s_new_context.short_str() + +class SenderChainManipulation(ContextSwitchException): + """Manipulation of the sender chain can invalidate the jitted C stack. + We have to dump all virtual objects and rebuild the stack. + We try to raise this as rarely as possible and as late as possible.""" + type = "Sender Manipulation" + +UNROLLING_BYTECODE_RANGES = unroll.unrolling_iterable(interpreter_bytecodes.BYTECODE_RANGES) def get_printable_location(pc, self, method): bc = ord(method.bytes[pc]) name = method.safe_identifier_string() - return '(%s) [%d]: <%s>%s' % (name, pc, hex(bc), BYTECODE_NAMES[bc]) - + return '(%s) [%d]: <%s>%s' % (name, pc, hex(bc), interpreter_bytecodes.BYTECODE_NAMES[bc]) class Interpreter(object): _immutable_fields_ = ["space", "image", @@ -218,7 +252,7 @@ s_frame.push_all(list(w_arguments)) return s_frame - # ============== Methods for tracing, printing and debugging ============== + # ============== Methods for tracing and printing ============== def is_tracing(self): return jit.promote(self.trace) @@ -226,875 +260,6 @@ def print_padded(self, str): assert self.is_tracing() print (' ' * self.stack_depth) + str - - def activate_debug_bytecode(self): - "NOT_RPYTHON" - def do_break(self): - import pdb - if self.break_on_bytecodes: - pdb.set_trace() - Interpreter.debug_bytecode = do_break - self.break_on_bytecodes = True - - def debug_bytecode(self): - # This is for debugging. 
In a pdb console, execute the following: - # self.activate_debug_bytecode() - pass -class ReturnFromTopLevel(Exception): - _attrs_ = ["object"] - def __init__(self, object): - self.object = object - -class Return(Exception): - _attrs_ = ["value", "s_target_context", "is_local"] - def __init__(self, s_target_context, w_result): - self.value = w_result - self.s_target_context = s_target_context - self.is_local = False - -class ContextSwitchException(Exception): - """General Exception that causes the interpreter to leave - the current context.""" - - _attrs_ = ["s_new_context"] - type = "ContextSwitch" - def __init__(self, s_new_context): - self.s_new_context = s_new_context - - def print_trace(self, old_context): - print "====== %s, contexts forced to heap at: %s" % (self.type, self.s_new_context.short_str()) - -class StackOverflow(ContextSwitchException): - """This causes the current jit-loop to be left, dumping all virtualized objects to the heap. - This breaks performance, so it should rarely happen. - In case of severe performance problems, execute with -t and check if this occurrs.""" - type = "Stack Overflow" - -class ProcessSwitch(ContextSwitchException): - """This causes the interpreter to switch the executed context. - Triggered when switching the process.""" - - def print_trace(self, old_context): - print "====== Switched process from: %s" % old_context.short_str() - print "====== to: %s " % self.s_new_context.short_str() - -class SenderChainManipulation(ContextSwitchException): - """Manipulation of the sender chain can invalidate the jitted C stack. - We have to dump all virtual objects and rebuild the stack. 
- We try to raise this as rarely as possible and as late as possible.""" - type = "Sender Manipulation" - -import rpython.rlib.unroll -if hasattr(unroll, "unrolling_zero"): - unrolling_zero = unroll.unrolling_zero -else: - class unrolling_int(int, unroll.SpecTag): - def __add__(self, other): - return unrolling_int(int.__add__(self, other)) - __radd__ = __add__ - def __sub__(self, other): - return unrolling_int(int.__sub__(self, other)) - def __rsub__(self, other): - return unrolling_int(int.__rsub__(self, other)) - unrolling_zero = unrolling_int(0) - - -# This is a decorator for bytecode implementation methods. -# parameter_bytes=N means N additional bytes are fetched as parameters. -def bytecode_implementation(parameter_bytes=0): - def bytecode_implementation_decorator(actual_implementation_method): - @jit.unroll_safe - def bytecode_implementation_wrapper(self, interp, current_bytecode): - parameters = () - i = unrolling_zero - while i < parameter_bytes: - parameters += (self.fetch_next_bytecode(), ) - i = i + 1 - # This is a good place to step through bytecodes. - interp.debug_bytecode() - return actual_implementation_method(self, interp, current_bytecode, *parameters) - bytecode_implementation_wrapper.func_name = actual_implementation_method.func_name - return bytecode_implementation_wrapper - return bytecode_implementation_decorator - -def make_call_primitive_bytecode(primitive, selector, argcount, store_pc=False): - func = primitives.prim_table[primitive] - @bytecode_implementation() - def callPrimitive(self, interp, current_bytecode): - # WARNING: this is used for bytecodes for which it is safe to - # directly call the primitive. In general, it is not safe: for - # example, depending on the type of the receiver, bytecodePrimAt - # may invoke primitives.AT, primitives.STRING_AT, or anything - # else that the user put in a class in an 'at:' method. - # The rule of thumb is that primitives with only int and float - # in their unwrap_spec are safe. 
- try: - return func(interp, self, argcount) - except primitives.PrimitiveFailedError: - pass - return self._sendSelfSelectorSpecial(selector, argcount, interp) - callPrimitive.func_name = "callPrimitive_%s" % func.func_name - return callPrimitive - -def make_call_primitive_bytecode_classbased(a_class_name, a_primitive, alternative_class_name, alternative_primitive, selector, argcount): - @bytecode_implementation() - def callClassbasedPrimitive(self, interp, current_bytecode): - rcvr = self.peek(argcount) - receiver_class = rcvr.getclass(self.space) - try: - if receiver_class is getattr(self.space, a_class_name): - func = primitives.prim_table[a_primitive] - return func(interp, self, argcount) - elif receiver_class is getattr(self.space, alternative_class_name): - func = primitives.prim_table[alternative_primitive] - return func(interp, self, argcount) - except primitives.PrimitiveFailedError: - pass - return self._sendSelfSelectorSpecial(selector, argcount, interp) - callClassbasedPrimitive.func_name = "callClassbasedPrimitive_%s" % selector - return callClassbasedPrimitive - -# Some selectors cannot be overwritten, therefore no need to handle PrimitiveFailed. -def make_quick_call_primitive_bytecode(primitive_index, argcount): - func = primitives.prim_table[primitive_index] - @bytecode_implementation() - def quick_call_primitive_bytecode(self, interp, current_bytecode): - return func(interp, self, argcount) - return quick_call_primitive_bytecode - -# This is for bytecodes that actually implement a simple message-send. -# We do not optimize anything for these cases. 
-def make_send_selector_bytecode(selector, argcount): - @bytecode_implementation() - def selector_bytecode(self, interp, current_bytecode): - return self._sendSelfSelectorSpecial(selector, argcount, interp) - selector_bytecode.func_name = "selector_bytecode_%s" % selector - return selector_bytecode - -# ___________________________________________________________________________ -# Bytecode Implementations: -# -# "self" is always a ContextPartShadow instance. - -# __extend__ adds new methods to the ContextPartShadow class -class __extend__(ContextPartShadow): - - # ====== Push/Pop bytecodes ====== - - @bytecode_implementation() - def pushReceiverVariableBytecode(self, interp, current_bytecode): - index = current_bytecode & 15 - self.push(self.w_receiver().fetch(self.space, index)) - - @bytecode_implementation() - def pushTemporaryVariableBytecode(self, interp, current_bytecode): - index = current_bytecode & 15 - self.push(self.gettemp(index)) - - @bytecode_implementation() - def pushLiteralConstantBytecode(self, interp, current_bytecode): - index = current_bytecode & 31 - self.push(self.w_method().getliteral(index)) - - @bytecode_implementation() - def pushLiteralVariableBytecode(self, interp, current_bytecode): - # this bytecode assumes that literals[index] is an Association - # which is an object with two named vars, and fetches the second - # named var (the value). 
- index = current_bytecode & 31 - w_association = self.w_method().getliteral(index) - association = wrapper.AssociationWrapper(self.space, w_association) - self.push(association.value()) - - @bytecode_implementation() - def storeAndPopReceiverVariableBytecode(self, interp, current_bytecode): - index = current_bytecode & 7 - self.w_receiver().store(self.space, index, self.pop()) - - @bytecode_implementation() - def storeAndPopTemporaryVariableBytecode(self, interp, current_bytecode): - index = current_bytecode & 7 - self.settemp(index, self.pop()) - - @bytecode_implementation() - def pushReceiverBytecode(self, interp, current_bytecode): - self.push(self.w_receiver()) - - @bytecode_implementation() - def pushConstantTrueBytecode(self, interp, current_bytecode): - self.push(interp.space.w_true) - - @bytecode_implementation() - def pushConstantFalseBytecode(self, interp, current_bytecode): - self.push(interp.space.w_false) - - @bytecode_implementation() - def pushConstantNilBytecode(self, interp, current_bytecode): - self.push(interp.space.w_nil) - - @bytecode_implementation() - def pushConstantMinusOneBytecode(self, interp, current_bytecode): - self.push(interp.space.w_minus_one) - - @bytecode_implementation() - def pushConstantZeroBytecode(self, interp, current_bytecode): - self.push(interp.space.w_zero) - - @bytecode_implementation() - def pushConstantOneBytecode(self, interp, current_bytecode): - self.push(interp.space.w_one) - - @bytecode_implementation() - def pushConstantTwoBytecode(self, interp, current_bytecode): - self.push(interp.space.w_two) - - @bytecode_implementation() - def pushActiveContextBytecode(self, interp, current_bytecode): - self.push(self.w_self()) - - @bytecode_implementation() - def duplicateTopBytecode(self, interp, current_bytecode): - self.push(self.top()) - - @bytecode_implementation() - def popStackBytecode(self, interp, current_bytecode): - self.pop() - - @bytecode_implementation(parameter_bytes=1) - def pushNewArrayBytecode(self, 
interp, current_bytecode, descriptor): - arraySize, popIntoArray = splitter[7, 1](descriptor) - newArray = None - if popIntoArray == 1: - newArray = interp.space.wrap_list(self.pop_and_return_n(arraySize)) - else: - newArray = interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) - self.push(newArray) - - # ====== Extended Push/Pop bytecodes ====== - - def _extendedVariableTypeAndIndex(self, descriptor): - return ((descriptor >> 6) & 3), (descriptor & 63) - - @bytecode_implementation(parameter_bytes=1) - def extendedPushBytecode(self, interp, current_bytecode, descriptor): - variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) - if variableType == 0: - self.push(self.w_receiver().fetch(self.space, variableIndex)) - elif variableType == 1: - self.push(self.gettemp(variableIndex)) - elif variableType == 2: - self.push(self.w_method().getliteral(variableIndex)) - elif variableType == 3: - w_association = self.w_method().getliteral(variableIndex) - association = wrapper.AssociationWrapper(self.space, w_association) - self.push(association.value()) - else: - assert 0 - - def _extendedStoreBytecode(self, interp, current_bytecode, descriptor): - variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) - if variableType == 0: - self.w_receiver().store(self.space, variableIndex, self.top()) - elif variableType == 1: - self.settemp(variableIndex, self.top()) - elif variableType == 2: - raise IllegalStoreError - elif variableType == 3: - w_association = self.w_method().getliteral(variableIndex) - association = wrapper.AssociationWrapper(self.space, w_association) - association.store_value(self.top()) - - @bytecode_implementation(parameter_bytes=1) - def extendedStoreBytecode(self, interp, current_bytecode, descriptor): - return self._extendedStoreBytecode(interp, current_bytecode, descriptor) - - @bytecode_implementation(parameter_bytes=1) - def extendedStoreAndPopBytecode(self, interp, current_bytecode, descriptor): 
- self._extendedStoreBytecode(interp, current_bytecode, descriptor) - self.pop() - - def _extract_index_and_temps(self, index_in_array, index_of_array): - w_indirectTemps = self.gettemp(index_of_array) - return index_in_array, w_indirectTemps - - @bytecode_implementation(parameter_bytes=2) - def pushRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): - index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) - self.push(w_indirectTemps.at0(self.space, index_in_array)) - - @bytecode_implementation(parameter_bytes=2) - def storeRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): - index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) - w_indirectTemps.atput0(self.space, index_in_array, self.top()) - - @bytecode_implementation(parameter_bytes=2) - def storeAndPopRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): - index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) - w_indirectTemps.atput0(self.space, index_in_array, self.pop()) - - @bytecode_implementation(parameter_bytes=3) - def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode, descriptor, j, i): - """ Copied from Blogpost: http://www.mirandabanda.org/cogblog/2008/07/22/closures-part-ii-the-bytecodes/ - ContextPart>>pushClosureCopyNumCopiedValues: numCopied numArgs: numArgs blockSize: blockSize - "Simulate the action of a 'closure copy' bytecode whose result is the - new BlockClosure for the following code" - | copiedValues | - numCopied > 0 - ifTrue: - [copiedValues := Array new: numCopied. - numCopied to: 1 by: -1 do: - [:i| - copiedValues at: i put: self pop]] - ifFalse: - [copiedValues := nil]. - self push: (BlockClosure new - outerContext: self - startpc: pc - numArgs: numArgs - copiedValues: copiedValues). 
- self jump: blockSize - """ - - space = self.space - numArgs, numCopied = splitter[4, 4](descriptor) - blockSize = (j << 8) | i - # Create new instance of BlockClosure - w_closure = space.newClosure(self.w_self(), self.pc(), numArgs, - self.pop_and_return_n(numCopied)) - self.push(w_closure) - self._jump(blockSize) - - # ====== Helpers for send/return bytecodes ====== - - def _sendSelfSelector(self, w_selector, argcount, interp): - receiver = self.peek(argcount) - return self._sendSelector(w_selector, argcount, interp, - receiver, receiver.class_shadow(self.space)) - - def _sendSuperSelector(self, w_selector, argcount, interp): - compiledin_class = self.w_method().compiled_in() - assert isinstance(compiledin_class, model.W_PointersObject) - s_compiledin = compiledin_class.as_class_get_shadow(self.space) - return self._sendSelector(w_selector, argcount, interp, self.w_receiver(), - s_compiledin.s_superclass()) - - def _sendSelector(self, w_selector, argcount, interp, - receiver, receiverclassshadow, w_arguments=None): - assert argcount >= 0 - try: - w_method = receiverclassshadow.lookup(w_selector) - except MethodNotFound: - return self._doesNotUnderstand(w_selector, argcount, interp, receiver) - - code = w_method.primitive() - if code: - if w_arguments: - self.push_all(w_arguments) - try: - return self._call_primitive(code, interp, argcount, w_method, w_selector) - except primitives.PrimitiveFailedError: - pass # ignore this error and fall back to the Smalltalk version - if not w_arguments: - w_arguments = self.pop_and_return_n(argcount) - s_frame = w_method.create_frame(interp.space, receiver, w_arguments) - self.pop() # receiver - - # ###################################################################### - if interp.is_tracing(): - interp.print_padded('-> ' + s_frame.short_str()) - - return interp.stack_frame(s_frame, self) - - @objectmodel.specialize.arg(1) - def _sendSelfSelectorSpecial(self, selector, numargs, interp): - w_selector = 
self.space.get_special_selector(selector) - return self._sendSelfSelector(w_selector, numargs, interp) - - def _sendSpecialSelector(self, interp, receiver, special_selector, w_args=[]): - w_special_selector = self.space.objtable["w_" + special_selector] - s_class = receiver.class_shadow(self.space) - w_method = s_class.lookup(w_special_selector) - s_frame = w_method.create_frame(interp.space, receiver, w_args) - - # ###################################################################### - if interp.is_tracing(): - interp.print_padded('-> %s %s' % (special_selector, s_frame.short_str())) - if not objectmodel.we_are_translated(): - import pdb; pdb.set_trace() - - return interp.stack_frame(s_frame, self) - - def _doesNotUnderstand(self, w_selector, argcount, interp, receiver): - arguments = self.pop_and_return_n(argcount) - w_message_class = self.space.classtable["w_Message"] - assert isinstance(w_message_class, model.W_PointersObject) - s_message_class = w_message_class.as_class_get_shadow(self.space) - w_message = s_message_class.new() - w_message.store(self.space, 0, w_selector) - w_message.store(self.space, 1, self.space.wrap_list(arguments)) - self.pop() # The receiver, already known. 
- - try: - if interp.space.headless.is_set(): - primitives.exitFromHeadlessExecution(self, "doesNotUnderstand:", w_message) - return self._sendSpecialSelector(interp, receiver, "doesNotUnderstand", [w_message]) - except MethodNotFound: - from spyvm.shadow import ClassShadow - s_class = receiver.class_shadow(self.space) - assert isinstance(s_class, ClassShadow) - from spyvm import error - raise error.Exit("Missing doesNotUnderstand in hierarchy of %s" % s_class.getname()) - - def _mustBeBoolean(self, interp, receiver): - return self._sendSpecialSelector(interp, receiver, "mustBeBoolean") - - def _call_primitive(self, code, interp, argcount, w_method, w_selector): - # ################################################################## - if interp.is_tracing(): - interp.print_padded("-> primitive %d \t(in %s, named %s)" % ( - code, self.w_method().get_identifier_string(), - w_selector.selector_string())) - func = primitives.prim_holder.prim_table[code] - try: - # note: argcount does not include rcvr - # the primitive pushes the result (if any) onto the stack itself - return func(interp, self, argcount, w_method) - except primitives.PrimitiveFailedError, e: - if interp.is_tracing(): - interp.print_padded("-- primitive %d FAILED\t (in %s, named %s)" % ( - code, w_method.safe_identifier_string(), w_selector.selector_string())) - raise e - - def _return(self, return_value, interp, local_return=False): - # unfortunately, this assert is not true for some tests. TODO fix this. - # assert self._stack_ptr == self.tempsize() - - # ################################################################## - if interp.is_tracing(): - interp.print_padded('<- ' + return_value.as_repr_string()) - - if self.home_is_self() or local_return: - # a local return just needs to go up the stack once. 
there - # it will find the sender as a local, and we don't have to - # force the reference - s_return_to = None - return_from_top = self.s_sender() is None - else: - s_return_to = self.s_home().s_sender() - return_from_top = s_return_to is None - - if return_from_top: - # This should never happen while executing a normal image. - raise ReturnFromTopLevel(return_value) - else: - raise Return(s_return_to, return_value) - - # ====== Send/Return bytecodes ====== - - @bytecode_implementation() - def returnReceiverBytecode(self, interp, current_bytecode): - return self._return(self.w_receiver(), interp) - - @bytecode_implementation() - def returnTrueBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_true, interp) - - @bytecode_implementation() - def returnFalseBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_false, interp) - - @bytecode_implementation() - def returnNilBytecode(self, interp, current_bytecode): - return self._return(interp.space.w_nil, interp) - - @bytecode_implementation() - def returnTopFromMethodBytecode(self, interp, current_bytecode): - return self._return(self.pop(), interp) - - @bytecode_implementation() - def returnTopFromBlockBytecode(self, interp, current_bytecode): - return self._return(self.pop(), interp, local_return=True) - - @bytecode_implementation() - def sendLiteralSelectorBytecode(self, interp, current_bytecode): - w_selector = self.w_method().getliteral(current_bytecode & 15) - argcount = ((current_bytecode >> 4) & 3) - 1 - return self._sendSelfSelector(w_selector, argcount, interp) - - def _getExtendedSelectorArgcount(self, descriptor): - return ((self.w_method().getliteral(descriptor & 31)), - (descriptor >> 5)) - - @bytecode_implementation(parameter_bytes=1) - def singleExtendedSendBytecode(self, interp, current_bytecode, descriptor): - w_selector, argcount = self._getExtendedSelectorArgcount(descriptor) - return self._sendSelfSelector(w_selector, argcount, interp) - - 
@bytecode_implementation(parameter_bytes=2) - def doubleExtendedDoAnythingBytecode(self, interp, current_bytecode, second, third): - from spyvm import error - opType = second >> 5 - if opType == 0: - # selfsend - return self._sendSelfSelector(self.w_method().getliteral(third), - second & 31, interp) - elif opType == 1: - # supersend - return self._sendSuperSelector(self.w_method().getliteral(third), - second & 31, interp) - elif opType == 2: - # pushReceiver - self.push(self.w_receiver().fetch(self.space, third)) - elif opType == 3: - # pushLiteralConstant - self.push(self.w_method().getliteral(third)) - elif opType == 4: - # pushLiteralVariable - w_association = self.w_method().getliteral(third) - association = wrapper.AssociationWrapper(self.space, w_association) - self.push(association.value()) - elif opType == 5: - # TODO - the following two special cases should not be necessary - try: - self.w_receiver().store(self.space, third, self.top()) - except SenderChainManipulation, e: - raise SenderChainManipulation(self) - elif opType == 6: - try: - self.w_receiver().store(self.space, third, self.pop()) - except SenderChainManipulation, e: - raise SenderChainManipulation(self) - elif opType == 7: - w_association = self.w_method().getliteral(third) - association = wrapper.AssociationWrapper(self.space, w_association) - association.store_value(self.top()) - - @bytecode_implementation(parameter_bytes=1) - def singleExtendedSuperBytecode(self, interp, current_bytecode, descriptor): - w_selector, argcount = self._getExtendedSelectorArgcount(descriptor) - return self._sendSuperSelector(w_selector, argcount, interp) - - @bytecode_implementation(parameter_bytes=1) - def secondExtendedSendBytecode(self, interp, current_bytecode, descriptor): - w_selector = self.w_method().getliteral(descriptor & 63) - argcount = descriptor >> 6 - return self._sendSelfSelector(w_selector, argcount, interp) - - # ====== Misc ====== - - def _activate_unwind_context(self, interp): - if 
self.is_closure_context() or not self.is_BlockClosure_ensure(): - self.mark_returned() - return - # The first temp is executed flag for both #ensure: and #ifCurtailed: - if self.gettemp(1).is_nil(self.space): - self.settemp(1, self.space.w_true) # mark unwound - self.push(self.gettemp(0)) # push the first argument - try: - self.bytecodePrimValue(interp, 0) - except Return, nlr: - assert nlr.s_target_context or nlr.is_local - if self is not nlr.s_target_context and not nlr.is_local: - raise nlr - finally: - self.mark_returned() - - @bytecode_implementation() - def unknownBytecode(self, interp, current_bytecode): - raise MissingBytecode("unknownBytecode") - - @bytecode_implementation() - def experimentalBytecode(self, interp, current_bytecode): - raise MissingBytecode("experimentalBytecode") - - # ====== Jump bytecodes ====== - - def _jump(self, offset): - self.store_pc(self.pc() + offset) - - def _jumpConditional(self, interp, expecting_true, position): - if expecting_true: - w_expected = interp.space.w_true - w_alternative = interp.space.w_false - else: - w_alternative = interp.space.w_true - w_expected = interp.space.w_false - - # Don't check the class, just compare with only two Boolean instances. 
- w_bool = self.pop() - if w_expected.is_same_object(w_bool): - self._jump(position) - elif not w_alternative.is_same_object(w_bool): - self._mustBeBoolean(interp, w_bool) - - def _shortJumpOffset(self, current_bytecode): - return (current_bytecode & 7) + 1 - - def _longJumpOffset(self, current_bytecode, parameter): - return ((current_bytecode & 3) << 8) + parameter - - @bytecode_implementation() - def shortUnconditionalJumpBytecode(self, interp, current_bytecode): - self._jump(self._shortJumpOffset(current_bytecode)) - - @bytecode_implementation() - def shortConditionalJumpBytecode(self, interp, current_bytecode): - # The conditional jump is "jump on false" - self._jumpConditional(interp, False, self._shortJumpOffset(current_bytecode)) - - @bytecode_implementation(parameter_bytes=1) - def longUnconditionalJumpBytecode(self, interp, current_bytecode, parameter): - offset = (((current_bytecode & 7) - 4) << 8) + parameter - self._jump(offset) - - @bytecode_implementation(parameter_bytes=1) - def longJumpIfTrueBytecode(self, interp, current_bytecode, parameter): - self._jumpConditional(interp, True, self._longJumpOffset(current_bytecode, parameter)) - - @bytecode_implementation(parameter_bytes=1) - def longJumpIfFalseBytecode(self, interp, current_bytecode, parameter): - self._jumpConditional(interp, False, self._longJumpOffset(current_bytecode, parameter)) - - # ====== Bytecodes implemented with primitives and message sends ====== - - bytecodePrimAdd = make_call_primitive_bytecode(primitives.ADD, "+", 1) - bytecodePrimSubtract = make_call_primitive_bytecode(primitives.SUBTRACT, "-", 1) - bytecodePrimLessThan = make_call_primitive_bytecode (primitives.LESSTHAN, "<", 1) - bytecodePrimGreaterThan = make_call_primitive_bytecode(primitives.GREATERTHAN, ">", 1) - bytecodePrimLessOrEqual = make_call_primitive_bytecode(primitives.LESSOREQUAL, "<=", 1) - bytecodePrimGreaterOrEqual = make_call_primitive_bytecode(primitives.GREATEROREQUAL, ">=", 1) - bytecodePrimEqual = 
make_call_primitive_bytecode(primitives.EQUAL, "=", 1) - bytecodePrimNotEqual = make_call_primitive_bytecode(primitives.NOTEQUAL, "~=", 1) - bytecodePrimMultiply = make_call_primitive_bytecode(primitives.MULTIPLY, "*", 1) - bytecodePrimDivide = make_call_primitive_bytecode(primitives.DIVIDE, "/", 1) - bytecodePrimMod = make_call_primitive_bytecode(primitives.MOD, "\\\\", 1) - bytecodePrimMakePoint = make_call_primitive_bytecode(primitives.MAKE_POINT, "@", 1) - bytecodePrimBitShift = make_call_primitive_bytecode(primitives.BIT_SHIFT, "bitShift:", 1) - bytecodePrimDiv = make_call_primitive_bytecode(primitives.DIV, "//", 1) - bytecodePrimBitAnd = make_call_primitive_bytecode(primitives.BIT_AND, "bitAnd:", 1) - bytecodePrimBitOr = make_call_primitive_bytecode(primitives.BIT_OR, "bitOr:", 1) - - bytecodePrimAt = make_send_selector_bytecode("at:", 1) - bytecodePrimAtPut = make_send_selector_bytecode("at:put:", 2) - bytecodePrimSize = make_send_selector_bytecode("size", 0) - bytecodePrimNext = make_send_selector_bytecode("next", 0) - bytecodePrimNextPut = make_send_selector_bytecode("nextPut:", 1) - bytecodePrimAtEnd = make_send_selector_bytecode("atEnd", 0) - - bytecodePrimEquivalent = make_quick_call_primitive_bytecode(primitives.EQUIVALENT, 1) - bytecodePrimClass = make_quick_call_primitive_bytecode(primitives.CLASS, 0) - - bytecodePrimBlockCopy = make_call_primitive_bytecode(primitives.BLOCK_COPY, "blockCopy:", 1) - bytecodePrimValue = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE, "value", 0) - bytecodePrimValueWithArg = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE_, "value:", 1) - - bytecodePrimDo = make_send_selector_bytecode("do:", 1) - bytecodePrimNew = make_send_selector_bytecode("new", 0) - bytecodePrimNewWithArg = make_send_selector_bytecode("new:", 1) - bytecodePrimPointX = make_send_selector_bytecode("x", 0) - 
bytecodePrimPointY = make_send_selector_bytecode("y", 0) - -BYTECODE_RANGES = [ - ( 0, 15, "pushReceiverVariableBytecode"), - ( 16, 31, "pushTemporaryVariableBytecode"), - ( 32, 63, "pushLiteralConstantBytecode"), - ( 64, 95, "pushLiteralVariableBytecode"), - ( 96, 103, "storeAndPopReceiverVariableBytecode"), - (104, 111, "storeAndPopTemporaryVariableBytecode"), - (112, "pushReceiverBytecode"), - (113, "pushConstantTrueBytecode"), - (114, "pushConstantFalseBytecode"), - (115, "pushConstantNilBytecode"), - (116, "pushConstantMinusOneBytecode"), - (117, "pushConstantZeroBytecode"), - (118, "pushConstantOneBytecode"), - (119, "pushConstantTwoBytecode"), - (120, "returnReceiverBytecode"), - (121, "returnTrueBytecode"), - (122, "returnFalseBytecode"), - (123, "returnNilBytecode"), - (124, "returnTopFromMethodBytecode"), - (125, "returnTopFromBlockBytecode"), - (126, "unknownBytecode"), - (127, "unknownBytecode"), - (128, "extendedPushBytecode"), - (129, "extendedStoreBytecode"), - (130, "extendedStoreAndPopBytecode"), - (131, "singleExtendedSendBytecode"), - (132, "doubleExtendedDoAnythingBytecode"), - (133, "singleExtendedSuperBytecode"), - (134, "secondExtendedSendBytecode"), - (135, "popStackBytecode"), - (136, "duplicateTopBytecode"), - (137, "pushActiveContextBytecode"), - (138, "pushNewArrayBytecode"), - (139, "experimentalBytecode"), - (140, "pushRemoteTempLongBytecode"), - (141, "storeRemoteTempLongBytecode"), - (142, "storeAndPopRemoteTempLongBytecode"), - (143, "pushClosureCopyCopiedValuesBytecode"), - (144, 151, "shortUnconditionalJumpBytecode"), - (152, 159, "shortConditionalJumpBytecode"), - (160, 167, "longUnconditionalJumpBytecode"), - (168, 171, "longJumpIfTrueBytecode"), - (172, 175, "longJumpIfFalseBytecode"), - (176, "bytecodePrimAdd"), - (177, "bytecodePrimSubtract"), - (178, "bytecodePrimLessThan"), - (179, "bytecodePrimGreaterThan"), - (180, "bytecodePrimLessOrEqual"), - (181, "bytecodePrimGreaterOrEqual"), - (182, "bytecodePrimEqual"), - (183, 
"bytecodePrimNotEqual"), - (184, "bytecodePrimMultiply"), - (185, "bytecodePrimDivide"), - (186, "bytecodePrimMod"), - (187, "bytecodePrimMakePoint"), - (188, "bytecodePrimBitShift"), - (189, "bytecodePrimDiv"), - (190, "bytecodePrimBitAnd"), - (191, "bytecodePrimBitOr"), - (192, "bytecodePrimAt"), - (193, "bytecodePrimAtPut"), - (194, "bytecodePrimSize"), - (195, "bytecodePrimNext"), - (196, "bytecodePrimNextPut"), - (197, "bytecodePrimAtEnd"), - (198, "bytecodePrimEquivalent"), - (199, "bytecodePrimClass"), - (200, "bytecodePrimBlockCopy"), - (201, "bytecodePrimValue"), - (202, "bytecodePrimValueWithArg"), - (203, "bytecodePrimDo"), - (204, "bytecodePrimNew"), - (205, "bytecodePrimNewWithArg"), - (206, "bytecodePrimPointX"), - (207, "bytecodePrimPointY"), - (208, 255, "sendLiteralSelectorBytecode"), - ] - -from rpython.rlib.unroll import unrolling_iterable -UNROLLING_BYTECODE_RANGES = unrolling_iterable(BYTECODE_RANGES) - -def initialize_bytecode_names(): - result = [None] * 256 - for entry in BYTECODE_RANGES: - if len(entry) == 2: - result[entry[0]] = entry[1] - else: - for arg, pos in enumerate(range(entry[0], entry[1]+1)): - result[pos] = "%s(%s)" % (entry[2], arg) - assert None not in result - return result - -BYTECODE_NAMES = initialize_bytecode_names() - -def initialize_bytecode_table(): - result = [None] * 256 - for entry in BYTECODE_RANGES: - if len(entry) == 2: - positions = [entry[0]] - else: - positions = range(entry[0], entry[1]+1) - for pos in positions: - result[pos] = getattr(ContextPartShadow, entry[-1]) - assert None not in result - return result - -# this table is only used for creating named bytecodes in tests and printing -BYTECODE_TABLE = initialize_bytecode_table() - -# Smalltalk debugging facilities, patching Interpreter and ContextPartShadow -# in order to enable tracing/jumping for message sends etc. 
-def debugging(): - def stepping_debugger_init(original): - def meth(self, space, image=None, trace=False): - return_value = original(self, space, image=image, trace=trace) - # ############################################################## - - self.message_stepping = False - self.halt_on_failing_primitives = False - - # ############################################################## - return return_value - return meth - - Interpreter.__init__ = stepping_debugger_init(Interpreter.__init__) - - def stepping_debugger_send(original): - """When interp.message_stepping is True, we halt on every call of ContextPartShadow._sendSelector. - The method is not called for bytecode message sends (see constants.SPECIAL_SELECTORS)""" - def meth(s_context, w_selector, argcount, interp, - receiver, receiverclassshadow): - options = [False] - def next(): interp.message_stepping = True; print 'Now continue (c).' - def over(): options[0] = True; print 'Skipping #%s. You still need to continue(c).' % w_selector.str_content() - def pstack(): print s_context.print_stack() - if interp.message_stepping: - if argcount == 0: - print "-> %s #%s" % (receiver.as_repr_string(), - w_selector.str_content()) - elif argcount == 1: - print "-> %s #%s %s" % (receiver.as_repr_string(), - w_selector.str_content(), - s_context.peek(0).as_repr_string()) - else: - print "-> %s #%s %r" % (receiver.as_repr_string(), - w_selector.str_content(), - [s_context.peek(argcount-1-i) for i in range(argcount)]) - import pdb; pdb.set_trace() - if options[0]: - m_s = interp.message_stepping - interp.message_stepping = False - try: - return original(s_context, w_selector, argcount, interp, receiver, receiverclassshadow) - finally: - interp.message_stepping = m_s - else: - return original(s_context, w_selector, argcount, interp, receiver, receiverclassshadow) - return meth - - ContextPartShadow._sendSelector = stepping_debugger_send(ContextPartShadow._sendSelector) - - def stepping_debugger_failed_primitive_halt(original): 
- def meth(self, code, interp, argcount, w_method, w_selector): - try: - original(self, code, interp, argcount, w_method, w_selector) - except primitives.PrimitiveFailedError, e: - if interp.halt_on_failing_primitives: - func = primitives.prim_holder.prim_table[code] - if func.func_name != 'raise_failing_default' and code != 83: - import pdb; pdb.set_trace() - try: - func(interp, self, argcount, w_method) # will fail again - except primitives.PrimitiveFailedError: - pass - raise e - return meth - - ContextPartShadow._call_primitive = stepping_debugger_failed_primitive_halt(ContextPartShadow._call_primitive) - - def trace_missing_named_primitives(original): - def meth(interp, s_frame, argcount, w_method=None): - try: - return original(interp, s_frame, argcount, w_method=w_method) - except primitives.PrimitiveFailedError, e: - space = interp.space - w_description = w_method.literalat0(space, 1) - if not isinstance(w_description, model.W_PointersObject) or w_description.size() < 2: - raise e - w_modulename = w_description.at0(space, 0) - w_functionname = w_description.at0(space, 1) - if not (isinstance(w_modulename, model.W_BytesObject) and - isinstance(w_functionname, model.W_BytesObject)): - raise e - signature = (w_modulename.as_string(), w_functionname.as_string()) - debugging.missing_named_primitives.add(signature) - raise e - return meth - - primitives.prim_table[primitives.EXTERNAL_CALL] = trace_missing_named_primitives(primitives.prim_table[primitives.EXTERNAL_CALL]) - debugging.missing_named_primitives = set() - -# debugging() +# Uncomment this to load debugging facilities at startup. 
+#from spyvm import interpreter_debugging; Interpreter.__init__ = interpreter_debugging.activating_init(Interpreter.__init__) diff --git a/spyvm/interpreter_bytecodes.py b/spyvm/interpreter_bytecodes.py new file mode 100644 --- /dev/null +++ b/spyvm/interpreter_bytecodes.py @@ -0,0 +1,725 @@ + +from spyvm.shadow import ContextPartShadow, ClassShadow +from spyvm import model, primitives, wrapper, error +from spyvm.tool.bitmanipulation import splitter +from rpython.rlib import objectmodel, unroll, jit + +# unrolling_zero has been removed from rlib at some point. +if hasattr(unroll, "unrolling_zero"): + unrolling_zero = unroll.unrolling_zero +else: + class unrolling_int(int, unroll.SpecTag): + def __add__(self, other): + return unrolling_int(int.__add__(self, other)) + __radd__ = __add__ + def __sub__(self, other): + return unrolling_int(int.__sub__(self, other)) + def __rsub__(self, other): + return unrolling_int(int.__rsub__(self, other)) + unrolling_zero = unrolling_int(0) + +# This is a decorator for bytecode implementation methods. +# parameter_bytes=N means N additional bytes are fetched as parameters. +def bytecode_implementation(parameter_bytes=0): + def bytecode_implementation_decorator(actual_implementation_method): + @jit.unroll_safe + def bytecode_implementation_wrapper(self, interp, current_bytecode): + parameters = () + i = unrolling_zero + while i < parameter_bytes: + parameters += (self.fetch_next_bytecode(), ) + i = i + 1 + # This is a good place to step through bytecodes. 
+ self.debug_bytecode() + return actual_implementation_method(self, interp, current_bytecode, *parameters) + bytecode_implementation_wrapper.func_name = actual_implementation_method.func_name + return bytecode_implementation_wrapper + return bytecode_implementation_decorator + +def make_call_primitive_bytecode(primitive, selector, argcount, store_pc=False): + func = primitives.prim_table[primitive] + @bytecode_implementation() + def callPrimitive(self, interp, current_bytecode): + # WARNING: this is used for bytecodes for which it is safe to + # directly call the primitive. In general, it is not safe: for + # example, depending on the type of the receiver, bytecodePrimAt + # may invoke primitives.AT, primitives.STRING_AT, or anything + # else that the user put in a class in an 'at:' method. + # The rule of thumb is that primitives with only int and float + # in their unwrap_spec are safe. + try: + return func(interp, self, argcount) + except error.PrimitiveFailedError: + pass + return self._sendSelfSelectorSpecial(selector, argcount, interp) + callPrimitive.func_name = "callPrimitive_%s" % func.func_name + return callPrimitive + +def make_call_primitive_bytecode_classbased(a_class_name, a_primitive, alternative_class_name, alternative_primitive, selector, argcount): + @bytecode_implementation() + def callClassbasedPrimitive(self, interp, current_bytecode): + rcvr = self.peek(argcount) + receiver_class = rcvr.getclass(self.space) + try: + if receiver_class is getattr(self.space, a_class_name): + func = primitives.prim_table[a_primitive] + return func(interp, self, argcount) + elif receiver_class is getattr(self.space, alternative_class_name): + func = primitives.prim_table[alternative_primitive] + return func(interp, self, argcount) + except error.PrimitiveFailedError: + pass + return self._sendSelfSelectorSpecial(selector, argcount, interp) + callClassbasedPrimitive.func_name = "callClassbasedPrimitive_%s" % selector + return callClassbasedPrimitive + +# Some 
selectors cannot be overwritten, therefore no need to handle PrimitiveFailed. +def make_quick_call_primitive_bytecode(primitive_index, argcount): + func = primitives.prim_table[primitive_index] + @bytecode_implementation() + def quick_call_primitive_bytecode(self, interp, current_bytecode): + return func(interp, self, argcount) + return quick_call_primitive_bytecode + +# This is for bytecodes that actually implement a simple message-send. +# We do not optimize anything for these cases. +def make_send_selector_bytecode(selector, argcount): + @bytecode_implementation() + def selector_bytecode(self, interp, current_bytecode): + return self._sendSelfSelectorSpecial(selector, argcount, interp) + selector_bytecode.func_name = "selector_bytecode_%s" % selector + return selector_bytecode + +# ___________________________________________________________________________ +# Bytecode Implementations: +# +# "self" is always a ContextPartShadow instance. +# __extend__ adds new methods to the ContextPartShadow class +class __extend__(ContextPartShadow): + + # ====== Push/Pop bytecodes ====== + + @bytecode_implementation() + def pushReceiverVariableBytecode(self, interp, current_bytecode): + index = current_bytecode & 15 + self.push(self.w_receiver().fetch(self.space, index)) + + @bytecode_implementation() + def pushTemporaryVariableBytecode(self, interp, current_bytecode): + index = current_bytecode & 15 + self.push(self.gettemp(index)) + + @bytecode_implementation() + def pushLiteralConstantBytecode(self, interp, current_bytecode): + index = current_bytecode & 31 + self.push(self.w_method().getliteral(index)) + + @bytecode_implementation() + def pushLiteralVariableBytecode(self, interp, current_bytecode): + # this bytecode assumes that literals[index] is an Association + # which is an object with two named vars, and fetches the second + # named var (the value). 
+ index = current_bytecode & 31 + w_association = self.w_method().getliteral(index) + association = wrapper.AssociationWrapper(self.space, w_association) + self.push(association.value()) + + @bytecode_implementation() + def storeAndPopReceiverVariableBytecode(self, interp, current_bytecode): + index = current_bytecode & 7 + self.w_receiver().store(self.space, index, self.pop()) + + @bytecode_implementation() + def storeAndPopTemporaryVariableBytecode(self, interp, current_bytecode): + index = current_bytecode & 7 + self.settemp(index, self.pop()) + + @bytecode_implementation() + def pushReceiverBytecode(self, interp, current_bytecode): + self.push(self.w_receiver()) + + @bytecode_implementation() + def pushConstantTrueBytecode(self, interp, current_bytecode): + self.push(interp.space.w_true) + + @bytecode_implementation() + def pushConstantFalseBytecode(self, interp, current_bytecode): + self.push(interp.space.w_false) + + @bytecode_implementation() + def pushConstantNilBytecode(self, interp, current_bytecode): + self.push(interp.space.w_nil) + + @bytecode_implementation() + def pushConstantMinusOneBytecode(self, interp, current_bytecode): + self.push(interp.space.w_minus_one) + + @bytecode_implementation() + def pushConstantZeroBytecode(self, interp, current_bytecode): + self.push(interp.space.w_zero) + + @bytecode_implementation() + def pushConstantOneBytecode(self, interp, current_bytecode): + self.push(interp.space.w_one) + + @bytecode_implementation() + def pushConstantTwoBytecode(self, interp, current_bytecode): + self.push(interp.space.w_two) + + @bytecode_implementation() + def pushActiveContextBytecode(self, interp, current_bytecode): + self.push(self.w_self()) + + @bytecode_implementation() + def duplicateTopBytecode(self, interp, current_bytecode): + self.push(self.top()) + + @bytecode_implementation() + def popStackBytecode(self, interp, current_bytecode): + self.pop() + + @bytecode_implementation(parameter_bytes=1) + def pushNewArrayBytecode(self, 
interp, current_bytecode, descriptor): + arraySize, popIntoArray = splitter[7, 1](descriptor) + newArray = None + if popIntoArray == 1: + newArray = interp.space.wrap_list(self.pop_and_return_n(arraySize)) + else: + newArray = interp.space.w_Array.as_class_get_shadow(interp.space).new(arraySize) + self.push(newArray) + + # ====== Extended Push/Pop bytecodes ====== + + def _extendedVariableTypeAndIndex(self, descriptor): + return ((descriptor >> 6) & 3), (descriptor & 63) + + @bytecode_implementation(parameter_bytes=1) + def extendedPushBytecode(self, interp, current_bytecode, descriptor): + variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) + if variableType == 0: + self.push(self.w_receiver().fetch(self.space, variableIndex)) + elif variableType == 1: + self.push(self.gettemp(variableIndex)) + elif variableType == 2: + self.push(self.w_method().getliteral(variableIndex)) + elif variableType == 3: + w_association = self.w_method().getliteral(variableIndex) + association = wrapper.AssociationWrapper(self.space, w_association) + self.push(association.value()) + else: + assert 0 + + def _extendedStoreBytecode(self, interp, current_bytecode, descriptor): + variableType, variableIndex = self._extendedVariableTypeAndIndex(descriptor) + if variableType == 0: + self.w_receiver().store(self.space, variableIndex, self.top()) + elif variableType == 1: + self.settemp(variableIndex, self.top()) + elif variableType == 2: + raise error.FatalError("Illegal ExtendedStoreBytecode. 
veriableType 2.") + elif variableType == 3: + w_association = self.w_method().getliteral(variableIndex) + association = wrapper.AssociationWrapper(self.space, w_association) + association.store_value(self.top()) + + @bytecode_implementation(parameter_bytes=1) + def extendedStoreBytecode(self, interp, current_bytecode, descriptor): + return self._extendedStoreBytecode(interp, current_bytecode, descriptor) + + @bytecode_implementation(parameter_bytes=1) + def extendedStoreAndPopBytecode(self, interp, current_bytecode, descriptor): + self._extendedStoreBytecode(interp, current_bytecode, descriptor) + self.pop() + + def _extract_index_and_temps(self, index_in_array, index_of_array): + w_indirectTemps = self.gettemp(index_of_array) + return index_in_array, w_indirectTemps + + @bytecode_implementation(parameter_bytes=2) + def pushRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + self.push(w_indirectTemps.at0(self.space, index_in_array)) + + @bytecode_implementation(parameter_bytes=2) + def storeRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + w_indirectTemps.atput0(self.space, index_in_array, self.top()) + + @bytecode_implementation(parameter_bytes=2) + def storeAndPopRemoteTempLongBytecode(self, interp, current_bytecode, index_in_array, index_of_array): + index_in_array, w_indirectTemps = self._extract_index_and_temps(index_in_array, index_of_array) + w_indirectTemps.atput0(self.space, index_in_array, self.pop()) + + @bytecode_implementation(parameter_bytes=3) + def pushClosureCopyCopiedValuesBytecode(self, interp, current_bytecode, descriptor, j, i): + """ Copied from Blogpost: http://www.mirandabanda.org/cogblog/2008/07/22/closures-part-ii-the-bytecodes/ + 
ContextPart>>pushClosureCopyNumCopiedValues: numCopied numArgs: numArgs blockSize: blockSize + "Simulate the action of a 'closure copy' bytecode whose result is the + new BlockClosure for the following code" + | copiedValues | + numCopied > 0 + ifTrue: + [copiedValues := Array new: numCopied. + numCopied to: 1 by: -1 do: + [:i| + copiedValues at: i put: self pop]] + ifFalse: + [copiedValues := nil]. + self push: (BlockClosure new + outerContext: self + startpc: pc + numArgs: numArgs + copiedValues: copiedValues). + self jump: blockSize + """ + + space = self.space + numArgs, numCopied = splitter[4, 4](descriptor) + blockSize = (j << 8) | i + # Create new instance of BlockClosure + w_closure = space.newClosure(self.w_self(), self.pc(), numArgs, + self.pop_and_return_n(numCopied)) + self.push(w_closure) + self._jump(blockSize) + + # ====== Helpers for send/return bytecodes ====== + + def _sendSelfSelector(self, w_selector, argcount, interp): + receiver = self.peek(argcount) + return self._sendSelector(w_selector, argcount, interp, + receiver, receiver.class_shadow(self.space)) + + def _sendSuperSelector(self, w_selector, argcount, interp): + compiledin_class = self.w_method().compiled_in() + assert isinstance(compiledin_class, model.W_PointersObject) + s_compiledin = compiledin_class.as_class_get_shadow(self.space) + return self._sendSelector(w_selector, argcount, interp, self.w_receiver(), + s_compiledin.s_superclass()) + + def _sendSelector(self, w_selector, argcount, interp, + receiver, receiverclassshadow, w_arguments=None): + assert argcount >= 0 + try: + w_method = receiverclassshadow.lookup(w_selector) + except error.MethodNotFound: + return self._doesNotUnderstand(w_selector, argcount, interp, receiver) + + code = w_method.primitive() + if code: + if w_arguments: + self.push_all(w_arguments) + try: + return self._call_primitive(code, interp, argcount, w_method, w_selector) + except error.PrimitiveFailedError: + pass # ignore this error and fall back to the 
Smalltalk version + if not w_arguments: + w_arguments = self.pop_and_return_n(argcount) + s_frame = w_method.create_frame(interp.space, receiver, w_arguments) + self.pop() # receiver + + # ###################################################################### + if interp.is_tracing(): + interp.print_padded('-> ' + s_frame.short_str()) + + return interp.stack_frame(s_frame, self) + + @objectmodel.specialize.arg(1) + def _sendSelfSelectorSpecial(self, selector, numargs, interp): + w_selector = self.space.get_special_selector(selector) + return self._sendSelfSelector(w_selector, numargs, interp) + + def _sendSpecialSelector(self, interp, receiver, special_selector, w_args=[]): + w_special_selector = self.space.objtable["w_" + special_selector] + s_class = receiver.class_shadow(self.space) + w_method = s_class.lookup(w_special_selector) + s_frame = w_method.create_frame(interp.space, receiver, w_args) + + # ###################################################################### + if interp.is_tracing(): + interp.print_padded('-> %s %s' % (special_selector, s_frame.short_str())) + if not objectmodel.we_are_translated(): + import pdb; pdb.set_trace() + + return interp.stack_frame(s_frame, self) + + def _doesNotUnderstand(self, w_selector, argcount, interp, receiver): + arguments = self.pop_and_return_n(argcount) + w_message_class = self.space.classtable["w_Message"] + assert isinstance(w_message_class, model.W_PointersObject) + s_message_class = w_message_class.as_class_get_shadow(self.space) + w_message = s_message_class.new() + w_message.store(self.space, 0, w_selector) + w_message.store(self.space, 1, self.space.wrap_list(arguments)) + self.pop() # The receiver, already known. 
+ + try: + if interp.space.headless.is_set(): + primitives.exitFromHeadlessExecution(self, "doesNotUnderstand:", w_message) + return self._sendSpecialSelector(interp, receiver, "doesNotUnderstand", [w_message]) + except error.MethodNotFound: + s_class = receiver.class_shadow(self.space) + assert isinstance(s_class, ClassShadow) + raise error.Exit("Missing doesNotUnderstand in hierarchy of %s" % s_class.getname()) + + def _mustBeBoolean(self, interp, receiver): + return self._sendSpecialSelector(interp, receiver, "mustBeBoolean") + + def _call_primitive(self, code, interp, argcount, w_method, w_selector): + # ################################################################## + if interp.is_tracing(): + interp.print_padded("-> primitive %d \t(in %s, named %s)" % ( + code, self.w_method().get_identifier_string(), + w_selector.selector_string())) + func = primitives.prim_holder.prim_table[code] + try: + # note: argcount does not include rcvr + # the primitive pushes the result (if any) onto the stack itself + return func(interp, self, argcount, w_method) + except error.PrimitiveFailedError, e: + if interp.is_tracing(): + interp.print_padded("-- primitive %d FAILED\t (in %s, named %s)" % ( + code, w_method.safe_identifier_string(), w_selector.selector_string())) + raise e + + def _return(self, return_value, interp, local_return=False): + # unfortunately, this assert is not true for some tests. TODO fix this. + # assert self._stack_ptr == self.tempsize() + + # ################################################################## + if interp.is_tracing(): + interp.print_padded('<- ' + return_value.as_repr_string()) + + if self.home_is_self() or local_return: + # a local return just needs to go up the stack once. 
there + # it will find the sender as a local, and we don't have to + # force the reference + s_return_to = None + return_from_top = self.s_sender() is None + else: + s_return_to = self.s_home().s_sender() + return_from_top = s_return_to is None + + if return_from_top: + # This should never happen while executing a normal image. + from spyvm.interpreter import ReturnFromTopLevel + raise ReturnFromTopLevel(return_value) + else: + from spyvm.interpreter import Return + raise Return(s_return_to, return_value) + + # ====== Send/Return bytecodes ====== + + @bytecode_implementation() + def returnReceiverBytecode(self, interp, current_bytecode): + return self._return(self.w_receiver(), interp) + + @bytecode_implementation() + def returnTrueBytecode(self, interp, current_bytecode): + return self._return(interp.space.w_true, interp) + + @bytecode_implementation() + def returnFalseBytecode(self, interp, current_bytecode): + return self._return(interp.space.w_false, interp) + + @bytecode_implementation() + def returnNilBytecode(self, interp, current_bytecode): + return self._return(interp.space.w_nil, interp) + + @bytecode_implementation() + def returnTopFromMethodBytecode(self, interp, current_bytecode): + return self._return(self.pop(), interp) + + @bytecode_implementation() + def returnTopFromBlockBytecode(self, interp, current_bytecode): + return self._return(self.pop(), interp, local_return=True) + + @bytecode_implementation() + def sendLiteralSelectorBytecode(self, interp, current_bytecode): + w_selector = self.w_method().getliteral(current_bytecode & 15) + argcount = ((current_bytecode >> 4) & 3) - 1 + return self._sendSelfSelector(w_selector, argcount, interp) + + def _getExtendedSelectorArgcount(self, descriptor): + return ((self.w_method().getliteral(descriptor & 31)), + (descriptor >> 5)) + + @bytecode_implementation(parameter_bytes=1) + def singleExtendedSendBytecode(self, interp, current_bytecode, descriptor): + w_selector, argcount = 
self._getExtendedSelectorArgcount(descriptor) + return self._sendSelfSelector(w_selector, argcount, interp) + + @bytecode_implementation(parameter_bytes=2) + def doubleExtendedDoAnythingBytecode(self, interp, current_bytecode, second, third): + from spyvm.interpreter import SenderChainManipulation + opType = second >> 5 + if opType == 0: + # selfsend + return self._sendSelfSelector(self.w_method().getliteral(third), + second & 31, interp) + elif opType == 1: + # supersend + return self._sendSuperSelector(self.w_method().getliteral(third), + second & 31, interp) + elif opType == 2: + # pushReceiver + self.push(self.w_receiver().fetch(self.space, third)) + elif opType == 3: + # pushLiteralConstant + self.push(self.w_method().getliteral(third)) + elif opType == 4: + # pushLiteralVariable + w_association = self.w_method().getliteral(third) + association = wrapper.AssociationWrapper(self.space, w_association) + self.push(association.value()) + elif opType == 5: + # TODO - the following two special cases should not be necessary + try: + self.w_receiver().store(self.space, third, self.top()) + except SenderChainManipulation, e: + raise SenderChainManipulation(self) + elif opType == 6: + try: + self.w_receiver().store(self.space, third, self.pop()) + except SenderChainManipulation, e: + raise SenderChainManipulation(self) + elif opType == 7: + w_association = self.w_method().getliteral(third) + association = wrapper.AssociationWrapper(self.space, w_association) + association.store_value(self.top()) + + @bytecode_implementation(parameter_bytes=1) + def singleExtendedSuperBytecode(self, interp, current_bytecode, descriptor): + w_selector, argcount = self._getExtendedSelectorArgcount(descriptor) + return self._sendSuperSelector(w_selector, argcount, interp) + + @bytecode_implementation(parameter_bytes=1) + def secondExtendedSendBytecode(self, interp, current_bytecode, descriptor): + w_selector = self.w_method().getliteral(descriptor & 63) + argcount = descriptor >> 6 + return 
self._sendSelfSelector(w_selector, argcount, interp) + + # ====== Misc ====== + + def _activate_unwind_context(self, interp): + if self.is_closure_context() or not self.is_BlockClosure_ensure(): + self.mark_returned() + return + # The first temp is executed flag for both #ensure: and #ifCurtailed: + if self.gettemp(1).is_nil(self.space): + self.settemp(1, self.space.w_true) # mark unwound + self.push(self.gettemp(0)) # push the first argument + from spyvm.interpreter import Return + try: + self.bytecodePrimValue(interp, 0) + except Return, nlr: + assert nlr.s_target_context or nlr.is_local + if self is not nlr.s_target_context and not nlr.is_local: + raise nlr + finally: + self.mark_returned() + + @bytecode_implementation() + def unknownBytecode(self, interp, current_bytecode): + raise error.MissingBytecode("unknownBytecode") + + @bytecode_implementation() + def experimentalBytecode(self, interp, current_bytecode): + raise error.MissingBytecode("experimentalBytecode") + + # ====== Jump bytecodes ====== + + def _jump(self, offset): + self.store_pc(self.pc() + offset) + + def _jumpConditional(self, interp, expecting_true, position): + if expecting_true: + w_expected = interp.space.w_true + w_alternative = interp.space.w_false + else: + w_alternative = interp.space.w_true + w_expected = interp.space.w_false + + # Don't check the class, just compare with only two Boolean instances. 
+ w_bool = self.pop() + if w_expected.is_same_object(w_bool): + self._jump(position) + elif not w_alternative.is_same_object(w_bool): + self._mustBeBoolean(interp, w_bool) + + def _shortJumpOffset(self, current_bytecode): + return (current_bytecode & 7) + 1 + + def _longJumpOffset(self, current_bytecode, parameter): + return ((current_bytecode & 3) << 8) + parameter + + @bytecode_implementation() + def shortUnconditionalJumpBytecode(self, interp, current_bytecode): + self._jump(self._shortJumpOffset(current_bytecode)) + + @bytecode_implementation() + def shortConditionalJumpBytecode(self, interp, current_bytecode): + # The conditional jump is "jump on false" + self._jumpConditional(interp, False, self._shortJumpOffset(current_bytecode)) + + @bytecode_implementation(parameter_bytes=1) + def longUnconditionalJumpBytecode(self, interp, current_bytecode, parameter): + offset = (((current_bytecode & 7) - 4) << 8) + parameter + self._jump(offset) + + @bytecode_implementation(parameter_bytes=1) + def longJumpIfTrueBytecode(self, interp, current_bytecode, parameter): + self._jumpConditional(interp, True, self._longJumpOffset(current_bytecode, parameter)) + + @bytecode_implementation(parameter_bytes=1) + def longJumpIfFalseBytecode(self, interp, current_bytecode, parameter): + self._jumpConditional(interp, False, self._longJumpOffset(current_bytecode, parameter)) + + # ====== Bytecodes implemented with primitives and message sends ====== + + bytecodePrimAdd = make_call_primitive_bytecode(primitives.ADD, "+", 1) + bytecodePrimSubtract = make_call_primitive_bytecode(primitives.SUBTRACT, "-", 1) + bytecodePrimLessThan = make_call_primitive_bytecode (primitives.LESSTHAN, "<", 1) + bytecodePrimGreaterThan = make_call_primitive_bytecode(primitives.GREATERTHAN, ">", 1) + bytecodePrimLessOrEqual = make_call_primitive_bytecode(primitives.LESSOREQUAL, "<=", 1) + bytecodePrimGreaterOrEqual = make_call_primitive_bytecode(primitives.GREATEROREQUAL, ">=", 1) + bytecodePrimEqual = 
make_call_primitive_bytecode(primitives.EQUAL, "=", 1) + bytecodePrimNotEqual = make_call_primitive_bytecode(primitives.NOTEQUAL, "~=", 1) + bytecodePrimMultiply = make_call_primitive_bytecode(primitives.MULTIPLY, "*", 1) + bytecodePrimDivide = make_call_primitive_bytecode(primitives.DIVIDE, "/", 1) + bytecodePrimMod = make_call_primitive_bytecode(primitives.MOD, "\\\\", 1) + bytecodePrimMakePoint = make_call_primitive_bytecode(primitives.MAKE_POINT, "@", 1) + bytecodePrimBitShift = make_call_primitive_bytecode(primitives.BIT_SHIFT, "bitShift:", 1) + bytecodePrimDiv = make_call_primitive_bytecode(primitives.DIV, "//", 1) + bytecodePrimBitAnd = make_call_primitive_bytecode(primitives.BIT_AND, "bitAnd:", 1) + bytecodePrimBitOr = make_call_primitive_bytecode(primitives.BIT_OR, "bitOr:", 1) + + bytecodePrimAt = make_send_selector_bytecode("at:", 1) + bytecodePrimAtPut = make_send_selector_bytecode("at:put:", 2) + bytecodePrimSize = make_send_selector_bytecode("size", 0) + bytecodePrimNext = make_send_selector_bytecode("next", 0) + bytecodePrimNextPut = make_send_selector_bytecode("nextPut:", 1) + bytecodePrimAtEnd = make_send_selector_bytecode("atEnd", 0) + + bytecodePrimEquivalent = make_quick_call_primitive_bytecode(primitives.EQUIVALENT, 1) + bytecodePrimClass = make_quick_call_primitive_bytecode(primitives.CLASS, 0) + + bytecodePrimBlockCopy = make_call_primitive_bytecode(primitives.BLOCK_COPY, "blockCopy:", 1) + bytecodePrimValue = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE, "value", 0) + bytecodePrimValueWithArg = make_call_primitive_bytecode_classbased("w_BlockContext", primitives.VALUE, "w_BlockClosure", primitives.CLOSURE_VALUE_, "value:", 1) + + bytecodePrimDo = make_send_selector_bytecode("do:", 1) + bytecodePrimNew = make_send_selector_bytecode("new", 0) + bytecodePrimNewWithArg = make_send_selector_bytecode("new:", 1) + bytecodePrimPointX = make_send_selector_bytecode("x", 0) + 
bytecodePrimPointY = make_send_selector_bytecode("y", 0) + + def debug_bytecode(self): + # Hook used in interpreter_debugging + pass + +BYTECODE_RANGES = [ + ( 0, 15, "pushReceiverVariableBytecode"), + ( 16, 31, "pushTemporaryVariableBytecode"), + ( 32, 63, "pushLiteralConstantBytecode"), + ( 64, 95, "pushLiteralVariableBytecode"), + ( 96, 103, "storeAndPopReceiverVariableBytecode"), + (104, 111, "storeAndPopTemporaryVariableBytecode"), + (112, "pushReceiverBytecode"), + (113, "pushConstantTrueBytecode"), + (114, "pushConstantFalseBytecode"), + (115, "pushConstantNilBytecode"), + (116, "pushConstantMinusOneBytecode"), + (117, "pushConstantZeroBytecode"), + (118, "pushConstantOneBytecode"), + (119, "pushConstantTwoBytecode"), + (120, "returnReceiverBytecode"), + (121, "returnTrueBytecode"), + (122, "returnFalseBytecode"), + (123, "returnNilBytecode"), + (124, "returnTopFromMethodBytecode"), + (125, "returnTopFromBlockBytecode"), + (126, "unknownBytecode"), + (127, "unknownBytecode"), + (128, "extendedPushBytecode"), + (129, "extendedStoreBytecode"), + (130, "extendedStoreAndPopBytecode"), + (131, "singleExtendedSendBytecode"), + (132, "doubleExtendedDoAnythingBytecode"), + (133, "singleExtendedSuperBytecode"), + (134, "secondExtendedSendBytecode"), + (135, "popStackBytecode"), + (136, "duplicateTopBytecode"), + (137, "pushActiveContextBytecode"), + (138, "pushNewArrayBytecode"), + (139, "experimentalBytecode"), + (140, "pushRemoteTempLongBytecode"), + (141, "storeRemoteTempLongBytecode"), + (142, "storeAndPopRemoteTempLongBytecode"), + (143, "pushClosureCopyCopiedValuesBytecode"), + (144, 151, "shortUnconditionalJumpBytecode"), + (152, 159, "shortConditionalJumpBytecode"), + (160, 167, "longUnconditionalJumpBytecode"), + (168, 171, "longJumpIfTrueBytecode"), + (172, 175, "longJumpIfFalseBytecode"), + (176, "bytecodePrimAdd"), + (177, "bytecodePrimSubtract"), + (178, "bytecodePrimLessThan"), + (179, "bytecodePrimGreaterThan"), + (180, "bytecodePrimLessOrEqual"), + 
(181, "bytecodePrimGreaterOrEqual"), + (182, "bytecodePrimEqual"), + (183, "bytecodePrimNotEqual"), + (184, "bytecodePrimMultiply"), + (185, "bytecodePrimDivide"), + (186, "bytecodePrimMod"), + (187, "bytecodePrimMakePoint"), + (188, "bytecodePrimBitShift"), + (189, "bytecodePrimDiv"), + (190, "bytecodePrimBitAnd"), + (191, "bytecodePrimBitOr"), + (192, "bytecodePrimAt"), + (193, "bytecodePrimAtPut"), + (194, "bytecodePrimSize"), + (195, "bytecodePrimNext"), + (196, "bytecodePrimNextPut"), + (197, "bytecodePrimAtEnd"), + (198, "bytecodePrimEquivalent"), + (199, "bytecodePrimClass"), + (200, "bytecodePrimBlockCopy"), + (201, "bytecodePrimValue"), + (202, "bytecodePrimValueWithArg"), + (203, "bytecodePrimDo"), + (204, "bytecodePrimNew"), + (205, "bytecodePrimNewWithArg"), + (206, "bytecodePrimPointX"), + (207, "bytecodePrimPointY"), + (208, 255, "sendLiteralSelectorBytecode"), + ] + +def initialize_bytecode_names(): + result = [None] * 256 + for entry in BYTECODE_RANGES: + if len(entry) == 2: + result[entry[0]] = entry[1] + else: + for arg, pos in enumerate(range(entry[0], entry[1]+1)): + result[pos] = "%s(%s)" % (entry[2], arg) + assert None not in result + return result + +BYTECODE_NAMES = initialize_bytecode_names() + +def initialize_bytecode_table(): + result = [None] * 256 + for entry in BYTECODE_RANGES: + if len(entry) == 2: + positions = [entry[0]] + else: + positions = range(entry[0], entry[1]+1) + for pos in positions: + result[pos] = getattr(ContextPartShadow, entry[-1]) + assert None not in result + return result + +# this table is only used for creating named bytecodes in tests and printing +BYTECODE_TABLE = initialize_bytecode_table() diff --git a/spyvm/interpreter_debugging.py b/spyvm/interpreter_debugging.py new file mode 100644 --- /dev/null +++ b/spyvm/interpreter_debugging.py @@ -0,0 +1,109 @@ + +import pdb +from spyvm.shadow import ContextPartShadow +from spyvm import model, constants, primitives + +# This module patches up the interpreter and adds 
breakpoints at certain execution points. +# Only usable in interpreted mode due to pdb. +# To use, execute one of following after interpreter.py is loaded: +# from spyvm import interpreter_debugging; interpreter_debugging.activate_debugging() +# or, before Interpreter instance is created: +# Interpreter.__init__ = interpreter_debugging.activating_init(Interpreter.__init__) + +# After this, following flags control whether the interpreter breaks at the respective locations: +# can be an interpreter instance or the Interpreter class +# interp.step_bytecodes +# interp.step_sends +# interp.step_returns +# interp.step_primitives +# interp.step_failed_primitives +# interp.step_failed_named_primitives + +def activating_init(original): + def meth(*args): + activate_debugging() + return original(*args) + return meth + +def activate_debugging(): + from spyvm.interpreter import Interpreter + Interpreter.step_bytecodes = False + Interpreter.step_sends = False + Interpreter.step_returns = False + Interpreter.step_primitives = False + Interpreter.step_failed_primitives = False + + _break = pdb.set_trace + + def patch(obj): + def do_patch(meth): + name = meth.__name__ + original = getattr(obj, name) + assert original, "Object %r does not have a method named %s" % (obj, name) + replacement = meth(original) + setattr(obj, name, replacement) + return meth + return do_patch + + patch_context = patch(ContextPartShadow) + + @patch_context + def debug_bytecode(original): + def meth(self): + if self.step_bytecodes: + _break() # Continue stepping from here to get to the current bytecode execution + return meth + + @patch_context + def _sendSelector(original): + def meth(self, w_selector, argcount, interp, receiver, receiverclassshadow, w_arguments=None): + if interp.step_sends: + _break() # Continue stepping from here to get to the current message send + return original(self, w_selector, argcount, interp, receiver, receiverclassshadow, w_arguments=w_arguments) + return meth + + 
@patch_context + def _return(original): + def meth(self, return_value, interp, local_return=False): + if interp.step_returns: + _break() # Continue stepping from here to get to the current return + return original(self, return_value, interp, local_return=local_return) + return meth + + @patch_context + def _call_primitive(original): + def meth(self, code, interp, argcount, w_method, w_selector): + if interp.step_primitives: + _break() # Continue stepping from here to get to the current primitive + try: + return original(self, code, interp, argcount, w_method, w_selector) + except error.PrimitiveFailedError, e: + if interp.step_failed_primitives: + _break() # Continue stepping from here to get to the current failed primitive. + + # Should fail again. + original(self, code, interp, argcount, w_method, w_selector) + return meth + + def failed_named_primitive(original): + def meth(interp, s_frame, argcount, w_method=None): + try: + return original(interp, s_frame, argcount, w_method=w_method) + except error.PrimitiveFailedError, e: + if interp.step_failed_named_primitives: + _break() # Continue from here to get to the current failed named primitive. + + space = interp.space + w_description = w_method.literalat0(space, 1) + if isinstance(w_description, model.W_PointersObject) and w_description.size() >= 2: + w_modulename = w_description.at0(space, 0) + w_functionname = w_description.at0(space, 1) + print "Failed named primitive. Module: %s, Function: %s" % (w_modulename, w_functionname) + + # Should fail again. 
+ original(interp, s_frame, argcount, w_method=w_method) + raise e + return meth + + primitives.prim_table[primitives.EXTERNAL_CALL] = failed_named_primitive(primitives.prim_table[primitives.EXTERNAL_CALL]) + \ No newline at end of file diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1280,7 +1280,7 @@ return self.get_identifier_string() def bytecode_string(self, markBytecode=0): - from spyvm.interpreter import BYTECODE_TABLE + from spyvm.interpreter_bytecodes import BYTECODE_TABLE retval = "Bytecode:------------" j = 1 for i in self.bytes: diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -679,7 +679,7 @@ assert isinstance(w_bitmap, model_display.W_DisplayBitmap) w_bitmap.flush_to_screen() return w_rcvr - except shadow.MethodNotFound: + except error.MethodNotFound: from spyvm.plugins.bitblt import BitBltPlugin BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, w_method) return w_rcvr @@ -1358,9 +1358,7 @@ unwrap_spec=[object, object, list], no_result=True, clean_stack=False) def func(interp, s_frame, w_rcvr, w_selector, w_arguments): - from spyvm.shadow import MethodNotFound s_frame.pop_n(2) # removing our arguments - return s_frame._sendSelector(w_selector, len(w_arguments), interp, w_rcvr, w_rcvr.class_shadow(interp.space), w_arguments=w_arguments) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -286,11 +286,8 @@ FLOAT = 5 LARGE_POSITIVE_INTEGER = 6 -class MethodNotFound(error.SmalltalkException): - pass - class ClassShadowError(error.SmalltalkException): - pass + exception_type = "ClassShadowError" class ClassShadow(AbstractCachingShadow): """A shadow for Smalltalk objects that are classes @@ -505,7 +502,7 @@ if w_method is not None: return w_method look_in_shadow = look_in_shadow._s_superclass - raise MethodNotFound(self, w_selector) + raise error.MethodNotFound() def changed(self): 
self.superclass_changed(version.Version()) diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1,5 +1,5 @@ import py, operator, sys -from spyvm import model, interpreter, primitives, shadow, objspace, wrapper, constants +from spyvm import model, interpreter, primitives, shadow, objspace, wrapper, constants, error from .util import create_space_interp, copy_to_module, cleanup_module, import_bytecodes, TestInterpreter from spyvm.wrapper import PointWrapper from spyvm.conftest import option @@ -139,7 +139,7 @@ def test_unknownBytecode(): w_frame, s_frame = new_frame(unknownBytecode) - py.test.raises(interpreter.MissingBytecode, step_in_interp, s_frame) + py.test.raises(error.MissingBytecode, step_in_interp, s_frame) # push bytecodes def test_pushReceiverBytecode(): @@ -579,7 +579,7 @@ test_storeAndPopTemporaryVariableBytecode(lambda index: extendedStoreAndPopBytecode + chr((1<<6) + index)) - py.test.raises(interpreter.IllegalStoreError, + py.test.raises(error.FatalError, test_storeAndPopTemporaryVariableBytecode, lambda index: extendedStoreAndPopBytecode + chr((2<<6) + index)) diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -1,6 +1,7 @@ import py, math, socket from spyvm import model, model_display, shadow, objspace, error, display -from spyvm.shadow import MethodNotFound, WEAK_POINTERS +from spyvm.error import MethodNotFound +from spyvm.shadow import WEAK_POINTERS from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rtyper.lltypesystem import lltype, rffi from .util import create_space, copy_to_module, cleanup_module @@ -403,12 +404,23 @@ for i in xrange(6, 8): assert target.pixelbuffer[i] == 0x0 -def test_display_offset_computation(): - dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 5, 1) +def test_display_offset_computation_even(): + dbitmap = 
model_display.W_MappingDisplayBitmap(space, space.w_Array, 200, 1) + dbitmap.pitch = 64 + dbitmap.words_per_line = 2 assert dbitmap.compute_pos(0) == 0 - assert dbitmap.compute_pos(1) == 8 - assert dbitmap.size() == 5 * 8 + assert dbitmap.compute_pos(1) == 32 + assert dbitmap.compute_pos(2) == 64 +def test_display_offset_computation_uneven(): + dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 200, 1) + dbitmap.pitch = 67 + dbitmap.words_per_line = 2 + assert dbitmap.compute_pos(0) == 0 + assert dbitmap.compute_pos(1) == 32 + assert dbitmap.compute_pos(2) == 67 + assert dbitmap.compute_pos(3) == 67 + 32 + @py.test.mark.skipif("socket.gethostname() == 'precise32'") def test_weak_pointers(): w_cls = bootstrap_class(2) diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -1,5 +1,5 @@ import sys -from spyvm import model, shadow, objspace, version, constants, squeakimage, interpreter From noreply at buildbot.pypy.org Sun Jul 27 12:22:15 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:15 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state-v2: Delaying SenderChainManipulation as much as possible. Message-ID: <20140727102215.E60E11C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state-v2 Changeset: r952:288e40aaf989 Date: 2014-07-18 15:29 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/288e40aaf989/ Log: Delaying SenderChainManipulation as much as possible. Added a 'state' field to Context objects, can be Inactive, Active or Dirty. Setting the sender of an Active context makes it Dirty. When a Dirty context is left, SenderChainManipulation will be raised, forcing all remaining contexts from the stack to the heap. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -1,6 +1,6 @@ import os -from spyvm.shadow import MethodContextShadow +from spyvm.shadow import MethodContextShadow, ActiveContext, InactiveContext, DirtyContext from spyvm import model, constants, wrapper, objspace, interpreter_bytecodes from rpython.rlib import jit, rstackovf, unroll @@ -151,8 +151,10 @@ if self.is_tracing(): self.stack_depth += 1 if s_frame._s_sender is None and s_sender is not None: - s_frame.store_s_sender(s_sender, raise_error=False) + s_frame.store_s_sender(s_sender) # Now (continue to) execute the context bytecodes + assert s_frame.state is InactiveContext + s_frame.state = ActiveContext self.loop_bytecodes(s_frame, may_context_switch) except rstackovf.StackOverflow: rstackovf.check_stack_overflow() @@ -160,7 +162,11 @@ finally: if self.is_tracing(): self.stack_depth -= 1 - + dirty_frame = s_frame.state is DirtyContext + s_frame.state = InactiveContext + if dirty_frame: + raise SenderChainManipulation(s_frame) + def step(self, context): bytecode = context.fetch_next_bytecode() for entry in UNROLLING_BYTECODE_RANGES: diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -633,18 +633,29 @@ def size(self): return self._w_self_size +class ContextState(object): + def __init__(self, name): + self.name = name + def __str__(self): + return self.name + def __repr__(self): + return self.name +InactiveContext = ContextState("InactiveContext") +ActiveContext = ContextState("ActiveContext") +DirtyContext = ContextState("DirtyContext") + class ContextPartShadow(AbstractRedirectingShadow): __metaclass__ = extendabletype _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', - '_stack_ptr', 'instances_w'] + '_stack_ptr', 'instances_w', 'state'] repr_classname = "ContextPartShadow" _virtualizable_ = [ '_s_sender', "_pc", "_temps_and_stack[*]", "_stack_ptr", - "_w_self", "_w_self_size" + "_w_self", 
"_w_self_size", 'state' ] # ______________________________________________________________________ @@ -654,13 +665,7 @@ self._s_sender = None AbstractRedirectingShadow.__init__(self, space, w_self, size) self.instances_w = {} - - def copy_field_from(self, n0, other_shadow): - from spyvm.interpreter import SenderChainManipulation - try: - AbstractRedirectingShadow.copy_field_from(self, n0, other_shadow) - except SenderChainManipulation, e: - assert e.s_new_context == self + self.state = InactiveContext def copy_from(self, other_shadow): # Some fields have to be initialized before the rest, to ensure correct initialization. @@ -702,7 +707,7 @@ if n0 == constants.CTXPART_SENDER_INDEX: assert isinstance(w_value, model.W_PointersObject) if w_value.is_nil(self.space): - self.store_s_sender(None, raise_error=False) + self.store_s_sender(None) else: self.store_s_sender(w_value.as_context_get_shadow(self.space)) return @@ -722,12 +727,12 @@ # === Sender === - def store_s_sender(self, s_sender, raise_error=True): + def store_s_sender(self, s_sender): if s_sender is not self._s_sender: self._s_sender = s_sender - if raise_error: - from spyvm.interpreter import SenderChainManipulation - raise SenderChainManipulation(self) + # If new sender is None, we are just being marked as returned. + if s_sender is not None and self.state is ActiveContext: + self.state = DirtyContext def w_sender(self): sender = self.s_sender() @@ -819,7 +824,7 @@ def mark_returned(self): self.store_pc(-1) - self.store_s_sender(None, raise_error=False) + self.store_s_sender(None) def is_returned(self): return self.pc() == -1 and self.w_sender().is_nil(self.space) From noreply at buildbot.pypy.org Sun Jul 27 12:22:16 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:16 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state-v2: Fixed tests, added tests. 
Message-ID: <20140727102216.E6BA01C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state-v2 Changeset: r953:6a476300d0c3 Date: 2014-07-18 15:29 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6a476300d0c3/ Log: Fixed tests, added tests. diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -978,9 +978,32 @@ 2, "value:value:"]], test) -def test_c_stack_reset_on_sender_chain_manipulation(): - bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) +def test_frame_dirty_if_active(): + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + returnReceiverBytecode w_frame, s_frame = new_frame(bytes) s_frame.store_w_receiver(w_frame) s_frame.push(w_frame) - py.test.raises(interpreter.SenderChainManipulation, step_in_interp, s_frame) + s_frame.state = shadow.ActiveContext + step_in_interp(s_frame) + assert s_frame.state is shadow.DirtyContext + +def test_frame_not_dirty_if_inactive(): + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + returnReceiverBytecode + w_frame, s_frame = new_frame(bytes) + w_other_frame, s_other_frame = new_frame("") + s_frame.store_w_receiver(w_other_frame) + s_frame.push(w_frame) + s_frame.state = shadow.ActiveContext + step_in_interp(s_frame) + assert s_frame.state is shadow.ActiveContext + assert s_other_frame.state is shadow.InactiveContext + +def test_raise_SenderManipulation_on_dirty_frame(): + w_frame, s_frame = new_frame(returnReceiverBytecode) + s_frame.state = shadow.DirtyContext + def run_frame(): + #import pdb; pdb.set_trace() + interp._loop = True + interp.stack_frame(s_frame, None) + py.test.raises(interpreter.SenderChainManipulation, run_frame) + \ No newline at end of file diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -43,7 +43,7 @@ 
# create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0), []) s_frame = w_method.create_frame(space, w(0)) - s_frame.store_s_sender(s_initial_frame, raise_error=False) + s_frame.store_s_sender(s_initial_frame) try: interp.loop(s_frame.w_self()) @@ -70,7 +70,7 @@ # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0)) s_frame = w_method.create_frame(space, w(0)) - s_frame.store_s_sender(s_initial_frame, raise_error=False) + s_frame.store_s_sender(s_initial_frame) try: interp.loop(s_frame.w_self()) diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -85,7 +85,7 @@ if not self._loop: # this test is done to not loop in test, but rather step just once where wanted # Unfortunately, we have to mimick some of the original behaviour. - s_new_frame.store_s_sender(s_sender, raise_error=False) + s_new_frame.store_s_sender(s_sender) return s_new_frame return interpreter.Interpreter.stack_frame(self, s_new_frame, s_sender, may_context_switch) From noreply at buildbot.pypy.org Sun Jul 27 12:22:17 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:17 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state-v2: Added part of the context state refactoring. Message-ID: <20140727102217.DE2651C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state-v2 Changeset: r954:7292a07cb139 Date: 2014-07-25 11:07 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/7292a07cb139/ Log: Added part of the context state refactoring. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -12,10 +12,15 @@ class Return(Exception): _attrs_ = ["value", "s_target_context", "is_local"] - def __init__(self, s_target_context, w_result): + def __init__(self, s_target_context, w_result, is_local): self.value = w_result self.s_target_context = s_target_context - self.is_local = False + self.is_local = is_local + +class LocalReturn(Exception): + _attrs_ = ["value"] + def __init__(self, value): + self.value = value class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave @@ -97,21 +102,44 @@ while True: s_sender = s_new_context.s_sender() try: - self.loop_bytecodes(s_new_context) + self.stack_frame(s_new_context, None) raise Exception("loop_bytecodes left without raising...") except ContextSwitchException, e: if self.is_tracing(): e.print_trace(s_new_context) s_new_context = e.s_new_context - except Return, nlr: - assert nlr.s_target_context or nlr.is_local - s_new_context = s_sender - if not nlr.is_local: - while s_new_context is not nlr.s_target_context: - s_sender = s_new_context.s_sender() - s_new_context._activate_unwind_context(self) - s_new_context = s_sender - s_new_context.push(nlr.value) + except LocalReturn, ret: + s_new_context = self.unwind_context_chain(s_sender, s_sender, ret.value) + except Return, ret: + s_new_context = self.unwind_context_chain(s_sender, ret.s_target_context, ret.value) + + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame + # and handles the stack overflow protection mechanism. 
+ def stack_frame(self, s_frame, s_sender, may_context_switch=True): + try: + if self.is_tracing(): + self.stack_depth += 1 + if s_frame._s_sender is None and s_sender is not None: + s_frame.store_s_sender(s_sender) + # Now (continue to) execute the context bytecodes + assert s_frame.state is InactiveContext + s_frame.state = ActiveContext + self.loop_bytecodes(s_frame, may_context_switch) + except rstackovf.StackOverflow: + rstackovf.check_stack_overflow() + raise StackOverflow(s_frame) + except Return, ret: + if ret.is_local: + raise LocalReturn(ret.value) + else: + raise ret + finally: + if self.is_tracing(): + self.stack_depth -= 1 + dirty_frame = s_frame.state is DirtyContext + s_frame.state = InactiveContext + if dirty_frame: + raise SenderChainManipulation(s_frame) def loop_bytecodes(self, s_context, may_context_switch=True): old_pc = 0 @@ -133,39 +161,22 @@ s_context=s_context) try: self.step(s_context) - except Return, nlr: - if nlr.s_target_context is s_context or nlr.is_local: - s_context.push(nlr.value) - else: - if nlr.s_target_context is None: - # This is the case where we are returning to our sender. - # Mark the return as local, so our sender will take it - nlr.is_local = True - s_context._activate_unwind_context(self) - raise nlr - - # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame - # and handles the stack overflow protection mechanism. 
- def stack_frame(self, s_frame, s_sender, may_context_switch=True): - try: - if self.is_tracing(): - self.stack_depth += 1 - if s_frame._s_sender is None and s_sender is not None: - s_frame.store_s_sender(s_sender) - # Now (continue to) execute the context bytecodes - assert s_frame.state is InactiveContext - s_frame.state = ActiveContext - self.loop_bytecodes(s_frame, may_context_switch) - except rstackovf.StackOverflow: - rstackovf.check_stack_overflow() - raise StackOverflow(s_frame) - finally: - if self.is_tracing(): - self.stack_depth -= 1 - dirty_frame = s_frame.state is DirtyContext - s_frame.state = InactiveContext - if dirty_frame: - raise SenderChainManipulation(s_frame) + except LocalReturn, ret: + s_context.push(ret.value) + + def unwind_context_chain(self, start_context, target_context, return_value): + if start_context is None: + # This is the toplevel frame. Execution ended. + raise ReturnFromTopLevel(return_value) + assert target_context + context = start_context + while context is not target_context: + assert context, "Sender chain ended without finding return-context." + s_sender = context.s_sender() + context._activate_unwind_context(self) + context = s_sender + context.push(return_value) + return context def step(self, context): bytecode = context.fetch_next_bytecode() diff --git a/spyvm/interpreter_bytecodes.py b/spyvm/interpreter_bytecodes.py --- a/spyvm/interpreter_bytecodes.py +++ b/spyvm/interpreter_bytecodes.py @@ -394,18 +394,13 @@ # it will find the sender as a local, and we don't have to # force the reference s_return_to = None - return_from_top = self.s_sender() is None + is_local = True else: s_return_to = self.s_home().s_sender() - return_from_top = s_return_to is None + is_local = False - if return_from_top: - # This should never happen while executing a normal image. 
- from spyvm.interpreter import ReturnFromTopLevel - raise ReturnFromTopLevel(return_value) - else: - from spyvm.interpreter import Return - raise Return(s_return_to, return_value) + from spyvm.interpreter import Return + raise Return(s_return_to, return_value, is_local) # ====== Send/Return bytecodes ====== @@ -508,16 +503,15 @@ if self.gettemp(1).is_nil(self.space): self.settemp(1, self.space.w_true) # mark unwound self.push(self.gettemp(0)) # push the first argument - from spyvm.interpreter import Return + from spyvm.interpreter import LocalReturn try: self.bytecodePrimValue(interp, 0) - except Return, nlr: - assert nlr.s_target_context or nlr.is_local - if self is not nlr.s_target_context and not nlr.is_local: - raise nlr + except LocalReturn: + # Local return value of ensure: block is ignored + pass finally: self.mark_returned() - + @bytecode_implementation() def unknownBytecode(self, interp, current_bytecode): raise error.MissingBytecode("unknownBytecode") From noreply at buildbot.pypy.org Sun Jul 27 12:22:18 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:18 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state-v2: Added the rest of the refactoring. Message-ID: <20140727102218.DB07B1C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state-v2 Changeset: r955:92d182dabea1 Date: 2014-07-25 16:33 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/92d182dabea1/ Log: Added the rest of the refactoring. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -1,7 +1,7 @@ import os from spyvm.shadow import MethodContextShadow, ActiveContext, InactiveContext, DirtyContext -from spyvm import model, constants, wrapper, objspace, interpreter_bytecodes +from spyvm import model, constants, wrapper, objspace, interpreter_bytecodes, error from rpython.rlib import jit, rstackovf, unroll @@ -17,6 +17,16 @@ self.s_target_context = s_target_context self.is_local = is_local +class NonVirtualReturn(Exception): + _attrs_ = ["s_target_context", "s_current_context", "value"] + def __init__(self, s_target_context, s_current_context, w_result): + self.value = w_result + self.s_target_context = s_target_context + self.s_current_context = s_current_context + + def print_trace(self): + print "\n====== Sender Chain Manipulation, contexts forced to heap at: %s" % self.s_current_context.short_str() + class LocalReturn(Exception): _attrs_ = ["value"] def __init__(self, value): @@ -25,14 +35,13 @@ class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave the current context.""" - _attrs_ = ["s_new_context"] type = "ContextSwitch" def __init__(self, s_new_context): self.s_new_context = s_new_context - def print_trace(self, old_context): - print "====== %s, contexts forced to heap at: %s" % (self.type, self.s_new_context.short_str()) + def print_trace(self): + print "\n====== %s at: %s" % (self.type, self.s_new_context.short_str()) class StackOverflow(ContextSwitchException): """This causes the current jit-loop to be left, dumping all virtualized objects to the heap. @@ -43,17 +52,8 @@ class ProcessSwitch(ContextSwitchException): """This causes the interpreter to switch the executed context. 
Triggered when switching the process.""" + type = "Process Switch" - def print_trace(self, old_context): - print "====== Switched process from: %s" % old_context.short_str() - print "====== to: %s " % self.s_new_context.short_str() - -class SenderChainManipulation(ContextSwitchException): - """Manipulation of the sender chain can invalidate the jitted C stack. - We have to dump all virtual objects and rebuild the stack. - We try to raise this as rarely as possible and as late as possible.""" - type = "Sender Manipulation" - UNROLLING_BYTECODE_RANGES = unroll.unrolling_iterable(interpreter_bytecodes.BYTECODE_RANGES) def get_printable_location(pc, self, method): @@ -98,23 +98,27 @@ def loop(self, w_active_context): # This is the top-level loop and is not invoked recursively. - s_new_context = w_active_context.as_context_get_shadow(self.space) + s_context = w_active_context.as_context_get_shadow(self.space) while True: - s_sender = s_new_context.s_sender() + s_sender = s_context.s_sender() try: - self.stack_frame(s_new_context, None) + self.stack_frame(s_context, None) raise Exception("loop_bytecodes left without raising...") except ContextSwitchException, e: if self.is_tracing(): - e.print_trace(s_new_context) - s_new_context = e.s_new_context + e.print_trace() + s_context = e.s_new_context except LocalReturn, ret: - s_new_context = self.unwind_context_chain(s_sender, s_sender, ret.value) + s_context = self.unwind_context_chain(s_sender, s_sender, ret.value, "LocalReturn") except Return, ret: - s_new_context = self.unwind_context_chain(s_sender, ret.s_target_context, ret.value) + s_context = self.unwind_context_chain(s_sender, ret.s_target_context, ret.value, "Return") + except NonVirtualReturn, ret: + if self.is_tracing(): + ret.print_trace() + s_context = self.unwind_context_chain(ret.s_current_context, ret.s_target_context, ret.value, "NonVirtual") - # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame - # and handles the stack overflow 
protection mechanism. + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame, + # handles the stack overflow protection mechanism and handles/dispatches Returns. def stack_frame(self, s_frame, s_sender, may_context_switch=True): try: if self.is_tracing(): @@ -129,17 +133,21 @@ rstackovf.check_stack_overflow() raise StackOverflow(s_frame) except Return, ret: - if ret.is_local: - raise LocalReturn(ret.value) + if s_frame.state is DirtyContext: + s_sender = s_frame.s_sender() # The sender has changed! + s_frame._activate_unwind_context(self) + target_context = s_sender if ret.is_local else ret.s_target_context + raise NonVirtualReturn(target_context, s_sender, ret.value) else: - raise ret + s_frame._activate_unwind_context(self) + if ret.s_target_context is s_sender or ret.is_local: + raise LocalReturn(ret.value) + else: + raise ret finally: if self.is_tracing(): self.stack_depth -= 1 - dirty_frame = s_frame.state is DirtyContext s_frame.state = InactiveContext - if dirty_frame: - raise SenderChainManipulation(s_frame) def loop_bytecodes(self, s_context, may_context_switch=True): old_pc = 0 @@ -164,14 +172,22 @@ except LocalReturn, ret: s_context.push(ret.value) - def unwind_context_chain(self, start_context, target_context, return_value): + def unwind_context_chain(self, start_context, target_context, return_value, source=""): if start_context is None: # This is the toplevel frame. Execution ended. raise ReturnFromTopLevel(return_value) assert target_context context = start_context while context is not target_context: - assert context, "Sender chain ended without finding return-context." 
+ if not context: + msg = "Context chain ended (source: %s) while trying to return\n%s\nfrom\n%s\n(pc %s)\nto\n%s\n(pc %s)" % ( + source, + return_value.as_repr_string(), + start_context.short_str(), + start_context.pc(), + target_context.short_str(), + start_context.pc()) + raise error.FatalError(msg) s_sender = context.s_sender() context._activate_unwind_context(self) context = s_sender diff --git a/spyvm/interpreter_bytecodes.py b/spyvm/interpreter_bytecodes.py --- a/spyvm/interpreter_bytecodes.py +++ b/spyvm/interpreter_bytecodes.py @@ -30,7 +30,8 @@ parameters += (self.fetch_next_bytecode(), ) i = i + 1 # This is a good place to step through bytecodes. - self.debug_bytecode() + + self.debug_bytecode(interp) return actual_implementation_method(self, interp, current_bytecode, *parameters) bytecode_implementation_wrapper.func_name = actual_implementation_method.func_name return bytecode_implementation_wrapper @@ -118,7 +119,7 @@ def pushLiteralVariableBytecode(self, interp, current_bytecode): # this bytecode assumes that literals[index] is an Association # which is an object with two named vars, and fetches the second - # named var (the value). + # named var (the value). 
index = current_bytecode & 31 w_association = self.w_method().getliteral(index) association = wrapper.AssociationWrapper(self.space, w_association) @@ -337,8 +338,6 @@ # ###################################################################### if interp.is_tracing(): interp.print_padded('-> %s %s' % (special_selector, s_frame.short_str())) - if not objectmodel.we_are_translated(): - import pdb; pdb.set_trace() return interp.stack_frame(s_frame, self) @@ -445,7 +444,6 @@ @bytecode_implementation(parameter_bytes=2) def doubleExtendedDoAnythingBytecode(self, interp, current_bytecode, second, third): - from spyvm.interpreter import SenderChainManipulation opType = second >> 5 if opType == 0: # selfsend @@ -467,16 +465,9 @@ association = wrapper.AssociationWrapper(self.space, w_association) self.push(association.value()) elif opType == 5: - # TODO - the following two special cases should not be necessary - try: - self.w_receiver().store(self.space, third, self.top()) - except SenderChainManipulation, e: - raise SenderChainManipulation(self) + self.w_receiver().store(self.space, third, self.top()) elif opType == 6: - try: - self.w_receiver().store(self.space, third, self.pop()) - except SenderChainManipulation, e: - raise SenderChainManipulation(self) + self.w_receiver().store(self.space, third, self.pop()) elif opType == 7: w_association = self.w_method().getliteral(third) association = wrapper.AssociationWrapper(self.space, w_association) @@ -607,7 +598,7 @@ bytecodePrimPointX = make_send_selector_bytecode("x", 0) bytecodePrimPointY = make_send_selector_bytecode("y", 0) - def debug_bytecode(self): + def debug_bytecode(self, interp): # Hook used in interpreter_debugging pass diff --git a/spyvm/interpreter_debugging.py b/spyvm/interpreter_debugging.py --- a/spyvm/interpreter_debugging.py +++ b/spyvm/interpreter_debugging.py @@ -49,8 +49,8 @@ @patch_context def debug_bytecode(original): - def meth(self): - if self.step_bytecodes: + def meth(self, interp): + if 
interp.step_bytecodes: _break() # Continue stepping from here to get to the current bytecode execution return meth From noreply at buildbot.pypy.org Sun Jul 27 12:22:19 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:19 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state-v2: Added some Squeak image tests. Message-ID: <20140727102219.D19541C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state-v2 Changeset: r956:f500ff8dea45 Date: 2014-07-25 16:33 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f500ff8dea45/ Log: Added some Squeak image tests. diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -979,7 +979,7 @@ test) def test_frame_dirty_if_active(): - bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + returnReceiverBytecode + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) w_frame, s_frame = new_frame(bytes) s_frame.store_w_receiver(w_frame) s_frame.push(w_frame) @@ -988,7 +988,7 @@ assert s_frame.state is shadow.DirtyContext def test_frame_not_dirty_if_inactive(): - bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + returnReceiverBytecode + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) w_frame, s_frame = new_frame(bytes) w_other_frame, s_other_frame = new_frame("") s_frame.store_w_receiver(w_other_frame) @@ -998,12 +998,14 @@ assert s_frame.state is shadow.ActiveContext assert s_other_frame.state is shadow.InactiveContext -def test_raise_SenderManipulation_on_dirty_frame(): - w_frame, s_frame = new_frame(returnReceiverBytecode) - s_frame.state = shadow.DirtyContext - def run_frame(): - #import pdb; pdb.set_trace() - interp._loop = True +def test_raise_NonVirtualReturn_on_dirty_frame(): + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + returnTopFromMethodBytecode + w_frame, s_frame = new_frame(bytes) + 
s_frame.store_w_receiver(w_frame) + s_frame.push(w_frame) + + interp._loop = True + def do_test(): interp.stack_frame(s_frame, None) - py.test.raises(interpreter.SenderChainManipulation, run_frame) + py.test.raises(interpreter.NonVirtualReturn, do_test) \ No newline at end of file diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -1,6 +1,8 @@ from spyvm import squeakimage, model, constants, interpreter, shadow, objspace from .util import read_image, find_symbol_in_methoddict_of, copy_to_module, cleanup_module +import operator + def setup_module(): space, interp, image, reader = read_image('Squeak4.5-12568.image') w = space.w @@ -25,21 +27,13 @@ w_method.setliterals(literals) return w_method -def test_ensure(): - #ensure - # [^'b1'] ensure: [^'b2'] - import operator - bytes = reduce(operator.add, map(chr, [0x8F, 0, 0, 2, 0x21, 0x7c, - 0x8F, 0, 0, 2, 0x22, 0x7c, - 0xe0, 0x87, 0x78])) - - s_class = space.w_BlockClosure.as_class_get_shadow(space) - ensure_ = find_symbol_in_methoddict_of('ensure:', s_class) - assert ensure_ is not None, 'Using image without #ensure:-method.' - - w_method = create_method(bytes, [ensure_, w('b1'), w('b2'), - w('ensure'), space.w_BlockClosure]) - +def find_symbol(w_class, symbolname): + s_class = w_class.as_class_get_shadow(space) + symbol = find_symbol_in_methoddict_of(symbolname, s_class) + assert symbol is not None, 'Using image without %s method.' 
% symbolname + return symbol + +def execute_frame(w_method): # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0), []) s_frame = w_method.create_frame(space, w(0)) @@ -48,33 +42,141 @@ try: interp.loop(s_frame.w_self()) except interpreter.ReturnFromTopLevel, e: - assert e.object.as_string() == 'b2' - except interpreter.StackOverflow, e: - assert False + return e.object + +def test_ensure(): + #ensure + # [^'b1'] ensure: [^'b2'] + + ensure_ = find_symbol(space.w_BlockClosure, "ensure:") + bytes = reduce(operator.add, map(chr, [0x8F, 0, 0, 2, 0x21, 0x7c, + 0x8F, 0, 0, 2, 0x22, 0x7c, + 0xe0, 0x87, 0x78])) + + w_method = create_method(bytes, [ensure_, w('b1'), w('b2'), + w('ensure'), space.w_BlockClosure]) + result = execute_frame(w_method) + assert result.as_string() == 'b2' def test_ensure_save_original_nlr(): #ensure # [^'b1'] ensure: ['b2'] - import operator + + ensure_ = find_symbol(space.w_BlockClosure, "ensure:") bytes = reduce(operator.add, map(chr, [0x8F, 0, 0, 2, 0x21, 0x7c, 0x8F, 0, 0, 2, 0x22, 0x7d, 0xe0, 0x87, 0x78])) - s_class = space.w_BlockClosure.as_class_get_shadow(space) - ensure_ = find_symbol_in_methoddict_of('ensure:', s_class) - assert ensure_ is not None, 'Using image without #ensure:-method.' - w_method = create_method(bytes, [ensure_, w('b1'), w('b2'), w('ensure'), space.w_BlockClosure]) + result = execute_frame(w_method) + assert result.as_string() == 'b1' - # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) - s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0)) - s_frame = w_method.create_frame(space, w(0)) - s_frame.store_s_sender(s_initial_frame) +def test_ContextPart_jump(): + """ + Code: Create a Block context that jumps back to its sender, instead of returning normally. + The Block is not executed to the end, the sender chain is manipulated. 
+ The local variable should be the value pushed on the sender context before jumping to it. + a := 5. + a := [ thisContext sender push: 2. thisContext sender jump. 10 ] value. + ^ a + """ + ContextPart = space.w_MethodContext.as_class_get_shadow(space).s_superclass().w_self() + push = find_symbol(ContextPart, "push:") + sender = find_symbol(ContextPart, "sender") + jump = find_symbol(ContextPart, "jump") - try: - interp.loop(s_frame.w_self()) - except interpreter.ReturnFromTopLevel, e: - assert e.object.as_string() == 'b1' - except interpreter.StackOverflow, e: - assert False + bytes = reduce(operator.add, map(chr, [0x21, 0x82, 0xc0, # Set a + 0x8f, 0x00, 0x00, 0x0b, # Push block + 0x89, 0xd3, # Send sender + 0x77, 0xe2, # Send push + 0x87, 0x89, 0xd3, 0xd4, # Send jump + 0x87, 0x25, 0x7d, # Block rest (not executed) + 0xc9, 0x82, 0xc0, 0x40, 0x7c])) # Send value and return + + Association = space.classtable["w_Point"] # Wrong class, doesn't matter. + assoc = model.W_PointersObject(space, Association, 2) + assoc.store(space, 0, w('a')) + assoc.store(space, 1, w(3)) + w_method = create_method(bytes, [assoc, w(5), push, sender, jump, w(10)]) + result = execute_frame(w_method) + assert isinstance(result, model.W_SmallInteger) + assert result.value == 2 + +def test_ContextPart_jump_nonlocal(): + """ + Like above test, but with three blocks to make the return non-local. + Also, store the outer context beforehand. + a := 5. + outer := thisContext. + a := [[[ outer push: 2. outer jump. 10 ] value ] value] value. 
+ ^ a + """ + ContextPart = space.w_MethodContext.as_class_get_shadow(space).s_superclass().w_self() + push = find_symbol(ContextPart, "push:") + jump = find_symbol(ContextPart, "jump") + + bytes = reduce(operator.add, map(chr, [0x21, 0x82, 0xc0, # Set a + 0x89, 0x82, 0xc2, # Set outer + 0x8f, 0x00, 0x00, 0x15, # Push block + 0x8f, 0x00, 0x00, 0x0f, # Push block + 0x8f, 0x00, 0x00, 0x09, # Push block + 0x42, 0x77, 0xe3, # Push 2 + 0x87, 0x42, 0xd4, # Send jump + 0x87, 0x25, 0x7d, # Block rest (not executed) + 0xc9, 0x7d, # Send value and return + 0xc9, 0x7d, # Send value and return + 0xc9, 0x82, 0xc0, 0x40, 0x7c])) # Send value and return + + Association = space.classtable["w_Point"] # Wrong class, doesn't matter. + assoc = model.W_PointersObject(space, Association, 2) + assoc.store(space, 0, w('a')) + assoc.store(space, 1, space.w_nil) + assoc2 = model.W_PointersObject(space, Association, 2) + assoc2.store(space, 0, w('outer')) + assoc2.store(space, 1, space.w_nil) + w_method = create_method(bytes, [assoc, w(5), assoc2, push, jump, w(10)]) + result = execute_frame(w_method) + assert isinstance(result, model.W_SmallInteger) + assert result.value == 2 + +def test_contextOn_do_(): + """ + contextOn:do: is some very heavy meta programming. It creates and returns a separate stack frame, + settings it's sender to nil, thereby manipulating the senders of two contexts. + The Point in there should actually be UnhandledError or something. + The test here is just that this works. 
+ ctx := ContextPart contextOn: Point do: ['nothing'] + """ + ContextPart = space.w_MethodContext.as_class_get_shadow(space).s_superclass().w_self() + ContextPartClass = ContextPart.getclass(space).as_class_get_shadow(space).w_self() + contextOnDo = find_symbol(ContextPartClass, "contextOn:do:") + + bytes = reduce(operator.add, map(chr, [ + 0x42, 0x43, # Push the classes + 0x8f, 0x00, 0x00, 0x02, # Push block, + 0x24, 0x7d, # in the block + 0xf1, 0x81, 0xc0, 0x7c # Send contextOn:do: + ])) + + Association = space.classtable["w_Point"] # Wrong class, doesn't matter. + ctxAssoc = model.W_PointersObject(space, Association, 2) + ctxAssoc.store(space, 0, w('ctx')) + ctxAssoc.store(space, 1, space.w_nil) + contextPartAssoc = model.W_PointersObject(space, Association, 2) + contextPartAssoc.store(space, 0, w('ContextPart')) + contextPartAssoc.store(space, 1, ContextPart) + errorAssoc = model.W_PointersObject(space, Association, 2) + errorAssoc.store(space, 0, w('Point')) + errorAssoc.store(space, 1, Association) + w_method = create_method(bytes, [ctxAssoc, contextOnDo, contextPartAssoc, errorAssoc, w('nothing')]) + + interp.trace = True + + result = execute_frame(w_method) + assert isinstance(result, model.W_PointersObject) + s = result.as_context_get_shadow(space) + assert s.w_method().lookup_selector == "on:do:" + assert s.w_method().primitive() == 199 + assert s.s_sender() == None + \ No newline at end of file From noreply at buildbot.pypy.org Sun Jul 27 12:22:20 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:20 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Added trace option to only show process switches, stack overflows and sender chain manipulations. 
Message-ID: <20140727102220.C3BBC1C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r957:4e239fce82f4 Date: 2014-07-26 10:24 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/4e239fce82f4/ Log: Added trace option to only show process switches, stack overflows and sender chain manipulations. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -58,7 +58,7 @@ class Interpreter(object): _immutable_fields_ = ["space", "image", - "interrupt_counter_size", + "interrupt_counter_size", "trace_important", "startup_time", "evented", "interrupts"] jit_driver = jit.JitDriver( @@ -68,7 +68,7 @@ get_printable_location=get_printable_location ) - def __init__(self, space, image=None, + def __init__(self, space, image=None, trace_important=False, trace=False, evented=True, interrupts=True): # === Initialize immutable variables self.space = space @@ -79,6 +79,7 @@ self.startup_time = constants.CompileTime self.evented = evented self.interrupts = interrupts + self.trace_important = trace_important try: self.interrupt_counter_size = int(os.environ["SPY_ICS"]) except KeyError: @@ -100,7 +101,7 @@ self.loop_bytecodes(s_new_context) raise Exception("loop_bytecodes left without raising...") except ContextSwitchException, e: - if self.is_tracing(): + if self.is_tracing() or self.trace_important: e.print_trace(s_new_context) s_new_context = e.s_new_context except Return, nlr: diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -12,7 +12,7 @@ def _usage(argv): print """ - Usage: %s [-r|-m|-h] [-naPu] [-jpiS] [-tslLE] + Usage: %s [-r|-m|-h] [-naPu] [-jpiS] [-tTslLE] - image path (default: Squeak.image) Execution mode: @@ -40,7 +40,8 @@ Logging parameters: -t|--trace - Output a trace of each message, primitive, return value and process switch. 
- -s|--safe-trace - Like -t, but without printing contents of BytesObjects + -T - Trace important events (Process switch, stack overflow, sender chain manipulation) + -s|--safe-trace - If tracing is active, omit printing contents of BytesObjects -l|--storage-log - Output a log of storage operations. -L|--storage-log-aggregate - Output an aggregated storage log at the end of execution. -E|--storage-log-elements - Include classnames of elements into the storage log. @@ -94,6 +95,7 @@ poll = False interrupts = True trace = False + trace_important = False space = prebuilt_space idx = 1 @@ -114,8 +116,9 @@ selector, idx = get_parameter(argv, idx, arg) elif arg in ["-t", "--trace"]: trace = True + elif arg in ["-T"]: + trace_important = True elif arg in ["-s", "--safe-trace"]: - trace = True space.omit_printing_raw_bytes.activate() elif arg in ["-p", "--poll"]: poll = True @@ -169,8 +172,8 @@ image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) interp = interpreter.Interpreter(space, image, - trace=trace, evented=not poll, - interrupts=interrupts) + trace=trace, trace_important=trace_important, + evented=not poll, interrupts=interrupts) space.runtime_setup(argv[0], path) print_error("") # Line break after image-loading characters From noreply at buildbot.pypy.org Sun Jul 27 12:22:35 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:35 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Commented out instpecting of event input queue in #fetchMoreEvents. Message-ID: <20140727102235.11E371C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r958:6e826a50e53c Date: 2014-07-26 10:26 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6e826a50e53c/ Log: Commented out instpecting of event input queue in #fetchMoreEvents. 
diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes --- a/images/Squeak4.5-noBitBlt.changes +++ b/images/Squeak4.5-noBitBlt.changes @@ -12631,4 +12631,4 @@ 1+1! -----QUIT/NOSAVE----{21 July 2014 . 4:33:15 pm} Squeak4.5-noBitBlt.image priorSource: 15898877! \ No newline at end of file +----QUIT/NOSAVE----{21 July 2014 . 4:33:15 pm} Squeak4.5-noBitBlt.image priorSource: 15898877! ----STARTUP----{26 July 2014 . 10:25:02 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !EventSensor methodsFor: 'private-I/O' stamp: 'ag 7/26/2014 10:25' prior: 47373772! fetchMoreEvents "Fetch more events from the VM" | eventBuffer type | "Reset input semaphore so clients can wait for the next events after this one." inputSemaphore isSignaled ifTrue: [ hasInputSemaphore := true. inputSemaphore initSignals ]. "Remember the last time that I checked for events." lastEventPoll := Time millisecondClockValue. eventBuffer := Array new: 8. [self primGetNextEvent: eventBuffer. type := eventBuffer at: 1. "type = EventTypeWindow ifTrue: [eventBuffer inspect]." type = EventTypeNone] whileFalse: [self processEvent: eventBuffer]. ! ! ----QUIT----{26 July 2014 . 10:25:36 am} Squeak4.5-noBitBlt.image priorSource: 15898877! \ No newline at end of file diff --git a/images/Squeak4.5-noBitBlt.image b/images/Squeak4.5-noBitBlt.image index 00843c2c83f9c11e5dcfa3b9927bd415d0a22cd8..09f9fb6fb17051fefa839fb357c4170d285c1c8a GIT binary patch [cut] From noreply at buildbot.pypy.org Sun Jul 27 12:22:36 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:36 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state-v2: Removed LocalReturn exception, handling it with flags in the Return exception. 
Message-ID: <20140727102236.189E71C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state-v2 Changeset: r959:b76819c9b3b6 Date: 2014-07-27 11:18 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b76819c9b3b6/ Log: Removed LocalReturn exception, handling it with flags in the Return exception. This fixed drawing issue in the mini image. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -11,11 +11,13 @@ self.object = object class Return(Exception): - _attrs_ = ["value", "s_target_context", "is_local"] - def __init__(self, s_target_context, w_result, is_local): + _attrs_ = ["value", "s_target_context", "is_local", "arrived_at_target"] + _immutable_attrs_ = ["value", "s_target_context", "is_local"] + def __init__(self, s_target_context, w_result): self.value = w_result self.s_target_context = s_target_context - self.is_local = is_local + self.arrived_at_target = False + self.is_local = s_target_context is None class NonVirtualReturn(Exception): _attrs_ = ["s_target_context", "s_current_context", "value"] @@ -27,11 +29,6 @@ def print_trace(self): print "\n====== Sender Chain Manipulation, contexts forced to heap at: %s" % self.s_current_context.short_str() -class LocalReturn(Exception): - _attrs_ = ["value"] - def __init__(self, value): - self.value = value - class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave the current context.""" @@ -108,14 +105,13 @@ if self.is_tracing(): e.print_trace() s_context = e.s_new_context - except LocalReturn, ret: - s_context = self.unwind_context_chain(s_sender, s_sender, ret.value, "LocalReturn") except Return, ret: - s_context = self.unwind_context_chain(s_sender, ret.s_target_context, ret.value, "Return") + target = s_sender if ret.arrived_at_target else ret.s_target_context + s_context = self.unwind_context_chain(s_sender, target, ret.value) except NonVirtualReturn, ret: if 
self.is_tracing(): ret.print_trace() - s_context = self.unwind_context_chain(ret.s_current_context, ret.s_target_context, ret.value, "NonVirtual") + s_context = self.unwind_context_chain(ret.s_current_context, ret.s_target_context, ret.value) # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame, # handles the stack overflow protection mechanism and handles/dispatches Returns. @@ -140,10 +136,9 @@ raise NonVirtualReturn(target_context, s_sender, ret.value) else: s_frame._activate_unwind_context(self) - if ret.s_target_context is s_sender or ret.is_local: - raise LocalReturn(ret.value) - else: - raise ret + if ret.is_local or ret.s_target_context is s_sender: + ret.arrived_at_target = True + raise ret finally: if self.is_tracing(): self.stack_depth -= 1 @@ -169,10 +164,13 @@ s_context=s_context) try: self.step(s_context) - except LocalReturn, ret: - s_context.push(ret.value) + except Return, ret: + if ret.arrived_at_target: + s_context.push(ret.value) + else: + raise ret - def unwind_context_chain(self, start_context, target_context, return_value, source=""): + def unwind_context_chain(self, start_context, target_context, return_value): if start_context is None: # This is the toplevel frame. Execution ended. 
raise ReturnFromTopLevel(return_value) @@ -180,8 +178,7 @@ context = start_context while context is not target_context: if not context: - msg = "Context chain ended (source: %s) while trying to return\n%s\nfrom\n%s\n(pc %s)\nto\n%s\n(pc %s)" % ( - source, + msg = "Context chain ended while trying to return\n%s\nfrom\n%s\n(pc %s)\nto\n%s\n(pc %s)" % ( return_value.as_repr_string(), start_context.short_str(), start_context.pc(), diff --git a/spyvm/interpreter_bytecodes.py b/spyvm/interpreter_bytecodes.py --- a/spyvm/interpreter_bytecodes.py +++ b/spyvm/interpreter_bytecodes.py @@ -393,13 +393,12 @@ # it will find the sender as a local, and we don't have to # force the reference s_return_to = None - is_local = True else: s_return_to = self.s_home().s_sender() - is_local = False + assert s_return_to, "No sender to return to!" from spyvm.interpreter import Return - raise Return(s_return_to, return_value, is_local) + raise Return(s_return_to, return_value) # ====== Send/Return bytecodes ====== @@ -494,12 +493,13 @@ if self.gettemp(1).is_nil(self.space): self.settemp(1, self.space.w_true) # mark unwound self.push(self.gettemp(0)) # push the first argument - from spyvm.interpreter import LocalReturn + from spyvm.interpreter import Return try: self.bytecodePrimValue(interp, 0) - except LocalReturn: + except Return, ret: # Local return value of ensure: block is ignored - pass + if not ret.arrived_at_target: + raise ret finally: self.mark_returned() From noreply at buildbot.pypy.org Sun Jul 27 12:22:37 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:37 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Merged. Message-ID: <20140727102237.343581C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r960:3c59d53022d4 Date: 2014-07-27 11:24 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3c59d53022d4/ Log: Merged. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -1,7 +1,7 @@ import os -from spyvm.shadow import MethodContextShadow -from spyvm import model, constants, wrapper, objspace, interpreter_bytecodes +from spyvm.shadow import MethodContextShadow, ActiveContext, InactiveContext, DirtyContext +from spyvm import model, constants, wrapper, objspace, interpreter_bytecodes, error from rpython.rlib import jit, rstackovf, unroll @@ -11,23 +11,34 @@ self.object = object class Return(Exception): - _attrs_ = ["value", "s_target_context", "is_local"] + _attrs_ = ["value", "s_target_context", "is_local", "arrived_at_target"] + _immutable_attrs_ = ["value", "s_target_context", "is_local"] def __init__(self, s_target_context, w_result): self.value = w_result self.s_target_context = s_target_context - self.is_local = False + self.arrived_at_target = False + self.is_local = s_target_context is None + +class NonVirtualReturn(Exception): + _attrs_ = ["s_target_context", "s_current_context", "value"] + def __init__(self, s_target_context, s_current_context, w_result): + self.value = w_result + self.s_target_context = s_target_context + self.s_current_context = s_current_context + + def print_trace(self): + print "\n====== Sender Chain Manipulation, contexts forced to heap at: %s" % self.s_current_context.short_str() class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave the current context.""" - _attrs_ = ["s_new_context"] type = "ContextSwitch" def __init__(self, s_new_context): self.s_new_context = s_new_context - def print_trace(self, old_context): - print "====== %s, contexts forced to heap at: %s" % (self.type, self.s_new_context.short_str()) + def print_trace(self): + print "\n====== %s at: %s" % (self.type, self.s_new_context.short_str()) class StackOverflow(ContextSwitchException): """This causes the current jit-loop to be left, dumping all virtualized objects to the heap. 
@@ -38,17 +49,8 @@ class ProcessSwitch(ContextSwitchException): """This causes the interpreter to switch the executed context. Triggered when switching the process.""" + type = "Process Switch" - def print_trace(self, old_context): - print "====== Switched process from: %s" % old_context.short_str() - print "====== to: %s " % self.s_new_context.short_str() - -class SenderChainManipulation(ContextSwitchException): - """Manipulation of the sender chain can invalidate the jitted C stack. - We have to dump all virtual objects and rebuild the stack. - We try to raise this as rarely as possible and as late as possible.""" - type = "Sender Manipulation" - UNROLLING_BYTECODE_RANGES = unroll.unrolling_iterable(interpreter_bytecodes.BYTECODE_RANGES) def get_printable_location(pc, self, method): @@ -94,25 +96,54 @@ def loop(self, w_active_context): # This is the top-level loop and is not invoked recursively. - s_new_context = w_active_context.as_context_get_shadow(self.space) + s_context = w_active_context.as_context_get_shadow(self.space) while True: - s_sender = s_new_context.s_sender() + s_sender = s_context.s_sender() try: - self.loop_bytecodes(s_new_context) + self.stack_frame(s_context, None) raise Exception("loop_bytecodes left without raising...") except ContextSwitchException, e: - if self.is_tracing() or self.trace_important: - e.print_trace(s_new_context) - s_new_context = e.s_new_context - except Return, nlr: - assert nlr.s_target_context or nlr.is_local - s_new_context = s_sender - if not nlr.is_local: - while s_new_context is not nlr.s_target_context: - s_sender = s_new_context.s_sender() - s_new_context._activate_unwind_context(self) - s_new_context = s_sender - s_new_context.push(nlr.value) + if self.is_tracing(): + e.print_trace() + s_context = e.s_new_context + except Return, ret: + target = s_sender if ret.arrived_at_target else ret.s_target_context + s_context = self.unwind_context_chain(s_sender, target, ret.value) + except NonVirtualReturn, ret: + if 
self.is_tracing(): + ret.print_trace() + s_context = self.unwind_context_chain(ret.s_current_context, ret.s_target_context, ret.value) + + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame, + # handles the stack overflow protection mechanism and handles/dispatches Returns. + def stack_frame(self, s_frame, s_sender, may_context_switch=True): + try: + if self.is_tracing(): + self.stack_depth += 1 + if s_frame._s_sender is None and s_sender is not None: + s_frame.store_s_sender(s_sender) + # Now (continue to) execute the context bytecodes + assert s_frame.state is InactiveContext + s_frame.state = ActiveContext + self.loop_bytecodes(s_frame, may_context_switch) + except rstackovf.StackOverflow: + rstackovf.check_stack_overflow() + raise StackOverflow(s_frame) + except Return, ret: + if s_frame.state is DirtyContext: + s_sender = s_frame.s_sender() # The sender has changed! + s_frame._activate_unwind_context(self) + target_context = s_sender if ret.is_local else ret.s_target_context + raise NonVirtualReturn(target_context, s_sender, ret.value) + else: + s_frame._activate_unwind_context(self) + if ret.is_local or ret.s_target_context is s_sender: + ret.arrived_at_target = True + raise ret + finally: + if self.is_tracing(): + self.stack_depth -= 1 + s_frame.state = InactiveContext def loop_bytecodes(self, s_context, may_context_switch=True): old_pc = 0 @@ -134,34 +165,33 @@ s_context=s_context) try: self.step(s_context) - except Return, nlr: - if nlr.s_target_context is s_context or nlr.is_local: - s_context.push(nlr.value) + except Return, ret: + if ret.arrived_at_target: + s_context.push(ret.value) else: - if nlr.s_target_context is None: - # This is the case where we are returning to our sender. 
- # Mark the return as local, so our sender will take it - nlr.is_local = True - s_context._activate_unwind_context(self) - raise nlr - - # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame - # and handles the stack overflow protection mechanism. - def stack_frame(self, s_frame, s_sender, may_context_switch=True): - try: - if self.is_tracing(): - self.stack_depth += 1 - if s_frame._s_sender is None and s_sender is not None: - s_frame.store_s_sender(s_sender, raise_error=False) - # Now (continue to) execute the context bytecodes - self.loop_bytecodes(s_frame, may_context_switch) - except rstackovf.StackOverflow: - rstackovf.check_stack_overflow() - raise StackOverflow(s_frame) - finally: - if self.is_tracing(): - self.stack_depth -= 1 - + raise ret + + def unwind_context_chain(self, start_context, target_context, return_value): + if start_context is None: + # This is the toplevel frame. Execution ended. + raise ReturnFromTopLevel(return_value) + assert target_context + context = start_context + while context is not target_context: + if not context: + msg = "Context chain ended while trying to return\n%s\nfrom\n%s\n(pc %s)\nto\n%s\n(pc %s)" % ( + return_value.as_repr_string(), + start_context.short_str(), + start_context.pc(), + target_context.short_str(), + start_context.pc()) + raise error.FatalError(msg) + s_sender = context.s_sender() + context._activate_unwind_context(self) + context = s_sender + context.push(return_value) + return context + def step(self, context): bytecode = context.fetch_next_bytecode() for entry in UNROLLING_BYTECODE_RANGES: diff --git a/spyvm/interpreter_bytecodes.py b/spyvm/interpreter_bytecodes.py --- a/spyvm/interpreter_bytecodes.py +++ b/spyvm/interpreter_bytecodes.py @@ -30,7 +30,8 @@ parameters += (self.fetch_next_bytecode(), ) i = i + 1 # This is a good place to step through bytecodes. 
- self.debug_bytecode() + + self.debug_bytecode(interp) return actual_implementation_method(self, interp, current_bytecode, *parameters) bytecode_implementation_wrapper.func_name = actual_implementation_method.func_name return bytecode_implementation_wrapper @@ -118,7 +119,7 @@ def pushLiteralVariableBytecode(self, interp, current_bytecode): # this bytecode assumes that literals[index] is an Association # which is an object with two named vars, and fetches the second - # named var (the value). + # named var (the value). index = current_bytecode & 31 w_association = self.w_method().getliteral(index) association = wrapper.AssociationWrapper(self.space, w_association) @@ -337,8 +338,6 @@ # ###################################################################### if interp.is_tracing(): interp.print_padded('-> %s %s' % (special_selector, s_frame.short_str())) - if not objectmodel.we_are_translated(): - import pdb; pdb.set_trace() return interp.stack_frame(s_frame, self) @@ -394,18 +393,12 @@ # it will find the sender as a local, and we don't have to # force the reference s_return_to = None - return_from_top = self.s_sender() is None else: s_return_to = self.s_home().s_sender() - return_from_top = s_return_to is None + assert s_return_to, "No sender to return to!" - if return_from_top: - # This should never happen while executing a normal image. 
- from spyvm.interpreter import ReturnFromTopLevel - raise ReturnFromTopLevel(return_value) - else: - from spyvm.interpreter import Return - raise Return(s_return_to, return_value) + from spyvm.interpreter import Return + raise Return(s_return_to, return_value) # ====== Send/Return bytecodes ====== @@ -450,7 +443,6 @@ @bytecode_implementation(parameter_bytes=2) def doubleExtendedDoAnythingBytecode(self, interp, current_bytecode, second, third): - from spyvm.interpreter import SenderChainManipulation opType = second >> 5 if opType == 0: # selfsend @@ -472,16 +464,9 @@ association = wrapper.AssociationWrapper(self.space, w_association) self.push(association.value()) elif opType == 5: - # TODO - the following two special cases should not be necessary - try: - self.w_receiver().store(self.space, third, self.top()) - except SenderChainManipulation, e: - raise SenderChainManipulation(self) + self.w_receiver().store(self.space, third, self.top()) elif opType == 6: - try: - self.w_receiver().store(self.space, third, self.pop()) - except SenderChainManipulation, e: - raise SenderChainManipulation(self) + self.w_receiver().store(self.space, third, self.pop()) elif opType == 7: w_association = self.w_method().getliteral(third) association = wrapper.AssociationWrapper(self.space, w_association) @@ -511,13 +496,13 @@ from spyvm.interpreter import Return try: self.bytecodePrimValue(interp, 0) - except Return, nlr: - assert nlr.s_target_context or nlr.is_local - if self is not nlr.s_target_context and not nlr.is_local: - raise nlr + except Return, ret: + # Local return value of ensure: block is ignored + if not ret.arrived_at_target: + raise ret finally: self.mark_returned() - + @bytecode_implementation() def unknownBytecode(self, interp, current_bytecode): raise error.MissingBytecode("unknownBytecode") @@ -613,7 +598,7 @@ bytecodePrimPointX = make_send_selector_bytecode("x", 0) bytecodePrimPointY = make_send_selector_bytecode("y", 0) - def debug_bytecode(self): + def 
debug_bytecode(self, interp): # Hook used in interpreter_debugging pass diff --git a/spyvm/interpreter_debugging.py b/spyvm/interpreter_debugging.py --- a/spyvm/interpreter_debugging.py +++ b/spyvm/interpreter_debugging.py @@ -49,8 +49,8 @@ @patch_context def debug_bytecode(original): - def meth(self): - if self.step_bytecodes: + def meth(self, interp): + if interp.step_bytecodes: _break() # Continue stepping from here to get to the current bytecode execution return meth diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -633,18 +633,29 @@ def size(self): return self._w_self_size +class ContextState(object): + def __init__(self, name): + self.name = name + def __str__(self): + return self.name + def __repr__(self): + return self.name +InactiveContext = ContextState("InactiveContext") +ActiveContext = ContextState("ActiveContext") +DirtyContext = ContextState("DirtyContext") + class ContextPartShadow(AbstractRedirectingShadow): __metaclass__ = extendabletype _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', - '_stack_ptr', 'instances_w'] + '_stack_ptr', 'instances_w', 'state'] repr_classname = "ContextPartShadow" _virtualizable_ = [ '_s_sender', "_pc", "_temps_and_stack[*]", "_stack_ptr", - "_w_self", "_w_self_size" + "_w_self", "_w_self_size", 'state' ] # ______________________________________________________________________ @@ -654,13 +665,7 @@ self._s_sender = None AbstractRedirectingShadow.__init__(self, space, w_self, size) self.instances_w = {} - - def copy_field_from(self, n0, other_shadow): - from spyvm.interpreter import SenderChainManipulation - try: - AbstractRedirectingShadow.copy_field_from(self, n0, other_shadow) - except SenderChainManipulation, e: - assert e.s_new_context == self + self.state = InactiveContext def copy_from(self, other_shadow): # Some fields have to be initialized before the rest, to ensure correct initialization. 
@@ -702,7 +707,7 @@ if n0 == constants.CTXPART_SENDER_INDEX: assert isinstance(w_value, model.W_PointersObject) if w_value.is_nil(self.space): - self.store_s_sender(None, raise_error=False) + self.store_s_sender(None) else: self.store_s_sender(w_value.as_context_get_shadow(self.space)) return @@ -722,12 +727,12 @@ # === Sender === - def store_s_sender(self, s_sender, raise_error=True): + def store_s_sender(self, s_sender): if s_sender is not self._s_sender: self._s_sender = s_sender - if raise_error: - from spyvm.interpreter import SenderChainManipulation - raise SenderChainManipulation(self) + # If new sender is None, we are just being marked as returned. + if s_sender is not None and self.state is ActiveContext: + self.state = DirtyContext def w_sender(self): sender = self.s_sender() @@ -819,7 +824,7 @@ def mark_returned(self): self.store_pc(-1) - self.store_s_sender(None, raise_error=False) + self.store_s_sender(None) def is_returned(self): return self.pc() == -1 and self.w_sender().is_nil(self.space) diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -978,9 +978,34 @@ 2, "value:value:"]], test) -def test_c_stack_reset_on_sender_chain_manipulation(): +def test_frame_dirty_if_active(): bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) w_frame, s_frame = new_frame(bytes) s_frame.store_w_receiver(w_frame) s_frame.push(w_frame) - py.test.raises(interpreter.SenderChainManipulation, step_in_interp, s_frame) + s_frame.state = shadow.ActiveContext + step_in_interp(s_frame) + assert s_frame.state is shadow.DirtyContext + +def test_frame_not_dirty_if_inactive(): + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + w_frame, s_frame = new_frame(bytes) + w_other_frame, s_other_frame = new_frame("") + s_frame.store_w_receiver(w_other_frame) + s_frame.push(w_frame) + s_frame.state = shadow.ActiveContext + step_in_interp(s_frame) + assert s_frame.state is 
shadow.ActiveContext + assert s_other_frame.state is shadow.InactiveContext + +def test_raise_NonVirtualReturn_on_dirty_frame(): + bytes = reduce(operator.add, map(chr, [0x84, 0xc0, 0x00])) + returnTopFromMethodBytecode + w_frame, s_frame = new_frame(bytes) + s_frame.store_w_receiver(w_frame) + s_frame.push(w_frame) + + interp._loop = True + def do_test(): + interp.stack_frame(s_frame, None) + py.test.raises(interpreter.NonVirtualReturn, do_test) + \ No newline at end of file diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -1,6 +1,8 @@ from spyvm import squeakimage, model, constants, interpreter, shadow, objspace from .util import read_image, find_symbol_in_methoddict_of, copy_to_module, cleanup_module +import operator + def setup_module(): space, interp, image, reader = read_image('Squeak4.5-12568.image') w = space.w @@ -25,56 +27,156 @@ w_method.setliterals(literals) return w_method -def test_ensure(): - #ensure - # [^'b1'] ensure: [^'b2'] - import operator - bytes = reduce(operator.add, map(chr, [0x8F, 0, 0, 2, 0x21, 0x7c, - 0x8F, 0, 0, 2, 0x22, 0x7c, - 0xe0, 0x87, 0x78])) - - s_class = space.w_BlockClosure.as_class_get_shadow(space) - ensure_ = find_symbol_in_methoddict_of('ensure:', s_class) - assert ensure_ is not None, 'Using image without #ensure:-method.' - - w_method = create_method(bytes, [ensure_, w('b1'), w('b2'), - w('ensure'), space.w_BlockClosure]) - +def find_symbol(w_class, symbolname): + s_class = w_class.as_class_get_shadow(space) + symbol = find_symbol_in_methoddict_of(symbolname, s_class) + assert symbol is not None, 'Using image without %s method.' 
% symbolname + return symbol + +def execute_frame(w_method): # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0), []) s_frame = w_method.create_frame(space, w(0)) - s_frame.store_s_sender(s_initial_frame, raise_error=False) + s_frame.store_s_sender(s_initial_frame) try: interp.loop(s_frame.w_self()) except interpreter.ReturnFromTopLevel, e: - assert e.object.as_string() == 'b2' - except interpreter.StackOverflow, e: - assert False + return e.object + +def test_ensure(): + #ensure + # [^'b1'] ensure: [^'b2'] + + ensure_ = find_symbol(space.w_BlockClosure, "ensure:") + bytes = reduce(operator.add, map(chr, [0x8F, 0, 0, 2, 0x21, 0x7c, + 0x8F, 0, 0, 2, 0x22, 0x7c, + 0xe0, 0x87, 0x78])) + + w_method = create_method(bytes, [ensure_, w('b1'), w('b2'), + w('ensure'), space.w_BlockClosure]) + result = execute_frame(w_method) + assert result.as_string() == 'b2' def test_ensure_save_original_nlr(): #ensure # [^'b1'] ensure: ['b2'] - import operator + + ensure_ = find_symbol(space.w_BlockClosure, "ensure:") bytes = reduce(operator.add, map(chr, [0x8F, 0, 0, 2, 0x21, 0x7c, 0x8F, 0, 0, 2, 0x22, 0x7d, 0xe0, 0x87, 0x78])) - s_class = space.w_BlockClosure.as_class_get_shadow(space) - ensure_ = find_symbol_in_methoddict_of('ensure:', s_class) - assert ensure_ is not None, 'Using image without #ensure:-method.' 
- w_method = create_method(bytes, [ensure_, w('b1'), w('b2'), w('ensure'), space.w_BlockClosure]) + result = execute_frame(w_method) + assert result.as_string() == 'b1' - # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) - s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0)) - s_frame = w_method.create_frame(space, w(0)) - s_frame.store_s_sender(s_initial_frame, raise_error=False) +def test_ContextPart_jump(): + """ + Code: Create a Block context that jumps back to its sender, instead of returning normally. + The Block is not executed to the end, the sender chain is manipulated. + The local variable should be the value pushed on the sender context before jumping to it. + a := 5. + a := [ thisContext sender push: 2. thisContext sender jump. 10 ] value. + ^ a + """ + ContextPart = space.w_MethodContext.as_class_get_shadow(space).s_superclass().w_self() + push = find_symbol(ContextPart, "push:") + sender = find_symbol(ContextPart, "sender") + jump = find_symbol(ContextPart, "jump") - try: - interp.loop(s_frame.w_self()) - except interpreter.ReturnFromTopLevel, e: - assert e.object.as_string() == 'b1' - except interpreter.StackOverflow, e: - assert False + bytes = reduce(operator.add, map(chr, [0x21, 0x82, 0xc0, # Set a + 0x8f, 0x00, 0x00, 0x0b, # Push block + 0x89, 0xd3, # Send sender + 0x77, 0xe2, # Send push + 0x87, 0x89, 0xd3, 0xd4, # Send jump + 0x87, 0x25, 0x7d, # Block rest (not executed) + 0xc9, 0x82, 0xc0, 0x40, 0x7c])) # Send value and return + + Association = space.classtable["w_Point"] # Wrong class, doesn't matter. 
+ assoc = model.W_PointersObject(space, Association, 2) + assoc.store(space, 0, w('a')) + assoc.store(space, 1, w(3)) + w_method = create_method(bytes, [assoc, w(5), push, sender, jump, w(10)]) + result = execute_frame(w_method) + assert isinstance(result, model.W_SmallInteger) + assert result.value == 2 + +def test_ContextPart_jump_nonlocal(): + """ + Like above test, but with three blocks to make the return non-local. + Also, store the outer context beforehand. + a := 5. + outer := thisContext. + a := [[[ outer push: 2. outer jump. 10 ] value ] value] value. + ^ a + """ + ContextPart = space.w_MethodContext.as_class_get_shadow(space).s_superclass().w_self() + push = find_symbol(ContextPart, "push:") + jump = find_symbol(ContextPart, "jump") + + bytes = reduce(operator.add, map(chr, [0x21, 0x82, 0xc0, # Set a + 0x89, 0x82, 0xc2, # Set outer + 0x8f, 0x00, 0x00, 0x15, # Push block + 0x8f, 0x00, 0x00, 0x0f, # Push block + 0x8f, 0x00, 0x00, 0x09, # Push block + 0x42, 0x77, 0xe3, # Push 2 + 0x87, 0x42, 0xd4, # Send jump + 0x87, 0x25, 0x7d, # Block rest (not executed) + 0xc9, 0x7d, # Send value and return + 0xc9, 0x7d, # Send value and return + 0xc9, 0x82, 0xc0, 0x40, 0x7c])) # Send value and return + + Association = space.classtable["w_Point"] # Wrong class, doesn't matter. + assoc = model.W_PointersObject(space, Association, 2) + assoc.store(space, 0, w('a')) + assoc.store(space, 1, space.w_nil) + assoc2 = model.W_PointersObject(space, Association, 2) + assoc2.store(space, 0, w('outer')) + assoc2.store(space, 1, space.w_nil) + w_method = create_method(bytes, [assoc, w(5), assoc2, push, jump, w(10)]) + result = execute_frame(w_method) + assert isinstance(result, model.W_SmallInteger) + assert result.value == 2 + +def test_contextOn_do_(): + """ + contextOn:do: is some very heavy meta programming. It creates and returns a separate stack frame, + settings it's sender to nil, thereby manipulating the senders of two contexts. 
+ The Point in there should actually be UnhandledError or something. + The test here is just that this works. + ctx := ContextPart contextOn: Point do: ['nothing'] + """ + ContextPart = space.w_MethodContext.as_class_get_shadow(space).s_superclass().w_self() + ContextPartClass = ContextPart.getclass(space).as_class_get_shadow(space).w_self() + contextOnDo = find_symbol(ContextPartClass, "contextOn:do:") + + bytes = reduce(operator.add, map(chr, [ + 0x42, 0x43, # Push the classes + 0x8f, 0x00, 0x00, 0x02, # Push block, + 0x24, 0x7d, # in the block + 0xf1, 0x81, 0xc0, 0x7c # Send contextOn:do: + ])) + + Association = space.classtable["w_Point"] # Wrong class, doesn't matter. + ctxAssoc = model.W_PointersObject(space, Association, 2) + ctxAssoc.store(space, 0, w('ctx')) + ctxAssoc.store(space, 1, space.w_nil) + contextPartAssoc = model.W_PointersObject(space, Association, 2) + contextPartAssoc.store(space, 0, w('ContextPart')) + contextPartAssoc.store(space, 1, ContextPart) + errorAssoc = model.W_PointersObject(space, Association, 2) + errorAssoc.store(space, 0, w('Point')) + errorAssoc.store(space, 1, Association) + w_method = create_method(bytes, [ctxAssoc, contextOnDo, contextPartAssoc, errorAssoc, w('nothing')]) + + interp.trace = True + + result = execute_frame(w_method) + assert isinstance(result, model.W_PointersObject) + s = result.as_context_get_shadow(space) + assert s.w_method().lookup_selector == "on:do:" + assert s.w_method().primitive() == 199 + assert s.s_sender() == None + \ No newline at end of file diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -85,7 +85,7 @@ if not self._loop: # this test is done to not loop in test, but rather step just once where wanted # Unfortunately, we have to mimick some of the original behaviour. 
- s_new_frame.store_s_sender(s_sender, raise_error=False) + s_new_frame.store_s_sender(s_sender) return s_new_frame return interpreter.Interpreter.stack_frame(self, s_new_frame, s_sender, may_context_switch) From noreply at buildbot.pypy.org Sun Jul 27 12:22:38 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:22:38 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Cleaned up tests a little by extracting helper methods. Message-ID: <20140727102238.3E6C51C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r961:95be1db81391 Date: 2014-07-27 12:16 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/95be1db81391/ Log: Cleaned up tests a little by extracting helper methods. diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -9,16 +9,22 @@ def setup_module(): space, interp = create_space_interp(bootstrap = True) w = space.w + interpret_bc = interp.interpret_bc copy_to_module(locals(), __name__) def teardown_module(): cleanup_module(__name__) +# ======= Helper methods ======= + def bootstrap_class(instsize, w_superclass=None, w_metaclass=None, name='?', format=shadow.POINTERS, varsized=True): return space.bootstrap_class(instsize, w_superclass, w_metaclass, name, format, varsized) +def new_frame(bytes, receiver=None): + return space.make_frame(bytes, receiver=receiver, args=[w("foo"), w("bar")]) + def step_in_interp(ctxt): # due to missing resets in between tests interp._loop = False try: @@ -89,22 +95,8 @@ return lit return [fakeliteral(lit) for lit in literals] -def _new_frame(space, bytes, receiver=None): - assert isinstance(bytes, str) - w_method = model.W_CompiledMethod(space, len(bytes)) - w_method.islarge = 1 - w_method.bytes = bytes - w_method.argsize=2 - w_method._tempsize=8 - w_method.setliterals([model.W_PointersObject(space, None, 2)]) - if receiver is None: - receiver = 
space.w_nil - s_frame = w_method.create_frame(space, receiver, [space.w("foo"), space.w("bar")]) - return s_frame.w_self(), s_frame +# ======= Test methods ======= -def new_frame(bytes, receiver=None): - return _new_frame(space, bytes, receiver) - def test_create_frame(): w_method = model.W_CompiledMethod(space, len("hello")) w_method.bytes="hello" @@ -695,14 +687,6 @@ storeAssociation(doubleExtendedDoAnythingBytecode + chr(7<<5) + chr(0)) -def interpret_bc(bcodes, literals, receiver=None): - if not receiver: - receiver = space.w_nil - bcode = "".join([chr(x) for x in bcodes]) - w_frame, s_frame = new_frame(bcode, receiver=receiver) - s_frame.w_method().setliterals(literals) - return interp.interpret_toplevel(w_frame) - # tests: bytecodePrimValue & bytecodePrimValueWithArg def test_bc_3_plus_4(): # value0 diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -1,7 +1,7 @@ import py, operator from spyvm import squeakimage, model, constants, error, interpreter, shadow, primitives from spyvm.test.test_primitives import MockFrame -from .util import read_image, find_symbol_in_methoddict_of, copy_to_module, cleanup_module +from .util import read_image, copy_to_module, cleanup_module from rpython.rlib.rarithmetic import intmask, r_uint def setup_module(): @@ -36,7 +36,7 @@ try: w_selector = space.get_special_selector(selector) except Exception: - w_selector = find_symbol_in_methoddict_of(selector, w(intmask(candidates[0])).getclass(space).shadow) + w_selector = space.find_symbol_in_methoddict(selector, w(intmask(candidates[0])).getclass(space)) interp.trace = trace for i, v in enumerate(candidates): diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -6,20 +6,17 @@ from rpython.rlib.rarithmetic import intmask from rpython.rtyper.lltypesystem import lltype, rffi from .util 
import create_space, copy_to_module, cleanup_module, TestInterpreter -from .test_interpreter import _new_frame def setup_module(): space = create_space(bootstrap = True) wrap = space.w bootstrap_class = space.bootstrap_class + new_frame = space.make_frame copy_to_module(locals(), __name__) def teardown_module(): cleanup_module(__name__) -def new_frame(bytes): - return _new_frame(space, bytes, space.w_nil) - class MockFrame(model.W_PointersObject): def __init__(self, space, stack): size = 6 + len(stack) + 6 diff --git a/spyvm/test/test_wrapper.py b/spyvm/test/test_wrapper.py --- a/spyvm/test/test_wrapper.py +++ b/spyvm/test/test_wrapper.py @@ -2,18 +2,15 @@ from spyvm import wrapper, model, interpreter, objspace from spyvm.error import WrapperException, FatalError from .util import create_space, copy_to_module, cleanup_module -from spyvm.test.test_interpreter import _new_frame def setup_module(): space = create_space(bootstrap = True) + new_frame = lambda: space.make_frame("")[1] copy_to_module(locals(), __name__) def teardown_module(): cleanup_module(__name__) -def new_frame(): - return _new_frame(space, "")[0].as_context_get_shadow(space) - def test_simpleread(): w_o = model.W_PointersObject(space, None, 2) w = wrapper.Wrapper(space, w_o) diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -1,11 +1,12 @@ from spyvm import squeakimage, model, constants, interpreter, shadow, objspace -from .util import read_image, find_symbol_in_methoddict_of, copy_to_module, cleanup_module +from .util import read_image, copy_to_module, cleanup_module import operator def setup_module(): space, interp, image, reader = read_image('Squeak4.5-12568.image') w = space.w + find_symbol = space.find_symbol_in_methoddict copy_to_module(locals(), __name__) def teardown_module(): @@ -16,60 +17,33 @@ from test_miniimage import _test_lookup_abs_in_integer 
_test_all_pointers_are_valid(reader) _test_lookup_abs_in_integer(interp) - -def create_method(bytes, literals=[], islarge=0, argsize=0, tempsize=0): - w_method = model.W_CompiledMethod(space, len(bytes)) - w_method.bytes = bytes - w_method.islarge = islarge - w_method.argsize = argsize - w_method._tempsize = tempsize - - w_method.setliterals(literals) - return w_method - -def find_symbol(w_class, symbolname): - s_class = w_class.as_class_get_shadow(space) - symbol = find_symbol_in_methoddict_of(symbolname, s_class) - assert symbol is not None, 'Using image without %s method.' % symbolname - return symbol - -def execute_frame(w_method): - # create a frame for our newly crafted method with a valid sender (to avoid raising returnFromTop to early) - s_initial_frame = create_method(chr(0x7c)).create_frame(space, w(0), []) - s_frame = w_method.create_frame(space, w(0)) - s_frame.store_s_sender(s_initial_frame) - - try: - interp.loop(s_frame.w_self()) - except interpreter.ReturnFromTopLevel, e: - return e.object def test_ensure(): #ensure # [^'b1'] ensure: [^'b2'] - ensure_ = find_symbol(space.w_BlockClosure, "ensure:") - bytes = reduce(operator.add, map(chr, [0x8F, 0, 0, 2, 0x21, 0x7c, - 0x8F, 0, 0, 2, 0x22, 0x7c, - 0xe0, 0x87, 0x78])) + ensure_ = find_symbol("ensure:", space.w_BlockClosure) + bytes = [0x8F, 0, 0, 2, 0x21, 0x7c, + 0x8F, 0, 0, 2, 0x22, 0x7c, + 0xe0, 0x87, 0x78] - w_method = create_method(bytes, [ensure_, w('b1'), w('b2'), + w_method = space.make_method(bytes, [ensure_, w('b1'), w('b2'), w('ensure'), space.w_BlockClosure]) - result = execute_frame(w_method) + result = interp.execute_method(w_method) assert result.as_string() == 'b2' def test_ensure_save_original_nlr(): #ensure # [^'b1'] ensure: ['b2'] - ensure_ = find_symbol(space.w_BlockClosure, "ensure:") - bytes = reduce(operator.add, map(chr, [0x8F, 0, 0, 2, 0x21, 0x7c, - 0x8F, 0, 0, 2, 0x22, 0x7d, - 0xe0, 0x87, 0x78])) + ensure_ = find_symbol("ensure:", space.w_BlockClosure) + bytes = [0x8F, 0, 0, 2, 
0x21, 0x7c, + 0x8F, 0, 0, 2, 0x22, 0x7d, + 0xe0, 0x87, 0x78] - w_method = create_method(bytes, [ensure_, w('b1'), w('b2'), + w_method = space.make_method(bytes, [ensure_, w('b1'), w('b2'), w('ensure'), space.w_BlockClosure]) - result = execute_frame(w_method) + result = interp.execute_method(w_method) assert result.as_string() == 'b1' def test_ContextPart_jump(): @@ -82,24 +56,24 @@ ^ a """ ContextPart = space.w_MethodContext.as_class_get_shadow(space).s_superclass().w_self() - push = find_symbol(ContextPart, "push:") - sender = find_symbol(ContextPart, "sender") - jump = find_symbol(ContextPart, "jump") + push = find_symbol("push:", ContextPart) + sender = find_symbol("sender", ContextPart) + jump = find_symbol("jump", ContextPart) - bytes = reduce(operator.add, map(chr, [0x21, 0x82, 0xc0, # Set a - 0x8f, 0x00, 0x00, 0x0b, # Push block - 0x89, 0xd3, # Send sender - 0x77, 0xe2, # Send push - 0x87, 0x89, 0xd3, 0xd4, # Send jump - 0x87, 0x25, 0x7d, # Block rest (not executed) - 0xc9, 0x82, 0xc0, 0x40, 0x7c])) # Send value and return + bytes = [0x21, 0x82, 0xc0, # Set a + 0x8f, 0x00, 0x00, 0x0b, # Push block + 0x89, 0xd3, # Send sender + 0x77, 0xe2, # Send push + 0x87, 0x89, 0xd3, 0xd4, # Send jump + 0x87, 0x25, 0x7d, # Block rest (not executed) + 0xc9, 0x82, 0xc0, 0x40, 0x7c] # Send value and return Association = space.classtable["w_Point"] # Wrong class, doesn't matter. 
assoc = model.W_PointersObject(space, Association, 2) assoc.store(space, 0, w('a')) assoc.store(space, 1, w(3)) - w_method = create_method(bytes, [assoc, w(5), push, sender, jump, w(10)]) - result = execute_frame(w_method) + w_method = space.make_method(bytes, [assoc, w(5), push, sender, jump, w(10)]) + result = interp.execute_method(w_method) assert isinstance(result, model.W_SmallInteger) assert result.value == 2 @@ -113,20 +87,20 @@ ^ a """ ContextPart = space.w_MethodContext.as_class_get_shadow(space).s_superclass().w_self() - push = find_symbol(ContextPart, "push:") - jump = find_symbol(ContextPart, "jump") + push = find_symbol("push:", ContextPart) + jump = find_symbol("jump", ContextPart) - bytes = reduce(operator.add, map(chr, [0x21, 0x82, 0xc0, # Set a - 0x89, 0x82, 0xc2, # Set outer - 0x8f, 0x00, 0x00, 0x15, # Push block - 0x8f, 0x00, 0x00, 0x0f, # Push block - 0x8f, 0x00, 0x00, 0x09, # Push block - 0x42, 0x77, 0xe3, # Push 2 - 0x87, 0x42, 0xd4, # Send jump - 0x87, 0x25, 0x7d, # Block rest (not executed) - 0xc9, 0x7d, # Send value and return - 0xc9, 0x7d, # Send value and return - 0xc9, 0x82, 0xc0, 0x40, 0x7c])) # Send value and return + bytes = [0x21, 0x82, 0xc0, # Set a + 0x89, 0x82, 0xc2, # Set outer + 0x8f, 0x00, 0x00, 0x15, # Push block + 0x8f, 0x00, 0x00, 0x0f, # Push block + 0x8f, 0x00, 0x00, 0x09, # Push block + 0x42, 0x77, 0xe3, # Push 2 + 0x87, 0x42, 0xd4, # Send jump + 0x87, 0x25, 0x7d, # Block rest (not executed) + 0xc9, 0x7d, # Send value and return + 0xc9, 0x7d, # Send value and return + 0xc9, 0x82, 0xc0, 0x40, 0x7c] # Send value and return Association = space.classtable["w_Point"] # Wrong class, doesn't matter. 
assoc = model.W_PointersObject(space, Association, 2) @@ -135,8 +109,8 @@ assoc2 = model.W_PointersObject(space, Association, 2) assoc2.store(space, 0, w('outer')) assoc2.store(space, 1, space.w_nil) - w_method = create_method(bytes, [assoc, w(5), assoc2, push, jump, w(10)]) - result = execute_frame(w_method) + w_method = space.make_method(bytes, [assoc, w(5), assoc2, push, jump, w(10)]) + result = interp.execute_method(w_method) assert isinstance(result, model.W_SmallInteger) assert result.value == 2 @@ -150,14 +124,14 @@ """ ContextPart = space.w_MethodContext.as_class_get_shadow(space).s_superclass().w_self() ContextPartClass = ContextPart.getclass(space).as_class_get_shadow(space).w_self() - contextOnDo = find_symbol(ContextPartClass, "contextOn:do:") + contextOnDo = find_symbol("contextOn:do:", ContextPartClass) - bytes = reduce(operator.add, map(chr, [ + bytes = [ 0x42, 0x43, # Push the classes 0x8f, 0x00, 0x00, 0x02, # Push block, 0x24, 0x7d, # in the block 0xf1, 0x81, 0xc0, 0x7c # Send contextOn:do: - ])) + ] Association = space.classtable["w_Point"] # Wrong class, doesn't matter. 
ctxAssoc = model.W_PointersObject(space, Association, 2) @@ -169,11 +143,9 @@ errorAssoc = model.W_PointersObject(space, Association, 2) errorAssoc.store(space, 0, w('Point')) errorAssoc.store(space, 1, Association) - w_method = create_method(bytes, [ctxAssoc, contextOnDo, contextPartAssoc, errorAssoc, w('nothing')]) + w_method = space.make_method(bytes, [ctxAssoc, contextOnDo, contextPartAssoc, errorAssoc, w('nothing')]) - interp.trace = True - - result = execute_frame(w_method) + result = interp.execute_method(w_method) assert isinstance(result, model.W_PointersObject) s = result.as_context_get_shadow(space) assert s.w_method().lookup_selector == "on:do:" diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -31,14 +31,6 @@ interp = TestInterpreter(space) return space, interp -def find_symbol_in_methoddict_of(string, s_class): - s_methoddict = s_class.s_methoddict() - s_methoddict.sync_method_cache() - methoddict_w = s_methoddict.methoddict - for each in methoddict_w.keys(): - if each.as_string() == string: - return each - def copy_to_module(locals, module_name): mod = sys.modules[module_name] mod._copied_objects_ = [] @@ -88,6 +80,22 @@ s_new_frame.store_s_sender(s_sender) return s_new_frame return interpreter.Interpreter.stack_frame(self, s_new_frame, s_sender, may_context_switch) + + # ============ Helpers for executing ============ + + def interpret_bc(self, bcodes, literals=None, receiver=None): + w_frame, s_frame = self.space.make_frame(bcodes, literals=literals, receiver=receiver) + self.space.wrap_frame(s_frame) + return self.interpret_toplevel(w_frame) + + def execute_method(self, w_method): + s_frame = w_method.create_frame(self.space, self.space.w(0)) + self.space.wrap_frame(s_frame) + try: + self.loop(s_frame.w_self()) + except interpreter.ReturnFromTopLevel, e: + return e.object + assert False, "Frame did not return correctly." 
class BootstrappedObjSpace(objspace.ObjSpace): @@ -264,7 +272,45 @@ raise Exception("Cannot wrap %r" % any) def initialize_class(self, w_class, interp): - initialize_symbol = find_symbol_in_methoddict_of("initialize", + initialize_symbol = self.find_symbol_in_methoddict("initialize", w_class.class_shadow(self)) interp.perform(w_class, w_selector=initialize_symbol) + + def find_symbol_in_methoddict(self, string, cls): + if isinstance(cls, model.W_PointersObject): + cls = cls.as_class_get_shadow(self) + s_methoddict = cls.s_methoddict() + s_methoddict.sync_method_cache() + methoddict_w = s_methoddict.methoddict + for each in methoddict_w.keys(): + if each.as_string() == string: + return each + assert False, 'Using image without %s method in class %s.' % (string, cls.name) + + # ============ Helpers for executing ============ + + def wrap_frame(self, s_frame): + # Add a toplevel frame around s_frame to properly return. + toplevel_frame = self.make_method([0x7c]).create_frame(self, self.w(0), []) + s_frame.store_s_sender(toplevel_frame) + + def make_method(self, bytes, literals=None, numargs=0): + if not isinstance(bytes, str): + bytes = "".join([chr(x) for x in bytes]) + w_method = model.W_CompiledMethod(self, len(bytes)) + w_method.islarge = 1 + w_method.bytes = bytes + w_method.argsize=numargs + w_method._tempsize=8 + if literals is None: + literals = [model.W_PointersObject(self, None, 2)] + w_method.setliterals(literals) + return w_method + + def make_frame(self, bytes, literals=None, receiver=None, args=[]): + w_method = self.make_method(bytes, literals, len(args)) + if receiver is None: + receiver = self.w_nil + s_frame = w_method.create_frame(self, receiver, args) + return s_frame.w_self(), s_frame \ No newline at end of file From noreply at buildbot.pypy.org Sun Jul 27 12:24:19 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 27 Jul 2014 12:24:19 +0200 (CEST) Subject: [pypy-commit] pypy py3k-qualname: A branch to implement the __qualname__ attribute 
Message-ID: <20140727102419.12E5C1C03AC@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: py3k-qualname Changeset: r72558:0bbca39a93bb Date: 2014-07-27 12:18 +0200 http://bitbucket.org/pypy/pypy/changeset/0bbca39a93bb/ Log: A branch to implement the __qualname__ attribute diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -128,13 +128,7 @@ block = self.unrollstack(SApplicationException.kind) if block is None: # no handler found for the OperationError - if we_are_translated(): - raise operr - else: - # try to preserve the CPython-level traceback - import sys - tb = sys.exc_info()[2] - raise OperationError, operr, tb + raise operr else: unroller = SApplicationException(operr) next_instr = block.handle(self, unroller) From noreply at buildbot.pypy.org Sun Jul 27 12:24:20 2014 From: noreply at buildbot.pypy.org (Ian Foote) Date: Sun, 27 Jul 2014 12:24:20 +0200 (CEST) Subject: [pypy-commit] pypy py3k-qualname: Add __qualname__ attribute to Function Message-ID: <20140727102420.618221C03AC@cobra.cs.uni-duesseldorf.de> Author: Ian Foote Branch: py3k-qualname Changeset: r72559:d3fcc7dfcdee Date: 2014-07-26 11:21 +0200 http://bitbucket.org/pypy/pypy/changeset/d3fcc7dfcdee/ Log: Add __qualname__ attribute to Function Cheat by re-using __name__ implementation without adding extra functionality diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -404,6 +404,12 @@ "to a string object")) raise + def fget_func_qualname(self, space): + return self.fget_func_name(space) + + def fset_func_qualname(self, space, w_name): + return self.fset_func_name(space, w_name) + def fdel_func_doc(self, space): self.w_doc = space.w_None diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ 
b/pypy/interpreter/test/test_function.py @@ -146,6 +146,10 @@ assert 日本.__name__ == '日本' """ + def test_qualname(self): + def f(): pass + assert hasattr(f, '__qualname__') + class AppTestFunction: def test_simple_call(self): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -807,6 +807,8 @@ Function.fset_func_code) getset_func_name = GetSetProperty(Function.fget_func_name, Function.fset_func_name) +getset_func_qualname = GetSetProperty(Function.fget_func_qualname, + Function.fset_func_qualname) getset_func_annotations = GetSetProperty(Function.fget_func_annotations, Function.fset_func_annotations, Function.fdel_func_annotations) @@ -824,6 +826,7 @@ __code__ = getset_func_code, __doc__ = getset_func_doc, __name__ = getset_func_name, + __qualname__ = getset_func_qualname, __dict__ = getset_func_dict, __defaults__ = getset_func_defaults, __kwdefaults__ = getset_func_kwdefaults, From noreply at buildbot.pypy.org Sun Jul 27 12:24:21 2014 From: noreply at buildbot.pypy.org (Ian Foote) Date: Sun, 27 Jul 2014 12:24:21 +0200 (CEST) Subject: [pypy-commit] pypy py3k-qualname: Failed attempt to update _make_function to add qualname to bytecode Message-ID: <20140727102421.8E1961C03AC@cobra.cs.uni-duesseldorf.de> Author: Ian Foote Branch: py3k-qualname Changeset: r72560:04e12b74c3cd Date: 2014-07-26 17:22 +0200 http://bitbucket.org/pypy/pypy/changeset/04e12b74c3cd/ Log: Failed attempt to update _make_function to add qualname to bytecode diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -639,10 +639,10 @@ return 1 - arg def _compute_MAKE_CLOSURE(arg): - return -1 - _num_args(arg) - ((arg >> 16) & 0xFFFF) + return -2 - _num_args(arg) - ((arg >> 16) & 0xFFFF) def _compute_MAKE_FUNCTION(arg): - return -_num_args(arg) - ((arg >> 16) & 0xFFFF) + return 
-1 -_num_args(arg) - ((arg >> 16) & 0xFFFF) def _compute_BUILD_SLICE(arg): if arg == 3: diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -282,9 +282,12 @@ self.add_none_to_final_return = False mod.body.walkabout(self) - def _make_function(self, code, num_defaults=0): + def _make_function(self, code, num_defaults=0, w_qualname=None): """Emit the opcodes to turn a code object into a function.""" + if w_qualname is None: + w_qualname = self.space.wrap(code.co_name.decode('utf-8')) code_index = self.add_const(code) + qualname_index = self.add_const(w_qualname) if code.co_freevars: # Load cell and free vars to pass on. for free in code.co_freevars: @@ -296,9 +299,11 @@ self.emit_op_arg(ops.LOAD_CLOSURE, index) self.emit_op_arg(ops.BUILD_TUPLE, len(code.co_freevars)) self.emit_op_arg(ops.LOAD_CONST, code_index) + self.emit_op_arg(ops.LOAD_CONST, qualname_index) self.emit_op_arg(ops.MAKE_CLOSURE, num_defaults) else: self.emit_op_arg(ops.LOAD_CONST, code_index) + self.emit_op_arg(ops.LOAD_CONST, qualname_index) self.emit_op_arg(ops.MAKE_FUNCTION, num_defaults) def _visit_kwonlydefaults(self, args): @@ -597,7 +602,7 @@ self.emit_op(ops.POP_TOP) if handler.name: ## generate the equivalent of: - ## + ## ## try: ## # body ## except type as name: @@ -1084,7 +1089,7 @@ self._make_call(0, call.args, call.keywords, call.starargs, call.kwargs) - + def _call_has_no_star_args(self, call): return not call.starargs and not call.kwargs diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -35,7 +35,7 @@ # different value for the highest 16 bits. 
Bump pypy_incremental_magic every # time you make pyc files incompatible -pypy_incremental_magic = 48 # bump it by 16 +pypy_incremental_magic = 64 # bump it by 16 assert pypy_incremental_magic % 16 == 0 assert pypy_incremental_magic < 3000 # the magic number of Python 3. There are # no known magic numbers below this value diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1205,6 +1205,7 @@ @jit.unroll_safe def _make_function(self, oparg, freevars=None): space = self.space + w_qualname = self.popvalue() w_codeobj = self.popvalue() codeobj = self.space.interp_w(PyCode, w_codeobj) if freevars is not None: @@ -1236,7 +1237,7 @@ @jit.unroll_safe def MAKE_CLOSURE(self, oparg, next_instr): - w_freevarstuple = self.peekvalue(1) + w_freevarstuple = self.peekvalue(2) freevars = [self.space.interp_w(Cell, cell) for cell in self.space.fixedview(w_freevarstuple)] self._make_function(oparg, freevars) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -147,8 +147,11 @@ """ def test_qualname(self): - def f(): pass + def f(): + pass assert hasattr(f, '__qualname__') + expected = 'AppTestFunctionIntrospection.test_qualname..f' + assert f.__qualname__ == expected class AppTestFunction: diff --git a/pypy/tool/importfun.py b/pypy/tool/importfun.py --- a/pypy/tool/importfun.py +++ b/pypy/tool/importfun.py @@ -213,7 +213,7 @@ if postop != _op_.LOAD_ATTR: break seenloadattr = True - + assert postop in storeops, 'postop' storename = name_for_op(codeob, postop, postoparg) @@ -561,10 +561,10 @@ ourlink = link_for_name('', module, d) head = [html.title(module.name + '.' + d)] body = [html.h1([html.a(module.name, href=link_for_module(ourlink, module)), '.' 
+ d])] - + contents = [] - for n in defuses[d]: + for n in defuses[d]: N = module.system.modules[n] contents.append(html.li(html.a(n, href=link_for_module(ourlink, N)))) From noreply at buildbot.pypy.org Sun Jul 27 12:29:17 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Sun, 27 Jul 2014 12:29:17 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage: Merged default. Message-ID: <20140727102917.349451C03AC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r962:44419a6b6d08 Date: 2014-07-27 12:29 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/44419a6b6d08/ Log: Merged default. diff --git a/README.md b/README.md new file mode 100644 --- /dev/null +++ b/README.md @@ -0,0 +1,69 @@ +Spy +========= + +A Squeak VM written in RPython, called "SPy VM". + +Setup +---- +### Required Projects +You need three repositories: +* This one +* pypy/pypy +* pypy/rsdl + +### Required packages +You need the following packages on your OS. Install with your favorite package +manager: +* pypy (For faster translation of the SPY VM) +* libsdl-dev + +### Adjusting the PYTHONPATH +In order to allow the RPython toolchain to find the rsdl module you have to add +the rsdl folder to the PYTHONPATH. Note that you have to add the rsdl subfolder +of the rsdl repository to the PYTHONPATH. + +``` +export PYTHONPATH=${PYTHONPATH}:[path to rsdl repository]/rsdl +``` + +### Setting the SDL Driver +For testing the basic functionality of the VM it is currently best to disable +the UI. You can do so by setting the SDL_VIDEODRIVER environment variable to +dummy. 
+``` +export SDL_VIDEODRIVER=dummy +``` + +### Building +To build the VM enter the following: + +``` +[path to pypy repository]/rpython/bin/rpython [path to lang-smalltalk +repository]/targetimageloadingsmalltalk.py +``` + +To build the VM with enabled just-in-time compiler: +``` +[path to pypy repository]/rpython/bin/rpython -O jit [path to lang-smalltalk +repository]/targetimageloadingsmalltalk.py +``` + +### Starting an image +The build process will produce an executable e.g. called +targetimageloadingsmalltalk-c. Start it with the following: +``` +./targetimageloadingsmalltalk-c images/Squeak4.5-*.image +``` + +Setup for stm-enabled SPY +--- +There are two branches integrating the RPython STM into SPY: stm-c4, +storage-stm-c4. You have to change two things of the setup to build those +branches. + +1. Change your local pypy repository to the stm-c4 branch. +2. Build using the following command: +``` +[path to pypy repository]/rpython/bin/rpython --gc=stmgc [path to lang-smalltalk +repository]/targetimageloadingsmalltalk.py +``` From noreply at buildbot.pypy.org Sun Jul 27 13:50:37 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sun, 27 Jul 2014 13:50:37 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Implement docstring stripping for compile(..., optimize=2). Message-ID: <20140727115037.66EA41C01B8@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72561:17f769cdad0a Date: 2014-07-27 13:27 +0200 http://bitbucket.org/pypy/pypy/changeset/17f769cdad0a/ Log: Implement docstring stripping for compile(..., optimize=2). 
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -244,7 +244,7 @@ self.emit_op_arg(op, self.add_name(container, identifier)) def possible_docstring(self, node): - if isinstance(node, ast.Expr): + if isinstance(node, ast.Expr) and self.compile_info.optimize < 2: expr_value = node.value if isinstance(expr_value, ast.Str): return expr_value diff --git a/pypy/module/__builtin__/test/test_compile.py b/pypy/module/__builtin__/test/test_compile.py --- a/pypy/module/__builtin__/test/test_compile.py +++ b/pypy/module/__builtin__/test/test_compile.py @@ -1,8 +1,7 @@ class AppTestCompile: - # TODO: This test still fails for now because the docstrings are not - # removed with optimize=2. - def untest_compile(self): + def test_compile(self): + """Clone of the part of the original test that was failing.""" import ast codestr = '''def f(): @@ -37,7 +36,7 @@ assert rv == (debugval, docstring) def test_assert_remove(self): - """Test just removal of the asserts with optimize=1.""" + """Test removal of the asserts with optimize=1.""" import ast code = """def f(): @@ -50,9 +49,41 @@ exec(compiled, ns) ns['f']() + def test_docstring_remove(self): + """Test removal of docstrings with optimize=2.""" + import ast + import marshal -# TODO: Remove docstrings with optimize=2. 
+ code = """ +'module_doc' + +def f(): + 'func_doc' + +class C: + 'class_doc' +""" + tree = ast.parse(code) + for to_compile in [code, tree]: + compiled = compile(to_compile, "", "exec", optimize=2) + + # check that the docstrings are really gone + marshalled = str(marshal.dumps(compiled)) + assert 'module_doc' not in marshalled + assert 'func_doc' not in marshalled + assert 'class_doc' not in marshalled + + # try to execute the bytecode and see what we get + ns = {} + exec(compiled, ns) + assert '__doc__' not in ns + assert ns['f'].__doc__ is None + assert ns['C'].__doc__ is None + + # TODO: Check the value of __debug__ inside of the compiled block! # According to the documentation, it should follow the optimize flag. +# However, cpython3.3 behaves the same way as PyPy (__debug__ follows +# -O, -OO flags of the interpreter). # TODO: It would also be good to test that with the assert is not removed and # is executed when -O flag is set but optimize=0. From noreply at buildbot.pypy.org Sun Jul 27 13:50:38 2014 From: noreply at buildbot.pypy.org (agobi) Date: Sun, 27 Jul 2014 13:50:38 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Introducing the missing method _compare_digest in the module operator. Message-ID: <20140727115038.A08081C01B8@cobra.cs.uni-duesseldorf.de> Author: Attila Gobi Branch: py3.3 Changeset: r72562:b453ad72cd9a Date: 2014-07-27 13:23 +0200 http://bitbucket.org/pypy/pypy/changeset/b453ad72cd9a/ Log: Introducing the missing method _compare_digest in the module operator. The method is used in the hmac module. 
diff --git a/pypy/module/operator/__init__.py b/pypy/module/operator/__init__.py --- a/pypy/module/operator/__init__.py +++ b/pypy/module/operator/__init__.py @@ -35,6 +35,7 @@ 'indexOf'] interpleveldefs = {} + interpleveldefs['_compare_digest'] = 'tscmp.compare_digest' for name in interp_names: interpleveldefs[name] = 'interp_operator.%s' % name diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -188,3 +188,25 @@ raises(TypeError, operator.indexOf, None, None) assert operator.indexOf([4, 3, 2, 1], 3) == 1 raises(ValueError, operator.indexOf, [4, 3, 2, 1], 0) + + def test_compare_digest_buffer(self): + import operator + assert operator._compare_digest(b'asd', b'asd') + assert not operator._compare_digest(b'asd', b'qwe') + assert not operator._compare_digest(b'asd', b'asdq') + + def test_compare_digest_nonbuffer(self): + import operator + exc = raises(TypeError, operator._compare_digest, 'asd', b'asd') + assert str(exc.value) == "'str' does not support the buffer interface" + + def test_compare_digest_nonascii(self): + import operator + exc = raises(TypeError, operator._compare_digest, 'G\u00d3bi', 'G\u00d3bi') + assert str(exc.value) == "comparing strings with non-ASCII characters is not supported"; + + def test_compare_digest_ascii(self): + import operator + assert operator._compare_digest('asd', 'asd') + assert not operator._compare_digest('asd', 'qwe') + assert not operator._compare_digest('asd', 'asdq') diff --git a/pypy/module/operator/test/test_tscmp.py b/pypy/module/operator/test/test_tscmp.py new file mode 100644 --- /dev/null +++ b/pypy/module/operator/test/test_tscmp.py @@ -0,0 +1,15 @@ +from pypy.module.operator.tscmp import pypy_tscmp +from rpython.rtyper.lltypesystem.rffi import scoped_nonmovingbuffer + +class TestTimingSafeCompare: + def test_tscmp_neq(self): + assert not pypy_tscmp('asd', 'qwe', 3, 3) 
+ + def test_tscmp_eq(self): + assert pypy_tscmp('asd', 'asd', 3, 3) + + def test_tscmp_len(self): + assert pypy_tscmp('asdp', 'asdq', 3, 3) + + def test_tscmp_nlen(self): + assert not pypy_tscmp('asd', 'asd', 2, 3) diff --git a/pypy/module/operator/tscmp.c b/pypy/module/operator/tscmp.c new file mode 100644 --- /dev/null +++ b/pypy/module/operator/tscmp.c @@ -0,0 +1,42 @@ +/* From CPython 3.3.5's operator.c + */ + +#include +#include "tscmp.h" + +int +pypy_tscmp(const unsigned char *a, const unsigned char *b, long len_a, long len_b) +{ + /* The volatile type declarations make sure that the compiler has no + * chance to optimize and fold the code in any way that may change + * the timing. + */ + volatile long length; + volatile const unsigned char *left; + volatile const unsigned char *right; + long i; + unsigned char result; + + /* loop count depends on length of b */ + length = len_b; + left = NULL; + right = b; + + /* don't use else here to keep the amount of CPU instructions constant, + * volatile forces re-evaluation + * */ + if (len_a == length) { + left = *((volatile const unsigned char**)&a); + result = 0; + } + if (len_a != length) { + left = b; + result = 1; + } + + for (i=0; i < length; i++) { + result |= *left++ ^ *right++; + } + + return (result == 0); +} diff --git a/pypy/module/operator/tscmp.h b/pypy/module/operator/tscmp.h new file mode 100644 --- /dev/null +++ b/pypy/module/operator/tscmp.h @@ -0,0 +1,1 @@ +int pypy_tscmp(const unsigned char *a, const unsigned char *b, long len_a, long len_b); diff --git a/pypy/module/operator/tscmp.py b/pypy/module/operator/tscmp.py new file mode 100644 --- /dev/null +++ b/pypy/module/operator/tscmp.py @@ -0,0 +1,42 @@ +""" +Provides _compare_digest method, which is a safe comparing to prevent timing +attacks for the hmac module. 
+""" +import py +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from pypy.interpreter.error import OperationError + +cwd = py.path.local(__file__).dirpath() +eci = ExternalCompilationInfo( + includes=[cwd.join('tscmp.h')], + separate_module_files=[cwd.join('tscmp.c')], + export_symbols=['pypy_tscmp']) + +def llexternal(*args, **kwargs): + kwargs.setdefault('compilation_info', eci) + kwargs.setdefault('sandboxsafe', True) + return rffi.llexternal(*args, **kwargs) + +pypy_tscmp = llexternal('pypy_tscmp', [rffi.CCHARP, rffi.CCHARP, rffi.LONG, rffi.LONG], rffi.INT) + +def compare_digest(space, w_a, w_b): + if space.isinstance_w(w_a, space.w_unicode) and space.isinstance_w(w_b, space.w_unicode): + try: + a_value = space.call_method(w_a, "encode", space.wrap("ascii")) + b_value = space.call_method(w_b, "encode", space.wrap("ascii")) + return compare_digest_buffer(space, a_value, b_value) + except OperationError as e: + if not e.match(space, space.w_UnicodeEncodeError): + raise + raise OperationError(space.w_TypeError, + space.wrap("comparing strings with non-ASCII characters is not supported")) + else: + return compare_digest_buffer(space, w_a, w_b) + +def compare_digest_buffer(space, w_a, w_b): + a = space.bufferstr_w(w_a) + b = space.bufferstr_w(w_b) + with rffi.scoped_nonmovingbuffer(a) as a_buffer: + with rffi.scoped_nonmovingbuffer(b) as b_buffer: + return space.wrap(pypy_tscmp(a_buffer, b_buffer, len(a), len(b))) From noreply at buildbot.pypy.org Sun Jul 27 13:50:39 2014 From: noreply at buildbot.pypy.org (agobi) Date: Sun, 27 Jul 2014 13:50:39 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fixing return type of _compare_digest Message-ID: <20140727115039.C97371C01B8@cobra.cs.uni-duesseldorf.de> Author: Attila Gobi Branch: py3.3 Changeset: r72563:69a3e8128a9c Date: 2014-07-27 13:36 +0200 http://bitbucket.org/pypy/pypy/changeset/69a3e8128a9c/ Log: fixing return type of _compare_digest diff 
--git a/pypy/module/operator/tscmp.py b/pypy/module/operator/tscmp.py --- a/pypy/module/operator/tscmp.py +++ b/pypy/module/operator/tscmp.py @@ -39,4 +39,4 @@ b = space.bufferstr_w(w_b) with rffi.scoped_nonmovingbuffer(a) as a_buffer: with rffi.scoped_nonmovingbuffer(b) as b_buffer: - return space.wrap(pypy_tscmp(a_buffer, b_buffer, len(a), len(b))) + return space.wrap(rffi.cast(lltype.Bool, pypy_tscmp(a_buffer, b_buffer, len(a), len(b)))) From noreply at buildbot.pypy.org Sun Jul 27 13:50:41 2014 From: noreply at buildbot.pypy.org (agobi) Date: Sun, 27 Jul 2014 13:50:41 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merging py3.3 Message-ID: <20140727115041.032451C01B8@cobra.cs.uni-duesseldorf.de> Author: Attila Gobi Branch: py3.3 Changeset: r72564:5a1e336d58d6 Date: 2014-07-27 13:44 +0200 http://bitbucket.org/pypy/pypy/changeset/5a1e336d58d6/ Log: Merging py3.3 diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -244,7 +244,7 @@ self.emit_op_arg(op, self.add_name(container, identifier)) def possible_docstring(self, node): - if isinstance(node, ast.Expr): + if isinstance(node, ast.Expr) and self.compile_info.optimize < 2: expr_value = node.value if isinstance(expr_value, ast.Str): return expr_value diff --git a/pypy/module/__builtin__/test/test_compile.py b/pypy/module/__builtin__/test/test_compile.py --- a/pypy/module/__builtin__/test/test_compile.py +++ b/pypy/module/__builtin__/test/test_compile.py @@ -1,8 +1,7 @@ class AppTestCompile: - # TODO: This test still fails for now because the docstrings are not - # removed with optimize=2. 
- def untest_compile(self): + def test_compile(self): + """Clone of the part of the original test that was failing.""" import ast codestr = '''def f(): @@ -37,7 +36,7 @@ assert rv == (debugval, docstring) def test_assert_remove(self): - """Test just removal of the asserts with optimize=1.""" + """Test removal of the asserts with optimize=1.""" import ast code = """def f(): @@ -50,9 +49,41 @@ exec(compiled, ns) ns['f']() + def test_docstring_remove(self): + """Test removal of docstrings with optimize=2.""" + import ast + import marshal -# TODO: Remove docstrings with optimize=2. + code = """ +'module_doc' + +def f(): + 'func_doc' + +class C: + 'class_doc' +""" + tree = ast.parse(code) + for to_compile in [code, tree]: + compiled = compile(to_compile, "", "exec", optimize=2) + + # check that the docstrings are really gone + marshalled = str(marshal.dumps(compiled)) + assert 'module_doc' not in marshalled + assert 'func_doc' not in marshalled + assert 'class_doc' not in marshalled + + # try to execute the bytecode and see what we get + ns = {} + exec(compiled, ns) + assert '__doc__' not in ns + assert ns['f'].__doc__ is None + assert ns['C'].__doc__ is None + + # TODO: Check the value of __debug__ inside of the compiled block! # According to the documentation, it should follow the optimize flag. +# However, cpython3.3 behaves the same way as PyPy (__debug__ follows +# -O, -OO flags of the interpreter). # TODO: It would also be good to test that with the assert is not removed and # is executed when -O flag is set but optimize=0. From noreply at buildbot.pypy.org Sun Jul 27 14:01:49 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 27 Jul 2014 14:01:49 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Update documentions TODO. 
Message-ID: <20140727120149.A740A1C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72565:fe580312d6dd Date: 2014-07-26 18:16 +0200 http://bitbucket.org/pypy/pypy/changeset/fe580312d6dd/ Log: Update documentions TODO. diff --git a/TODO-docs b/TODO-docs --- a/TODO-docs +++ b/TODO-docs @@ -4,18 +4,19 @@ General ------- -* structure documentation and add appropriate toctrees -* integrate numerous getting started documents into something more useful - (eg. "Installing PyPy", "Building PyPy from source", "Playing with the - RPython Toolchain", "Write your own interpreter in RPython") +* structure documentation and add appropriate toctrees (mostly done) * architecture documents don't really show the separation between PyPy and RPython + * architecture.rst is duplicate (both pypy and rpython) * where should the documentation about coding style etc. be put? + * we don't really document coding style + * w_* convention? Cleanup ~~~~~~~ -* remove documentation for removed features +* remove documentation on removed features + * various object spaces * update / remove dead links @@ -29,11 +30,6 @@ RPython ------- -* remove duplication between translation.rst and rtyper.rst. -* rename / move rpython.rst? - - -PyPy ----- - -* divide user documentation, developer documentation, and academical stuff +* make translation.rst a high-level overview and move details in their own + documents +* redo various outdated pictures in translation.rst From noreply at buildbot.pypy.org Sun Jul 27 14:01:50 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 27 Jul 2014 14:01:50 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Remove redundant paragraphs that were both in translation.rst and rtyper.rst. 
Message-ID: <20140727120150.DB3CD1C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72566:ed4bb506fa58 Date: 2014-07-27 13:04 +0200 http://bitbucket.org/pypy/pypy/changeset/ed4bb506fa58/ Log: Remove redundant paragraphs that were both in translation.rst and rtyper.rst. diff --git a/rpython/doc/rtyper.rst b/rpython/doc/rtyper.rst --- a/rpython/doc/rtyper.rst +++ b/rpython/doc/rtyper.rst @@ -1,3 +1,5 @@ +.. _rtyper: + The RPython Typer ================= @@ -10,25 +12,26 @@ Overview -------- -The RPython Typer is the bridge between the :ref:`Annotator ` and the low-level code -generators. The annotations of the :ref:`Annotator ` are high-level, in the sense -that they describe RPython types like lists or instances of user-defined -classes. In general, though, to emit code we need to represent these -high-level annotations in the low-level model of the target language; for C, -this means structures and pointers and arrays. The Typer both determines the -appropriate low-level type for each annotation, and tries to replace *all* -operations in the control flow graphs with one or a few low-level operations. -Just like low-level types, there is only a fairly restricted set of low-level -operations, along the lines of reading or writing from or to a field of a -structure. +The RPython Typer is the bridge between the :ref:`Annotator ` and +the code generators. The annotations of the :ref:`Annotator ` are +high-level, in the sense that they describe RPython types like lists or +instances of user-defined classes. -In theory, this step is optional; some code generators might be able to read -the high-level types directly. However, we expect that case to be the -exception. "Compiling" high-level types into low-level ones is rather more -messy than one would expect. This was the motivation for making this step -explicit and isolated in a single place. 
After Typing, the graphs can only -contain very few operations, which makes the job of the code generators much -simpler. +To emit code we need to represent these high-level annotations in the low-level +model of the target language; for C, this means structures and pointers and +arrays. The Typer both determines the appropriate low-level type for each +annotation and replaces each high-level operation in the control flow graphs +with one or a few low-level operations. Just like low-level types, there is +only a fairly restricted set of low-level operations, along the lines of +reading or writing from or to a field of a structure. + +In theory, this step is optional; a code generator might be able to read the +high-level types directly. Our experience, however, suggests that this is very +unlikely to be practical. "Compiling" high-level types into low-level ones is +rather more messy than one would expect. This was the motivation for making +this step explicit and isolated in a single place. After RTyping, the graphs +only contain operations that already live on the level of the target language, +making the job of the code generators much simpler. Example: Integer operations diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -435,67 +435,10 @@ parent class. -.. _rpython-typer: - The RPython Typer ----------------- -:source:`rpython/rtyper/` - -The RTyper is the first place where the choice of backend makes a -difference; as outlined above we are assuming that ANSI C is the target. - -The RPython Typer is the bridge between the Annotator_ and the code -generator. The information computed by the annotator is high-level, in -the sense that it describe RPython types like lists or instances of -user-defined classes. 
- -To emit code we need to represent these high-level annotations in the -low-level model of the target language; for C, this means structures and -pointers and arrays. The Typer both determines the appropriate low-level type -for each annotation and replaces each high-level operation in the control flow -graphs with one or a few low-level operations. Just like low-level types, -there is only a fairly restricted set of low-level operations, along the lines -of reading or writing from or to a field of a structure. - -In theory, this step is optional; a code generator might be able to read -directly the high-level types. Our experience, however, suggests that this is -very unlikely to be practical. "Compiling" high-level types into low-level -ones is rather more messy than one would expect and this was the motivation -for making this step explicit and isolated in a single place. After RTyping, -the graphs only contain operations that already live on the level of the -target language, which makes the job of the code generators much simpler. - -For more detailed information, see the :doc:`documentation for the RTyper `. - -.. _documentation for the RTyper: rtyper.html - - -Example: Integer operations -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Integer operations are make an easy example. Assume a graph containing the -following operation:: - - v3 = add(v1, v2) - -annotated:: - - v1 -> SomeInteger() - v2 -> SomeInteger() - v3 -> SomeInteger() - -then obviously we want to type it and replace it with:: - - v3 = int_add(v1, v2) - -where -- in C notation -- all three variables v1, v2 and v3 are typed ``int``. -This is done by attaching an attribute ``concretetype`` to v1, v2 and v3 -(which might be instances of Variable or possibly Constant). In our model, -this ``concretetype`` is ``pypy.rpython.lltypesystem.lltype.Signed``. 
Of -course, the purpose of replacing the operation called ``add`` with -``int_add`` is that code generators no longer have to worry about what kind -of addition (or concatenation maybe?) it means. +See :doc:`rtyper`. .. _optional-transformations: From noreply at buildbot.pypy.org Sun Jul 27 14:01:52 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 27 Jul 2014 14:01:52 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Remove mentions of stackless transformations in translation.rst and move 'backend optimizations' section up. Message-ID: <20140727120152.05FED1C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72567:f2f69932b991 Date: 2014-07-27 14:01 +0200 http://bitbucket.org/pypy/pypy/changeset/f2f69932b991/ Log: Remove mentions of stackless transformations in translation.rst and move 'backend optimizations' section up. diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -55,17 +55,12 @@ variable can contain at run-time, building flow graphs using the :ref:`Flow Object Space ` as it encounters them. -3. :ref:`rpython-typer` (or RTyper) uses the high-level information - inferred by the Annotator to turn the operations in the control flow - graphs into low-level operations. +3. The :ref:`RPython Typer ` (or RTyper) uses the high-level + information inferred by the Annotator to turn the operations in the control + flow graphs into low-level operations. -4. After RTyping there are two, rather different, `optional - transformations ` which can be applied -- the "backend - optimizations" which are intended to make the resulting program go - faster, and the "stackless transform" which transforms the program - into a form of continuation passing style which allows the - implementation of coroutines and other forms of non-standard - control flow. +4. 
After the RTyper there are several optional `optimizations`_ which can be + applied and are intended to make the resulting program go faster. 5. The next step is `preparing the graphs for source generation`_, which involves computing the names that the various functions and types in @@ -441,21 +436,10 @@ See :doc:`rtyper`. -.. _optional-transformations: - -The Optional Transformations ----------------------------- - -Between RTyping and C source generation there are two optional transforms: -the "backend optimizations" and the "stackless transform". See also -`D07.1 Massive Parallelism and Translation Aspects`_ for further details. - -.. _Technical report: -.. _D07.1 Massive Parallelism and Translation Aspects: https://bitbucket.org/pypy/extradoc/raw/ee3059291497/eu-report/D07.1_Massive_Parallelism_and_Translation_Aspects-2007-02-28.pdf - +.. _optimizations: Backend Optimizations -~~~~~~~~~~~~~~~~~~~~~ +--------------------- The point of the backend optimizations are to make the compiled program run faster. Compared to many parts of the PyPy translator, which are very unlike @@ -464,7 +448,7 @@ Function Inlining -+++++++++++++++++ +~~~~~~~~~~~~~~~~~ To reduce the overhead of the many function calls that occur when running the PyPy interpreter we implemented function inlining. 
This is an optimization @@ -502,7 +486,7 @@ Malloc Removal -++++++++++++++ +~~~~~~~~~~~~~~ Since RPython is a garbage collected language there is a lot of heap memory allocation going on all the time, which would either not occur at all in a more @@ -542,7 +526,7 @@ Escape Analysis and Stack Allocation -++++++++++++++++++++++++++++++++++++ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Another technique to reduce the memory allocation penalty is to use stack allocation for objects that can be proved not to life longer than the stack From noreply at buildbot.pypy.org Sun Jul 27 14:12:36 2014 From: noreply at buildbot.pypy.org (Ian Foote) Date: Sun, 27 Jul 2014 14:12:36 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fix error message for converting surrogate to int Message-ID: <20140727121236.97A1B1C03AC@cobra.cs.uni-duesseldorf.de> Author: Ian Foote Branch: py3.3 Changeset: r72568:13703de458c9 Date: 2014-07-27 14:01 +0200 http://bitbucket.org/pypy/pypy/changeset/13703de458c9/ Log: Fix error message for converting surrogate to int diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -418,11 +418,11 @@ return None inst = a() raises(TypeError, int, inst) - assert inst.ar == True + assert inst.ar == True class b(object): - pass - raises((AttributeError,TypeError), int, b()) + pass + raises((AttributeError,TypeError), int, b()) def test_special_long(self): class a(object): @@ -504,6 +504,11 @@ else: assert False, value + def test_int_error_msg_surrogate(self): + value = u'123\ud800' + e = raises(ValueError, int, value) + assert str(e.value) == "invalid literal for int() with base 10: %r" % value + def test_fake_int_as_base(self): class MyInt(object): def __init__(self, x): diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ 
-1166,7 +1166,7 @@ except KeyError: pass result[i] = unichr(uchr) - return unicodehelper.encode_utf8(space, u''.join(result)) + return unicodehelper.encode_utf8(space, u''.join(result), allow_surrogates=True) _repr_function, _ = make_unicode_escape_function( From noreply at buildbot.pypy.org Sun Jul 27 14:27:24 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sun, 27 Jul 2014 14:27:24 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Test cleanup. Message-ID: <20140727122724.295981C03AC@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72569:9d0edadd99a6 Date: 2014-07-27 13:35 +0200 http://bitbucket.org/pypy/pypy/changeset/9d0edadd99a6/ Log: Test cleanup. diff --git a/pypy/module/__builtin__/test/test_compile.py b/pypy/module/__builtin__/test/test_compile.py --- a/pypy/module/__builtin__/test/test_compile.py +++ b/pypy/module/__builtin__/test/test_compile.py @@ -67,23 +67,23 @@ for to_compile in [code, tree]: compiled = compile(to_compile, "", "exec", optimize=2) - # check that the docstrings are really gone - marshalled = str(marshal.dumps(compiled)) - assert 'module_doc' not in marshalled - assert 'func_doc' not in marshalled - assert 'class_doc' not in marshalled - - # try to execute the bytecode and see what we get ns = {} exec(compiled, ns) assert '__doc__' not in ns assert ns['f'].__doc__ is None assert ns['C'].__doc__ is None + # Check that the docstrings are gone from the bytecode and not just + # inaccessible. + marshalled = str(marshal.dumps(compiled)) + assert 'module_doc' not in marshalled + assert 'func_doc' not in marshalled + assert 'class_doc' not in marshalled + # TODO: Check the value of __debug__ inside of the compiled block! # According to the documentation, it should follow the optimize flag. -# However, cpython3.3 behaves the same way as PyPy (__debug__ follows +# However, cpython3.5.0a0 behaves the same way as PyPy (__debug__ follows # -O, -OO flags of the interpreter). 
-# TODO: It would also be good to test that with the assert is not removed and -# is executed when -O flag is set but optimize=0. +# TODO: It would also be good to test that the assert is not removed and is +# executed when -O flag is set but optimize=0. From noreply at buildbot.pypy.org Sun Jul 27 14:27:25 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sun, 27 Jul 2014 14:27:25 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Add a test for interaction between -O and optimize=0. Message-ID: <20140727122725.6EEB11C03AC@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72570:fbd2b376181f Date: 2014-07-27 14:14 +0200 http://bitbucket.org/pypy/pypy/changeset/fbd2b376181f/ Log: Add a test for interaction between -O and optimize=0. diff --git a/pypy/module/__builtin__/test/test_compile.py b/pypy/module/__builtin__/test/test_compile.py --- a/pypy/module/__builtin__/test/test_compile.py +++ b/pypy/module/__builtin__/test/test_compile.py @@ -81,9 +81,36 @@ assert 'class_doc' not in marshalled +class TestOptimizeO: + """Test interaction of -O flag and optimize parameter of compile.""" + + def test_O_optmize_0(self): + """Test that assert is not ignored if -O flag is set but optimize=0.""" + space = self.space + space.sys.debug = False # imitate -O + + w_res = space.appexec([], """(): + assert False # check that our -O imitation hack works + try: + exec(compile('assert False', '', 'exec', optimize=0)) + except AssertionError: + return True + else: + return False + """) + assert space.unwrap(w_res) + + def test_O_optimize__1(self): + """Test that assert is ignored with -O and optimize=-1.""" + space = self.space + space.sys.debug = False # imitate -O + + space.appexec([], """(): + exec(compile('assert False', '', 'exec', optimize=-1)) + """) + + # TODO: Check the value of __debug__ inside of the compiled block! # According to the documentation, it should follow the optimize flag. 
# However, cpython3.5.0a0 behaves the same way as PyPy (__debug__ follows # -O, -OO flags of the interpreter). -# TODO: It would also be good to test that the assert is not removed and is -# executed when -O flag is set but optimize=0. From noreply at buildbot.pypy.org Sun Jul 27 14:27:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 27 Jul 2014 14:27:26 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge heads Message-ID: <20140727122726.B2A171C03AC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3.3 Changeset: r72571:58ed28228b4d Date: 2014-07-27 14:27 +0200 http://bitbucket.org/pypy/pypy/changeset/58ed28228b4d/ Log: merge heads diff --git a/pypy/module/__builtin__/test/test_compile.py b/pypy/module/__builtin__/test/test_compile.py --- a/pypy/module/__builtin__/test/test_compile.py +++ b/pypy/module/__builtin__/test/test_compile.py @@ -67,23 +67,50 @@ for to_compile in [code, tree]: compiled = compile(to_compile, "", "exec", optimize=2) - # check that the docstrings are really gone - marshalled = str(marshal.dumps(compiled)) - assert 'module_doc' not in marshalled - assert 'func_doc' not in marshalled - assert 'class_doc' not in marshalled - - # try to execute the bytecode and see what we get ns = {} exec(compiled, ns) assert '__doc__' not in ns assert ns['f'].__doc__ is None assert ns['C'].__doc__ is None + # Check that the docstrings are gone from the bytecode and not just + # inaccessible. 
+ marshalled = str(marshal.dumps(compiled)) + assert 'module_doc' not in marshalled + assert 'func_doc' not in marshalled + assert 'class_doc' not in marshalled + + +class TestOptimizeO: + """Test interaction of -O flag and optimize parameter of compile.""" + + def test_O_optmize_0(self): + """Test that assert is not ignored if -O flag is set but optimize=0.""" + space = self.space + space.sys.debug = False # imitate -O + + w_res = space.appexec([], """(): + assert False # check that our -O imitation hack works + try: + exec(compile('assert False', '', 'exec', optimize=0)) + except AssertionError: + return True + else: + return False + """) + assert space.unwrap(w_res) + + def test_O_optimize__1(self): + """Test that assert is ignored with -O and optimize=-1.""" + space = self.space + space.sys.debug = False # imitate -O + + space.appexec([], """(): + exec(compile('assert False', '', 'exec', optimize=-1)) + """) + # TODO: Check the value of __debug__ inside of the compiled block! # According to the documentation, it should follow the optimize flag. -# However, cpython3.3 behaves the same way as PyPy (__debug__ follows +# However, cpython3.5.0a0 behaves the same way as PyPy (__debug__ follows # -O, -OO flags of the interpreter). -# TODO: It would also be good to test that with the assert is not removed and -# is executed when -O flag is set but optimize=0. From noreply at buildbot.pypy.org Sun Jul 27 18:03:08 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 27 Jul 2014 18:03:08 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Apply all PyPy specific tweaks to the test suite that were removed with the merge of stdlib 3.3. Message-ID: <20140727160308.D54C91C024A@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72572:d9b7e7669d2b Date: 2014-07-27 18:02 +0200 http://bitbucket.org/pypy/pypy/changeset/d9b7e7669d2b/ Log: Apply all PyPy specific tweaks to the test suite that were removed with the merge of stdlib 3.3. 
diff --git a/lib-python/3/test/test_audioop.py b/lib-python/3/test/test_audioop.py --- a/lib-python/3/test/test_audioop.py +++ b/lib-python/3/test/test_audioop.py @@ -1,6 +1,7 @@ import audioop import sys import unittest +from test.support import run_unittest, impl_detail def pack(width, data): return b''.join(v.to_bytes(width, sys.byteorder, signed=True) for v in data) @@ -170,6 +171,7 @@ self.assertEqual(audioop.lin2lin(datas[4], 4, 2), packs[2](0, 0x1234, 0x4567, -0x4568, 0x7fff, -0x8000, -1)) + @impl_detail(pypy=False) def test_adpcm2lin(self): self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 1, None), (b'\x00\x00\x00\xff\x00\xff', (-179, 40))) @@ -184,6 +186,7 @@ self.assertEqual(audioop.adpcm2lin(b'\0' * 5, w, None), (b'\0' * w * 10, (0, 0))) + @impl_detail(pypy=False) def test_lin2adpcm(self): self.assertEqual(audioop.lin2adpcm(datas[1], 1, None), (b'\x07\x7f\x7f', (-221, 39))) @@ -197,6 +200,7 @@ self.assertEqual(audioop.lin2adpcm(b'\0' * w * 10, w, None), (b'\0' * 5, (0, 0))) + @impl_detail(pypy=False) def test_lin2alaw(self): self.assertEqual(audioop.lin2alaw(datas[1], 1), b'\xd5\x87\xa4\x24\xaa\x2a\x5a') @@ -205,6 +209,7 @@ self.assertEqual(audioop.lin2alaw(datas[4], 4), b'\xd5\x87\xa4\x24\xaa\x2a\x55') + @impl_detail(pypy=False) def test_alaw2lin(self): encoded = b'\x00\x03\x24\x2a\x51\x54\x55\x58\x6b\x71\x7f'\ b'\x80\x83\xa4\xaa\xd1\xd4\xd5\xd8\xeb\xf1\xff' @@ -219,6 +224,7 @@ decoded = audioop.alaw2lin(encoded, w) self.assertEqual(audioop.lin2alaw(decoded, w), encoded) + @impl_detail(pypy=False) def test_lin2ulaw(self): self.assertEqual(audioop.lin2ulaw(datas[1], 1), b'\xff\xad\x8e\x0e\x80\x00\x67') @@ -227,6 +233,7 @@ self.assertEqual(audioop.lin2ulaw(datas[4], 4), b'\xff\xad\x8e\x0e\x80\x00\x7e') + @impl_detail(pypy=False) def test_ulaw2lin(self): encoded = b'\x00\x0e\x28\x3f\x57\x6a\x76\x7c\x7e\x7f'\ b'\x80\x8e\xa8\xbf\xd7\xea\xf6\xfc\xfe\xff' @@ -341,6 +348,7 @@ self.assertRaises(audioop.error, audioop.findmax, bytes(range(256)), -2392392) + 
@impl_detail(pypy=False) def test_issue7673(self): state = None for data, size in INVALID_DATA: @@ -365,6 +373,7 @@ self.assertRaises(audioop.error, audioop.lin2alaw, data, size) self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state) + @impl_detail(pypy=False) def test_wrongsize(self): data = b'abcdefgh' state = None diff --git a/lib-python/3/test/test_capi.py b/lib-python/3/test/test_capi.py --- a/lib-python/3/test/test_capi.py +++ b/lib-python/3/test/test_capi.py @@ -110,6 +110,8 @@ self.assertRaises(TypeError, _posixsubprocess.fork_exec, Z(),[b'1'],3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17) + at unittest.skipIf(support.check_impl_detail(pypy=True), + 'Py_AddPendingCall not currently supported.') @unittest.skipUnless(threading, 'Threading required for this test.') class TestPendingCalls(unittest.TestCase): @@ -327,6 +329,8 @@ self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords, (), {}, b'', [42]) + at unittest.skipIf(support.check_impl_detail(pypy=True), + 'Not currently supported under PyPy') @unittest.skipUnless(threading, 'Threading required for this test.') class TestThreadState(unittest.TestCase): diff --git a/lib-python/3/test/test_descr.py b/lib-python/3/test/test_descr.py --- a/lib-python/3/test/test_descr.py +++ b/lib-python/3/test/test_descr.py @@ -1196,7 +1196,7 @@ self.assertEqual(Counted.counter, 0) # Test lookup leaks [SF bug 572567] - if hasattr(gc, 'get_objects'): + if hasattr(gc, 'get_objects') and support.check_impl_detail(pypy=False): class G(object): def __eq__(self, other): return False @@ -3035,15 +3035,24 @@ class R(J): __slots__ = ["__dict__", "__weakref__"] - for cls, cls2 in ((G, H), (G, I), (I, H), (Q, R), (R, Q)): + if support.check_impl_detail(pypy=False): + lst = ((G, H), (G, I), (I, H), (Q, R), (R, Q)) + else: + # Not supported in pypy: changing the __class__ of an object + # to another __class__ that just happens to have the same slots. 
+ # If needed, we can add the feature, but what we'll likely do + # then is to allow mostly any __class__ assignment, even if the + # classes have different __slots__, because we it's easier. + lst = ((Q, R), (R, Q)) + for cls, cls2 in lst: x = cls() x.a = 1 x.__class__ = cls2 - self.assertIs(x.__class__, cls2, + self.assertTrue(x.__class__ is cls2, "assigning %r as __class__ for %r silently failed" % (cls2, x)) self.assertEqual(x.a, 1) x.__class__ = cls - self.assertIs(x.__class__, cls, + self.assertTrue(x.__class__ is cls, "assigning %r as __class__ for %r silently failed" % (cls, x)) self.assertEqual(x.a, 1) for cls in G, J, K, L, M, N, P, R, list, Int: @@ -3055,7 +3064,8 @@ # Issue5283: when __class__ changes in __del__, the wrong # type gets DECREF'd. class O(object): - pass + def __del__(self): + pass class A(object): def __del__(self): self.__class__ = O @@ -3118,7 +3128,8 @@ except TypeError: pass else: - self.fail("%r's __dict__ can be modified" % cls) + if support.check_impl_detail(pypy=False): + self.fail("%r's __dict__ can be modified" % cls) # Modules also disallow __dict__ assignment class Module1(types.ModuleType, Base): diff --git a/lib-python/3/test/test_exceptions.py b/lib-python/3/test/test_exceptions.py --- a/lib-python/3/test/test_exceptions.py +++ b/lib-python/3/test/test_exceptions.py @@ -512,6 +512,7 @@ except MyException as e: pass obj = None + gc_collect() obj = wr() self.assertTrue(obj is None, "%s" % obj) @@ -523,6 +524,7 @@ except MyException: pass obj = None + gc_collect() obj = wr() self.assertTrue(obj is None, "%s" % obj) @@ -534,6 +536,7 @@ except: pass obj = None + gc_collect() obj = wr() self.assertTrue(obj is None, "%s" % obj) @@ -546,6 +549,7 @@ except: break obj = None + gc_collect() # XXX it seems it's not enough obj = wr() self.assertTrue(obj is None, "%s" % obj) @@ -564,6 +568,7 @@ # must clear the latter manually for our test to succeed. 
e.__context__ = None obj = None + gc_collect() obj = wr() # guarantee no ref cycles on CPython (don't gc_collect) if check_impl_detail(cpython=False): @@ -708,6 +713,7 @@ next(g) testfunc(g) g = obj = None + gc_collect() obj = wr() self.assertIs(obj, None) @@ -761,6 +767,7 @@ raise Exception(MyObject()) except: pass + gc_collect() self.assertEqual(e, (None, None, None)) def testUnicodeChangeAttributes(self): @@ -911,6 +918,7 @@ self.assertNotEqual(wr(), None) else: self.fail("MemoryError not raised") + gc_collect() self.assertEqual(wr(), None) @no_tracing @@ -931,6 +939,7 @@ self.assertNotEqual(wr(), None) else: self.fail("RuntimeError not raised") + gc_collect() self.assertEqual(wr(), None) def test_errno_ENOTDIR(self): diff --git a/lib-python/3/test/test_fileio.py b/lib-python/3/test/test_fileio.py --- a/lib-python/3/test/test_fileio.py +++ b/lib-python/3/test/test_fileio.py @@ -10,6 +10,7 @@ from functools import wraps from test.support import TESTFN, check_warnings, run_unittest, make_bad_fd, cpython_only +from test.support import gc_collect from collections import UserList from _io import FileIO as _FileIO @@ -32,6 +33,7 @@ self.assertEqual(self.f.tell(), p.tell()) self.f.close() self.f = None + gc_collect() self.assertRaises(ReferenceError, getattr, p, 'tell') def testSeekTell(self): diff --git a/lib-python/3/test/test_functools.py b/lib-python/3/test/test_functools.py --- a/lib-python/3/test/test_functools.py +++ b/lib-python/3/test/test_functools.py @@ -45,6 +45,8 @@ self.assertEqual(p.args, (1, 2)) self.assertEqual(p.keywords, dict(a=10, b=20)) # attributes should not be writable + if not support.check_impl_detail(): + return self.assertRaises(AttributeError, setattr, p, 'func', map) self.assertRaises(AttributeError, setattr, p, 'args', (1, 2)) self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2)) @@ -136,6 +138,7 @@ p = proxy(f) self.assertEqual(f.func, p.func) f = None + support.gc_collect() self.assertRaises(ReferenceError, getattr, 
p, 'func') def test_with_bound_and_unbound_methods(self): @@ -192,9 +195,13 @@ raise IndexError f = self.thetype(object) - self.assertRaisesRegex(SystemError, - "new style getargs format but argument is not a tuple", - f.__setstate__, BadSequence()) + if support.check_impl_detail(pypy=True): + # CPython fails, pypy does not :-) + f.__setstate__(BadSequence()) + else: + self.assertRaisesRegex(SystemError, + "new style getargs format but argument is not a tuple", + f.__setstate__, BadSequence()) class PartialSubclass(functools.partial): pass @@ -223,7 +230,7 @@ updated=functools.WRAPPER_UPDATES): # Check attributes were assigned for name in assigned: - self.assertTrue(getattr(wrapper, name) is getattr(wrapped, name)) + self.assertTrue(getattr(wrapper, name) == getattr(wrapped, name)) # Check attributes were updated for name in updated: wrapper_attr = getattr(wrapper, name) diff --git a/lib-python/3/test/test_imp.py b/lib-python/3/test/test_imp.py --- a/lib-python/3/test/test_imp.py +++ b/lib-python/3/test/test_imp.py @@ -317,6 +317,7 @@ @unittest.skipUnless(sys.implementation.cache_tag is not None, 'requires sys.implementation.cache_tag not be None') + @support.impl_detail("PyPy ignores the optimize flag", pypy=False) def test_cache_from_source(self): # Given the path to a .py file, return the path to its PEP 3147 # defined .pyc file (i.e. under __pycache__). @@ -338,6 +339,7 @@ 'file{}.pyc'.format(self.tag)) self.assertEqual(imp.cache_from_source(path, True), expect) + @support.impl_detail("PyPy ignores the optimize flag", pypy=False) def test_cache_from_source_optimized(self): # Given the path to a .py file, return the path to its PEP 3147 # defined .pyo file (i.e. under __pycache__). 
diff --git a/lib-python/3/test/test_int.py b/lib-python/3/test/test_int.py --- a/lib-python/3/test/test_int.py +++ b/lib-python/3/test/test_int.py @@ -307,9 +307,10 @@ try: int(TruncReturnsNonIntegral()) except TypeError as e: - self.assertEqual(str(e), - "__trunc__ returned non-Integral" - " (type NonIntegral)") + if support.check_impl_detail(pypy=False): + self.assertEqual(str(e), + "__trunc__ returned non-Integral" + " (type NonIntegral)") else: self.fail("Failed to raise TypeError with %s" % ((base, trunc_result_base),)) diff --git a/lib-python/3/test/test_marshal.py b/lib-python/3/test/test_marshal.py --- a/lib-python/3/test/test_marshal.py +++ b/lib-python/3/test/test_marshal.py @@ -203,6 +203,7 @@ s = b'c' + (b'X' * 4*4) + b'{' * 2**20 self.assertRaises(ValueError, marshal.loads, s) + @support.impl_detail('specific recursion check') def test_recursion_limit(self): # Create a deeply nested structure. head = last = [] @@ -291,6 +292,10 @@ LARGE_SIZE = 2**31 pointer_size = 8 if sys.maxsize > 0xFFFFFFFF else 4 +if support.check_impl_detail(pypy=False): + sizeof_large_size = sys.getsizeof(LARGE_SIZE-1) +else: + sizeof_large_size = 32 # Some value for PyPy class NullWriter: def write(self, s): @@ -318,13 +323,13 @@ self.check_unmarshallable([None] * size) @support.bigmemtest(size=LARGE_SIZE, - memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1), + memuse=pointer_size*12 + sizeof_large_size, dry_run=False) def test_set(self, size): self.check_unmarshallable(set(range(size))) @support.bigmemtest(size=LARGE_SIZE, - memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1), + memuse=pointer_size*12 + sizeof_large_size, dry_run=False) def test_frozenset(self, size): self.check_unmarshallable(frozenset(range(size))) diff --git a/lib-python/3/test/test_peepholer.py b/lib-python/3/test/test_peepholer.py --- a/lib-python/3/test/test_peepholer.py +++ b/lib-python/3/test/test_peepholer.py @@ -81,10 +81,13 @@ self.assertIn(elem, asm) def test_pack_unpack(self): + # On PyPy, "a, b 
= ..." is even more optimized, by removing + # the ROT_TWO. But the ROT_TWO is not removed if assigning + # to more complex expressions, so check that. for line, elem in ( ('a, = a,', 'LOAD_CONST',), - ('a, b = a, b', 'ROT_TWO',), - ('a, b, c = a, b, c', 'ROT_THREE',), + ('a[1], b = a, b', 'ROT_TWO',), + ('a, b[2], c = a, b, c', 'ROT_THREE',), ): asm = dis_single(line) self.assertIn(elem, asm) @@ -92,6 +95,8 @@ self.assertNotIn('UNPACK_TUPLE', asm) def test_folding_of_tuples_of_constants(self): + # On CPython, "a,b,c=1,2,3" turns into "a,b,c=" + # but on PyPy, it turns into "a=1;b=2;c=3". for line, elem in ( ('a = 1,2,3', '((1, 2, 3))'), ('("a","b","c")', "(('a', 'b', 'c'))"), @@ -100,7 +105,8 @@ ('((1, 2), 3, 4)', '(((1, 2), 3, 4))'), ): asm = dis_single(line) - self.assertIn(elem, asm) + self.assert_(elem in asm or ( + line == 'a,b,c = 1,2,3' and 'UNPACK_TUPLE' not in asm)) self.assertNotIn('BUILD_TUPLE', asm) # Long tuples should be folded too. diff --git a/lib-python/3/test/test_subprocess.py b/lib-python/3/test/test_subprocess.py --- a/lib-python/3/test/test_subprocess.py +++ b/lib-python/3/test/test_subprocess.py @@ -1314,6 +1314,7 @@ stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=raise_it) + @support.impl_detail("PyPy's _posixsubprocess doesn't have to disable gc") def test_preexec_gc_module_failure(self): # This tests the code that disables garbage collection if the child # process will execute any Python. 
@@ -1964,6 +1965,7 @@ ident = id(p) pid = p.pid del p + support.gc_collect() # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) @@ -1983,6 +1985,7 @@ ident = id(p) pid = p.pid del p + support.gc_collect() os.kill(pid, signal.SIGKILL) # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) diff --git a/lib-python/3/test/test_sys.py b/lib-python/3/test/test_sys.py --- a/lib-python/3/test/test_sys.py +++ b/lib-python/3/test/test_sys.py @@ -405,8 +405,10 @@ self.assertEqual(len(sys.float_info), 11) self.assertEqual(sys.float_info.radix, 2) self.assertEqual(len(sys.int_info), 2) - self.assertTrue(sys.int_info.bits_per_digit % 5 == 0) - self.assertTrue(sys.int_info.sizeof_digit >= 1) + if test.support.check_impl_detail(cpython=True): + self.assertTrue(sys.int_info.bits_per_digit % 5 == 0) + else: + self.assertTrue(sys.int_info.sizeof_digit >= 1) self.assertEqual(type(sys.int_info.bits_per_digit), int) self.assertEqual(type(sys.int_info.sizeof_digit), int) self.assertIsInstance(sys.hexversion, int) @@ -503,6 +505,7 @@ self.assertTrue(repr(sys.flags)) self.assertEqual(len(sys.flags), len(attrs)) + @test.support.impl_detail("sys._clear_type_cache", pypy=False) def test_clear_type_cache(self): sys._clear_type_cache() diff --git a/lib-python/3/test/test_weakref.py b/lib-python/3/test/test_weakref.py --- a/lib-python/3/test/test_weakref.py +++ b/lib-python/3/test/test_weakref.py @@ -8,6 +8,7 @@ import copy from test import support +from test.support import gc_collect # Used in ReferencesTestCase.test_ref_created_during_del() . 
ref_from_del = None @@ -88,6 +89,7 @@ ref1 = weakref.ref(o, self.callback) ref2 = weakref.ref(o, self.callback) del o + gc_collect() self.assertIsNone(ref1(), "expected reference to be invalidated") self.assertIsNone(ref2(), "expected reference to be invalidated") self.assertEqual(self.cbcalled, 2, @@ -117,13 +119,16 @@ ref1 = weakref.proxy(o, self.callback) ref2 = weakref.proxy(o, self.callback) del o + gc_collect() def check(proxy): proxy.bar self.assertRaises(ReferenceError, check, ref1) self.assertRaises(ReferenceError, check, ref2) - self.assertRaises(ReferenceError, bool, weakref.proxy(C())) + ref3 = weakref.proxy(C()) + gc_collect() + self.assertRaises(ReferenceError, bool, ref3) self.assertEqual(self.cbcalled, 2) def check_basic_ref(self, factory): @@ -140,6 +145,7 @@ o = factory() ref = weakref.ref(o, self.callback) del o + gc_collect() self.assertEqual(self.cbcalled, 1, "callback did not properly set 'cbcalled'") self.assertIsNone(ref(), @@ -164,6 +170,7 @@ self.assertEqual(weakref.getweakrefcount(o), 2, "wrong weak ref count for object") del proxy + gc_collect() self.assertEqual(weakref.getweakrefcount(o), 1, "wrong weak ref count for object after deleting proxy") @@ -338,6 +345,7 @@ "got wrong number of weak reference objects") del ref1, ref2, proxy1, proxy2 + gc_collect() self.assertEqual(weakref.getweakrefcount(o), 0, "weak reference objects not unlinked from" " referent when discarded.") @@ -351,6 +359,7 @@ ref1 = weakref.ref(o, self.callback) ref2 = weakref.ref(o, self.callback) del ref1 + gc_collect() self.assertEqual(weakref.getweakrefs(o), [ref2], "list of refs does not match") @@ -358,10 +367,12 @@ ref1 = weakref.ref(o, self.callback) ref2 = weakref.ref(o, self.callback) del ref2 + gc_collect() self.assertEqual(weakref.getweakrefs(o), [ref1], "list of refs does not match") del ref1 + gc_collect() self.assertEqual(weakref.getweakrefs(o), [], "list of refs not cleared") @@ -647,9 +658,11 @@ gc.collect() self.assertEqual(alist, []) + 
@support.impl_detail(pypy=False) def test_gc_during_ref_creation(self): self.check_gc_during_creation(weakref.ref) + @support.impl_detail(pypy=False) def test_gc_during_proxy_creation(self): self.check_gc_during_creation(weakref.proxy) @@ -811,6 +824,7 @@ self.assertTrue(mr.called) self.assertEqual(mr.value, 24) del o + gc_collect() self.assertIsNone(mr()) self.assertTrue(mr.called) @@ -917,6 +931,7 @@ n1 = len(dct) del it gc.collect() + gc.collect() n2 = len(dct) # one item may be kept alive inside the iterator self.assertIn(n1, (0, 1)) @@ -928,6 +943,7 @@ def test_weak_valued_len_cycles(self): self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k)) + @support.impl_detail(pypy=False) def check_len_race(self, dict_type, cons): # Extended sanity checks for len() in the face of cyclic collection self.addCleanup(gc.set_threshold, *gc.get_threshold()) @@ -976,15 +992,18 @@ del items1, items2 self.assertEqual(len(dict), self.COUNT) del objects[0] + gc_collect() self.assertEqual(len(dict), self.COUNT - 1, "deleting object did not cause dictionary update") del objects, o + gc_collect() self.assertEqual(len(dict), 0, "deleting the values did not clear the dictionary") # regression on SF bug #447152: dict = weakref.WeakValueDictionary() self.assertRaises(KeyError, dict.__getitem__, 1) dict[2] = C() + gc_collect() self.assertRaises(KeyError, dict.__getitem__, 2) def test_weak_keys(self): @@ -1005,9 +1024,11 @@ del items1, items2 self.assertEqual(len(dict), self.COUNT) del objects[0] + gc_collect() self.assertEqual(len(dict), (self.COUNT - 1), "deleting object did not cause dictionary update") del objects, o + gc_collect() self.assertEqual(len(dict), 0, "deleting the keys did not clear the dictionary") o = Object(42) @@ -1368,6 +1389,7 @@ for o in objs: count += 1 del d[o] + gc_collect() self.assertEqual(len(d), 0) self.assertEqual(count, 2) @@ -1389,6 +1411,7 @@ libreftest = """ Doctest for examples in the library reference: weakref.rst +>>> from test.support 
import gc_collect >>> import weakref >>> class Dict(dict): ... pass @@ -1408,6 +1431,7 @@ >>> o is o2 True >>> del o, o2 +>>> gc_collect() >>> print(r()) None @@ -1460,6 +1484,7 @@ >>> id2obj(a_id) is a True >>> del a +>>> gc_collect() >>> try: ... id2obj(a_id) ... except KeyError: diff --git a/lib-python/3/test/test_weakset.py b/lib-python/3/test/test_weakset.py --- a/lib-python/3/test/test_weakset.py +++ b/lib-python/3/test/test_weakset.py @@ -416,11 +416,13 @@ n1 = len(s) del it gc.collect() + gc.collect() n2 = len(s) # one item may be kept alive inside the iterator self.assertIn(n1, (0, 1)) self.assertEqual(n2, 0) + @support.impl_detail("PyPy has no cyclic collection", pypy=False) def test_len_race(self): # Extended sanity checks for len() in the face of cyclic collection self.addCleanup(gc.set_threshold, *gc.get_threshold()) From noreply at buildbot.pypy.org Sun Jul 27 20:40:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 27 Jul 2014 20:40:03 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: reapply another workaround from the py3k branch Message-ID: <20140727184003.A1D171C024A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72573:37baa95f62f2 Date: 2014-07-26 14:52 -0700 http://bitbucket.org/pypy/pypy/changeset/37baa95f62f2/ Log: reapply another workaround from the py3k branch diff --git a/lib-python/3/test/test_marshal.py b/lib-python/3/test/test_marshal.py --- a/lib-python/3/test/test_marshal.py +++ b/lib-python/3/test/test_marshal.py @@ -318,13 +318,13 @@ self.check_unmarshallable([None] * size) @support.bigmemtest(size=LARGE_SIZE, - memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1), + memuse=pointer_size*12, # + sys.getsizeof(LARGE_SIZE-1), dry_run=False) def test_set(self, size): self.check_unmarshallable(set(range(size))) @support.bigmemtest(size=LARGE_SIZE, - memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1), + memuse=pointer_size*12, # + sys.getsizeof(LARGE_SIZE-1), dry_run=False) def 
test_frozenset(self, size): self.check_unmarshallable(frozenset(range(size))) From noreply at buildbot.pypy.org Sun Jul 27 20:40:04 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 27 Jul 2014 20:40:04 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: call this args_w per the naming conventions Message-ID: <20140727184004.DBE671C024A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72574:d13fde936489 Date: 2014-07-27 11:35 -0700 http://bitbucket.org/pypy/pypy/changeset/d13fde936489/ Log: call this args_w per the naming conventions diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -670,8 +670,8 @@ def descr_reduce(self, space): w_map = space.getattr(space.getbuiltinmodule('builtins'), space.wrap('map')) - args = [self.w_fun] + self.iterators_w - return space.newtuple([w_map, space.newtuple(args)]) + args_w = [self.w_fun] + self.iterators_w + return space.newtuple([w_map, space.newtuple(args_w)]) def W_Map___new__(space, w_subtype, w_fun, args_w): @@ -721,9 +721,9 @@ def descr_reduce(self, space): w_filter = space.getattr(space.getbuiltinmodule('builtins'), space.wrap('filter')) - args = [space.w_None if self.no_predicate else self.w_predicate, - self.iterable] - return space.newtuple([w_filter, space.newtuple(args)]) + args_w = [space.w_None if self.no_predicate else self.w_predicate, + self.iterable] + return space.newtuple([w_filter, space.newtuple(args_w)]) def W_Filter___new__(space, w_subtype, w_predicate, w_iterable): From noreply at buildbot.pypy.org Sun Jul 27 20:40:06 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 27 Jul 2014 20:40:06 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: pep8/use oefmt Message-ID: <20140727184006.1E2EF1C024A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72575:6704fc912ee1 Date: 2014-07-27 11:35 -0700 
http://bitbucket.org/pypy/pypy/changeset/6704fc912ee1/ Log: pep8/use oefmt diff --git a/pypy/module/operator/test/test_tscmp.py b/pypy/module/operator/test/test_tscmp.py --- a/pypy/module/operator/test/test_tscmp.py +++ b/pypy/module/operator/test/test_tscmp.py @@ -1,5 +1,4 @@ from pypy.module.operator.tscmp import pypy_tscmp -from rpython.rtyper.lltypesystem.rffi import scoped_nonmovingbuffer class TestTimingSafeCompare: def test_tscmp_neq(self): diff --git a/pypy/module/operator/tscmp.py b/pypy/module/operator/tscmp.py --- a/pypy/module/operator/tscmp.py +++ b/pypy/module/operator/tscmp.py @@ -3,9 +3,11 @@ attacks for the hmac module. """ import py + from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo -from pypy.interpreter.error import OperationError + +from pypy.interpreter.error import OperationError, oefmt cwd = py.path.local(__file__).dirpath() eci = ExternalCompilationInfo( @@ -13,15 +15,21 @@ separate_module_files=[cwd.join('tscmp.c')], export_symbols=['pypy_tscmp']) + def llexternal(*args, **kwargs): kwargs.setdefault('compilation_info', eci) kwargs.setdefault('sandboxsafe', True) return rffi.llexternal(*args, **kwargs) -pypy_tscmp = llexternal('pypy_tscmp', [rffi.CCHARP, rffi.CCHARP, rffi.LONG, rffi.LONG], rffi.INT) + +pypy_tscmp = llexternal('pypy_tscmp', + [rffi.CCHARP, rffi.CCHARP, rffi.LONG, rffi.LONG], + rffi.INT) + def compare_digest(space, w_a, w_b): - if space.isinstance_w(w_a, space.w_unicode) and space.isinstance_w(w_b, space.w_unicode): + if (space.isinstance_w(w_a, space.w_unicode) and + space.isinstance_w(w_b, space.w_unicode)): try: a_value = space.call_method(w_a, "encode", space.wrap("ascii")) b_value = space.call_method(w_b, "encode", space.wrap("ascii")) @@ -29,14 +37,17 @@ except OperationError as e: if not e.match(space, space.w_UnicodeEncodeError): raise - raise OperationError(space.w_TypeError, - space.wrap("comparing strings with non-ASCII characters is not 
supported")) + raise oefmt(space.w_TypeError, + "comparing strings with non-ASCII characters is not " + "supported") else: return compare_digest_buffer(space, w_a, w_b) + def compare_digest_buffer(space, w_a, w_b): a = space.bufferstr_w(w_a) b = space.bufferstr_w(w_b) with rffi.scoped_nonmovingbuffer(a) as a_buffer: with rffi.scoped_nonmovingbuffer(b) as b_buffer: - return space.wrap(rffi.cast(lltype.Bool, pypy_tscmp(a_buffer, b_buffer, len(a), len(b)))) + result = pypy_tscmp(a_buffer, b_buffer, len(a), len(b)) + return space.wrap(rffi.cast(lltype.Bool, result)) From noreply at buildbot.pypy.org Sun Jul 27 20:40:07 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 27 Jul 2014 20:40:07 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: simplify Message-ID: <20140727184007.56B5C1C024A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72576:504023330351 Date: 2014-07-27 11:36 -0700 http://bitbucket.org/pypy/pypy/changeset/504023330351/ Log: simplify diff --git a/pypy/module/operator/tscmp.py b/pypy/module/operator/tscmp.py --- a/pypy/module/operator/tscmp.py +++ b/pypy/module/operator/tscmp.py @@ -31,17 +31,15 @@ if (space.isinstance_w(w_a, space.w_unicode) and space.isinstance_w(w_b, space.w_unicode)): try: - a_value = space.call_method(w_a, "encode", space.wrap("ascii")) - b_value = space.call_method(w_b, "encode", space.wrap("ascii")) - return compare_digest_buffer(space, a_value, b_value) + w_a = space.call_method(w_a, 'encode', space.wrap('ascii')) + w_b = space.call_method(w_b, 'encode', space.wrap('ascii')) except OperationError as e: if not e.match(space, space.w_UnicodeEncodeError): raise raise oefmt(space.w_TypeError, "comparing strings with non-ASCII characters is not " "supported") - else: - return compare_digest_buffer(space, w_a, w_b) + return compare_digest_buffer(space, w_a, w_b) def compare_digest_buffer(space, w_a, w_b): From noreply at buildbot.pypy.org Sun Jul 27 20:40:08 2014 From: noreply at buildbot.pypy.org 
(pjenvey) Date: Sun, 27 Jul 2014 20:40:08 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge upstream Message-ID: <20140727184008.8B8A71C024A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72577:36addf5a5e3d Date: 2014-07-27 11:39 -0700 http://bitbucket.org/pypy/pypy/changeset/36addf5a5e3d/ Log: merge upstream From noreply at buildbot.pypy.org Sun Jul 27 22:04:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 27 Jul 2014 22:04:45 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fix translation: uid is based on id (not rpython), we need to use getaddrstring Message-ID: <20140727200445.9B8DD1C03AC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72578:6f49b270e66a Date: 2014-07-27 13:03 -0700 http://bitbucket.org/pypy/pypy/changeset/6f49b270e66a/ Log: fix translation: uid is based on id (not rpython), we need to use getaddrstring instead diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -75,8 +75,9 @@ if self.w_value is None: content = "empty" else: - content = "%s object at 0x%x" % (space.type(self.w_value).name, uid(self.w_value)) - s = "" % (uid(self), content) + content = "%s object at 0x%s" % (space.type(self.w_value).name, + self.w_value.getaddrstring(space)) + s = "" % (self.getaddrstring(space), content) return space.wrap(s.decode('utf-8')) def descr__cell_contents(self, space): From noreply at buildbot.pypy.org Sun Jul 27 22:12:42 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 27 Jul 2014 22:12:42 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Replace outdated figure. Message-ID: <20140727201242.E1BAD1C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72579:a14b0250404e Date: 2014-07-27 22:12 +0200 http://bitbucket.org/pypy/pypy/changeset/a14b0250404e/ Log: Replace outdated figure. 
diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -635,11 +635,35 @@ As should be clear by now, the translation toolchain of PyPy is a flexible and complicated beast, formed from many separate components. -The following image summarizes the various parts of the toolchain as of the -0.9 release, with the default translation to C highlighted: +.. digraph:: translation -.. image:: _static/pypy-translation-0.9.png - :align: center + graph [fontname = "Sans-Serif", size="6.00"] + node [fontname = "Sans-Serif"] + edge [fontname = "Sans-Serif"] + + subgraph legend { + "Input or Output" [shape=ellipse, style=filled] + "Transformation Step" [shape=box, style="rounded,filled"] + // Invisible egde to make sure they are placed vertically + "Input or Output" -> "Transformation Step" [style=invis] + } + + "Input Program" [shape=ellipse] + "Flow Analysis" [shape=box, style=rounded] + "Annotator" [shape=box, style=rounded] + "RTyper" [shape=box, style=rounded] + "Backend Optimizations (optional)" [shape=box, style=rounded] + "Exception Transformer" [shape=box, style=rounded] + "GC Transformer" [shape=box, style=rounded] + "GenC" [shape=box, style=rounded] + "ANSI C code" [shape=ellipse] + + "Input Program" -> "Flow Analysis" -> "Annotator" -> "RTyper" + -> "Backend Optimizations (optional)" -> "Exception Transformer" + -> "GC Transformer" + "RTyper" -> "Exception Transformer" [style=dotted] + "GC Transformer" -> "GenC" -> "ANSI C code" + // "GC Transformer" -> "GenLLVM" -> "LLVM IR" A detail that has not yet been emphasized is the interaction of the various components. 
It makes for a nice presentation to say that From noreply at buildbot.pypy.org Sun Jul 27 23:07:26 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 27 Jul 2014 23:07:26 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: hopefully fix translation (tscmp.c:5:10: fatal error: 'tscmp.h' file not found) Message-ID: <20140727210726.DD2C81C0547@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72580:871e353f33fc Date: 2014-07-27 14:06 -0700 http://bitbucket.org/pypy/pypy/changeset/871e353f33fc/ Log: hopefully fix translation (tscmp.c:5:10: fatal error: 'tscmp.h' file not found) diff --git a/pypy/module/operator/tscmp.py b/pypy/module/operator/tscmp.py --- a/pypy/module/operator/tscmp.py +++ b/pypy/module/operator/tscmp.py @@ -12,6 +12,7 @@ cwd = py.path.local(__file__).dirpath() eci = ExternalCompilationInfo( includes=[cwd.join('tscmp.h')], + include_dirs=[str(cwd)], separate_module_files=[cwd.join('tscmp.c')], export_symbols=['pypy_tscmp']) From noreply at buildbot.pypy.org Sun Jul 27 23:12:31 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 27 Jul 2014 23:12:31 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Remove two sections: Message-ID: <20140727211231.58D461C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72581:8dcbeb5dabed Date: 2014-07-27 23:11 +0200 http://bitbucket.org/pypy/pypy/changeset/8dcbeb5dabed/ Log: Remove two sections: 1. The section about other backends. Currently there's only the C backend. 2. The section about external function calls doesn't make sense here. diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -597,9 +597,7 @@ :source:`rpython/translator/c/` -GenC is usually the most actively maintained backend -- everyone working on -PyPy has a C compiler, for one thing -- and is usually where new features are -implemented first. 
+This is currently the sole code generation backend. A Historical Note @@ -615,20 +613,6 @@ separately has become clear. -Other backends --------------- - -Use the :config:`translation.backend` option to choose which backend to use. - - -.. _extfunccalls: - -External Function Calls ------------------------ - -The external function call approach is described in :doc:`rffi ` documentation. - - How It Fits Together -------------------- From noreply at buildbot.pypy.org Sun Jul 27 23:41:19 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 27 Jul 2014 23:41:19 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Move two paragraphs from architecture.rst to index.rst, which now are used as introductory words (with some modifications). Message-ID: <20140727214119.BE3CA1C03AC@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72582:48825b4cc915 Date: 2014-07-27 23:40 +0200 http://bitbucket.org/pypy/pypy/changeset/48825b4cc915/ Log: Move two paragraphs from architecture.rst to index.rst, which now are used as introductory words (with some modifications). diff --git a/rpython/doc/architecture.rst b/rpython/doc/architecture.rst --- a/rpython/doc/architecture.rst +++ b/rpython/doc/architecture.rst @@ -1,26 +1,6 @@ Goals and Architecture Overview =============================== -.. contents:: - -Mission statement ------------------ - -We aim to provide a common translation and support framework for producing -implementations of dynamic languages, emphasizing a clean separation between -language specification and implementation aspects. We call this the -:doc:`RPython toolchain `. - -By separating concerns in this way, our implementation -of Python - and other dynamic languages - is able to automatically -generate a Just-in-Time compiler for any dynamic language. 
It also -allows a mix-and-match approach to implementation decisions, including -many that have historically been outside of a user's control, such as -target platform, memory and -threading models, garbage collection strategies, and optimizations applied, -including whether or not to have a JIT in the first place. - - High Level Goals ---------------- diff --git a/rpython/doc/index.rst b/rpython/doc/index.rst --- a/rpython/doc/index.rst +++ b/rpython/doc/index.rst @@ -1,6 +1,19 @@ Welcome to RPython's documentation! =================================== +RPython is a translation and support framework for producing implementations of +dynamic languages, emphasizing a clean separation between language +specification and implementation aspects. + +By separating concerns in this way, our implementation of Python - and other +dynamic languages - is able to automatically generate a Just-in-Time compiler +for any dynamic language. It also allows a mix-and-match approach to +implementation decisions, including many that have historically been outside of +a user's control, such as target platform, memory and threading models, garbage +collection strategies, and optimizations applied, including whether or not to +have a JIT in the first place. 
+ + Table of Contents ----------------- From noreply at buildbot.pypy.org Sun Jul 27 23:57:25 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 27 Jul 2014 23:57:25 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: allow generators to return values (part of pep380) Message-ID: <20140727215725.EE28E1C0547@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72583:8a51205e1709 Date: 2014-07-27 14:06 -0700 http://bitbucket.org/pypy/pypy/changeset/8a51205e1709/ Log: allow generators to return values (part of pep380) diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -244,18 +244,12 @@ return Scope.note_symbol(self, identifier, role) def note_yield(self, yield_node): - if self.return_with_value: - raise SyntaxError("'return' with argument inside generator", - self.ret.lineno, self.ret.col_offset) self.is_generator = True if self._in_try_body_depth > 0: self.has_yield_inside_try = True def note_return(self, ret): if ret.value: - if self.is_generator: - raise SyntaxError("'return' with argument inside generator", - ret.lineno, ret.col_offset) self.return_with_value = True self.ret = ret diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -361,8 +361,7 @@ assert exc.msg == "'yield' outside function" for input in ("yield\n return x", "return x\n yield"): input = "def f():\n " + input - exc = py.test.raises(SyntaxError, self.func_scope, input).value - assert exc.msg == "'return' with argument inside generator" + scp = self.func_scope(input) scp = self.func_scope("def f():\n return\n yield x") def test_yield_inside_try(self): diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- 
a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -99,7 +99,7 @@ # if the frame is now marked as finished, it was RETURNed from if frame.frame_finished_execution: self.frame = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, w_result) else: return w_result # YIELDed finally: diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -301,6 +301,21 @@ raise StopIteration assert tuple(f()) == (1,) + def test_yield_return(self): + """ + def f(): + yield 1 + return 2 + g = f() + assert next(g) == 1 + try: + next(g) + except StopIteration as e: + assert e.value == 2 + else: + assert False, 'Expected StopIteration' + """ + def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline From noreply at buildbot.pypy.org Mon Jul 28 00:45:10 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 28 Jul 2014 00:45:10 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fill in _STRUCT_TM_TIMES Message-ID: <20140727224510.C292D1C0547@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72584:ca139d96a0fa Date: 2014-07-27 15:44 -0700 http://bitbucket.org/pypy/pypy/changeset/ca139d96a0fa/ Log: fill in _STRUCT_TM_TIMES diff --git a/pypy/module/rctime/__init__.py b/pypy/module/rctime/__init__.py --- a/pypy/module/rctime/__init__.py +++ b/pypy/module/rctime/__init__.py @@ -17,6 +17,7 @@ 'mktime': 'interp_time.mktime', 'strftime': 'interp_time.strftime', 'sleep' : 'interp_time.sleep', + '_STRUCT_TM_ITEMS': 'space.wrap(interp_time._STRUCT_TM_ITEMS)', } if os.name == "posix": diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -134,6 +134,9 @@ ("tm_mon", rffi.INT), ("tm_year", rffi.INT), ("tm_wday", rffi.INT), 
("tm_yday", rffi.INT), ("tm_isdst", rffi.INT)]) +# XXX: optionally support the 2 additional tz fields +_STRUCT_TM_ITEMS = 9 + class cConfig: pass diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -11,6 +11,7 @@ assert isinstance(rctime.timezone, int) assert isinstance(rctime.tzname, tuple) assert isinstance(rctime.__doc__, str) + assert isinstance(rctime._STRUCT_TM_ITEMS, int) def test_sleep(self): import time as rctime From noreply at buildbot.pypy.org Mon Jul 28 01:50:10 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 28 Jul 2014 01:50:10 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: avoid deprecated imp.get_suffixes Message-ID: <20140727235010.1F2F81C0547@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72585:f27c9e5a772b Date: 2014-07-27 16:48 -0700 http://bitbucket.org/pypy/pypy/changeset/f27c9e5a772b/ Log: avoid deprecated imp.get_suffixes diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py --- a/lib-python/3/distutils/command/build_ext.py +++ b/lib-python/3/distutils/command/build_ext.py @@ -4,7 +4,8 @@ modules (currently limited to C extensions, should accommodate C++ extensions ASAP).""" -import sys, os, re, imp +import sys, os, re +import importlib.machinery from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version @@ -36,9 +37,8 @@ show_compilers() def _get_c_extension_suffix(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext + suffixes = importlib.machinery.EXTENSION_SUFFIXES + return suffixes[0] if suffixes else None class build_ext(Command): diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,5 +1,6 @@ import os, sys, imp 
import tempfile, binascii +import importlib.machinery def get_hashed_dir(cfile): @@ -28,9 +29,8 @@ def _get_c_extension_suffix(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext + suffixes = importlib.machinery.EXTENSION_SUFFIXES + return suffixes[0] if suffixes else None def compile_shared(csource, modulename, output_dir=None): diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -1,7 +1,17 @@ -import sys, os, binascii, imp, shutil +import sys, os, binascii, shutil from . import __version__ from . import ffiplatform +if sys.version_info >= (3, 3): + import importlib.machinery + def extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + class Verifier(object): @@ -222,11 +232,7 @@ pass def _get_so_suffixes(): - suffixes = [] - for suffix, mode, type in imp.get_suffixes(): - if type == imp.C_EXTENSION: - suffixes.append(suffix) - + suffixes = extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext if sys.platform == 'win32': diff --git a/lib_pypy/pyrepl/module_lister.py b/lib_pypy/pyrepl/module_lister.py --- a/lib_pypy/pyrepl/module_lister.py +++ b/lib_pypy/pyrepl/module_lister.py @@ -40,8 +40,8 @@ return sorted(set(l)) def _make_module_list(): - import imp - suffs = [x[0] for x in imp.get_suffixes() if x[0] != '.pyc'] + import importlib.machinery + suffs = [x for x in importlib.machinery.all_suffixes() if x != '.pyc'] suffs.sort(reverse=True) _packages[''] = list(sys.builtin_module_names) for dir in sys.path: From noreply at buildbot.pypy.org Mon Jul 28 01:50:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 28 Jul 2014 01:50:11 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140727235011.8F5E51C0547@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72586:02100350c9f0 Date: 2014-07-27 16:49 -0700 http://bitbucket.org/pypy/pypy/changeset/02100350c9f0/ Log: merge default diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -309,11 +309,9 @@ #endif int _m_ispad(WINDOW *win) { -#if defined WINDOW_HAS_FLAGS + // may not have _flags (and possibly _ISPAD), + // but for now let's assume that always has it return (win->_flags & _ISPAD); -#else - return 0; -#endif } void _m_getsyx(int *yx) { diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -204,7 +204,7 @@ elif opcode == opcodedesc.BREAK_LOOP.index: next_instr = self.BREAK_LOOP(oparg, next_instr) elif opcode == opcodedesc.CONTINUE_LOOP.index: - next_instr = self.CONTINUE_LOOP(oparg, next_instr) + return self.CONTINUE_LOOP(oparg, next_instr) elif opcode == opcodedesc.FOR_ITER.index: next_instr = self.FOR_ITER(oparg, next_instr) elif opcode == opcodedesc.JUMP_FORWARD.index: diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- 
a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -30,10 +30,6 @@ raise NotImplementedError def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. - """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) @@ -125,10 +121,6 @@ self.index = space.int_w(self.w_len) + index def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. - """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -34,7 +34,7 @@ # def check_call(op, fname): assert op.opname == 'direct_call' - assert op.args[0].value._obj._name == fname + assert op.args[0].value._obj._name.startswith(fname) # ops = [op for block, op in graph.iterblockops()] check_call(ops[-3], 'virtual_ref') diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -340,6 +340,7 @@ # ____________________________________________________________ # VRefs + at specialize.argtype(0) def virtual_ref(x): """Creates a 'vref' object that contains a reference to 'x'. Calls to virtual_ref/virtual_ref_finish must be properly nested. 
The idea @@ -351,6 +352,7 @@ return DirectJitVRef(x) virtual_ref.oopspec = 'virtual_ref(x)' + at specialize.argtype(1) def virtual_ref_finish(vref, x): """See docstring in virtual_ref(x)""" keepalive_until_here(x) # otherwise the whole function call is removed diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -375,21 +375,28 @@ for rule in rules: m.rule(*rule) + if len(headers_to_precompile)>0 and self.version >= 80: + # at least from VS2013 onwards we need to include PCH + # objects in the final link command + linkobjs = 'stdafx.obj @<<\n$(OBJECTS)\n<<' + else: + linkobjs = '@<<\n$(OBJECTS)\n<<' + if self.version < 80: m.rule('$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA) /out:$@' +\ - ' $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', + ' $(LIBDIRS) $(LIBS) ' + linkobjs, ]) else: m.rule('$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + \ ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST' + \ - ' /MANIFESTFILE:$*.manifest @<<\n$(OBJECTS)\n<<', + ' /MANIFESTFILE:$*.manifest ' + linkobjs, 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo /DEBUG $(LDFLAGS) $(LDFLAGSEXTRA)' + \ - ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) ' + linkobjs, ]) if shared: From noreply at buildbot.pypy.org Mon Jul 28 01:50:12 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 28 Jul 2014 01:50:12 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140727235012.E31461C0547@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72587:544362d4110b Date: 2014-07-27 16:49 -0700 http://bitbucket.org/pypy/pypy/changeset/544362d4110b/ Log: merge py3k diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- 
a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -309,11 +309,9 @@ #endif int _m_ispad(WINDOW *win) { -#if defined WINDOW_HAS_FLAGS + // may not have _flags (and possibly _ISPAD), + // but for now let's assume that always has it return (win->_flags & _ISPAD); -#else - return 0; -#endif } void _m_getsyx(int *yx) { diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -30,10 +30,6 @@ raise NotImplementedError def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. - """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) @@ -125,10 +121,6 @@ self.index = space.int_w(self.w_len) + index def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. - """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -34,7 +34,7 @@ # def check_call(op, fname): assert op.opname == 'direct_call' - assert op.args[0].value._obj._name == fname + assert op.args[0].value._obj._name.startswith(fname) # ops = [op for block, op in graph.iterblockops()] check_call(ops[-3], 'virtual_ref') diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -340,6 +340,7 @@ # ____________________________________________________________ # VRefs + at specialize.argtype(0) def virtual_ref(x): """Creates a 'vref' object that contains a reference to 'x'. Calls to virtual_ref/virtual_ref_finish must be properly nested. 
The idea @@ -351,6 +352,7 @@ return DirectJitVRef(x) virtual_ref.oopspec = 'virtual_ref(x)' + at specialize.argtype(1) def virtual_ref_finish(vref, x): """See docstring in virtual_ref(x)""" keepalive_until_here(x) # otherwise the whole function call is removed diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -375,21 +375,28 @@ for rule in rules: m.rule(*rule) + if len(headers_to_precompile)>0 and self.version >= 80: + # at least from VS2013 onwards we need to include PCH + # objects in the final link command + linkobjs = 'stdafx.obj @<<\n$(OBJECTS)\n<<' + else: + linkobjs = '@<<\n$(OBJECTS)\n<<' + if self.version < 80: m.rule('$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA) /out:$@' +\ - ' $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', + ' $(LIBDIRS) $(LIBS) ' + linkobjs, ]) else: m.rule('$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + \ ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST' + \ - ' /MANIFESTFILE:$*.manifest @<<\n$(OBJECTS)\n<<', + ' /MANIFESTFILE:$*.manifest ' + linkobjs, 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(TARGET)', '$(OBJECTS)', [ '$(CC_LINK) /nologo /DEBUG $(LDFLAGS) $(LDFLAGSEXTRA)' + \ - ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) @<<\n$(OBJECTS)\n<<', + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) ' + linkobjs, ]) if shared: From noreply at buildbot.pypy.org Mon Jul 28 03:14:19 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 28 Jul 2014 03:14:19 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fix space.sys.debug tweak leaking outside of these tests Message-ID: <20140728011419.C39AF1C024A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72588:ecd762272c96 Date: 2014-07-27 18:11 -0700 http://bitbucket.org/pypy/pypy/changeset/ecd762272c96/ Log: fix space.sys.debug tweak leaking 
outside of these tests diff --git a/pypy/module/__builtin__/test/test_compile.py b/pypy/module/__builtin__/test/test_compile.py --- a/pypy/module/__builtin__/test/test_compile.py +++ b/pypy/module/__builtin__/test/test_compile.py @@ -84,11 +84,18 @@ class TestOptimizeO: """Test interaction of -O flag and optimize parameter of compile.""" + def setup_method(self, method): + space = self.space + self._sys_debug = space.sys.debug + # imitate -O + space.sys.debug = False + + def teardown_method(self, method): + self.space.sys.debug = self._sys_debug + def test_O_optmize_0(self): """Test that assert is not ignored if -O flag is set but optimize=0.""" space = self.space - space.sys.debug = False # imitate -O - w_res = space.appexec([], """(): assert False # check that our -O imitation hack works try: @@ -103,8 +110,6 @@ def test_O_optimize__1(self): """Test that assert is ignored with -O and optimize=-1.""" space = self.space - space.sys.debug = False # imitate -O - space.appexec([], """(): exec(compile('assert False', '', 'exec', optimize=-1)) """) From noreply at buildbot.pypy.org Mon Jul 28 10:11:09 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:09 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk strategies2: cosing. Message-ID: <20140728081109.5D5991C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies2 Changeset: r985:c597902ccc06 Date: 2014-03-19 11:48 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/c597902ccc06/ Log: cosing. From noreply at buildbot.pypy.org Mon Jul 28 10:11:10 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:10 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk strategies3: Removed FixedSizeFieldTypes. 
Message-ID: <20140728081110.64AC61C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies3 Changeset: r986:f384492f22a3 Date: 2014-03-19 11:55 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/f384492f22a3/ Log: Removed FixedSizeFieldTypes. diff --git a/spyvm/fieldtypes.py b/spyvm/fieldtypes.py --- a/spyvm/fieldtypes.py +++ b/spyvm/fieldtypes.py @@ -2,22 +2,6 @@ from rpython.rlib import rerased from rpython.rlib import objectmodel, jit, signature -from rpython.rlib.listsort import TimSort - -class TypeTag(): - pass - -LPI = TypeTag() -SInt = TypeTag() -flt = TypeTag() -obj = TypeTag() - -# may be used during debugging -# LPI, SInt, flt, obj = 'LPI', 'SInt', 'float', 'object' - -class FieldSort(TimSort): - def lt(self, a, b): - return a[0] < b[0] class AbstractStorageStrategy(): _immutable_fields_ = [] @@ -59,138 +43,23 @@ return self.erase([x for x in collection]) ListStorageStrategy.singleton = ListStorageStrategy() -class SmallIntegerStorageStrategy(AbstractStorageStrategy): - erase, unerase = rerased.new_erasing_pair("object-vector-strategry") - erase = staticmethod(erase) - unerase = staticmethod(unerase) - -class FixedSizeFieldTypes(AbstractStorageStrategy): - _immutable_fields_ = ['types[*]'] - _attrs_ = ['types', 'parent', 'siblings', 'diff'] - _settled_ = True - - erase, unerase = rerased.new_erasing_pair("fixed-size-field-types") - erase = staticmethod(erase) - unerase = staticmethod(unerase) - - def __init__(self, types, parent=None, change=(-1, obj)): - self.types = types - self.parent = parent - if parent is not None: - assert change != (-1, obj) - self.diff = change - self.siblings = {} - - def initial_storage(self, size, default_element): - return self.erase([default_element] * size) - - def storage_for_list(self, collection): - return self.erase([x for x in collection]) - - def size_of(self, w_obj): - return len(self.unerase(w_obj.storage)) - - def fetch(self, w_object, n0): - w_result = self.unerase(w_object.storage)[n0] - 
assert w_result is not None - types = self.types - # TODO - try 'assert isinstance' instead. - if types[n0] is SInt: - jit.record_known_class(w_result, model.W_SmallInteger) - elif types[n0] is LPI: - jit.record_known_class(w_result, model.W_LargePositiveInteger1Word) - elif types[n0] is flt: - jit.record_known_class(w_result, model.W_Float) - return w_result - - def store(self, w_object, n0, w_value): - types = self.types - changed_type = w_value.fieldtype() - if types[n0] is not changed_type: - w_object.strategy = self.sibling(n0, changed_type) - self.unerase(w_object.storage)[n0] = w_value - - @jit.elidable - def sibling(self, n0, changed_type): - assert self.types[n0] is not changed_type - change = (n0, changed_type) - parent = self.parent - siblings = self.siblings - if change in siblings: - return siblings[change] - elif parent is None: - return self.descent([change]) - else: - if n0 == self.diff[0]: - diff = [change] - else: - diff = [change, self.diff] - - new_fieldtype = parent.ascent(diff) - - if not objectmodel.we_are_translated(): - new_types = list(self.types) - new_types[n0] = changed_type - assert new_fieldtype.types == new_types - siblings[change] = new_fieldtype - return new_fieldtype - - def ascent(self, changes): - parent = self.parent - if parent is None: - FieldSort(changes).sort() - return self.descent(changes) - else: - change = self.diff - if changes[0][0] != change[0]: - changes.append(change) - return parent.ascent(changes) - - def descent(self, changes): - if changes == []: - return self - - change = changes[0] - if change[1] is obj: - return self.descent(changes[1:]) - siblings = self.siblings - if change in siblings: - return siblings[change].descent(changes[1:]) - else: - new_types = list(self.types) - assert new_types[change[0]] == obj - new_types[change[0]] = change[1] - new_fieldtype = FixedSizeFieldTypes(new_types, self, change) - siblings[change] = new_fieldtype - return new_fieldtype.descent(changes[1:]) - - @staticmethod - 
@jit.elidable - def of_size(n): - if n not in maps: - maps[n] = FixedSizeFieldTypes([obj] * n) - return maps[n] - -maps = {} def strategy_of_size(s_class, size): if s_class is None or s_class.isvariable(): return ListStorageStrategy.singleton else: - return FixedSizeFieldTypes.of_size(size) + # TODO -- add AllNilStorageStrategy + return ListStorageStrategy.singleton def strategy_for_list(w_obj, vars): + if if s_class is None: + return ListStorageStrategy.singleton try: - s_class = w_obj.s_class - if s_class is None or w_obj.s_class.isvariable(): - return ListStorageStrategy.singleton - else: - size = len(vars) - typer = FixedSizeFieldTypes.of_size(size) - for i, w_val in enumerate(vars): - changed_type = w_val.fieldtype() - if changed_type is not obj: - typer = typer.sibling(i, changed_type) - return typer + is_variable = w_obj.s_class.isvariable() except AttributeError: return ListStorageStrategy.singleton + if is_variable: + # TODO - check the contents of vars and choose a good strategy. + return ListStorageStrategy.singleton + return ListStorageStrategy.singleton + \ No newline at end of file From noreply at buildbot.pypy.org Mon Jul 28 10:11:11 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:11 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk strategies3: Fixing up the system to make some experiments. Message-ID: <20140728081111.805AF1C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies3 Changeset: r987:8fb43363b6c6 Date: 2014-03-19 12:05 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/8fb43363b6c6/ Log: Fixing up the system to make some experiments. 
diff --git a/spyvm/fieldtypes.py b/spyvm/fieldtypes.py --- a/spyvm/fieldtypes.py +++ b/spyvm/fieldtypes.py @@ -2,8 +2,9 @@ from rpython.rlib import rerased from rpython.rlib import objectmodel, jit, signature +from rpython.rlib.objectmodel import import_from_mixin -class AbstractStorageStrategy(): +class AbstractStorageStrategy(object): _immutable_fields_ = [] _attrs_ = [] _settled_ = True @@ -23,43 +24,129 @@ def all_vars(self, w_obj): return [self.fetch(w_obj, i) for i in range(0, self.size_of(w_obj))] +class SingletonMeta(type): + def __new__(cls, name, bases, dct): + result = type.__new__(cls, name, bases, dct) + result.singleton = result() + return result + +class BasicStorageStrategyMixin(object): + # Concrete class must implement: unerase + def storage(self, w_obj): + return self.unerase(w_obj.get_storage()) + # This is the regular storage strategy that does not result in any # optimizations but can handle every case. Applicable for both # fixed-sized and var-sized objects. class ListStorageStrategy(AbstractStorageStrategy): - erase, unerase = rerased.new_erasing_pair("list-storage-strategy") - erase = staticmethod(erase) - unerase = staticmethod(unerase) + __metaclass__ = SingletonMeta + erase, unerase = rerased.new_static_erasing_pair("list-storage-strategy") + import_from_mixin(BasicStorageStrategyMixin) def fetch(self, w_obj, n0): - return self.unerase(w_obj.storage)[n0] + return self.storage(w_obj)[n0] def store(self, w_obj, n0, w_val): - self.unerase(w_obj.storage)[n0] = w_val + self.storage(w_obj)[n0] = w_val def size_of(self, w_obj): - return len(self.unerase(w_obj.storage)) + return len(self.storage(w_obj)) def initial_storage(self, size, default_element): return self.erase([default_element] * size) - def storage_for_list(self, collection): + def storage_for_list(self, space, collection): return self.erase([x for x in collection]) -ListStorageStrategy.singleton = ListStorageStrategy() +class 
TaggingSmallIntegerStorageStrategy(AbstractStorageStrategy): + __metaclass__ = SingletonMeta + erase, unerase = rerased.new_static_erasing_pair("tagging-small-integer-strategry") + import_from_mixin(BasicStorageStrategyMixin) + + @staticmethod + def wrap(val): + return val << 1 + @staticmethod + def unwrap(val): + return val >> 1 + @staticmethod + def is_nil(val): + return (val & 1) == 1 + @staticmethod + def can_contain(w_val): + return isinstance(w_val, model.W_SmallInteger) + # TODO - use just a single value to represent nil (max_int-1) + # Then, turn wrap/unwrap into noops + # also store W_LargePositiveInteger1Word? + nil_value = 1 + + def needs_objspace(self): + return True + + def fetch(self, space, w_obj, n0): + val = self.storage(w_obj)[n0] + if (self.is_nil(val)): + return space.w_nil + else: + return space.wrap_int(self.unwrap(val)) + + def store(self, space, w_obj, n0, w_val): + store = self.storage(w_obj) + if self.can_contain(w_val): + store[n0] = self.wrap(space.unwrap_int(w_val)) + else: + if w_val == model.w_nil: + # TODO - generelize to AllNilStorage by maintaining a counter of nil-elements + store[n0] = self.nil_value + else: + # Storing a wrong type - dehomogenize to ListStorage + return w_obj.store_with_new_strategy(space, ListStorageStrategy.singleton, n0, w_val) + + def size_of(self, w_obj): + return len(self.storage(w_obj)) + + def initial_storage(self, space, size): + return self.erase([self.nil_value] * size) + + def storage_for_list(self, space, collection): + length = len(collection) + store = [self.nil_value] * length + for i in range(length): + if collection[i] != model.w_nil: + store[i] = self.wrap(space.unwrap_int(collection[i])) + return self.erase(store) -def strategy_of_size(s_class, size): - if s_class is None or s_class.isvariable(): +def strategy_of_size(s_containing_class, size): + if s_containing_class is None: + # This is a weird and rare special case for w_nil return ListStorageStrategy.singleton + if not 
s_containing_class.isvariable(): + return ListStorageStrategy.singleton + + # A newly allocated object contains only nils. + # return AllNilStorageStrategy.singleton + return ListStorageStrategy.singleton + +def strategy_for_list(s_containing_class, vars): + if s_containing_class is None: + # This is a weird and rare special case for w_nil + return ListStorageStrategy.singleton + try: + is_variable = s_containing_class.isvariable() + except AttributeError: + # TODO - This happens during bootstrapping phase, when filling in generic objects. + # Ths class object shadows are not yet synchronized. + return ListStorageStrategy.singleton + + if not is_variable: + return ListStorageStrategy.singleton + + is_all_nils = True + for w_obj in vars: + if w_obj != model.w_nil: + is_all_nils = False + if not TaggingSmallIntegerStorageStrategy.can_contain(w_obj): + # TODO -- here we can still optimize if there is only + # one single type in the collection. + return ListStorageStrategy.singleton + if is_all_nils: + return ListStorageStrategy.singleton + # return AllNilStorageStrategy.singleton else: - # TODO -- add AllNilStorageStrategy - return ListStorageStrategy.singleton - -def strategy_for_list(w_obj, vars): - if if s_class is None: - return ListStorageStrategy.singleton - try: - is_variable = w_obj.s_class.isvariable() - except AttributeError: - return ListStorageStrategy.singleton - if is_variable: - # TODO - check the contents of vars and choose a good strategy. 
- return ListStorageStrategy.singleton - return ListStorageStrategy.singleton - \ No newline at end of file + return TaggingSmallIntegerStorageStrategy.singleton \ No newline at end of file diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -632,9 +632,15 @@ W_AbstractPointersObject.fillin(self, space, g_self) from spyvm.fieldtypes import strategy_for_list pointers = g_self.get_pointers() - self.strategy = strategy_for_list(self, pointers) + self.strategy = strategy_for_list(self.s_class, pointers) self.storage = self.strategy.storage_for_list(pointers) + def get_strategy(self): + return self.strategy + + def get_storage(self): + return self.storage + def all_vars(self): return self.strategy.all_vars(self) From noreply at buildbot.pypy.org Mon Jul 28 10:11:12 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:12 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk strategies3: Fixing things up Message-ID: <20140728081112.A0CDA1C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies3 Changeset: r988:a77d13fd8840 Date: 2014-03-19 12:42 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/a77d13fd8840/ Log: Fixing things up diff --git a/spyvm/fieldtypes.py b/spyvm/fieldtypes.py --- a/spyvm/fieldtypes.py +++ b/spyvm/fieldtypes.py @@ -9,20 +9,26 @@ _attrs_ = [] _settled_ = True - def __init__(self): - pass - def fetch(self, w_obj, n0): + def fetch(self, space, w_obj, n0): raise NotImplementedError("Abstract base class") - def store(self, w_obj, n0, w_val): + def store(self, space, w_obj, n0, w_val): raise NotImplementedError("Abstract base class") def size_of(self, w_obj): raise NotImplementedError("Abstract base class") - def initial_storage(self, size, default_element): + + def initial_storage(self, space, size): raise NotImplementedError("Abstract base class") - def storage_for_list(self, collection): + def storage_for_list(self, space, collection): raise 
NotImplementedError("Abstract base class") - def all_vars(self, w_obj): - return [self.fetch(w_obj, i) for i in range(0, self.size_of(w_obj))] + def copy_storage_from(self, space, w_obj, reuse_storage=False): + old_strategy = w_obj.strategy + if old_strategy == self and reuse_storage: + return w_obj.get_storage() + if isinstance(old_strategy, AllNilStorageStrategy): + return self.initial_storage(space, old_strategy.size_of(w_obj)) + else: + # This can be overridden and optimized (reuse_storage flag, less temporary storage) + return self.storage_for_list(space, w_obj.fetch_all(space)) class SingletonMeta(type): def __new__(cls, name, bases, dct): @@ -43,14 +49,14 @@ erase, unerase = rerased.new_static_erasing_pair("list-storage-strategy") import_from_mixin(BasicStorageStrategyMixin) - def fetch(self, w_obj, n0): + def fetch(self, space, w_obj, n0): return self.storage(w_obj)[n0] - def store(self, w_obj, n0, w_val): + def store(self, space, w_obj, n0, w_val): self.storage(w_obj)[n0] = w_val def size_of(self, w_obj): return len(self.storage(w_obj)) - def initial_storage(self, size, default_element): - return self.erase([default_element] * size) + def initial_storage(self, space, size): + return self.erase([model.w_nil] * size) def storage_for_list(self, space, collection): return self.erase([x for x in collection]) diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -507,12 +507,12 @@ def fetch(self, space, n0): if self.has_shadow(): return self._shadow.fetch(n0) - return self._fetch(n0) + return self._fetch(space, n0) def store(self, space, n0, w_value): if self.has_shadow(): return self._shadow.store(n0, w_value) - return self._store(n0, w_value) + return self._store(space, n0, w_value) def varsize(self, space): return self.size() - self.instsize(space) @@ -626,39 +626,36 @@ """Create new object with size = fixed + variable size.""" W_AbstractPointersObject.__init__(self, space, w_class, size) self.strategy = 
strategy_of_size(self.s_class, size) - self.storage = self.strategy.initial_storage(size, w_nil) + self.storage = self.strategy.initial_storage(space, size) def fillin(self, space, g_self): W_AbstractPointersObject.fillin(self, space, g_self) from spyvm.fieldtypes import strategy_for_list pointers = g_self.get_pointers() self.strategy = strategy_for_list(self.s_class, pointers) - self.storage = self.strategy.storage_for_list(pointers) + self.storage = self.strategy.storage_for_list(space, pointers) def get_strategy(self): + # return jit.promote(self.strategy) return self.strategy def get_storage(self): return self.storage - def all_vars(self): - return self.strategy.all_vars(self) + def all_vars(self, space): + return [self.strategy.fetch(space, self, i) for i in range(self.basic_size())] def set_all_vars(self, collection): - # TODO reuse storage if possible - self.storage = self.strategy.storage_for_list(collection) + self.storage = self.strategy.storage_for_list(space, collection) - def _fetch(self, n0): - strategy = jit.promote(self.strategy) - return strategy.fetch(self, n0) + def _fetch(self, space, n0): + return self.get_strategy().fetch(space, self, n0) - def _store(self, n0, w_value): - strategy = jit.promote(self.strategy) - return strategy.store(self, n0, w_value) + def _store(self, space, n0, w_value): + return self.get_strategy().store(space, self, n0, w_value) def basic_size(self): - strategy = jit.promote(self.strategy) - return strategy.size_of(self) + return self.get_strategy().size_of(self) def become(self, w_other): if not isinstance(w_other, W_PointersObject): @@ -672,7 +669,7 @@ length = self.strategy.size_of(self) w_result = W_PointersObject(self.space, self.getclass(space), length) cloned_vars = [self.fetch(space, i) for i in range(length)] - w_result.storage = w_result.strategy.storage_for_list(cloned_vars) + w_result.storage = w_result.strategy.storage_for_list(space, cloned_vars) return w_result def fieldtype(self): @@ -690,11 +687,11 @@ 
def fillin(self, space, g_self): raise NotImplementedError("we don't expect weak objects in a fresh image") - def _fetch(self, n0): + def _fetch(self, space, n0): weakobj = self._weakvars[n0] return weakobj() or w_nil - def _store(self, n0, w_value): + def _store(self, space, n0, w_value): assert w_value is not None self._weakvars[n0] = weakref.ref(w_value) @@ -1193,7 +1190,10 @@ if isinstance(w_candidate, W_PointersObject): c_shadow = w_candidate._shadow if c_shadow is None and w_candidate.size() >= 2: - w_class = w_candidate._fetch(1) + space = None + if self._shadow: + space = self._shadow.space + w_class = w_candidate._fetch(space, 1) if isinstance(w_class, W_PointersObject): d_shadow = w_class._shadow if isinstance(d_shadow, shadow.ClassShadow): @@ -1260,7 +1260,7 @@ def as_compiledmethod_get_shadow(self, space=None): from shadow import CompiledMethodShadow if self._shadow is None: - self._shadow = CompiledMethodShadow(self) + self._shadow = CompiledMethodShadow(space, self) return self._shadow def literalat0(self, space, index0): diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -165,7 +165,7 @@ from spyvm.fieldtypes import ListStorageStrategy w_nil.space = self w_nil.strategy = ListStorageStrategy.singleton - w_nil.storage = w_nil.strategy.initial_storage(0, None) + w_nil.storage = w_nil.strategy.initial_storage(self, 0) w_nil.s_class = self.classtable['w_UndefinedObject'].as_class_get_penumbra(self) return w_nil w_nil = self.w_nil = patch_nil(model.w_nil) diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -756,8 +756,8 @@ w_offset = self.fetch(4) assert isinstance(w_offset, model.W_PointersObject) if not w_offset is self.space.w_nil: - self.offsetX = self.intOrIfNil(w_offset._fetch(0), 0) - self.offsetY = self.intOrIfNil(w_offset._fetch(1), 0) + self.offsetX = self.intOrIfNil(w_offset._fetch(self.space, 0), 0) + self.offsetY = 
self.intOrIfNil(w_offset._fetch(self.space, 1), 0) self.pixPerWord = 32 / self.depth self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 if self.w_bits.size() < (self.pitch * self.height): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -374,7 +374,7 @@ print ("%s" % w_message).replace('\r', '\n') print ("%s" % s_frame.peek(1)).replace('\r', '\n') if isinstance(w_message, model.W_PointersObject): - print ('%s' % w_message.all_vars()).replace('\r', '\n') + print ('%s' % w_message.all_vars(interp.space)).replace('\r', '\n') # raise Exit('Probably Debugger called...') raise PrimitiveFailedError() diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -23,9 +23,9 @@ self.space = space self._w_self = w_self def fetch(self, n0): - return self.w_self()._fetch(n0) + return self.w_self()._fetch(self.space, n0) def store(self, n0, w_value): - return self.w_self()._store(n0, w_value) + return self.w_self()._store(self.space, n0, w_value) def size(self): return self.w_self().basic_size() def w_self(self): @@ -109,7 +109,7 @@ # read and painfully decode the format try: classformat = self.space.unwrap_int( - w_self._fetch(constants.CLASS_FORMAT_INDEX)) + w_self._fetch(self.space, constants.CLASS_FORMAT_INDEX)) # The classformat in Squeak, as an integer value, is: # <2 bits=instSize//64><5 bits=cClass><4 bits=instSpec> # <6 bits=instSize\\64><1 bit=0> @@ -149,19 +149,19 @@ else: raise ClassShadowError("unknown format %d" % (format,)) except UnwrappingError: - assert w_self._fetch(constants.CLASS_FORMAT_INDEX) is self.space.w_nil + assert w_self._fetch(self.space, constants.CLASS_FORMAT_INDEX) is self.space.w_nil pass # not enough information stored in w_self, yet self.guess_class_name() # read the methoddict - w_methoddict = w_self._fetch(constants.CLASS_METHODDICT_INDEX) + w_methoddict = w_self._fetch(self.space, constants.CLASS_METHODDICT_INDEX) assert 
isinstance(w_methoddict, model.W_PointersObject) if not w_methoddict.is_same_object(self.space.w_nil): self._s_methoddict = w_methoddict.as_methoddict_get_shadow(self.space) self._s_methoddict.s_class = self - w_superclass = w_self._fetch(constants.CLASS_SUPERCLASS_INDEX) + w_superclass = w_self._fetch(self.space, constants.CLASS_SUPERCLASS_INDEX) if w_superclass.is_same_object(self.space.w_nil): self._s_superclass = None else: @@ -184,7 +184,7 @@ # read the name if w_self.size() > constants.CLASS_NAME_INDEX: - w_name = w_self._fetch(constants.CLASS_NAME_INDEX) + w_name = w_self._fetch(self.space, constants.CLASS_NAME_INDEX) else: # Some heuristic to find the classname # Only used for debugging @@ -193,11 +193,11 @@ # we are probably holding a metaclass instead of a class. # metaclasses hold a pointer to the real class in the last # slot. This is pos 6 in mini.image and higher in squeak3.9 - w_realclass = w_self._fetch(w_self.size() - 1) + w_realclass = w_self._fetch(self.space, w_self.size() - 1) if (isinstance(w_realclass, model.W_PointersObject) and w_realclass.size() > constants.CLASS_NAME_INDEX): # TODO ADD TEST WHICH GOES OVER THIS PART - w_name = w_realclass._fetch(constants.CLASS_NAME_INDEX) + w_name = w_realclass._fetch(self.space, constants.CLASS_NAME_INDEX) else: return @@ -233,7 +233,7 @@ return w_new def w_methoddict(self): - return self.w_self()._fetch(constants.CLASS_METHODDICT_INDEX) + return self.w_self()._fetch(self.space, constants.CLASS_METHODDICT_INDEX) def s_methoddict(self): return self._s_methoddict @@ -335,7 +335,7 @@ "NOT_RPYTHON" # this is only for testing. 
if self._s_methoddict is None: w_methoddict = model.W_PointersObject(self.space, None, 2) - w_methoddict._store(1, model.W_PointersObject(self.space, None, 0)) + w_methoddict._store(self.space, 1, model.W_PointersObject(self.space, None, 0)) self._s_methoddict = w_methoddict.as_methoddict_get_shadow(self.space) self.s_methoddict().sync_cache() self.s_methoddict().invalid = False @@ -386,14 +386,14 @@ def sync_cache(self): if self.w_self().size() == 0: return - w_values = self.w_self()._fetch(constants.METHODDICT_VALUES_INDEX) + w_values = self.w_self()._fetch(self.space, constants.METHODDICT_VALUES_INDEX) assert isinstance(w_values, model.W_PointersObject) s_values = w_values.as_observed_get_shadow(self.space) s_values.notify(self) size = self.w_self().size() - constants.METHODDICT_NAMES_INDEX self.methoddict = {} for i in range(size): - w_selector = self.w_self()._fetch(constants.METHODDICT_NAMES_INDEX+i) + w_selector = self.w_self()._fetch(self.space, constants.METHODDICT_NAMES_INDEX+i) if not w_selector.is_same_object(self.space.w_nil): if not isinstance(w_selector, model.W_BytesObject): pass @@ -401,7 +401,7 @@ # Putting any key in the methodDict and running with # perform is actually supported in Squeak # raise ClassShadowError("bogus selector in method dict") - w_compiledmethod = w_values._fetch(i) + w_compiledmethod = w_values._fetch(self.space, i) if not isinstance(w_compiledmethod, model.W_CompiledMethod): raise ClassShadowError("The methoddict must contain " "CompiledMethods only, for now. 
" @@ -441,7 +441,7 @@ self.copy_from_w_self(i) except error.SenderChainManipulation, e: assert e.s_context == self - w_self.storage = w_self.strategy.initial_storage(0, None) + w_self.storage = w_self.strategy.initial_storage(self.space, 0) # def detach_shadow(self): # w_self = self.w_self() @@ -451,9 +451,9 @@ # self.copy_to_w_self(i) def copy_from_w_self(self, n0): - self.store(n0, self.w_self()._fetch(n0)) + self.store(n0, self.w_self()._fetch(self.space, n0)) def copy_to_w_self(self, n0): - self.w_self()._store(n0, self.fetch(n0)) + self.w_self()._store(self.space, n0, self.fetch(n0)) class ContextPartShadow(AbstractRedirectingShadow): @@ -1031,11 +1031,12 @@ "literals", "bytecodeoffset", "literalsize", "_tempsize", "_primitive", "argsize", "islarge", - "w_compiledin", "version"] + "w_compiledin", "version", "space"] _immutable_fields_ = ["version?", "_w_self"] - def __init__(self, w_compiledmethod): + def __init__(self, space, w_compiledmethod): self._w_self = w_compiledmethod + self.space = space self.update() def w_self(self): @@ -1110,11 +1111,11 @@ @jit.elidable def safe_fetch(self, n0, version): assert version is self.version - return self._w_self._fetch(n0) + return self._w_self._fetch(self.space, n0) def store(self, n0, w_value): self.version = Version() - return self._w_self._store(n0, w_value) + return self._w_self._store(self.space, n0, w_value) def update(self): pass diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -218,7 +218,7 @@ step_in_interp(s_frame) assert s_frame.stack() == [] for test_index in range(8): - print w_frame.all_vars() + print w_frame.all_vars(space) if test_index == index: assert s_frame.gettemp(test_index) == space.w_true else: diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -14,7 +14,7 @@ class 
MockFrame(model.W_PointersObject): def __init__(self, stack): - self.set_all_vars([None] * 6 + stack + [space.w_nil] * 6) + self.set_all_vars(space, [None] * 6 + stack + [space.w_nil] * 6) s_self = self.as_blockcontext_get_shadow() s_self.init_stack_and_temps() s_self.reset_stack() diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -240,15 +240,15 @@ s_methoddict = s_class.s_methoddict() s_methoddict.sync_cache() i = 0 - key = s_methoddict.w_self()._fetch(constants.METHODDICT_NAMES_INDEX+i) + key = s_methoddict.w_self()._fetch(space, constants.METHODDICT_NAMES_INDEX+i) while key is space.w_nil: i = i + 1 - key = s_methoddict.w_self()._fetch(constants.METHODDICT_NAMES_INDEX+i) + key = s_methoddict.w_self()._fetch(space, constants.METHODDICT_NAMES_INDEX+i) assert (s_class.lookup(key) is foo.as_compiledmethod_get_shadow(space) or s_class.lookup(key) is bar.as_compiledmethod_get_shadow(space)) # change that entry - w_array = s_class.w_methoddict()._fetch(constants.METHODDICT_VALUES_INDEX) + w_array = s_class.w_methoddict()._fetch(space, constants.METHODDICT_VALUES_INDEX) version = s_class.version w_array.atput0(space, i, baz) @@ -268,7 +268,7 @@ s_md = w_parent.as_class_get_shadow(space).s_methoddict() s_md.sync_cache() - w_ary = s_md._w_self._fetch(constants.METHODDICT_VALUES_INDEX) + w_ary = s_md._w_self._fetch(space, constants.METHODDICT_VALUES_INDEX) s_md._w_self.atput0(space, 0, key) w_ary.atput0(space, 0, w_method) From noreply at buildbot.pypy.org Mon Jul 28 10:11:26 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:26 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-allocRemoval: Merged storage branch. 
Message-ID: <20140728081126.617B91C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-allocRemoval Changeset: r989:2e3b082dd9a1 Date: 2014-04-03 10:55 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2e3b082dd9a1/ Log: Merged storage branch. diff too long, truncating to 2000 out of 4957 lines diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes --- a/images/Squeak4.5-noBitBlt.changes +++ b/images/Squeak4.5-noBitBlt.changes @@ -12198,4 +12198,14 @@ ]. "self footer." - ^ self! ! SMarkRunner execute: CPBAStarBenchmark new with: 3 ! SMarkRunner execute: CPBAStarBenchmark new with: 3 ! SMarkRunner execute: CPBAStarBenchmark new with: 3 ! Object subclass: #Benchmarks instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Cross-Platform-Benchmarks'! !Benchmarks class methodsFor: 'no messages' stamp: 'ag 3/8/2014 23:21'! runAll: iterations ^ String streamContents: [ :str | self allBenchmarks do: [ :bench | str nextPutAll: (SMarkRunner execute: bench new with: iterations) ] ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:21' prior: 49281266! runAll: iterations ^ String streamContents: [ :str | self allBenchmarks do: [ :bench | str nextPutAll: (SMarkRunner execute: bench new with: iterations) asString ] ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:22'! allBenchmarks ^ { CPBAStarBenchmark. }! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:26' prior: 49281795! allBenchmarks ^ { CPBAStarBenchmark. CPBBinaryTreeBenchmark. CPBBlowfishSuite. CPBChameneosBenchmark. CPBDeltaBlueBenchmark. CPBMandelbrotBenchmarkSuite. CPBNBodyBenchmark. CPBPolymorphyBenchmark. CPBRichardsBenchmark. CPBSplayTreeBenchmark. }! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:26' prior: 49281938! allBenchmarks ^ { CPBAStarBenchmark. CPBBinaryTreeBenchmark. CPBBlowfishSuite. CPBChameneosBenchmark. 
CPBDeltaBlueBenchmark. CPBMandelbrotBenchmarkSuite. CPBNBodyBenchmark. CPBPolymorphyBenchmark. CPBRichardsBenchmark. CPBSplayTreeBenchmark. }! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:27'! run: benchmarks iterations: iterations ^ String streamContents: [ :str | benchmarks do: [ :bench | str cr; nextPutAll: (SMarkRunner execute: bench new with: iterations) asString ] ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:27' prior: 49281534! runAll: iterations ^ self run: self allBenchmarks iterations: iterations! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:27'! runAll ^ self runAll: 5! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:29'! list ^ String streamContents: [ :str | self allBenchmarks do: [ :bench | str cr; nextPutAll: bench name asString ] ]! ! 'CPBBinaryTreeBenchmark' indexOf: 'Binary'! 'CPBBinaryTreeBenchmark' includesSubString: 'Binary'! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:30' prior: 49283190! list ^ String streamContents: [ :str | self allBenchmarkNames do: [ :benchName | str cr; nextPutAll: benchName ] ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:31'! allBenchmarkNames ^ self allBenchmarks collect: [ :bench | bench name asString ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:32'! selectBenchmarks: substring ^ self allBenchmarks select: [ :bench | bench name includesSubString: substring ]! ! !Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:35'! runMatching: substring iterations: iterations | benchmarks | benchmarks := self selectBenchmarks: substring. benchmarks ifEmpty: [ ^ String streamContents: [ :str | str nextPutAll: 'No benchmarks matched "'; nextPutAll: substring; nextPutAll: '"'; cr; nextPutAll: 'Available benchmarks:'. self allBenchmarkNames do: [ :name | str cr; nextPutAll: name ] ] ]. ! ! 
!Benchmarks class methodsFor: 'as yet unclassified' stamp: 'ag 3/8/2014 23:35' prior: 49284077! runMatching: substring iterations: iterations | benchmarks | benchmarks := self selectBenchmarks: substring. benchmarks ifEmpty: [ ^ String streamContents: [ :str | str nextPutAll: 'No benchmarks matched "'; nextPutAll: substring; nextPutAll: '"'; cr; nextPutAll: 'Available benchmarks:'. self allBenchmarkNames do: [ :name | str cr; nextPutAll: name ] ] ]. ^ self run: benchmarks iterations: iterations! ! Benchmarks class organization addCategory: #'category name'! Benchmarks class organization renameCategory: #'category name' toBe: #cli! !Benchmarks class methodsFor: 'cli' stamp: 'ag 3/8/2014 23:36' prior: 49283511! list ^ String streamContents: [ :str | self allBenchmarkNames do: [ :benchName | str cr; nextPutAll: benchName ] ]! ! Benchmarks class organization renameCategory: #'as yet unclassified' toBe: #benchmarks! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:31' prior: 49283714! allBenchmarkNames ^ self allBenchmarks collect: [ :bench | bench name asString ]! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:26' prior: 49282287! allBenchmarks ^ { CPBAStarBenchmark. CPBBinaryTreeBenchmark. CPBBlowfishSuite. CPBChameneosBenchmark. CPBDeltaBlueBenchmark. CPBMandelbrotBenchmarkSuite. CPBNBodyBenchmark. CPBPolymorphyBenchmark. CPBRichardsBenchmark. CPBSplayTreeBenchmark. }! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:27' prior: 49282628! run: benchmarks iterations: iterations ^ String streamContents: [ :str | benchmarks do: [ :bench | str cr; nextPutAll: (SMarkRunner execute: bench new with: iterations) asString ] ]! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:27' prior: 49283080! runAll ^ self runAll: 5! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:27' prior: 49282921! runAll: iterations ^ self run: self allBenchmarks iterations: iterations! ! 
!Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:35' prior: 49284576! runMatching: substring iterations: iterations | benchmarks | benchmarks := self selectBenchmarks: substring. benchmarks ifEmpty: [ ^ String streamContents: [ :str | str nextPutAll: 'No benchmarks matched "'; nextPutAll: substring; nextPutAll: '"'; cr; nextPutAll: 'Available benchmarks:'. self allBenchmarkNames do: [ :name | str cr; nextPutAll: name ] ] ]. ^ self run: benchmarks iterations: iterations! ! !Benchmarks class methodsFor: 'benchmarks' stamp: 'ag 3/8/2014 23:32' prior: 49283881! selectBenchmarks: substring ^ self allBenchmarks select: [ :bench | bench name includesSubString: substring ]! ! Benchmarks class organization addCategory: #private! !Benchmarks class methodsFor: 'private' stamp: 'ag 3/8/2014 23:37' prior: 49286064! run: benchmarks iterations: iterations ^ String streamContents: [ :str | benchmarks do: [ :bench | str cr; nextPutAll: (SMarkRunner execute: bench new with: iterations) asString ] ]! ! !Benchmarks class methodsFor: 'private' stamp: 'ag 3/8/2014 23:37' prior: 49285542! allBenchmarkNames ^ self allBenchmarks collect: [ :bench | bench name asString ]! ! !Benchmarks class methodsFor: 'private' stamp: 'ag 3/8/2014 23:37' prior: 49287167! selectBenchmarks: substring ^ self allBenchmarks select: [ :bench | bench name includesSubString: substring ]! ! Benchmarks runMatching: 'Binary' iterations: 3! Benchmarks runMatching: 'Bin' iterations: 3! Benchmarks runMatching: 'Bidn' iterations: 3! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Object class', i asString! ! 
!Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Number class', i asString! ! 
!Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! 
Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! 
!Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Number class', i asString! ! 
!Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! 
String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Morph class', i asString! ! 
!Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! 
!Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! 
FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! 
!Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! 
Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! 
!Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! 
test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Behavior class', i asString! ! 
!Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/8/2014 23:39'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! 
Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! Benchmarks runAll ! !Benchmarks class methodsFor: 'private' stamp: 'ag 3/8/2014 23:48' prior: 49287421! run: benchmarks iterations: iterations ^ String streamContents: [ :str | benchmarks do: [ :bench | str nextPutAll: (SMarkRunner execute: bench new with: iterations) asString ] ]! ! ----SNAPSHOT----{8 March 2014 . 11:49:02 pm} Squeak4.5-noBitBlt.image priorSource: 15724710! ----STARTUP----{11 March 2014 . 10:13:40 am} as C:\Dev\lang-smalltalk\images\Squeak4.5-noBitBlt.image! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Morph class', i asString! ! 
!Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! 
test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Behavior class', i asString! ! 
!Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! 
Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! 
test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Canvas class', i asString! 
! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! 
test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! 
Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! Error class removeSelector: #test123polbench:! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Object class', i asString! ! !Object class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Morph class', i asString! ! !Morph class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Collection class', i asString! ! 
!Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Collection class', i asString! ! !Collection class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was String class', i asString! ! !String class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was FontSet class', i asString! ! !FontSet class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! 
test3polbench: i ^'I allways was Canvas class', i asString! ! !Canvas class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Stream class', i asString! ! !Stream class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Number class', i asString! ! !Number class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Behavior class', i asString! ! !Behavior class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! 
!Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test1polbench: i ^'I am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test2polbench: i ^'I still am Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test3polbench: i ^'I allways was Error class', i asString! ! !Error class methodsFor: 'as yet unclassified' stamp: 'test 3/11/2014 10:14'! test123polbench: i self test1polbench: i. self test2polbench: i. self test3polbench: i.! ! Object class removeSelector: #test1polbench:! Object class removeSelector: #test2polbench:! Object class removeSelector: #test3polbench:! Object class removeSelector: #test123polbench:! Morph class removeSelector: #test1polbench:! Morph class removeSelector: #test2polbench:! Morph class removeSelector: #test3polbench:! Morph class removeSelector: #test123polbench:! Collection class removeSelector: #test1polbench:! Collection class removeSelector: #test2polbench:! Collection class removeSelector: #test3polbench:! Collection class removeSelector: #test123polbench:! String class removeSelector: #test1polbench:! String class removeSelector: #test2polbench:! String class removeSelector: #test3polbench:! String class removeSelector: #test123polbench:! FontSet class removeSelector: #test1polbench:! FontSet class removeSelector: #test2polbench:! FontSet class removeSelector: #test3polbench:! FontSet class removeSelector: #test123polbench:! Canvas class removeSelector: #test1polbench:! Canvas class removeSelector: #test2polbench:! Canvas class removeSelector: #test3polbench:! Canvas class removeSelector: #test123polbench:! Stream class removeSelector: #test1polbench:! Stream class removeSelector: #test2polbench:! Stream class removeSelector: #test3polbench:! Stream class removeSelector: #test123polbench:! Number class removeSelector: #test1polbench:! Number class removeSelector: #test2polbench:! 
Number class removeSelector: #test3polbench:! Number class removeSelector: #test123polbench:! Behavior class removeSelector: #test1polbench:! Behavior class removeSelector: #test2polbench:! Behavior class removeSelector: #test3polbench:! Behavior class removeSelector: #test123polbench:! Error class removeSelector: #test1polbench:! Error class removeSelector: #test2polbench:! Error class removeSelector: #test3polbench:! From noreply at buildbot.pypy.org Mon Jul 28 10:11:27 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:27 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk interpreter-flags: Added interrupts-flag to interpreter and entry-point to disable user interrupts. Message-ID: <20140728081127.734A71C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: interpreter-flags Changeset: r990:187ad8278435 Date: 2014-05-13 12:00 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/187ad8278435/ Log: Added interrupts-flag to interpreter and entry-point to disable user interrupts. Added flag to entry-point to configure (or disable) the max stack depth before StackOverflow protection kicks in. Fixed indentation. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -25,7 +25,7 @@ class Interpreter(object): _immutable_fields_ = ["space", "image", "image_name", "max_stack_depth", "interrupt_counter_size", - "startup_time", "evented"] + "startup_time", "evented", "interrupts"] jit_driver = jit.JitDriver( greens=['pc', 'self', 'method'], @@ -34,9 +34,9 @@ get_printable_location=get_printable_location ) - def __init__(self, space, image=None, image_name="", trace=False, - evented=True, - max_stack_depth=constants.MAX_LOOP_DEPTH): + def __init__(self, space, image=None, image_name="", + trace=False, evented=True, interrupts=True, + max_stack_depth=constants.MAX_LOOP_DEPTH): import time # === Initialize immutable variables @@ -49,6 +49,7 @@ self.startup_time = constants.CompileTime self.max_stack_depth = max_stack_depth self.evented = evented + self.interrupts = interrupts try: self.interrupt_counter_size = int(os.environ["SPY_ICS"]) except KeyError: @@ -56,7 +57,7 @@ # === Initialize mutable variables self.interrupt_check_counter = self.interrupt_counter_size - self.remaining_stack_depth = max_stack_depth + self.current_stack_depth = 0 self.next_wakeup_tick = 0 self.trace = trace self.trace_proxy = False @@ -65,7 +66,7 @@ # just a trampoline for the actual loop implemented in loop_bytecodes s_new_context = w_active_context.as_context_get_shadow(self.space) while True: - assert self.remaining_stack_depth == self.max_stack_depth + assert self.current_stack_depth == 0 # Need to save s_sender, loop_bytecodes will nil this on return s_sender = s_new_context.s_sender() try: @@ -85,7 +86,7 @@ except ProcessSwitch, p: if self.trace: print "====== Switched process from: %s" % s_new_context.short_str() - print "====== to: %s " % p.s_new_context.short_str() + print "====== to: %s " % p.s_new_context.short_str() s_new_context = p.s_new_context def loop_bytecodes(self, s_context, may_context_switch=True): @@ -116,33 +117,36 @@ else: 
s_context.push(nlr.value) - # This is just a wrapper around loop_bytecodes that handles the remaining_stack_depth mechanism + # This is just a wrapper around loop_bytecodes that handles the stack overflow protection mechanism def stack_frame(self, s_new_frame, may_context_switch=True): - if self.remaining_stack_depth <= 1: - raise StackOverflow(s_new_frame) - - self.remaining_stack_depth -= 1 + if self.max_stack_depth > 0: + if self.current_stack_depth >= self.max_stack_depth: + raise StackOverflow(s_new_frame) + + self.current_stack_depth += 1 try: self.loop_bytecodes(s_new_frame, may_context_switch) finally: - self.remaining_stack_depth += 1 - - def step(self, context): - bytecode = context.fetch_next_bytecode() - for entry in UNROLLING_BYTECODE_RANGES: - if len(entry) == 2: - bc, methname = entry - if bytecode == bc: - return getattr(context, methname)(self, bytecode) - else: - start, stop, methname = entry - if start <= bytecode <= stop: - return getattr(context, methname)(self, bytecode) - assert 0, "unreachable" - + self.current_stack_depth -= 1 + + def step(self, context): + bytecode = context.fetch_next_bytecode() + for entry in UNROLLING_BYTECODE_RANGES: + if len(entry) == 2: + bc, methname = entry + if bytecode == bc: + return getattr(context, methname)(self, bytecode) + else: + start, stop, methname = entry + if start <= bytecode <= stop: + return getattr(context, methname)(self, bytecode) + assert 0, "unreachable" + # ============== Methods for handling user interrupts ============== def jitted_check_for_interrupt(self, s_frame): + if not self.interrupts: + return # Normally, the tick counter is decremented by 1 for every message send. # Since we don't know how many messages are called during this trace, we # just decrement by 100th of the trace length (num of bytecodes). 
@@ -152,6 +156,8 @@ self.quick_check_for_interrupt(s_frame, decr_by) def quick_check_for_interrupt(self, s_frame, dec=1): + if not self.interrupts: + return self.interrupt_check_counter -= dec if self.interrupt_check_counter <= 0: self.interrupt_check_counter = self.interrupt_counter_size @@ -214,7 +220,7 @@ return self.interpret_toplevel(s_frame.w_self()) def padding(self, symbol=' '): - return symbol * (self.max_stack_depth - self.remaining_stack_depth) + return symbol * self.current_stack_depth class ReturnFromTopLevel(Exception): _attrs_ = ["object"] diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1017,7 +1017,7 @@ class StackTestInterpreter(TestInterpreter): def stack_frame(self, w_frame, may_interrupt=True): - stack_depth = self.max_stack_depth - self.remaining_stack_depth + stack_depth = self.current_stack_depth for i in range(stack_depth + 1): assert sys._getframe(4 + i * 6).f_code.co_name == 'loop_bytecodes' assert sys._getframe(5 + stack_depth * 6).f_code.co_name == 'loop' diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -6,7 +6,7 @@ from rpython.rlib import jit, rpath from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ - error, shadow, storage_statistics + error, shadow, storage_statistics, constants from spyvm.tool.analyseimage import create_image from spyvm.interpreter_proxy import VirtualMachine @@ -128,13 +128,14 @@ -r|--run [code string] -b|--benchmark [code string] -p|--poll_events + -ni|--no-interrupts + -d|--max-stack-depth [number, default %d, <= 0 disables stack protection] --strategy-log --strategy-stats - --strategy-stats-dot + --strategy-stats-dot --strategy-stats-details [image path, default: Squeak.image] - """ % argv[0] - + """ % (argv[0], constants.MAX_LOOP_DEPTH) def _arg_missing(argv, idx, arg): if len(argv) 
== idx + 1: @@ -152,6 +153,8 @@ stringarg = "" code = None as_benchmark = False + max_stack_depth = constants.MAX_LOOP_DEPTH + interrupts = True while idx < len(argv): arg = argv[idx] @@ -189,6 +192,12 @@ code = argv[idx + 1] as_benchmark = True idx += 1 + elif arg in ["-ni", "--no-interrupts"]: + interrupts = False + elif arg in ["-d", "--max-stack-depth"]: + _arg_missing(argv, idx, arg) + max_stack_depth = int(argv[idx + 1]) + idx += 1 elif arg == "--strategy-log": storage_statistics.activate_statistics(log=True) elif arg == "--strategy-stats": @@ -221,7 +230,9 @@ space = prebuilt_space image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) - interp = interpreter.Interpreter(space, image, image_name=path, trace=trace, evented=evented) + interp = interpreter.Interpreter(space, image, image_name=path, + trace=trace, evented=evented, + interrupts=interrupts, max_stack_depth=max_stack_depth) space.runtime_setup(argv[0]) result = 0 if benchmark is not None: From noreply at buildbot.pypy.org Mon Jul 28 10:11:28 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:28 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk interpreter-flags: Added trace message on StackOverflow. Message-ID: <20140728081128.709831C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: interpreter-flags Changeset: r991:7b251d7d382c Date: 2014-05-13 12:04 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/7b251d7d382c/ Log: Added trace message on StackOverflow. 
diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -73,6 +73,8 @@ self.loop_bytecodes(s_new_context) raise Exception("loop_bytecodes left without raising...") except StackOverflow, e: + if self.trace: + print "====== StackOverflow, contexts forced to heap at: %s" % e.s_context.short_str() s_new_context = e.s_context except Return, nlr: s_new_context = s_sender From noreply at buildbot.pypy.org Mon Jul 28 10:11:29 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:29 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-vref: Added vref refactoring: keeping a virtual back-reference to the sender as long as possible. Message-ID: <20140728081129.720931C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-vref Changeset: r992:3fa384225c32 Date: 2014-07-10 12:56 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3fa384225c32/ Log: Added vref refactoring: keeping a virtual back-reference to the sender as long as possible. TODO: Explore why this breaks performance; it should actually improve it. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -67,6 +67,9 @@ s_new_context = w_active_context.as_context_get_shadow(self.space) while True: assert self.current_stack_depth == 0 + # Need to save s_sender, loop_bytecodes will nil this on return + # Virtual references are not allowed here, and neither are "fresh" contexts (except for the toplevel one). + assert s_new_context.virtual_sender is jit.vref_None s_sender = s_new_context.s_sender() try: self.loop_bytecodes(s_new_context) @@ -119,19 +122,27 @@ # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame # and handles the stack overflow protection mechanism. 
def stack_frame(self, s_frame, s_sender, may_context_switch=True): + assert s_frame.virtual_sender is jit.vref_None try: - if s_frame._s_sender is None and s_sender is not None: - s_frame.store_s_sender(s_sender, raise_error=False) - + # Enter the context - store a virtual reference back to the sender + # Non-fresh contexts can happen, e.g. when activating a stored BlockContext. + # The same frame object must not pass through here recursively! + if s_frame.is_fresh() and s_sender is not None: + s_frame.virtual_sender = jit.virtual_ref(s_sender) + self.current_stack_depth += 1 if self.max_stack_depth > 0: if self.current_stack_depth >= self.max_stack_depth: raise StackOverflow(s_frame) - + # Now (continue to) execute the context bytecodes self.loop_bytecodes(s_frame, may_context_switch) finally: self.current_stack_depth -= 1 + # Cleanly leave the context. This will finish the virtual sender-reference, if + # it is still there, which can happen in case of ProcessSwitch or StackOverflow; + # in case of a Return, this will already be handled while unwinding the stack. 
+ s_frame.finish_virtual_sender(s_sender) def step(self, context): bytecode = context.fetch_next_bytecode() diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -607,13 +607,13 @@ class ContextPartShadow(AbstractRedirectingShadow): __metaclass__ = extendabletype - _attrs_ = ['_s_sender', + _attrs_ = ['direct_sender', 'virtual_sender', '_pc', '_temps_and_stack', '_stack_ptr', 'instances_w'] repr_classname = "ContextPartShadow" _virtualizable_ = [ - '_s_sender', + 'direct_sender', 'virtual_sender', "_pc", "_temps_and_stack[*]", "_stack_ptr", "_w_self", "_w_self_size" ] @@ -622,7 +622,8 @@ # Initialization def __init__(self, space, w_self): - self._s_sender = None + self.direct_sender = None + self.virtual_sender = jit.vref_None AbstractRedirectingShadow.__init__(self, space, w_self) self.instances_w = {} @@ -691,9 +692,25 @@ raise error.WrapperException("Index in context out of bounds") # === Sender === + # There are two fields for the sender (virtual and direct). Only one of them is can be set at a time. + # As long as the frame object is virtualized, using the virtual reference should increase performance. + # As soon as a frame object is forced to the heap, the direct reference must be used. 
+ + def is_fresh(self): + return self.direct_sender is None and self.virtual_sender is jit.vref_None + + def finish_virtual_sender(self, s_sender): + if self.virtual_sender is not jit.vref_None: + if self.pc() != -1: + # stack is unrolling, but this frame was not + # marked_returned: it is an escaped frame + sender = self.virtual_sender() + self.direct_sender = sender + jit.virtual_ref_finish(self.virtual_sender, s_sender) + self.virtual_sender = jit.vref_None def store_s_sender(self, s_sender, raise_error=True): - self._s_sender = s_sender + self.direct_sender = s_sender if raise_error: raise error.SenderChainManipulation(self) @@ -704,7 +721,11 @@ return sender.w_self() def s_sender(self): - return self._s_sender + if self.direct_sender: + return self.direct_sender + else: + result = self.virtual_sender() + return result # === Stack Pointer === From noreply at buildbot.pypy.org Mon Jul 28 10:11:30 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:30 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state-v3: Calling stack_frame from loop() Message-ID: <20140728081130.6CC6C1C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state-v3 Changeset: r993:da722243ee58 Date: 2014-07-26 23:38 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/da722243ee58/ Log: Calling stack_frame from loop() diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -98,7 +98,7 @@ while True: s_sender = s_new_context.s_sender() try: - self.loop_bytecodes(s_new_context) + self.stack_frame(s_new_context, None) raise Exception("loop_bytecodes left without raising...") except ContextSwitchException, e: if self.is_tracing() or self.trace_important: @@ -114,6 +114,23 @@ s_new_context = s_sender s_new_context.push(nlr.value) + # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame + # and handles the stack overflow protection 
mechanism. + def stack_frame(self, s_frame, s_sender, may_context_switch=True): + try: + if self.is_tracing(): + self.stack_depth += 1 + if s_frame._s_sender is None and s_sender is not None: + s_frame.store_s_sender(s_sender, raise_error=False) + # Now (continue to) execute the context bytecodes + self.loop_bytecodes(s_frame, may_context_switch) + except rstackovf.StackOverflow: + rstackovf.check_stack_overflow() + raise StackOverflow(s_frame) + finally: + if self.is_tracing(): + self.stack_depth -= 1 + def loop_bytecodes(self, s_context, may_context_switch=True): old_pc = 0 if not jit.we_are_jitted() and may_context_switch: @@ -145,23 +162,6 @@ s_context._activate_unwind_context(self) raise nlr - # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame - # and handles the stack overflow protection mechanism. - def stack_frame(self, s_frame, s_sender, may_context_switch=True): - try: - if self.is_tracing(): - self.stack_depth += 1 - if s_frame._s_sender is None and s_sender is not None: - s_frame.store_s_sender(s_sender, raise_error=False) - # Now (continue to) execute the context bytecodes - self.loop_bytecodes(s_frame, may_context_switch) - except rstackovf.StackOverflow: - rstackovf.check_stack_overflow() - raise StackOverflow(s_frame) - finally: - if self.is_tracing(): - self.stack_depth -= 1 - def step(self, context): bytecode = context.fetch_next_bytecode() for entry in UNROLLING_BYTECODE_RANGES: From noreply at buildbot.pypy.org Mon Jul 28 10:11:31 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:31 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state-v3: Added LocalReturn. this is where it breaks. Message-ID: <20140728081131.6870E1C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state-v3 Changeset: r994:3bd27f741c83 Date: 2014-07-27 00:25 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3bd27f741c83/ Log: Added LocalReturn. 
this is where it breaks. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -12,10 +12,15 @@ class Return(Exception): _attrs_ = ["value", "s_target_context", "is_local"] - def __init__(self, s_target_context, w_result): + def __init__(self, s_target_context, w_result, is_local): self.value = w_result self.s_target_context = s_target_context - self.is_local = False + self.is_local = is_local + +class LocalReturn(Exception): + _attrs_ = ["value"] + def __init__(self, value): + self.value = value class ContextSwitchException(Exception): """General Exception that causes the interpreter to leave @@ -104,14 +109,16 @@ if self.is_tracing() or self.trace_important: e.print_trace(s_new_context) s_new_context = e.s_new_context + except LocalReturn, nlr: + s_new_context = s_sender + s_new_context.push(nlr.value) except Return, nlr: - assert nlr.s_target_context or nlr.is_local + assert nlr.s_target_context and not nlr.is_local s_new_context = s_sender - if not nlr.is_local: - while s_new_context is not nlr.s_target_context: - s_sender = s_new_context.s_sender() - s_new_context._activate_unwind_context(self) - s_new_context = s_sender + while s_new_context is not nlr.s_target_context: + s_sender = s_new_context.s_sender() + s_new_context._activate_unwind_context(self) + s_new_context = s_sender s_new_context.push(nlr.value) # This is a wrapper around loop_bytecodes that cleanly enters/leaves the frame @@ -124,6 +131,12 @@ s_frame.store_s_sender(s_sender, raise_error=False) # Now (continue to) execute the context bytecodes self.loop_bytecodes(s_frame, may_context_switch) + except Return, ret: + s_frame._activate_unwind_context(self) + if ret.s_target_context is s_sender or ret.is_local: + raise LocalReturn(ret.value) + else: + raise ret except rstackovf.StackOverflow: rstackovf.check_stack_overflow() raise StackOverflow(s_frame) @@ -151,16 +164,8 @@ s_context=s_context) try: self.step(s_context) - except Return, 
nlr: - if nlr.s_target_context is s_context or nlr.is_local: - s_context.push(nlr.value) - else: - if nlr.s_target_context is None: - # This is the case where we are returning to our sender. - # Mark the return as local, so our sender will take it - nlr.is_local = True - s_context._activate_unwind_context(self) - raise nlr + except LocalReturn, ret: + s_context.push(ret.value) def step(self, context): bytecode = context.fetch_next_bytecode() diff --git a/spyvm/interpreter_bytecodes.py b/spyvm/interpreter_bytecodes.py --- a/spyvm/interpreter_bytecodes.py +++ b/spyvm/interpreter_bytecodes.py @@ -394,8 +394,10 @@ # it will find the sender as a local, and we don't have to # force the reference s_return_to = None + is_local = True return_from_top = self.s_sender() is None else: + is_local = False s_return_to = self.s_home().s_sender() return_from_top = s_return_to is None @@ -405,7 +407,7 @@ raise ReturnFromTopLevel(return_value) else: from spyvm.interpreter import Return - raise Return(s_return_to, return_value) + raise Return(s_return_to, return_value, is_local) # ====== Send/Return bytecodes ====== @@ -508,13 +510,11 @@ if self.gettemp(1).is_nil(self.space): self.settemp(1, self.space.w_true) # mark unwound self.push(self.gettemp(0)) # push the first argument - from spyvm.interpreter import Return + from spyvm.interpreter import LocalReturn try: self.bytecodePrimValue(interp, 0) - except Return, nlr: - assert nlr.s_target_context or nlr.is_local - if self is not nlr.s_target_context and not nlr.is_local: - raise nlr + except LocalReturn: + pass # Ignore local return value of ensure block. 
finally: self.mark_returned() From noreply at buildbot.pypy.org Mon Jul 28 10:11:32 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:32 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state-v3: Message-ID: <20140728081132.634C41C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state-v3 Changeset: r995:a334c37ae996 Date: 2014-07-27 12:20 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a334c37ae996/ Log: From noreply at buildbot.pypy.org Mon Jul 28 10:11:33 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:33 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state: Message-ID: <20140728081133.597BB1C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state Changeset: r996:7c0f52366ec2 Date: 2014-07-27 12:21 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/7c0f52366ec2/ Log: From noreply at buildbot.pypy.org Mon Jul 28 10:11:34 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:34 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-display-refactoring: Message-ID: <20140728081134.54F421C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-display-refactoring Changeset: r997:f899ce5a63b0 Date: 2014-07-27 12:21 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f899ce5a63b0/ Log: From noreply at buildbot.pypy.org Mon Jul 28 10:11:35 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:35 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-interpreter-refactoring: Message-ID: <20140728081135.509471C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-interpreter-refactoring Changeset: r998:63c0ad701980 Date: 2014-07-27 12:21 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/63c0ad701980/ Log: From noreply at buildbot.pypy.org Mon Jul 28 10:11:36 2014 From: 
noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:36 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-ignoringStackOverflow: Message-ID: <20140728081136.4FB951C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-ignoringStackOverflow Changeset: r999:ae4fc5aa360d Date: 2014-07-28 09:42 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ae4fc5aa360d/ Log: From noreply at buildbot.pypy.org Mon Jul 28 10:11:37 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:37 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk strategies3: Message-ID: <20140728081137.4BDD81C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies3 Changeset: r1000:d1bb8ae6a5d4 Date: 2014-07-28 09:46 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d1bb8ae6a5d4/ Log: From noreply at buildbot.pypy.org Mon Jul 28 10:11:38 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:38 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk strategies: Message-ID: <20140728081138.475881C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies Changeset: r1001:f23c08f44b8b Date: 2014-07-28 09:47 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f23c08f44b8b/ Log: From noreply at buildbot.pypy.org Mon Jul 28 10:11:39 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:39 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-context-state-v2: Message-ID: <20140728081139.42BAC1C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-context-state-v2 Changeset: r1002:e4a9aefa19ac Date: 2014-07-28 09:47 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/e4a9aefa19ac/ Log: From noreply at buildbot.pypy.org Mon Jul 28 10:11:40 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:40 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk strategies: Message-ID: 
<20140728081140.40D631C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies Changeset: r1003:e5751c8a4a39 Date: 2014-07-28 09:47 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/e5751c8a4a39/ Log: From noreply at buildbot.pypy.org Mon Jul 28 10:11:41 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:41 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk strategies-bitmap: Message-ID: <20140728081141.3B5D61C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-bitmap Changeset: r1004:35f454029ab0 Date: 2014-07-28 09:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/35f454029ab0/ Log: From noreply at buildbot.pypy.org Mon Jul 28 10:11:42 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:42 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk strategies-tagging: Message-ID: <20140728081142.3312D1C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-tagging Changeset: r1005:3698fc95dde7 Date: 2014-07-28 09:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3698fc95dde7/ Log: From noreply at buildbot.pypy.org Mon Jul 28 10:11:43 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Mon, 28 Jul 2014 10:11:43 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk strategies-inlining: Message-ID: <20140728081143.371881C06BC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: strategies-inlining Changeset: r1006:6c640c6a7724 Date: 2014-07-28 09:52 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/6c640c6a7724/ Log: From noreply at buildbot.pypy.org Mon Jul 28 13:20:06 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 28 Jul 2014 13:20:06 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: RPython docs: Remove references to removed CLI backend. 
Message-ID: <20140728112006.66EEE1C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72589:930dd91754b7 Date: 2014-07-28 12:46 +0200 http://bitbucket.org/pypy/pypy/changeset/930dd91754b7/ Log: RPython docs: Remove references to removed CLI backend. diff --git a/rpython/doc/cli-backend.rst b/rpython/doc/cli-backend.rst deleted file mode 100644 --- a/rpython/doc/cli-backend.rst +++ /dev/null @@ -1,458 +0,0 @@ -.. _gencli: - -The CLI backend -=============== - -The goal of GenCLI is to compile RPython programs to the CLI virtual -machine. - - -Target environment and language -------------------------------- - -The target of GenCLI is the Common Language Infrastructure environment -as defined by the `Standard Ecma 335`_. - -While in an ideal world we might suppose GenCLI to run fine with -every implementation conforming to that standard, we know the world we -live in is far from ideal, so extra efforts can be needed to maintain -compatibility with more than one implementation. - -At the moment of writing the two most popular implementations of the -standard are supported: Microsoft Common Language Runtime (CLR) and -Mono. - -Then we have to choose how to generate the real executables. There are -two main alternatives: generating source files in some high level -language (such as C#) or generating assembly level code in -Intermediate Language (IL). - -The IL approach is much faster during the code generation -phase, because it doesn't need to call a compiler. By contrast the -high level approach has two main advantages: - - - the code generation part could be easier because the target - language supports high level control structures such as - structured loops; - - - the generated executables take advantage of compiler's - optimizations. 
- -In reality the first point is not an advantage in the PyPy context, -because the :ref:`flow graph ` we start from is quite low level and Python -loops are already expressed in terms of branches (i.e., gotos). - -About the compiler optimizations we must remember that the flow graph -we receive from earlier stages is already optimized: PyPy implements -a number of optimizations such a constant propagation and -dead code removal, so it's not obvious if the compiler could -do more. - -Moreover by emitting IL instruction we are not constrained to rely on -compiler choices but can directly choose how to map CLI opcodes: since -the backend often know more than the compiler about the context, we -might expect to produce more efficient code by selecting the most -appropriate instruction; e.g., we can check for arithmetic overflow -only when strictly necessary. - -The last but not least reason for choosing the low level approach is -flexibility in how to get an executable starting from the IL code we -generate: - - - write IL code to a file, then call the ilasm assembler; - - - directly generate code on the fly by accessing the facilities - exposed by the System.Reflection.Emit API. - -.. _Standard Ecma 335: http://www.ecma-international.org/publications/standards/Ecma-335.htm - - -Handling platform differences ------------------------------ - -Since our goal is to support both Microsoft CLR we have to handle the -differences between the twos; in particular the main differences are -in the name of the helper tools we need to call: - -=============== ======== ====== -Tool CLR Mono -=============== ======== ====== -IL assembler ilasm ilasm2 -C# compiler csc gmcs -Runtime ... mono -=============== ======== ====== - -The code that handles these differences is located in the sdk.py -module: it defines an abstract class which exposes some methods -returning the name of the helpers and one subclass for each of the two -supported platforms. 
- -Since Microsoft ``ilasm`` is not capable of compiling the PyPy -standard interpreter due to its size, on Windows machines we also look -for an existing Mono installation: if present, we use CLR for -everything except the assembling phase, for which we use Mono's -``ilasm2``. - - -Targeting the CLI Virtual Machine ---------------------------------- - -In order to write a CLI backend we have to take a number of decisions. -First, we have to choose the typesystem to use: given that CLI -natively supports primitives like classes and instances, -ootypesystem is the most natural choice. - -Once the typesystem has been chosen there is a number of steps we have -to do for completing the backend: - - - map ootypesystem's types to CLI Common Type System's - types; - - - map ootypesystem's low level operation to CLI instructions; - - - map Python exceptions to CLI exceptions; - - - write a code generator that translates a flow graph - into a list of CLI instructions; - - - write a class generator that translates ootypesystem - classes into CLI classes. - - -Mapping primitive types -~~~~~~~~~~~~~~~~~~~~~~~ - -:doc:`rtyper` give us a flow graph annotated with types belonging to -ootypesystem: in order to produce CLI code we need to translate these -types into their Common Type System equivalents. - -For numeric types the conversion is straightforward, since -there is a one-to-one mapping between the two typesystems, so that -e.g. Float maps to float64. - -For character types the choice is more difficult: RPython has two -distinct types for plain ASCII and Unicode characters (named UniChar), -while .NET only supports Unicode with the char type. 
There are at -least two ways to map plain Char to CTS: - - - map UniChar to char, thus maintaining the original distinction - between the two types: this has the advantage of being a - one-to-one translation, but has the disadvantage that RPython - strings will not be recognized as .NET strings, since they only - would be sequences of bytes; - - - map both char, so that Python strings will be treated as strings - also by .NET: in this case there could be problems with existing - Python modules that use strings as sequences of byte, such as the - built-in struct module, so we need to pay special attention. - -We think that mapping Python strings to .NET strings is -fundamental, so we chose the second option. - - -Mapping built-in types -~~~~~~~~~~~~~~~~~~~~~~ - -As we saw in section ootypesystem defines a set of types that take -advantage of built-in types offered by the platform. - -For the sake of simplicity we decided to write wrappers -around .NET classes in order to match the signatures required by -pypylib.dll: - -=================== =========================================== -ootype CLI -=================== =========================================== -String System.String -StringBuilder System.Text.StringBuilder -List System.Collections.Generic.List -Dict System.Collections.Generic.Dictionary -CustomDict pypy.runtime.Dict -DictItemsIterator pypy.runtime.DictItemsIterator -=================== =========================================== - -Wrappers exploit inheritance for wrapping the original classes, so, -for example, pypy.runtime.List is a subclass of -System.Collections.Generic.List that provides methods whose names -match those found in the _GENERIC_METHODS of ootype.List - -The only exception to this rule is the String class, which is not -wrapped since in .NET we can not subclass System.String. 
Instead, we -provide a bunch of static methods in pypylib.dll that implement the -methods declared by ootype.String._GENERIC_METHODS, then we call them -by explicitly passing the string object in the argument list. - - -Mapping instructions -~~~~~~~~~~~~~~~~~~~~ - -PyPy's low level operations are expressed in Static Single Information -(SSI) form, such as this:: - - v2 = int_add(v0, v1) - -By contrast the CLI virtual machine is stack based, which means the -each operation pops its arguments from the top of the stacks and -pushes its result there. The most straightforward way to translate SSI -operations into stack based operations is to explicitly load the -arguments and store the result into the appropriate places:: - - LOAD v0 - LOAD v1 - int_add - STORE v2 - -The code produced works correctly but has some inefficiency issues that -can be addressed during the optimization phase. - -The CLI Virtual Machine is fairly expressive, so the conversion -between PyPy's low level operations and CLI instruction is relatively -simple: many operations maps directly to the corresponding -instruction, e.g int_add and sub. - -By contrast some instructions do not have a direct correspondent and -have to be rendered as a sequence of CLI instructions: this is the -case of the "less-equal" and "greater-equal" family of instructions, -that are rendered as "greater" or "less" followed by a boolean "not", -respectively. - -Finally, there are some instructions that cannot be rendered directly -without increasing the complexity of the code generator, such as -int_abs (which returns the absolute value of its argument). These -operations are translated by calling some helper function written in -C#. - -The code that implements the mapping is in the modules opcodes.py. 
- - -Mapping exceptions -~~~~~~~~~~~~~~~~~~ - -Both RPython and CLI have their own set of exception classes: some of -these are pretty similar; e.g., we have OverflowError, -ZeroDivisionError and IndexError on the first side and -OverflowException, DivideByZeroException and IndexOutOfRangeException -on the other side. - -The first attempt was to map RPython classes to their corresponding -CLI ones: this worked for simple cases, but it would have triggered -subtle bugs in more complex ones, because the two exception -hierarchies don't completely overlap. - -At the moment we've chosen to build an RPython exception hierarchy -completely independent from the CLI one, but this means that we can't -rely on exceptions raised by built-in operations. The currently -implemented solution is to do an exception translation on-the-fly. - -As an example consider the RPython int_add_ovf operation, that sums -two integers and raises an OverflowError exception in case of -overflow. For implementing it we can use the built-in add.ovf CLI -instruction that raises System.OverflowException when the result -overflows, catch that exception and throw a new one:: - - .try - { - ldarg 'x_0' - ldarg 'y_0' - add.ovf - stloc 'v1' - leave __check_block_2 - } - catch [mscorlib]System.OverflowException - { - newobj instance void class OverflowError::.ctor() - throw - } - - -Translating flow graphs -~~~~~~~~~~~~~~~~~~~~~~~ - -As we saw previously in PyPy function and method bodies are -represented by flow graphs that we need to translate CLI IL code. Flow -graphs are expressed in a format that is very suitable for being -translated to low level code, so that phase is quite straightforward, -though the code is a bit involved because we need to take care of three -different types of blocks. - -The code doing this work is located in the Function.render -method in the file function.py. 
- -First of all it searches for variable names and types used by -each block; once they are collected it emits a .local IL -statement used for indicating the virtual machine the number and type -of local variables used. - -Then it sequentially renders all blocks in the graph, starting from the -start block; special care is taken for the return block which is -always rendered at last to meet CLI requirements. - -Each block starts with an unique label that is used for jumping -across, followed by the low level instructions the block is composed -of; finally there is some code that jumps to the appropriate next -block. - -Conditional and unconditional jumps are rendered with their -corresponding IL instructions: brtrue, brfalse. - -Blocks that needs to catch exceptions use the native facilities -offered by the CLI virtual machine: the entire block is surrounded by -a .try statement followed by as many catch as needed: each catching -sub-block then branches to the appropriate block:: - - - # RPython - try: - # block0 - ... - except ValueError: - # block1 - ... - except TypeError: - # block2 - ... - - // IL - block0: - .try { - ... - leave block3 - } - catch ValueError { - ... - leave block1 - } - catch TypeError { - ... - leave block2 - } - block1: - ... - br block3 - block2: - ... - br block3 - block3: - ... - -There is also an experimental feature that makes GenCLI to use its own -exception handling mechanism instead of relying on the .NET -one. Surprisingly enough, benchmarks are about 40% faster with our own -exception handling machinery. - - -Translating classes -~~~~~~~~~~~~~~~~~~~ - -As we saw previously, the semantic of ootypesystem classes -is very similar to the .NET one, so the translation is mostly -straightforward. - -The related code is located in the module class\_.py. 
Rendered classes -are composed of four parts: - - - fields; - - user defined methods; - - default constructor; - - the ToString method, mainly for testing purposes - -Since ootype implicitly assumes all method calls to be late bound, as -an optimization before rendering the classes we search for methods -that are not overridden in subclasses, and declare as "virtual" only -the one that needs to. - -The constructor does nothing more than calling the base class -constructor and initializing class fields to their default value. - -Inheritance is straightforward too, as it is natively supported by -CLI. The only noticeable thing is that we map ootypesystem's ROOT -class to the CLI equivalent System.Object. - - -The Runtime Environment -~~~~~~~~~~~~~~~~~~~~~~~ - -The runtime environment is a collection of helper classes and -functions used and referenced by many of the GenCLI submodules. It is -written in C#, compiled to a DLL (Dynamic Link Library), then linked -to generated code at compile-time. - -The DLL is called pypylib and is composed of three parts: - - - a set of helper functions used to implements complex RPython - low-level instructions such as runtimenew and ooparse_int; - - - a set of helper classes wrapping built-in types - - - a set of helpers used by the test framework - - -The first two parts are contained in the pypy.runtime namespace, while -the third is in the pypy.test one. - - -Testing GenCLI --------------- - -As the rest of PyPy, GenCLI is a test-driven project: there is at -least one unit test for almost each single feature of the -backend. This development methodology allowed us to early discover -many subtle bugs and to do some big refactoring of the code with the -confidence not to break anything. 
- -The core of the testing framework is in the module -rpython.translator.cli.test.runtest; one of the most important function -of this module is compile_function(): it takes a Python function, -compiles it to CLI and returns a Python object that runs the just -created executable when called. - -This way we can test GenCLI generated code just as if it were a simple -Python function; we can also directly run the generated executable, -whose default name is main.exe, from a shell: the function parameters -are passed as command line arguments, and the return value is printed -on the standard output:: - - # Python source: foo.py - from rpython.translator.cli.test.runtest import compile_function - - def foo(x, y): - return x+y, x*y - - f = compile_function(foo, [int, int]) - assert f(3, 4) == (7, 12) - - - # shell - $ mono main.exe 3 4 - (7, 12) - -GenCLI supports only few RPython types as parameters: int, r_uint, -r_longlong, r_ulonglong, bool, float and one-length strings (i.e., -chars). By contrast, most types are fine for being returned: these -include all primitive types, list, tuples and instances. - - -Installing Python for .NET on Linux ------------------------------------ - -With the CLI backend, you can access .NET libraries from RPython; -programs using .NET libraries will always run when translated, but you -might also want to test them on top of CPython. - -To do so, you can install `Python for .NET`_. Unfortunately, it does -not work out of the box under Linux. - -To make it work, download and unpack the source package of Python -for .NET; the only version tested with PyPy is the 1.0-rc2, but it -might work also with others. Then, you need to create a file named -Python.Runtime.dll.config at the root of the unpacked archive; put the -following lines inside the file (assuming you are using Python 2.7):: - - - - - -The installation should be complete now. To run Python for .NET, -simply type ``mono python.exe``. - -.. 
_Python for .NET: http://pythonnet.sourceforge.net/ diff --git a/rpython/doc/dir-reference.rst b/rpython/doc/dir-reference.rst --- a/rpython/doc/dir-reference.rst +++ b/rpython/doc/dir-reference.rst @@ -39,9 +39,6 @@ :source:`rpython/translator/c/` the :ref:`GenC backend `, producing C code from an RPython program (generally via the :doc:`rtyper `) -:source:`rpython/translator/cli/` the :doc:`CLI backend ` for `.NET`_ - (Microsoft CLR or Mono_) - :source:`rpython/translator/jvm/` the Java backend :source:`rpython/translator/tool/` helper tools for translation diff --git a/rpython/doc/index.rst b/rpython/doc/index.rst --- a/rpython/doc/index.rst +++ b/rpython/doc/index.rst @@ -28,7 +28,6 @@ translation rtyper garbage_collection - cli-backend windows From noreply at buildbot.pypy.org Mon Jul 28 13:20:07 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 28 Jul 2014 13:20:07 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: RPython docs: Add hidden toctree in jit/index.rst. Message-ID: <20140728112007.B44471C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72590:c54f119015bc Date: 2014-07-28 13:17 +0200 http://bitbucket.org/pypy/pypy/changeset/c54f119015bc/ Log: RPython docs: Add hidden toctree in jit/index.rst. diff --git a/rpython/doc/jit/index.rst b/rpython/doc/jit/index.rst --- a/rpython/doc/jit/index.rst +++ b/rpython/doc/jit/index.rst @@ -16,11 +16,16 @@ Content ------- +.. toctree:: + :hidden: + + overview + pyjitpl5 + virtualizable + - :doc:`Overview `: motivating our approach - :doc:`Notes ` about the current work in PyPy -- :doc:`Hooks ` debugging facilities available to a python programmer - - :doc:`Virtulizable ` how virtualizables work and what they are (in other words how to make frames more efficient). 
From noreply at buildbot.pypy.org Mon Jul 28 13:20:08 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 28 Jul 2014 13:20:08 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: RPython docs: split toctree on index page into sections (general, user documentation, writing your own interpreter, internals). Message-ID: <20140728112008.DAD1C1C024A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72591:7a3cd5964521 Date: 2014-07-28 13:19 +0200 http://bitbucket.org/pypy/pypy/changeset/7a3cd5964521/ Log: RPython docs: split toctree on index page into sections (general, user documentation, writing your own interpreter, internals). diff --git a/rpython/doc/index.rst b/rpython/doc/index.rst --- a/rpython/doc/index.rst +++ b/rpython/doc/index.rst @@ -14,21 +14,53 @@ have a JIT in the first place. -Table of Contents +General +------- + +.. toctree:: + :maxdepth: 1 + + architecture + faq + + +User Documentation +------------------ + +These documents are mainly interesting for users of interpreters written in +RPython. + +.. toctree:: + :maxdepth: 1 + + arm + windows + + +Writing your own interpreter in RPython +--------------------------------------- + +.. toctree:: + :maxdepth: 1 + + rpython + rlib + rffi + + +RPython internals ----------------- .. 
toctree:: :maxdepth: 1 + glossary getting-started - faq - rpython - rlib - rffi + dir-reference + jit/index translation rtyper garbage_collection - windows Indices and tables From noreply at buildbot.pypy.org Mon Jul 28 20:59:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Jul 2014 20:59:53 +0200 (CEST) Subject: [pypy-commit] cffi default: Merged in leethargo/cffi/parse_constant_plus (pull request #43) Message-ID: <20140728185953.4AC4C1D2CF8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1552:8d650b674140 Date: 2014-07-28 20:59 +0200 http://bitbucket.org/cffi/cffi/changeset/8d650b674140/ Log: Merged in leethargo/cffi/parse_constant_plus (pull request #43) add parsing of constant with unary + diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -532,11 +532,15 @@ def _parse_constant(self, exprnode, partial_length_ok=False): # for now, limited to expressions that are an immediate number - # or negative number + # or positive/negative number if isinstance(exprnode, pycparser.c_ast.Constant): return int(exprnode.value, 0) # if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + exprnode.op == '+'): + return self._parse_constant(exprnode.expr) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) # load previously defined int constant diff --git a/testing/test_parsing.py b/testing/test_parsing.py --- a/testing/test_parsing.py +++ b/testing/test_parsing.py @@ -288,3 +288,14 @@ decl = ast.children()[0][1] node = decl.type assert p._is_constant_globalvar(node) == expected_output + +def test_enum(): + ffi = FFI() + ffi.cdef(""" + enum Enum { POS = +1, TWO = 2, NIL = 0, NEG = -1}; + """) + C = ffi.dlopen(None) + assert C.POS == 1 + assert C.TWO == 2 + assert C.NIL == 0 + assert C.NEG == -1 From noreply at buildbot.pypy.org Mon Jul 28 20:59:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 28 Jul 2014 20:59:58 +0200 
(CEST) Subject: [pypy-commit] cffi parse_constant_plus: Close branch parse_constant_plus Message-ID: <20140728185958.8A7C41D2CF8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: parse_constant_plus Changeset: r1553:b8540d978fe0 Date: 2014-07-28 20:59 +0200 http://bitbucket.org/cffi/cffi/changeset/b8540d978fe0/ Log: Close branch parse_constant_plus From noreply at buildbot.pypy.org Mon Jul 28 21:08:10 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 28 Jul 2014 21:08:10 +0200 (CEST) Subject: [pypy-commit] pypy default: Change variable name in test to be slightly more consistent. Message-ID: <20140728190810.A1BAF1D2CF8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r72592:79b2a6eb9c40 Date: 2014-07-28 18:03 +0200 http://bitbucket.org/pypy/pypy/changeset/79b2a6eb9c40/ Log: Change variable name in test to be slightly more consistent. diff --git a/rpython/flowspace/test/test_model.py b/rpython/flowspace/test/test_model.py --- a/rpython/flowspace/test/test_model.py +++ b/rpython/flowspace/test/test_model.py @@ -13,7 +13,7 @@ class pieces: """ The manually-built graph corresponding to the sample_function(). 
""" - i = Variable("i") + i0 = Variable("i0") i1 = Variable("i1") i2 = Variable("i2") i3 = Variable("i3") @@ -25,12 +25,12 @@ conditionop = SpaceOperation("gt", [i1, Constant(0)], conditionres) addop = SpaceOperation("add", [sum2, i2], sum3) decop = SpaceOperation("sub", [i2, Constant(1)], i3) - startblock = Block([i]) + startblock = Block([i0]) headerblock = Block([i1, sum1]) whileblock = Block([i2, sum2]) graph = FunctionGraph("f", startblock) - startblock.closeblock(Link([i, Constant(0)], headerblock)) + startblock.closeblock(Link([i0, Constant(0)], headerblock)) headerblock.operations.append(conditionop) headerblock.exitswitch = conditionres headerblock.closeblock(Link([sum1], graph.returnblock, False), @@ -55,7 +55,7 @@ def test_graphattributes(): assert graph.startblock is pieces.startblock assert graph.returnblock is pieces.headerblock.exits[0].target - assert graph.getargs() == [pieces.i] + assert graph.getargs() == [pieces.i0] assert [graph.getreturnvar()] == graph.returnblock.inputargs assert graph.source == inspect.getsource(sample_function) From noreply at buildbot.pypy.org Tue Jul 29 01:43:40 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 29 Jul 2014 01:43:40 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Use the latest version of _posixsubprocess.c from CPython. Message-ID: <20140728234340.B2B161C13AE@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72593:67ef90c16a52 Date: 2014-07-27 19:19 +0200 http://bitbucket.org/pypy/pypy/changeset/67ef90c16a52/ Log: Use the latest version of _posixsubprocess.c from CPython. Fix tests. diff --git a/pypy/module/_posixsubprocess/_posixsubprocess.c b/pypy/module/_posixsubprocess/_posixsubprocess.c --- a/pypy/module/_posixsubprocess/_posixsubprocess.c +++ b/pypy/module/_posixsubprocess/_posixsubprocess.c @@ -67,7 +67,7 @@ * that properly supports /dev/fd. 
*/ static int -_is_fdescfs_mounted_on_dev_fd() +_is_fdescfs_mounted_on_dev_fd(void) { struct stat dev_stat; struct stat dev_fd_stat; @@ -142,17 +142,11 @@ * This structure is very old and stable: It will not change unless the kernel * chooses to break compatibility with all existing binaries. Highly Unlikely. */ -struct linux_dirent { -#if defined(__x86_64__) && defined(__ILP32__) - /* Support the wacky x32 ABI (fake 32-bit userspace speaking to x86_64 - * kernel interfaces) - https://sites.google.com/site/x32abi/ */ +struct linux_dirent64 { unsigned long long d_ino; - unsigned long long d_off; -#else - unsigned long d_ino; /* Inode number */ - unsigned long d_off; /* Offset to next linux_dirent */ -#endif + long long d_off; unsigned short d_reclen; /* Length of this linux_dirent */ + unsigned char d_type; char d_name[256]; /* Filename (null-terminated) */ }; @@ -196,16 +190,16 @@ num_fds_to_keep); return; } else { - char buffer[sizeof(struct linux_dirent)]; + char buffer[sizeof(struct linux_dirent64)]; int bytes; - while ((bytes = syscall(SYS_getdents, fd_dir_fd, - (struct linux_dirent *)buffer, + while ((bytes = syscall(SYS_getdents64, fd_dir_fd, + (struct linux_dirent64 *)buffer, sizeof(buffer))) > 0) { - struct linux_dirent *entry; + struct linux_dirent64 *entry; int offset; for (offset = 0; offset < bytes; offset += entry->d_reclen) { int fd; - entry = (struct linux_dirent *)(buffer + offset); + entry = (struct linux_dirent64 *)(buffer + offset); if ((fd = _pos_int_from_ascii(entry->d_name)) < 0) continue; /* Not a number. */ if (fd != fd_dir_fd && fd >= start_fd && fd < end_fd && @@ -299,6 +293,7 @@ #endif /* else NOT (defined(__linux__) && defined(HAVE_SYS_SYSCALL_H)) */ + /* * This function is code executed in the child process immediately after fork * to set things up and call exec(). 
@@ -389,17 +384,6 @@ POSIX_CALL(close(errwrite)); } - if (close_fds) { - int local_max_fd = max_fd; -#if defined(__NetBSD__) - local_max_fd = fcntl(0, F_MAXFD); - if (local_max_fd < 0) - local_max_fd = max_fd; -#endif - /* TODO HP-UX could use pstat_getproc() if anyone cares about it. */ - _close_open_fd_range(3, local_max_fd, py_fds_to_keep, num_fds_to_keep); - } - if (cwd) POSIX_CALL(chdir(cwd)); @@ -428,6 +412,18 @@ } } + /* close FDs after executing preexec_fn, which might open FDs */ + if (close_fds) { + int local_max_fd = max_fd; +#if defined(__NetBSD__) + local_max_fd = fcntl(0, F_MAXFD); + if (local_max_fd < 0) + local_max_fd = max_fd; +#endif + /* TODO HP-UX could use pstat_getproc() if anyone cares about it. */ + _close_open_fd_range(3, local_max_fd, py_fds_to_keep, num_fds_to_keep); + } + /* This loop matches the Lib/os.py _execvpe()'s PATH search when */ /* given the executable_list generated by Lib/subprocess.py. */ saved_errno = 0; @@ -478,20 +474,18 @@ int pypy_subprocess_cloexec_pipe(int *fds) { - int res; + int res, saved_errno; + long oldflags; #ifdef HAVE_PIPE2 Py_BEGIN_ALLOW_THREADS res = pipe2(fds, O_CLOEXEC); Py_END_ALLOW_THREADS if (res != 0 && errno == ENOSYS) { - { #endif /* We hold the GIL which offers some protection from other code calling * fork() before the CLOEXEC flags have been set but we can't guarantee * anything without pipe2(). */ - long oldflags; - res = pipe(fds); if (res == 0) { @@ -508,9 +502,47 @@ if (res == 0) res = fcntl(fds[1], F_SETFD, oldflags | FD_CLOEXEC); #ifdef HAVE_PIPE2 - } } #endif + if (res == 0 && fds[1] < 3) { + /* We always want the write end of the pipe to avoid fds 0, 1 and 2 + * as our child may claim those for stdio connections. 
*/ + int write_fd = fds[1]; + int fds_to_close[3] = {-1, -1, -1}; + int fds_to_close_idx = 0; +#ifdef F_DUPFD_CLOEXEC + fds_to_close[fds_to_close_idx++] = write_fd; + write_fd = fcntl(write_fd, F_DUPFD_CLOEXEC, 3); + if (write_fd < 0) /* We don't support F_DUPFD_CLOEXEC / other error */ +#endif + { + /* Use dup a few times until we get a desirable fd. */ + for (; fds_to_close_idx < 3; ++fds_to_close_idx) { + fds_to_close[fds_to_close_idx] = write_fd; + write_fd = dup(write_fd); + if (write_fd >= 3) + break; + /* We may dup a few extra times if it returns an error but + * that is okay. Repeat calls should return the same error. */ + } + if (write_fd < 0) res = write_fd; + if (res == 0) { + oldflags = fcntl(write_fd, F_GETFD, 0); + if (oldflags < 0) res = oldflags; + if (res == 0) + res = fcntl(write_fd, F_SETFD, oldflags | FD_CLOEXEC); + } + } + saved_errno = errno; + /* Close fds we tried for the write end that were too low. */ + for (fds_to_close_idx=0; fds_to_close_idx < 3; ++fds_to_close_idx) { + int temp_fd = fds_to_close[fds_to_close_idx]; + while (temp_fd >= 0 && close(temp_fd) < 0 && errno == EINTR); + } + errno = saved_errno; /* report dup or fcntl errors, not close. 
*/ + fds[1] = write_fd; + } /* end if write fd was too small */ + if (res != 0) return res; return 0; diff --git a/pypy/module/_posixsubprocess/test/test_subprocess.py b/pypy/module/_posixsubprocess/test/test_subprocess.py --- a/pypy/module/_posixsubprocess/test/test_subprocess.py +++ b/pypy/module/_posixsubprocess/test/test_subprocess.py @@ -1,7 +1,8 @@ from os.path import dirname class AppTestSubprocess: - spaceconfig = dict(usemodules=('_posixsubprocess', 'signal', 'fcntl', 'select')) + spaceconfig = dict(usemodules=('_posixsubprocess', 'signal', + 'fcntl', 'select', 'rctime')) # XXX write more tests def setup_class(cls): @@ -17,6 +18,7 @@ os.close(fd2) def test_close_fds_true(self): + import traceback # Work around a recursion limit import subprocess import os.path import os @@ -43,6 +45,7 @@ # For code coverage of calling setsid(). We don't care if we get an # EPERM error from it depending on the test execution environment, that # still indicates that it was called. + import traceback # Work around a recursion limit import subprocess import os try: From noreply at buildbot.pypy.org Tue Jul 29 01:43:42 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 29 Jul 2014 01:43:42 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Thanks to the new imporlib, we don't ignore the optimize flag anymore. Message-ID: <20140728234342.1D8E91C13AE@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72594:0d3f4870fc29 Date: 2014-07-28 17:50 +0200 http://bitbucket.org/pypy/pypy/changeset/0d3f4870fc29/ Log: Thanks to the new imporlib, we don't ignore the optimize flag anymore. 
diff --git a/lib-python/3/test/test_imp.py b/lib-python/3/test/test_imp.py --- a/lib-python/3/test/test_imp.py +++ b/lib-python/3/test/test_imp.py @@ -317,7 +317,6 @@ @unittest.skipUnless(sys.implementation.cache_tag is not None, 'requires sys.implementation.cache_tag not be None') - @support.impl_detail("PyPy ignores the optimize flag", pypy=False) def test_cache_from_source(self): # Given the path to a .py file, return the path to its PEP 3147 # defined .pyc file (i.e. under __pycache__). @@ -339,7 +338,6 @@ 'file{}.pyc'.format(self.tag)) self.assertEqual(imp.cache_from_source(path, True), expect) - @support.impl_detail("PyPy ignores the optimize flag", pypy=False) def test_cache_from_source_optimized(self): # Given the path to a .py file, return the path to its PEP 3147 # defined .pyo file (i.e. under __pycache__). From noreply at buildbot.pypy.org Tue Jul 29 01:43:43 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 29 Jul 2014 01:43:43 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Python3.3 rewrote the bz2 module at applevel, the C part only contains compressor objects. Message-ID: <20140728234343.702B61C13AE@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72595:1fa088c6d91d Date: 2014-07-28 18:05 +0200 http://bitbucket.org/pypy/pypy/changeset/1fa088c6d91d/ Log: Python3.3 rewrote the bz2 module at applevel, the C part only contains compressor objects. diff --git a/pypy/module/bz2/__init__.py b/pypy/module/bz2/__init__.py --- a/pypy/module/bz2/__init__.py +++ b/pypy/module/bz2/__init__.py @@ -1,19 +1,14 @@ -# REVIEWME from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): - """The python bz2 module provides a comprehensive interface for -the bz2 compression library. It implements a complete file -interface, one shot (de)compression functions, and types for -sequential (de)compression.""" + # The private part of the bz2 module. 
+ + applevel_name = '_bz2' interpleveldefs = { 'BZ2Compressor': 'interp_bz2.W_BZ2Compressor', 'BZ2Decompressor': 'interp_bz2.W_BZ2Decompressor', - 'compress': 'interp_bz2.compress', - 'decompress': 'interp_bz2.decompress', } appleveldefs = { - 'BZ2File': 'app_bz2file.BZ2File', } diff --git a/pypy/module/bz2/app_bz2file.py b/pypy/module/bz2/app_bz2file.py deleted file mode 100644 --- a/pypy/module/bz2/app_bz2file.py +++ /dev/null @@ -1,370 +0,0 @@ -"""Interface to the libbzip2 compression library. - -This file is an almost exact copy of CPython3.3 Lib/bz2.py. -""" - -import io - -from bz2 import BZ2Compressor, BZ2Decompressor - - -_MODE_CLOSED = 0 -_MODE_READ = 1 -_MODE_READ_EOF = 2 -_MODE_WRITE = 3 - -_BUFFER_SIZE = 8192 - - -class BZ2File(io.BufferedIOBase): - - """A file object providing transparent bzip2 (de)compression. - - A BZ2File can act as a wrapper for an existing file object, or refer - directly to a named file on disk. - - Note that BZ2File provides a *binary* file interface - data read is - returned as bytes, and data to be written should be given as bytes. - """ - - def __init__(self, filename=None, mode="r", buffering=None, - compresslevel=9, fileobj=None): - """Open a bzip2-compressed file. - - If filename is given, open the named file. Otherwise, operate on - the file object given by fileobj. Exactly one of these two - parameters should be provided. - - mode can be 'r' for reading (default), or 'w' for writing. - - buffering is ignored. Its use is deprecated. - - If mode is 'w', compresslevel can be a number between 1 and 9 - specifying the level of compression: 1 produces the least - compression, and 9 (default) produces the most compression. - """ - # This lock must be recursive, so that BufferedIOBase's - # readline(), readlines() and writelines() don't deadlock. 
- import threading - self._lock = threading.RLock() - self._fp = None - self._closefp = False - self._mode = _MODE_CLOSED - self._pos = 0 - self._size = -1 - - if not (1 <= compresslevel <= 9): - raise ValueError("compresslevel must be between 1 and 9") - - if mode in ("", "r", "rb"): - mode = "rb" - mode_code = _MODE_READ - self._decompressor = BZ2Decompressor() - self._buffer = None - elif mode in ("w", "wb"): - mode = "wb" - mode_code = _MODE_WRITE - self._compressor = BZ2Compressor(compresslevel) - elif mode in ("a", "ab"): - mode = "ab" - mode_code = _MODE_WRITE - self._compressor = BZ2Compressor(compresslevel) - else: - raise ValueError("Invalid mode: {!r}".format(mode)) - - if filename is not None and fileobj is None: - self._fp = open(filename, mode) - self._closefp = True - self._mode = mode_code - elif fileobj is not None and filename is None: - self._fp = fileobj - self._mode = mode_code - else: - raise ValueError("Must give exactly one of filename and fileobj") - - def close(self): - """Flush and close the file. - - May be called more than once without error. Once the file is - closed, any other operation on it will raise a ValueError. 
- """ - with self._lock: - if self._mode == _MODE_CLOSED: - return - try: - if self._mode in (_MODE_READ, _MODE_READ_EOF): - self._decompressor = None - elif self._mode == _MODE_WRITE: - self._fp.write(self._compressor.flush()) - self._compressor = None - finally: - try: - if self._closefp: - self._fp.close() - finally: - self._fp = None - self._closefp = False - self._mode = _MODE_CLOSED - self._buffer = None - - @property - def closed(self): - """True if this file is closed.""" - return self._mode == _MODE_CLOSED - - def fileno(self): - """Return the file descriptor for the underlying file.""" - self._check_not_closed() - return self._fp.fileno() - - def seekable(self): - """Return whether the file supports seeking.""" - return self.readable() - - def readable(self): - """Return whether the file was opened for reading.""" - self._check_not_closed() - return self._mode in (_MODE_READ, _MODE_READ_EOF) - - def writable(self): - """Return whether the file was opened for writing.""" - self._check_not_closed() - return self._mode == _MODE_WRITE - - # Mode-checking helper functions. - - def _check_not_closed(self): - if self.closed: - raise ValueError("I/O operation on closed file") - - def _check_can_read(self): - if not self.readable(): - raise io.UnsupportedOperation("File not open for reading") - - def _check_can_write(self): - if not self.writable(): - raise io.UnsupportedOperation("File not open for writing") - - def _check_can_seek(self): - if not self.seekable(): - raise io.UnsupportedOperation("Seeking is only supported " - "on files open for reading") - - # Fill the readahead buffer if it is empty. Returns False on EOF. 
- def _fill_buffer(self): - if self._buffer: - return True - - if self._decompressor.unused_data: - rawblock = self._decompressor.unused_data - else: - rawblock = self._fp.read(_BUFFER_SIZE) - - if not rawblock: - if self._decompressor.eof: - self._mode = _MODE_READ_EOF - self._size = self._pos - return False - else: - raise EOFError("Compressed file ended before the " - "end-of-stream marker was reached") - - # Continue to next stream. - if self._decompressor.eof: - self._decompressor = BZ2Decompressor() - - self._buffer = self._decompressor.decompress(rawblock) - return True - - # Read data until EOF. - # If return_data is false, consume the data without returning it. - def _read_all(self, return_data=True): - blocks = [] - while self._fill_buffer(): - if return_data: - blocks.append(self._buffer) - self._pos += len(self._buffer) - self._buffer = None - if return_data: - return b"".join(blocks) - - # Read a block of up to n bytes. - # If return_data is false, consume the data without returning it. - def _read_block(self, n, return_data=True): - blocks = [] - while n > 0 and self._fill_buffer(): - if n < len(self._buffer): - data = self._buffer[:n] - self._buffer = self._buffer[n:] - else: - data = self._buffer - self._buffer = None - if return_data: - blocks.append(data) - self._pos += len(data) - n -= len(data) - if return_data: - return b"".join(blocks) - - def peek(self, n=0): - """Return buffered data without advancing the file position. - - Always returns at least one byte of data, unless at EOF. - The exact number of bytes returned is unspecified. - """ - with self._lock: - self._check_can_read() - if self._mode == _MODE_READ_EOF or not self._fill_buffer(): - return b"" - return self._buffer - - def read(self, size=-1): - """Read up to size uncompressed bytes from the file. - - If size is negative or omitted, read until EOF is reached. - Returns b'' if the file is already at EOF. 
- """ - with self._lock: - self._check_can_read() - if self._mode == _MODE_READ_EOF or size == 0: - return b"" - elif size < 0: - return self._read_all() - else: - return self._read_block(size) - - def read1(self, size=-1): - """Read up to size uncompressed bytes with at most one read - from the underlying stream. - - Returns b'' if the file is at EOF. - """ - with self._lock: - self._check_can_read() - if (size == 0 or self._mode == _MODE_READ_EOF or - not self._fill_buffer()): - return b"" - if 0 < size < len(self._buffer): - data = self._buffer[:size] - self._buffer = self._buffer[size:] - else: - data = self._buffer - self._buffer = None - self._pos += len(data) - return data - - def readinto(self, b): - """Read up to len(b) bytes into b. - - Returns the number of bytes read (0 for EOF). - """ - with self._lock: - return io.BufferedIOBase.readinto(self, b) - - def readline(self, size=-1): - """Read a line of uncompressed bytes from the file. - - The terminating newline (if present) is retained. If size is - non-negative, no more than size bytes will be read (in which - case the line may be incomplete). Returns b'' if already at EOF. - """ - if not hasattr(size, "__index__"): - raise TypeError("Integer argument expected") - size = size.__index__() - with self._lock: - return io.BufferedIOBase.readline(self, size) - - def readlines(self, size=-1): - """Read a list of lines of uncompressed bytes from the file. - - size can be specified to control the number of lines read: no - further lines will be read once the total size of the lines read - so far equals or exceeds size. - """ - if not hasattr(size, "__index__"): - raise TypeError("Integer argument expected") - size = size.__index__() - with self._lock: - return io.BufferedIOBase.readlines(self, size) - - def write(self, data): - """Write a byte string to the file. - - Returns the number of uncompressed bytes written, which is - always len(data). 
Note that due to buffering, the file on disk - may not reflect the data written until close() is called. - """ - with self._lock: - self._check_can_write() - compressed = self._compressor.compress(data) - self._fp.write(compressed) - self._pos += len(data) - return len(data) - - def writelines(self, seq): - """Write a sequence of byte strings to the file. - - Returns the number of uncompressed bytes written. - seq can be any iterable yielding byte strings. - - Line separators are not added between the written byte strings. - """ - with self._lock: - return io.BufferedIOBase.writelines(self, seq) - - # Rewind the file to the beginning of the data stream. - def _rewind(self): - self._fp.seek(0, 0) - self._mode = _MODE_READ - self._pos = 0 - self._decompressor = BZ2Decompressor() - self._buffer = None - - def seek(self, offset, whence=0): - """Change the file position. - - The new position is specified by offset, relative to the - position indicated by whence. Values for whence are: - - 0: start of stream (default); offset must not be negative - 1: current stream position - 2: end of stream; offset must not be positive - - Returns the new file position. - - Note that seeking is emulated, so depending on the parameters, - this operation may be extremely slow. - """ - with self._lock: - self._check_can_seek() - - # Recalculate offset as an absolute file position. - if whence == 0: - pass - elif whence == 1: - offset = self._pos + offset - elif whence == 2: - # Seeking relative to EOF - we need to know the file's size. - if self._size < 0: - self._read_all(return_data=False) - offset = self._size + offset - else: - raise ValueError("Invalid value for whence: {}".format(whence)) - - # Make it so that offset is the number of bytes to skip forward. - if offset < self._pos: - self._rewind() - else: - offset -= self._pos - - # Read and discard data until we reach the desired position. 
- if self._mode != _MODE_READ_EOF: - self._read_block(offset, return_data=False) - - return self._pos - - def tell(self): - """Return the current file position.""" - with self._lock: - self._check_not_closed() - return self._pos diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -433,91 +433,3 @@ eof = GetSetProperty(W_BZ2Decompressor.eof_w), decompress = interp2app(W_BZ2Decompressor.decompress), ) - - - at unwrap_spec(data='bufferstr', compresslevel=int) -def compress(space, data, compresslevel=9): - """compress(data [, compresslevel=9]) -> string - - Compress data in one shot. If you want to compress data sequentially, - use an instance of BZ2Compressor instead. The compresslevel parameter, if - given, must be a number between 1 and 9.""" - - if compresslevel < 1 or compresslevel > 9: - raise OperationError(space.w_ValueError, - space.wrap("compresslevel must be between 1 and 9")) - - with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs: - in_bufsize = len(data) - - with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf: - for i in range(in_bufsize): - in_buf[i] = data[i] - bzs.c_next_in = in_buf - rffi.setintfield(bzs, 'c_avail_in', in_bufsize) - - # conforming to bz2 manual, this is large enough to fit compressed - # data in one shot. We will check it later anyway. 
- with OutBuffer(bzs, - in_bufsize + (in_bufsize / 100 + 1) + 600) as out: - - bzerror = BZ2_bzCompressInit(bzs, compresslevel, 0, 0) - if bzerror != BZ_OK: - _catch_bz2_error(space, bzerror) - - while True: - bzerror = BZ2_bzCompress(bzs, BZ_FINISH) - if bzerror == BZ_STREAM_END: - break - elif bzerror != BZ_FINISH_OK: - BZ2_bzCompressEnd(bzs) - _catch_bz2_error(space, bzerror) - - if rffi.getintfield(bzs, 'c_avail_out') == 0: - out.prepare_next_chunk() - - res = out.make_result_string() - BZ2_bzCompressEnd(bzs) - return space.wrapbytes(res) - - at unwrap_spec(data='bufferstr') -def decompress(space, data): - """decompress(data) -> decompressed data - - Decompress data in one shot. If you want to decompress data sequentially, - use an instance of BZ2Decompressor instead.""" - - in_bufsize = len(data) - if in_bufsize == 0: - return space.wrapbytes("") - - with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs: - with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf: - for i in range(in_bufsize): - in_buf[i] = data[i] - bzs.c_next_in = in_buf - rffi.setintfield(bzs, 'c_avail_in', in_bufsize) - - with OutBuffer(bzs) as out: - bzerror = BZ2_bzDecompressInit(bzs, 0, 0) - if bzerror != BZ_OK: - _catch_bz2_error(space, bzerror) - - while True: - bzerror = BZ2_bzDecompress(bzs) - if bzerror == BZ_STREAM_END: - break - if bzerror != BZ_OK: - BZ2_bzDecompressEnd(bzs) - _catch_bz2_error(space, bzerror) - - if rffi.getintfield(bzs, 'c_avail_in') == 0: - BZ2_bzDecompressEnd(bzs) - raise OperationError(space.w_ValueError, space.wrap( - "couldn't find end of stream")) - elif rffi.getintfield(bzs, 'c_avail_out') == 0: - out.prepare_next_chunk() - - res = out.make_result_string() - BZ2_bzDecompressEnd(bzs) - return space.wrapbytes(res) diff --git a/pypy/module/bz2/test/test_bz2_compdecomp.py b/pypy/module/bz2/test/test_bz2_compdecomp.py --- a/pypy/module/bz2/test/test_bz2_compdecomp.py +++ b/pypy/module/bz2/test/test_bz2_compdecomp.py @@ -41,7 +41,7 @@ 
interp_bz2.SMALLCHUNK = mod.OLD_SMALLCHUNK class AppTestBZ2Compressor(CheckAllocation): - spaceconfig = dict(usemodules=('bz2',)) + spaceconfig = dict(usemodules=('bz2', 'rctime')) def setup_class(cls): cls.w_TEXT = cls.space.wrapbytes(TEXT) @@ -54,6 +54,8 @@ cls.w_decompress = cls.space.wrap(gateway.interp2app(decompress_w)) cls.w_HUGE_OK = cls.space.wrap(HUGE_OK) + cls.space.appexec([], """(): import warnings""") # Work around a recursion limit + def test_creation(self): from bz2 import BZ2Compressor @@ -108,13 +110,15 @@ class AppTestBZ2Decompressor(CheckAllocation): - spaceconfig = dict(usemodules=('bz2',)) + spaceconfig = dict(usemodules=('bz2', 'rctime')) def setup_class(cls): cls.w_TEXT = cls.space.wrapbytes(TEXT) cls.w_DATA = cls.space.wrapbytes(DATA) cls.w_BUGGY_DATA = cls.space.wrapbytes(BUGGY_DATA) + cls.space.appexec([], """(): import warnings""") # Work around a recursion limit + def test_creation(self): from bz2 import BZ2Decompressor @@ -184,7 +188,7 @@ class AppTestBZ2ModuleFunctions(CheckAllocation): - spaceconfig = dict(usemodules=('bz2',)) + spaceconfig = dict(usemodules=('bz2', 'rctime')) def setup_class(cls): cls.w_TEXT = cls.space.wrapbytes(TEXT) diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -87,6 +87,8 @@ gateway.interp2app(create_broken_temp_file_w)) cls.w_random_data = cls.space.wrapbytes(RANDOM_DATA) + cls.space.appexec([], """(): import warnings""") # Work around a recursion limit + def test_attributes(self): from bz2 import BZ2File From noreply at buildbot.pypy.org Tue Jul 29 01:43:44 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 29 Jul 2014 01:43:44 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: A minimal _lzma module, allows to import lzma.py. 
Message-ID: <20140728234344.B952D1C13AE@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72596:5ff448f5dc64 Date: 2014-07-28 18:20 +0200 http://bitbucket.org/pypy/pypy/changeset/5ff448f5dc64/ Log: A minimal _lzma module, allows to import lzma.py. Test suite will fail... diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -37,7 +37,7 @@ "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "_continuation", "_cffi_backend", "_csv", "_pypyjson", "_posixsubprocess", # "cppyy", "micronumpy" - "faulthandler", + "faulthandler", "_lzma", ]) translation_modules = default_modules.copy() @@ -106,6 +106,7 @@ "_hashlib" : ["pypy.module._ssl.interp_ssl"], "_minimal_curses": ["pypy.module._minimal_curses.fficurses"], "_continuation": ["rpython.rlib.rstacklet"], + "_lzma" : ["pypy.module._lzma.interp_lzma"], } def get_module_validator(modname): diff --git a/pypy/module/_lzma/__init__.py b/pypy/module/_lzma/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_lzma/__init__.py @@ -0,0 +1,20 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + # The private part of the lzma module. 
+ + applevel_name = '_lzma' + + interpleveldefs = { + 'LZMACompressor': 'interp_lzma.W_LZMACompressor', + 'LZMADecompressor': 'interp_lzma.W_LZMADecompressor', + '_encode_filter_properties': 'interp_lzma.encode_filter_properties', + '_decode_filter_properties': 'interp_lzma.decode_filter_properties', + 'FORMAT_AUTO': 'space.wrap(interp_lzma.FORMAT_AUTO)', + 'FORMAT_XZ': 'space.wrap(interp_lzma.FORMAT_XZ)', + 'FORMAT_ALONE': 'space.wrap(interp_lzma.FORMAT_ALONE)', + 'FORMAT_RAW': 'space.wrap(interp_lzma.FORMAT_RAW)', + } + + appleveldefs = { + } diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py new file mode 100644 --- /dev/null +++ b/pypy/module/_lzma/interp_lzma.py @@ -0,0 +1,32 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef + +FORMAT_AUTO, FORMAT_XZ, FORMAT_ALONE, FORMAT_RAW = range(4) + + +class W_LZMACompressor(W_Root): + pass + +W_LZMACompressor.typedef = TypeDef("LZMACompressor", +) + + +class W_LZMADecompressor(W_Root): + pass + +W_LZMADecompressor.typedef = TypeDef("LZMADecompressor", +) + + +def encode_filter_properties(space, w_filter): + """Return a bytes object encoding the options (properties) of the filter + specified by *filter* (a dict). + + The result does not include the filter ID itself, only the options. + """ + +def decode_filter_properties(space, w_filter_id, w_encoded_props): + """Return a dict describing a filter with ID *filter_id*, and options + (properties) decoded from the bytes object *encoded_props*. 
+ """ + diff --git a/pypy/module/_lzma/test/test_lzma.py b/pypy/module/_lzma/test/test_lzma.py new file mode 100644 --- /dev/null +++ b/pypy/module/_lzma/test/test_lzma.py @@ -0,0 +1,7 @@ +class AppTestBZ2File: + spaceconfig = { + "usemodules": ["_lzma"] + } + + def test_module(self): + import lzma From noreply at buildbot.pypy.org Tue Jul 29 01:43:46 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 29 Jul 2014 01:43:46 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Just enough of lzma for the first compress() test. Message-ID: <20140728234346.064CB1C13AE@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72597:d9880908500a Date: 2014-07-29 01:42 +0200 http://bitbucket.org/pypy/pypy/changeset/d9880908500a/ Log: Just enough of lzma for the first compress() test. diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py --- a/pypy/module/_lzma/interp_lzma.py +++ b/pypy/module/_lzma/interp_lzma.py @@ -1,13 +1,253 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.error import oefmt +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.module.thread.os_lock import Lock +from rpython.rlib.objectmodel import specialize +from rpython.rtyper.tool import rffi_platform as platform +from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype +from rpython.translator.tool.cbuild import ExternalCompilationInfo + FORMAT_AUTO, FORMAT_XZ, FORMAT_ALONE, FORMAT_RAW = range(4) +eci = ExternalCompilationInfo( + includes = ['lzma.h'], + libraries = ['lzma'], + ) +eci = platform.configure_external_library( + 'lzma', eci, + [dict(prefix='lzma-')]) +if not eci: + raise ImportError("Could not find bzip2 library") + + +class CConfig: + _compilation_info_ = eci + calling_conv = 'c' + + BUFSIZ = platform.ConstantInteger("BUFSIZ") + + lzma_stream = platform.Struct( + 'lzma_stream', + 
[('next_in', rffi.CCHARP), + ('avail_in', rffi.UINT), + ('total_in', rffi.UINT), + ('next_out', rffi.CCHARP), + ('avail_out', rffi.UINT), + ('total_out', rffi.UINT), + ]) + + lzma_options_lzma = platform.Struct( + 'lzma_options_lzma', + []) + +constant_names = ''' + LZMA_RUN LZMA_FINISH + LZMA_OK LZMA_GET_CHECK LZMA_NO_CHECK LZMA_STREAM_END + LZMA_PRESET_DEFAULT + '''.split() +for name in constant_names: + setattr(CConfig, name, platform.ConstantInteger(name)) + +class cConfig(object): + pass +for k, v in platform.configure(CConfig).items(): + setattr(cConfig, k, v) + + +for name in constant_names: + globals()[name] = getattr(cConfig, name) +lzma_stream = lltype.Ptr(cConfig.lzma_stream) +lzma_options_lzma = lltype.Ptr(cConfig.lzma_options_lzma) +BUFSIZ = cConfig.BUFSIZ + +def external(name, args, result, **kwds): + return rffi.llexternal(name, args, result, compilation_info= + CConfig._compilation_info_, **kwds) + +lzma_ret = rffi.INT +lzma_action = rffi.INT +lzma_bool = rffi.INT + +lzma_lzma_preset = external('lzma_lzma_preset', [lzma_options_lzma, rffi.UINT], lzma_bool) +lzma_alone_encoder = external('lzma_alone_encoder', [lzma_stream, lzma_options_lzma], lzma_ret) +lzma_end = external('lzma_end', [lzma_stream], lltype.Void) + +lzma_code = external('lzma_code', [lzma_stream, lzma_action], rffi.INT) + + + at specialize.arg(1) +def raise_error(space, fmt, *args): + raise oefmt(space.w_RuntimeError, fmt, *args) + + +def _catch_lzma_error(space, lzret): + if (lzret == LZMA_OK or lzret == LZMA_GET_CHECK or + lzret == LZMA_NO_CHECK or lzret == LZMA_STREAM_END): + return + raise raise_error(space, "Unrecognized error from liblzma: %d", lzret) + + +if BUFSIZ < 8192: + SMALLCHUNK = 8192 +else: + SMALLCHUNK = BUFSIZ +if rffi.sizeof(rffi.INT) > 4: + BIGCHUNK = 512 * 32 +else: + BIGCHUNK = 512 * 1024 + + +def _new_buffer_size(current_size): + # keep doubling until we reach BIGCHUNK; then the buffer size is no + # longer increased + if current_size < BIGCHUNK: + return 
current_size + current_size + return current_size + + +class OutBuffer(object): + """Handler for the output buffer. A bit custom code trying to + encapsulate the logic of setting up the fields of 'lzs' and + allocating raw memory as needed. + """ + def __init__(self, lzs, initial_size=SMALLCHUNK): + # when the constructor is called, allocate a piece of memory + # of length 'piece_size' and make lzs ready to dump there. + self.temp = [] + self.lzs = lzs + self._allocate_chunk(initial_size) + + def _allocate_chunk(self, size): + self.raw_buf, self.gc_buf = rffi.alloc_buffer(size) + self.current_size = size + self.lzs.c_next_out = self.raw_buf + rffi.setintfield(self.lzs, 'c_avail_out', size) + + def _get_chunk(self, chunksize): + assert 0 <= chunksize <= self.current_size + raw_buf = self.raw_buf + gc_buf = self.gc_buf + s = rffi.str_from_buffer(raw_buf, gc_buf, self.current_size, chunksize) + rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) + self.current_size = 0 + return s + + def prepare_next_chunk(self): + size = self.current_size + self.temp.append(self._get_chunk(size)) + self._allocate_chunk(_new_buffer_size(size)) + + def make_result_string(self): + count_unoccupied = rffi.getintfield(self.lzs, 'c_avail_out') + s = self._get_chunk(self.current_size - count_unoccupied) + if self.temp: + self.temp.append(s) + return ''.join(self.temp) + else: + return s + + def free(self): + if self.current_size > 0: + rffi.keep_buffer_alive_until_here(self.raw_buf, self.gc_buf) + + def __enter__(self): + return self + def __exit__(self, *args): + self.free() + + class W_LZMACompressor(W_Root): - pass + def __init__(self, space, format): + self.format = format + self.lock = Lock(space) + self.flushed = False + self.lzs = lltype.malloc(lzma_stream.TO, flavor='raw', zero=True) + + def __del__(self): + lzma_end(self.lzs) + lltype.free(self.lzs, flavor='raw') + + def _init_alone(self, space, preset, w_filter): + if space.is_none(w_filter): + with 
lltype.scoped_alloc(lzma_options_lzma.TO) as options: + if lzma_lzma_preset(options, preset): + raise_error(space, "Invalid compression preset: %d", preset) + lzret = lzma_alone_encoder(self.lzs, options) + else: + raise NotImplementedError + _catch_lzma_error(space, lzret) + + @staticmethod + @unwrap_spec(format=int, + w_check=WrappedDefault(None), + w_preset=WrappedDefault(None), + w_filter=WrappedDefault(None)) + def descr_new(space, w_subtype, format=FORMAT_XZ, + w_check=None, w_preset=None, w_filter=None): + w_self = space.allocate_instance(W_LZMACompressor, w_subtype) + self = space.interp_w(W_LZMACompressor, w_self) + W_LZMACompressor.__init__(self, space, format) + + if space.is_none(w_preset): + preset = LZMA_PRESET_DEFAULT + else: + preset = space.int_w(w_preset) + + if format == FORMAT_ALONE: + self._init_alone(space, preset, w_filter) + else: + raise NotImplementedError + + return w_self + + @unwrap_spec(data='bufferstr') + def compress_w(self, space, data): + with self.lock: + if self.flushed: + raise oefmt(space.w_ValueError, "Compressor has been flushed") + result = self._compress(space, data, LZMA_RUN) + return space.wrapbytes(result) + + def flush_w(self, space): + with self.lock: + if self.flushed: + raise oefmt(space.w_ValueError, "Repeated call to flush()") + result = self._compress(space, "", LZMA_FINISH) + return space.wrapbytes(result) + + def _compress(self, space, data, action): + datasize = len(data) + with OutBuffer(self.lzs) as out: + with lltype.scoped_alloc(rffi.CCHARP.TO, datasize) as in_buf: + for i in range(datasize): + in_buf[i] = data[i] + + self.lzs.c_next_in = in_buf + rffi.setintfield(self.lzs, 'c_avail_in', datasize) + + while True: + lzret = lzma_code(self.lzs, action) + _catch_lzma_error(space, lzret) + + if (action == LZMA_RUN and + rffi.getintfield(self.lzs, 'c_avail_in') == 0): + break + if action == LZMA_FINISH and lzret == LZMA_STREAM_END: + break + elif rffi.getintfield(self.lzs, 'c_avail_out') == 0: + 
out.prepare_next_chunk() + + return out.make_result_string() + W_LZMACompressor.typedef = TypeDef("LZMACompressor", + __new__ = interp2app(W_LZMACompressor.descr_new), + compress = interp2app(W_LZMACompressor.compress_w), + flush = interp2app(W_LZMACompressor.flush_w), ) diff --git a/pypy/module/_lzma/test/test_lzma.py b/pypy/module/_lzma/test/test_lzma.py --- a/pypy/module/_lzma/test/test_lzma.py +++ b/pypy/module/_lzma/test/test_lzma.py @@ -5,3 +5,12 @@ def test_module(self): import lzma + + def test_simple_compress(self): + import lzma + compressed = lzma.compress(b'Insert Data Here', format=lzma.FORMAT_ALONE) + assert compressed == (b']\x00\x00\x80\x00\xff\xff\xff\xff\xff' + b'\xff\xff\xff\x00$\x9b\x8afg\x91' + b'(\xcb\xde\xfa\x03\r\x1eQT\xbe' + b't\x9e\xdfI]\xff\xf4\x9d\x80\x00') + diff --git a/pypy/module/_lzma/test/test_ztranslation.py b/pypy/module/_lzma/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_lzma/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_lzma_translates(): + checkmodule('_lzma') From noreply at buildbot.pypy.org Tue Jul 29 01:49:00 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 29 Jul 2014 01:49:00 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Add 'documentation' for new module. Message-ID: <20140728234900.9B42A1C13AE@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72598:d53e58ebfc12 Date: 2014-07-29 01:48 +0200 http://bitbucket.org/pypy/pypy/changeset/d53e58ebfc12/ Log: Add 'documentation' for new module. diff --git a/pypy/doc/config/objspace.usemodules._lzma.txt b/pypy/doc/config/objspace.usemodules._lzma.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._lzma.txt @@ -0,0 +1,2 @@ +Use the '_lzma' module. +This module is expected to be working and is included by default. 
From noreply at buildbot.pypy.org Tue Jul 29 10:02:26 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 29 Jul 2014 10:02:26 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: lzma: add basic decompression. Message-ID: <20140729080226.162D81C0D31@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72599:4076640cb668 Date: 2014-07-29 10:01 +0200 http://bitbucket.org/pypy/pypy/changeset/4076640cb668/ Log: lzma: add basic decompression. diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py --- a/pypy/module/_lzma/interp_lzma.py +++ b/pypy/module/_lzma/interp_lzma.py @@ -1,9 +1,11 @@ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.typedef import ( + TypeDef, interp_attrproperty_bytes, interp_attrproperty) from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.module.thread.os_lock import Lock from rpython.rlib.objectmodel import specialize +from rpython.rlib.rarithmetic import LONGLONG_MASK, r_ulonglong from rpython.rtyper.tool import rffi_platform as platform from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem import lltype @@ -21,7 +23,7 @@ 'lzma', eci, [dict(prefix='lzma-')]) if not eci: - raise ImportError("Could not find bzip2 library") + raise ImportError("Could not find lzma library") class CConfig: @@ -48,6 +50,8 @@ LZMA_RUN LZMA_FINISH LZMA_OK LZMA_GET_CHECK LZMA_NO_CHECK LZMA_STREAM_END LZMA_PRESET_DEFAULT + LZMA_CHECK_ID_MAX + LZMA_TELL_ANY_CHECK LZMA_TELL_NO_CHECK '''.split() for name in constant_names: setattr(CConfig, name, platform.ConstantInteger(name)) @@ -57,12 +61,12 @@ for k, v in platform.configure(CConfig).items(): setattr(cConfig, k, v) - for name in constant_names: globals()[name] = getattr(cConfig, name) lzma_stream = lltype.Ptr(cConfig.lzma_stream) lzma_options_lzma = lltype.Ptr(cConfig.lzma_options_lzma) 
BUFSIZ = cConfig.BUFSIZ +LZMA_CHECK_UNKNOWN = LZMA_CHECK_ID_MAX + 1 def external(name, args, result, **kwds): return rffi.llexternal(name, args, result, compilation_info= @@ -76,6 +80,9 @@ lzma_alone_encoder = external('lzma_alone_encoder', [lzma_stream, lzma_options_lzma], lzma_ret) lzma_end = external('lzma_end', [lzma_stream], lltype.Void) +lzma_auto_decoder = external('lzma_auto_decoder', [lzma_stream, rffi.LONG, rffi.INT], lzma_ret) +lzma_get_check = external('lzma_get_check', [lzma_stream], rffi.INT) + lzma_code = external('lzma_code', [lzma_stream, lzma_action], rffi.INT) @@ -171,8 +178,8 @@ lzma_end(self.lzs) lltype.free(self.lzs, flavor='raw') - def _init_alone(self, space, preset, w_filter): - if space.is_none(w_filter): + def _init_alone(self, space, preset, w_filters): + if space.is_none(w_filters): with lltype.scoped_alloc(lzma_options_lzma.TO) as options: if lzma_lzma_preset(options, preset): raise_error(space, "Invalid compression preset: %d", preset) @@ -185,9 +192,9 @@ @unwrap_spec(format=int, w_check=WrappedDefault(None), w_preset=WrappedDefault(None), - w_filter=WrappedDefault(None)) + w_filters=WrappedDefault(None)) def descr_new(space, w_subtype, format=FORMAT_XZ, - w_check=None, w_preset=None, w_filter=None): + w_check=None, w_preset=None, w_filters=None): w_self = space.allocate_instance(W_LZMACompressor, w_subtype) self = space.interp_w(W_LZMACompressor, w_self) W_LZMACompressor.__init__(self, space, format) @@ -198,7 +205,7 @@ preset = space.int_w(w_preset) if format == FORMAT_ALONE: - self._init_alone(space, preset, w_filter) + self._init_alone(space, preset, w_filters) else: raise NotImplementedError @@ -221,11 +228,11 @@ def _compress(self, space, data, action): datasize = len(data) - with OutBuffer(self.lzs) as out: - with lltype.scoped_alloc(rffi.CCHARP.TO, datasize) as in_buf: - for i in range(datasize): - in_buf[i] = data[i] + with lltype.scoped_alloc(rffi.CCHARP.TO, datasize) as in_buf: + for i in range(datasize): + in_buf[i] = 
data[i] + with OutBuffer(self.lzs) as out: self.lzs.c_next_in = in_buf rffi.setintfield(self.lzs, 'c_avail_in', datasize) @@ -241,7 +248,7 @@ elif rffi.getintfield(self.lzs, 'c_avail_out') == 0: out.prepare_next_chunk() - return out.make_result_string() + return out.make_result_string() W_LZMACompressor.typedef = TypeDef("LZMACompressor", @@ -252,9 +259,89 @@ class W_LZMADecompressor(W_Root): - pass + def __init__(self, space, format): + self.format = format + self.lock = Lock(space) + self.eof = False + self.lzs = lltype.malloc(lzma_stream.TO, flavor='raw', zero=True) + self.check = LZMA_CHECK_UNKNOWN + self.unused_data = '' + + def __del__(self): + lzma_end(self.lzs) + lltype.free(self.lzs, flavor='raw') + + @staticmethod + @unwrap_spec(format=int, + w_memlimit=WrappedDefault(None), + w_filters=WrappedDefault(None)) + def descr_new(space, w_subtype, format=FORMAT_AUTO, + w_memlimit=None, w_filters=None): + w_self = space.allocate_instance(W_LZMADecompressor, w_subtype) + self = space.interp_w(W_LZMADecompressor, w_self) + W_LZMADecompressor.__init__(self, space, format) + + if space.is_none(w_memlimit): + memlimit = r_ulonglong(LONGLONG_MASK) + else: + memlimit = space.r_ulonglong_w(w_memlimit) + + decoder_flags = LZMA_TELL_ANY_CHECK | LZMA_TELL_NO_CHECK + + if format == FORMAT_AUTO: + lzret = lzma_auto_decoder(self.lzs, memlimit, decoder_flags) + _catch_lzma_error(space, lzret) + else: + raise NotImplementedError + + return w_self + + @unwrap_spec(data='bufferstr') + def decompress_w(self, space, data): + with self.lock: + if self.eof: + raise oefmt(space.w_EOFError, "Already at end of stream") + result = self._decompress(space, data) + return space.wrapbytes(result) + + def _decompress(self, space, data): + datasize = len(data) + + with lltype.scoped_alloc(rffi.CCHARP.TO, datasize) as in_buf: + for i in range(datasize): + in_buf[i] = data[i] + + with OutBuffer(self.lzs) as out: + self.lzs.c_next_in = in_buf + rffi.setintfield(self.lzs, 'c_avail_in', datasize) + 
+ while True: + lzret = lzma_code(self.lzs, LZMA_RUN) + _catch_lzma_error(space, lzret) + if lzret == LZMA_GET_CHECK or lzret == LZMA_NO_CHECK: + self.check = lzma_get_check(self.lzs) + if lzret == LZMA_STREAM_END: + self.eof = True + if rffi.getintfield(self.lzs, 'c_avail_in') > 0: + unused = [self.lzs.c_next_in[i] + for i in range( + rffi.getintfield(self.lzs, + 'c_avail_in'))] + self.unused_data = "".join(unused) + break + if rffi.getintfield(self.lzs, 'c_avail_in') == 0: + break + elif rffi.getintfield(self.lzs, 'c_avail_out') == 0: + out.prepare_next_chunk() + + return out.make_result_string() + W_LZMADecompressor.typedef = TypeDef("LZMADecompressor", + __new__ = interp2app(W_LZMADecompressor.descr_new), + decompress = interp2app(W_LZMADecompressor.decompress_w), + eof = interp_attrproperty("eof", W_LZMADecompressor), + unused_data = interp_attrproperty_bytes("unused_data", W_LZMADecompressor), ) diff --git a/pypy/module/_lzma/test/test_lzma.py b/pypy/module/_lzma/test/test_lzma.py --- a/pypy/module/_lzma/test/test_lzma.py +++ b/pypy/module/_lzma/test/test_lzma.py @@ -13,4 +13,5 @@ b'\xff\xff\xff\x00$\x9b\x8afg\x91' b'(\xcb\xde\xfa\x03\r\x1eQT\xbe' b't\x9e\xdfI]\xff\xf4\x9d\x80\x00') - + decompressed = lzma.decompress(compressed) + assert decompressed == b'Insert Data Here' From noreply at buildbot.pypy.org Tue Jul 29 10:04:57 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 29 Jul 2014 10:04:57 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: clarifications Message-ID: <20140729080457.49BE31C0D31@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5374:4c2c4bc8d9f2 Date: 2014-07-29 10:04 +0200 http://bitbucket.org/pypy/extradoc/changeset/4c2c4bc8d9f2/ Log: clarifications diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -704,11 +704,20 @@ some young ones that were too big to be allocated in the nursery. 
\end{description} -Note, since the above configuration is specified at compile time, +Note, since the above configuration is currently specified at compile time, all these areas are at offsets inside the segments known to the compiler. This makes some checks very efficient, e.g.\ checking if an object resides in the nursery only requires comparing its $SO$ -to the static boundaries of the nursery. +to the static boundaries of the nursery. It is possible that we want +some parameters to be configurable at startup or even during the +runtime. In that case we can still use a compile-time specified +maximum so that these checks are still efficient. E.g.\ limiting the +maximum amount of memory available to the application statically to a few +terabytes is fine because it corresponds to virtual memory, +the real physical memory is assigned on-demand by the operating +system. + + \begin{figure*}[t] @@ -1033,6 +1042,11 @@ segments and call the slow path. In \lstinline!allocate_slowpath! they can simply check for this condition and enter a safe point. +Note, this works well for interpreters that really allocate things +all the time. In the presence of an optimising just-in-time compiler, +some loops may indeed not allocate anything and therefore still need +the insertion of a safe-point check. + % For other synchronisation requirements, for example: % \begin{itemize}[noitemsep] % \item waiting for a segment to be released, @@ -1089,7 +1103,8 @@ We performed all benchmarks on a machine with an Intel Core i7-4770 -CPU~@3.40GHz (4 cores, 8 threads). There are 16~GiB of memory +CPU~@3.40GHz (4 cores, 8 threads) with disabled Hyper-Threading and +Turbo Boost for less variation in the results. There are 16~GiB of memory available and we ran them under Ubuntu 14.04 with a Linux 3.13.0 kernel. 
The STM system was compiled with a number of segments $N=4$ and a maximum amount of memory of 1.5~GiB (both are configurable at From noreply at buildbot.pypy.org Tue Jul 29 11:45:45 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 29 Jul 2014 11:45:45 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: re-add mentioning non-x86; explain better why JIT-STM matters less at the moment; Message-ID: <20140729094545.5EABB1C0F32@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5375:3f06cf9daaab Date: 2014-07-29 11:45 +0200 http://bitbucket.org/pypy/extradoc/changeset/3f06cf9daaab/ Log: re-add mentioning non-x86; explain better why JIT-STM matters less at the moment; make JIT-STM result summary sound less pessimistic diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -468,6 +468,12 @@ -- we can do it on every object access -- and some compilers support it natively (e.g.\ clang). +On non-x86 architectures, most simple memory accesses could still be +done efficiently if the supported addressing modes allow for the +addition of an offset stored in some register (e.g.\ ARM). For more +complicated accesses (e.g.\ array indexing) or if the CPU does not +support such an addressing mode, one extra addition may be required. + In summary, translating a $\%gs:SO$ to a physical address is a two-step process: First the memory segmentation feature of the CPU constructs a linear address. Then, this LA gets mapped by the MMU to @@ -1100,6 +1106,12 @@ integration is currently still incomplete and not tested much. The JIT-less interpreter provides a much more consistent environment for the STM system, so we remove some unknown variables by disabling it. +Furthermore, we think the interpreter-STM evaluation is more relevant +at this stage as the results can be more directly applied to other +similar interpreters. 
PyPy's JIT~\cite{cfbolz09}, however, is quite +unique as it is a JIT tracing the interpreter instead of the +interpreted language itself. This demands its own thorough +evaluation, which is out of the scope of this paper. We performed all benchmarks on a machine with an Intel Core i7-4770 @@ -1233,14 +1245,15 @@ two threads despite of this overhead. The achieved speedup comparing STM to the GIL is between $1.14\times$ and $1.94\times$. -Still, STM rarely beats CPython's single-thread performance. However, for +Still, STM rarely beats CPython's \emph{single-thread} performance. However, for programs that need concurrency in CPython and that use threads to achieve this, it also makes sense to look at the overhead induced by the GIL on multiple threads. From this perspective, the STM implementation beats CPython's performance in all but two benchmarks. -Since PyPy comes with a JIT~\cite{cfbolz09} to make its overhead compared to CPython -go away, we will now look at how well STM works together with it. +Since PyPy comes with a JIT~\cite{cfbolz09} to make its overhead +compared to CPython go away, we will now look at how well STM works +together with it. \begin{figure}[h] \centering @@ -1300,12 +1313,16 @@ likelihood of conflicts between them and therefore limits scalability even more than in the no-JIT benchmarks. -Overall PyPy needs the JIT for its performance to be -competitive with CPython's. It would be interesting to see how using -our STM system in CPython would turn out, but an in-depth evaluation is -beyond the scope of this paper. On its own, our system scales well, -so we expect to also see this trend in the presence of a -JIT in the future. +Overall, PyPy without STM is around $2\times$ slower than CPython. +Enabling its JIT allows it to outperform CPython by a huge margin. +We see the same kind of speedup on PyPy with STM when enabling the +JIT. 
This means that STM does not generally inhibit the JIT from
+optimising the program's execution, which is already a very important
+result on its own. We also see that for some
+benchmarks, STM is already able to give additional speedups
+compared to just the JIT-induced acceleration. This looks very
+promising and investigating when this is not the case is the next
+logical step.
 
 
 \begin{figure}[h]
@@ -1430,12 +1447,13 @@
 synchronisation in the form of atomic blocks, the average speedup
 still reaches $2.0\times$.
 
-To obtain an overall performance that is competitive with the
-best-performing Python systems (Jython, PyPy), integration of the
-STM-based approach with a JIT compiler is necessary. Once this
-integration matures, the approach outlined here serves not only as a
-simple GIL replacement but also provides a way forward towards
-a parallel programming model for Python.
+To generally outperform the best-performing Python systems (Jython,
+PyPy), integration of the STM-based approach with a JIT compiler is
+necessary. Our early results of this integration suggest that there is
+no inherent incompatibility between STM and PyPy's JIT. Once the
+implementation matures, the approach outlined here serves not only as a
+simple GIL replacement but also provides a way forward towards a
+parallel programming model for Python.
%% \appendix %% \section{Appendix Title} From noreply at buildbot.pypy.org Tue Jul 29 16:17:00 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 29 Jul 2014 16:17:00 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix array Message-ID: <20140729141700.B3FC51C35CC@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72606:60fb6d7660b0 Date: 2014-07-19 10:21 -0500 http://bitbucket.org/pypy/pypy/changeset/60fb6d7660b0/ Log: Fix array diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -7,15 +7,18 @@ wchar_rint = rffi.r_uint WCHAR_INTP = rffi.UINTP +WCHAR_INT = rffi.UINT if rffi.sizeof(rffi.WCHAR_T) == 2: wchar_rint = rffi.r_ushort WCHAR_INTP = rffi.USHORTP + WCHAR_INT = rffi.USHORT -def utf8chr(value): +def utf8chr(value, allow_large_codepoints=False): # Like unichr, but returns a Utf8Str object + # TODO: Do this without the builder so its faster b = Utf8Builder() - b.append(value) + b.append(value, allow_large_codepoints=allow_large_codepoints) return b.build() def utf8ord_bytes(bytes, start): @@ -550,7 +553,7 @@ @specialize.argtype(1) - def append(self, c): + def append(self, c, allow_large_codepoints=False): if isinstance(c, int) or isinstance(c, r_uint): if c < 0x80: self._builder.append(chr(c)) @@ -563,7 +566,7 @@ self._builder.append(chr(0x80 | (c >> 6 & 0x3F))) self._builder.append(chr(0x80 | (c & 0x3F))) self._is_ascii = False - elif c <= 0x10FFFF: + elif c <= 0x10FFFF or allow_large_codepoints: self._builder.append(chr(0xF0 | (c >> 18))) self._builder.append(chr(0x80 | (c >> 12 & 0x3F))) self._builder.append(chr(0x80 | (c >> 6 & 0x3F))) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -3,7 +3,7 @@ from rpython.rlib import jit from rpython.rlib.buffer import Buffer from rpython.rlib.objectmodel import keepalive_until_here 
-from rpython.rlib.rarithmetic import ovfcheck, widen +from rpython.rlib.rarithmetic import ovfcheck, widen, intmask from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import lltype, rffi @@ -15,6 +15,8 @@ interp2app, interpindirect2app, unwrap_spec) from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, make_weakref_descr) +from pypy.interpreter import utf8 +from pypy.interpreter.utf8 import Utf8Str, utf8ord, utf8chr from pypy.module._file.interp_file import W_File from pypy.objspace.std.floatobject import W_FloatObject @@ -314,7 +316,7 @@ """ if self.typecode == 'u': buf = rffi.cast(UNICODE_ARRAY, self._buffer_as_unsigned()) - return space.wrap(rffi.wcharpsize2unicode(buf, self.len)) + return space.wrap(Utf8Str.from_wcharpsize(buf, self.len)) else: msg = "tounicode() may only be called on type 'u' arrays" raise OperationError(space.w_ValueError, space.wrap(msg)) @@ -570,7 +572,7 @@ types = { 'c': TypeCode(lltype.Char, 'str_w', method=''), - 'u': TypeCode(lltype.UniChar, 'unicode_w', method=''), + 'u': TypeCode(utf8.WCHAR_INT, 'unicode_w', method=''), 'b': TypeCode(rffi.SIGNEDCHAR, 'int_w', True, True), 'B': TypeCode(rffi.UCHAR, 'int_w', True), 'h': TypeCode(rffi.SHORT, 'int_w', True, True), @@ -670,7 +672,10 @@ if len(item) != 1: msg = 'array item must be char' raise OperationError(space.w_TypeError, space.wrap(msg)) - item = item[0] + if mytype.unwrap == 'str_w': + item = item[0] + else: + item = utf8ord(item) return rffi.cast(mytype.itemtype, item) # # "regular" case: it fits in an rpython integer (lltype.Signed) @@ -791,6 +796,9 @@ item = rffi.cast(lltype.Signed, item) elif mytype.typecode == 'f': item = float(item) + elif mytype.typecode == 'u': + # TODO: Does this nned special handling for 16bit whar_t? 
+ item = utf8chr(intmask(item), allow_large_codepoints=True) return space.wrap(item) # interface From noreply at buildbot.pypy.org Tue Jul 29 16:17:02 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 29 Jul 2014 16:17:02 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Add support for __eq__, __ne__, __add__ and __mul__ to RPython Message-ID: <20140729141702.0A2551C35CC@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72607:92302cdd34ec Date: 2014-07-28 13:02 -0500 http://bitbucket.org/pypy/pypy/changeset/92302cdd34ec/ Log: Add support for __eq__, __ne__, __add__ and __mul__ to RPython diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -718,6 +718,19 @@ thistype = pairtype(SomeInstance, SomeInstance) return super(thistype, pair(ins1, ins2)).improve() + def eq((s_obj1, s_obj2)): + if s_obj1.classdef.classdesc.lookup('__eq__'): + return s_obj1._emulate_call("__eq__", s_obj2) + elif s_obj2.classdef.classdesc.lookup('__eq__'): + return s_obj2._emulate_call("__eq__", s_obj1) + return s_Bool + + def ne((s_obj1, s_obj2)): + if s_obj1.classdef.classdesc.lookup('__ne__'): + return s_obj1._emulate_call("__ne__", s_obj2) + elif s_obj2.classdef.classdesc.lookup('__ne__'): + return s_obj2._emulate_call("__ne__", s_obj1) + return s_Bool class __extend__(pairtype(SomeInstance, SomeObject)): def getitem((s_ins, s_idx)): @@ -726,6 +739,33 @@ def setitem((s_ins, s_idx), s_value): return s_ins._emulate_call("__setitem__", s_idx, s_value) + def add((s_ins, s_other)): + return s_ins._emulate_call("__add__", s_other) + + def mul((s_ins, s_other)): + return s_ins._emulate_call("__mul__", s_other) + + def eq((s_ins, s_obj)): + if s_ins.classdef.classdesc.lookup('__eq__'): + return s_ins._emulate_call("__eq__", s_obj) + return super(pairtype(SomeInstance, SomeObject), pair(s_ins, s_obj)).eq() + + def ne((s_ins, s_obj)): + if 
s_ins.classdef.classdesc.lookup('__ne__'): + return s_ins._emulate_call("__ne__", s_obj) + return super(pairtype(SomeInstance, SomeObject), pair(s_ins, s_obj)).ne() + +class __extend__(pairtype(SomeObject, SomeInstance)): + def eq((s_obj, s_ins)): + if s_ins.classdef.classdesc.lookup('__eq__'): + return s_ins._emulate_call("__eq__", s_obj) + return super(pairtype(SomeObject, SomeInstance), pair(s_obj, s_ins)).eq() + + def ne((s_obj, s_ins)): + if s_ins.classdef.classdesc.lookup('__ne__'): + return s_ins._emulate_call("__ne__", s_obj) + return super(pairtype(SomeObject, SomeInstance), pair(s_obj, s_ins)).ne() + class __extend__(pairtype(SomeIterator, SomeIterator)): diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -476,6 +476,17 @@ if self.pyobj not in classdef.FORCE_ATTRIBUTES_INTO_CLASSES: self.all_enforced_attrs = [] # no attribute allowed + if (self.lookup('__eq__') and + not all(b.lookup('__eq__') for b in self.getallbases())): + raise AnnotatorError("A class may only define a __eq__ method if " + "the class at the base of its heirarchy also " + "has a __eq__ method.") + if (self.lookup('__ne__') and + not all(b.lookup('__ne__') for b in self.getallbases())): + raise AnnotatorError("A class may only define a __ne__ method if " + "the class at the base of its heirarchy also " + "has a __ne__ method.") + def add_source_attribute(self, name, value, mixin=False): if isinstance(value, types.FunctionType): # for debugging diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2780,6 +2780,42 @@ s = a.build_types(f, []) assert s.knowntype == int + def test__eq__in_sub_class(self): + class Base(object): + pass + class A(Base): + def __eq__(self, other): + return True + + def f(a): + if a: + o = Base() + else: + o = 
A() + + return o == Base() + + a = self.RPythonAnnotator() + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + + def test__ne__in_sub_class(self): + class Base(object): + pass + class A(Base): + def __ne__(self, other): + return True + + def f(a): + if a: + o = Base() + else: + o = A() + + return o != Base() + + a = self.RPythonAnnotator() + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + def test_chr_out_of_bounds(self): def g(n, max): if n < max: diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -657,10 +657,8 @@ r_ins = getinstancerepr(r_ins1.rtyper, basedef, r_ins1.gcflavor) return pairtype(Repr, Repr).rtype_is_(pair(r_ins, r_ins), hop) - rtype_eq = rtype_is_ - - def rtype_ne(rpair, hop): - v = rpair.rtype_eq(hop) + def _rtype_ne(rpair, hop): + v = rpair.rtype_is_(hop) return hop.genop("bool_not", [v], resulttype=Bool) # ____________________________________________________________ diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -7,7 +7,7 @@ from rpython.rtyper.lltypesystem.lltype import Void from rpython.rtyper.rmodel import Repr, getgcflavor, inputconst from rpython.rlib.objectmodel import UnboxedValue -from rpython.tool.pairtype import pairtype +from rpython.tool.pairtype import pair, pairtype class FieldListAccessor(object): @@ -471,14 +471,77 @@ break +def create_forwarding_func(name): + def f((r_ins, r_obj), hop): + return r_ins._emulate_call(hop, name) + return f + class __extend__(pairtype(AbstractInstanceRepr, Repr)): - def rtype_getitem((r_ins, r_obj), hop): - return r_ins._emulate_call(hop, "__getitem__") + rtype_getitem = create_forwarding_func('__getitem__') + rtype_setitem = create_forwarding_func('__setitem__') + rtype_add = create_forwarding_func('__add__') + rtype_mul = 
create_forwarding_func('__mul__') - def rtype_setitem((r_ins, r_obj), hop): - return r_ins._emulate_call(hop, "__setitem__") + rtype_inplace_add = rtype_add + rtype_inplace_mul = rtype_mul + def rtype_eq((r_ins, r_other), hop): + if r_ins.classdef.classdesc.lookup('__eq__'): + return r_ins._emulate_call(hop, '__eq__') + return super(pairtype(AbstractInstanceRepr, Repr), + pair(r_ins, r_other)).rtype_eq(hop) + def rtype_ne((r_ins, r_other), hop): + if r_ins.classdef.classdesc.lookup('__ne__'): + return r_ins._emulate_call(hop, '__ne__') + return super(pairtype(AbstractInstanceRepr, Repr), + pair(r_ins, r_other)).rtype_ne(hop) + +class __extend__(pairtype(AbstractInstanceRepr, AbstractInstanceRepr)): + def rtype_eq((r_ins, r_other), hop): + if r_ins.classdef.classdesc.lookup('__eq__'): + return r_ins._emulate_call(hop, '__eq__') + elif r_other.classdef.classdesc.lookup('__eq__'): + # Reverse the order of the arguments before the call to __eq__ + hop2 = hop.copy() + hop2.args_r = hop.args_r[::-1] + hop2.args_s = hop.args_s[::-1] + hop2.args_v = hop.args_v[::-1] + return r_other._emulate_call(hop2, '__eq__') + return pair(r_ins, r_other).rtype_is_(hop) + + def rtype_ne((r_ins, r_other), hop): + if r_ins.classdef.classdesc.lookup('__ne__'): + return r_ins._emulate_call(hop, '__ne__') + elif r_other.classdef.classdesc.lookup('__ne__'): + # Reverse the order of the arguments before the call to __ne__ + hop2 = hop.copy() + hop2.args_r = hop.args_r[::-1] + hop2.args_s = hop.args_s[::-1] + hop2.args_v = hop.args_v[::-1] + return r_other._emulate_call(hop2, '__ne__') + return pair(r_ins, r_other)._rtype_ne(hop) + +class __extend__(pairtype(Repr, AbstractInstanceRepr)): + def rtype_eq((r_other, r_ins), hop): + if r_ins.classdef.classdesc.lookup('__eq__'): + hop2 = hop.copy() + hop2.args_r = hop.args_r[::-1] + hop2.args_s = hop.args_s[::-1] + hop2.args_v = hop.args_v[::-1] + return r_ins._emulate_call(hop2, '__eq__') + return super(pairtype(Repr, AbstractInstanceRepr), + 
pair(r_other, r_ins)).rtype_eq(hop) + + def rtype_ne((r_other, r_ins), hop): + if r_ins.classdef.classdesc.lookup('__ne__'): + hop2 = hop.copy() + hop2.args_r = hop.args_r[::-1] + hop2.args_s = hop.args_s[::-1] + hop2.args_v = hop.args_v[::-1] + return r_ins._emulate_call(hop2, '__ne__') + return super(pairtype(Repr, AbstractInstanceRepr), + pair(r_other, r_ins)).rtype_ne(hop) # ____________________________________________________________ diff --git a/rpython/rtyper/test/test_rclass.py b/rpython/rtyper/test/test_rclass.py --- a/rpython/rtyper/test/test_rclass.py +++ b/rpython/rtyper/test/test_rclass.py @@ -1271,3 +1271,87 @@ return cls[k](a, b).b assert self.interpret(f, [1, 4, 7]) == 7 + + def test_overriding_eq(self): + class Base(object): + def __eq__(self, other): + return self is other + class A(Base): + def __eq__(self, other): + return True + + def f(a): + if a: + o = Base() + else: + o = A() + + return o == Base() + + assert self.interpret(f, [0]) == f(0) + assert self.interpret(f, [1]) == f(1) + + def test_eq_reversed(self): + class A(object): + def __eq__(self, other): + return not bool(other) + + def f(a): + return (a == A()) == (A() == a) + assert self.interpret(f, [0]) == f(0) + assert self.interpret(f, [1]) == f(1) + + def test_eq_without_ne(self): + class A(object): + def __eq__(self, other): + return False + + def f(): + a = A() + return a != A() + + assert self.interpret(f, []) == f() + + def test_overriding_ne(self): + class Base(object): + def __ne__(self, other): + return self is other + class A(Base): + def __ne__(self, other): + return True + + def f(a): + if a: + o = Base() + else: + o = A() + + return o != Base() + + assert self.interpret(f, [0]) == f(0) + assert self.interpret(f, [1]) == f(1) + + def test_ne_reversed(self): + class A(object): + def __ne__(self, other): + return not bool(other) + + def f(a): + return (a != A()) == (A() != a) + assert self.interpret(f, [0]) == f(0) + assert self.interpret(f, [1]) == f(1) + + def 
test_arithmetic_ops(self): + class A(object): + def __add__(self, other): + return other + other + + def __mul__(self, other): + return other * other + + def f(a): + o = A() + return (o + a) + (o * a) + + for i in range(10): + assert self.interpret(f, [i]) == f(i) From noreply at buildbot.pypy.org Tue Jul 29 16:17:03 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 29 Jul 2014 16:17:03 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix most remaining module failures and some translation failures Message-ID: <20140729141703.688B81C35CC@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72608:d4419a342b68 Date: 2014-07-29 09:16 -0500 http://bitbucket.org/pypy/pypy/changeset/d4419a342b68/ Log: Fix most remaining module failures and some translation failures diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -10,6 +10,7 @@ from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ INT_MIN, INT_MAX, UINT_MAX +from pypy.interpreter.utf8 import Utf8Str from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, UserDelAction) from pypy.interpreter.error import OperationError, new_exception_class, oefmt @@ -1545,7 +1546,10 @@ return self.str_w(w_obj) def unicode_w(self, w_obj): - return w_obj.unicode_w(self) + #return w_obj.unicode_w(self) + res = w_obj.unicode_w(self) + assert isinstance(res, Utf8Str) + return res def unicode0_w(self, w_obj): "Like unicode_w, but rejects strings with NUL bytes." 
diff --git a/pypy/interpreter/test/test_utf8.py b/pypy/interpreter/test/test_utf8.py --- a/pypy/interpreter/test/test_utf8.py +++ b/pypy/interpreter/test/test_utf8.py @@ -5,6 +5,7 @@ from pypy.interpreter.utf8 import ( Utf8Str, Utf8Builder, utf8chr, utf8ord) from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.test.test_llinterp import interpret def build_utf8str(): builder = Utf8Builder() @@ -241,3 +242,25 @@ assert s == u[:4] rffi.free_wcharp(wcharp) + +def test_translate_utf8(): + def f(): + s = build_utf8str() + + s *= 10 + s += Utf8Str('one') + return len(s) + assert interpret(f, []) == f() + + def f(): + one = Utf8Str("one") + two = Utf8Str("one") + + return int(one == two) + int(not (one != two)) + assert interpret(f, []) == f() + + def f(): + one = Utf8Str("one") + + return one == None + assert interpret(f, []) == f() diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -1,10 +1,11 @@ from rpython.rlib.rstring import StringBuilder -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib.runicode import utf8_code_length from rpython.rlib.unicodedata import unicodedb_5_2_0 as unicodedb -from rpython.rlib.rarithmetic import r_uint, intmask +from rpython.rlib.rarithmetic import r_uint, intmask, base_int from rpython.rtyper.lltypesystem import rffi, lltype + wchar_rint = rffi.r_uint WCHAR_INTP = rffi.UINTP WCHAR_INT = rffi.UINT @@ -14,11 +15,11 @@ WCHAR_INT = rffi.USHORT -def utf8chr(value, allow_large_codepoints=False): +def utf8chr(value): # Like unichr, but returns a Utf8Str object # TODO: Do this without the builder so its faster b = Utf8Builder() - b.append(value, allow_large_codepoints=allow_large_codepoints) + b.append(value) return b.build() def utf8ord_bytes(bytes, start): @@ -160,22 +161,26 @@ return hash(self.bytes) def __eq__(self, other): - """NOT_RPYTHON""" if isinstance(other, 
Utf8Str): return self.bytes == other.bytes + if other is None: + return False if isinstance(other, unicode): + assert not we_are_translated() return unicode(self.bytes, 'utf8') == other - return False + raise ValueError() def __ne__(self, other): - """NOT_RPYTHON""" if isinstance(other, Utf8Str): return self.bytes != other.bytes + if other is None: + return True if isinstance(other, unicode): + assert not we_are_translated() return unicode(self.bytes, 'utf8') != other - return True + raise ValueError() def __lt__(self, other): return self.bytes < other.bytes @@ -194,7 +199,7 @@ if isinstance(other, Utf8Str): return other.bytes in self.bytes if isinstance(other, unicode): - # TODO: Assert fail if translated + assert not we_are_translated() return other in unicode(self.bytes, 'utf8') if isinstance(other, str): return other in self.bytes @@ -247,6 +252,7 @@ else: end = self.index_of_char(end) + assert start >= 0 return start, end @specialize.argtype(2, 3) @@ -257,10 +263,12 @@ if isinstance(other, Utf8Str): pos = self.bytes.find(other.bytes, start, end) - elif isinstance(other, unicode): - pos = unicode(self.bytes, 'utf8').find(other, start, end) elif isinstance(other, str): pos = self.bytes.find(other, start, end) + else: + assert isinstance(other, unicode) + assert not we_are_translated() + pos = unicode(self.bytes, 'utf8').find(other, start, end) if pos == -1: return -1 @@ -469,7 +477,7 @@ builder = Utf8Builder() i = 0; while True: - c = int(array[i]) + c = intmask(array[i]) if c == 0: break @@ -504,7 +512,7 @@ if rffi.sizeof(rffi.WCHAR_T) == 2: if i != size - 1 and 0xD800 <= c <= 0xDBFF: i += 1 - c2 = int(array[i]) + c2 = intmask(array[i]) if c2 == 0: builder.append(c) break @@ -530,7 +538,7 @@ if rffi.sizeof(rffi.WCHAR_T) == 2: if i != size - 1 and 0xD800 <= c <= 0xDBFF: i += 1 - c2 = int(array[i]) + c2 = intmask(array[i]) if not (0xDC00 <= c2 <= 0xDFFF): builder.append(c) c = c2 @@ -553,8 +561,14 @@ @specialize.argtype(1) - def append(self, c, 
allow_large_codepoints=False): - if isinstance(c, int) or isinstance(c, r_uint): + def append(self, c): + if isinstance(c, Utf8Str): + self._builder.append(c.bytes) + if not c._is_ascii: + self._is_ascii = False + elif isinstance(c, int) or isinstance(c, r_uint): + if isinstance(c, base_int): + c = intmask(c) if c < 0x80: self._builder.append(chr(c)) elif c < 0x800: @@ -566,7 +580,7 @@ self._builder.append(chr(0x80 | (c >> 6 & 0x3F))) self._builder.append(chr(0x80 | (c & 0x3F))) self._is_ascii = False - elif c <= 0x10FFFF or allow_large_codepoints: + elif c <= 0x10FFFF: self._builder.append(chr(0xF0 | (c >> 18))) self._builder.append(chr(0x80 | (c >> 12 & 0x3F))) self._builder.append(chr(0x80 | (c >> 6 & 0x3F))) @@ -574,10 +588,6 @@ self._is_ascii = False else: raise ValueError("Invalid unicode codepoint > 0x10FFFF.") - elif isinstance(c, Utf8Str): - self._builder.append(c.bytes) - if not c._is_ascii: - self._is_ascii = False else: # TODO: Remove this check? if len(c) == 1: @@ -769,3 +779,4 @@ del character_calc_value del ForwardIterBase del ReverseIterBase + diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -143,20 +143,20 @@ keepalive_until_here(cdataobj) return w_res - def _convert_to_unichar(self, w_ob): + def _convert_to_uni_codepoint(self, w_ob): space = self.space if space.isinstance_w(w_ob, space.w_unicode): s = space.unicode_w(w_ob) if len(s) == 1: - return s[0] + return utf8ord(s, 0) if (isinstance(w_ob, cdataobj.W_CData) and isinstance(w_ob.ctype, W_CTypePrimitiveUniChar)): - return rffi.cast(rffi.CWCHARP, w_ob._cdata)[0] + return rffi.cast(utf8.WCHAR_INTP, w_ob._cdata)[0] raise self._convert_error("unicode string of length 1", w_ob) def convert_from_object(self, cdata, w_ob): - value = self._convert_to_unichar(w_ob) - rffi.cast(utf8.WCHAR_INTP, cdata)[0] = utf8.wchar_rint(utf8ord(value)) + value = 
self._convert_to_uni_codepoint(w_ob) + rffi.cast(utf8.WCHAR_INTP, cdata)[0] = utf8.wchar_rint(value) class W_CTypePrimitiveSigned(W_CTypePrimitive): diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -383,7 +383,7 @@ self.readtranslate = newline is None self.readnl = newline - self.writetranslate = (newline != Utf8Str('')) + self.writetranslate = (newline is None or len(newline) == 0) if not self.readuniversal: self.writenl = self.readnl if self.writenl == Utf8Str('\n'): diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -5,6 +5,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty +from pypy.interpreter.utf8 import utf8ord from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError from pypy.module._rawffi.interp_rawffi import segfault_exception diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -274,7 +274,7 @@ return ptr_val else: if T is rffi.CWCHARP: - return utf8chr(int(rffi.cast(WCHAR_INTP, ptr)[ofs])) + return utf8chr(intmask(rffi.cast(WCHAR_INTP, ptr)[ofs])) return rffi.cast(T, ptr)[ofs] read_ptr._annspecialcase_ = 'specialize:arg(2)' @@ -415,6 +415,7 @@ "Expected unicode string of length one as wide character")) val = utf8ord(s) + #val = 0 if rffi.sizeof(rffi.WCHAR_T) == 2 and val > 0xFFFF: # Utf-16 must be used on systems with a 2 byte wchar_t to # encode codepoints > 0xFFFF diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -5,7 +5,7 @@ from pypy.interpreter.typedef import make_weakref_descr 
from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError -from pypy.interpreter.utf8 import utf8ord +from pypy.interpreter.utf8 import Utf8Str, utf8ord from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit @@ -286,7 +286,7 @@ space.w_None)) if space.isinstance_w(w_string, space.w_unicode): - w_emptystr = space.wrap(u'') + w_emptystr = space.wrap(Utf8Str('')) else: w_emptystr = space.wrap('') w_item = space.call_method(w_emptystr, 'join', diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -797,8 +797,13 @@ elif mytype.typecode == 'f': item = float(item) elif mytype.typecode == 'u': - # TODO: Does this nned special handling for 16bit whar_t? - item = utf8chr(intmask(item), allow_large_codepoints=True) + # TODO: Does this need special handling for 16bit whar_t? + try: + item = utf8chr(intmask(item)) + except ValueError: + raise oefmt(space.w_ValueError, + 'character U+%s is not in range[U+0000; ' + 'U+10ffff]', hex(intmask(item))) return space.wrap(item) # interface @@ -998,9 +1003,9 @@ start = 0 # if oldlen == 1: - if mytype.unwrap == 'str_w' or mytype.unwrap == 'unicode_w': + if mytype.unwrap == 'str_w': zero = not ord(self.buffer[0]) - elif mytype.unwrap == 'int_w' or mytype.unwrap == 'bigint_w': + elif mytype.unwrap in ('int_w', 'bigint_w', 'unicode_w'): zero = not widen(self.buffer[0]) #elif mytype.unwrap == 'float_w': # value = ...float(self.buffer[0]) xxx handle the case of -0.0 diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -834,12 +834,6 @@ assert repr(mya('i', [1, 2, 3])) == "array('i', [1, 2, 3])" assert repr(mya('i', (1, 2, 3))) == "array('i', [1, 2, 3])" - def test_unicode_outofrange(self): - a = self.array('u', 
unicode(r'\x01\u263a\x00\ufeff', 'unicode-escape')) - b = self.array('u', unicode(r'\x01\u263a\x00\ufeff', 'unicode-escape')) - b.byteswap() - assert a != b - def test_weakref(self): import weakref a = self.array('c', 'Hi!') @@ -1032,6 +1026,11 @@ def test_fresh_array_buffer_str(self): assert str(buffer(self.array('i'))) == '' + def test_unicode_outofrange(self): + b = self.array('u', unicode(r'\x01\u263a\x00\ufeff', 'unicode-escape')) + b.byteswap() + raises(ValueError, "b[0]") + class AppTestArrayBuiltinShortcut(AppTestArray): spaceconfig = AppTestArray.spaceconfig.copy() diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -3,9 +3,60 @@ from rpython.rlib.rstring import StringBuilder from rpython.rlib.rstruct.error import StructError from rpython.rlib.rstruct.formatiterator import FormatIterator +from rpython.rlib.rstruct.standardfmttable import standard_fmttable +from rpython.rlib.unroll import unrolling_iterable +from rpython.rtyper.lltypesystem import rffi -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.utf8 import utf8ord, utf8chr +wchar_len = rffi.sizeof(rffi.WCHAR_T) + +unroll_pack_unichar_iter = unrolling_iterable(range(wchar_len-1, -1, -1)) +def pack_unichar(fmtiter): + value = utf8ord(fmtiter.accept_unicode_arg()) + + # TODO: What do I do on a system with sizeof(wchar_t) == 2? I can't + # split it reasonably? 
+ #if not min <= value <= max: + # raise StructError(errormsg) + + if fmtiter.bigendian: + for i in unroll_pack_unichar_iter: + x = (value >> (8*i)) & 0xff + fmtiter.result.append(chr(x)) + else: + for i in unroll_pack_unichar_iter: + fmtiter.result.append(chr(value & 0xff)) + value >>= 8 + +unroll_upack_unichar_iter = unrolling_iterable(range(wchar_len)) +def unpack_unichar(fmtiter): + #intvalue = inttype(0) + intvalue = 0 + s = fmtiter.read(wchar_len) + idx = 0 + if fmtiter.bigendian: + for i in unroll_upack_unichar_iter: + x = ord(s[idx]) + intvalue <<= 8 + #intvalue |= inttype(x) + intvalue |= x + idx += 1 + else: + for i in unroll_upack_unichar_iter: + x = ord(s[idx]) + #intvalue |= inttype(x) << (8*i) + intvalue |= x << (8*i) + idx += 1 + + try: + value = utf8chr(intvalue) + except ValueError: + raise oefmt(fmtiter.space.w_ValueError, + 'character U+%s is not in range[U+0000; ' + 'U+10ffff]', hex(intvalue)) + fmtiter.appendobj(value) class PackFormatIterator(FormatIterator): def __init__(self, space, args_w, size): @@ -20,11 +71,15 @@ @jit.unroll_safe @specialize.arg(1) def operate(self, fmtdesc, repetitions): + pack = fmtdesc.pack + if fmtdesc.fmtchar == 'u': + pack = pack_unichar + if fmtdesc.needcount: - fmtdesc.pack(self, repetitions) + pack(self, repetitions) else: for i in range(repetitions): - fmtdesc.pack(self) + pack(self) _operate_is_specialized_ = True @jit.unroll_safe @@ -115,11 +170,15 @@ @jit.unroll_safe @specialize.arg(1) def operate(self, fmtdesc, repetitions): + unpack = fmtdesc.unpack + if fmtdesc.fmtchar == 'u': + unpack = unpack_unichar + if fmtdesc.needcount: - fmtdesc.unpack(self, repetitions) + unpack(self, repetitions) else: for i in range(repetitions): - fmtdesc.unpack(self) + unpack(self) _operate_is_specialized_ = True def align(self, mask): diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -412,6 
+412,9 @@ assert s.unpack(s.pack(42)) == (42,) assert s.unpack_from(memoryview(s.pack(42))) == (42,) + def test_unicode_outofrange(self): + raises(ValueError, "self.struct.unpack('u', '0000')") + class AppTestStructBuffer(object): spaceconfig = dict(usemodules=['struct', '__pypy__']) From noreply at buildbot.pypy.org Tue Jul 29 16:31:45 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 29 Jul 2014 16:31:45 +0200 (CEST) Subject: [pypy-commit] pypy rpython-__eq__: Add support for __eq__, __ne__, __add__ and __mul__ to RPython Message-ID: <20140729143145.C2AA01C35CC@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: rpython-__eq__ Changeset: r72609:a6f86478a53a Date: 2014-07-28 13:02 -0500 http://bitbucket.org/pypy/pypy/changeset/a6f86478a53a/ Log: Add support for __eq__, __ne__, __add__ and __mul__ to RPython diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -718,6 +718,19 @@ thistype = pairtype(SomeInstance, SomeInstance) return super(thistype, pair(ins1, ins2)).improve() + def eq((s_obj1, s_obj2)): + if s_obj1.classdef.classdesc.lookup('__eq__'): + return s_obj1._emulate_call("__eq__", s_obj2) + elif s_obj2.classdef.classdesc.lookup('__eq__'): + return s_obj2._emulate_call("__eq__", s_obj1) + return s_Bool + + def ne((s_obj1, s_obj2)): + if s_obj1.classdef.classdesc.lookup('__ne__'): + return s_obj1._emulate_call("__ne__", s_obj2) + elif s_obj2.classdef.classdesc.lookup('__ne__'): + return s_obj2._emulate_call("__ne__", s_obj1) + return s_Bool class __extend__(pairtype(SomeInstance, SomeObject)): def getitem((s_ins, s_idx)): @@ -726,6 +739,33 @@ def setitem((s_ins, s_idx), s_value): return s_ins._emulate_call("__setitem__", s_idx, s_value) + def add((s_ins, s_other)): + return s_ins._emulate_call("__add__", s_other) + + def mul((s_ins, s_other)): + return s_ins._emulate_call("__mul__", s_other) + + def eq((s_ins, s_obj)): + if 
s_ins.classdef.classdesc.lookup('__eq__'): + return s_ins._emulate_call("__eq__", s_obj) + return super(pairtype(SomeInstance, SomeObject), pair(s_ins, s_obj)).eq() + + def ne((s_ins, s_obj)): + if s_ins.classdef.classdesc.lookup('__ne__'): + return s_ins._emulate_call("__ne__", s_obj) + return super(pairtype(SomeInstance, SomeObject), pair(s_ins, s_obj)).ne() + +class __extend__(pairtype(SomeObject, SomeInstance)): + def eq((s_obj, s_ins)): + if s_ins.classdef.classdesc.lookup('__eq__'): + return s_ins._emulate_call("__eq__", s_obj) + return super(pairtype(SomeObject, SomeInstance), pair(s_obj, s_ins)).eq() + + def ne((s_obj, s_ins)): + if s_ins.classdef.classdesc.lookup('__ne__'): + return s_ins._emulate_call("__ne__", s_obj) + return super(pairtype(SomeObject, SomeInstance), pair(s_obj, s_ins)).ne() + class __extend__(pairtype(SomeIterator, SomeIterator)): diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -476,6 +476,17 @@ if self.pyobj not in classdef.FORCE_ATTRIBUTES_INTO_CLASSES: self.all_enforced_attrs = [] # no attribute allowed + if (self.lookup('__eq__') and + not all(b.lookup('__eq__') for b in self.getallbases())): + raise AnnotatorError("A class may only define a __eq__ method if " + "the class at the base of its heirarchy also " + "has a __eq__ method.") + if (self.lookup('__ne__') and + not all(b.lookup('__ne__') for b in self.getallbases())): + raise AnnotatorError("A class may only define a __ne__ method if " + "the class at the base of its heirarchy also " + "has a __ne__ method.") + def add_source_attribute(self, name, value, mixin=False): if isinstance(value, types.FunctionType): # for debugging diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2780,6 +2780,42 @@ s = a.build_types(f, []) 
assert s.knowntype == int + def test__eq__in_sub_class(self): + class Base(object): + pass + class A(Base): + def __eq__(self, other): + return True + + def f(a): + if a: + o = Base() + else: + o = A() + + return o == Base() + + a = self.RPythonAnnotator() + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + + def test__ne__in_sub_class(self): + class Base(object): + pass + class A(Base): + def __ne__(self, other): + return True + + def f(a): + if a: + o = Base() + else: + o = A() + + return o != Base() + + a = self.RPythonAnnotator() + py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + def test_chr_out_of_bounds(self): def g(n, max): if n < max: diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -647,10 +647,8 @@ r_ins = getinstancerepr(r_ins1.rtyper, basedef, r_ins1.gcflavor) return pairtype(Repr, Repr).rtype_is_(pair(r_ins, r_ins), hop) - rtype_eq = rtype_is_ - - def rtype_ne(rpair, hop): - v = rpair.rtype_eq(hop) + def _rtype_ne(rpair, hop): + v = rpair.rtype_is_(hop) return hop.genop("bool_not", [v], resulttype=Bool) # ____________________________________________________________ diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -7,7 +7,7 @@ from rpython.rtyper.lltypesystem.lltype import Void from rpython.rtyper.rmodel import Repr, getgcflavor, inputconst from rpython.rlib.objectmodel import UnboxedValue -from rpython.tool.pairtype import pairtype +from rpython.tool.pairtype import pair, pairtype class FieldListAccessor(object): @@ -471,14 +471,77 @@ break +def create_forwarding_func(name): + def f((r_ins, r_obj), hop): + return r_ins._emulate_call(hop, name) + return f + class __extend__(pairtype(AbstractInstanceRepr, Repr)): - def rtype_getitem((r_ins, r_obj), hop): - return r_ins._emulate_call(hop, "__getitem__") + 
rtype_getitem = create_forwarding_func('__getitem__') + rtype_setitem = create_forwarding_func('__setitem__') + rtype_add = create_forwarding_func('__add__') + rtype_mul = create_forwarding_func('__mul__') - def rtype_setitem((r_ins, r_obj), hop): - return r_ins._emulate_call(hop, "__setitem__") + rtype_inplace_add = rtype_add + rtype_inplace_mul = rtype_mul + def rtype_eq((r_ins, r_other), hop): + if r_ins.classdef.classdesc.lookup('__eq__'): + return r_ins._emulate_call(hop, '__eq__') + return super(pairtype(AbstractInstanceRepr, Repr), + pair(r_ins, r_other)).rtype_eq(hop) + def rtype_ne((r_ins, r_other), hop): + if r_ins.classdef.classdesc.lookup('__ne__'): + return r_ins._emulate_call(hop, '__ne__') + return super(pairtype(AbstractInstanceRepr, Repr), + pair(r_ins, r_other)).rtype_ne(hop) + +class __extend__(pairtype(AbstractInstanceRepr, AbstractInstanceRepr)): + def rtype_eq((r_ins, r_other), hop): + if r_ins.classdef.classdesc.lookup('__eq__'): + return r_ins._emulate_call(hop, '__eq__') + elif r_other.classdef.classdesc.lookup('__eq__'): + # Reverse the order of the arguments before the call to __eq__ + hop2 = hop.copy() + hop2.args_r = hop.args_r[::-1] + hop2.args_s = hop.args_s[::-1] + hop2.args_v = hop.args_v[::-1] + return r_other._emulate_call(hop2, '__eq__') + return pair(r_ins, r_other).rtype_is_(hop) + + def rtype_ne((r_ins, r_other), hop): + if r_ins.classdef.classdesc.lookup('__ne__'): + return r_ins._emulate_call(hop, '__ne__') + elif r_other.classdef.classdesc.lookup('__ne__'): + # Reverse the order of the arguments before the call to __ne__ + hop2 = hop.copy() + hop2.args_r = hop.args_r[::-1] + hop2.args_s = hop.args_s[::-1] + hop2.args_v = hop.args_v[::-1] + return r_other._emulate_call(hop2, '__ne__') + return pair(r_ins, r_other)._rtype_ne(hop) + +class __extend__(pairtype(Repr, AbstractInstanceRepr)): + def rtype_eq((r_other, r_ins), hop): + if r_ins.classdef.classdesc.lookup('__eq__'): + hop2 = hop.copy() + hop2.args_r = hop.args_r[::-1] 
+ hop2.args_s = hop.args_s[::-1] + hop2.args_v = hop.args_v[::-1] + return r_ins._emulate_call(hop2, '__eq__') + return super(pairtype(Repr, AbstractInstanceRepr), + pair(r_other, r_ins)).rtype_eq(hop) + + def rtype_ne((r_other, r_ins), hop): + if r_ins.classdef.classdesc.lookup('__ne__'): + hop2 = hop.copy() + hop2.args_r = hop.args_r[::-1] + hop2.args_s = hop.args_s[::-1] + hop2.args_v = hop.args_v[::-1] + return r_ins._emulate_call(hop2, '__ne__') + return super(pairtype(Repr, AbstractInstanceRepr), + pair(r_other, r_ins)).rtype_ne(hop) # ____________________________________________________________ diff --git a/rpython/rtyper/test/test_rclass.py b/rpython/rtyper/test/test_rclass.py --- a/rpython/rtyper/test/test_rclass.py +++ b/rpython/rtyper/test/test_rclass.py @@ -1290,3 +1290,87 @@ return cls[k](a, b).b assert self.interpret(f, [1, 4, 7]) == 7 + + def test_overriding_eq(self): + class Base(object): + def __eq__(self, other): + return self is other + class A(Base): + def __eq__(self, other): + return True + + def f(a): + if a: + o = Base() + else: + o = A() + + return o == Base() + + assert self.interpret(f, [0]) == f(0) + assert self.interpret(f, [1]) == f(1) + + def test_eq_reversed(self): + class A(object): + def __eq__(self, other): + return not bool(other) + + def f(a): + return (a == A()) == (A() == a) + assert self.interpret(f, [0]) == f(0) + assert self.interpret(f, [1]) == f(1) + + def test_eq_without_ne(self): + class A(object): + def __eq__(self, other): + return False + + def f(): + a = A() + return a != A() + + assert self.interpret(f, []) == f() + + def test_overriding_ne(self): + class Base(object): + def __ne__(self, other): + return self is other + class A(Base): + def __ne__(self, other): + return True + + def f(a): + if a: + o = Base() + else: + o = A() + + return o != Base() + + assert self.interpret(f, [0]) == f(0) + assert self.interpret(f, [1]) == f(1) + + def test_ne_reversed(self): + class A(object): + def __ne__(self, other): + return 
not bool(other) + + def f(a): + return (a != A()) == (A() != a) + assert self.interpret(f, [0]) == f(0) + assert self.interpret(f, [1]) == f(1) + + def test_arithmetic_ops(self): + class A(object): + def __add__(self, other): + return other + other + + def __mul__(self, other): + return other * other + + def f(a): + o = A() + return (o + a) + (o * a) + + for i in range(10): + assert self.interpret(f, [i]) == f(i) From noreply at buildbot.pypy.org Tue Jul 29 19:32:45 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 29 Jul 2014 19:32:45 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Implement faulthandler.enable() and disable() Message-ID: <20140729173245.A2E931D2A5A@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72610:9e797b4a53ee Date: 2014-07-29 17:43 +0200 http://bitbucket.org/pypy/pypy/changeset/9e797b4a53ee/ Log: Implement faulthandler.enable() and disable() diff --git a/pypy/module/faulthandler/__init__.py b/pypy/module/faulthandler/__init__.py --- a/pypy/module/faulthandler/__init__.py +++ b/pypy/module/faulthandler/__init__.py @@ -6,5 +6,7 @@ interpleveldefs = { 'enable': 'interp_faulthandler.enable', + 'disable': 'interp_faulthandler.disable', + 'is_enabled': 'interp_faulthandler.is_enabled', 'register': 'interp_faulthandler.register', } diff --git a/pypy/module/faulthandler/interp_faulthandler.py b/pypy/module/faulthandler/interp_faulthandler.py --- a/pypy/module/faulthandler/interp_faulthandler.py +++ b/pypy/module/faulthandler/interp_faulthandler.py @@ -1,5 +1,15 @@ -def enable(space, __args__): - pass +class FatalErrorState(object): + def __init__(self, space): + self.enabled = False + +def enable(space): + space.fromcache(FatalErrorState).enabled = True + +def disable(space): + space.fromcache(FatalErrorState).enabled = False + +def is_enabled(space): + return space.wrap(space.fromcache(FatalErrorState).enabled) def register(space, __args__): pass diff --git 
a/pypy/module/faulthandler/test/test_faulthander.py b/pypy/module/faulthandler/test/test_faulthander.py new file mode 100644 --- /dev/null +++ b/pypy/module/faulthandler/test/test_faulthander.py @@ -0,0 +1,11 @@ +class AppTestFaultHandler: + spaceconfig = { + "usemodules": ["faulthandler"] + } + + def test_enable(self): + import faulthandler + faulthandler.enable() + assert faulthandler.is_enabled() is True + faulthandler.disable() + assert faulthandler.is_enabled() is False From noreply at buildbot.pypy.org Tue Jul 29 19:32:47 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 29 Jul 2014 19:32:47 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fix translation Message-ID: <20140729173247.024D41D2A5A@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72611:edb40b85036b Date: 2014-07-29 19:31 +0200 http://bitbucket.org/pypy/pypy/changeset/edb40b85036b/ Log: Fix translation diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py --- a/pypy/module/_lzma/interp_lzma.py +++ b/pypy/module/_lzma/interp_lzma.py @@ -193,8 +193,8 @@ w_check=WrappedDefault(None), w_preset=WrappedDefault(None), w_filters=WrappedDefault(None)) - def descr_new(space, w_subtype, format=FORMAT_XZ, - w_check=None, w_preset=None, w_filters=None): + def descr_new_comp(space, w_subtype, format=FORMAT_XZ, + w_check=None, w_preset=None, w_filters=None): w_self = space.allocate_instance(W_LZMACompressor, w_subtype) self = space.interp_w(W_LZMACompressor, w_self) W_LZMACompressor.__init__(self, space, format) @@ -252,7 +252,7 @@ W_LZMACompressor.typedef = TypeDef("LZMACompressor", - __new__ = interp2app(W_LZMACompressor.descr_new), + __new__ = interp2app(W_LZMACompressor.descr_new_comp), compress = interp2app(W_LZMACompressor.compress_w), flush = interp2app(W_LZMACompressor.flush_w), ) @@ -275,8 +275,8 @@ @unwrap_spec(format=int, w_memlimit=WrappedDefault(None), w_filters=WrappedDefault(None)) - def descr_new(space, w_subtype, 
format=FORMAT_AUTO, - w_memlimit=None, w_filters=None): + def descr_new_dec(space, w_subtype, format=FORMAT_AUTO, + w_memlimit=None, w_filters=None): w_self = space.allocate_instance(W_LZMADecompressor, w_subtype) self = space.interp_w(W_LZMADecompressor, w_self) W_LZMADecompressor.__init__(self, space, format) @@ -338,7 +338,7 @@ W_LZMADecompressor.typedef = TypeDef("LZMADecompressor", - __new__ = interp2app(W_LZMADecompressor.descr_new), + __new__ = interp2app(W_LZMADecompressor.descr_new_dec), decompress = interp2app(W_LZMADecompressor.decompress_w), eof = interp_attrproperty("eof", W_LZMADecompressor), unused_data = interp_attrproperty_bytes("unused_data", W_LZMADecompressor), From noreply at buildbot.pypy.org Tue Jul 29 20:05:02 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Tue, 29 Jul 2014 20:05:02 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: another translation fix Message-ID: <20140729180502.A360F1D2A5A@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r72612:a467e510c666 Date: 2014-07-29 20:04 +0200 http://bitbucket.org/pypy/pypy/changeset/a467e510c666/ Log: another translation fix diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py --- a/pypy/module/_lzma/interp_lzma.py +++ b/pypy/module/_lzma/interp_lzma.py @@ -78,7 +78,7 @@ lzma_lzma_preset = external('lzma_lzma_preset', [lzma_options_lzma, rffi.UINT], lzma_bool) lzma_alone_encoder = external('lzma_alone_encoder', [lzma_stream, lzma_options_lzma], lzma_ret) -lzma_end = external('lzma_end', [lzma_stream], lltype.Void) +lzma_end = external('lzma_end', [lzma_stream], lltype.Void, releasegil=False) lzma_auto_decoder = external('lzma_auto_decoder', [lzma_stream, rffi.LONG, rffi.INT], lzma_ret) lzma_get_check = external('lzma_get_check', [lzma_stream], rffi.INT) From noreply at buildbot.pypy.org Wed Jul 30 02:32:51 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Jul 2014 02:32:51 +0200 (CEST) Subject: [pypy-commit] 
pypy py3.3: fix yield from returns Message-ID: <20140730003251.4B5451C35CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72613:936bfd7e5628 Date: 2014-07-29 17:30 -0700 http://bitbucket.org/pypy/pypy/changeset/936bfd7e5628/ Log: fix yield from returns diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1012,13 +1012,14 @@ else: w_retval = space.call_method(w_gen, "send", w_value) except OperationError as e: - if not e.match(self.space, self.space.w_StopIteration): + if not e.match(space, space.w_StopIteration): raise self.popvalue() # Remove iter from stack + e.normalize_exception(space) try: w_value = space.getattr(e.get_w_value(space), space.wrap("value")) except OperationError as e: - if not e.match(self.space, self.space.w_AttributeError): + if not e.match(space, space.w_AttributeError): raise w_value = space.w_None self.pushvalue(w_value) diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -316,6 +316,24 @@ assert False, 'Expected StopIteration' """ + def test_yield_from_return(self): + """ + def f1(): + result = yield from f2() + return result + def f2(): + yield 1 + return 2 + g = f1() + assert next(g) == 1 + try: + next(g) + except StopIteration as e: + assert e.value == 2 + else: + assert False, 'Expected StopIteration' + """ + def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline From noreply at buildbot.pypy.org Wed Jul 30 02:32:52 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Jul 2014 02:32:52 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: adjust impl details Message-ID: <20140730003252.D77741C35CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72614:46c2f3dee4c1 Date: 2014-07-29 17:30 -0700 
http://bitbucket.org/pypy/pypy/changeset/46c2f3dee4c1/ Log: adjust impl details diff --git a/lib-python/3/test/test_builtin.py b/lib-python/3/test/test_builtin.py --- a/lib-python/3/test/test_builtin.py +++ b/lib-python/3/test/test_builtin.py @@ -15,7 +15,8 @@ import unittest import warnings from operator import neg -from test.support import TESTFN, unlink, run_unittest, check_warnings +from test.support import ( + TESTFN, unlink, run_unittest, check_warnings, check_impl_detail) try: import pty, signal except ImportError: @@ -423,7 +424,9 @@ try: raise IndexError except: - self.assertEqual(len(dir(sys.exc_info()[2])), 4) + methods = [meth for meth in dir(sys.exc_info()[2]) + if not meth.startswith('_')] + self.assertEqual(len(methods), 4) # test that object has a __dir__() self.assertEqual(sorted([].__dir__()), dir([])) @@ -558,18 +561,21 @@ self.assertEqual((g, l), ({'a': 1}, {'b': 2})) def test_exec_globals(self): - code = compile("print('Hello World!')", "", "exec") - # no builtin function - self.assertRaisesRegex(NameError, "name 'print' is not defined", - exec, code, {'__builtins__': {}}) - # __builtins__ must be a mapping type - self.assertRaises(TypeError, - exec, code, {'__builtins__': 123}) + if check_impl_detail(): + # strict __builtins__ compliance (CPython) + code = compile("print('Hello World!')", "", "exec") + # no builtin function + self.assertRaisesRegex(NameError, "name 'print' is not defined", + exec, code, {'__builtins__': {}}) + # __builtins__ must be a mapping type + self.assertRaises(TypeError, + exec, code, {'__builtins__': 123}) - # no __build_class__ function - code = compile("class A: pass", "", "exec") - self.assertRaisesRegex(NameError, "__build_class__ not found", - exec, code, {'__builtins__': {}}) + # no __build_class__ function + code = compile("class A: pass", "", "exec") + if True: + self.assertRaisesRegex(NameError, "__build_class__ not found", + exec, code, {'__builtins__': {}}) class frozendict_error(Exception): pass @@ -579,7 
+585,7 @@ raise frozendict_error("frozendict is readonly") # read-only builtins - frozen_builtins = frozendict(__builtins__) + frozen_builtins = frozendict(builtins.__dict__) code = compile("__builtins__['superglobal']=2; print(superglobal)", "test", "exec") self.assertRaises(frozendict_error, exec, code, {'__builtins__': frozen_builtins}) From noreply at buildbot.pypy.org Wed Jul 30 02:32:54 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Jul 2014 02:32:54 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: add the module name Message-ID: <20140730003254.47A081C35CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72615:88a28c2bc747 Date: 2014-07-29 17:31 -0700 http://bitbucket.org/pypy/pypy/changeset/88a28c2bc747/ Log: add the module name diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -330,7 +330,7 @@ res = out.make_result_string() return self.space.wrapbytes(res) -W_BZ2Compressor.typedef = TypeDef("BZ2Compressor", +W_BZ2Compressor.typedef = TypeDef("_bz2.BZ2Compressor", __doc__ = W_BZ2Compressor.__doc__, __new__ = interp2app(descr_compressor__new__), compress = interp2app(W_BZ2Compressor.compress), @@ -426,7 +426,7 @@ return self.space.wrapbytes(res) -W_BZ2Decompressor.typedef = TypeDef("BZ2Decompressor", +W_BZ2Decompressor.typedef = TypeDef("_bz2.BZ2Decompressor", __doc__ = W_BZ2Decompressor.__doc__, __new__ = interp2app(descr_decompressor__new__), unused_data = interp_attrproperty_bytes("unused_data", W_BZ2Decompressor), From noreply at buildbot.pypy.org Wed Jul 30 02:32:56 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Jul 2014 02:32:56 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: support the old GIL API Message-ID: <20140730003256.146A51C35CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72616:17d6d860e78f Date: 2014-07-29 17:31 -0700 
http://bitbucket.org/pypy/pypy/changeset/17d6d860e78f/ Log: support the old GIL API diff --git a/lib-python/3/test/test_concurrent_futures.py b/lib-python/3/test/test_concurrent_futures.py --- a/lib-python/3/test/test_concurrent_futures.py +++ b/lib-python/3/test/test_concurrent_futures.py @@ -295,14 +295,19 @@ event = threading.Event() def future_func(): event.wait() - oldswitchinterval = sys.getswitchinterval() - sys.setswitchinterval(1e-6) + newgil = hasattr(sys, 'getswitchinterval') + if newgil: + geti, seti = sys.getswitchinterval, sys.setswitchinterval + else: + geti, seti = sys.getcheckinterval, sys.setcheckinterval + oldinterval = geti() + seti(1e-6 if newgil else 1) try: fs = {self.executor.submit(future_func) for i in range(100)} event.set() futures.wait(fs, return_when=futures.ALL_COMPLETED) finally: - sys.setswitchinterval(oldswitchinterval) + seti(oldinterval) class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase): From noreply at buildbot.pypy.org Wed Jul 30 02:32:57 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Jul 2014 02:32:57 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fix test_validate Message-ID: <20140730003257.A0F5D1C35CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72617:7e04e788d910 Date: 2014-07-29 17:31 -0700 http://bitbucket.org/pypy/pypy/changeset/7e04e788d910/ Log: fix test_validate diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -11,8 +11,7 @@ class ValidationError(Exception): - def __init__(self, message): - self.message = message + """Signals an invalid AST""" def expr_context_name(ctx): From noreply at buildbot.pypy.org Wed Jul 30 02:32:59 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 30 Jul 2014 02:32:59 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: return in generators now allowed Message-ID: 
<20140730003259.1E1981C35CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72618:87aa9c85fb25 Date: 2014-07-29 17:31 -0700 http://bitbucket.org/pypy/pypy/changeset/87aa9c85fb25/ Log: return in generators now allowed diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -268,10 +268,7 @@ def test_return_in_generator(self): code = 'def f():\n return None\n yield 19\n' - e = py.test.raises(OperationError, self.compiler.compile, code, '', 'single', 0) - ex = e.value - ex.normalize_exception(self.space) - assert ex.match(self.space, self.space.w_SyntaxError) + self.compiler.compile(code, '', 'single', 0) def test_yield_in_finally(self): code ='def f():\n try:\n yield 19\n finally:\n pass\n' From noreply at buildbot.pypy.org Wed Jul 30 10:58:21 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 30 Jul 2014 10:58:21 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: some corrections and additions Message-ID: <20140730085821.0B7DF1C0920@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5376:3a3a4191359e Date: 2014-07-30 10:58 +0200 http://bitbucket.org/pypy/extradoc/changeset/3a3a4191359e/ Log: some corrections and additions diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -34,6 +34,7 @@ \floatname{code}{\protect\codename} + % nice listings \usepackage{xcolor} \usepackage{newverbs} @@ -539,8 +540,11 @@ Described below, we also need a write barrier for our generational garbage collector (GC). Interestingly, we are able to share the same barrier and flag for both use cases. Since the GC is an integral - part of many dynamic languages, we actually do not incur any additional - overhead here since the barriers are already there. 
+ part of many dynamic languages, we often do not incur additional + overhead since the barriers are already there. However, we do need + some additional barriers when writing non-references into objects. + In that case, generational GCs do not need a barrier but the STM + system still does. \item [{Commit:}] When we want to commit a transaction, we need to check for read-write conflicts. Such conflicts cannot be detected in the @@ -966,8 +970,11 @@ information effectively: young objects do not trigger the slow path at all, overflow objects only need to be handled by the GC part of the write barrier once between collections, and only the old objects need -to be handled by the STM part \emph{once per transaction}. This keeps the -overhead of the write barrier very low. +to be handled by the STM part \emph{once per transaction}. The +privatisation step in particular is done very rarely because the pages +stay private until the next major collection where they may get +re-shared. This keeps the overhead of the write barrier very low in +the common cases. \subsection{Abort} @@ -1422,7 +1429,9 @@ % STM needs good performance: -The solution presented here is based on a software TM system. As the +The solution presented here is based on a software TM system developed +as an independent and reusable C library for integration in dynamic +language interpreters. As the STM is a vital part of the interpreter, good performance is essential. We present an interesting way to combine the existing memory segmentation and virtual memory features of current CPUs to @@ -1558,7 +1567,7 @@ \bibitem{odaira14} Rei Odaira, Jose G. Castanos, and Hisanobu Tomari. 2014. Eliminating - global interpreter locks in ruby through hardware transactional + global interpreter locks in Ruby through hardware transactional memory. In \emph{Proc. 19th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming} (PPoPP '14). 
From noreply at buildbot.pypy.org Wed Jul 30 14:37:46 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 30 Jul 2014 14:37:46 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: removing a lot of footnotes and other cleanups Message-ID: <20140730123746.1D6161D237F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5377:fa7bf3bf9ace Date: 2014-07-30 14:37 +0200 http://bitbucket.org/pypy/extradoc/changeset/fa7bf3bf9ace/ Log: removing a lot of footnotes and other cleanups diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -288,11 +288,16 @@ \emph{Threading} employs operating system (OS) threads to provide concurrency. It is, however, limited by the GIL and thus does not -provide parallelism\footnote{At this point we should mention that it is indeed -possible to run external functions written in C instead of Python in -parallel. Our work focuses on Python itself and ignores this aspect as -it requires writing in a different language.}. Moreover, the GIL can -only ensure correctness of the interpreter itself: applications are +provide real parallelism. +The only code that is not necessarily restricted by the GIL in CPython +is code written in e.g.\ C instead of Python. External functions can +choose to run without the GIL and thus, in parallel. We ignore this +aspect here as it requires writing in a different language than Python +and is therefore not relevant to our work. + +The GIL can also +only ensure correctness of the interpreter itself: applications using +threads are still required to coordinate concurrent accesses to data structures using conventional methods -- locks being the most common way. @@ -444,28 +449,22 @@ need to adapt these addresses to the segment they live in, which would prevent completely any page sharing because then the binary contents differ between segments. 
Instead, we store pointers as -\emph{Segment Offsets ($SO$)}, i.e.\ offsets from the start of the -segment. An $SO$ can then be interpreted in any segment by adding to -it the start address of that segment.\footnote{An alternative model -would be to simulate threads using several independent processes in -which all segments are at the same address. This might work well for -demonstration purposes, but it is not really suitable for implementing -an existing programming language: programs assume that most of the -state (file descriptors, external libraries, ...) is shared.} The +\emph{Segment Offsets (SO)}, i.e.\ offsets from the start of the +segment. An SO can then be interpreted in any segment by adding to +it the start address of that segment. The result is called a \emph{Linear Address (LA)}. This is illustrated in Figure~\ref{fig:Segment-Addressing}. The x86 CPUs provide a feature which is called \emph{memory -segmentation}. On the modern x86-64, this is enough to perform the -addition described above very efficiently. We use the segment -register $\%gs$\footnote{The other segment register $\%fs$ is -typically used by the threading library to provide access to -thread-local data. One point of view is that we are using $\%gs$ for -a similar purpose: thread-local data -- but a lot of it.}, which is -available to applications, to point to the current thread's segment -start address. The translation from an $SO$ to an LA is done for us by + segmentation}. On these CPU this is enough to perform the addition +described above very efficiently. On the modern x86-64, there are two +segment registers left: $\%fs$ and $\%gs$. On Linux for example, +$\%fs$ is already used for addressing thread-local data +efficiently. Thus, we use the remaining segment register $\%gs$, which is +still available to applications, to point to the current thread's segment +start address. 
The translation from an SO to an LA is done for us by using the ``$\%gs\colon$'' prefix before any CPU instruction that -reads or writes memory using an $SO$ address. This is very efficient +reads or writes memory using an SO address. This is very efficient -- we can do it on every object access -- and some compilers support it natively (e.g.\ clang). @@ -478,7 +477,7 @@ In summary, translating a $\%gs:SO$ to a physical address is a two-step process: First the memory segmentation feature of the CPU constructs a linear address. Then, this LA gets mapped by the MMU to -some place in the physical memory. This makes the $SO$ a valid reference +some place in the physical memory. This makes the SO a valid reference to an object in all segments, automatically resolving either to a shared or a private version. @@ -503,8 +502,8 @@ itself, but it is missing conflict detection to be considered a full-fledged STM system. This requires adding \emph{barriers} to the program to register the objects in the read or write set before -reading or writing to them\footnote{Barriers added by an automatic, conservative, and -local program transformation~\cite{felber07}}. Furthermore, we still +reading or writing to them. We do this with an automatic, conservative, and +local program transformation (cf.~\cite{felber07}). Furthermore, we still need to describe the commit protocol. \begin{description} @@ -516,7 +515,7 @@ approach allows the read barrier to be a single, unconditional write into segment-local memory. We only need to \emph{mark} an object as read by the current transaction. The mark is stored in a - segment-local array indexed by a number derived from the $SO$ + segment-local array indexed by a number derived from the SO of the object. 
Unlike other STM systems, the read barrier does not have to find the @@ -532,10 +531,11 @@ Here, we eagerly detect write-write conflicts by allowing only one transaction modifying an object at a time using write - locks.\footnote{Eager write-write detection is not inherent to our - approach; we may lift this restriction in the future.} Part of making - the object write-ready is also the privatisation of all pages the - object belongs to, as described above. + locks\footnote{Eager write-write conflict detection is not inherent + to our approach; we may lift this restriction in the future if it + proves to be useful.}. To make the object write-ready, this is + also the place to privatise all pages the object belongs to, as + described above. Described below, we also need a write barrier for our generational garbage collector (GC). Interestingly, we are able to share the same @@ -633,7 +633,7 @@ \lstinline!stm_commit_transaction()! require saving object references. -In the following sections, whenever we use $SO$, we go through the +In the following sections, whenever we use SO, we go through the address translation to get to the actual contents of an object. This is also signified by the type \lstinline!object_t!. This type is special as it causes the @@ -667,12 +667,12 @@ Another issue is the support of irreversible operations, like calls to external functions or general -input~/ output. If we are allowed to, we commit the transaction -before and start a new one after these operations\footnote{The language -specification needs to specify if this is allowed.}. Within an atomic +input~/ output. If Python allows it, we commit the transaction before +and start a new one after these operations (the same way as the GIL is +released around thread-safe external functions). Within an atomic block, however, the transaction is unbreakable, and we need to turn -the transaction inevitable so that it -cannot abort and strong isolation is guaranteed. 
+the transaction inevitable so that it cannot abort and strong +isolation is guaranteed at all times. @@ -699,7 +699,7 @@ segmentation violation when accessed. We use this to detect erroneous dereferencing of \lstinline!NULL! references. All $\%gs{::}SO$ translated to linear addresses will point to NULL pages - if $SO$ is set to \lstinline!NULL!. + if SO is set to \lstinline!NULL!. \item [{Segment-local~data:}] Some area private to the segment that contains segment-local information for bookkeeping. \item [{Read~markers:}] These are private pages that store information about @@ -707,8 +707,11 @@ segment. \item [{Nursery:}] This private area contains all the freshly allocated objects (\emph{young objects}) of the current transaction. The GC - uses bump-pointer allocation in this area to allocate objects in the - first generation. + uses bump-pointer allocation in this area to allocate objects of the + first generation. To make sure that only objects of the current + transaction reside in this space, a collection of the first + generation happens at the end of every transaction (see + section~\ref{sub:gc}). \item [{Old~object~space:}] These pages are the ones that are really shared between segments. They mostly contain old objects but also some young ones that were too big to be allocated in the nursery. @@ -717,13 +720,13 @@ Note, since the above configuration is currently specified at compile time, all these areas are at offsets inside the segments known to the compiler. This makes some checks very efficient, e.g.\ checking -if an object resides in the nursery only requires comparing its $SO$ +if an object resides in the nursery only requires comparing its SO to the static boundaries of the nursery. It is possible that we want some parameters to be configurable at startup or even during the runtime. In that case we can still use a compile-time specified maximum so that these checks are still efficient. 
E.g.\ limiting the maximum amount of memory available to the application statically to a few -terabytes is fine because it corresponds to virtual memory, +terabytes is fine because it corresponds to virtual memory; the real physical memory is assigned on-demand by the operating system. @@ -759,7 +762,7 @@ -\subsection{Garbage Collection} +\subsection{Garbage Collection\label{sub:gc}} Garbage collection plays a big role in our TM system. The GC is generational and has two generations: the \emph{young} and the @@ -828,7 +831,7 @@ and already mentioned in Section~\ref{sub:Setup}. This area can be seen as a continuous, segment-local array of bytes -that is indexed with an object's reference ($SO$) divided by 16. So +that is indexed with an object's reference (SO) divided by 16. So that each object has its own read marker, all objects have a size of at least 16 bytes. Otherwise there could be false conflicts when reading from two adjacent objects that share a single marker. @@ -839,9 +842,9 @@ \lstinline!false! on commit and only need to do this every 255 transactions. The whole code for the barrier shown in Listing~\ref{lst:rb} is easily optimisable for compilers as well as -perfectly predictable for CPUs\footnote{Additional benefit: The read -barrier is not constrained to execute before the actual read -- both -the compiler and the CPU are free to reorder or group them.}. +perfectly predictable for CPUs. Additionally, the read barrier is not +constrained to execute before the actual read -- both the compiler and +the CPU are free to reorder or group them for efficiency. \begin{code}[h] \begin{lstlisting} @@ -876,7 +879,7 @@ The \textbf{fast path} of the write barrier is very simple. We only need to check for the flag \lstinline!WRITE_BARRIER! in the object's -header through its $SO$ and call the slow path if it is set. This flag +header through its SO and call the slow path if it is set. 
This flag is set either if the object is old and comes from an earlier transaction, or if there was a minor collection (in this case, the flag is added again on all objects, including new overflow objects). The flag @@ -946,7 +949,7 @@ For the \emph{STM part}, we first perform a read barrier on the object. We then try to acquire its write lock. \lstinline!write_locks! is a simple, \emph{global} array of bytes that is indexed with the -$SO$ of the object divided by 16. Note, ``global'' here really means +SO of the object divided by 16. Note, ``global'' here really means it is a single array with data for all segments, there is no address translation going on to access its elements contrary to e.g.\ the read markers array. If we already own the lock, we are done. @@ -961,7 +964,8 @@ In all cases, we remove the \lstinline!WRITE_BARRIER! flag from the object before we return. Thus, we never trigger the slow path again before we do the next minor collection or we start the next -transaction.\footnote{We always do a minor collection during a commit.} +transaction (remember: we always perform a minor collection as part of +each commit anyway). Note that we have three kinds of objects: \emph{young, overflow} and \emph{old} objects. Young and overflow objects were created in the @@ -995,16 +999,20 @@ Committing a transaction needs a bit more work. First, we synchronise all threads so that the committing one is the only one running and all -the others are waiting in safe points.\footnote{This design choice is -important to support our very cheap barriers. It is a trade-off that -works best if the number of concurrent threads is small. -See section~\ref{subsub:sync} for how we mitigate the costs.} +the others are waiting in safe points. We then go through the write set (\lstinline!modified_old_objects!) and check the corresponding \lstinline!read_markers! in other threads~/ segments. 
If we detect a read-write conflict, we perform contention management to either abort us or the other transaction, or to simply wait a bit (see Section~\ref{subsub:contentionmanagement}). +Note that synchronising all threads is an important design choice we +made to support our very cheap read barriers. Since they now consist +only of a single unconditional write to some memory location, all +threads need to be stopped while we detect conflicts. This is a +trade-off that works best if the number of concurrent threads is +small. + After verifying that there are no conflicts anymore, we copy all our changes (i.e.\ objects in our write set) to all other segments, including the sharing-segment. This is safe since we synchronised all @@ -1292,8 +1300,8 @@ code worthwhile, we increased the input size of all benchmarks to get reasonable execution times (denoted with ``(large)'' in benchmark names). -We also ran these benchmarks on Jython\footnote{version -2.7b1~\cite{webjython}}, an implementation of Python on top of the +We also ran these benchmarks on Jython (v2.7b1 from~\cite{webjython}), +an implementation of Python on top of the Java Virtual Machine (JVM). Instead of a GIL, this interpreter uses fine-grained locking for synchronisation. 
Even though it can use the JVM's JIT compiler, its performance in these benchmarks is behind From noreply at buildbot.pypy.org Wed Jul 30 15:03:08 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 30 Jul 2014 15:03:08 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: explain why we only have micro benchs Message-ID: <20140730130308.8BAF21D2577@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5378:10d75dccc678 Date: 2014-07-30 15:03 +0200 http://bitbucket.org/pypy/extradoc/changeset/10d75dccc678/ Log: explain why we only have micro benchs diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -1152,9 +1152,15 @@ \subsection{Scaling} -To asses how well the STM system scales on its own (without any real -workload), we execute the loop in Listing~\ref{lst:scaling_workload} -on 1 to 4 threads on the PyPy interpreter with STM. +To asses how well the STM system in combination with a Python +interpreter scales on its own (without any real workload), we execute +the loop in Listing~\ref{lst:scaling_workload} on 1 to 4 threads on +the PyPy interpreter with STM and without a JIT. There are very few +allocations or calculations in this loop, so the main purpose of this +benchmark is simply to check that there are no inherent conflicts in +the interpreter when everything is thread-local. We also get some +idea about how much overhead each additional thread introduces. + \begin{code}[h] \begin{lstlisting} @@ -1166,9 +1172,10 @@ \caption{Dummy workload\label{lst:scaling_workload}} \end{code} -For the results in Figure~\ref{fig:scaling}, we +The STM system detected no conflicts when running this code on 4 +threads. For the results in Figure~\ref{fig:scaling}, we normalised the average runtimes to the time it took on a single -thread. From this we see that there is additional overhead introduced +thread. 
From this we see that there is some additional overhead introduced by each thread ($12.3\%$ for all 4 threads together). Every thread adds some overhead because during a commit, there is one more thread which has to reach a safe point. Additionally, conflict detection @@ -1188,7 +1195,11 @@ \subsection{Small-Scale Benchmarks\label{sec:performance-bench}} For the following sections we use a set of six small benchmarks -available at~\cite{pypybenchs}: +available at~\cite{pypybenchs}. There are, unsurprisingly, not +many threaded applications written in Python that can be used +as a benchmark. Since until now, threading was rarely used +for performance reasons because of the GIL, we mostly collected +small demos and wrote our own benchmarks to evaluate our system: \begin{itemize} \item \emph{btree} and \emph{skiplist}, which are both inserting, From noreply at buildbot.pypy.org Wed Jul 30 15:34:50 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 30 Jul 2014 15:34:50 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: clarifications Message-ID: <20140730133450.191321D2577@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5379:212a35ee6e10 Date: 2014-07-30 15:34 +0200 http://bitbucket.org/pypy/extradoc/changeset/212a35ee6e10/ Log: clarifications diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -1249,10 +1249,11 @@ thread-switching and GIL handling (see~\cite{beazley10} for a detailed analysis). -PyPy using our STM system (\emph{pypy-stm-nojit}) scales in all -benchmarks to a certain degree. It scales best for the embarrassingly -parallel ones ($avg=2.6\times$ speedup) and a little less for -the others ($avg=2.0\times$ speedup). The reason for this difference is +On 4 cores, PyPy using our STM system (\emph{pypy-stm-nojit}) scales +in all benchmarks to a certain degree. 
It scales best for the +embarrassingly parallel ones ($avg=2.6\times$ speedup over 1 thread) +and a little less for the others ($avg=2.0\times$ speedup over 1 +thread). The reason for this difference is that in the former group there are no real, logical conflicts -- all threads do independent calculations. STM simply replaces the GIL in those programs. In the latter group, the threads work on a common data @@ -1265,11 +1266,13 @@ There is no visible benefit from 3 to 4 threads, even a slight regression. -Looking at the average overhead from switching from GIL to STM, we see -that it is $\approx 43.3\%$. The maximum in richards is $71\%$. In all -benchmarks \emph{pypy-stm-nojit} beats \emph{pypy-nojit} already on -two threads despite of this overhead. The achieved speedup comparing -STM to the GIL is between $1.14\times$ and $1.94\times$. +Looking at the average overhead on a single thread that is induced by +switching from GIL to STM, we see that it is $\approx 43.3\%$. The +maximum in richards is $71\%$. In all benchmarks \emph{pypy-stm-nojit} +beats \emph{pypy-nojit} already on two threads, despite of this +overhead. The achieved speedup comparing the lowest runtimes of STM +to the single-threaded GIL execution is between $1.14\times$ and +$1.94\times$. Still, STM rarely beats CPython's \emph{single-thread} performance. However, for programs that need concurrency in CPython and that use threads to @@ -1277,9 +1280,8 @@ the GIL on multiple threads. From this perspective, the STM implementation beats CPython's performance in all but two benchmarks. -Since PyPy comes with a JIT~\cite{cfbolz09} to make its overhead -compared to CPython go away, we will now look at how well STM works -together with it. +Since PyPy comes with a JIT~\cite{cfbolz09} to make it vastly faster +than CPython, we will now look at how well STM works together with it. 
\begin{figure}[h] \centering @@ -1305,7 +1307,7 @@ For these reasons, the following results have to be taken with a grain of salt. -The speedups from enabling the JIT in these benchmarks range from +The speedups from simply enabling the JIT in these benchmarks range from $10-50\times$. This is why we had to do without CPython here, since it would be much further up in the plots. Also, to make jitting code worthwhile, we increased the input size of all benchmarks to get @@ -1322,11 +1324,10 @@ fine-grained locking. It is out of the scope of this paper to do this thoroughly. -The results are presented in Figure~\ref{fig:performance-jit}. We -see that the performance is much less stable. There is certainly more -work required in this area. The slowdown factor for switching from GIL -to STM ranges around $1-2.4\times$, and we beat GIL performance -in half of the benchmarks. +The results are presented in Figure~\ref{fig:performance-jit}. We see +that the performance gains are much less reliable. The slowdown +factor for switching from GIL to STM ranges around $1-2.4\times$, and +we beat the GIL's single-thread performance in half of the benchmarks. We see that generally, the group of embarrassingly parallel benchmarks scales best. 
(There is a notable performance stability problem in the From noreply at buildbot.pypy.org Wed Jul 30 20:22:38 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 30 Jul 2014 20:22:38 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: be nicer to JS and update pdf Message-ID: <20140730182238.1E4911D2577@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5380:f995dc84551c Date: 2014-07-30 20:22 +0200 http://bitbucket.org/pypy/extradoc/changeset/f995dc84551c/ Log: be nicer to JS and update pdf diff --git a/talk/dls2014/paper/paper.pdf b/talk/dls2014/paper/paper.pdf index 952e31bae97ac98d48739e0a955a0662d3c64fe5..f7c23752d1005cd0ef587d90e5589777cba2a6a3 GIT binary patch [cut] diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -192,10 +192,9 @@ \section{Introduction} - Dynamic languages like Python, PHP, Ruby, and JavaScript receive a lot -of attention but have not yet embraced parallelism. A parallel -programming model was not part of the design of those languages. Thus, +of attention but only provide limited forms of parallelism. A parallel +programming model was not part of the initial design of those languages. Thus, the reference implementations of, e.g., Python and Ruby use a single, global interpreter lock (GIL) to serialise the execution of code in threads. The use of a single global lock causes several disadvantages, @@ -844,7 +843,8 @@ Listing~\ref{lst:rb} is easily optimisable for compilers as well as perfectly predictable for CPUs. Additionally, the read barrier is not constrained to execute before the actual read -- both the compiler and -the CPU are free to reorder or group them for efficiency. +the CPU can reorder or group them freely between potential safe points +for additional performance. 
\begin{code}[h] \begin{lstlisting} From noreply at buildbot.pypy.org Thu Jul 31 09:58:53 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Thu, 31 Jul 2014 09:58:53 +0200 (CEST) Subject: [pypy-commit] pypy default-trivial-fixes: fix typo Message-ID: <20140731075853.E87DC1C09B2@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: default-trivial-fixes Changeset: r72619:c83a966e0c06 Date: 2014-07-30 22:07 +0200 http://bitbucket.org/pypy/pypy/changeset/c83a966e0c06/ Log: fix typo diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -96,7 +96,7 @@ XXX: This class should override the baseclass implementation of compile_command() in order to optimize it, especially in case - of incomplete inputs (e.g. we shouldn't re-compile from sracth + of incomplete inputs (e.g. we shouldn't re-compile from scratch the whole source after having only added a new '\n') """ def __init__(self, space, override_version=None): From noreply at buildbot.pypy.org Thu Jul 31 09:58:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 31 Jul 2014 09:58:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in numerodix/pypy/default-trivial-fixes (pull request #256) Message-ID: <20140731075855.296311C09B2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72620:52f3ffa024b7 Date: 2014-07-31 09:58 +0200 http://bitbucket.org/pypy/pypy/changeset/52f3ffa024b7/ Log: Merged in numerodix/pypy/default-trivial-fixes (pull request #256) fix typo diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -96,7 +96,7 @@ XXX: This class should override the baseclass implementation of compile_command() in order to optimize it, especially in case - of incomplete inputs (e.g. we shouldn't re-compile from sracth + of incomplete inputs (e.g. 
we shouldn't re-compile from scratch the whole source after having only added a new '\n') """ def __init__(self, space, override_version=None): From noreply at buildbot.pypy.org Thu Jul 31 10:03:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 31 Jul 2014 10:03:22 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add the presentation (from Remi) Message-ID: <20140731080322.11BC41C09B2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5381:a40cec7d7661 Date: 2014-07-31 10:03 +0200 http://bitbucket.org/pypy/extradoc/changeset/a40cec7d7661/ Log: Add the presentation (from Remi) diff --git a/talk/icooolps2014/presentation.pdf b/talk/icooolps2014/presentation.pdf new file mode 100644 index 0000000000000000000000000000000000000000..51111b5516c7dfd6ca9d947632ea0a2e8c101861 GIT binary patch [cut] From noreply at buildbot.pypy.org Thu Jul 31 12:13:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 31 Jul 2014 12:13:01 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Update TODO Message-ID: <20140731101301.82F541C0FEE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72621:5848b415cf05 Date: 2014-07-30 12:57 +0200 http://bitbucket.org/pypy/pypy/changeset/5848b415cf05/ Log: Update TODO diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -104,6 +104,15 @@ ------------------------------------------------------------ +hg diff -r b0339cb53372^2 -r dd8e2f69fe96^2 : these trunk changes are +missing in the stmgc-c7 branch of PyPy. In particular, reduce the diff +between stmgc-c7 and default (kill rstm.ThreadLocalRef, etc) + +------------------------------------------------------------ + +there are in PyPy-STM crashes related to markers (with the JIT?). +Also, some markers logic is missing with the JIT. 
+ From noreply at buildbot.pypy.org Thu Jul 31 12:13:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 31 Jul 2014 12:13:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Passing test with two unrelated virtualizables Message-ID: <20140731101302.BF2AA1C0FEE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72622:b1e750df0a3e Date: 2014-07-31 12:12 +0200 http://bitbucket.org/pypy/pypy/changeset/b1e750df0a3e/ Log: Passing test with two unrelated virtualizables diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1611,6 +1611,40 @@ op.getopnum() == rop.GUARD_NOT_FORCED_2] assert len(l) == 0 + def test_two_virtualizable_types(self): + class A: + _virtualizable_ = ['x'] + def __init__(self, x): + self.x = x + + class B: + _virtualizable_ = ['lst[*]'] + def __init__(self, lst): + self.lst = lst + + driver_a = JitDriver(greens=[], reds=['a'], virtualizables=['a']) + driver_b = JitDriver(greens=[], reds=['b'], virtualizables=['b']) + + def foo_a(a): + while a.x > 0: + driver_a.jit_merge_point(a=a) + a.x -= 2 + return a.x + + def foo_b(b): + while b.lst[0] > 0: + driver_b.jit_merge_point(b=b) + b.lst[0] -= 2 + return b.lst[0] + + def f(): + return foo_a(A(13)) * 100 + foo_b(B([13])) + + assert f() == -101 + res = self.meta_interp(f, [], listops=True) + assert res == -101 + + class TestLLtype(ExplicitVirtualizableTests, ImplicitVirtualizableTests, LLJitMixin): From noreply at buildbot.pypy.org Thu Jul 31 12:35:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 31 Jul 2014 12:35:42 +0200 (CEST) Subject: [pypy-commit] stmgc card-marking: Update TODO Message-ID: <20140731103542.8AC0D1C0FEE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: card-marking Changeset: r1273:fc4ea6c20975 Date: 2014-07-30 12:57 +0200 
http://bitbucket.org/pypy/stmgc/changeset/fc4ea6c20975/ Log: Update TODO diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -1,8 +1,6 @@ - use small uniform gcpages -- write barrier for big arrays - - finalizers - the highest_overflow_number can overflow after 2**30 non-collect-time @@ -16,3 +14,13 @@ the unused pages away --- or maybe use consecutive addresses from the lowest ones from segment N, instead of the page corresponding to the page number in segment 0 (possibly a bit messy) + +- possibly messy too, but think about not using N+1 segments but only N + +- use a call/cc-style variant of setjmp/longjmp to avoid inevitable + transactions when we need to return + +- kill "atomic" and use regular lock elision + +- increase the memory limit, currently 2.5GB; this requires, apparently, + more fighting against LLVM bugs From noreply at buildbot.pypy.org Thu Jul 31 14:11:57 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 31 Jul 2014 14:11:57 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Backout 7e04e788d910, it seems like it broke translation. Message-ID: <20140731121157.EDEC31C0FEE@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3.3 Changeset: r72623:bcad0c109c8c Date: 2014-07-31 14:11 +0200 http://bitbucket.org/pypy/pypy/changeset/bcad0c109c8c/ Log: Backout 7e04e788d910, it seems like it broke translation. 
diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -11,7 +11,8 @@ class ValidationError(Exception): - """Signals an invalid AST""" + def __init__(self, message): + self.message = message def expr_context_name(ctx): From noreply at buildbot.pypy.org Thu Jul 31 16:02:26 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 31 Jul 2014 16:02:26 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: updated benchmark numbers Message-ID: <20140731140226.7A5AC1D237F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5382:fc61395ad9ed Date: 2014-07-31 16:00 +0200 http://bitbucket.org/pypy/extradoc/changeset/fc61395ad9ed/ Log: updated benchmark numbers diff --git a/talk/dls2014/paper/plots/performance.pdf b/talk/dls2014/paper/plots/performance.pdf index de33012fb48dea85ff95c68e3b6b7be60b077835..9c48101801c6fe17a25b0c587435cfbfd80fa636 GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/performance_nojit.pdf b/talk/dls2014/paper/plots/performance_nojit.pdf index d26324f323c84ea0c8b4fab8a12a161d3e3edcc3..f189a9c696c64c18e3fc1ca70ef5f5eba1055c5c GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/plot_performance.py b/talk/dls2014/paper/plots/plot_performance.py --- a/talk/dls2014/paper/plots/plot_performance.py +++ b/talk/dls2014/paper/plots/plot_performance.py @@ -4,13 +4,14 @@ # for now: avg & stddev of the best -# pypy-c-paper-jit bench.py -k5 raytrace/raytrace.py 1-4 -# pypy-c-paper-jit bench.py -k5 btree/btree.py 1-4 -# pypy-c-paper-jit bench.py -k5 skiplist/skiplist.py 1-4 -# pypy-c-paper-jit bench.py -k5 threadworms/threadworms.py 1-4 -# pypy-c-paper-jit bench.py -k5 mandelbrot/mandelbrot.py 1-4 64 -# pypy-c-paper-jit multithread-richards.py 10000 1-4 # report runtime - +# pypy-c-paper-jit bench.py -k7 raytrace/raytrace.py 1-4 +# pypy-c-paper-jit bench.py -k7 btree/btree.py 1-4 
+# pypy-c-paper-jit bench.py -k7 skiplist/skiplist.py 1-4 +# pypy-c-paper-jit bench.py -k7 threadworms/threadworms.py 1-4 +# pypy-c-paper-jit bench.py -k7 mandelbrot/mandelbrot.py 1-4 64 +# pypy-c-paper-jit multithread-richards.py 3000 1-4 # report runtime +# miller_rabin: pypy-c-paper-jit bench.py -k7 primes/primes.py 1-4 +# mersenne: pypy-c-paper-jit bench.py -k7 mersenne/mersenne.py 1-4 import matplotlib @@ -42,52 +43,66 @@ benchs = OrderedDict([ ("btree (large)", { "pypy-stm-jit":[ - [1.68,1.74,1.73,1.67,1.68,1.65], - [1.35,1.54,1.52,1.28,1.31,1.28], - [1.39,1.50,1.44,1.47,1.41,1.37], - [1.66,1.67,1.68,1.66,1.71,1.71] + [1.67845606804,1.65006303787,1.66974020004,1.63858819008,1.66356801987], + [1.33803701401,1.32292485237,1.31796693802,1.35001111031,1.31932878494], + [1.36447811127,1.36002612114,1.36561393738,1.33688187599,1.36755800247], + [1.57205200195,1.57919001579,1.56078600883,1.5807390213,1.53369808197], ], "pypy-jit":[ - [1.61,1.62,1.61,1.59,1.59], - [3.31,3.33,3.29,3.27,3.35], - [5.11,5.20,5.07,5.00,5.25], - [5.85,6.02,6.14,6.05,5.91] + [1.55986404419,1.56078219414,1.55183196068,1.56036901474,1.55763506889], + [3.08302307129,3.07977199554,3.19185519218,3.08162689209,3.15804696083], + [4.98545002937,5.04515981674,5.00658583641,5.05858302116,5.0690600872], + [5.81887507439,5.83665108681,5.82539606094,5.85461807251,5.80481886864], ]}), ("skiplist (large)", { "pypy-stm-jit":[ - [2.91,2.88,2.92,2.92,2.96], - [3.05,2.90,2.89,2.93,2.94], - [3.48,3.21,3.18,3.33,3.15], - [3.87,3.69,3.78,3.81,3.66] + [2.8317830562591553, 2.9466500282287598, 2.777143955230713, 2.9067370891571045, 2.868476152420044], + [2.9228100776672363, 2.9347541332244873, 2.9178760051727295, 2.9834980964660645, 2.957442045211792], + [3.15556001663208, 3.139164924621582, 3.1457889080047607, 3.2395389080047607, 3.176427125930786], + [3.60280704498291, 3.591495990753174, 3.5838911533355713, 3.6133289337158203, 3.5673019886016846], ], "pypy-jit":[ - [2.14,2.17,2.31,2.24,2.34], - 
[4.54,4.61,4.69,4.63,4.57], - [6.29,6.14,6.25,6.16,6.21], - [6.58,6.62,6.61,6.70,6.83] + [2.1221060752868652, 2.1250720024108887, 2.1107590198516846, 2.131819009780884, 2.1142380237579346], + [4.392746210098267, 4.336360216140747, 4.410413980484009, 4.3028059005737305, 4.448081970214844], + [6.0925071239471436, 6.012751817703247, 5.994383811950684, 5.925648927688599, 6.02409815788269], + [6.677470922470093, 6.746111154556274, 6.772925138473511, 6.698845148086548, 6.861325025558472], ]}), ("threadworms (large)", { "pypy-stm-jit":[ - [4.23,4.33,4.46,4.47,4.50], - [3.4,3.34,3.39,3.32,3.02,3.13], - [3.16,2.96,3.5,2.9,3.3], - [3.4, 3.3,3.32,3.86,3.4] + [4.517525911331177, 4.548923015594482, 4.584195137023926, 4.576831102371216, 4.5510969161987305], + [3.2610208988189697, 3.287199020385742, 3.2835729122161865, 2.83652400970459, 3.355794906616211], + [2.552337169647217, 3.3851380348205566, 3.3292200565338135, 2.512721061706543, 3.395833969116211], + [3.0494518280029297, 3.304857015609741, 3.465865135192871, 3.483139991760254, 2.9930028915405273], ], "pypy-jit":[ - [4.14,4.20,4.24,4.25,4.13], - [12.5,11.4,12.3,11.9,11.7], + [4.137892007827759, 4.017345190048218, 4.04946494102478, 4.033931016921997, 4.049654006958008], + [12.235527038574219, 12.323483943939209, 12.391777992248535, 12.369697093963623, 12.642794132232666], [16.1,15.8,15.7,16.2,15.9], [20.3,19.8,18.9,19.7,19.5] ]}), + ("miller-rabin (large)", { + "pypy-stm-jit":[ + [2.469092845916748, 2.465752124786377, 2.5735549926757812, 2.424694061279297, 2.4528331756591797], + [1.6964740753173828, 1.7057690620422363, 1.687267780303955, 1.6836810111999512, 1.7357711791992188], + [1.9504740238189697, 1.9206480979919434, 1.959285020828247, 1.9674179553985596, 1.9120190143585205], + [2.1957740783691406, 2.259826898574829, 2.246433973312378, 2.158545970916748, 2.1627681255340576], + ], + "pypy-jit":[ + [2.161806106567383, 2.1457087993621826, 2.144040107727051, 2.151695966720581, 2.1421849727630615], + [3.4412081241607666, 
3.4092049598693848, 3.4685871601104736, 3.388669013977051, 3.453207015991211], + [4.572340965270996, 4.519361972808838, 4.503000020980835, 4.507853984832764, 4.442840814590454], + [5.5019919872283936, 5.51775598526001, 5.487698078155518, 5.485651969909668, 5.482367038726807], + ]}), + ("mandelbrot (large)", { "pypy-stm-jit":[ - [17.87,17.88,17.88,17.63,17.75], - [9.18,9.31,9.28,9.20,9.10,9.25], - [7.75,7.8,7.81,7.7,7.09,7.59,7.43], - [6.8,6.55,6.9,6.7,7.29,6.88,7.1] + [17.430854082107544, 17.26646614074707, 17.176318883895874, 17.15704083442688, 17.212379217147827], + [9.08885383605957, 9.0710289478302, 9.040630102157593, 9.0260329246521, 9.00651502609253], + [6.48170804977417, 6.4656219482421875, 6.51883602142334, 6.484840154647827, 6.444514036178589], + [5.4356160163879395, 5.4222729206085205, 5.4224090576171875, 5.452478885650635, 5.4159040451049805], ], "pypy-jit":[ [13.5,13.7,13.6,13.7,14.0], @@ -98,31 +113,47 @@ ("raytrace (large)", { "pypy-stm-jit":[ - [3.91,3.87,3.88,3.92,3.98,3.95], - [2.53,2.52,2.46,2.42,2.44,2.43], - [2.23,2.17,2.12,2.16,2.30,2.35], - [2.46,2.44,2.45,2.52,2.59,2.51] + [3.9185469150543213, 3.8989100456237793, 3.8274168968200684, 3.844446897506714, 3.8334569931030273], + [2.2376699447631836, 2.236940860748291, 2.1736011505126953, 2.1718149185180664, 2.1718950271606445], + [1.8365809917449951, 1.83445405960083, 1.8318328857421875, 1.830247163772583, 1.8350648880004883], + [1.7387700080871582, 1.7485370635986328, 1.7356500625610352, 1.7735240459442139, 1.7386369705200195], ], "pypy-jit":[ - [1.60,1.59,1.61,1.62,1.66,1.59], - [3.02,3.01,3.15,3.21,2.91,3.19], - [3.33,3.33,3.34,3.30,3.21,3.47], - [3.57,3.67,3.34,3.48,3.46,3.61] + [1.5851430892944336, 1.5589039325714111, 1.5606789588928223, 1.5591561794281006, 1.5676798820495605], + [2.041408061981201, 2.042022943496704, 2.018625020980835, 2.0331828594207764, 1.9610838890075684], + [3.759972095489502, 3.7670998573303223, 3.787578821182251, 3.7681820392608643, 3.75722599029541], + 
[4.019146919250488, 4.016950845718384, 4.092247009277344, 4.044332981109619, 4.066432952880859], ]}), ("richards (large)", { "pypy-stm-jit":[ - [63.4,61.3,62.4,71.2,63.9], - [33.1,38.1,32.9,35.3,35.7], - [24.9,36.1,24.9,33.4,25.4], - [27.1,39.0,63.5,45.5,21.1] + [17.73,17.73,17.74,17.72,17.73], + [10.41,10.43,10.43,10.39,10.41], + [9.73,9.77,9.81,9.74,9.67], + [9.95,10.00,9.95,10.00,9.94], ], "pypy-jit":[ - [30.7,30.6,31.2,30.5,29.1], - [31.4,28.5,31.5,29.7,32.8], - [33.0,29.5,34.1,32.0,33.4], - [32.0,32.4,34.6,32.6,31.4] + [8.87,8.89,8.89,8.87,8.90], + [9.79,9.79,9.81,9.75,9.80], + [9.95,9.95,9.95,9.98,9.93], + [9.04,9.06,9.01,9.11,9.21] + ]}), + + ("mersenne (large)", { + "pypy-stm-jit":[ + [9.245907068252563, 9.243618965148926, 9.244323968887329, 9.241997957229614, 9.242669820785522], + [4.897014141082764, 4.882230043411255, 4.881627082824707, 4.883110046386719, 4.883028030395508], + [3.603656053543091, 3.6193759441375732, 3.6500911712646484, 3.613867998123169, 3.607400894165039], + [3.6181468963623047, 3.716840982437134, 3.6233408451080322, 3.7763969898223877, 3.4737911224365234], + + ], + "pypy-jit":[ + [5.46359395980835, 5.463193893432617, 5.461122035980225, 5.4631431102752686, 5.46463680267334], + [5.476777076721191, 5.47367787361145, 5.4741599559783936, 5.473356008529663, 5.47412896156311], + [5.497221946716309, 5.484626054763794, 5.478212833404541, 5.484951972961426, 5.490041017532349], + [5.511621952056885, 5.505235910415649, 5.497171878814697, 5.482274055480957, 5.495197772979736], ]}) + ]) def geom_mean(xs): @@ -196,7 +227,7 @@ global fig print "Draw..." 
- legend = plot_speedups(plt, 2, 3, benchs, interps_styles) + legend = plot_speedups(plt, 2, 4, benchs, interps_styles) #axs[0].set_ylim(0, len(x)) #ax.set_yticks([r+0.5 for r in range(len(logs))]) @@ -224,7 +255,7 @@ if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='Plot stm log files') - parser.add_argument('--figure-size', default='7x8', + parser.add_argument('--figure-size', default='7x10', help='set figure size in inches: format=6x4') parser.add_argument('--font-size', default='10.0', help='set font size in pts: 10.0') diff --git a/talk/dls2014/paper/plots/plot_performance_nojit.py b/talk/dls2014/paper/plots/plot_performance_nojit.py --- a/talk/dls2014/paper/plots/plot_performance_nojit.py +++ b/talk/dls2014/paper/plots/plot_performance_nojit.py @@ -38,21 +38,23 @@ # pypy-c-paper-nojit bench.py -k5 threadworms/threadworms.py 1-4 500000 # pypy-c-paper-nojit bench.py -k5 mandelbrot/mandelbrot.py 1-4 64 512 512 # pypy-c-paper-nojit multithread-richards.py 30 1-4 # report runtime +# miller_rabin: bench.py -k5 primes/primes.py 1-4 400000 +# mersenne: bench.py -k5 mersenne/mersenne.py $i 1500 from collections import OrderedDict benchs = OrderedDict([ ("btree (small)", { "pypy-stm-nojit":[ - [8.21,8.10,8.19,8.13,8.14], - [4.60,4.64,4.61,5.21,4.65], - [3.61,4.48,5.02,5.17,3.55], - [4.01,4.07,4.68,4.07,3.76] + [7.98744797707,7.95994615555,8.0069360733,8.01075410843,7.99107217789], + [4.4982790947,4.51797103882,4.53245615959,4.51872205734,4.49158000946], + [3.48803210258,3.5547580719,3.51397109032,3.51541280746,3.50235295296], + [3.0812599659,3.05026102066,3.07854914665,3.05468702316,3.05405092239], ], "cpython":[ - [1.93,1.93,1.88,1.90,1.95], - [5.76,5.78,5.78,5.71,5.79], - [5.91,5.66,5.66,5.60,5.68], - [6.03,5.98,6.01,6.03,5.97] + [1.89,1.89,1.90,1.90,1.90], + [5.70,5.70,5.72,5.74,5.75], + [5.90,5.92,5.97,5.93,5.91], + [5.95,5.92,5.92,5.93,5.90], ], # "jython":[ # [1.76,1.84], @@ -61,24 +63,24 @@ # [2.57,2.52,2.48] # ], 
"pypy-nojit":[ - [6.23,6.41,6.27,6.23,6.29], - [10.3,10.5,10.4,10.5,10.3], - [11.4,11.4,11.3,11.3,11.5], - [12.0,12.3,12.3,12.1,12.1] + [6.18,6.17,6.17,6.18,6.19], + [9.94,9.96,9.96,9.93,9.93], + [10.36,10.33,10.38,10.37,10.40], + [10.53,10.52,10.53,10.53,10.54], ]}), ("skiplist (small)", { "pypy-stm-nojit":[ - [5.80,5.91,6.10,5.71,5.58], - [3.71,3.42,3.73,3.60,3.54], - [3.22,2.95,3.25,3.61,3.03], - [3.50,3.25,3.68,3.87,3.27] + [5.3692650795,5.75404596329,5.80874490738,5.56507706642,5.54502797127], + [3.33223390579,3.43795180321,3.7308909893,3.6313188076,3.4803340435], + [2.90017795563,2.96001410484,2.98225808144,2.90836501122,3.08461618423], + [2.69811701775,2.71903181076,2.7594909668,2.6924469471,2.68277978897], ], "cpython":[ - [3.3,3.1,3.3,3.2,3.3], - [4.9,5.2,5.2,5.1,5.2], - [5.0,5.4,5.3,5.3,5.4], - [5.1,5.4,5.3,5.3,5.4] + [3.18362903595,3.13687205315,3.09498882294,2.87599921227,3.094383955], + [4.9131629467,5.09300899506,4.8633351326,5.01421904564,5.00006604195], + [5.0695669651,5.18470811844,5.03273797035,5.17617797852,4.92464590073], + [5.08509016037,5.05335688591,5.1101269722,5.26950407028,5.11297392845], ], # "jython":[ # [1.38,1.33,1.47,1.40], @@ -87,24 +89,24 @@ # [1.99,1.92,1.74,1.84] # ], "pypy-nojit":[ - [4.01,4.10,4.11,3.88,3.97], - [5.92,5.84,5.74,6.16,5.76], - [6.67,6.42,6.51,6.48,6.48], - [6.50,6.59,6.93,6.61,6.56] + [3.99729990959,3.92795300484,3.92997407913,4.02246403694,4.16354703903], + [6.20531105995,6.0312640667,5.67126297951,5.88649582863,5.58283090591], + [6.22896409035,6.35634803772,6.38776803017,6.00280714035,6.2800180912], + [6.13741707802,6.11261510849,6.20782995224,6.05345606804,6.21988797188], ]}), ("threadworms (small)", { "pypy-stm-nojit":[ - [4.71,4.67,4.69,4.71,4.69], - [2.61,2.55,2.53,2.52,2.56], - [2.01,1.86,1.88,1.93,2.31], - [2.14,2.02,2.46,2.06,1.69] + [4.66151094437,4.58688688278,4.66727614403,4.6772108078,4.6156001091], + [2.49641013145,2.50251197815,2.49980187416,2.49767017365,2.49619984627], + 
[1.84332799911,1.86481595039,1.85224509239,1.84722995758,1.86878705025], + [1.548361063,1.55502295494,1.54036188126,1.51436495781,1.55404090881], ], "cpython":[ - [1.64,1.62,1.62,1.66,1.64], - [5.08,5.10,5.08,5.01,5.16], - [5.00,5.10,5.15,5.52,5.01], - [5.37,5.30,5.41,5.21,5.10] + [1.63531804085,1.63171005249,1.64426994324,1.63068985939,1.63233208656], + [5.11088013649,5.0750939846,5.07781815529,5.03861713409,5.14281582832], + [5.18783211708,5.15396308899,5.14551186562,5.12670898438,5.14967012405], + [5.2029311657,5.18416690826,5.16408801079,5.17987704277,5.16225814819], ], # "jython":[ # [2.73,2.38,2.63,2.4], @@ -113,24 +115,44 @@ # [3.19,3.37,3.26,3.36] # ], "pypy-nojit":[ - [4.02,4.03,4.01,4.05,4.05], - [7.21,7.23,7.30,7.12,7.21], - [8.05,8.03,8.08,8.12,8.02], - [8.54,8.56,8.61,8.91,8.80] + [3.96425199509,3.92935299873,3.9230401516,3.93678808212,3.9455499649], + [7.18407416344,7.18444395065,7.16915011406,7.18182110786,7.22779583931], + [7.66615414619,7.66010093689,7.72703194618,7.71287798882,7.72050714493], + [7.84829282761,7.84318208694,7.78388905525,7.74968695641,7.74719285965], + ]}), + + ("miller-rabin (small)", { + "pypy-stm-nojit":[ + [3.369349956512451, 3.3623909950256348, 3.3570539951324463, 3.35870099067688, 3.352569103240967], + [1.8622829914093018, 1.8384361267089844, 1.8307039737701416, 1.8440592288970947, 1.8353259563446045], + [1.4145839214324951, 1.3829829692840576, 1.3893311023712158, 1.4124789237976074, 1.3802859783172607], + [1.259462833404541, 1.2429590225219727, 1.246514081954956, 1.216398000717163, 1.2270379066467285] + ], + "cpython":[ + [1.463728904724121, 1.4588379859924316, 1.4599599838256836, 1.454383134841919, 1.4613091945648193], + [1.9370648860931396, 1.9515810012817383, 1.9363758563995361, 1.9516470432281494, 1.9729619026184082], + [2.0382211208343506, 2.0417628288269043, 2.038292169570923, 2.033823013305664, 2.0392959117889404], + [2.0457639694213867, 2.0550150871276855, 2.045275926589966, 2.0547900199890137, 2.058414936065674] + 
], + "pypy-nojit":[ + [3.1500821113586426, 3.1333930492401123, 3.133725881576538, 3.1395609378814697, 3.137906074523926], + [5.383965015411377, 5.348900079727173, 5.329493999481201, 5.293528079986572, 5.336282014846802], + [5.570706129074097, 5.564241170883179, 5.5810630321502686, 5.539607048034668, 5.5388758182525635], + [5.5085608959198, 5.483498811721802, 5.5816099643707275, 5.592865943908691, 5.601343870162964] ]}), ("mandelbrot (small)", { "pypy-stm-nojit":[ - [5.35,5.30,5.23,5.15,5.21], - [2.71,2.69,2.66,2.67,2.64], - [1.96,1.81,1.87,1.83,1.91], - [1.88,1.95,1.85,1.75,1.86] + [5.3069088459,5.2796959877,5.27015900612,5.3081278801,5.32982206345], + [2.67205810547,2.66844701767,2.66896700859,2.68082690239,2.6778409481], + [1.87407612801,1.89018201828,1.88588881493,1.87249279022,1.88197517395], + [1.50376915932,1.55750012398,1.49635004997,1.50658798218,1.5112760067], ], "cpython":[ - [1.65,1.70,1.61,1.73,1.66], - [2.40,2.27,2.30,2.31,2.30], - [2.41,2.46,2.34,2.42,2.46], - [2.51,2.40,2.45,2.37,2.49] + [1.66689109802,1.70921182632,1.67385601997,1.74684906006,1.67685079575], + [2.22828507423,2.22786307335,2.21894907951,2.23693585396,2.23923707008], + [2.27540802956,2.28734898567,2.26518392563,2.2811088562,2.27695202827], + [2.28982901573,2.28482198715,2.29300618172,2.29116606712,2.28355097771], ], # "jython":[ # [5.56,5.61,5.59,5.55], @@ -139,24 +161,24 @@ # [1.8,1.74,1.8,1.88] # ], "pypy-nojit":[ - [3.54,3.33,3.39,3.38,3.34], - [4.43,4.43,4.47,4.47,4.46], - [4.14,3.62,4.07,4.20,3.79], - [3.88,3.83,3.82,3.79,3.88] + [3.37114810944,3.36375617981,3.36287212372,3.36206793785,3.36504197121], + [4.47998595238,4.47378611565,4.46991205215,4.47737216949,4.41863703728], + [4.3745610714,4.37568807602,4.37834095955,4.37904405594,4.38698601723], + [4.40002799034,4.40542793274,4.40231585503,4.40534210205,4.39605498314], ]}), ("raytrace (small)", { "pypy-stm-nojit":[ - [8.3, 8.04,7.99,7.91,8.03,], - [4.33,4.21,4.28,4.23,4.24,], - [3.74,3.61,3.09,4.18,3.01,], - 
[3.08,2.89,2.75,3.06,2.98,] + [8.09555792809,8.03761792183,8.07105588913,8.08723092079,8.02405285835], + [4.23407101631,4.20152997971,4.19831204414,4.20036005974,4.19185209274], + [2.99346899986,3.01209020615,3.02385997772,2.99872112274,3.01878595352], + [2.58209013939,2.62102484703,2.58993005753,2.59897303581,2.5549788475], ], "cpython":[ - [2.5,2.55,2.51,2.54,2.52,], - [2.7,2.67,2.67,2.69,2.71,], - [2.75,2.77,2.75,2.80,2.76,], - [2.84,2.83,2.85,2.86,2.83,] + [2.52900099754,2.52002096176,2.53882884979,2.53405308723,2.52506089211], + [2.64264392853,2.62728977203,2.68032217026,2.6406929493,2.651679039], + [2.74740791321,2.75495100021,2.73957586288,2.74933600426,2.72902297974], + [2.85847210884,2.85967087746,2.87045097351,2.87352895737,2.87047505379], ], # "jython":[ # [2.95,2.95,2.96], @@ -165,24 +187,24 @@ # [1.09,0.9,0.97,0.99,1.03] # ], "pypy-nojit":[ - [5.41,5.36,5.34,5.31,5.38], - [6.66,6.63,6.61,6.51,6.60], - [6.34,6.29,6.22,6.32,6.31], - [5.91,5.72,5.88,5.87,5.78] + [5.2403049469,5.23123502731,5.23440098763,5.25973105431,5.22721982002], + [6.54222011566,6.54369497299,6.55081796646,6.5521800518,6.56022405624], + [6.10841608047,6.15412402153,6.15307998657,6.16904401779,6.13451099396], + [5.46543121338,5.47703313828,5.47254896164,5.49223995209,5.50018310547], ]}), ("richards (small)", { "pypy-stm-nojit":[ - [10.1,10.24,10.18,10.20,10.32], - [5.71,5.79,5.73,5.75,5.75], - [5.41,4.28,5.22,4.96,5.51], - [4.61,4.62,4.51,4.49,5.08] + [9.90,9.89,9.85,9.92,9.95], + [5.50,5.55,5.52,5.48,5.52], + [4.06,4.09,4.09,4.09,4.08], + [3.63,3.61,3.54,3.51,3.54], ], "cpython":[ [2.51,2.45,2.55,2.51,2.50], [3.87,3.71,3.88,3.81,3.75], - [4.02,4.08,4.10,4.02,3.98], - [4.13,4.01,4.15,3.99,4.12] + [3.72,3.74,3.71,3.70,3.72], + [3.76,3.78,3.79,3.80,3.76], ], # "jython":[ # [3.39,3.31,3.7], @@ -193,9 +215,30 @@ "pypy-nojit":[ [5.95,6.02,5.99,5.92,6.03], [7.88,7.78,7.77,7.69,7.79], - [7.01,7.02,7.17,7.05,7.21], - [6.66,6.71,6.58,6.65,6.56] - ]}) + [7.66,7.66,7.81,7.75,7.53], + 
[7.89,7.93,7.81,7.69,7.80] + ]}), + + ("mersenne (small)", { + "pypy-stm-nojit":[ + [3.6719789505004883, 3.6668028831481934, 3.664036989212036, 3.6665987968444824, 3.6641900539398193], + [2.137367010116577, 1.9911909103393555, 1.9913787841796875, 1.9875690937042236, 1.988551139831543], + [2.0355780124664307, 2.0366389751434326, 2.029282808303833, 2.0356709957122803, 2.0340609550476074], + [1.9997708797454834, 1.996995210647583, 2.0678999423980713, 2.075084924697876, 1.994607925415039] + ], + "cpython":[ + [3.263484001159668, 3.2587788105010986, 3.25673508644104, 3.2573680877685547, 3.255321979522705], + [3.2739717960357666, 3.274899959564209, 3.293348789215088, 3.2946629524230957, 3.29618501663208], + [3.3265891075134277, 3.2955169677734375, 3.2899580001831055, 3.305911064147949, 3.2877209186553955], + [3.2929179668426514, 3.2984700202941895, 3.2903239727020264, 3.301953077316284, 3.2956490516662598] + ], + "pypy-nojit":[ + [2.2521681785583496, 2.2531819343566895, 2.252506971359253, 2.2531309127807617, 2.256279945373535], + [2.287511110305786, 2.284902811050415, 2.28535795211792, 2.2856361865997314, 2.2816338539123535], + [2.328660011291504, 2.3119280338287354, 2.330618143081665, 2.330143928527832, 2.328179121017456], + [2.3237390518188477, 2.3256571292877197, 2.318053960800171, 2.317232131958008, 2.3278181552886963] + ]}), + ]) def geom_mean(xs): @@ -228,7 +271,7 @@ global fig print "Draw..." 
- legend = plot_speedups(plt, 2, 3, benchs, interps_styles) + legend = plot_speedups(plt, 2, 4, benchs, interps_styles) #axs[0].set_ylim(0, len(x)) #ax.set_yticks([r+0.5 for r in range(len(logs))]) @@ -256,7 +299,7 @@ if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='Plot stm log files') - parser.add_argument('--figure-size', default='7x8', + parser.add_argument('--figure-size', default='7x10', help='set figure size in inches: format=6x4') parser.add_argument('--font-size', default='10.0', help='set font size in pts: 10.0') diff --git a/talk/dls2014/paper/plots/plot_scaling.py b/talk/dls2014/paper/plots/plot_scaling.py --- a/talk/dls2014/paper/plots/plot_scaling.py +++ b/talk/dls2014/paper/plots/plot_scaling.py @@ -1,7 +1,7 @@ #!/usr/bin/python # obtained with time on -# pypy-c --jit off bench_scaling.py [1-4] +# pypy-c-paper-nojit bench_scaling.py [1-4] import matplotlib @@ -20,10 +20,12 @@ # import pprint - slow as hell xs = range(1,5) -ys = [[1.78,1.79,1.75,1.81,1.76], - [1.82,1.82,1.80,1.80,1.8], - [1.88,1.97,1.89,1.90,1.94], - [1.96,1.99,1.87,2.00,1.97,2.09,1.98,2.12]] +ys = [ + [1.77,1.75,1.75,1.74,1.75], + [1.75,1.74,1.75,1.76,1.75], + [1.80,1.92,1.87,1.88,1.84], + [1.90,1.92,1.94,1.88,1.92], +] diff --git a/talk/dls2014/paper/plots/scaling.pdf b/talk/dls2014/paper/plots/scaling.pdf index 091ef98afae2c580360a3ee1de6d388d10ef8f92..a6e66ba747ea2c961db81964298618003d0bc34b GIT binary patch [cut] From noreply at buildbot.pypy.org Thu Jul 31 16:35:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 31 Jul 2014 16:35:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Reproduce the translation problem with two virtualizables Message-ID: <20140731143512.C34731C0250@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72624:8674ce83d8e6 Date: 2014-07-31 16:22 +0200 http://bitbucket.org/pypy/pypy/changeset/8674ce83d8e6/ Log: Reproduce the translation problem with two virtualizables diff --git 
a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -21,7 +21,7 @@ # this is a basic test that tries to hit a number of features and their # translation: # - jitting of loops and bridges - # - virtualizables + # - two virtualizable types # - set_param interface # - profiler # - full optimizer @@ -79,22 +79,28 @@ if rposix.get_errno() != total: raise ValueError return chr(total % 253) # + class Virt2(object): + _virtualizable_ = ['i'] + def __init__(self, i): + self.i = i from rpython.rlib.libffi import types, CDLL, ArgChain from rpython.rlib.test.test_clibffi import get_libm_name libm_name = get_libm_name(sys.platform) - jitdriver2 = JitDriver(greens=[], reds = ['i', 'func', 'res', 'x']) + jitdriver2 = JitDriver(greens=[], reds = ['v2', 'func', 'res', 'x'], + virtualizables = ['v2']) def libffi_stuff(i, j): lib = CDLL(libm_name) func = lib.getpointer('fabs', [types.double], types.double) res = 0.0 x = float(j) - while i > 0: - jitdriver2.jit_merge_point(i=i, res=res, func=func, x=x) + v2 = Virt2(i) + while v2.i > 0: + jitdriver2.jit_merge_point(v2=v2, res=res, func=func, x=x) promote(func) argchain = ArgChain() argchain.arg(x) res = func.call(argchain, rffi.DOUBLE) - i -= 1 + v2.i -= 1 return res # def main(i, j): From noreply at buildbot.pypy.org Thu Jul 31 16:35:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 31 Jul 2014 16:35:14 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix comparison of lltype._ptr if they are delayed. Message-ID: <20140731143514.105B51C0250@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72625:0764713cbb49 Date: 2014-07-31 16:34 +0200 http://bitbucket.org/pypy/pypy/changeset/0764713cbb49/ Log: Fix comparison of lltype._ptr if they are delayed. 
diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1154,7 +1154,12 @@ type(other).__name__,)) if self._TYPE != other._TYPE: raise TypeError("comparing %r and %r" % (self._TYPE, other._TYPE)) - return self._obj == other._obj + try: + return self._obj == other._obj + except DelayedPointer: + # if one of the two pointers is delayed, they cannot + # possibly be equal unless they are the same _ptr instance + return self is other def __ne__(self, other): return not (self == other) diff --git a/rpython/rtyper/test/test_annlowlevel.py b/rpython/rtyper/test/test_annlowlevel.py --- a/rpython/rtyper/test/test_annlowlevel.py +++ b/rpython/rtyper/test/test_annlowlevel.py @@ -64,3 +64,13 @@ assert lltype.typeOf(ptr) == OBJECTPTR y = annlowlevel.cast_base_ptr_to_instance(X, ptr) assert y is x + + def test_delayedptr(self): + FUNCTYPE = lltype.FuncType([], lltype.Signed) + name = "delayed!myfunc" + delayedptr1 = lltype._ptr(lltype.Ptr(FUNCTYPE), name, solid=True) + delayedptr2 = lltype._ptr(lltype.Ptr(FUNCTYPE), name, solid=True) + assert delayedptr1 == delayedptr1 + assert delayedptr1 != delayedptr2 + assert bool(delayedptr1) + assert delayedptr1 != lltype.nullptr(FUNCTYPE) From noreply at buildbot.pypy.org Thu Jul 31 16:55:16 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 31 Jul 2014 16:55:16 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: update benchmark numbers in paper Message-ID: <20140731145516.0CA7B1C09B2@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5383:5e9dd2e2f31e Date: 2014-07-31 16:55 +0200 http://bitbucket.org/pypy/extradoc/changeset/5e9dd2e2f31e/ Log: update benchmark numbers in paper diff --git a/talk/dls2014/paper/paper.pdf b/talk/dls2014/paper/paper.pdf index f7c23752d1005cd0ef587d90e5589777cba2a6a3..7c5dd9b395b326795850964f603f80bf65a15bd3 GIT binary patch [cut] diff --git 
a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -156,7 +156,7 @@ Since some years, the popularity of dynamic languages is on the rise. A common trait of many of these language's implementations is the use of -a single global interpreter lock (GIL) to synchronise the interpreter +a single, global interpreter lock (GIL) to synchronise the interpreter in a multithreading scenario. Since this lock effectively serialises the execution, applications can not make use of the increasing parallelism in current hardware. @@ -197,7 +197,7 @@ programming model was not part of the initial design of those languages. Thus, the reference implementations of, e.g., Python and Ruby use a single, global interpreter lock (GIL) to serialise the execution of code in -threads. The use of a single global lock causes several disadvantages, +threads. The use of a single, global lock causes several disadvantages, and as multi-core processors become the de facto standard in platforms ranging from smart phones to data centres, the issue of parallel execution must be addressed by these dynamic languages. @@ -310,7 +310,7 @@ We focus on the \emph{threading} approach. This requires us to remove the GIL from the interpreter to run code in parallel on multiple threads. One approach to this is fine-grained locking instead of a -single global lock. Jython~\cite{webjython} and +single, global lock. Jython~\cite{webjython} and IronPython~\cite{ironpython} follow this approach. Fine-grained locking is, however, not a \emph{direct} replacement for the GIL. It requires multiple locks in multiple places, not only in a central @@ -1134,7 +1134,7 @@ Turbo Boost for less variation in the results. There are 16~GiB of memory available and we ran them under Ubuntu 14.04 with a Linux 3.13.0 kernel. 
The STM system was compiled with a number of segments $N=4$ -and a maximum amount of memory of 1.5~GiB (both are configurable at +and a maximum amount of memory of 1.5~GiB (both configured at compile time). For each point in the plots, we took 5 measurements and report the @@ -1176,14 +1176,14 @@ threads. For the results in Figure~\ref{fig:scaling}, we normalised the average runtimes to the time it took on a single thread. From this we see that there is some additional overhead introduced -by each thread ($12.3\%$ for all 4 threads together). Every thread +by each thread ($9.1\%$ for all 4 threads together). Every thread adds some overhead because during a commit, there is one more thread which has to reach a safe point. Additionally, conflict detection needs to check for conflicts in all concurrently running transactions. -While not ideal, we think that $12.3\%$ is acceptable on four -threads. In terms of throughput, 4 threads have $3.54\times$ -more iterations per second than a single thread. +While not ideal, we think that $9.1\%$ is acceptable on four +threads. In terms of throughput, 4 threads reach $3.67\times$ +the iterations per second of a single thread. \begin{figure}[h] \centering @@ -1194,7 +1194,7 @@ \subsection{Small-Scale Benchmarks\label{sec:performance-bench}} -For the following sections we use a set of six small benchmarks +For the following sections we use a set of 8 small benchmarks available at~\cite{pypybenchs}. There are, unsurprisingly, not many threaded applications written in Python that can be used as a benchmark. 
Since until now, threading was rarely used @@ -1207,18 +1207,18 @@ threads \item \emph{threadworms}, which simulates worms walking on a grid in parallel and checking for collisions with each other -\item \emph{mandelbrot}, \emph{raytrace}, and \emph{richards}, which - all perform some independent computations in parallel (embarrassingly - parallel) +\item \emph{miller-rabin}, \emph{mandelbrot}, \emph{raytrace}, + \emph{richards}, and \emph{mersenne}, which all perform some mostly + independent computations in parallel (embarrassingly parallel). \end{itemize} -We use atomic blocks to easily synchronise access to shared -data structures in the first three benchmarks. For the embarrassingly -parallel benchmarks, we do not need any explicit synchronisation at -all. These atomic blocks are simulated with a single global lock -when running on top of GIL-supported interpreters. With our STM -system, they map to transactions that will execute optimistically -in parallel. +We use atomic blocks as the primary mechanism to synchronise access to +shared data structures in the first three benchmarks. For the +embarrassingly parallel benchmarks, we only need explicit +synchronisation in some small isolated part. These atomic blocks are +simulated with a single, global lock when running on top of +GIL-supported interpreters. With our STM system, they map to +transactions that will execute optimistically in parallel. %%% TODO: if time permits @@ -1250,35 +1250,40 @@ analysis). On 4 cores, PyPy using our STM system (\emph{pypy-stm-nojit}) scales -in all benchmarks to a certain degree. It scales best for the -embarrassingly parallel ones ($avg=2.6\times$ speedup over 1 thread) -and a little less for the others ($avg=2.0\times$ speedup over 1 -thread). The reason for this difference is -that in the former group there are no real, logical conflicts -- all -threads do independent calculations. STM simply replaces the GIL in -those programs. 
In the latter group, the threads work on a common data -structure and therefore create much more conflicts, which limits the -scalability. Here we make active use of the STM-supported atomic -blocks to synchronise access to the data structure. The hope -is that the STM system is still able to parallelise, even if we use -the atomic blocks in a coarse-grained way. While less than the other -benchmarks, we indeed see some speedup going from 1 to 3 threads. -There is no visible benefit from 3 to 4 threads, even a slight -regression. +in all benchmarks to a certain degree. \emph{mersenne} is a bit of a +special case, since it is easily parallelisable but a few of the +parallel computations dominate the runtime and cannot be made faster +easily by parallelisation. This is why the benefit of adding more +threads than 2 is diminishing. + +\emph{pypy-stm-nojit} scales best for the embarrassingly parallel +benchmarks ($avg=2.7\times$ speedup over 1 thread) and a little less +for the others ($avg=2.5\times$ speedup over 1 thread). The reason for +this difference is that in the former group there are no real, logical +conflicts -- all threads do independent calculations. STM simply +replaces the GIL in those programs. In the latter group, the threads +work on a common data structure and therefore create much more +conflicts, which limits the scalability. Here we make active use of +the STM-supported atomic blocks to synchronise access to the data +structure. The hope is that the STM system is still able to +parallelise, even if we use the atomic blocks in a coarse-grained +way. While less than the other benchmarks, we indeed see some speedup +going from 1 to 4 threads. Looking at the average overhead on a single thread that is induced by -switching from GIL to STM, we see that it is $\approx 43.3\%$. The -maximum in richards is $71\%$. In all benchmarks \emph{pypy-stm-nojit} +switching from GIL to STM, we see that it is $\approx 40.1\%$. The +maximum in richards is $65.5\%$. 
In all benchmarks \emph{pypy-stm-nojit} beats \emph{pypy-nojit} already on two threads, despite of this -overhead. The achieved speedup comparing the lowest runtimes of STM -to the single-threaded GIL execution is between $1.14\times$ and -$1.94\times$. +overhead. The achieved speedup comparing the fastest runtimes of STM +to the single-threaded GIL execution of \emph{pypy-nojit} is between +$1.11\times$ and $2.55\times$. -Still, STM rarely beats CPython's \emph{single-thread} performance. However, for +Still, \emph{pypy-stm-nojit} barely beats CPython's \emph{single-thread} +performance. However, for programs that need concurrency in CPython and that use threads to achieve this, it also makes sense to look at the overhead induced by the GIL on multiple threads. From this perspective, the STM -implementation beats CPython's performance in all but two benchmarks. +implementation beats CPython's performance in all benchmarks. Since PyPy comes with a JIT~\cite{cfbolz09} to make it vastly faster than CPython, we will now look at how well STM works together with it. @@ -1304,8 +1309,8 @@ so are these optimisations. And third, we did not have enough time to optimise integration with STM so that the JIT exposes the overhead of STM more by speeding up all the rest. -For these reasons, the following results have to be taken with a grain -of salt. +For these reasons, the following results should be regarded as a +preliminary outlook. The speedups from simply enabling the JIT in these benchmarks range from $10-50\times$. This is why we had to do without CPython here, since it @@ -1325,32 +1330,31 @@ this thoroughly. The results are presented in Figure~\ref{fig:performance-jit}. We see -that the performance gains are much less reliable. The slowdown -factor for switching from GIL to STM ranges around $1-2.4\times$, and -we beat the GIL's single-thread performance in half of the benchmarks. +that the performance gains of STM are a bit less reliable with the JIT. 
+The slowdown factor for switching from GIL to STM ranges around +$1-2.5\times$, and we beat the GIL's single-thread performance in +5 out of 8 benchmarks. We see that generally, the group of embarrassingly parallel benchmarks -scales best. (There is a notable performance stability problem in the -\emph{richards} benchmark. This is a bug we will fix in the future.) -The other three benchmarks scale barely or not at all with the number of -threads. The reason for this is likely again the conflicts in the -latter group. Right now, because the code runs much more quickly with -the JIT than without, it has the effect of making the transactions -logically longer. This increases the -likelihood of conflicts between them and therefore limits scalability -even more than in the no-JIT benchmarks. +scales best. The other three benchmarks scale barely or not at all +with the number of threads. The reason for this is likely again the +conflicts in the latter group. Right now, because the code runs much +more quickly with the JIT than without, it has the effect of making +the transactions logically longer. This increases the likelihood of +conflicts between them and therefore limits scalability even more than +in the no-JIT benchmarks. -Overall, PyPy without STM is around $2\times$ slower than CPython. -Enabling its JIT allows it to outperform CPython by a huge margin. -We see the same kind of speedup on PyPy with STM when enabling the -JIT. This means that STM does not generally inhibit the JIT from -optimising the programs execution, which is already a very important -result on its own. We also see that for some -benchmarks, STM is already able to give additional speedups -compared to just the JIT-induced acceleration. This looks very -promising and investigating when this is not the case is the next -logical step. - +Overall, PyPy without STM is around $2\times$ slower than CPython +(\emph{cpython} vs.\ \emph{pypy-nojit}). 
Enabling its JIT allows it +to outperform CPython by a huge margin. We see the same kind of +speedup on PyPy with STM when enabling the JIT. This means that STM +does not generally inhibit the JIT from optimising the programs +execution, which is already a very important result on its own. We +also see that for some benchmarks, STM is already able to give +additional speedups compared to just the JIT-induced +acceleration. This looks very promising and improving this situation +even more is the next step for reaching the goal of parallelising +Python. \begin{figure}[h] \centering @@ -1411,26 +1415,6 @@ here and theirs. -% Similar STMs: -% \begin{itemize} -% \item FastLane: \cite{warmhoff13} -% \item TML: \cite{spear09} -% \item Virtualizing HTM: \cite{rajwar05} -% \item Page-based virtualizing HyTM: \cite{chung06}: page-level conflict -% detection, otherwise hardware extensions required; assumes most -% transactions fit HTM capacities (not so true here); COW using page-faults; -% they assume OS-level access to page-tables (maybe not inherent to their -% design); eval on simulator; value-based confl detection; - -% (XTM can be -% implemented either in the OS as part of the virtual memory manager or -% between underlying TM systems and the OS, like virtual machines; -% Conflicts for overflowed transactions are tracked at page granularity; -% XTM-e allows conflict detection at cache line granu- -% larity, even for overflowed data in virtual memory) -% \item using mmap(): Memory-Mapped Transactions -% \item mem-protected conflict detection: \cite{martin09} -% \end{itemize} \section{Conclusions} @@ -1471,10 +1455,10 @@ % results, outlook The early results presented here are very encouraging. STM as a simple -GIL replacement scales well and yields an average speedup of $2.6\times$ for +GIL replacement scales well and yields an average speedup of $2.7\times$ for embarrassingly parallel workloads on 4 threads. 
When also used for synchronisation in the form of atomic blocks, the average speedup -still reaches $2.0\times$. +still reaches $2.5\times$. To generally outperform the best-performing Python systems (Jython, PyPy), integration of the STM-based approach with a JIT compiler is diff --git a/talk/dls2014/paper/plots/performance.pdf b/talk/dls2014/paper/plots/performance.pdf index 9c48101801c6fe17a25b0c587435cfbfd80fa636..14e8ec27f476f3321d8eff9e18053decb356d1a4 GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/performance_nojit.pdf b/talk/dls2014/paper/plots/performance_nojit.pdf index f189a9c696c64c18e3fc1ca70ef5f5eba1055c5c..ef783defa489ca139a10781206b1be8d80645ee3 GIT binary patch [cut] diff --git a/talk/dls2014/paper/plots/plot_performance_nojit.py b/talk/dls2014/paper/plots/plot_performance_nojit.py --- a/talk/dls2014/paper/plots/plot_performance_nojit.py +++ b/talk/dls2014/paper/plots/plot_performance_nojit.py @@ -252,9 +252,10 @@ slowdown = np.mean(interps["pypy-stm-nojit"][0]) / np.mean(interps["pypy-nojit"][0]) speedup = np.mean(interps["pypy-stm-nojit"][0]) / np.mean(interps["pypy-stm-nojit"][3]) total = np.mean(interps["pypy-nojit"][0]) / np.mean(interps["pypy-stm-nojit"][3]) - print "overhead", bench_name, ":", slowdown - print "stm speedup", bench_name, ":", speedup - print "totals", bench_name, ":", total + print "===========",bench_name, "==========" + print "overhead STM-GIL", bench_name, ":", slowdown + print "stm-own speedup", bench_name, ":", speedup + print "total speedup STM over GIL", bench_name, ":", total sls.append(slowdown) spds.append(speedup) totals.append(total) diff --git a/talk/dls2014/paper/plots/scaling.pdf b/talk/dls2014/paper/plots/scaling.pdf index a6e66ba747ea2c961db81964298618003d0bc34b..b2fd7e2cb61c56de652c3e17273fe166267ec7d2 GIT binary patch [cut] From noreply at buildbot.pypy.org Thu Jul 31 17:08:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 31 Jul 2014 17:08:36 +0200 (CEST) Subject: [pypy-commit] 
pypy default: Expand a failing AssertionError with a more descriptive TyperError. Message-ID: <20140731150836.DE2501C0FEE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72626:372fd1f54b6d Date: 2014-07-31 17:08 +0200 http://bitbucket.org/pypy/pypy/changeset/372fd1f54b6d/ Log: Expand a failing AssertionError with a more descriptive TyperError. diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py --- a/rpython/rtyper/normalizecalls.py +++ b/rpython/rtyper/normalizecalls.py @@ -93,7 +93,12 @@ return False # nothing to do, all signatures already match shape_cnt, shape_keys, shape_star = shape - assert not shape_star, "XXX not implemented" + if shape_star: + raise TyperError( + "not implemented: a call is done with a '*' argument, and the" + " multiple functions or methods that it can go to don't have" + " all the same signature (different argument names or defaults)." + " The call can go to:\n%s" % '\n'.join(map(repr, graphs))) # for the first 'shape_cnt' arguments we need to generalize to # a common type diff --git a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py --- a/rpython/rtyper/test/test_normalizecalls.py +++ b/rpython/rtyper/test/test_normalizecalls.py @@ -192,6 +192,25 @@ import re assert re.match(msg, excinfo.value.args[0]) + def test_methods_with_named_arg_call(self): + class Base: + def fn(self, y): + raise NotImplementedError + class Sub1(Base): + def fn(self, y): + return 1 + y + class Sub2(Base): + def fn(self, x): # different name! + return x - 2 + def dummyfn(n): + if n == 1: + s = Sub1() + else: + s = Sub2() + return s.fn(*(n,)) + + py.test.raises(TyperError, self.rtype, dummyfn, [int], int) + class PBase: def fn(self):