From noreply at buildbot.pypy.org Wed Jan 1 02:15:44 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 1 Jan 2014 02:15:44 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to py3 Message-ID: <20140101011544.2690C1C0162@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68575:3f0fb50b3bda Date: 2013-12-31 17:13 -0800 http://bitbucket.org/pypy/pypy/changeset/3f0fb50b3bda/ Log: adapt to py3 diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -676,7 +676,7 @@ ) W_LongBox.typedef = TypeDef("int%d" % LONG_BIT, - (W_SignedIntegerBox.typedef, int_typedef), + (W_SignedIntegerBox.typedef, long_typedef), __module__ = "numpy", __new__ = interp2app(W_LongBox.descr__new__.im_func), __index__ = interp2app(W_LongBox.descr_index), From noreply at buildbot.pypy.org Wed Jan 1 02:48:32 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 1 Jan 2014 02:48:32 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to py3 Message-ID: <20140101014832.1A6A81C021C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68576:57889a2f42c2 Date: 2013-12-31 17:47 -0800 http://bitbucket.org/pypy/pypy/changeset/57889a2f42c2/ Log: adapt to py3 diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -467,7 +467,8 @@ return read_val def descr_setitem(self, space, w_item, w_value): - if space.isinstance_w(w_item, space.w_basestring): + if (space.isinstance_w(w_item, space.w_bytes) or + space.isinstance_w(w_item, space.w_unicode)): item = space.str_w(w_item) else: raise OperationError(space.w_IndexError, space.wrap( From noreply at buildbot.pypy.org Wed Jan 1 21:16:42 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 1 Jan 2014 21:16:42 +0100 (CET) Subject: [pypy-commit] pypy default: happy new year Message-ID: <20140101201642.C4C1D1C0162@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68577:9a14ac877fe2 Date: 2014-01-01 12:15 -0800 http://bitbucket.org/pypy/pypy/changeset/9a14ac877fe2/ Log: happy new year diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -38,7 +38,7 @@ # General information about the project. project = u'PyPy' -copyright = u'2013, The PyPy Project' +copyright = u'2014, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -70,11 +70,11 @@ return None copyright_str = """ -Copyright 2003-2013 PyPy development team. +Copyright 2003-2014 PyPy development team. All Rights Reserved. For further information, see -Portions Copyright (c) 2001-2013 Python Software Foundation. +Portions Copyright (c) 2001-2014 Python Software Foundation. All Rights Reserved. Portions Copyright (c) 2000 BeOpen.com. 
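The two py3k changesets above, r68575 and r68576, deal with the same porting pattern:
Python 3 removes basestring and unifies int/long, so interp-level type checks have to
name the Python 3 types explicitly. A minimal plain-Python sketch of the string case,
for illustration only (the function and test values below are made up for the example,
not taken from the changesets):

    # Python 3 sketch.  Under Python 2, isinstance(item, basestring) accepted
    # both byte strings and unicode strings in a single test; under Python 3
    # the two types must be listed explicitly, which is what the added
    # w_bytes / w_unicode check in interp_boxes.py mirrors at interp-level.
    def accepts_string_key(item):
        return isinstance(item, (bytes, str))

    assert accepts_string_key(b"f0")
    assert accepts_string_key("f0")
    assert not accepts_string_key(42)
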
From noreply at buildbot.pypy.org Thu Jan 2 01:12:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Jan 2014 01:12:27 +0100 (CET) Subject: [pypy-commit] pypy default: Oups, sorry. Fix two tests failing in backend/x86/test/. Message-ID: <20140102001227.AD1271C050C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68578:f29b84b521b6 Date: 2014-01-02 01:11 +0100 http://bitbucket.org/pypy/pypy/changeset/f29b84b521b6/ Log: Oups, sorry. Fix two tests failing in backend/x86/test/. diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -5,6 +5,10 @@ from collections import OrderedDict class DictTests: + @staticmethod + def newdict(): # overridden in TestLLOrderedDict + return {} + def _freeze_(self): return True @@ -191,9 +195,7 @@ class TestLLtype(DictTests, LLJitMixin): - @staticmethod - def newdict(): - return {} + pass class TestLLOrderedDict(DictTests, LLJitMixin): @staticmethod diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -71,7 +71,11 @@ 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + +class TestRawMem(RawMemTests, LLJitMixin): + def test_getarraysubstruct(self): + # NOTE: not for backend/*/test A2 = lltype.Array(('a', lltype.Signed), ('b', lltype.Signed), hints={'nolength': True}) p = lltype.malloc(A2, 10, flavor='raw', immortal=True, zero=True) @@ -90,6 +94,3 @@ assert res == 66 res = self.interp_operations(f, [2, 2], disable_optimizations=True) assert res == 44 - -class TestRawMem(RawMemTests, LLJitMixin): - pass From noreply at buildbot.pypy.org Thu Jan 2 01:13:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Jan 2014 01:13:25 +0100 (CET) Subject: [pypy-commit] pypy default: Update Message-ID: <20140102001325.8222F1C050C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68579:5078f6046258 Date: 2014-01-02 01:12 +0100 http://bitbucket.org/pypy/pypy/changeset/5078f6046258/ Log: Update diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -39,3 +39,5 @@ .. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 Fix 3 broken links on PyPy published papers in docs. + +.. 
branch: jit-ordereddict From noreply at buildbot.pypy.org Thu Jan 2 10:26:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Jan 2014 10:26:32 +0100 (CET) Subject: [pypy-commit] stmgc c6: Move this out of the lock Message-ID: <20140102092632.D8B6F1C0722@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c6 Changeset: r591:871467aee622 Date: 2014-01-02 10:23 +0100 http://bitbucket.org/pypy/stmgc/changeset/871467aee622/ Log: Move this out of the lock diff --git a/checkfence/c6/test4.c b/checkfence/c6/test4.c --- a/checkfence/c6/test4.c +++ b/checkfence/c6/test4.c @@ -85,11 +85,11 @@ { if (obj[t].flag_modified) return; /* already modified during this transaction */ + obj[t].flag_modified = true; stm_read(t); int is_leader = acquire_lock_if_leader(t); - obj[t].flag_modified = true; tl[t].n_modified_objects = 1; if (is_leader) { memcpy_obj_without_header(UNDOLOG, t); From noreply at buildbot.pypy.org Thu Jan 2 10:26:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Jan 2014 10:26:33 +0100 (CET) Subject: [pypy-commit] stmgc c7: The "c7" version, based on clang's "%gs" prefix and Linux's Message-ID: <20140102092633.EFCB51C1154@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r592:f9e44ab14c1f Date: 2014-01-02 10:25 +0100 http://bitbucket.org/pypy/stmgc/changeset/f9e44ab14c1f/ Log: The "c7" version, based on clang's "%gs" prefix and Linux's remap_file_pages() in a way that should avoid any large and repeated overheads. From noreply at buildbot.pypy.org Thu Jan 2 10:26:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Jan 2014 10:26:34 +0100 (CET) Subject: [pypy-commit] stmgc c7: Initial checkin of the code from Message-ID: <20140102092634.F04991C12CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r593:a7e3185f3ead Date: 2014-01-02 10:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/a7e3185f3ead/ Log: Initial checkin of the code from https://bitbucket.org/arigo/arigo/raw/default/hack/stm/c7 diff --git a/c7/core.c b/c7/core.c new file mode 100644 --- /dev/null +++ b/c7/core.c @@ -0,0 +1,648 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core.h" +#include "list.h" +#include "pagecopy.h" + + +#define NB_PAGES (256*256) // 256MB +#define NB_THREADS 2 +#define MAP_PAGES_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE) +#define LARGE_OBJECT_WORDS 36 + + +typedef TLPREFIX char localchar_t; +typedef TLPREFIX struct alloc_for_size_s alloc_for_size_t; +typedef TLPREFIX struct _thread_local2_s _thread_local2_t; + + +struct alloc_for_size_s { + localchar_t *next; + uint16_t start, stop; + bool flag_partial_page; +}; + +struct _thread_local2_s { + struct _thread_local1_s _tl1; + int thread_num; + char *thread_base; + struct stm_list_s *modified_objects; + struct stm_list_s *new_object_ranges; + struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; +}; +#define _STM_TL2 ((_thread_local2_t *)_STM_TL1) + +enum { SHARED_PAGE=0, REMAPPING_PAGE, PRIVATE_PAGE }; /* flag_page_private */ + + +static char *object_pages; +static char *undo_log_pages; +static char *undo_log_current; +static int num_threads_started, leader_thread_num; +static uintptr_t index_page_never_used; +static int next_write_version; +static int undo_lock; +static struct stm_list_s *global_history; +static uint16_t gh_write_version_first; +static uint16_t gh_write_version_last; +static uint8_t flag_page_private[NB_PAGES]; /* xxx_PAGE constants above */ + + 
+/************************************************************/ + +static void spin_loop(void) +{ + asm("pause" : : : "memory"); +} + +static void acquire_lock(int *lock) +{ + while (__sync_lock_test_and_set(lock, 1) != 0) { + while (*lock != 0) + spin_loop(); + } +} + +#define ACQUIRE_LOCK_IF(lock, condition) \ +({ \ + bool _acquired = false; \ + while (condition) { \ + if (__sync_lock_test_and_set(lock, 1) == 0) { \ + if (condition) \ + _acquired = true; \ + else \ + __sync_lock_release(lock); \ + break; \ + } \ + spin_loop(); \ + } \ + _acquired; \ +}) + +static void release_lock(int *lock) +{ + __sync_lock_release(lock); +} + +static void write_fence(void) +{ +#if defined(__amd64__) || defined(__i386__) + asm("" : : : "memory"); +#else +# error "Define write_fence() for your architecture" +#endif +} + +static bool _stm_was_read(object_t *obj) +{ + read_marker_t *marker = (read_marker_t *)(((uintptr_t)obj) >> 4); + return (marker->rm == _STM_TL1->transaction_read_version); +} + + +static void _stm_privatize(uintptr_t pagenum) +{ + if (flag_page_private[pagenum] == PRIVATE_PAGE) + return; + + if (!__sync_bool_compare_and_swap(&flag_page_private[pagenum], + SHARED_PAGE, REMAPPING_PAGE)) { + while (flag_page_private[pagenum] == REMAPPING_PAGE) + spin_loop(); + assert(flag_page_private[pagenum] == PRIVATE_PAGE); + return; + } + + ssize_t pgoff1 = pagenum; + ssize_t pgoff2 = pagenum + NB_PAGES; + ssize_t localpgoff = pgoff1 + NB_PAGES * _STM_TL2->thread_num; + ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - _STM_TL2->thread_num); + + void *localpg = object_pages + localpgoff * 4096UL; + void *otherpg = object_pages + otherpgoff * 4096UL; + + int res = remap_file_pages(localpg, 4096, 0, pgoff2, 0); + if (res < 0) { + perror("remap_file_pages"); + abort(); + } + pagecopy(localpg, otherpg); + write_fence(); + assert(flag_page_private[pagenum] == REMAPPING_PAGE); + flag_page_private[pagenum] = PRIVATE_PAGE; +} + + +#define REAL_ADDRESS(object_pages, src) ((object_pages) + (uintptr_t)(src)) + +static char *real_address(uintptr_t src) +{ + return REAL_ADDRESS(_STM_TL2->thread_base, src); +} + +static char *get_thread_base(long thread_num) +{ + return object_pages + thread_num * (NB_PAGES * 4096UL); +} + +void stm_abort_transaction(void); + +enum detect_conflicts_e { CANNOT_CONFLICT, CAN_CONFLICT }; + +/* XXX this can be done by acquiring the undo_lock for much less time, + but it needs to be carefully synchronized with _stm_write_slowpath(). + For now it must be called with the undo_lock acquired. */ +static void update_to_current_version(enum detect_conflicts_e check_conflict) +{ + /* Loop over objects in 'global_history': if they have been + read by the current transaction, the current transaction must + abort; then copy them out of the leader's object space --- + which may have been modified by the leader's uncommitted + transaction; this case will be fixed afterwards. 
+ */ + bool conflict_found_or_dont_check = (check_conflict == CANNOT_CONFLICT); + char *local_base = _STM_TL2->thread_base; + char *remote_base = get_thread_base(1 - _STM_TL2->thread_num); + struct stm_list_s *gh, *gh_next; + + assert(leader_thread_num != _STM_TL2->thread_num); + + for (gh = global_history; gh != NULL; gh = gh_next) { + + STM_LIST_FOREACH(gh, ({ + + if (!conflict_found_or_dont_check) + conflict_found_or_dont_check = _stm_was_read(item); + + char *dst = REAL_ADDRESS(local_base, item); + char *src = REAL_ADDRESS(remote_base, item); + char *src_rebased = src - (uintptr_t)local_base; + size_t size = stm_object_size_rounded_up((object_t *)src_rebased); + + memcpy(dst + sizeof(char *), + src + sizeof(char *), + size - sizeof(char *)); + })); + + gh_next = gh->nextlist; + stm_list_free(gh); + } + global_history = NULL; + gh_write_version_first = 0xffff; + gh_write_version_last = 0; + + /* Finally, loop over objects modified by the leader, + and copy them out of the undo log. + */ + char *undo = undo_log_pages; + char *undo_end = undo_log_current; + + while (undo < undo_end) { + + char *src = undo; + char *dst = *(char **)src; + char *src_rebased = src - (uintptr_t)local_base; + + *(char **)src = *(char **)dst; /* fix the first word of the object in + the undo log, for stm_object_size() */ + size_t size = stm_object_size_rounded_up((object_t *)src_rebased); + + memcpy(dst + sizeof(char *), + src + sizeof(char *), + size - sizeof(char *)); + + undo += size; + } + undo_log_current = undo_log_pages; /* make empty again */ + + if (conflict_found_or_dont_check && check_conflict == CAN_CONFLICT) { + release_lock(&undo_lock); + stm_abort_transaction(); + } +} + +static void maybe_update(enum detect_conflicts_e check_conflict) +{ + if (leader_thread_num != _STM_TL2->thread_num && global_history != NULL) { + acquire_lock(&undo_lock); + update_to_current_version(check_conflict); + release_lock(&undo_lock); + } +} + + +void _stm_write_slowpath(object_t *obj) +{ + maybe_update(CAN_CONFLICT); + + _stm_privatize(((uintptr_t)obj) / 4096); + + stm_read(obj); + + _STM_TL2->modified_objects = stm_list_append(_STM_TL2->modified_objects, obj); + + uint16_t wv = obj->write_version; + obj->write_version = _STM_TL1->transaction_write_version; + + /* We only need to store a copy of the current version of the object if: + - we are the leader; + - the object is present in the global_history. + The second condition is approximated by the following range check. + Storing a few more objects than strictly needed is not really a problem. + */ + /* XXX this can be done without acquiring the undo_lock at all, + but we need more care in update_to_current_version(). */ + + /* XXX can we avoid writing an unbounded number of copies of the + same object in case we run a lot of transactions while the other + thread is busy? Unlikely case but in theory annoying. Should + we anyway bound the undo log's size to much less than NB_PAGES, + and if full here, sleep? Should the bound also count the size + taken by the global_history lists? 
*/ + if (ACQUIRE_LOCK_IF(&undo_lock, + wv <= gh_write_version_last && wv >= gh_write_version_first + && leader_thread_num == _STM_TL2->thread_num)) { + /* record in the undo log a copy of the content of the object */ + size_t size = stm_object_size_rounded_up(obj); + char *source = real_address((uintptr_t)obj); + char *undo = undo_log_current; + *((object_t **)undo) = obj; + memcpy(undo + sizeof(object_t *), + source + sizeof(object_t *), + size - sizeof(object_t *)); + /*write_fence();*/ + undo_log_current = undo + size; + release_lock(&undo_lock); + } +} + + +uintptr_t _stm_reserve_page(void) +{ + /* Grab a free page, initially shared between the threads. */ + + // XXX look in some free list first + + /* Return the index'th object page, which is so far never used. */ + uintptr_t index = __sync_fetch_and_add(&index_page_never_used, 1); + if (index >= NB_PAGES) { + fprintf(stderr, "Out of mmap'ed memory!\n"); + abort(); + } + return index; +} + +#define TO_RANGE(range, start, stop) \ + ((range) = (object_t *)((start) | (((uintptr_t)(stop)) << 16))) + +#define FROM_RANGE(start, stop, range) \ + ((start) = (uint16_t)(uintptr_t)(range), \ + (stop) = ((uintptr_t)(range)) >> 16) + +localchar_t *_stm_alloc_next_page(size_t i) +{ + /* 'alloc->next' points to where the next allocation should go. The + present function is called instead when this next allocation is + equal to 'alloc->stop'. As we know that 'start', 'next' and + 'stop' are always nearby pointers, we play tricks and only store + the lower 16 bits of 'start' and 'stop', so that the three + variables plus some flags fit in 16 bytes. + + 'flag_partial_page' is *cleared* to mean that the 'alloc' + describes a complete page, so that it needs not be listed inside + 'new_object_ranges'. In all other cases it is *set*. 
+ */ + uintptr_t page; + localchar_t *result; + alloc_for_size_t *alloc = &_STM_TL2->alloc[i]; + size_t size = i * 8; + + if (alloc->flag_partial_page) { + /* record this range in 'new_object_ranges' */ + localchar_t *ptr1 = alloc->next - size - 1; + object_t *range; + TO_RANGE(range, alloc->start, alloc->stop); + page = ((uintptr_t)ptr1) / 4096; + _STM_TL2->new_object_ranges = stm_list_append( + _STM_TL2->new_object_ranges, (object_t *)page); + _STM_TL2->new_object_ranges = stm_list_append( + _STM_TL2->new_object_ranges, range); + } + + /* reserve a fresh new page */ + page = _stm_reserve_page(); + + result = (localchar_t *)(page * 4096UL); + alloc->start = (uintptr_t)result; + alloc->stop = alloc->start + (4096 / size) * size; + alloc->next = result + size; + alloc->flag_partial_page = false; + return result; +} + +object_t *stm_allocate(size_t size) +{ + assert(size % 8 == 0); + size_t i = size / 8; + assert(2 <= i && i < LARGE_OBJECT_WORDS);//XXX + alloc_for_size_t *alloc = &_STM_TL2->alloc[i]; + + localchar_t *p = alloc->next; + alloc->next = p + size; + if ((uint16_t)(uintptr_t)p == alloc->stop) + p = _stm_alloc_next_page(i); + + object_t *result = (object_t *)p; + result->write_version = _STM_TL1->transaction_write_version; + return result; +} + + +#define TOTAL_MEMORY (NB_PAGES * 4096UL * (NB_THREADS + 1)) +#define READMARKER_END ((NB_PAGES * 4096UL) >> 4) +#define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) +#define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) +#define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) + +void stm_setup(void) +{ + /* Check that some values are acceptable */ + assert(4096 <= ((uintptr_t)_STM_TL1)); + assert(((uintptr_t)_STM_TL1) == ((uintptr_t)_STM_TL2)); + assert(((uintptr_t)_STM_TL2) + sizeof(*_STM_TL2) <= 8192); + assert(2 <= FIRST_READMARKER_PAGE); + assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); + assert(READMARKER_START < READMARKER_END); + assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); + assert(FIRST_OBJECT_PAGE < NB_PAGES); + + object_pages = mmap(NULL, TOTAL_MEMORY, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS, -1, 0); + if (object_pages == MAP_FAILED) { + perror("object_pages mmap"); + abort(); + } + + long i; + for (i = 0; i < NB_THREADS; i++) { + char *thread_base = get_thread_base(i); + + /* In each thread's section, the first page is where TLPREFIX'ed + NULL accesses land. We mprotect it so that accesses fail. 
*/ + mprotect(thread_base, 4096, PROT_NONE); + + /* Fill the TLS page (page 1) with 0xDD */ + memset(REAL_ADDRESS(thread_base, 4096), 0xDD, 4096); + /* Make a "hole" at _STM_TL1 / _STM_TL2 */ + memset(REAL_ADDRESS(thread_base, _STM_TL2), 0, sizeof(*_STM_TL2)); + + _STM_TL2->thread_num = i; + _STM_TL2->thread_base = thread_base; + + if (i > 0) { + int res; + res = remap_file_pages(thread_base + FIRST_OBJECT_PAGE * 4096UL, + (NB_PAGES - FIRST_OBJECT_PAGE) * 4096UL, + 0, FIRST_OBJECT_PAGE, 0); + if (res != 0) { + perror("remap_file_pages"); + abort(); + } + } + } + + undo_log_pages = get_thread_base(NB_THREADS); + mprotect(undo_log_pages, 4096, PROT_NONE); + mprotect(undo_log_pages + (NB_PAGES - 1) * 4096UL, 4096, PROT_NONE); + undo_log_pages += 4096; + undo_log_current = undo_log_pages; + + num_threads_started = 0; + index_page_never_used = FIRST_OBJECT_PAGE; + next_write_version = 1; + leader_thread_num = 0; + global_history = NULL; + gh_write_version_first = 0xffff; + gh_write_version_last = 0; +} + +#define INVALID_GS_VALUE 0xDDDDDDDDDDDDDDDDUL + +static void set_gs_register(uint64_t value) +{ + int result = syscall(SYS_arch_prctl, ARCH_SET_GS, value); + assert(result == 0); +} + +void stm_setup_thread(void) +{ + int thread_num = __sync_fetch_and_add(&num_threads_started, 1); + assert(thread_num < 2); /* only 2 threads for now */ + + char *thread_base = get_thread_base(thread_num); + set_gs_register((uintptr_t)thread_base); + + assert(_STM_TL2->thread_num == thread_num); + assert(_STM_TL2->thread_base == thread_base); + + _STM_TL2->modified_objects = stm_list_create(); +} + +void _stm_teardown_thread(void) +{ + stm_list_free(_STM_TL2->modified_objects); + _STM_TL2->modified_objects = NULL; + + set_gs_register(INVALID_GS_VALUE); +} + +void _stm_teardown(void) +{ + munmap(object_pages, TOTAL_MEMORY); + object_pages = NULL; + undo_log_pages = NULL; + undo_log_current = NULL; +} + + +static void reset_transaction_read_version(void) +{ + /* force-reset all read markers to 0 */ + int res = madvise(real_address(FIRST_READMARKER_PAGE * 4096UL), + (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) * 4096UL, + MADV_DONTNEED); + if (res < 0) { + perror("madvise"); + abort(); + } + _STM_TL1->transaction_read_version = 0; +} + +void stm_major_collection(void) +{ + abort(); +} + +void stm_start_transaction(jmp_buf *jmpbufptr) +{ + if (_STM_TL1->transaction_read_version == 0xff) + reset_transaction_read_version(); + _STM_TL1->transaction_read_version++; + _STM_TL1->jmpbufptr = NULL; + + while (1) { + int wv = __sync_fetch_and_add(&next_write_version, 1); + if (LIKELY(wv <= 0xffff)) { + _STM_TL1->transaction_write_version = wv; + break; + } + /* We run out of 16-bit numbers before we do the next major + collection, which resets it. XXX This case seems unlikely + for now, but check if it could become a bottleneck at some + point. 
*/ + stm_major_collection(); + } + assert(stm_list_is_empty(_STM_TL2->modified_objects)); + assert(stm_list_is_empty(_STM_TL2->new_object_ranges)); + + maybe_update(CANNOT_CONFLICT); /* no read object: cannot conflict */ + + _STM_TL1->jmpbufptr = jmpbufptr; +} + +static void update_new_objects_in_other_threads(uintptr_t pagenum, + uint16_t start, uint16_t stop) +{ + size_t size = (uint16_t)(stop - start); + assert(size <= 4096 - (start & 4095)); + assert((start & ~4095) == (uint16_t)(pagenum * 4096)); + + int thread_num = _STM_TL2->thread_num; + uintptr_t local_src = (pagenum * 4096UL) + (start & 4095); + char *dst = REAL_ADDRESS(get_thread_base(1 - thread_num), local_src); + char *src = REAL_ADDRESS(_STM_TL2->thread_base, local_src); + + memcpy(dst, src, size); +} + +void stm_stop_transaction(void) +{ + write_fence(); /* see later in this function for why */ + + acquire_lock(&undo_lock); + + if (leader_thread_num != _STM_TL2->thread_num) { + /* non-leader thread */ + if (global_history != NULL) { + update_to_current_version(CAN_CONFLICT); + assert(global_history == NULL); + } + + /* steal leadership now */ + leader_thread_num = _STM_TL2->thread_num; + } + + /* now we are the leader thread. the leader can always commit */ + _STM_TL1->jmpbufptr = NULL; /* cannot abort any more */ + undo_log_current = undo_log_pages; /* throw away the content */ + + /* add these objects to the global_history */ + _STM_TL2->modified_objects->nextlist = global_history; + global_history = _STM_TL2->modified_objects; + _STM_TL2->modified_objects = stm_list_create(); + + uint16_t wv = _STM_TL1->transaction_write_version; + if (wv < gh_write_version_last) gh_write_version_last = wv; + if (wv > gh_write_version_first) gh_write_version_first = wv; + + /* walk the new_object_ranges and manually copy the new objects + to the other thread's pages in the (hopefully rare) case that + the page they belong to is already unshared */ + long i; + struct stm_list_s *lst = _STM_TL2->new_object_ranges; + for (i = stm_list_count(lst); i > 0; ) { + i -= 2; + uintptr_t pagenum = (uintptr_t)stm_list_item(lst, i); + + /* NB. the read next line should work even against a parallel + thread, thanks to the lock acquisition we do earlier (see the + beginning of this function). Indeed, if this read returns + SHARED_PAGE, then we know that the real value in memory was + actually SHARED_PAGE at least at the time of the + acquire_lock(). It may have been modified afterwards by a + compare_and_swap() in the other thread, but then we know for + sure that the other thread is seeing the last, up-to-date + version of our data --- this is the reason of the + write_fence() just before the acquire_lock(). + */ + if (flag_page_private[pagenum] != SHARED_PAGE) { + object_t *range = stm_list_item(lst, i + 1); + uint16_t start, stop; + FROM_RANGE(start, stop, range); + update_new_objects_in_other_threads(pagenum, start, stop); + } + } + + /* do the same for the partially-allocated pages */ + long j; + for (j = 2; j < LARGE_OBJECT_WORDS; j++) { + alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; + uint16_t start = alloc->start; + uint16_t cur = (uintptr_t)alloc->next; + + if (start == cur) { + /* nothing to do: this assigned page was left empty by the + previous transaction, and also starts empty in the new + transaction. 'flag_partial_page' is unchanged. 
*/ + } + else { + uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; + /* for the new transaction, it will start here: */ + alloc->start = cur; + + if (alloc->flag_partial_page) { + if (flag_page_private[pagenum] != SHARED_PAGE) { + update_new_objects_in_other_threads(pagenum, start, cur); + } + } + else { + /* we can skip checking page->private_page because the + whole page can only contain objects made by the just- + finished transaction. */ + assert(flag_page_private[pagenum] == SHARED_PAGE); + + /* the next transaction will start with this page + containing objects that are now committed, so + we need to set this flag now */ + alloc->flag_partial_page = true; + } + } + } + + release_lock(&undo_lock); +} + +void stm_abort_transaction(void) +{ + long j; + for (j = 2; j < LARGE_OBJECT_WORDS; j++) { + alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; + uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; + alloc->next -= num_allocated; + } + stm_list_clear(_STM_TL2->new_object_ranges); + stm_list_clear(_STM_TL2->modified_objects); + assert(_STM_TL1->jmpbufptr != NULL); + assert(_STM_TL1->jmpbufptr != (jmp_buf *)-1); /* for tests only */ + longjmp(*_STM_TL1->jmpbufptr, 1); +} diff --git a/c7/core.h b/c7/core.h new file mode 100644 --- /dev/null +++ b/c7/core.h @@ -0,0 +1,58 @@ +#ifndef _STM_CORE_H +#define _STM_CORE_H + +#include +#include +#include + + +#define TLPREFIX __attribute__((address_space(256))) + +typedef TLPREFIX struct _thread_local1_s _thread_local1_t; +typedef TLPREFIX struct object_s object_t; +typedef TLPREFIX struct read_marker_s read_marker_t; + + +struct object_s { + uint16_t write_version; + /*uint8_t stm_flags;*/ +}; + +struct read_marker_s { + uint8_t rm; +}; + +struct _thread_local1_s { + jmp_buf *jmpbufptr; + uint8_t transaction_read_version; + uint16_t transaction_write_version; +}; +#define _STM_TL1 ((_thread_local1_t *)4352) + + +/* this should use llvm's coldcc calling convention, + but it's not exposed to C code so far */ +void _stm_write_slowpath(object_t *); + +#define LIKELY(x) __builtin_expect(x, true) +#define UNLIKELY(x) __builtin_expect(x, false) + + +static inline void stm_read(object_t *obj) +{ + ((read_marker_t *)(((uintptr_t)obj) >> 4))->rm = + _STM_TL1->transaction_read_version; +} + +static inline void stm_write(object_t *obj) +{ + if (UNLIKELY(obj->write_version != _STM_TL1->transaction_write_version)) + _stm_write_slowpath(obj); +} + + +/* must be provided by the user of this library */ +extern size_t stm_object_size_rounded_up(object_t *); + + +#endif diff --git a/c7/list.c b/c7/list.c new file mode 100644 --- /dev/null +++ b/c7/list.c @@ -0,0 +1,38 @@ +#include +#include +#include + +#include "list.h" + + +#define SETSIZE(n) (sizeof(struct stm_list_s) + ITEMSSIZE(n)) +#define ITEMSSIZE(n) ((n) * sizeof(object_t*)) +#define OVERCNT(n) (33 + ((((n) / 2) * 3) | 1)) + +struct stm_list_s *stm_list_create(void) +{ + uintptr_t initial_allocation = 32; + struct stm_list_s *lst = malloc(SETSIZE(initial_allocation)); + if (lst == NULL) { + perror("out of memory in stm_list_create"); + abort(); + } + lst->count = 0; + lst->last_allocated = initial_allocation - 1; + assert(lst->last_allocated & 1); + return lst; +} + +struct stm_list_s *_stm_list_grow(struct stm_list_s *lst, uintptr_t nalloc) +{ + assert(lst->last_allocated & 1); + nalloc = OVERCNT(nalloc); + lst = realloc(lst, SETSIZE(nalloc)); + if (lst == NULL) { + perror("out of memory in _stm_list_grow"); + abort(); + } + lst->last_allocated = nalloc - 1; + 
assert(lst->last_allocated & 1); + return lst; +} diff --git a/c7/list.h b/c7/list.h new file mode 100644 --- /dev/null +++ b/c7/list.h @@ -0,0 +1,67 @@ +#ifndef _STM_LIST_H +#define _STM_LIST_H + +#include "core.h" + + +struct stm_list_s { + uintptr_t count; + union { + uintptr_t last_allocated; /* always odd */ + struct stm_list_s *nextlist; /* always even */ + }; + object_t *items[]; +}; + +struct stm_list_s *stm_list_create(void); + +static inline void stm_list_free(struct stm_list_s *lst) +{ + free(lst); +} + + +struct stm_list_s *_stm_list_grow(struct stm_list_s *, uintptr_t); + +static inline struct stm_list_s * +stm_list_append(struct stm_list_s *lst, object_t *item) +{ + uintptr_t index = lst->count++; + if (UNLIKELY(index > lst->last_allocated)) + lst = _stm_list_grow(lst, index); + lst->items[index] = item; + return lst; +} + +static inline void stm_list_clear(struct stm_list_s *lst) +{ + lst->count = 0; +} + +static inline bool stm_list_is_empty(struct stm_list_s *lst) +{ + return (lst->count == 0); +} + +static inline bool stm_list_count(struct stm_list_s *lst) +{ + return lst->count; +} + +static inline object_t *stm_list_item(struct stm_list_s *lst, uintptr_t index) +{ + return lst->items[index]; +} + +#define STM_LIST_FOREACH(lst, CODE) \ + do { \ + struct stm_list_s *_lst = (lst); \ + uintptr_t _i; \ + for (_i = _lst->count; _i--; ) { \ + object_t *item = _lst->items[_i]; \ + CODE; \ + } \ + } while (0) + + +#endif diff --git a/c7/pagecopy.c b/c7/pagecopy.c new file mode 100644 --- /dev/null +++ b/c7/pagecopy.c @@ -0,0 +1,57 @@ + +void pagecopy(void *dest, const void *src) +{ + unsigned long i; + for (i=0; i<4096/128; i++) { + asm volatile("movdqa (%0), %%xmm0\n" + "movdqa 16(%0), %%xmm1\n" + "movdqa 32(%0), %%xmm2\n" + "movdqa 48(%0), %%xmm3\n" + "movdqa %%xmm0, (%1)\n" + "movdqa %%xmm1, 16(%1)\n" + "movdqa %%xmm2, 32(%1)\n" + "movdqa %%xmm3, 48(%1)\n" + "movdqa 64(%0), %%xmm0\n" + "movdqa 80(%0), %%xmm1\n" + "movdqa 96(%0), %%xmm2\n" + "movdqa 112(%0), %%xmm3\n" + "movdqa %%xmm0, 64(%1)\n" + "movdqa %%xmm1, 80(%1)\n" + "movdqa %%xmm2, 96(%1)\n" + "movdqa %%xmm3, 112(%1)\n" + : + : "r"(src + 128*i), "r"(dest + 128*i) + : "xmm0", "xmm1", "xmm2", "xmm3", "memory"); + } +} + +#if 0 /* XXX enable if detected on the cpu */ +void pagecopy_ymm8(void *dest, const void *src) +{ + asm volatile("0:\n" + "vmovdqa (%0), %%ymm0\n" + "vmovdqa 32(%0), %%ymm1\n" + "vmovdqa 64(%0), %%ymm2\n" + "vmovdqa 96(%0), %%ymm3\n" + "vmovdqa 128(%0), %%ymm4\n" + "vmovdqa 160(%0), %%ymm5\n" + "vmovdqa 192(%0), %%ymm6\n" + "vmovdqa 224(%0), %%ymm7\n" + "addq $256, %0\n" + "vmovdqa %%ymm0, (%1)\n" + "vmovdqa %%ymm1, 32(%1)\n" + "vmovdqa %%ymm2, 64(%1)\n" + "vmovdqa %%ymm3, 96(%1)\n" + "vmovdqa %%ymm4, 128(%1)\n" + "vmovdqa %%ymm5, 160(%1)\n" + "vmovdqa %%ymm6, 192(%1)\n" + "vmovdqa %%ymm7, 224(%1)\n" + "addq $256, %1\n" + "cmpq %2, %0\n" + "jne 0b" + : "=r"(src), "=r"(dest) + : "r"((char *)src + 4096), "0"(src), "1"(dest) + : "xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", "xmm6", "xmm7"); +} +#endif diff --git a/c7/pagecopy.h b/c7/pagecopy.h new file mode 100644 --- /dev/null +++ b/c7/pagecopy.h @@ -0,0 +1,2 @@ + +void pagecopy(void *dest, const void *src); From noreply at buildbot.pypy.org Thu Jan 2 12:32:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Jan 2014 12:32:07 +0100 (CET) Subject: [pypy-commit] stmgc c7: Comments Message-ID: <20140102113207.5104A1C00B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r594:a0efd230208b Date: 2014-01-02 11:57 +0100 
http://bitbucket.org/pypy/stmgc/changeset/a0efd230208b/

Log:	Comments

diff --git a/c7/core.c b/c7/core.c
--- a/c7/core.c
+++ b/c7/core.c
@@ -478,6 +478,18 @@
 static void reset_transaction_read_version(void)
 {
     /* force-reset all read markers to 0 */
+
+    /* XXX measure the time taken by this madvise() and the following
+       zeroing of pages done lazily by the kernel; compare it with using
+       16-bit read_versions.
+    */
+    /* XXX try to use madvise() on smaller ranges of memory.  In my
+       measurements, we could gain a factor of 2 --- not really more, even if
+       the range of virtual addresses below is very large, as long as it
+       is already mostly non-reserved pages.  (The following call keeps
+       them non-reserved; apparently the kernel just skips them very
+       quickly.)
+    */
     int res = madvise(real_address(FIRST_READMARKER_PAGE * 4096UL),
                       (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) * 4096UL,
                       MADV_DONTNEED);
@@ -601,9 +613,10 @@
         uint16_t cur = (uintptr_t)alloc->next;
 
         if (start == cur) {
-            /* nothing to do: this assigned page was left empty by the
-               previous transaction, and also starts empty in the new
-               transaction.  'flag_partial_page' is unchanged. */
+            /* nothing to do: this page (or fraction thereof) was left
+               empty by the previous transaction, and starts empty as
+               well in the new transaction.  'flag_partial_page' is
+               unchanged. */
         }
         else {
             uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL;
@@ -616,9 +629,9 @@
             }
             else {
-                /* we can skip checking page->private_page because the
-                   whole page can only contain objects made by the just-
-                   finished transaction. */
+                /* we can skip checking flag_page_private[] in non-debug
+                   builds, because the whole page can only contain
+                   objects made by the just-finished transaction. */
                 assert(flag_page_private[pagenum] == SHARED_PAGE);
 
                 /* the next transaction will start with this page
diff --git a/c7/core.h b/c7/core.h
--- a/c7/core.h
+++ b/c7/core.h
@@ -13,9 +13,28 @@
 typedef TLPREFIX struct read_marker_s read_marker_t;
 
+/* Structure of objects
+   --------------------
+
+   Objects manipulated by the user program, and managed by this library,
+   must start with a "struct object_s" field.  Pointers to any user object
+   must use the "TLPREFIX struct foo *" type --- don't forget TLPREFIX.
+   It is best to use typedefs like the ones above.
+
+   The object_s part contains some fields reserved for the STM library,
+   as well as a 32-bit integer field that can be freely used by the user
+   program.  However, right now this field must be read-only --- i.e. it
+   must never be modified on any object that may already belong to a
+   past transaction; you can only set it on just-allocated objects.  It
+   is best to consider it as a field that is written only once, on
+   newly allocated objects.
+*/ + struct object_s { - uint16_t write_version; + uint16_t write_version; /* reserved for the STM library */ /*uint8_t stm_flags;*/ + uint32_t header; /* for the user program -- only write in + newly allocated objects */ }; struct read_marker_s { From noreply at buildbot.pypy.org Fri Jan 3 16:45:40 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 3 Jan 2014 16:45:40 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: fix switching to 32bit depth in the mini-image Message-ID: <20140103154540.4BCF61C010E@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r547:bafeaf582cff Date: 2014-01-03 09:28 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/bafeaf582cff/ Log: fix switching to 32bit depth in the mini-image diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -965,7 +965,7 @@ return w_rcvr def fake_bytes_left(interp): - return interp.space.wrap_int(2**20) # XXX we don't know how to do this :-( + return interp.space.wrap_int(2**29) # XXX we don't know how to do this :-( @expose_primitive(SPECIAL_OBJECTS_ARRAY, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): @@ -974,7 +974,8 @@ @expose_primitive(INC_GC, unwrap_spec=[object]) @expose_primitive(FULL_GC, unwrap_spec=[object]) @jit.dont_look_inside -def func(interp, s_frame, w_arg): # Squeak pops the arg and ignores it ... go figure +# def func(interp, s_frame, w_arg): # Squeak pops the arg and ignores it ... go figure +def func(interp, s_frame, w_rcvr): from rpython.rlib import rgc rgc.collect() return fake_bytes_left(interp) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1425,7 +1425,7 @@ nPix = startBits words = self.nWords # Here is the horizontal loop... 
- for word in range(words + 1): + for word in range(words): skewWord = self.pickSourcePixels(nPix, sourcePixMask, destPixMask, srcShiftInc, dstShiftInc) # align next word to leftmost pixel self.dstBitShift = dstShiftLeft @@ -1460,7 +1460,7 @@ nPix = nPixels # always > 0 so we can use do { } while(--nPix); if (self.w_cmLookupTable): # a little optimization for (pretty crucial) blits using indexed lookups only - for px in range(nPix + 1): + for px in range(nPix): sourcePix = self.rshift(rarithmetic.r_uint(sourceWord), srcShift) & srcMask destPix = self.w_cmLookupTable.getword(rarithmetic.intmask(sourcePix & self.cmMask)) # adjust dest pix index @@ -1693,6 +1693,8 @@ w_self._shadow = None raise error.PrimitiveFailedError self.w_bits = self.fetch(0) + if self.w_bits is self.space.w_nil: + return if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): w_self = self.w_self() assert isinstance(w_self, model.W_PointersObject) @@ -1716,7 +1718,8 @@ self.pixPerWord = 32 / self.depth self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 if self.w_bits.size() != (self.pitch * self.height): - raise error.PrimitiveFailedError() + # raise error.PrimitiveFailedError() + pass # - we'll be updated again # def replace_bits(self): # w_bits = self.w_bits From noreply at buildbot.pypy.org Fri Jan 3 16:45:41 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 3 Jan 2014 16:45:41 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: merge upstream Message-ID: <20140103154541.85CF21C0162@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r548:2bd14f5f88a1 Date: 2014-01-03 09:50 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/2bd14f5f88a1/ Log: merge upstream diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -599,14 +599,6 @@ from spyvm.shadow import ObserveeShadow return self.as_special_get_shadow(space, ObserveeShadow) - def as_bitblt_get_shadow(self, space): - from spyvm.shadow import BitBltShadow - return self.as_special_get_shadow(space, BitBltShadow) - - def as_form_get_shadow(self, space): - from spyvm.shadow import FormShadow - return self.as_special_get_shadow(space, FormShadow) - def has_shadow(self): return self._shadow is not None diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py new file mode 100644 --- /dev/null +++ b/spyvm/plugins/bitblt.py @@ -0,0 +1,633 @@ +from spyvm import model +from spyvm.error import PrimitiveFailedError +from spyvm.shadow import AbstractCachingShadow +from spyvm.plugins.plugin import Plugin + +from rpython.rlib import rarithmetic, jit + + +BitBltPlugin = Plugin() + + at BitBltPlugin.expose_primitive(unwrap_spec=[object], clean_stack=False) +def primitiveCopyBits(interp, s_frame, w_rcvr): + from spyvm.interpreter import Return + if not isinstance(w_rcvr, model.W_PointersObject) or w_rcvr.size() < 15: + raise PrimitiveFailedError + + # only allow combinationRules 0-41 + combinationRule = interp.space.unwrap_positive_32bit_int(w_rcvr.fetch(interp.space, 3)) + if combinationRule > 41: + raise PrimitiveFailedError + + space = interp.space + s_bitblt = w_rcvr.as_special_get_shadow(space, BitBltShadow) + s_bitblt.copyBits() + + w_dest_form = w_rcvr.fetch(space, 0) + if (combinationRule == 22 or combinationRule == 32): + s_frame.pop() # pops the next value under BitBlt + s_frame.push(interp.space.wrap_int(s_bitblt.bitCount)) + elif w_dest_form.is_same_object(space.objtable['w_display']): + w_bitmap = 
w_dest_form.fetch(space, 0) + assert isinstance(w_bitmap, model.W_DisplayBitmap) + w_bitmap.flush_to_screen() + return w_rcvr + + def as_bitblt_get_shadow(self, space): + return + + +class BitBltShadow(AbstractCachingShadow): + WordSize = 32 + MaskTable = [rarithmetic.r_uint(0)] + for i in xrange(WordSize): + MaskTable.append(rarithmetic.r_uint((2 ** (i + 1)) - 1)) + AllOnes = rarithmetic.r_uint(0xFFFFFFFF) + + def sync_cache(self): + self.loadBitBlt() + + def intOrIfNil(self, w_int, i): + if w_int is self.space.w_nil: + return i + else: + return self.space.unwrap_int(w_int) + + def loadForm(self, w_form): + try: + if not isinstance(w_form, model.W_PointersObject): + raise PrimitiveFailedError() + s_form = w_form.as_special_get_shadow(self.space, FormShadow) + if not isinstance(s_form, FormShadow): + raise PrimitiveFailedError() + return s_form + except PrimitiveFailedError, e: + w_self = self.w_self() + assert isinstance(w_self, model.W_PointersObject) + w_self._shadow = None + raise e + + def loadHalftone(self, w_halftone_form): + if w_halftone_form is self.space.w_nil: + return None + elif isinstance(w_halftone_form, model.W_WordsObject): + # Already a bitmap + return w_halftone_form.words + else: + assert isinstance(w_halftone_form, model.W_PointersObject) + w_bits = w_halftone_form.as_special_get_shadow(self.space, FormShadow).w_bits + assert isinstance(w_bits, model.W_WordsObject) + return w_bits.words + + def loadColorMap(self, w_color_map): + if isinstance(w_color_map, model.W_WordsObject): + self.w_cmLookupTable = w_color_map + self.cmMask = self.w_cmLookupTable.size() - 1 + else: + self.w_cmLookupTable = None + + def loadBitBlt(self): + self.success = True + self.w_destForm = self.fetch(0) + self.dest = self.loadForm(self.w_destForm) + self.w_sourceForm = self.fetch(1) + if self.w_sourceForm is not self.space.w_nil: + self.source = self.loadForm(self.w_sourceForm) + else: + self.source = None + self.halftone = self.loadHalftone(self.fetch(2)) + self.combinationRule = self.space.unwrap_int(self.fetch(3)) + self.destX = self.intOrIfNil(self.fetch(4), 0) + self.destY = self.intOrIfNil(self.fetch(5), 0) + self.width = self.intOrIfNil(self.fetch(6), self.dest.width) + self.height = self.intOrIfNil(self.fetch(7), self.dest.height) + self.clipX = self.intOrIfNil(self.fetch(10), 0) + self.clipY = self.intOrIfNil(self.fetch(11), 0) + self.clipW = self.intOrIfNil(self.fetch(12), self.width) + self.clipH = self.intOrIfNil(self.fetch(13), self.height) + if not self.source: + self.sourceX = 0 + self.sourceY = 0 + else: + self.loadColorMap(self.fetch(14)) + self.sourceX = self.intOrIfNil(self.fetch(8), 0) + self.sourceY = self.intOrIfNil(self.fetch(9), 0) + + def copyBits(self): + self.bitCount = 0 + self.clipRange() + if (self.bbW <= 0 or self.bbH <= 0): + return + self.destMaskAndPointerInit() + if not self.source: + self.copyLoopNoSource() + else: + self.checkSourceOverlap() + if self.source.depth != self.dest.depth: + self.copyLoopPixMap() + else: + self.sourceSkewAndPointerInit() + self.copyLoop() + + def checkSourceOverlap(self): + if (self.w_sourceForm is self.w_destForm and self.dy >= self.sy): + if (self.dy > self.sy): + self.vDir = -1 + self.sy = (self.sy + self.bbH) - 1 + self.dy = (self.dy + self.bbH) - 1 + else: + if (self.dy == self.sy and self.dx > self.sx): + self.hDir = -1 + self.sx = (self.sx + self.bbW) - 1 # start at right + self.dx = (self.dx + self.bbW) - 1 + if (self.nWords > 1): + t = self.mask1 # and fix up masks + self.mask1 = self.mask2 + self.mask2 = t + 
self.destIndex = (self.dy * self.dest.pitch) + (self.dx / self.dest.pixPerWord | 0) # recompute since dx, dy change + self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) + + def sourceSkewAndPointerInit(self): + pixPerM1 = self.dest.pixPerWord - 1 # Pix per word is power of two, so self makes a mask + sxLowBits = self.sx & pixPerM1 + dxLowBits = self.dx & pixPerM1 + # check if need to preload buffer + # (i.e., two words of source needed for first word of destination) + dWid = -1 + if (self.hDir > 0): + if self.bbW < (self.dest.pixPerWord - dxLowBits): + dWid = self.bbW + else: + dWid = self.dest.pixPerWord - dxLowBits + self.preload = (sxLowBits + dWid) > pixPerM1 + else: + if self.bbW < (dxLowBits + 1): + dWid = self.bbW + else: + dWid = dxLowBits + 1 + self.preload = ((sxLowBits - dWid) + 1) < 0 + + if self.source.msb: + self.skew = (sxLowBits - dxLowBits) * self.dest.depth + else: + self.skew = (dxLowBits - sxLowBits) * self.dest.depth + if (self.preload): + if (self.skew < 0): + self.skew += 32 + else: + self.skew -= 32 + # calculate increments from end of one line to start of next + self.sourceIndex = (self.sy * self.source.pitch) + (self.sx / (32 / self.source.depth) |0) + self.sourceDelta = (self.source.pitch * self.vDir) - (self.nWords * self.hDir) + if (self.preload): + self.sourceDelta -= self.hDir + + def clipRange(self): + # intersect with destForm bounds + if self.clipX < 0: + self.clipW += self.clipX + self.clipX = 0 + if self.clipY < 0: + self.clipH += self.clipY + self.clipY = 0 + if self.clipX + self.clipW > self.dest.width: + self.clipW = self.dest.width - self.clipX + if self.clipY + self.clipH > self.dest.height: + self.clipH = self.dest.height - self.clipY + # intersect with clipRect + leftOffset = max(self.clipX - self.destX, 0) + self.sx = self.sourceX + leftOffset + self.dx = self.destX + leftOffset + self.bbW = self.width - leftOffset + rightOffset = (self.dx + self.bbW) - (self.clipX + self.clipW) + if rightOffset > 0: + self.bbW -= rightOffset + topOffset = max(self.clipY - self.destY, 0) + self.sy = self.sourceY + topOffset + self.dy = self.destY + topOffset + self.bbH = self.height - topOffset + bottomOffset = (self.dy + self.bbH) - (self.clipY + self.clipH) + if bottomOffset > 0: + self.bbH -= bottomOffset + # intersect with sourceForm bounds + if not self.source: + return + if self.sx < 0: + self.dx -= self.sx + self.bbW += self.sx + self.sx = 0 + if (self.sx + self.bbW) > self.source.width: + self.bbW -= (self.sx + self.bbW) - self.source.width + if self.sy < 0: + self.dy -= self.sy + self.bbH += self.sy + self.sy = 0 + if (self.sy + self.bbH) > self.source.height: + self.bbH -= (self.sy + self.bbH) - self.source.height + + def rshift(self, val, n): + # return rarithmetic.r_uint(val >> n if val >= 0 else (val + 0x100000000) >> n) + return rarithmetic.r_uint(rarithmetic.r_uint(val) >> n & BitBltShadow.AllOnes) + + def destMaskAndPointerInit(self): + pixPerM1 = self.dest.pixPerWord - 1 # pixPerWord is power-of-two, so this makes a mask + startBits = self.dest.pixPerWord - (self.dx & pixPerM1) # how many px in 1st word + endBits = (((self.dx + self.bbW) - 1) & pixPerM1) + 1 + if self.dest.msb: + self.mask1 = self.rshift(BitBltShadow.AllOnes, (32 - (startBits * self.dest.depth))) + self.mask2 = BitBltShadow.AllOnes << (32 - (endBits * self.dest.depth)) + else: + self.mask1 = BitBltShadow.AllOnes << (32 - (startBits * self.dest.depth)) + self.mask2 = self.rshift(BitBltShadow.AllOnes, (32 - (endBits * self.dest.depth))) + if self.bbW < 
startBits: + self.mask1 = self.mask1 & self.mask2 + self.mask2 = 0 + self.nWords = 1 + else: + self.nWords = (((self.bbW - startBits) + pixPerM1) / self.dest.pixPerWord | 0) + 1 + self.hDir = 1 + self.vDir = 1 + self.destIndex = (self.dy * self.dest.pitch) + (self.dx / self.dest.pixPerWord | 0) + self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) + + def copyLoopNoSource(self): + halftoneWord = BitBltShadow.AllOnes + for i in range(self.bbH): + if self.halftone: + halftoneWord = rarithmetic.r_uint(self.halftone[(self.dy + i) % len(self.halftone)]) + # first word in row is masked + destMask = self.mask1 + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + self.destIndex += 1 + destMask = BitBltShadow.AllOnes + # the central horizontal loop requires no store masking + if self.combinationRule == 3: # store rule requires no dest merging + for word in range(2, self.nWords): + self.dest.w_bits.setword(self.destIndex, halftoneWord) + self.destIndex += 1 + else: + for word in range(2, self.nWords): + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(halftoneWord, destWord) + self.dest.w_bits.setword(self.destIndex, mergeWord) + self.destIndex += 1 + # last word in row is masked + if self.nWords > 1: + destMask = self.mask2 + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + self.destIndex += 1 + self.destIndex += self.destDelta + + def copyLoopPixMap(self): + # This version of the inner loop maps source pixels + # to a destination form with different depth. Because it is already + # unweildy, the loop is not unrolled as in the other versions. + # Preload, skew and skewMask are all overlooked, since pickSourcePixels + # delivers its destination word already properly aligned. + # Note that pickSourcePixels could be copied in-line at the top of + # the horizontal loop, and some of its inits moved out of the loop. + # + # The loop has been rewritten to use only one pickSourcePixels call. + # The idea is that the call itself could be inlined. If we decide not + # to inline pickSourcePixels we could optimize the loop instead. + sourcePixMask = BitBltShadow.MaskTable[self.source.depth] + destPixMask = BitBltShadow.MaskTable[self.dest.depth] + self.sourceIndex = (self.sy * self.source.pitch) + (self.sx / self.source.pixPerWord | 0) + scrStartBits = self.source.pixPerWord - (self.sx & (self.source.pixPerWord - 1)) + if self.bbW < scrStartBits: + nSourceIncs = 0 + else: + nSourceIncs = ((self.bbW - scrStartBits) / self.source.pixPerWord | 0) + 1 + # Note following two items were already calculated in destmask setup! + self.sourceDelta = self.source.pitch - nSourceIncs + startBits = self.dest.pixPerWord - (self.dx & (self.dest.pixPerWord - 1)) + endBits = (((self.dx + self.bbW) - 1) & (self.dest.pixPerWord - 1)) + 1 + if self.bbW < startBits: + startBits = self.bbW # ?! 
+ srcShift = (self.sx & (self.source.pixPerWord - 1)) * self.source.depth + dstShift = (self.dx & (self.dest.pixPerWord - 1)) * self.dest.depth + srcShiftInc = self.source.depth + dstShiftInc = self.dest.depth + dstShiftLeft = 0 + if (self.source.msb): + srcShift = (32 - self.source.depth) - srcShift + srcShiftInc = -srcShiftInc + + if (self.dest.msb): + dstShift = (32 - self.dest.depth) - dstShift + dstShiftInc = -dstShiftInc + dstShiftLeft = 32 - self.dest.depth + + for i in range(self.bbH): + if self.halftone: + halftoneWord = rarithmetic.r_uint(self.halftone[(self.dy + i) % len(self.halftone)]) + else: + halftoneWord = BitBltShadow.AllOnes + self.srcBitShift = srcShift + self.dstBitShift = dstShift + self.destMask = self.mask1 + nPix = startBits + words = self.nWords + # Here is the horizontal loop... + for word in range(words): + skewWord = self.pickSourcePixels(nPix, sourcePixMask, destPixMask, srcShiftInc, dstShiftInc) + # align next word to leftmost pixel + self.dstBitShift = dstShiftLeft + if self.destMask == BitBltShadow.AllOnes: # avoid read-modify-write + self.dest.w_bits.setword( + self.destIndex, + self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) + ) + else: # General version using dest masking + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(skewWord & halftoneWord, destWord & self.destMask) + destWord = (self.destMask & mergeWord) | (destWord & (~self.destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + + self.destIndex += 1 + if (words == 2): # is the next word the last word? + self.destMask = self.mask2 + nPix = endBits + else: # use fullword mask for inner loop + self.destMask = BitBltShadow.AllOnes + nPix = self.dest.pixPerWord + self.sourceIndex += self.sourceDelta + self.destIndex += self.destDelta + + def pickSourcePixels(self, nPixels, srcMask, dstMask, srcShiftInc, dstShiftInc): + # Pick nPix pixels starting at srcBitIndex from the source, map by the + # color map, and justify them according to dstBitIndex in the resulting destWord. 
+ sourceWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) + destWord = 0 + srcShift = self.srcBitShift # put into temp for speed + dstShift = self.dstBitShift + nPix = nPixels + # always > 0 so we can use do { } while(--nPix); + if (self.w_cmLookupTable): # a little optimization for (pretty crucial) blits using indexed lookups only + for px in range(nPix): + sourcePix = self.rshift(rarithmetic.r_uint(sourceWord), srcShift) & srcMask + destPix = self.w_cmLookupTable.getword(rarithmetic.intmask(sourcePix & self.cmMask)) + # adjust dest pix index + destWord = destWord | ((destPix & dstMask) << dstShift) + # adjust source pix index + dstShift += dstShiftInc + srcShift += srcShiftInc + if srcShift & rarithmetic.r_uint(0xFFFFFFE0): + if (self.source.msb): + srcShift += 32 + else: + srcShift -= 32 + self.sourceIndex += 1 + sourceWord = self.source.w_bits.getword(self.sourceIndex) + else: + raise PrimitiveFailedError() + self.srcBitShift = srcShift # Store back + return destWord + + def rotate32bit(self, thisWord, prevWord, skewMask, notSkewMask, unskew): + if unskew < 0: + rotated = self.rshift(rarithmetic.r_uint(prevWord & notSkewMask), -unskew) + else: + rotated = (prevWord & notSkewMask) << unskew + if self.skew < 0: + rotated = rotated | self.rshift(rarithmetic.r_uint(thisWord & skewMask), -self.skew) + else: + rotated = rotated | (thisWord & skewMask) << self.skew + return rotated + + def copyLoop(self): + # self version of the inner loop assumes we do have a source + sourceLimit = self.source.w_bits.size() + hInc = self.hDir + # init skew (the difference in word alignment of source and dest) + if (self.skew == -32): + self.skew = unskew = 0 + skewMask = rarithmetic.r_uint(0) + else: + if (self.skew < 0): + unskew = self.skew + 32 + skewMask = rarithmetic.r_uint(BitBltShadow.AllOnes << -self.skew) + else: + if (self.skew == 0): + unskew = 0 + skewMask = BitBltShadow.AllOnes + else: + unskew = self.skew - 32 + skewMask = self.rshift(BitBltShadow.AllOnes, self.skew) + notSkewMask = rarithmetic.r_uint(~skewMask) + + # init halftones + if (self.halftone): + halftoneWord = rarithmetic.r_uint(self.halftone[0]) + halftoneHeight = len(self.halftone) + else: + halftoneWord = BitBltShadow.AllOnes + halftoneHeight = 0 + + # now loop over all lines + y = self.dy + for i in range(1, self.bbH + 1): + if (halftoneHeight > 1): + halftoneWord = rarithmetic.r_uint(self.halftone[y % halftoneHeight]) + y += self.vDir + + if (self.preload): + prevWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) + self.sourceIndex += hInc + else: + prevWord = rarithmetic.r_uint(0) + + destMask = self.mask1 + # pick up next word + thisWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + prevWord = thisWord + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + # The central horizontal loop requires no store masking + self.destIndex += hInc + destMask = BitBltShadow.AllOnes + if (self.combinationRule == 3): # Store mode avoids dest merge function + if ((self.skew == 0) and (halftoneWord == BitBltShadow.AllOnes)): + # Non-skewed with no halftone + if (self.hDir == -1): + for word in range(2, self.nWords): + thisWord = self.source.w_bits.getword(self.sourceIndex) + 
self.dest.w_bits.setword(self.destIndex, thisWord) + self.sourceIndex += hInc + self.destIndex += hInc + else: + for word in range(2, self.nWords): + self.dest.w_bits.setword(self.destIndex, prevWord) + prevWord = self.source.w_bits.getword(self.sourceIndex) + self.destIndex += hInc + self.sourceIndex += hInc + else: + # skewed and/or halftoned + for word in range(2, self.nWords): + thisWord = self.source.w_bits.getword(self.sourceIndex) + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + prevWord = thisWord + self.dest.w_bits.setword(self.destIndex, skewWord & halftoneWord) + self.destIndex += hInc + else: # Dest merging here... + for word in range(2, self.nWords): + thisWord = self.source.w_bits.getword(self.sourceIndex) # pick up next word + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + prevWord = thisWord + mergeWord = self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) + self.dest.w_bits.setword(self.destIndex, mergeWord) + self.destIndex += hInc + # last word with masking and all + if (self.nWords > 1): + destMask = self.mask2 + if (self.sourceIndex >= 0 and self.sourceIndex < sourceLimit): + # NOTE: we are currently overrunning source bits in some cases + # self test makes up for it. + thisWord = self.source.w_bits.getword(self.sourceIndex) # pick up next word + self.sourceIndex += hInc + skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) + destWord = self.dest.w_bits.getword(self.destIndex) + mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) + destWord = (destMask & mergeWord) | (destWord & (~destMask)) + self.dest.w_bits.setword(self.destIndex, destWord) + self.destIndex += hInc + self.sourceIndex += self.sourceDelta + self.destIndex += self.destDelta + + def mergeFn(self, src, dest): + return rarithmetic.r_uint(self.merge( + rarithmetic.r_uint(src), + rarithmetic.r_uint(dest) + )) + + def merge(self, source_word, dest_word): + assert isinstance(source_word, rarithmetic.r_uint) and isinstance(dest_word, rarithmetic.r_uint) + if self.combinationRule == 0: + return 0 + elif self.combinationRule == 1: + return source_word & dest_word + elif self.combinationRule == 2: + return source_word & ~dest_word + elif self.combinationRule == 3: + return source_word + elif self.combinationRule == 4: + return ~source_word & dest_word + elif self.combinationRule == 5: + return dest_word + elif self.combinationRule == 6: + return source_word ^ dest_word + elif self.combinationRule == 7: + return source_word | dest_word + elif self.combinationRule == 8: + return ~source_word & ~dest_word + elif self.combinationRule == 9: + return ~source_word ^ dest_word + elif self.combinationRule == 10: + return ~dest_word + elif self.combinationRule == 11: + return source_word | ~dest_word + elif self.combinationRule == 12: + return ~source_word + elif self.combinationRule == 13: + return ~source_word | dest_word + elif self.combinationRule == 14: + return ~source_word | ~dest_word + elif self.combinationRule >= 15 and self.combinationRule <= 17: + return dest_word + elif self.combinationRule == 18: + return source_word + dest_word + elif self.combinationRule == 19: + return source_word - dest_word + elif self.combinationRule >= 20 and self.combinationRule <= 24: + return source_word + elif self.combinationRule == 25: + if source_word == 0: + return dest_word + else: + return self.partitionedANDtonBitsnPartitions( + ~source_word, 
+ dest_word, + self.dest.depth, + self.dest.pixPerWord + ) + elif self.combinationRule == 26: + return self.partitionedANDtonBitsnPartitions( + ~source_word, + dest_word, + self.dest.depth, + self.dest.pixPerWord + ) + elif 26 < self.combinationRule <= 41: + return dest_word + else: + raise PrimitiveFailedError() + + def partitionedANDtonBitsnPartitions(self, word1, word2, nBits, nParts): + # partition mask starts at the right + mask = BitBltShadow.MaskTable[nBits] + result = 0 + for i in range(1, nParts + 1): + if ((word1 & mask) == mask): + result = result | (word2 & mask) + # slide left to next partition + mask = mask << nBits + return result + + def as_string(bb): + return 'aBitBlt (destX: %d, destY: %d, sx: %d, sy: %d, dx: %d, dy: %d, w: %d, h: %d, hDir: %d, vDir: %d, sDelta: %d, dDelta: %d, skew: %d, sI: %d, dI: %d)' % ( + bb.dest_x, bb.dest_y, bb.sx, bb.sy, bb.dx, bb.dy, bb.w, bb.h, bb.h_dir, bb.v_dir, bb.source_delta, bb.dest_delta, bb.skew, bb.source_index, bb.dest_index) + # "dest_raster", "source_raster", + # "halftone_bits", "mask1", "mask2", "skew_mask", + # "n_words", "preload" + + +class FormShadow(AbstractCachingShadow): + _attrs_ = ["w_bits", "width", "height", "depth", "offsetX", + "offsetY", "msb", "pixPerWord", "pitch"] + + def sync_cache(self): + if self.size() < 5: + w_self = self.w_self() + assert isinstance(w_self, model.W_PointersObject) + w_self._shadow = None + raise PrimitiveFailedError + self.w_bits = self.fetch(0) + if self.w_bits is self.space.w_nil: + return + if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): + w_self = self.w_self() + assert isinstance(w_self, model.W_PointersObject) + w_self._shadow = None + raise PrimitiveFailedError + self.width = self.space.unwrap_int(self.fetch(1)) + self.height = self.space.unwrap_int(self.fetch(2)) + self.depth = self.space.unwrap_int(self.fetch(3)) + if self.width < 0 or self.height < 0: + raise PrimitiveFailedError() + self.msb = self.depth > 0 + if self.depth < 0: + self.depth = -self.depth + if self.depth == 0: + raise PrimitiveFailedError() + w_offset = self.fetch(4) + assert isinstance(w_offset, model.W_PointersObject) + if not w_offset is self.space.w_nil: + self.offsetX = self.space.unwrap_int(w_offset._fetch(0)) + self.offsetY = self.space.unwrap_int(w_offset._fetch(1)) + self.pixPerWord = 32 / self.depth + self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 + if self.w_bits.size() != (self.pitch * self.height): + # raise error.PrimitiveFailedError() + pass # - we'll be updated again diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -629,31 +629,10 @@ def func(interp, s_frame, w_rcvr, w_into): raise PrimitiveNotYetWrittenError() - at expose_primitive(BITBLT_COPY_BITS, unwrap_spec=[object], clean_stack=False) -def func(interp, s_frame, w_rcvr): - from spyvm.interpreter import Return - if not isinstance(w_rcvr, model.W_PointersObject) or w_rcvr.size() < 15: - raise PrimitiveFailedError - - # only allow combinationRules 0-41 - combinationRule = interp.space.unwrap_positive_32bit_int(w_rcvr.fetch(interp.space, 3)) - if combinationRule > 41: - raise PrimitiveFailedError - - space = interp.space - - s_bitblt = w_rcvr.as_bitblt_get_shadow(space) - s_bitblt.copyBits() - - w_dest_form = w_rcvr.fetch(space, 0) - if (combinationRule == 22 or combinationRule == 32): - s_frame.pop() # pops the next value under BitBlt - s_frame.push(interp.space.wrap_int(s_bitblt.bitCount)) - elif 
w_dest_form.is_same_object(space.objtable['w_display']): - w_bitmap = w_dest_form.fetch(space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) - w_bitmap.flush_to_screen() - return w_rcvr + at expose_primitive(BITBLT_COPY_BITS, clean_stack=False, no_result=True, compiled_method=True) +def func(interp, s_frame, argcount, s_method): + from spyvm.plugins.bitblt import BitBltPlugin + return BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) @expose_primitive(BE_CURSOR) def func(interp, s_frame, argcount): @@ -877,9 +856,10 @@ raise PrimitiveFailedError signature = (w_modulename.as_string(), w_functionname.as_string()) - if signature == ('BitBltPlugin', 'primitiveCopyBits'): - return prim_holder.prim_table[BITBLT_COPY_BITS](interp, s_frame, argcount, s_method) - if signature[0] == "SocketPlugin": + if signature == 'BitBltPlugin': + from spyvm.plugins.bitblt import BitBltPlugin + return BitBltPlugin.call(signature[1], interp, s_frame, argcount, s_method) + elif signature[0] == "SocketPlugin": from spyvm.plugins.socket import SocketPlugin return SocketPlugin.call(signature[1], interp, s_frame, argcount, s_method) elif signature[0] == "FilePlugin": @@ -889,6 +869,7 @@ from spyvm.plugins.vmdebugging import DebuggingPlugin return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, s_method) else: + print signature from spyvm.interpreter_proxy import IProxy return IProxy.call(signature, interp, s_frame, argcount, s_method) raise PrimitiveFailedError diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -1125,610 +1125,3 @@ self.dependent = dependent def update(self): pass - - -class BitBltShadow(AbstractCachingShadow): - WordSize = 32 - MaskTable = [rarithmetic.r_uint(0)] - for i in xrange(WordSize): - MaskTable.append(rarithmetic.r_uint((2 ** (i + 1)) - 1)) - AllOnes = rarithmetic.r_uint(0xFFFFFFFF) - - def sync_cache(self): - self.loadBitBlt() - - def intOrIfNil(self, w_int, i): - if w_int is self.space.w_nil: - return i - else: - return self.space.unwrap_int(w_int) - - def loadForm(self, w_form): - try: - if not isinstance(w_form, model.W_PointersObject): - raise error.PrimitiveFailedError() - s_form = w_form.as_form_get_shadow(self.space) - if not isinstance(s_form, FormShadow): - raise error.PrimitiveFailedError() - return s_form - except error.PrimitiveFailedError, e: - w_self = self.w_self() - assert isinstance(w_self, model.W_PointersObject) - w_self._shadow = None - raise e - - def loadHalftone(self, w_halftone_form): - if w_halftone_form is self.space.w_nil: - return None - elif isinstance(w_halftone_form, model.W_WordsObject): - # Already a bitmap - return w_halftone_form.words - else: - assert isinstance(w_halftone_form, model.W_PointersObject) - w_bits = w_halftone_form.as_form_get_shadow(self.space).w_bits - assert isinstance(w_bits, model.W_WordsObject) - return w_bits.words - - def loadColorMap(self, w_color_map): - if isinstance(w_color_map, model.W_WordsObject): - self.w_cmLookupTable = w_color_map - self.cmMask = self.w_cmLookupTable.size() - 1 - else: - self.w_cmLookupTable = None - - def loadBitBlt(self): - self.success = True - self.w_destForm = self.fetch(0) - self.dest = self.loadForm(self.w_destForm) - self.w_sourceForm = self.fetch(1) - if self.w_sourceForm is not self.space.w_nil: - self.source = self.loadForm(self.w_sourceForm) - else: - self.source = None - self.halftone = self.loadHalftone(self.fetch(2)) - self.combinationRule = self.space.unwrap_int(self.fetch(3)) - self.destX = 
self.intOrIfNil(self.fetch(4), 0) - self.destY = self.intOrIfNil(self.fetch(5), 0) - self.width = self.intOrIfNil(self.fetch(6), self.dest.width) - self.height = self.intOrIfNil(self.fetch(7), self.dest.height) - self.clipX = self.intOrIfNil(self.fetch(10), 0) - self.clipY = self.intOrIfNil(self.fetch(11), 0) - self.clipW = self.intOrIfNil(self.fetch(12), self.width) - self.clipH = self.intOrIfNil(self.fetch(13), self.height) - if not self.source: - self.sourceX = 0 - self.sourceY = 0 - else: - self.loadColorMap(self.fetch(14)) - self.sourceX = self.intOrIfNil(self.fetch(8), 0) - self.sourceY = self.intOrIfNil(self.fetch(9), 0) - - def copyBits(self): - self.bitCount = 0 - self.clipRange() - if (self.bbW <= 0 or self.bbH <= 0): - return - self.destMaskAndPointerInit() - if not self.source: - self.copyLoopNoSource() - else: - self.checkSourceOverlap() - if self.source.depth != self.dest.depth: - self.copyLoopPixMap() - else: - self.sourceSkewAndPointerInit() - self.copyLoop() - - def checkSourceOverlap(self): - if (self.w_sourceForm is self.w_destForm and self.dy >= self.sy): - if (self.dy > self.sy): - self.vDir = -1 - self.sy = (self.sy + self.bbH) - 1 - self.dy = (self.dy + self.bbH) - 1 - else: - if (self.dy == self.sy and self.dx > self.sx): - self.hDir = -1 - self.sx = (self.sx + self.bbW) - 1 # start at right - self.dx = (self.dx + self.bbW) - 1 - if (self.nWords > 1): - t = self.mask1 # and fix up masks - self.mask1 = self.mask2 - self.mask2 = t - self.destIndex = (self.dy * self.dest.pitch) + (self.dx / self.dest.pixPerWord | 0) # recompute since dx, dy change - self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) - - def sourceSkewAndPointerInit(self): - pixPerM1 = self.dest.pixPerWord - 1 # Pix per word is power of two, so self makes a mask - sxLowBits = self.sx & pixPerM1 - dxLowBits = self.dx & pixPerM1 - # check if need to preload buffer - # (i.e., two words of source needed for first word of destination) - dWid = -1 - if (self.hDir > 0): - if self.bbW < (self.dest.pixPerWord - dxLowBits): - dWid = self.bbW - else: - dWid = self.dest.pixPerWord - dxLowBits - self.preload = (sxLowBits + dWid) > pixPerM1 - else: - if self.bbW < (dxLowBits + 1): - dWid = self.bbW - else: - dWid = dxLowBits + 1 - self.preload = ((sxLowBits - dWid) + 1) < 0 - - if self.source.msb: - self.skew = (sxLowBits - dxLowBits) * self.dest.depth - else: - self.skew = (dxLowBits - sxLowBits) * self.dest.depth - if (self.preload): - if (self.skew < 0): - self.skew += 32 - else: - self.skew -= 32 - # calculate increments from end of one line to start of next - self.sourceIndex = (self.sy * self.source.pitch) + (self.sx / (32 / self.source.depth) |0) - self.sourceDelta = (self.source.pitch * self.vDir) - (self.nWords * self.hDir) - if (self.preload): - self.sourceDelta -= self.hDir - - def clipRange(self): - # intersect with destForm bounds - if self.clipX < 0: - self.clipW += self.clipX - self.clipX = 0 - if self.clipY < 0: - self.clipH += self.clipY - self.clipY = 0 - if self.clipX + self.clipW > self.dest.width: - self.clipW = self.dest.width - self.clipX - if self.clipY + self.clipH > self.dest.height: - self.clipH = self.dest.height - self.clipY - # intersect with clipRect - leftOffset = max(self.clipX - self.destX, 0) - self.sx = self.sourceX + leftOffset - self.dx = self.destX + leftOffset - self.bbW = self.width - leftOffset - rightOffset = (self.dx + self.bbW) - (self.clipX + self.clipW) - if rightOffset > 0: - self.bbW -= rightOffset - topOffset = max(self.clipY - self.destY, 0) - 
self.sy = self.sourceY + topOffset - self.dy = self.destY + topOffset - self.bbH = self.height - topOffset - bottomOffset = (self.dy + self.bbH) - (self.clipY + self.clipH) - if bottomOffset > 0: - self.bbH -= bottomOffset - # intersect with sourceForm bounds - if not self.source: - return - if self.sx < 0: - self.dx -= self.sx - self.bbW += self.sx - self.sx = 0 - if (self.sx + self.bbW) > self.source.width: - self.bbW -= (self.sx + self.bbW) - self.source.width - if self.sy < 0: - self.dy -= self.sy - self.bbH += self.sy - self.sy = 0 - if (self.sy + self.bbH) > self.source.height: - self.bbH -= (self.sy + self.bbH) - self.source.height - - def rshift(self, val, n): - # return rarithmetic.r_uint(val >> n if val >= 0 else (val + 0x100000000) >> n) - return rarithmetic.r_uint(rarithmetic.r_uint(val) >> n & BitBltShadow.AllOnes) - - def destMaskAndPointerInit(self): - pixPerM1 = self.dest.pixPerWord - 1 # pixPerWord is power-of-two, so this makes a mask - startBits = self.dest.pixPerWord - (self.dx & pixPerM1) # how many px in 1st word - endBits = (((self.dx + self.bbW) - 1) & pixPerM1) + 1 - if self.dest.msb: - self.mask1 = self.rshift(BitBltShadow.AllOnes, (32 - (startBits * self.dest.depth))) - self.mask2 = BitBltShadow.AllOnes << (32 - (endBits * self.dest.depth)) - else: - self.mask1 = BitBltShadow.AllOnes << (32 - (startBits * self.dest.depth)) - self.mask2 = self.rshift(BitBltShadow.AllOnes, (32 - (endBits * self.dest.depth))) - if self.bbW < startBits: - self.mask1 = self.mask1 & self.mask2 - self.mask2 = 0 - self.nWords = 1 - else: - self.nWords = (((self.bbW - startBits) + pixPerM1) / self.dest.pixPerWord | 0) + 1 - self.hDir = 1 - self.vDir = 1 - self.destIndex = (self.dy * self.dest.pitch) + (self.dx / self.dest.pixPerWord | 0) - self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) - - def copyLoopNoSource(self): - halftoneWord = BitBltShadow.AllOnes - for i in range(self.bbH): - if self.halftone: - halftoneWord = rarithmetic.r_uint(self.halftone[(self.dy + i) % len(self.halftone)]) - # first word in row is masked - destMask = self.mask1 - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(halftoneWord, destWord) - destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) - self.destIndex += 1 - destMask = BitBltShadow.AllOnes - # the central horizontal loop requires no store masking - if self.combinationRule == 3: # store rule requires no dest merging - for word in range(2, self.nWords): - self.dest.w_bits.setword(self.destIndex, halftoneWord) - self.destIndex += 1 - else: - for word in range(2, self.nWords): - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(halftoneWord, destWord) - self.dest.w_bits.setword(self.destIndex, mergeWord) - self.destIndex += 1 - # last word in row is masked - if self.nWords > 1: - destMask = self.mask2 - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(halftoneWord, destWord) - destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) - self.destIndex += 1 - self.destIndex += self.destDelta - - def copyLoopPixMap(self): - # This version of the inner loop maps source pixels - # to a destination form with different depth. Because it is already - # unweildy, the loop is not unrolled as in the other versions. 
- # Preload, skew and skewMask are all overlooked, since pickSourcePixels - # delivers its destination word already properly aligned. - # Note that pickSourcePixels could be copied in-line at the top of - # the horizontal loop, and some of its inits moved out of the loop. - # - # The loop has been rewritten to use only one pickSourcePixels call. - # The idea is that the call itself could be inlined. If we decide not - # to inline pickSourcePixels we could optimize the loop instead. - sourcePixMask = BitBltShadow.MaskTable[self.source.depth] - destPixMask = BitBltShadow.MaskTable[self.dest.depth] - self.sourceIndex = (self.sy * self.source.pitch) + (self.sx / self.source.pixPerWord | 0) - scrStartBits = self.source.pixPerWord - (self.sx & (self.source.pixPerWord - 1)) - if self.bbW < scrStartBits: - nSourceIncs = 0 - else: - nSourceIncs = ((self.bbW - scrStartBits) / self.source.pixPerWord | 0) + 1 - # Note following two items were already calculated in destmask setup! - self.sourceDelta = self.source.pitch - nSourceIncs - startBits = self.dest.pixPerWord - (self.dx & (self.dest.pixPerWord - 1)) - endBits = (((self.dx + self.bbW) - 1) & (self.dest.pixPerWord - 1)) + 1 - if self.bbW < startBits: - startBits = self.bbW # ?! - srcShift = (self.sx & (self.source.pixPerWord - 1)) * self.source.depth - dstShift = (self.dx & (self.dest.pixPerWord - 1)) * self.dest.depth - srcShiftInc = self.source.depth - dstShiftInc = self.dest.depth - dstShiftLeft = 0 - if (self.source.msb): - srcShift = (32 - self.source.depth) - srcShift - srcShiftInc = -srcShiftInc - - if (self.dest.msb): - dstShift = (32 - self.dest.depth) - dstShift - dstShiftInc = -dstShiftInc - dstShiftLeft = 32 - self.dest.depth - - for i in range(self.bbH): - if self.halftone: - halftoneWord = rarithmetic.r_uint(self.halftone[(self.dy + i) % len(self.halftone)]) - else: - halftoneWord = BitBltShadow.AllOnes - self.srcBitShift = srcShift - self.dstBitShift = dstShift - self.destMask = self.mask1 - nPix = startBits - words = self.nWords - # Here is the horizontal loop... - for word in range(words): - skewWord = self.pickSourcePixels(nPix, sourcePixMask, destPixMask, srcShiftInc, dstShiftInc) - # align next word to leftmost pixel - self.dstBitShift = dstShiftLeft - if self.destMask == BitBltShadow.AllOnes: # avoid read-modify-write - self.dest.w_bits.setword( - self.destIndex, - self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) - ) - else: # General version using dest masking - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(skewWord & halftoneWord, destWord & self.destMask) - destWord = (self.destMask & mergeWord) | (destWord & (~self.destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) - - self.destIndex += 1 - if (words == 2): # is the next word the last word? - self.destMask = self.mask2 - nPix = endBits - else: # use fullword mask for inner loop - self.destMask = BitBltShadow.AllOnes - nPix = self.dest.pixPerWord - self.sourceIndex += self.sourceDelta - self.destIndex += self.destDelta - - def pickSourcePixels(self, nPixels, srcMask, dstMask, srcShiftInc, dstShiftInc): - # Pick nPix pixels starting at srcBitIndex from the source, map by the - # color map, and justify them according to dstBitIndex in the resulting destWord. 
- sourceWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) - destWord = 0 - srcShift = self.srcBitShift # put into temp for speed - dstShift = self.dstBitShift - nPix = nPixels - # always > 0 so we can use do { } while(--nPix); - if (self.w_cmLookupTable): # a little optimization for (pretty crucial) blits using indexed lookups only - for px in range(nPix): - sourcePix = self.rshift(rarithmetic.r_uint(sourceWord), srcShift) & srcMask - destPix = self.w_cmLookupTable.getword(rarithmetic.intmask(sourcePix & self.cmMask)) - # adjust dest pix index - destWord = destWord | ((destPix & dstMask) << dstShift) - # adjust source pix index - dstShift += dstShiftInc - srcShift += srcShiftInc - if srcShift & rarithmetic.r_uint(0xFFFFFFE0): - if (self.source.msb): - srcShift += 32 - else: - srcShift -= 32 - self.sourceIndex += 1 - sourceWord = self.source.w_bits.getword(self.sourceIndex) - else: - raise error.PrimitiveFailedError() - self.srcBitShift = srcShift # Store back - return destWord - - def rotate32bit(self, thisWord, prevWord, skewMask, notSkewMask, unskew): - if unskew < 0: - rotated = self.rshift(rarithmetic.r_uint(prevWord & notSkewMask), -unskew) - else: - rotated = (prevWord & notSkewMask) << unskew - if self.skew < 0: - rotated = rotated | self.rshift(rarithmetic.r_uint(thisWord & skewMask), -self.skew) - else: - rotated = rotated | (thisWord & skewMask) << self.skew - return rotated - - def copyLoop(self): - # self version of the inner loop assumes we do have a source - sourceLimit = self.source.w_bits.size() - hInc = self.hDir - # init skew (the difference in word alignment of source and dest) - if (self.skew == -32): - self.skew = unskew = 0 - skewMask = rarithmetic.r_uint(0) - else: - if (self.skew < 0): - unskew = self.skew + 32 - skewMask = rarithmetic.r_uint(BitBltShadow.AllOnes << -self.skew) - else: - if (self.skew == 0): - unskew = 0 - skewMask = BitBltShadow.AllOnes - else: - unskew = self.skew - 32 - skewMask = self.rshift(BitBltShadow.AllOnes, self.skew) - notSkewMask = rarithmetic.r_uint(~skewMask) - - # init halftones - if (self.halftone): - halftoneWord = rarithmetic.r_uint(self.halftone[0]) - halftoneHeight = len(self.halftone) - else: - halftoneWord = BitBltShadow.AllOnes - halftoneHeight = 0 - - # now loop over all lines - y = self.dy - for i in range(1, self.bbH + 1): - if (halftoneHeight > 1): - halftoneWord = rarithmetic.r_uint(self.halftone[y % halftoneHeight]) - y += self.vDir - - if (self.preload): - prevWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) - self.sourceIndex += hInc - else: - prevWord = rarithmetic.r_uint(0) - - destMask = self.mask1 - # pick up next word - thisWord = rarithmetic.r_uint(self.source.w_bits.getword(self.sourceIndex)) - self.sourceIndex += hInc - skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) - prevWord = thisWord - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) - destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) - # The central horizontal loop requires no store masking - self.destIndex += hInc - destMask = BitBltShadow.AllOnes - if (self.combinationRule == 3): # Store mode avoids dest merge function - if ((self.skew == 0) and (halftoneWord == BitBltShadow.AllOnes)): - # Non-skewed with no halftone - if (self.hDir == -1): - for word in range(2, self.nWords): - thisWord = self.source.w_bits.getword(self.sourceIndex) - 
self.dest.w_bits.setword(self.destIndex, thisWord) - self.sourceIndex += hInc - self.destIndex += hInc - else: - for word in range(2, self.nWords): - self.dest.w_bits.setword(self.destIndex, prevWord) - prevWord = self.source.w_bits.getword(self.sourceIndex) - self.destIndex += hInc - self.sourceIndex += hInc - else: - # skewed and/or halftoned - for word in range(2, self.nWords): - thisWord = self.source.w_bits.getword(self.sourceIndex) - self.sourceIndex += hInc - skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) - prevWord = thisWord - self.dest.w_bits.setword(self.destIndex, skewWord & halftoneWord) - self.destIndex += hInc - else: # Dest merging here... - for word in range(2, self.nWords): - thisWord = self.source.w_bits.getword(self.sourceIndex) # pick up next word - self.sourceIndex += hInc - skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) - prevWord = thisWord - mergeWord = self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) - self.dest.w_bits.setword(self.destIndex, mergeWord) - self.destIndex += hInc - # last word with masking and all - if (self.nWords > 1): - destMask = self.mask2 - if (self.sourceIndex >= 0 and self.sourceIndex < sourceLimit): - # NOTE: we are currently overrunning source bits in some cases - # self test makes up for it. - thisWord = self.source.w_bits.getword(self.sourceIndex) # pick up next word - self.sourceIndex += hInc - skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) - destWord = self.dest.w_bits.getword(self.destIndex) - mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) - destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) - self.destIndex += hInc - self.sourceIndex += self.sourceDelta - self.destIndex += self.destDelta - - def mergeFn(self, src, dest): - return rarithmetic.r_uint(self.merge( - rarithmetic.r_uint(src), - rarithmetic.r_uint(dest) - )) - - def merge(self, source_word, dest_word): - assert isinstance(source_word, rarithmetic.r_uint) and isinstance(dest_word, rarithmetic.r_uint) - if self.combinationRule == 0: - return 0 - elif self.combinationRule == 1: - return source_word & dest_word - elif self.combinationRule == 2: - return source_word & ~dest_word - elif self.combinationRule == 3: - return source_word - elif self.combinationRule == 4: - return ~source_word & dest_word - elif self.combinationRule == 5: - return dest_word - elif self.combinationRule == 6: - return source_word ^ dest_word - elif self.combinationRule == 7: - return source_word | dest_word - elif self.combinationRule == 8: - return ~source_word & ~dest_word - elif self.combinationRule == 9: - return ~source_word ^ dest_word - elif self.combinationRule == 10: - return ~dest_word - elif self.combinationRule == 11: - return source_word | ~dest_word - elif self.combinationRule == 12: - return ~source_word - elif self.combinationRule == 13: - return ~source_word | dest_word - elif self.combinationRule == 14: - return ~source_word | ~dest_word - elif self.combinationRule >= 15 and self.combinationRule <= 17: - return dest_word - elif self.combinationRule == 18: - return source_word + dest_word - elif self.combinationRule == 19: - return source_word - dest_word - elif self.combinationRule >= 20 and self.combinationRule <= 24: - return source_word - elif self.combinationRule == 25: - if source_word == 0: - return dest_word - else: - return self.partitionedANDtonBitsnPartitions( - ~source_word, 
- dest_word, - self.dest.depth, - self.dest.pixPerWord - ) - elif self.combinationRule == 26: - return self.partitionedANDtonBitsnPartitions( - ~source_word, - dest_word, - self.dest.depth, - self.dest.pixPerWord - ) - elif 26 < self.combinationRule <= 41: - return dest_word - else: - raise error.PrimitiveFailedError() - - def partitionedANDtonBitsnPartitions(self, word1, word2, nBits, nParts): - # partition mask starts at the right - mask = BitBltShadow.MaskTable[nBits] - result = 0 - for i in range(1, nParts + 1): - if ((word1 & mask) == mask): - result = result | (word2 & mask) - # slide left to next partition - mask = mask << nBits - return result - - def as_string(bb): - return 'aBitBlt (destX: %d, destY: %d, sx: %d, sy: %d, dx: %d, dy: %d, w: %d, h: %d, hDir: %d, vDir: %d, sDelta: %d, dDelta: %d, skew: %d, sI: %d, dI: %d)' % ( - bb.dest_x, bb.dest_y, bb.sx, bb.sy, bb.dx, bb.dy, bb.w, bb.h, bb.h_dir, bb.v_dir, bb.source_delta, bb.dest_delta, bb.skew, bb.source_index, bb.dest_index) - # "dest_raster", "source_raster", - # "halftone_bits", "mask1", "mask2", "skew_mask", - # "n_words", "preload" - -class FormShadow(AbstractCachingShadow): - _attrs_ = ["w_bits", "width", "height", "depth", "offsetX", - "offsetY", "msb", "pixPerWord", "pitch"] - - def sync_cache(self): - if self.size() < 5: - w_self = self.w_self() - assert isinstance(w_self, model.W_PointersObject) - w_self._shadow = None - raise error.PrimitiveFailedError - self.w_bits = self.fetch(0) - if self.w_bits is self.space.w_nil: - return - if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): - w_self = self.w_self() - assert isinstance(w_self, model.W_PointersObject) - w_self._shadow = None - raise error.PrimitiveFailedError - self.width = self.space.unwrap_int(self.fetch(1)) - self.height = self.space.unwrap_int(self.fetch(2)) - self.depth = self.space.unwrap_int(self.fetch(3)) - if self.width < 0 or self.height < 0: - raise error.PrimitiveFailedError() - self.msb = self.depth > 0 - if self.depth < 0: - self.depth = -self.depth - if self.depth == 0: - raise error.PrimitiveFailedError() - w_offset = self.fetch(4) - assert isinstance(w_offset, model.W_PointersObject) - if not w_offset is self.space.w_nil: - self.offsetX = self.space.unwrap_int(w_offset._fetch(0)) - self.offsetY = self.space.unwrap_int(w_offset._fetch(1)) - self.pixPerWord = 32 / self.depth - self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 - if self.w_bits.size() != (self.pitch * self.height): - # raise error.PrimitiveFailedError() - pass # - we'll be updated again - - # def replace_bits(self): - # w_bits = self.w_bits - # if isinstance(w_bits, model.W_WordsObject): - # pass - # elif isinstance(w_bits, model.W_DisplayBitmap): - # w_bits.update_from_buffer() - # else: - # w_self = self.w_self() - # assert isinstance(w_self, model.W_PointersObject) - # w_self._shadow = None - # raise error.PrimitiveFailedError From noreply at buildbot.pypy.org Fri Jan 3 16:45:42 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 3 Jan 2014 16:45:42 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: use SDL surface depths where possible, map depths <8. has what looks like endianess issues for depth < 32 Message-ID: <20140103154542.A70181C0356@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r549:8070c7ef400e Date: 2014-01-03 16:28 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/8070c7ef400e/ Log: use SDL surface depths where possible, map depths <8. 
has what looks like endianess issues for depth < 32 diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -34,8 +34,15 @@ self.width = w self.height = h self.depth = d - self.screen = RSDL.SetVideoMode(w, h, 32, 0) - assert self.screen + flags = RSDL.HWPALETTE | RSDL.RESIZABLE | RSDL.ASYNCBLIT | RSDL.DOUBLEBUF + if d < 8: + d = 8 + self.screen = RSDL.SetVideoMode(w, h, d, flags) + if not self.screen: + print "Could not open display at depth %d" % d + raise RuntimeError + elif d == 8: + self.set_squeak_colormap(self.screen) def get_pixelbuffer(self): return rffi.cast(rffi.ULONGP, self.screen.c_pixels) @@ -43,6 +50,24 @@ def flip(self): RSDL.Flip(self.screen) + def set_squeak_colormap(self, screen): + # TODO: fix this up from the image + colors = lltype.malloc(rffi.CArray(RSDL.ColorPtr.TO), 4, flavor='raw') + colors[0].c_r = rffi.r_uchar(255) + colors[0].c_g = rffi.r_uchar(255) + colors[0].c_b = rffi.r_uchar(255) + colors[1].c_r = rffi.r_uchar(0) + colors[1].c_g = rffi.r_uchar(0) + colors[1].c_b = rffi.r_uchar(0) + colors[2].c_r = rffi.r_uchar(128) + colors[2].c_g = rffi.r_uchar(128) + colors[2].c_b = rffi.r_uchar(128) + colors[3].c_r = rffi.r_uchar(255) + colors[3].c_g = rffi.r_uchar(255) + colors[3].c_b = rffi.r_uchar(255) + RSDL.SetColors(self.screen, rffi.cast(RSDL.ColorPtr, colors), 0, 4) + lltype.free(colors, flavor='raw') + def handle_mouse_button(self, c_type, event): b = rffi.cast(RSDL.MouseButtonEventPtr, event) btn = rffi.getintfield(b, 'c_button') @@ -102,6 +127,8 @@ elif c_type == RSDL.KEYDOWN: self.handle_keypress(c_type, event) return + elif c_type == RSDL.VIDEORESIZE: + pass # TODO elif c_type == RSDL.QUIT: from spyvm.error import Exit raise Exit("Window closed..") diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -968,22 +968,16 @@ lltype.free(self.c_words, flavor='raw') -NATIVE_DEPTH = 32 - class W_DisplayBitmap(W_AbstractObjectWithClassReference): - _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display'] - _immutable_fields_ = ['_realsize', 'display'] + _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display', '_depth'] + _immutable_fields_ = ['_realsize', 'display', '_depth'] @staticmethod def create(space, w_class, size, depth, display): - if depth == 1: - return W_DisplayBitmap1Bit(space, w_class, size, depth, display) - elif depth == 16: - return W_DisplayBitmap16Bit(space, w_class, size, depth, display) - elif depth == 32: - return W_DisplayBitmap32Bit(space, w_class, size, depth, display) + if depth < 8: + return W_MappingDisplayBitmap(space, w_class, size * (8 / depth), depth, display) else: - raise NotImplementedError("non B/W squeak") + return W_DisplayBitmap(space, w_class, size, depth, display) def __init__(self, space, w_class, size, depth, display): W_AbstractObjectWithClassReference.__init__(self, space, w_class) @@ -991,6 +985,7 @@ self.pixelbuffer = display.get_pixelbuffer() self._realsize = size self.display = display + self._depth = depth def at0(self, space, index0): val = self.getword(index0) @@ -1019,14 +1014,11 @@ def getword(self, n): assert self.size() > n >= 0 - # if self._realsize > n: return self._real_depth_buffer[n] - # else: - # print "Out-of-bounds access on display: %d/%d" % (n, self._realsize) - # import pdb; pdb.set_trace() def setword(self, n, word): - raise NotImplementedError("subclass responsibility") + self._real_depth_buffer[n] = word + self.pixelbuffer[n] = word def is_array_object(self): return True @@ -1041,58 +1033,45 
@@ def __del__(self): lltype.free(self._real_depth_buffer, flavor='raw') - @jit.elidable - def compute_pos_and_line_end(self, n, depth): - width = self.display.width - words_per_line = width / (NATIVE_DEPTH / depth) - if width % (NATIVE_DEPTH / depth) != 0: - words_per_line += 1 - line = n / words_per_line - assert line < self.display.height # line is 0 based - line_start = width * line - line_end = line_start + width # actually the start of the next line - pos = ((n % words_per_line) * (NATIVE_DEPTH / depth)) + line_start - return pos, line_end - -class W_DisplayBitmap1Bit(W_DisplayBitmap): +NATIVE_DEPTH = 8 +class W_MappingDisplayBitmap(W_DisplayBitmap): @jit.unroll_safe def setword(self, n, word): self._real_depth_buffer[n] = word - pos, line_end = self.compute_pos_and_line_end(n, 1) - mask = r_uint(1) - mask <<= 31 - for i in xrange(32): - if pos == line_end: + word = r_uint(word) + pos = self.compute_pos(n) + # pos, line_end = self.compute_pos_and_line_end(n, self._depth) + maskR = r_uint(2 ** self._depth - 1) + mask = maskR << (32 - self._depth) + rshift = 32 - self._depth + for i in xrange(8 / self._depth): + if pos >= self.size(): return - bit = mask & word - pixel = r_uint((0x00ffffff * (bit == 0)) | r_uint(0xff000000)) - self.pixelbuffer[pos] = pixel - mask >>= 1 + mapword = r_uint(0) + for i in xrange(4): + pixel = r_uint(word) >> rshift + mapword <<= 8 + mapword |= r_uint(pixel) + word <<= self._depth + self.pixelbuffer[pos] = mapword pos += 1 -class W_DisplayBitmap16Bit(W_DisplayBitmap): - @jit.unroll_safe - def setword(self, n, word): - self._real_depth_buffer[n] = word - pos, line_end = self.compute_pos_and_line_end(n, 16) - for i in xrange(2): - if pos >= line_end: - return - pixel = r_uint(0x0 | - ((word & 0b111110000000000) << 9) | - ((word & 0b000001111100000) << 6) | - ((word & 0b000000000011111) << 3) - ) - self.pixelbuffer[pos] = pixel - word = (word >> 16) & 0xffff - pos += 1 + def compute_pos(self, n): + return n * (NATIVE_DEPTH / self._depth) -class W_DisplayBitmap32Bit(W_DisplayBitmap): - @jit.unroll_safe - def setword(self, n, word): - self._real_depth_buffer[n] = word - self.pixelbuffer[n] = word + # @jit.elidable + # def compute_pos_and_line_end(self, n, depth): + # width = self.display.width + # words_per_line = width / (NATIVE_DEPTH / depth) + # if width % (NATIVE_DEPTH / depth) != 0: + # words_per_line += 1 + # line = n / words_per_line + # assert line < self.display.height # line is 0 based + # line_start = width * line + # line_end = line_start + width # actually the start of the next line + # pos = ((n % words_per_line) * (NATIVE_DEPTH / depth)) + line_start + # return pos, line_end # XXX Shouldn't compiledmethod have class reference for subclassed compiled # methods? 
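(Aside on the W_MappingDisplayBitmap change above: for form depths below 8 bits, each 32-bit word of packed pixels has to be expanded into one 8-bit palette index per pixel before it reaches the SDL surface, whose palette is installed by set_squeak_colormap. A rough standalone sketch of that expansion, assuming MSB-first packing, is given below; the function and variable names are illustrative only and are not part of spyvm, and the real setword additionally mirrors the value into _real_depth_buffer and handles the buffer position.)

def expand_word(word, depth):
    # depth is the Squeak form depth (1, 2 or 4 bits per pixel)
    assert depth in (1, 2, 4)
    mask = (1 << depth) - 1
    pixels = []
    for i in range(32 // depth):
        shift = 32 - depth * (i + 1)           # MSB-first: leftmost pixel comes first
        pixels.append((word >> shift) & mask)  # one 8-bit palette index per pixel
    return pixels

# e.g. a depth-1 word 0x80000000 has only its leftmost pixel set,
# and a depth-4 word always expands to 8 pixel values:
assert expand_word(0x80000000, 1)[0] == 1 and sum(expand_word(0x80000000, 1)) == 1
assert len(expand_word(0, 4)) == 8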
diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -376,6 +376,14 @@ self.lastWindowSize = reader.lastWindowSize self.version = reader.version self.is_modern = reader.version.magic > 6502 + # self.run_spy_hacks(space) + + def run_spy_hacks(self, space): + w_display = space.objtable["w_display"] + if w_display is not None and w_display is not space.w_nil: + if space.unwrap_int(w_display.fetch(space, 3)) < 8: + # non-native indexed color depth not well supported + w_display.store(space, 3, space.wrap_int(32)) def find_symbol(self, space, reader, symbol): w_dnu = self.special(constants.SO_DOES_NOT_UNDERSTAND) From noreply at buildbot.pypy.org Fri Jan 3 16:45:43 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 3 Jan 2014 16:45:43 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: fix translation, intially force images to 32bit Message-ID: <20140103154543.CC37C1C0459@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r550:eec84d67649b Date: 2014-01-03 16:45 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/eec84d67649b/ Log: fix translation, intially force images to 32bit diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1042,7 +1042,8 @@ word = r_uint(word) pos = self.compute_pos(n) # pos, line_end = self.compute_pos_and_line_end(n, self._depth) - maskR = r_uint(2 ** self._depth - 1) + assert self._depth <= 4 + maskR = r_uint(0b1111) mask = maskR << (32 - self._depth) rshift = 32 - self._depth for i in xrange(8 / self._depth): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -856,7 +856,7 @@ raise PrimitiveFailedError signature = (w_modulename.as_string(), w_functionname.as_string()) - if signature == 'BitBltPlugin': + if signature[0] == 'BitBltPlugin': from spyvm.plugins.bitblt import BitBltPlugin return BitBltPlugin.call(signature[1], interp, s_frame, argcount, s_method) elif signature[0] == "SocketPlugin": diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -376,7 +376,7 @@ self.lastWindowSize = reader.lastWindowSize self.version = reader.version self.is_modern = reader.version.magic > 6502 - # self.run_spy_hacks(space) + self.run_spy_hacks(space) def run_spy_hacks(self, space): w_display = space.objtable["w_display"] From noreply at buildbot.pypy.org Sat Jan 4 00:51:38 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 4 Jan 2014 00:51:38 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: merge default Message-ID: <20140103235138.C7FFC1C0459@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68580:a33487be74e5 Date: 2014-01-03 15:46 -0800 http://bitbucket.org/pypy/pypy/changeset/a33487be74e5/ Log: merge default diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -38,7 +38,7 @@ # General information about the project. 
project = u'PyPy' -copyright = u'2013, The PyPy Project' +copyright = u'2014, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -39,3 +39,5 @@ .. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -70,11 +70,11 @@ return None copyright_str = """ -Copyright 2003-2013 PyPy development team. +Copyright 2003-2014 PyPy development team. All Rights Reserved. For further information, see -Portions Copyright (c) 2001-2013 Python Software Foundation. +Portions Copyright (c) 2001-2014 Python Software Foundation. All Rights Reserved. Portions Copyright (c) 2000 BeOpen.com. diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -5,6 +5,10 @@ from collections import OrderedDict class DictTests: + @staticmethod + def newdict(): # overridden in TestLLOrderedDict + return {} + def _freeze_(self): return True @@ -191,9 +195,7 @@ class TestLLtype(DictTests, LLJitMixin): - @staticmethod - def newdict(): - return {} + pass class TestLLOrderedDict(DictTests, LLJitMixin): @staticmethod diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -71,7 +71,11 @@ 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + +class TestRawMem(RawMemTests, LLJitMixin): + def test_getarraysubstruct(self): + # NOTE: not for backend/*/test A2 = lltype.Array(('a', lltype.Signed), ('b', lltype.Signed), hints={'nolength': True}) p = lltype.malloc(A2, 10, flavor='raw', immortal=True, zero=True) @@ -90,6 +94,3 @@ assert res == 66 res = self.interp_operations(f, [2, 2], disable_optimizations=True) assert res == 44 - -class TestRawMem(RawMemTests, LLJitMixin): - pass From noreply at buildbot.pypy.org Sat Jan 4 00:51:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 4 Jan 2014 00:51:40 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140103235140.0F28F1C0459@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68581:733bf19dd5a5 Date: 2014-01-03 15:46 -0800 http://bitbucket.org/pypy/pypy/changeset/733bf19dd5a5/ Log: merge default diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -38,7 +38,7 @@ # General information about the project. 
project = u'PyPy' -copyright = u'2013, The PyPy Project' +copyright = u'2014, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -39,3 +39,5 @@ .. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -69,11 +69,11 @@ return None copyright_str = """ -Copyright 2003-2013 PyPy development team. +Copyright 2003-2014 PyPy development team. All Rights Reserved. For further information, see -Portions Copyright (c) 2001-2013 Python Software Foundation. +Portions Copyright (c) 2001-2014 Python Software Foundation. All Rights Reserved. Portions Copyright (c) 2000 BeOpen.com. diff --git a/rpython/jit/metainterp/test/test_dict.py b/rpython/jit/metainterp/test/test_dict.py --- a/rpython/jit/metainterp/test/test_dict.py +++ b/rpython/jit/metainterp/test/test_dict.py @@ -5,6 +5,10 @@ from collections import OrderedDict class DictTests: + @staticmethod + def newdict(): # overridden in TestLLOrderedDict + return {} + def _freeze_(self): return True @@ -191,9 +195,7 @@ class TestLLtype(DictTests, LLJitMixin): - @staticmethod - def newdict(): - return {} + pass class TestLLOrderedDict(DictTests, LLJitMixin): @staticmethod diff --git a/rpython/jit/metainterp/test/test_rawmem.py b/rpython/jit/metainterp/test/test_rawmem.py --- a/rpython/jit/metainterp/test/test_rawmem.py +++ b/rpython/jit/metainterp/test/test_rawmem.py @@ -71,7 +71,11 @@ 'raw_store': 1, 'raw_load': 1, 'finish': 1}) + +class TestRawMem(RawMemTests, LLJitMixin): + def test_getarraysubstruct(self): + # NOTE: not for backend/*/test A2 = lltype.Array(('a', lltype.Signed), ('b', lltype.Signed), hints={'nolength': True}) p = lltype.malloc(A2, 10, flavor='raw', immortal=True, zero=True) @@ -90,6 +94,3 @@ assert res == 66 res = self.interp_operations(f, [2, 2], disable_optimizations=True) assert res == 44 - -class TestRawMem(RawMemTests, LLJitMixin): - pass From noreply at buildbot.pypy.org Sat Jan 4 00:51:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 4 Jan 2014 00:51:41 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: add docs, cleanup and rearrange. do the commutative check when building Message-ID: <20140103235141.463781C0459@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68582:dbe309c00c2a Date: 2014-01-03 15:50 -0800 http://bitbucket.org/pypy/pypy/changeset/dbe309c00c2a/ Log: add docs, cleanup and rearrange. 
do the commutative check when building smalllong's descriptors diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -23,6 +23,8 @@ from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import newformat +from pypy.objspace.std.model import ( + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT) from pypy.objspace.std.stdtypedef import StdTypeDef @@ -31,8 +33,163 @@ __slots__ = () def int(self, space): + """x.__int__() <==> int(x)""" raise NotImplementedError + def descr_coerce(self, space, w_other): + """x.__coerce__(y) <==> coerce(x, y)""" + if not isinstance(w_other, W_AbstractIntObject): + return space.w_NotImplemented + return space.newtuple([self, w_other]) + + def descr_long(self, space): + """x.__long__() <==> long(x)""" + from pypy.objspace.std.longobject import W_LongObject + return W_LongObject.fromint(space, self.int_w(space)) + + def descr_hash(self, space): + """x.__hash__() <==> hash(x)""" + # unlike CPython, we don't special-case the value -1 in most of + # our hash functions, so there is not much sense special-casing + # it here either. Make sure this is consistent with the hash of + # floats and longs. + return self.int(space) + + def descr_nonzero(self, space): + """x.__nonzero__() <==> x != 0""" + return space.newbool(space.int_w(self) != 0) + + def descr_invert(self, space): + """x.__invert__() <==> ~x""" + return wrapint(space, ~space.int_w(self)) + + def descr_pos(self, space): + """x.__pos__() <==> +x""" + return self.int(space) + descr_trunc = func_with_new_name(descr_pos, 'descr_trunc') + descr_trunc.__doc__ = 'Truncating an Integral returns itself.' + + def descr_neg(self, space): + """x.__neg__() <==> -x""" + a = space.int_w(self) + try: + x = ovfcheck(-a) + except OverflowError: + if _recover_with_smalllong(space): + from pypy.objspace.std.smalllongobject import neg_ovr + return neg_ovr(space, self) + return self.descr_long(space).descr_neg(space) + return wrapint(space, x) + + def descr_abs(self, space): + """x.__abs__() <==> abs(x)""" + pos = space.int_w(self) >= 0 + return self.int(space) if pos else self.descr_neg(space) + + def descr_index(self, space): + """x[y:z] <==> x[y.__index__():z.__index__()]""" + return self.int(space) + + def descr_float(self, space): + """x.__float__() <==> float(x)""" + a = space.int_w(self) + x = float(a) + return space.newfloat(x) + + def descr_oct(self, space): + """x.__oct__() <==> oct(x)""" + return space.wrap(oct(space.int_w(self))) + + def descr_hex(self, space): + """x.__hex__() <==> hex(x)""" + return space.wrap(hex(space.int_w(self))) + + def descr_getnewargs(self, space): + return space.newtuple([wrapint(space, space.int_w(self))]) + + def descr_conjugate(self, space): + """Returns self, the complex conjugate of any int.""" + return space.int(self) + + def descr_bit_length(self, space): + """int.bit_length() -> int + + Number of bits necessary to represent self in binary. 
+ >>> bin(37) + '0b100101' + >>> (37).bit_length() + 6 + """ + val = space.int_w(self) + if val < 0: + val = -val + bits = 0 + while val: + bits += 1 + val >>= 1 + return space.wrap(bits) + + def descr_repr(self, space): + """x.__repr__() <==> repr(x)""" + res = str(self.int_w(space)) + return space.wrap(res) + descr_str = func_with_new_name(descr_repr, 'descr_str') + descr_str.__doc__ = "x.__str__() <==> str(x)" + + def descr_format(self, space, w_format_spec): + return newformat.run_formatter(space, w_format_spec, + "format_int_or_long", self, + newformat.INT_KIND) + + def descr_get_denominator(self, space): + return space.wrap(1) + + def descr_get_imag(self, space): + return space.wrap(0) + + descr_get_numerator = descr_get_real = descr_conjugate + + @unwrap_spec(w_modulus=WrappedDefault(None)) + def descr_pow(self, space, w_exponent, w_modulus=None): + """x.__pow__(y[, z]) <==> pow(x, y[, z])""" + if not isinstance(w_exponent, W_AbstractIntObject): + return space.w_NotImplemented + + if space.is_none(w_modulus): + z = 0 + elif isinstance(w_modulus, W_AbstractIntObject): + z = space.int_w(w_modulus) + if z == 0: + raise operationerrfmt(space.w_ValueError, + "pow() 3rd argument cannot be 0") + else: + # can't return NotImplemented (space.pow doesn't do full + # ternary, i.e. w_modulus.__zpow__(self, w_exponent)), so + # handle it ourselves + return self._ovfpow2long(space, w_exponent, w_modulus) + + x = space.int_w(self) + y = space.int_w(w_exponent) + try: + result = _pow_impl(space, x, y, z) + except (OverflowError, ValueError): + return self._ovfpow2long(space, w_exponent, w_modulus) + return space.wrap(result) + + @unwrap_spec(w_modulus=WrappedDefault(None)) + def descr_rpow(self, space, w_base, w_modulus=None): + """y.__rpow__(x[, z]) <==> pow(x, y[, z])""" + if not isinstance(w_base, W_AbstractIntObject): + return space.w_NotImplemented + return w_base.descr_pow(space, self, w_modulus) + + def _ovfpow2long(self, space, w_exponent, w_modulus): + if space.is_none(w_modulus) and _recover_with_smalllong(space): + from pypy.objspace.std.smalllongobject import pow_ovr + return pow_ovr(space, self, w_exponent) + self = self.descr_long(space) + return self.descr_pow(space, w_exponent, w_modulus) + def _make_descr_cmp(opname): op = getattr(operator, opname) @func_renamer('descr_' + opname) @@ -42,6 +199,7 @@ i = space.int_w(self) j = space.int_w(w_other) return space.newbool(op(i, j)) + descr_cmp.__doc__ = 'x.__%s__(y) <==> x%sy' % (opname, CMP_OPS[opname]) return descr_cmp descr_lt = _make_descr_cmp('lt') @@ -54,7 +212,9 @@ def _make_generic_descr_binop(opname, ovf=True): op = getattr(operator, opname + '_' if opname in ('and', 'or') else opname) - commutative = opname in ('add', 'mul', 'and', 'or', 'xor') + oper = BINARY_OPS.get(opname) + doc = "x.__%s__(y) <==> x%sy" % (opname, oper) + rdoc = "x.__r%s__(y) <==> y%sx" % (opname, oper) @func_renamer('descr_' + opname) def descr_binop(self, space, w_other): @@ -67,43 +227,47 @@ try: z = ovfcheck(op(x, y)) except OverflowError: - return ovf2long(space, opname, self, w_other) + return _ovf2long(space, opname, self, w_other) else: z = op(x, y) return wrapint(space, z) + descr_binop.__doc__ = doc - if commutative: - return descr_binop, func_with_new_name(descr_binop, - 'descr_r' + opname) + if opname in COMMUTATIVE_OPS: + descr_rbinop = func_with_new_name(descr_binop, 'descr_r' + opname) + else: + @func_renamer('descr_r' + opname) + def descr_rbinop(self, space, w_other): + if not isinstance(w_other, W_AbstractIntObject): + return 
space.w_NotImplemented - @func_renamer('descr_r' + opname) - def descr_rbinop(self, space, w_other): - if not isinstance(w_other, W_AbstractIntObject): - return space.w_NotImplemented - - x = space.int_w(self) - y = space.int_w(w_other) - if ovf: - try: - z = ovfcheck(op(y, x)) - except OverflowError: - return ovf2long(space, opname, w_other, self) - else: - z = op(y, x) - return wrapint(space, z) + x = space.int_w(self) + y = space.int_w(w_other) + if ovf: + try: + z = ovfcheck(op(y, x)) + except OverflowError: + return _ovf2long(space, opname, w_other, self) + else: + z = op(y, x) + return wrapint(space, z) + descr_rbinop.__doc__ = rdoc return descr_binop, descr_rbinop descr_add, descr_radd = _make_generic_descr_binop('add') descr_sub, descr_rsub = _make_generic_descr_binop('sub') descr_mul, descr_rmul = _make_generic_descr_binop('mul') - descr_and, descr_rand = _make_generic_descr_binop('and', ovf=False) descr_or, descr_ror = _make_generic_descr_binop('or', ovf=False) descr_xor, descr_rxor = _make_generic_descr_binop('xor', ovf=False) def _make_descr_binop(func): opname = func.__name__[1:] + oper = BINARY_OPS.get(opname) + if oper == '%': + oper = '%%' + oper = '%s(%%s, %%s)' % opname if not oper else '%%s%s%%s' % oper @func_renamer('descr_' + opname) def descr_binop(self, space, w_other): @@ -112,7 +276,9 @@ try: return func(self, space, w_other) except OverflowError: - return ovf2long(space, opname, self, w_other) + return _ovf2long(space, opname, self, w_other) + descr_binop.__doc__ = "x.__%s__(y) <==> %s" % (opname, + oper % ('x', 'y')) @func_renamer('descr_r' + opname) def descr_rbinop(self, space, w_other): @@ -121,7 +287,9 @@ try: return func(w_other, space, self) except OverflowError: - return ovf2long(space, opname, w_other, self) + return _ovf2long(space, opname, w_other, self) + descr_rbinop.__doc__ = "x.__r%s__(y) <==> %s" % (opname, + oper % ('y', 'x')) return descr_binop, descr_rbinop @@ -181,10 +349,10 @@ return wrapint(space, c) if b < 0: raise operationerrfmt(space.w_ValueError, "negative shift count") - else: # b >= LONG_BIT - if a == 0: - return self.int(space) - raise OverflowError + # b >= LONG_BIT + if a == 0: + return self.int(space) + raise OverflowError descr_lshift, descr_rlshift = _make_descr_binop(_lshift) def _rshift(self, space, w_other): @@ -203,143 +371,6 @@ return wrapint(space, a) descr_rshift, descr_rrshift = _make_descr_binop(_rshift) - @unwrap_spec(w_modulus=WrappedDefault(None)) - def descr_pow(self, space, w_exponent, w_modulus=None): - if not isinstance(w_exponent, W_AbstractIntObject): - return space.w_NotImplemented - - if space.is_none(w_modulus): - z = 0 - elif isinstance(w_modulus, W_AbstractIntObject): - z = space.int_w(w_modulus) - if z == 0: - raise operationerrfmt(space.w_ValueError, - "pow() 3rd argument cannot be 0") - else: - # can't return NotImplemented (space.pow doesn't do full - # ternary, i.e. 
w_modulus.__zpow__(self, w_exponent)), so - # handle it ourselves - return self._ovfpow2long(space, w_exponent, w_modulus) - - x = space.int_w(self) - y = space.int_w(w_exponent) - try: - result = _pow_impl(space, x, y, z) - except (OverflowError, ValueError): - return self._ovfpow2long(space, w_exponent, w_modulus) - return space.wrap(result) - - @unwrap_spec(w_modulus=WrappedDefault(None)) - def descr_rpow(self, space, w_base, w_modulus=None): - if not isinstance(w_base, W_AbstractIntObject): - return space.w_NotImplemented - return w_base.descr_pow(space, self, w_modulus) - - def _ovfpow2long(self, space, w_exponent, w_modulus): - if space.is_none(w_modulus) and recover_with_smalllong(space): - from pypy.objspace.std.smalllongobject import pow_ovr - return pow_ovr(space, self, w_exponent) - self = self.descr_long(space) - return self.descr_pow(space, w_exponent, w_modulus) - - def descr_coerce(self, space, w_other): - if not isinstance(w_other, W_AbstractIntObject): - return space.w_NotImplemented - return space.newtuple([self, w_other]) - - def descr_long(self, space): - from pypy.objspace.std.longobject import W_LongObject - return W_LongObject.fromint(space, self.int_w(space)) - - def descr_hash(self, space): - # unlike CPython, we don't special-case the value -1 in most of - # our hash functions, so there is not much sense special-casing - # it here either. Make sure this is consistent with the hash of - # floats and longs. - return self.int(space) - - def descr_nonzero(self, space): - return space.newbool(space.int_w(self) != 0) - - def descr_invert(self, space): - return wrapint(space, ~space.int_w(self)) - - def descr_pos(self, space): - return self.int(space) - descr_trunc = func_with_new_name(descr_pos, 'descr_trunc') - - def descr_neg(self, space): - a = space.int_w(self) - try: - x = ovfcheck(-a) - except OverflowError: - if recover_with_smalllong(space): - from pypy.objspace.std.smalllongobject import neg_ovr - return neg_ovr(space, self) - return self.descr_long(space).descr_neg(space) - return wrapint(space, x) - - def descr_abs(self, space): - pos = space.int_w(self) >= 0 - return self.int(space) if pos else self.descr_neg(space) - - def descr_index(self, space): - return self.int(space) - - def descr_float(self, space): - a = space.int_w(self) - x = float(a) - return space.newfloat(x) - - def descr_oct(self, space): - return space.wrap(oct(space.int_w(self))) - - def descr_hex(self, space): - return space.wrap(hex(space.int_w(self))) - - def descr_getnewargs(self, space): - return space.newtuple([wrapint(space, space.int_w(self))]) - - def descr_conjugate(self, space): - """Returns self, the complex conjugate of any int.""" - return space.int(self) - - def descr_bit_length(self, space): - """int.bit_length() -> int - - Number of bits necessary to represent self in binary. 
- >>> bin(37) - '0b100101' - >>> (37).bit_length() - 6 - """ - val = space.int_w(self) - if val < 0: - val = -val - bits = 0 - while val: - bits += 1 - val >>= 1 - return space.wrap(bits) - - def descr_repr(self, space): - res = str(self.int_w(space)) - return space.wrap(res) - descr_str = descr_repr - - def descr_format(self, space, w_format_spec): - return newformat.run_formatter(space, w_format_spec, - "format_int_or_long", self, - newformat.INT_KIND) - - def descr_get_denominator(self, space): - return space.wrap(1) - - def descr_get_imag(self, space): - return space.wrap(0) - - descr_get_numerator = descr_get_real = descr_conjugate - class W_IntObject(W_AbstractIntObject): @@ -364,9 +395,8 @@ def immutable_unique_id(self, space): if self.user_overridden_class: return None - from pypy.objspace.std.model import IDTAG_INT as tag b = space.bigint_w(self) - b = b.lshift(3).or_(rbigint.fromint(tag)) + b = b.lshift(3).or_(rbigint.fromint(IDTAG_INT)) return space.newlong_from_rbigint(b) def int_w(self, space): @@ -397,7 +427,7 @@ return space.newint(a) -def recover_with_smalllong(space): +def _recover_with_smalllong(space): # True if there is a chance that a SmallLong would fit when an Int # does not return (space.config.objspace.std.withsmalllong and @@ -405,8 +435,8 @@ @specialize.arg(1) -def ovf2long(space, opname, self, w_other): - if recover_with_smalllong(space) and opname != 'truediv': +def _ovf2long(space, opname, self, w_other): + if _recover_with_smalllong(space) and opname != 'truediv': from pypy.objspace.std import smalllongobject op = getattr(smalllongobject, opname + '_ovr') return op(space, self, w_other) @@ -443,7 +473,6 @@ ix %= iz return ix -# ____________________________________________________________ def wrapint(space, x): if not space.config.objspace.std.withprebuiltint: @@ -465,10 +494,9 @@ w_res.intval = x return w_res -# ____________________________________________________________ @jit.elidable -def string_to_int_or_long(space, string, base=10): +def _string_to_int_or_long(space, string, base=10): w_longval = None value = 0 try: @@ -476,10 +504,11 @@ except ParseStringError as e: raise OperationError(space.w_ValueError, space.wrap(e.msg)) except ParseStringOverflowError as e: - w_longval = retry_to_w_long(space, e.parser) + w_longval = _retry_to_w_long(space, e.parser) return value, w_longval -def retry_to_w_long(space, parser): + +def _retry_to_w_long(space, parser): parser.rewind() try: bigint = rbigint._from_numberstring_parser(parser) @@ -487,6 +516,7 @@ raise OperationError(space.w_ValueError, space.wrap(e.msg)) return space.newlong_from_rbigint(bigint) + @unwrap_spec(w_x=WrappedDefault(0)) def descr__new__(space, w_inttype, w_x, w_base=None): w_longval = None @@ -512,12 +542,12 @@ # an overflowing long value = space.int_w(w_obj) elif space.isinstance_w(w_value, space.w_str): - value, w_longval = string_to_int_or_long(space, - space.str_w(w_value)) + value, w_longval = _string_to_int_or_long(space, + space.str_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w string = unicode_to_decimal_w(space, w_value) - value, w_longval = string_to_int_or_long(space, string) + value, w_longval = _string_to_int_or_long(space, string) else: # If object supports the buffer interface try: @@ -530,7 +560,7 @@ w_value) else: buf = space.interp_w(Buffer, w_buffer) - value, w_longval = string_to_int_or_long(space, buf.as_str()) + value, w_longval = _string_to_int_or_long(space, buf.as_str()) ok = True else: 
base = space.int_w(w_base) @@ -546,7 +576,7 @@ "int() can't convert non-string with " "explicit base") - value, w_longval = string_to_int_or_long(space, s, base) + value, w_longval = _string_to_int_or_long(space, s, base) if w_longval is not None: if not space.is_w(w_inttype, space.w_int): @@ -561,35 +591,67 @@ W_IntObject.__init__(w_obj, value) return w_obj -# ____________________________________________________________ - W_AbstractIntObject.typedef = StdTypeDef("int", - __doc__ = """int(x[, base]) -> integer + __doc__ = """int(x=0) -> int or long +int(x, base=10) -> int or long -Convert a string or number to an integer, if possible. A floating point -argument will be truncated towards zero (this does not include a string -representation of a floating point number!) When converting a string, use -the optional base. It is an error to supply a base when converting a -non-string. If the argument is outside the integer range a long object -will be returned instead.""", +Convert a number or string to an integer, or return 0 if no arguments +are given. If x is floating point, the conversion truncates towards zero. +If x is outside the integer range, the function returns a long instead. + +If x is not a number or if base is given, then x must be a string or +Unicode object representing an integer literal in the given base. The +literal can be preceded by '+' or '-' and be surrounded by whitespace. +The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to +interpret the base from the string as an integer literal. +>>> int('0b100', base=0) +4""", __new__ = interp2app(descr__new__), numerator = typedef.GetSetProperty( - W_AbstractIntObject.descr_get_numerator), + W_AbstractIntObject.descr_get_numerator, + doc="the numerator of a rational number in lowest terms"), denominator = typedef.GetSetProperty( - W_AbstractIntObject.descr_get_denominator), - real = typedef.GetSetProperty(W_AbstractIntObject.descr_get_real), - imag = typedef.GetSetProperty(W_AbstractIntObject.descr_get_imag), + W_AbstractIntObject.descr_get_denominator, + doc="the denominator of a rational number in lowest terms"), + real = typedef.GetSetProperty( + W_AbstractIntObject.descr_get_real, + doc="the real part of a complex number"), + imag = typedef.GetSetProperty( + W_AbstractIntObject.descr_get_imag, + doc="the imaginary part of a complex number"), + + __repr__ = interp2app(W_AbstractIntObject.descr_repr), + __str__ = interp2app(W_AbstractIntObject.descr_str), + conjugate = interpindirect2app(W_AbstractIntObject.descr_conjugate), bit_length = interpindirect2app(W_AbstractIntObject.descr_bit_length), + __format__ = interpindirect2app(W_AbstractIntObject.descr_format), + __hash__ = interpindirect2app(W_AbstractIntObject.descr_hash), + __coerce__ = interpindirect2app(W_AbstractIntObject.descr_coerce), + __oct__ = interpindirect2app(W_AbstractIntObject.descr_oct), + __hex__ = interpindirect2app(W_AbstractIntObject.descr_hex), + __getnewargs__ = interpindirect2app(W_AbstractIntObject.descr_getnewargs), __int__ = interpindirect2app(W_AbstractIntObject.int), __long__ = interpindirect2app(W_AbstractIntObject.descr_long), + __index__ = interpindirect2app(W_AbstractIntObject.descr_index), + __trunc__ = interpindirect2app(W_AbstractIntObject.descr_trunc), + __float__ = interpindirect2app(W_AbstractIntObject.descr_float), - __format__ = interpindirect2app(W_AbstractIntObject.descr_format), - __hash__ = interpindirect2app(W_AbstractIntObject.descr_hash), - __coerce__ = interpindirect2app(W_AbstractIntObject.descr_coerce), + 
__pos__ = interpindirect2app(W_AbstractIntObject.descr_pos), + __neg__ = interpindirect2app(W_AbstractIntObject.descr_neg), + __abs__ = interpindirect2app(W_AbstractIntObject.descr_abs), + __nonzero__ = interpindirect2app(W_AbstractIntObject.descr_nonzero), + __invert__ = interpindirect2app(W_AbstractIntObject.descr_invert), + + __lt__ = interpindirect2app(W_AbstractIntObject.descr_lt), + __le__ = interpindirect2app(W_AbstractIntObject.descr_le), + __eq__ = interpindirect2app(W_AbstractIntObject.descr_eq), + __ne__ = interpindirect2app(W_AbstractIntObject.descr_ne), + __gt__ = interpindirect2app(W_AbstractIntObject.descr_gt), + __ge__ = interpindirect2app(W_AbstractIntObject.descr_ge), __add__ = interpindirect2app(W_AbstractIntObject.descr_add), __radd__ = interpindirect2app(W_AbstractIntObject.descr_radd), @@ -597,12 +659,18 @@ __rsub__ = interpindirect2app(W_AbstractIntObject.descr_rsub), __mul__ = interpindirect2app(W_AbstractIntObject.descr_mul), __rmul__ = interpindirect2app(W_AbstractIntObject.descr_rmul), - __lt__ = interpindirect2app(W_AbstractIntObject.descr_lt), - __le__ = interpindirect2app(W_AbstractIntObject.descr_le), - __eq__ = interpindirect2app(W_AbstractIntObject.descr_eq), - __ne__ = interpindirect2app(W_AbstractIntObject.descr_ne), - __gt__ = interpindirect2app(W_AbstractIntObject.descr_gt), - __ge__ = interpindirect2app(W_AbstractIntObject.descr_ge), + + __and__ = interpindirect2app(W_AbstractIntObject.descr_and), + __rand__ = interpindirect2app(W_AbstractIntObject.descr_rand), + __or__ = interpindirect2app(W_AbstractIntObject.descr_or), + __ror__ = interpindirect2app(W_AbstractIntObject.descr_ror), + __xor__ = interpindirect2app(W_AbstractIntObject.descr_xor), + __rxor__ = interpindirect2app(W_AbstractIntObject.descr_rxor), + + __lshift__ = interpindirect2app(W_AbstractIntObject.descr_lshift), + __rlshift__ = interpindirect2app(W_AbstractIntObject.descr_rlshift), + __rshift__ = interpindirect2app(W_AbstractIntObject.descr_rshift), + __rrshift__ = interpindirect2app(W_AbstractIntObject.descr_rrshift), __floordiv__ = interpindirect2app(W_AbstractIntObject.descr_floordiv), __rfloordiv__ = interpindirect2app(W_AbstractIntObject.descr_rfloordiv), @@ -615,32 +683,6 @@ __divmod__ = interpindirect2app(W_AbstractIntObject.descr_divmod), __rdivmod__ = interpindirect2app(W_AbstractIntObject.descr_rdivmod), - __lshift__ = interpindirect2app(W_AbstractIntObject.descr_lshift), - __rlshift__ = interpindirect2app(W_AbstractIntObject.descr_rlshift), - __rshift__ = interpindirect2app(W_AbstractIntObject.descr_rshift), - __rrshift__ = interpindirect2app(W_AbstractIntObject.descr_rrshift), - __pow__ = interpindirect2app(W_AbstractIntObject.descr_pow), __rpow__ = interpindirect2app(W_AbstractIntObject.descr_rpow), - __neg__ = interpindirect2app(W_AbstractIntObject.descr_neg), - __abs__ = interpindirect2app(W_AbstractIntObject.descr_abs), - __nonzero__ = interpindirect2app(W_AbstractIntObject.descr_nonzero), - __invert__ = interpindirect2app(W_AbstractIntObject.descr_invert), - __and__ = interpindirect2app(W_AbstractIntObject.descr_and), - __rand__ = interpindirect2app(W_AbstractIntObject.descr_rand), - __xor__ = interpindirect2app(W_AbstractIntObject.descr_xor), - __rxor__ = interpindirect2app(W_AbstractIntObject.descr_rxor), - __or__ = interpindirect2app(W_AbstractIntObject.descr_or), - __ror__ = interpindirect2app(W_AbstractIntObject.descr_ror), - - __pos__ = interpindirect2app(W_AbstractIntObject.descr_pos), - __trunc__ = interpindirect2app(W_AbstractIntObject.descr_trunc), - __index__ 
= interpindirect2app(W_AbstractIntObject.descr_index), - __float__ = interpindirect2app(W_AbstractIntObject.descr_float), - __oct__ = interpindirect2app(W_AbstractIntObject.descr_oct), - __hex__ = interpindirect2app(W_AbstractIntObject.descr_hex), - __getnewargs__ = interpindirect2app(W_AbstractIntObject.descr_getnewargs), - - __repr__ = interp2app(W_AbstractIntObject.descr_repr), - __str__ = interp2app(W_AbstractIntObject.descr_str), ) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -14,6 +14,8 @@ WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import newformat from pypy.objspace.std.intobject import W_AbstractIntObject +from pypy.objspace.std.model import ( + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG) from pypy.objspace.std.stdtypedef import StdTypeDef @@ -42,9 +44,8 @@ def immutable_unique_id(self, space): if self.user_overridden_class: return None - from pypy.objspace.std.model import IDTAG_LONG as tag b = space.bigint_w(self) - b = b.lshift(3).or_(rbigint.fromint(tag)) + b = b.lshift(3).or_(rbigint.fromint(IDTAG_LONG)) return space.newlong_from_rbigint(b) def unwrap(self, space): @@ -56,85 +57,22 @@ def asbigint(self): raise NotImplementedError - # XXX: cleanup, docstrings etc - def descr_long(self, space): - raise NotImplementedError - descr_index = func_with_new_name(descr_long, 'descr_index') - descr_trunc = func_with_new_name(descr_long, 'descr_trunc') - descr_pos = func_with_new_name(descr_long, 'descr_pos') - - descr_float = func_with_new_name(descr_long, 'descr_float') - descr_neg = func_with_new_name(descr_long, 'descr_neg') - descr_pos = func_with_new_name(descr_long, 'descr_pos') - descr_abs = func_with_new_name(descr_long, 'descr_abs') - descr_nonzero = func_with_new_name(descr_long, 'descr_nonzero') - descr_invert = func_with_new_name(descr_long, 'descr_invert') - - def descr_lt(self, space, w_other): - raise NotImplementedError - descr_le = func_with_new_name(descr_lt, 'descr_le') - descr_eq = func_with_new_name(descr_lt, 'descr_eq') - descr_ne = func_with_new_name(descr_lt, 'descr_ne') - descr_gt = func_with_new_name(descr_lt, 'descr_gt') - descr_ge = func_with_new_name(descr_lt, 'descr_ge') - - descr_add = func_with_new_name(descr_lt, 'descr_add') - descr_radd = func_with_new_name(descr_lt, 'descr_radd') - descr_sub = func_with_new_name(descr_lt, 'descr_sub') - descr_rsub = func_with_new_name(descr_lt, 'descr_rsub') - descr_mul = func_with_new_name(descr_lt, 'descr_mul') - descr_rmul = func_with_new_name(descr_lt, 'descr_rmul') - - descr_and = func_with_new_name(descr_lt, 'descr_and') - descr_rand = func_with_new_name(descr_lt, 'descr_rand') - descr_or = func_with_new_name(descr_lt, 'descr_or') - descr_ror = func_with_new_name(descr_lt, 'descr_ror') - descr_xor = func_with_new_name(descr_lt, 'descr_xor') - descr_rxor = func_with_new_name(descr_lt, 'descr_rxor') - - descr_lshift = func_with_new_name(descr_lt, 'descr_lshift') - descr_rlshift = func_with_new_name(descr_lt, 'descr_rlshift') - descr_rshift = func_with_new_name(descr_lt, 'descr_rshift') - descr_rrshift = func_with_new_name(descr_lt, 'descr_rrshift') - - descr_floordiv = func_with_new_name(descr_lt, 'descr_floordiv') - descr_rfloordiv = func_with_new_name(descr_lt, 'descr_rfloordiv') - descr_div = func_with_new_name(descr_lt, 'descr_div') - descr_rdiv = func_with_new_name(descr_lt, 'descr_rdiv') - descr_mod = func_with_new_name(descr_lt, 'descr_mod') - 
descr_rmod = func_with_new_name(descr_lt, 'descr_rmod') - descr_divmod = func_with_new_name(descr_lt, 'descr_divmod') - descr_rdivmod = func_with_new_name(descr_lt, 'descr_rdivmod') - - def descr_pow(self, space, w_exponent, w_modulus=None): - raise NotImplementedError - descr_rpow = func_with_new_name(descr_pow, 'descr_rpow') - - def descr_format(self, space, w_format_spec): - return newformat.run_formatter(space, w_format_spec, - "format_int_or_long", self, - newformat.LONG_KIND) - - def _make_descr_unaryop(opname): - op = getattr(rbigint, opname) - @func_renamer('descr_' + opname) - def descr_unaryop(self, space): - return space.wrap(op(self.asbigint())) - return descr_unaryop - - descr_repr = _make_descr_unaryop('repr') - descr_str = _make_descr_unaryop('str') - descr_hash = _make_descr_unaryop('hash') - descr_oct = _make_descr_unaryop('oct') - descr_hex = _make_descr_unaryop('hex') - def descr_getnewargs(self, space): - return space.newtuple([W_LongObject(self.asbigint())]) + return space.newtuple([newlong(space, self.asbigint())]) def descr_conjugate(self, space): + """Returns self, the complex conjugate of any long.""" return space.long(self) def descr_bit_length(self, space): + """long.bit_length() -> int or long + + Number of bits necessary to represent self in binary. + >>> bin(37L) + '0b100101' + >>> (37L).bit_length() + 6 + """ bigint = space.bigint_w(self) try: return space.wrap(bigint.bit_length()) @@ -155,26 +93,118 @@ @delegate_other def descr_truediv(self, space, w_other): + """x.__truediv__(y) <==> x/y""" return W_AbstractLongObject._truediv(self, space, w_other) @delegate_other def descr_rtruediv(self, space, w_other): + """x.__rtruediv__(y) <==> y/x""" return W_AbstractLongObject._truediv(w_other, space, self) @delegate_other def descr_coerce(self, space, w_other): + """x.__coerce__(y) <==> coerce(x, y)""" return space.newtuple([self, w_other]) def descr_get_numerator(self, space): return space.long(self) descr_get_real = func_with_new_name(descr_get_numerator, 'descr_get_real') + def descr_format(self, space, w_format_spec): + return newformat.run_formatter(space, w_format_spec, + "format_int_or_long", self, + newformat.LONG_KIND) + def descr_get_denominator(self, space): return space.newlong(1) def descr_get_imag(self, space): return space.newlong(0) + def _make_descr_unaryop(opname): + op = getattr(rbigint, opname) + @func_renamer('descr_' + opname) + def descr_unaryop(self, space): + return space.wrap(op(self.asbigint())) + descr_unaryop.__doc__ = 'x.__%s__(y) <==> %s(x, y)' % (opname, opname) + return descr_unaryop + + descr_repr = _make_descr_unaryop('repr') + descr_str = _make_descr_unaryop('str') + descr_hash = _make_descr_unaryop('hash') + descr_oct = _make_descr_unaryop('oct') + descr_hex = _make_descr_unaryop('hex') + + def descr_pow(self, space, w_exponent, w_modulus=None): + """x.__pow__(y[, z]) <==> pow(x, y[, z])""" + raise NotImplementedError + descr_rpow = func_with_new_name(descr_pow, 'descr_rpow') + descr_rpow.__doc__ = "y.__rpow__(x[, z]) <==> pow(x, y[, z])" + + def _abstract_unaryop(opname, doc=None): + @func_renamer('descr_' + opname) + def descr_unaryop(self, space): + raise NotImplementedError + descr_unaryop.__doc__ = doc + return descr_unaryop + + descr_long = _abstract_unaryop('long', "x.__long__() <==> long(x)") + descr_float = _abstract_unaryop('float', "x.__float__() <==> float(x)") + descr_index = _abstract_unaryop( + 'index', "x[y:z] <==> x[y.__index__():z.__index__()]") + descr_trunc = _abstract_unaryop('trunc', + "Truncating an 
Integral returns itself.") + descr_pos = _abstract_unaryop('pos', "x.__pos__() <==> +x") + descr_neg = _abstract_unaryop('neg', "x.__neg__() <==> -x") + descr_abs = _abstract_unaryop('abs', "x.__abs__() <==> abs(x)") + descr_nonzero = _abstract_unaryop('nonzero', "x.__nonzero__() <==> x != 0") + descr_invert = _abstract_unaryop('invert', "x.__invert__() <==> ~x") + + def _abstract_cmpop(opname): + @func_renamer('descr_' + opname) + def descr_cmp(self, space, w_other): + raise NotImplementedError + descr_cmp.__doc__ = 'x.__%s__(y) <==> x%sy' % (opname, CMP_OPS[opname]) + return descr_cmp + + descr_lt = _abstract_cmpop('lt') + descr_le = _abstract_cmpop('le') + descr_eq = _abstract_cmpop('eq') + descr_ne = _abstract_cmpop('ne') + descr_gt = _abstract_cmpop('gt') + descr_ge = _abstract_cmpop('ge') + + def _abstract_binop(opname): + oper = BINARY_OPS.get(opname) + if oper == '%': + oper = '%%' + oper = '%s(%%s, %%s)' % opname if not oper else '%%s%s%%s' % oper + @func_renamer('descr_' + opname) + def descr_binop(self, space, w_other): + raise NotImplementedError + descr_binop.__doc__ = "x.__%s__(y) <==> %s" % (opname, + oper % ('x', 'y')) + descr_rbinop = func_with_new_name(descr_binop, 'descr_r' + opname) + descr_rbinop.__doc__ = "x.__r%s__(y) <==> %s" % (opname, + oper % ('y', 'x')) + return descr_binop, descr_rbinop + + descr_add, descr_radd = _abstract_binop('add') + descr_sub, descr_rsub = _abstract_binop('sub') + descr_mul, descr_rmul = _abstract_binop('mul') + + descr_and, descr_rand = _abstract_binop('and') + descr_or, descr_ror = _abstract_binop('or') + descr_xor, descr_rxor = _abstract_binop('xor') + + descr_lshift, descr_rlshift = _abstract_binop('lshift') + descr_rshift, descr_rrshift = _abstract_binop('rshift') + + descr_floordiv, descr_rfloordiv = _abstract_binop('floordiv') + descr_div, descr_rdiv = _abstract_binop('div') + descr_mod, descr_rmod = _abstract_binop('mod') + descr_divmod, descr_rdivmod = _abstract_binop('divmod') + class W_LongObject(W_AbstractLongObject): """This is a wrapper of rbigint.""" @@ -265,6 +295,57 @@ def descr_float(self, space): return space.newfloat(self.tofloat(space)) + def descr_nonzero(self, space): + return space.newbool(self.num.tobool()) + + @unwrap_spec(w_modulus=WrappedDefault(None)) + def descr_pow(self, space, w_exponent, w_modulus=None): + if isinstance(w_exponent, W_AbstractIntObject): + w_exponent = w_exponent.descr_long(space) + elif not isinstance(w_exponent, W_AbstractLongObject): + return space.w_NotImplemented + + if space.is_none(w_modulus): + if w_exponent.asbigint().sign < 0: + self = self.descr_float(space) + w_exponent = w_exponent.descr_float(space) + return space.pow(self, w_exponent, space.w_None) + return W_LongObject(self.num.pow(w_exponent.asbigint())) + elif isinstance(w_modulus, W_AbstractIntObject): + w_modulus = w_modulus.descr_long(space) + elif not isinstance(w_modulus, W_AbstractLongObject): + return space.w_NotImplemented + + if w_exponent.asbigint().sign < 0: + raise operationerrfmt(space.w_TypeError, + "pow() 2nd argument cannot be negative when " + "3rd argument specified") + try: + result = self.num.pow(w_exponent.asbigint(), w_modulus.asbigint()) + except ValueError: + raise operationerrfmt(space.w_ValueError, + "pow 3rd argument cannot be 0") + return W_LongObject(result) + + @unwrap_spec(w_modulus=WrappedDefault(None)) + def descr_rpow(self, space, w_base, w_modulus=None): + if isinstance(w_base, W_AbstractIntObject): + w_base = w_base.descr_long(space) + elif not isinstance(w_base, W_AbstractLongObject): + 
return space.w_NotImplemented + return w_base.descr_pow(space, self, w_modulus) + + def _make_descr_unaryop(opname): + op = getattr(rbigint, opname) + @func_renamer('descr_' + opname) + def descr_unaryop(self, space): + return W_LongObject(op(self.num)) + return descr_unaryop + + descr_neg = _make_descr_unaryop('neg') + descr_abs = _make_descr_unaryop('abs') + descr_invert = _make_descr_unaryop('invert') + def _make_descr_cmp(opname): op = getattr(rbigint, opname) @delegate_other @@ -290,11 +371,14 @@ def descr_binop(self, space, w_other): return W_LongObject(op(self.num, w_other.asbigint())) - @func_renamer('descr_r' + opname) - @delegate_other - def descr_rbinop(self, space, w_other): - # XXX: delegate, for --objspace-std-withsmalllong - return W_LongObject(op(w_other.asbigint(), self.num)) + if opname in COMMUTATIVE_OPS: + descr_rbinop = func_with_new_name(descr_binop, 'descr_r' + opname) + else: + @func_renamer('descr_r' + opname) + @delegate_other + def descr_rbinop(self, space, w_other): + # XXX: delegate, for --objspace-std-withsmalllong + return W_LongObject(op(w_other.asbigint(), self.num)) return descr_binop, descr_rbinop @@ -305,20 +389,6 @@ descr_or, descr_ror = _make_generic_descr_binop('or') descr_xor, descr_rxor = _make_generic_descr_binop('xor') - def _make_descr_unaryop(opname): - op = getattr(rbigint, opname) - @func_renamer('descr_' + opname) - def descr_unaryop(self, space): - return W_LongObject(op(self.num)) - return descr_unaryop - - descr_neg = _make_descr_unaryop('neg') - descr_abs = _make_descr_unaryop('abs') - descr_invert = _make_descr_unaryop('invert') - - def descr_nonzero(self, space): - return space.newbool(self.num.tobool()) - def _make_descr_binop(func): opname = func.__name__[1:] @@ -389,43 +459,6 @@ return space.newtuple([newlong(space, div), newlong(space, mod)]) descr_divmod, descr_rdivmod = _make_descr_binop(_divmod) - @unwrap_spec(w_modulus=WrappedDefault(None)) - def descr_pow(self, space, w_exponent, w_modulus=None): - if isinstance(w_exponent, W_AbstractIntObject): - w_exponent = w_exponent.descr_long(space) - elif not isinstance(w_exponent, W_AbstractLongObject): - return space.w_NotImplemented - - if space.is_none(w_modulus): - if w_exponent.asbigint().sign < 0: - self = self.descr_float(space) - w_exponent = w_exponent.descr_float(space) - return space.pow(self, w_exponent, space.w_None) - return W_LongObject(self.num.pow(w_exponent.asbigint())) - elif isinstance(w_modulus, W_AbstractIntObject): - w_modulus = w_modulus.descr_long(space) - elif not isinstance(w_modulus, W_AbstractLongObject): - return space.w_NotImplemented - - if w_exponent.asbigint().sign < 0: - raise operationerrfmt(space.w_TypeError, - "pow() 2nd argument cannot be negative when " - "3rd argument specified") - try: - result = self.num.pow(w_exponent.asbigint(), w_modulus.asbigint()) - except ValueError: - raise operationerrfmt(space.w_ValueError, - "pow 3rd argument cannot be 0") - return W_LongObject(result) - - @unwrap_spec(w_modulus=WrappedDefault(None)) - def descr_rpow(self, space, w_base, w_modulus=None): - if isinstance(w_base, W_AbstractIntObject): - w_base = w_base.descr_long(space) - elif not isinstance(w_base, W_AbstractLongObject): - return space.w_NotImplemented - return w_base.descr_pow(space, self, w_modulus) - def newlong(space, bigint): """Turn the bigint into a W_LongObject. 
If withsmalllong is @@ -471,11 +504,11 @@ w_obj = space.int(w_obj) return newbigint(space, w_longtype, space.bigint_w(w_obj)) elif space.isinstance_w(w_value, space.w_str): - return string_to_w_long(space, w_longtype, space.str_w(w_value)) + return _string_to_w_long(space, w_longtype, space.str_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w - return string_to_w_long(space, w_longtype, - unicode_to_decimal_w(space, w_value)) + return _string_to_w_long(space, w_longtype, + unicode_to_decimal_w(space, w_value)) else: try: w_buffer = space.buffer(w_value) @@ -487,7 +520,7 @@ w_value) else: buf = space.interp_w(Buffer, w_buffer) - return string_to_w_long(space, w_longtype, buf.as_str()) + return _string_to_w_long(space, w_longtype, buf.as_str()) else: base = space.int_w(w_base) @@ -501,16 +534,16 @@ raise operationerrfmt(space.w_TypeError, "long() can't convert non-string with " "explicit base") - return string_to_w_long(space, w_longtype, s, base) + return _string_to_w_long(space, w_longtype, s, base) -def string_to_w_long(space, w_longtype, s, base=10): +def _string_to_w_long(space, w_longtype, s, base=10): try: bigint = rbigint.fromstr(s, base) except ParseStringError as e: raise OperationError(space.w_ValueError, space.wrap(e.msg)) return newbigint(space, w_longtype, bigint) -string_to_w_long._dont_inline_ = True +_string_to_w_long._dont_inline_ = True def newbigint(space, w_longtype, bigint): @@ -534,23 +567,45 @@ W_AbstractLongObject.typedef = StdTypeDef("long", - __doc__ = """long(x[, base]) -> integer + __doc__ = """long(x=0) -> long +long(x, base=10) -> long -Convert a string or number to a long integer, if possible. A floating -point argument will be truncated towards zero (this does not include a -string representation of a floating point number!) When converting a -string, use the optional base. It is an error to supply a base when -converting a non-string.""", +Convert a number or string to a long integer, or return 0L if no arguments +are given. If x is floating point, the conversion truncates towards zero. + +If x is not a number or if base is given, then x must be a string or +Unicode object representing an integer literal in the given base. The +literal can be preceded by '+' or '-' and be surrounded by whitespace. +The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to +interpret the base from the string as an integer literal. 
+>>> int('0b100', base=0) +4L""", __new__ = interp2app(descr__new__), numerator = typedef.GetSetProperty( - W_AbstractLongObject.descr_get_numerator), + W_AbstractLongObject.descr_get_numerator, + doc="the numerator of a rational number in lowest terms"), denominator = typedef.GetSetProperty( - W_AbstractLongObject.descr_get_denominator), - real = typedef.GetSetProperty(W_AbstractLongObject.descr_get_real), - imag = typedef.GetSetProperty(W_AbstractLongObject.descr_get_imag), - conjugate = interp2app(W_AbstractLongObject.descr_conjugate), - bit_length = interp2app(W_AbstractLongObject.descr_bit_length), + W_AbstractLongObject.descr_get_denominator, + doc="the denominator of a rational number in lowest terms"), + real = typedef.GetSetProperty( + W_AbstractLongObject.descr_get_real, + doc="the real part of a complex number"), + imag = typedef.GetSetProperty( + W_AbstractLongObject.descr_get_imag, + doc="the imaginary part of a complex number"), + + __repr__ = interp2app(W_AbstractLongObject.descr_repr), + __str__ = interp2app(W_AbstractLongObject.descr_str), + + conjugate = interpindirect2app(W_AbstractLongObject.descr_conjugate), + bit_length = interpindirect2app(W_AbstractLongObject.descr_bit_length), + __format__ = interpindirect2app(W_AbstractLongObject.descr_format), + __hash__ = interpindirect2app(W_AbstractLongObject.descr_hash), + __coerce__ = interpindirect2app(W_AbstractLongObject.descr_coerce), + __oct__ = interpindirect2app(W_AbstractLongObject.descr_oct), + __hex__ = interpindirect2app(W_AbstractLongObject.descr_hex), + __getnewargs__ = interpindirect2app(W_AbstractLongObject.descr_getnewargs), __int__ = interpindirect2app(W_AbstractLongObject.int), __long__ = interpindirect2app(W_AbstractLongObject.descr_long), @@ -558,12 +613,11 @@ __trunc__ = interpindirect2app(W_AbstractLongObject.descr_trunc), __float__ = interpindirect2app(W_AbstractLongObject.descr_float), - __repr__ = interp2app(W_AbstractLongObject.descr_repr), - __str__ = interp2app(W_AbstractLongObject.descr_str), - __format__ = interp2app(W_AbstractLongObject.descr_format), - - __hash__ = interp2app(W_AbstractLongObject.descr_hash), - __coerce__ = interp2app(W_AbstractLongObject.descr_coerce), + __pos__ = interpindirect2app(W_AbstractLongObject.descr_pos), + __neg__ = interpindirect2app(W_AbstractLongObject.descr_neg), + __abs__ = interpindirect2app(W_AbstractLongObject.descr_abs), + __nonzero__ = interpindirect2app(W_AbstractLongObject.descr_nonzero), + __invert__ = interpindirect2app(W_AbstractLongObject.descr_invert), __lt__ = interpindirect2app(W_AbstractLongObject.descr_lt), __le__ = interpindirect2app(W_AbstractLongObject.descr_le), @@ -586,26 +640,17 @@ __xor__ = interpindirect2app(W_AbstractLongObject.descr_xor), __rxor__ = interpindirect2app(W_AbstractLongObject.descr_rxor), - __neg__ = interpindirect2app(W_AbstractLongObject.descr_neg), - __pos__ = interpindirect2app(W_AbstractLongObject.descr_pos), - __abs__ = interpindirect2app(W_AbstractLongObject.descr_abs), - __nonzero__ = interpindirect2app(W_AbstractLongObject.descr_nonzero), - __invert__ = interpindirect2app(W_AbstractLongObject.descr_invert), - - __oct__ = interp2app(W_AbstractLongObject.descr_oct), - __hex__ = interp2app(W_AbstractLongObject.descr_hex), - __lshift__ = interpindirect2app(W_AbstractLongObject.descr_lshift), __rlshift__ = interpindirect2app(W_AbstractLongObject.descr_rlshift), __rshift__ = interpindirect2app(W_AbstractLongObject.descr_rshift), __rrshift__ = interpindirect2app(W_AbstractLongObject.descr_rrshift), - __truediv__ = 
interp2app(W_AbstractLongObject.descr_truediv), - __rtruediv__ = interp2app(W_AbstractLongObject.descr_rtruediv), __floordiv__ = interpindirect2app(W_AbstractLongObject.descr_floordiv), __rfloordiv__ = interpindirect2app(W_AbstractLongObject.descr_rfloordiv), __div__ = interpindirect2app(W_AbstractLongObject.descr_div), __rdiv__ = interpindirect2app(W_AbstractLongObject.descr_rdiv), + __truediv__ = interpindirect2app(W_AbstractLongObject.descr_truediv), + __rtruediv__ = interpindirect2app(W_AbstractLongObject.descr_rtruediv), __mod__ = interpindirect2app(W_AbstractLongObject.descr_mod), __rmod__ = interpindirect2app(W_AbstractLongObject.descr_rmod), __divmod__ = interpindirect2app(W_AbstractLongObject.descr_divmod), @@ -613,6 +658,4 @@ __pow__ = interpindirect2app(W_AbstractLongObject.descr_pow), __rpow__ = interpindirect2app(W_AbstractLongObject.descr_rpow), - - __getnewargs__ = interp2app(W_AbstractLongObject.descr_getnewargs), ) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -232,8 +232,9 @@ return space.not_(function(space, w_2, w_1)) return op -OPERATORS = ['lt', 'le', 'eq', 'ne', 'gt', 'ge'] -OP_CORRESPONDANCES = [ + +CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') +CMP_CORRESPONDANCES = [ ('eq', 'ne', _op_negated), ('lt', 'gt', _op_swapped), ('le', 'ge', _op_swapped), @@ -242,22 +243,27 @@ ('lt', 'le', _op_swapped_negated), ('gt', 'ge', _op_swapped_negated), ] -for op1, op2, value in OP_CORRESPONDANCES[:]: - i = OP_CORRESPONDANCES.index((op1, op2, value)) - OP_CORRESPONDANCES.insert(i+1, (op2, op1, value)) +for op1, op2, value in CMP_CORRESPONDANCES[:]: + i = CMP_CORRESPONDANCES.index((op1, op2, value)) + CMP_CORRESPONDANCES.insert(i+1, (op2, op1, value)) +BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', + 'xor': '^'} +BINARY_OPS = dict(add='+', div='/', floordiv='//', mod='%', mul='*', sub='-', + truediv='/', **BINARY_BITWISE_OPS) +COMMUTATIVE_OPS = ('add', 'mul', 'and', 'or', 'xor') def add_extra_comparisons(): """ Add the missing comparison operators if they were not explicitly defined: eq <-> ne and lt <-> le <-> gt <-> ge. - We try to add them in the order defined by the OP_CORRESPONDANCES + We try to add them in the order defined by the CMP_CORRESPONDANCES table, thus favouring swapping the arguments over negating the result. 
""" originalentries = {} - for op in OPERATORS: + for op in CMP_OPS.iterkeys(): originalentries[op] = getattr(MM, op).signatures() - for op1, op2, correspondance in OP_CORRESPONDANCES: + for op1, op2, correspondance in CMP_CORRESPONDANCES: mirrorfunc = getattr(MM, op2) for types in originalentries[op1]: t1, t2 = types diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -13,6 +13,7 @@ from pypy.interpreter.gateway import WrappedDefault, unwrap_spec from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.longobject import W_AbstractLongObject, W_LongObject +from pypy.objspace.std.model import COMMUTATIVE_OPS # XXX: breaks translation #LONGLONG_MIN = r_longlong(-1 << (LONGLONG_BIT - 1)) @@ -83,6 +84,75 @@ def descr_float(self, space): return space.newfloat(float(self.longlong)) + def descr_neg(self, space): + a = self.longlong + try: + if a == r_longlong(-1 << (LONGLONG_BIT-1)): + raise OverflowError + x = -a + except OverflowError: + self = _small2long(space, self) + return self.descr_neg(space) + return W_SmallLongObject(x) + + def descr_abs(self, space): + return self if self.longlong >= 0 else self.descr_neg(space) + + def descr_nonzero(self, space): + return space.newbool(bool(self.longlong)) + + def descr_invert(self, space): + x = ~self.longlong + return W_SmallLongObject(x) + + @unwrap_spec(w_modulus=WrappedDefault(None)) + def descr_pow(self, space, w_exponent, w_modulus=None): + if isinstance(w_exponent, W_AbstractLongObject): + self = _small2long(space, self) + return self.descr_pow(space, w_exponent, w_modulus) + elif not isinstance(w_exponent, W_AbstractIntObject): + return space.w_NotImplemented + + if space.is_none(w_modulus): + try: + return _pow_impl(space, self.longlong, w_exponent, + r_longlong(0)) + except ValueError: + self = self.descr_float(space) + return space.pow(self, w_exponent, space.w_None) + except OverflowError: + self = _small2long(space, self) + return self.descr_pow(space, w_exponent, w_modulus) + elif isinstance(w_modulus, W_AbstractIntObject): + w_modulus = _int2small(space, w_modulus) + elif not isinstance(w_modulus, W_AbstractLongObject): + return space.w_NotImplemented + elif not isinstance(w_modulus, W_SmallLongObject): + self = _small2long(space, self) + return self.descr_pow(space, w_exponent, w_modulus) + + z = w_modulus.longlong + if z == 0: + raise operationerrfmt(space.w_ValueError, + "pow() 3rd argument cannot be 0") + try: + return _pow_impl(space, self.longlong, w_exponent, z) + except ValueError: + self = self.descr_float(space) + return space.pow(self, w_exponent, w_modulus) + except OverflowError: + self = _small2long(space, self) + return self.descr_pow(space, w_exponent, w_modulus) + + @unwrap_spec(w_modulus=WrappedDefault(None)) + def descr_rpow(self, space, w_base, w_modulus=None): + if isinstance(w_base, W_AbstractIntObject): + # Defer to w_base.descr_pow + w_base = _int2small(space, w_base) + elif not isinstance(w_base, W_AbstractLongObject): + return space.w_NotImplemented + return w_base.descr_pow(space, self, w_modulus) + def _make_descr_cmp(opname): op = getattr(operator, opname) bigint_op = getattr(rbigint, opname) @@ -108,42 +178,46 @@ def _make_descr_binop(func): opname = func.__name__[1:] - descr_name = 'descr_' + opname - descr_rname = 'descr_r' + opname + descr_name, descr_rname = 'descr_' + opname, 'descr_r' + opname + long_op = getattr(W_LongObject, descr_name) 
@func_renamer(descr_name) def descr_binop(self, space, w_other): if isinstance(w_other, W_AbstractIntObject): - w_other = delegate_Int2SmallLong(space, w_other) + w_other = _int2small(space, w_other) elif not isinstance(w_other, W_AbstractLongObject): return space.w_NotImplemented elif not isinstance(w_other, W_SmallLongObject): - self = delegate_SmallLong2Long(space, self) - return getattr(self, descr_name)(space, w_other) + self = _small2long(space, self) + return long_op(self, space, w_other) try: return func(self, space, w_other) except OverflowError: - self = delegate_SmallLong2Long(space, self) - w_other = delegate_SmallLong2Long(space, w_other) - return getattr(self, descr_name)(space, w_other) + self = _small2long(space, self) + w_other = _small2long(space, w_other) + return long_op(self, space, w_other) - @func_renamer(descr_rname) - def descr_rbinop(self, space, w_other): - if isinstance(w_other, W_AbstractIntObject): - w_other = delegate_Int2SmallLong(space, w_other) - elif not isinstance(w_other, W_AbstractLongObject): - return space.w_NotImplemented - elif not isinstance(w_other, W_SmallLongObject): - self = delegate_SmallLong2Long(space, self) - return getattr(self, descr_rname)(space, w_other) + if opname in COMMUTATIVE_OPS: + descr_rbinop = func_with_new_name(descr_binop, descr_rname) + else: + long_rop = getattr(W_LongObject, descr_rname) + @func_renamer(descr_rname) + def descr_rbinop(self, space, w_other): + if isinstance(w_other, W_AbstractIntObject): + w_other = _int2small(space, w_other) + elif not isinstance(w_other, W_AbstractLongObject): + return space.w_NotImplemented + elif not isinstance(w_other, W_SmallLongObject): + self = _small2long(space, self) + return long_rop(self, space, w_other) - try: - return func(w_other, space, self) - except OverflowError: - self = delegate_SmallLong2Long(space, self) - w_other = delegate_SmallLong2Long(space, w_other) - return getattr(self, descr_rname)(space, w_other) + try: + return func(w_other, space, self) + except OverflowError: + self = _small2long(space, self) + w_other = _small2long(space, w_other) + return long_rop(self, space, w_other) return descr_binop, descr_rbinop @@ -168,7 +242,7 @@ def _mul(self, space, w_other): x = self.longlong y = w_other.longlong - z = llong_mul_ovf(x, y) + z = _llong_mul_ovf(x, y) return W_SmallLongObject(z) descr_mul, descr_rmul = _make_descr_binop(_mul) @@ -216,57 +290,6 @@ return space.newtuple([W_SmallLongObject(z), W_SmallLongObject(m)]) descr_divmod, descr_rdivmod = _make_descr_binop(_divmod) - @unwrap_spec(w_modulus=WrappedDefault(None)) - def descr_pow(self, space, w_exponent, w_modulus=None): - if isinstance(w_exponent, W_AbstractLongObject): - self = delegate_SmallLong2Long(space, self) - return self.descr_pow(space, w_exponent, w_modulus) - elif not isinstance(w_exponent, W_AbstractIntObject): - return space.w_NotImplemented - - if space.is_none(w_modulus): - try: - return _pow_impl(space, self.longlong, w_exponent, - r_longlong(0)) - except ValueError: - self = self.descr_float(space) - return space.pow(self, w_exponent, space.w_None) - except OverflowError: - self = delegate_SmallLong2Long(space, self) - return self.descr_pow(space, w_exponent, w_modulus) - elif isinstance(w_modulus, W_AbstractIntObject): - w_modulus = delegate_Int2SmallLong(space, w_modulus) - elif not isinstance(w_modulus, W_AbstractLongObject): - return space.w_NotImplemented - elif not isinstance(w_modulus, W_SmallLongObject): - self = delegate_SmallLong2Long(space, self) - return self.descr_pow(space, 
w_exponent, w_modulus) - - z = w_modulus.longlong - if z == 0: - raise operationerrfmt(space.w_ValueError, - "pow() 3rd argument cannot be 0") - try: - return _pow_impl(space, self.longlong, w_exponent, z) - except ValueError: - self = self.descr_float(space) - return space.pow(self, w_exponent, w_modulus) - except OverflowError: - self = delegate_SmallLong2Long(space, self) - return self.descr_pow(space, w_exponent, w_modulus) - - @unwrap_spec(w_modulus=WrappedDefault(None)) - def descr_rpow(self, space, w_base, w_modulus=None): - if isinstance(w_base, W_AbstractIntObject): - # Defer to w_base.descr_pow - # XXX: W_AbstractIntObject.descr_long could return - # SmallLongs then it could used instead of - # delegate_Int2SmallLong - w_base = delegate_Int2SmallLong(space, w_base) - elif not isinstance(w_base, W_AbstractLongObject): - return space.w_NotImplemented - return w_base.descr_pow(space, self, w_modulus) - def _lshift(self, space, w_other): a = self.longlong # May overflow @@ -322,32 +345,10 @@ return W_SmallLongObject(res) descr_or, descr_ror = _make_descr_binop(_or) - def descr_neg(self, space): - a = self.longlong - try: - if a == r_longlong(-1 << (LONGLONG_BIT-1)): - raise OverflowError - x = -a - except OverflowError: - self = delegate_SmallLong2Long(space, self) - return self.descr_neg(space) - return W_SmallLongObject(x) - - def descr_abs(self, space): - return self if self.longlong >= 0 else self.descr_neg(space) - - def descr_nonzero(self, space): - return space.newbool(bool(self.longlong)) - - def descr_invert(self, space): - x = self.longlong - a = ~x - return W_SmallLongObject(a) - # ____________________________________________________________ -def llong_mul_ovf(a, b): +def _llong_mul_ovf(a, b): # xxx duplication of the logic from translator/c/src/int.h longprod = a * b doubleprod = float(a) * float(b) @@ -373,18 +374,44 @@ # ____________________________________________________________ -def delegate_Int2SmallLong(space, w_int): - return W_SmallLongObject(r_longlong(w_int.int_w(space))) - -def delegate_SmallLong2Long(space, w_small): - return W_LongObject(w_small.asbigint()) - def delegate_SmallLong2Float(space, w_small): return space.newfloat(float(w_small.longlong)) def delegate_SmallLong2Complex(space, w_small): return space.newcomplex(float(w_small.longlong), 0.0) +def _int2small(space, w_int): + # XXX: W_IntObject.descr_long should probably return W_SmallLongs + return W_SmallLongObject(r_longlong(w_int.int_w(space))) + +def _small2long(space, w_small): + return W_LongObject(w_small.asbigint()) + +def _pow_impl(space, iv, w_int2, iz): + iw = space.int_w(w_int2) + if iw < 0: + if iz != 0: + raise operationerrfmt(space.w_TypeError, + "pow() 2nd argument cannot be negative when " + "3rd argument specified") + raise ValueError + temp = iv + ix = r_longlong(1) + while iw > 0: + if iw & 1: + ix = _llong_mul_ovf(ix, temp) + iw >>= 1 # Shift exponent down by 1 bit + if iw == 0: + break + temp = _llong_mul_ovf(temp, temp) # Square the value of temp + if iz: + # If we did a multiplication, perform a modulo + ix %= iz + temp %= iz + if iz: + ix %= iz + return W_SmallLongObject(ix) + def add_ovr(space, w_int1, w_int2): x = r_longlong(space.int_w(w_int1)) y = r_longlong(space.int_w(w_int2)) @@ -415,31 +442,6 @@ return space.newtuple([div_ovr(space, w_int1, w_int2), mod_ovr(space, w_int1, w_int2)]) -def _pow_impl(space, iv, w_int2, iz): - iw = space.int_w(w_int2) - if iw < 0: - if iz != 0: - raise operationerrfmt(space.w_TypeError, - "pow() 2nd argument cannot be negative when " - 
"3rd argument specified") - raise ValueError - temp = iv - ix = r_longlong(1) - while iw > 0: - if iw & 1: - ix = llong_mul_ovf(ix, temp) - iw >>= 1 # Shift exponent down by 1 bit - if iw == 0: - break - temp = llong_mul_ovf(temp, temp) # Square the value of temp - if iz: - # If we did a multiplication, perform a modulo - ix %= iz - temp %= iz - if iz: - ix %= iz - return W_SmallLongObject(ix) - def pow_ovr(space, w_int1, w_int2): try: return _pow_impl(space, r_longlong(space.int_w(w_int1)), w_int2) @@ -459,5 +461,5 @@ return W_SmallLongObject(a) def lshift_ovr(space, w_int1, w_int2): - w_a = delegate_Int2SmallLong(space, w_int1) + w_a = _int2small(space, w_int1) return w_a.descr_lshift(space, w_int2) From noreply at buildbot.pypy.org Sat Jan 4 00:51:42 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 4 Jan 2014 00:51:42 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: fix regression: we want actual W_LongObjects here Message-ID: <20140103235142.628DC1C0459@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68583:f952d1bece0a Date: 2014-01-03 15:50 -0800 http://bitbucket.org/pypy/pypy/changeset/f952d1bece0a/ Log: fix regression: we want actual W_LongObjects here diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -446,8 +446,8 @@ try: return _pow_impl(space, r_longlong(space.int_w(w_int1)), w_int2) except (OverflowError, ValueError): - w_a = w_int1.descr_long(space) - w_b = w_int2.descr_long(space) + w_a = _small2long(space, w_int1) + w_b = _small2long(space, w_int2) return w_a.descr_pow(space, w_b, space.w_None) def neg_ovr(space, w_int): From noreply at buildbot.pypy.org Sat Jan 4 03:05:49 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 4 Jan 2014 03:05:49 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: fix/another workaround following 4fa4c6b93a84 Message-ID: <20140104020549.40FB81C0356@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68584:bede33791d55 Date: 2014-01-03 18:04 -0800 http://bitbucket.org/pypy/pypy/changeset/bede33791d55/ Log: fix/another workaround following 4fa4c6b93a84 diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -444,7 +444,8 @@ def pow_ovr(space, w_int1, w_int2): try: - return _pow_impl(space, r_longlong(space.int_w(w_int1)), w_int2) + return _pow_impl(space, r_longlong(space.int_w(w_int1)), w_int2, + r_longlong(0)) except (OverflowError, ValueError): w_a = _small2long(space, w_int1) w_b = _small2long(space, w_int2) From noreply at buildbot.pypy.org Sat Jan 4 10:03:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Jan 2014 10:03:56 +0100 (CET) Subject: [pypy-commit] stmgc c7: tweaks Message-ID: <20140104090356.039D71C010E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r595:8536ef3473a5 Date: 2014-01-03 14:08 +0100 http://bitbucket.org/pypy/stmgc/changeset/8536ef3473a5/ Log: tweaks diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -19,6 +19,10 @@ #define MAP_PAGES_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE) #define LARGE_OBJECT_WORDS 36 +#if defined(__i386__) || defined(__x86_64__) +# define HAVE_FULL_EXCHANGE_INSN +#endif + typedef TLPREFIX char localchar_t; typedef TLPREFIX struct alloc_for_size_s alloc_for_size_t; @@ 
-114,11 +118,23 @@ if (flag_page_private[pagenum] == PRIVATE_PAGE) return; - if (!__sync_bool_compare_and_swap(&flag_page_private[pagenum], - SHARED_PAGE, REMAPPING_PAGE)) { +#ifdef HAVE_FULL_EXCHANGE_INSN + /* use __sync_lock_test_and_set() as a cheaper alternative to + __sync_bool_compare_and_swap(). */ + int previous = __sync_lock_test_and_set(&flag_page_private[pagenum], + REMAPPING_PAGE); + if (previous == PRIVATE_PAGE) { + flag_page_private[pagenum] = PRIVATE_PAGE; + return; + } + bool was_shared = (previous == SHARED_PAGE); +#else + bool was_shared = __sync_bool_compare_and_swap(&flag_page_private[pagenum], + SHARED_PAGE, REMAPPING_PAGE); +#endif + if (!was_shared) { while (flag_page_private[pagenum] == REMAPPING_PAGE) spin_loop(); - assert(flag_page_private[pagenum] == PRIVATE_PAGE); return; } @@ -248,7 +264,8 @@ stm_read(obj); - _STM_TL2->modified_objects = stm_list_append(_STM_TL2->modified_objects, obj); + _STM_TL2->modified_objects = stm_list_append( + _STM_TL2->modified_objects, obj); uint16_t wv = obj->write_version; obj->write_version = _STM_TL1->transaction_write_version; @@ -406,6 +423,11 @@ /* Make a "hole" at _STM_TL1 / _STM_TL2 */ memset(REAL_ADDRESS(thread_base, _STM_TL2), 0, sizeof(*_STM_TL2)); + /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ + if (FIRST_READMARKER_PAGE > 2) + mprotect(thread_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, + PROT_NONE); + _STM_TL2->thread_num = i; _STM_TL2->thread_base = thread_base; From noreply at buildbot.pypy.org Sat Jan 4 11:38:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Jan 2014 11:38:51 +0100 (CET) Subject: [pypy-commit] stmgc c7: Initial draft of this document, based on similar drafts from c5 and c6 Message-ID: <20140104103851.B2D491C0162@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r596:cdcd428aa614 Date: 2014-01-04 11:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/cdcd428aa614/ Log: Initial draft of this document, based on similar drafts from c5 and c6 but for c7. diff --git a/c7/README.txt b/c7/README.txt new file mode 100644 --- /dev/null +++ b/c7/README.txt @@ -0,0 +1,154 @@ +============================================================ +STMGC-C7 +============================================================ + + +An STM system, focusing on low numbers of CPUs. It requires Linux +running 64-bit, and must be compiled with clang. + + +The %gs segment prefix +---------------------- + +This a hack using __attribute__((address_space(256))) on structs, which +makes clang write all pointer dereferences to them using the "%gs:" +prefix at the assembler level. This is a rarely-used way to shift all +memory accesses by some offset stored in the %gs special register. Each +thread has its own value in %gs. Note that %fs is used in a similar way +by the pthread library to offset the thread-local variables; what we +need is similar to thread-local variables, but in large quantity. + +I did not find any measurable slow-down from any example using the %gs +prefix, so I expect the real performance hit to be tiny (along the lines +of the extra stress on instruction caches caused by the extra byte for +each load/store instruction). + + +remap_file_pages +---------------- + +The Linux-only system call remap_file_pages() allows us to tweak a +mmap() region of memory. It makes explicit one extra level of the +memory-mapped management of the CPU. Let us focus on mmaps that are not +backed by a file. 
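As a side note on the %gs trick described above, here is a minimal sketch (not
code from the repository) of what the attribute does, assuming clang on x86-64,
where address_space 256 selects the %gs segment and TLPREFIX expands to that
attribute as in core.c:

    #define TLPREFIX  __attribute__((address_space(256)))

    typedef TLPREFIX struct object_s object_t;
    struct object_s { long header; long value; };

    long read_field(object_t *obj)
    {
        /* clang compiles this load as roughly  movq %gs:8(%rdi), %rax,
           so the same pointer value reaches a different thread-local
           copy in each thread, depending on the base loaded into %gs */
        return obj->value;
    }
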
A call to mmap() reserves a number of physical pages +4096 bytes each, initialized to zero (and actually lazily allocated when +the process really needs them, rather than all at once). It also +reserves a range of addresses in the current process, of the same size, +which correspond to the physical pages. But by using +remap_file_pages(), we can change the mapping of the addresses to the +physical pages. The total amount of both quantities is identical, and +invariable, but we can take any page-sized range of addresses and ask +that it now maps to a different physical page. Most probably, this +comes with no overhead once the change is done: neither in terms of +performance nor in extra memory in the kernel. The trick here is that +different ranges of addresses can map to the same physical page of +memory, which gives a zero-cost way to share data at different +addresses. + + +Memory organization +------------------- + +We allocate a big mmap that contains enough addresses for N times M +bytes, where N is the number of threads and M is an upper bound on the +total size of the objects. Then we use remap_file_pages() to make these +N regions all map to the same physical memory. In each thread, +%gs is made to point to the start of the corresponding region. This +means that %gs-relative accesses will go to different addresses in +each thread, but these addresses are then (initially) mapped to the +same physical memory, so the effect is as if we used neither %gs nor +remap_file_pages(). + +The exception comes from pages that contain objects that are already +committed, but are being modified by the current transaction. Such +changes must not be visible to other threads before the current +transaction commits. This is done by using another remap_file_pages() +to "unshare" the page, i.e. stop the corresponding %gs-relative, +thread-local page from mapping to the same physical page as others. We +get a fresh new physical page, and duplicate its content --- much like +the OS does after a fork() for pages modified by one or the other +process. + +In more details: the first page of addresses in each thread-local region +(4096 bytes) is made non-accessible, to detect errors of accessing the +NULL pointer. The second page is reserved for thread-local data. The +rest is divided into 1/16 for thread-local read markers, followed by +15/16 for the real objects. We initially use remap_file_pages() on this +15/16 range. + +Each transaction records the objects that it changed. These are +necessarily within unshared pages. When other threads are about to +commit their own transaction, they first copy these objects into their +version of the page. The point is that, from another thread's point of +view, the memory didn't appear to change unexpectedly, but only when +that other thread decides to copy the change explicitly. + +Each transaction uses their own (private) read markers to track which +objects have been read. When a thread "imports" changes done to some +objects, it can quickly check if these objects have also been read by +the current transaction, and if so, we know we have a conflict. + + +STM details +----------- + +Here is how the STM works in terms that are hopefully common in STM +research. The transactions run from a "start time" to a "commit time", +but these are not explicitly represented numerically. The start time +defines the initial state of the objects as seen in this thread. 
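The memory organization described above can be illustrated with a small sketch,
with made-up sizes and names; the real setup also carves out the no-access
page, the thread-local page and the read-marker area, and uses slightly
different mmap flags.  The sketch builds one big shared anonymous mapping and
then makes a page of thread 1's region alias the same physical page as in
thread 0's region:

    #include <sys/mman.h>

    #define PAGE_SIZE    4096UL
    #define REGION_SIZE  (256UL * 1024 * 1024)   /* hypothetical M */
    #define NB_THREADS   2                       /* hypothetical N */

    static char *setup_regions(void)
    {
        char *base = mmap(NULL, NB_THREADS * REGION_SIZE,
                          PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE,
                          -1, 0);
        /* (error checking omitted)  Make the first page of thread 1's
           region an alias of the first page of thread 0's region: the
           'pgoff' argument is a page index counted from the start of
           the whole mapping. */
        remap_file_pages(base + REGION_SIZE, PAGE_SIZE, 0, 0, 0);
        return base;
    }

The real code applies the same call to the whole 15/16 "objects" range of every
thread region at start-up, so that initially all regions show the same data.
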
We use +the "extendable timestamps" approach in order to regularly bump the +start time of running transactions (not only when a potential conflict +is detected, but more eagerly). + +Each thread records privately its read objects (using a byte-map) and +publicly its written objects (using an array of pointers as well as a +global flag in the object). Read-write conflicts are detected during +the start time bumps. Write-write conflicts are detected eagerly --- +only one transaction can be concurrently running with a given object +modified. (In the case of write-write conficts, there are several +possible contention management policies; for now we always abort the +transaction that comes later in its attempt to modify the object.) + +Special care is taken for objects allocated in the current transaction. +We expect these objects to be the vast majority of modified objects, and +also most of them to die quickly. More about it below. + +We use what looks like an "undo log" approach, where objects are +modified in-place and aborts cause them to be copied back from somewhere +else. However, it is implemented without any explicit undo log, but by +copying objects between multiple thread-local copies. Memory pages +containing modified objects are duplicated anyway, and so we already +have around several copies of the objects at potentially different +versions. + +At most one thread is called the "leader" (this is new terminology as +far as I know). The leader is the thread running a transaction whose +start time is higher than the start time of any other running +transaction. If there are several threads with the same highest start +time, we have no leader. Leadership is a temporary condition: it is +acquired (typically) by the thread whose transaction commits and whose +next transaction starts; but it is lost again as soon as any other +thread updates its transaction's start time to match. + +The point of the notion of leadership is that when the leader wants to +modify an object, it must first make sure that the original version is +also present somewhere else. Only the leader thread, if there is any, +needs to worry about it. We don't need to remember the original version +of an older object, because if we need to abort a transaction, we may as +well update all objects to the latest version. And if there are several +threads with the same highest start time, we can be sure that the +original version of the object is somewhere among them --- this is the +point of detecting write-write conflicts eagerly. The only remaining +case is the one in which there is a leader thread, this leader thread +has the only latest version of an object, and it tries to further modify +this object. To handle this precise case, for now, we simply wait until +another thread updates and we are no longer the leader. (*) + +(*) the code in core.c contains, or contained, or will again contain, an +explicit undo log that would be filled in this case only. + + +Object creation and GC +---------------------- + +XXX write me From noreply at buildbot.pypy.org Sat Jan 4 12:00:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Jan 2014 12:00:49 +0100 (CET) Subject: [pypy-commit] stmgc c7: Draft the GC paragraph. Message-ID: <20140104110049.5DB481C0459@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r597:6bbdefa01606 Date: 2014-01-04 12:00 +0100 http://bitbucket.org/pypy/stmgc/changeset/6bbdefa01606/ Log: Draft the GC paragraph. 
diff --git a/c7/README.txt b/c7/README.txt --- a/c7/README.txt +++ b/c7/README.txt @@ -151,4 +151,37 @@ Object creation and GC ---------------------- -XXX write me +draft: + +- pages containing only freshly allocated objects need not be unshared + +- minor collection: occurs regularly, and maybe always at the end of + transactions (we'll see). Should work by marking the young objects + that survive. Non-marked objects are then sweeped lazily by the + next allocation requests (as in "mark-and-don't-sweep" GCs, here + for the minor collection only). Needs a write barrier to detect + old-objects-pointing-to-young objects (the old object may belong + to the same running transaction, or be already committed). + +- the numers and flags stored in the objects need to be designed with + the above goals in mind. + +- unclear yet: the minor collections may be triggered only when the + memory is full, or whenever a few MBs of memory was allocated. It is + not important for small-to-medium transactions that only allocate a + few MBs anyway, but it might be for long-running transactions. + +- the major collections walk *all* objects. They'll probably require + all threads to be synchronized. Ideally the threads should then proceed + to do a parallel GC, but as a first step, blocking all threads but one + should be fine. + +- the major collections should be triggered by the amount of really-used + memory, which means: counting the unshared pages as N pages. Major + collection should then re-share the pages as much as possible, after + making sure that all threads have their timestamp updated. This is the + essential part that guarantees that large, old, no-longer-modified + bunches of objects are eventually present in only one copy in memory, + in shared pages --- while at the same time bounding the number of + calls to remap_file_pages() for each page at 2 per major collection + cycle. From noreply at buildbot.pypy.org Sat Jan 4 15:43:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Jan 2014 15:43:02 +0100 (CET) Subject: [pypy-commit] stmgc c7: Updates Message-ID: <20140104144302.7CEC21C010E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r598:b327979e2f33 Date: 2014-01-04 15:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/b327979e2f33/ Log: Updates diff --git a/c7/README.txt b/c7/README.txt --- a/c7/README.txt +++ b/c7/README.txt @@ -153,7 +153,12 @@ draft: -- pages containing only freshly allocated objects need not be unshared +- pages need to be unshared when they contain already-committed objects + that are then modified. They can remain shared if a fraction of (or all) + their space was not used previously, but is used by new allocations; any + changes to these fresh objects during the same transaction do *not* need + to unshare the page. This should ensure that in the common case the + majority of pages are not unshared. - minor collection: occurs regularly, and maybe always at the end of transactions (we'll see). Should work by marking the young objects @@ -163,7 +168,7 @@ old-objects-pointing-to-young objects (the old object may belong to the same running transaction, or be already committed). -- the numers and flags stored in the objects need to be designed with +- the numbers and flags stored in the objects need to be designed with the above goals in mind. 
- unclear yet: the minor collections may be triggered only when the From noreply at buildbot.pypy.org Sat Jan 4 16:30:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Jan 2014 16:30:55 +0100 (CET) Subject: [pypy-commit] stmgc c7: Use checkfence to check a particular lockfree subalgorithm used in c7/core.c Message-ID: <20140104153055.D4FC61C015E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r599:09c6e26a06fe Date: 2014-01-04 16:30 +0100 http://bitbucket.org/pypy/stmgc/changeset/09c6e26a06fe/ Log: Use checkfence to check a particular lockfree subalgorithm used in c7/core.c diff --git a/checkfence/c7/run b/checkfence/c7/run new file mode 100755 --- /dev/null +++ b/checkfence/c7/run @@ -0,0 +1,11 @@ +#!/bin/sh + +export C2LSL_HOME=./c2lsl +export CHECKFENCE_HOME=./checkfence + + +$C2LSL_HOME/bin/c2lsl.exe "$1" _run.lsl || exit 1 +shift +$CHECKFENCE_HOME/run/clean || exit 1 +echo ------------------------------------------------------------------------- +$CHECKFENCE_HOME/run/checkfence -i _run.lsl "$@" || exit 1 diff --git a/checkfence/c7/unshare_page.c b/checkfence/c7/unshare_page.c new file mode 100644 --- /dev/null +++ b/checkfence/c7/unshare_page.c @@ -0,0 +1,57 @@ +#include "lsl_protos.h" + + +int __sync_lock_test_and_set(int *lock, int nvalue) +{ + /* the x86 behavior of this instruction, which is really a XCHG */ + int old = *lock; + lsl_assume(lsl_cas_32(lock, old, nvalue)); + lsl_fence("load-load"); + return old; +} + + +enum { SHARED_PAGE=0, REMAPPING_PAGE, PRIVATE_PAGE }; /* flag_page_private */ +int flag_page_private; +int privatized_data; + +void INIT(void) +{ + flag_page_private = SHARED_PAGE; +} + +void _stm_privatize(void) +{ + int previous = __sync_lock_test_and_set(&flag_page_private, + REMAPPING_PAGE); + switch (previous) { + case PRIVATE_PAGE: + lsl_assert(flag_page_private != SHARED_PAGE); + flag_page_private = PRIVATE_PAGE; + return; + + case REMAPPING_PAGE: + lsl_assert(flag_page_private != SHARED_PAGE); + /* here we wait until 'flag_page_private' is changed away from + REMAPPING_PAGE, and we assume that it eventually occurs */ + lsl_assume(flag_page_private != REMAPPING_PAGE); + lsl_fence("load-load"); + return; + + case SHARED_PAGE: + lsl_observe_label("privatizing"); + privatized_data = 42; + lsl_fence("store-store"); + lsl_assert(flag_page_private == REMAPPING_PAGE); + flag_page_private = PRIVATE_PAGE; + return; + } + lsl_assert(0); +} + +void PRIVATIZE(void) +{ + _stm_privatize(); + int data = privatized_data; + lsl_observe_output("data", data); +} diff --git a/checkfence/c7/unshare_page.lsl b/checkfence/c7/unshare_page.lsl new file mode 100644 --- /dev/null +++ b/checkfence/c7/unshare_page.lsl @@ -0,0 +1,2 @@ + +test T1 = INIT ( PRIVATIZE PRIVATIZE | PRIVATIZE PRIVATIZE | PRIVATIZE PRIVATIZE ) From noreply at buildbot.pypy.org Sat Jan 4 16:54:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 4 Jan 2014 16:54:43 +0100 (CET) Subject: [pypy-commit] stmgc c7: Check both the x86 version and the general version. Message-ID: <20140104155443.E00611C010E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r600:66e0f7f7267c Date: 2014-01-04 16:54 +0100 http://bitbucket.org/pypy/stmgc/changeset/66e0f7f7267c/ Log: Check both the x86 version and the general version. 
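For readability, the page-privatization protocol that the following diff generalizes can be summarized as below. This is only a sketch: it substitutes GCC's __sync builtins for the harness's lsl_* primitives, and the "unshare the page" step stands for the remap-and-copy described in the README above.

    enum { SHARED_PAGE, REMAPPING_PAGE, PRIVATE_PAGE };
    static volatile int flag_page_private = SHARED_PAGE;

    static void privatize_page(void)
    {
        if (__sync_bool_compare_and_swap(&flag_page_private,
                                         SHARED_PAGE, REMAPPING_PAGE)) {
            /* we won the race: this thread unshares the page */
            /* ... remap_file_pages() the private view and copy the data ... */
            __sync_synchronize();          /* publish the copy before the flag */
            flag_page_private = PRIVATE_PAGE;
        }
        else {
            /* the page is already private, or someone else is remapping it:
               wait until the remapping (if any) is finished */
            while (flag_page_private == REMAPPING_PAGE)
                ;                          /* spin */
            __sync_synchronize();          /* pairs with the store above */
        }
    }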
diff --git a/checkfence/c7/unshare_page.c b/checkfence/c7/unshare_page.c --- a/checkfence/c7/unshare_page.c +++ b/checkfence/c7/unshare_page.c @@ -20,38 +20,51 @@ flag_page_private = SHARED_PAGE; } -void _stm_privatize(void) +void _stm_privatize(int mode) { - int previous = __sync_lock_test_and_set(&flag_page_private, - REMAPPING_PAGE); - switch (previous) { - case PRIVATE_PAGE: - lsl_assert(flag_page_private != SHARED_PAGE); - flag_page_private = PRIVATE_PAGE; - return; + int was_shared; + if (mode == 0) { + int previous = __sync_lock_test_and_set(&flag_page_private, + REMAPPING_PAGE); + if (previous == PRIVATE_PAGE) { + lsl_assert(flag_page_private != SHARED_PAGE); + flag_page_private = PRIVATE_PAGE; + return; + } + was_shared = (previous == SHARED_PAGE); + } + else { + was_shared = lsl_cas_32(&flag_page_private, + SHARED_PAGE, REMAPPING_PAGE); + lsl_fence("load-load"); + } - case REMAPPING_PAGE: + if (!was_shared) { lsl_assert(flag_page_private != SHARED_PAGE); /* here we wait until 'flag_page_private' is changed away from REMAPPING_PAGE, and we assume that it eventually occurs */ lsl_assume(flag_page_private != REMAPPING_PAGE); lsl_fence("load-load"); - return; - - case SHARED_PAGE: + } + else { lsl_observe_label("privatizing"); privatized_data = 42; lsl_fence("store-store"); lsl_assert(flag_page_private == REMAPPING_PAGE); flag_page_private = PRIVATE_PAGE; - return; } - lsl_assert(0); } -void PRIVATIZE(void) +void PRIV_X86(void) { - _stm_privatize(); + _stm_privatize(0); int data = privatized_data; lsl_observe_output("data", data); } + +void PRIV_GEN(void) +{ + _stm_privatize(1); + int data = privatized_data; + lsl_observe_output("data", data); +} diff --git a/checkfence/c7/unshare_page.lsl b/checkfence/c7/unshare_page.lsl --- a/checkfence/c7/unshare_page.lsl +++ b/checkfence/c7/unshare_page.lsl @@ -1,2 +1,3 @@ -test T1 = INIT ( PRIVATIZE PRIVATIZE | PRIVATIZE PRIVATIZE | PRIVATIZE PRIVATIZE ) +test T1 = INIT ( PRIV_X86 PRIV_X86 | PRIV_X86 PRIV_X86 | PRIV_X86 PRIV_X86 ) +test T2 = INIT ( PRIV_GEN PRIV_GEN | PRIV_GEN PRIV_GEN | PRIV_GEN PRIV_GEN ) From noreply at buildbot.pypy.org Sat Jan 4 19:35:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 4 Jan 2014 19:35:31 +0100 (CET) Subject: [pypy-commit] pypy default: fix numpy int coerce cases Message-ID: <20140104183531.A688E1C067F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68585:0dcf482a5cb8 Date: 2014-01-04 13:34 -0500 http://bitbucket.org/pypy/pypy/changeset/0dcf482a5cb8/ Log: fix numpy int coerce cases diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -20,7 +20,10 @@ assert math.isnan(np.complex_(None)) for c in ['i', 'I', 'l', 'L', 'q', 'Q']: assert np.dtype(c).type().dtype.char == c - assert np.dtype('L').type(sys.maxint + 42) == sys.maxint + 42 + for c in ['l', 'q']: + assert np.dtype(c).type(sys.maxint) == sys.maxint + for c in ['L', 'Q']: + assert np.dtype(c).type(sys.maxint + 42) == sys.maxint + 42 def test_builtin(self): import numpy as np diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -586,7 +586,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" - _coerce = func_with_new_name(_int64_coerce, '_coerce') + if LONG_BIT == 32: + _coerce = func_with_new_name(_int64_coerce, '_coerce') def _uint64_coerce(self, space, w_item): 
try: @@ -613,16 +614,25 @@ BoxType = interp_boxes.W_LongBox format_code = "l" - if LONG_BIT == 64: - _coerce = func_with_new_name(_int64_coerce, '_coerce') +def _ulong_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.touint() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) class ULong(BaseType, Integer): T = rffi.ULONG BoxType = interp_boxes.W_ULongBox format_code = "L" - if LONG_BIT == 64: - _coerce = func_with_new_name(_uint64_coerce, '_coerce') + _coerce = func_with_new_name(_ulong_coerce, '_coerce') class Float(Primitive): _mixin_ = True From noreply at buildbot.pypy.org Mon Jan 6 11:49:26 2014 From: noreply at buildbot.pypy.org (timfel) Date: Mon, 6 Jan 2014 11:49:26 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: fix endianess Message-ID: <20140106104926.889691C02B3@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r551:04ddd3453396 Date: 2014-01-06 11:29 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/04ddd3453396/ Log: fix endianess diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -976,6 +976,10 @@ def create(space, w_class, size, depth, display): if depth < 8: return W_MappingDisplayBitmap(space, w_class, size * (8 / depth), depth, display) + elif depth == 8: + return W_8BitDisplayBitmap(space, w_class, size, depth, display) + elif depth == 16: + return W_16BitDisplayBitmap(space, w_class, size, depth, display) else: return W_DisplayBitmap(space, w_class, size, depth, display) @@ -1034,6 +1038,38 @@ lltype.free(self._real_depth_buffer, flavor='raw') +class W_16BitDisplayBitmap(W_DisplayBitmap): + def setword(self, n, word): + self._real_depth_buffer[n] = word + mask = 0b11111 + lsb = (r_uint(word) & r_uint(0xffff0000)) >> 16 + msb = (r_uint(word) & r_uint(0x0000ffff)) + + lsb = ( + ((lsb >> 10) & mask) | + (((lsb >> 5) & mask) << 6) | + ((lsb & mask) << 11) + ) + msb = ( + ((msb >> 10) & mask) | + (((msb >> 5) & mask) << 6) | + ((msb & mask) << 11) + ) + + self.pixelbuffer[n] = r_uint(lsb | (msb << 16)) + + +class W_8BitDisplayBitmap(W_DisplayBitmap): + def setword(self, n, word): + self._real_depth_buffer[n] = word + self.pixelbuffer[n] = r_uint( + (word >> 24) | + ((word >> 8) & 0x0000ff00) | + ((word << 8) & 0x00ff0000) | + (word << 24) + ) + + NATIVE_DEPTH = 8 class W_MappingDisplayBitmap(W_DisplayBitmap): @jit.unroll_safe @@ -1041,10 +1077,7 @@ self._real_depth_buffer[n] = word word = r_uint(word) pos = self.compute_pos(n) - # pos, line_end = self.compute_pos_and_line_end(n, self._depth) assert self._depth <= 4 - maskR = r_uint(0b1111) - mask = maskR << (32 - self._depth) rshift = 32 - self._depth for i in xrange(8 / self._depth): if pos >= self.size(): @@ -1052,8 +1085,7 @@ mapword = r_uint(0) for i in xrange(4): pixel = r_uint(word) >> rshift - mapword <<= 8 - mapword |= r_uint(pixel) + mapword |= (r_uint(pixel) << (i * 8)) word <<= self._depth self.pixelbuffer[pos] = mapword pos += 1 @@ -1061,19 +1093,6 @@ def compute_pos(self, n): return n * (NATIVE_DEPTH / self._depth) - # @jit.elidable - # def compute_pos_and_line_end(self, n, depth): - # width = self.display.width - # words_per_line = width / (NATIVE_DEPTH / depth) - # if width % (NATIVE_DEPTH / depth) != 0: - # words_per_line += 1 - # line = n / words_per_line - # assert line < self.display.height # 
line is 0 based - # line_start = width * line - # line_end = line_start + width # actually the start of the next line - # pos = ((n % words_per_line) * (NATIVE_DEPTH / depth)) + line_start - # return pos, line_end - # XXX Shouldn't compiledmethod have class reference for subclassed compiled # methods? class W_CompiledMethod(W_AbstractObjectWithIdentityHash): From noreply at buildbot.pypy.org Mon Jan 6 23:55:44 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 6 Jan 2014 23:55:44 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: more don't bother w/ OverflowErrors unless it's necessary Message-ID: <20140106225544.CD5971C136D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68586:88b052759cdc Date: 2014-01-06 14:51 -0800 http://bitbucket.org/pypy/pypy/changeset/88b052759cdc/ Log: more don't bother w/ OverflowErrors unless it's necessary diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -262,7 +262,7 @@ descr_or, descr_ror = _make_generic_descr_binop('or', ovf=False) descr_xor, descr_rxor = _make_generic_descr_binop('xor', ovf=False) - def _make_descr_binop(func): + def _make_descr_binop(func, ovf=True): opname = func.__name__[1:] oper = BINARY_OPS.get(opname) if oper == '%': @@ -273,10 +273,13 @@ def descr_binop(self, space, w_other): if not isinstance(w_other, W_AbstractIntObject): return space.w_NotImplemented - try: + if ovf: + try: + return func(self, space, w_other) + except OverflowError: + return _ovf2long(space, opname, self, w_other) + else: return func(self, space, w_other) - except OverflowError: - return _ovf2long(space, opname, self, w_other) descr_binop.__doc__ = "x.__%s__(y) <==> %s" % (opname, oper % ('x', 'y')) @@ -284,10 +287,13 @@ def descr_rbinop(self, space, w_other): if not isinstance(w_other, W_AbstractIntObject): return space.w_NotImplemented - try: + if ovf: + try: + return func(w_other, space, self) + except OverflowError: + return _ovf2long(space, opname, w_other, self) + else: return func(w_other, space, self) - except OverflowError: - return _ovf2long(space, opname, w_other, self) descr_rbinop.__doc__ = "x.__r%s__(y) <==> %s" % (opname, oper % ('y', 'x')) @@ -314,7 +320,7 @@ raise operationerrfmt(space.w_ZeroDivisionError, "division by zero") return space.wrap(x / y) - descr_truediv, descr_rtruediv = _make_descr_binop(_truediv) + descr_truediv, descr_rtruediv = _make_descr_binop(_truediv, ovf=False) def _mod(self, space, w_other): x = space.int_w(self) @@ -369,7 +375,7 @@ else: a = a >> b return wrapint(space, a) - descr_rshift, descr_rrshift = _make_descr_binop(_rshift) + descr_rshift, descr_rrshift = _make_descr_binop(_rshift, ovf=False) class W_IntObject(W_AbstractIntObject): diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -176,7 +176,7 @@ descr_gt = _make_descr_cmp('gt') descr_ge = _make_descr_cmp('ge') - def _make_descr_binop(func): + def _make_descr_binop(func, ovf=True): opname = func.__name__[1:] descr_name, descr_rname = 'descr_' + opname, 'descr_r' + opname long_op = getattr(W_LongObject, descr_name) @@ -191,12 +191,15 @@ self = _small2long(space, self) return long_op(self, space, w_other) - try: + if ovf: + try: + return func(self, space, w_other) + except OverflowError: + self = _small2long(space, self) + w_other = _small2long(space, w_other) + return 
long_op(self, space, w_other) + else: return func(self, space, w_other) - except OverflowError: - self = _small2long(space, self) - w_other = _small2long(space, w_other) - return long_op(self, space, w_other) if opname in COMMUTATIVE_OPS: descr_rbinop = func_with_new_name(descr_binop, descr_rname) @@ -212,12 +215,15 @@ self = _small2long(space, self) return long_rop(self, space, w_other) - try: + if ovf: + try: + return func(w_other, space, self) + except OverflowError: + self = _small2long(space, self) + w_other = _small2long(space, w_other) + return long_rop(self, space, w_other) + else: return func(w_other, space, self) - except OverflowError: - self = _small2long(space, self) - w_other = _small2long(space, w_other) - return long_rop(self, space, w_other) return descr_binop, descr_rbinop @@ -322,28 +328,28 @@ else: a = a >> b return W_SmallLongObject(a) - descr_rshift, descr_rrshift = _make_descr_binop(_rshift) + descr_rshift, descr_rrshift = _make_descr_binop(_rshift, ovf=False) def _and(self, space, w_other): a = self.longlong b = w_other.longlong res = a & b return W_SmallLongObject(res) - descr_and, descr_rand = _make_descr_binop(_and) + descr_and, descr_rand = _make_descr_binop(_and, ovf=False) def _xor(self, space, w_other): a = self.longlong b = w_other.longlong res = a ^ b return W_SmallLongObject(res) - descr_xor, descr_rxor = _make_descr_binop(_xor) + descr_xor, descr_rxor = _make_descr_binop(_xor, ovf=False) def _or(self, space, w_other): a = self.longlong b = w_other.longlong res = a | b return W_SmallLongObject(res) - descr_or, descr_ror = _make_descr_binop(_or) + descr_or, descr_ror = _make_descr_binop(_or, ovf=False) # ____________________________________________________________ From noreply at buildbot.pypy.org Mon Jan 6 23:55:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 6 Jan 2014 23:55:45 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: cleanup Message-ID: <20140106225545.F09F91C136D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68587:3b13a844ca2c Date: 2014-01-06 14:51 -0800 http://bitbucket.org/pypy/pypy/changeset/3b13a844ca2c/ Log: cleanup diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -258,6 +258,7 @@ descr_add, descr_radd = _make_generic_descr_binop('add') descr_sub, descr_rsub = _make_generic_descr_binop('sub') descr_mul, descr_rmul = _make_generic_descr_binop('mul') + descr_and, descr_rand = _make_generic_descr_binop('and', ovf=False) descr_or, descr_ror = _make_generic_descr_binop('or', ovf=False) descr_xor, descr_rxor = _make_generic_descr_binop('xor', ovf=False) diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -337,6 +337,13 @@ return W_SmallLongObject(res) descr_and, descr_rand = _make_descr_binop(_and, ovf=False) + def _or(self, space, w_other): + a = self.longlong + b = w_other.longlong + res = a | b + return W_SmallLongObject(res) + descr_or, descr_ror = _make_descr_binop(_or, ovf=False) + def _xor(self, space, w_other): a = self.longlong b = w_other.longlong @@ -344,15 +351,6 @@ return W_SmallLongObject(res) descr_xor, descr_rxor = _make_descr_binop(_xor, ovf=False) - def _or(self, space, w_other): - a = self.longlong - b = w_other.longlong - res = a | b - return W_SmallLongObject(res) - descr_or, descr_ror = _make_descr_binop(_or, ovf=False) - - -# 
____________________________________________________________ def _llong_mul_ovf(a, b): # xxx duplication of the logic from translator/c/src/int.h @@ -378,21 +376,24 @@ return longprod raise OverflowError("integer multiplication") -# ____________________________________________________________ def delegate_SmallLong2Float(space, w_small): return space.newfloat(float(w_small.longlong)) + def delegate_SmallLong2Complex(space, w_small): return space.newcomplex(float(w_small.longlong), 0.0) + def _int2small(space, w_int): # XXX: W_IntObject.descr_long should probably return W_SmallLongs return W_SmallLongObject(r_longlong(w_int.int_w(space))) + def _small2long(space, w_small): return W_LongObject(w_small.asbigint()) + def _pow_impl(space, iv, w_int2, iz): iw = space.int_w(w_int2) if iw < 0: @@ -418,6 +419,7 @@ ix %= iz return W_SmallLongObject(ix) + def add_ovr(space, w_int1, w_int2): x = r_longlong(space.int_w(w_int1)) y = r_longlong(space.int_w(w_int2)) From noreply at buildbot.pypy.org Tue Jan 7 01:38:53 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 7 Jan 2014 01:38:53 +0100 (CET) Subject: [pypy-commit] pypy py3k: py3ify unicode's __iter__ check Message-ID: <20140107003853.33C491D233D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68588:6839528d381c Date: 2014-01-06 16:37 -0800 http://bitbucket.org/pypy/pypy/changeset/6839528d381c/ Log: py3ify unicode's __iter__ check diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -57,6 +57,13 @@ return w_iter tuple_iter._annspecialcase_ = 'specialize:memo' +def unicode_iter(space): + "Utility that returns the app-level descriptor str.__iter__." + w_src, w_iter = space.lookup_in_type_where(space.w_unicode, + '__iter__') + return w_iter +unicode_iter._annspecialcase_ = 'specialize:memo' + def raiseattrerror(space, w_obj, w_name, w_descr=None): # space.repr always returns an encodable string. 
if w_descr is None: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -487,7 +487,7 @@ return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() - if isinstance(w_obj, W_UnicodeObject) and self._uses_no_iter(w_obj): + if isinstance(w_obj, W_UnicodeObject) and self._uses_unicode_iter(w_obj): return w_obj.listview_unicode() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_unicode() @@ -526,8 +526,9 @@ from pypy.objspace.descroperation import tuple_iter return self.lookup(w_obj, '__iter__') is tuple_iter(self) - def _uses_no_iter(self, w_obj): - return self.lookup(w_obj, '__iter__') is None + def _uses_unicode_iter(self, w_obj): + from pypy.objspace.descroperation import unicode_iter + return self.lookup(w_obj, '__iter__') is unicode_iter(self) def sliceindices(self, w_slice, w_length): if isinstance(w_slice, W_SliceObject): From noreply at buildbot.pypy.org Tue Jan 7 03:12:18 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 7 Jan 2014 03:12:18 +0100 (CET) Subject: [pypy-commit] pypy py3k: issue1571: less confusing error msg Message-ID: <20140107021218.8E2AD1C06A7@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68589:18ba4e72d2f7 Date: 2014-01-06 18:11 -0800 http://bitbucket.org/pypy/pypy/changeset/18ba4e72d2f7/ Log: issue1571: less confusing error msg diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -81,7 +81,7 @@ (self.ctitem.is_primitive_integer and self.ctitem.size == rffi.sizeof(lltype.Char))): if not space.isinstance_w(w_ob, space.w_str): - raise self._convert_error("str or list or tuple", w_ob) + raise self._convert_error("bytes or list or tuple", w_ob) s = space.str_w(w_ob) n = len(s) if self.length >= 0 and n > self.length: From noreply at buildbot.pypy.org Tue Jan 7 07:26:02 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 7 Jan 2014 07:26:02 +0100 (CET) Subject: [pypy-commit] pypy default: Started on audioop module. Message-ID: <20140107062602.91E6E1C1176@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68590:d4da11a52766 Date: 2014-01-06 22:25 -0800 http://bitbucket.org/pypy/pypy/changeset/d4da11a52766/ Log: Started on audioop module. 
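The diff below starts a pure-Python audioop replacement whose getsample() reads the i-th sample of width 1, 2 or 4 bytes with struct.unpack_from. A rough C equivalent of that access pattern, for illustration only (this is not CPython's audioop.c, and it returns unsigned values exactly like the Python code below):

    #include <stdint.h>
    #include <string.h>

    /* return the i-th sample of width 'size' (1, 2 or 4 bytes) from the raw
       buffer 'cp', in native byte order, as an unsigned value */
    static uint32_t getsample(const unsigned char *cp, int size, size_t i)
    {
        const unsigned char *p = cp + i * (size_t)size;
        switch (size) {
        case 1: { uint8_t v;  memcpy(&v, p, 1); return v; }
        case 2: { uint16_t v; memcpy(&v, p, 2); return v; }
        case 4: { uint32_t v; memcpy(&v, p, 4); return v; }
        default: return 0;   /* width must be 1, 2 or 4, as _check_params()
                                enforces in the Python version */
        }
    }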
diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- /dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,29 @@ + +import struct + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + if size == 1: + return struct.unpack_from("B", buffer(cp)[i:])[0] + elif size == 2: + return struct.unpack_from("H", buffer(cp)[i * 2:])[0] + elif size == 4: + return struct.unpack_from("I", buffer(cp)[i * 4:])[0] diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr -from rpython.rlib import objectmodel, rgc +from rpython.rlib import rgc from rpython.rlib.objectmodel import keepalive_until_here, specialize from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name From noreply at buildbot.pypy.org Tue Jan 7 09:58:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 7 Jan 2014 09:58:08 +0100 (CET) Subject: [pypy-commit] cffi default: Kill is_{signed,unsigned}_type() Message-ID: <20140107085808.BEA711C06A7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1448:7424e4a25f1e Date: 2013-12-15 09:38 +0100 http://bitbucket.org/cffi/cffi/changeset/7424e4a25f1e/ Log: Kill is_{signed,unsigned}_type() diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -81,29 +81,29 @@ 'long': 'i', 'long long': 'i', 'signed char': 'i', - 'unsigned char': 'u', - 'unsigned short': 'u', - 'unsigned int': 'u', - 'unsigned long': 'u', - 'unsigned long long': 'u', + 'unsigned char': 'i', + 'unsigned short': 'i', + 'unsigned int': 'i', + 'unsigned long': 'i', + 'unsigned long long': 'i', 'float': 'f', 'double': 'f', 'long double': 'f', - '_Bool': 'u', + '_Bool': 'i', # the following types are not primitive in the C sense 'wchar_t': 'c', 'int8_t': 'i', - 'uint8_t': 'u', + 'uint8_t': 'i', 'int16_t': 'i', - 'uint16_t': 'u', + 'uint16_t': 'i', 'int32_t': 'i', - 'uint32_t': 'u', + 'uint32_t': 'i', 'int64_t': 'i', - 'uint64_t': 'u', + 'uint64_t': 'i', 'intptr_t': 'i', - 'uintptr_t': 'u', + 'uintptr_t': 'i', 'ptrdiff_t': 'i', - 'size_t': 'u', + 'size_t': 'i', 'ssize_t': 'i', } @@ -114,12 +114,8 @@ def is_char_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' - def is_signed_type(self): + def is_integer_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' - def is_unsigned_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'u' - def is_integer_type(self): - 
return self.ALL_PRIMITIVE_TYPES[self.name] in 'iu' def is_float_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -214,10 +214,7 @@ extraarg = '' if isinstance(tp, model.PrimitiveType): if tp.is_integer_type() and tp.name != '_Bool': - if tp.is_signed_type(): - converter = '_cffi_to_c_SIGNED' - else: - converter = '_cffi_to_c_UNSIGNED' + converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) @@ -270,10 +267,7 @@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.PrimitiveType): if tp.is_integer_type(): - if tp.is_signed_type(): - return '_cffi_from_c_SIGNED(%s, %s)' % (var, tp.name) - else: - return '_cffi_from_c_UNSIGNED(%s, %s)' % (var, tp.name) + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) else: @@ -801,25 +795,23 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble -#define _cffi_from_c_SIGNED(x, type) \ - (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x)) -#define _cffi_from_c_UNSIGNED(x, type) \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) +#define _cffi_from_c_int(x, type) \ + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ + sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ + PyLong_FromUnsignedLongLong(x)) \ + : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ + PyLong_FromLongLong(x))) -#define _cffi_to_c_SIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_i8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_i16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_i32(o) : \ - sizeof(type) == 8 ? _cffi_to_c_i64(o) : \ - (Py_FatalError("unsupported size for type " #type), 0)) -#define _cffi_to_c_UNSIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_u8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_u16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_u32(o) : \ - sizeof(type) == 8 ? _cffi_to_c_u64(o) : \ +#define _cffi_to_c_int(o, type) \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ + : _cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ + : _cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ + : _cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? 
_cffi_to_c_u64(o) \ + : _cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -148,28 +148,27 @@ all_primitive_types = model.PrimitiveType.ALL_PRIMITIVE_TYPES -all_signed_integer_types = sorted(tp for tp in all_primitive_types - if all_primitive_types[tp] == 'i') -all_unsigned_integer_types = sorted(tp for tp in all_primitive_types - if all_primitive_types[tp] == 'u') +all_integer_types = sorted(tp for tp in all_primitive_types + if all_primitive_types[tp] == 'i') all_float_types = sorted(tp for tp in all_primitive_types if all_primitive_types[tp] == 'f') +def all_signed_integer_types(ffi): + return [x for x in all_integer_types if int(ffi.cast(x, -1)) < 0] + +def all_unsigned_integer_types(ffi): + return [x for x in all_integer_types if int(ffi.cast(x, -1)) > 0] + + def test_primitive_category(): for typename in all_primitive_types: tp = model.PrimitiveType(typename) C = tp.is_char_type() - U = tp.is_unsigned_type() - S = tp.is_signed_type() F = tp.is_float_type() I = tp.is_integer_type() assert C == (typename in ('char', 'wchar_t')) - assert U == (typename.startswith('unsigned ') or - typename == '_Bool' or typename == 'size_t' or - typename == 'uintptr_t' or typename.startswith('uint')) assert F == (typename in ('float', 'double', 'long double')) - assert S + U + F + C == 1 # one and only one of them is true - assert I == (S or U) + assert I + F + C == 1 # one and only one of them is true def test_all_integer_and_float_types(): typenames = [] @@ -207,7 +206,7 @@ def test_var_signed_integer_types(): ffi = FFI() - lst = all_signed_integer_types + lst = all_signed_integer_types(ffi) csource = "\n".join(["%s somevar_%s;" % (tp, tp.replace(' ', '_')) for tp in lst]) ffi.cdef(csource) @@ -226,7 +225,7 @@ def test_var_unsigned_integer_types(): ffi = FFI() - lst = all_unsigned_integer_types + lst = all_unsigned_integer_types(ffi) csource = "\n".join(["%s somevar_%s;" % (tp, tp.replace(' ', '_')) for tp in lst]) ffi.cdef(csource) @@ -247,7 +246,7 @@ def test_fn_signed_integer_types(): ffi = FFI() - lst = all_signed_integer_types + lst = all_signed_integer_types(ffi) cdefsrc = "\n".join(["%s somefn_%s(%s);" % (tp, tp.replace(' ', '_'), tp) for tp in lst]) ffi.cdef(cdefsrc) @@ -267,7 +266,7 @@ def test_fn_unsigned_integer_types(): ffi = FFI() - lst = all_unsigned_integer_types + lst = all_unsigned_integer_types(ffi) cdefsrc = "\n".join(["%s somefn_%s(%s);" % (tp, tp.replace(' ', '_'), tp) for tp in lst]) ffi.cdef(cdefsrc) @@ -464,11 +463,12 @@ def test_struct_float_vs_int(): if sys.platform == 'win32': py.test.skip("XXX fixme: only gives warnings") - for typename in all_signed_integer_types: + ffi = FFI() + for typename in all_signed_integer_types(ffi): for real in all_float_types: _check_field_match(typename, real, expect_mismatch=True) for typename in all_float_types: - for real in all_signed_integer_types: + for real in all_signed_integer_types(ffi): _check_field_match(typename, real, expect_mismatch=True) def test_struct_array_field(): From noreply at buildbot.pypy.org Tue Jan 7 09:58:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 7 Jan 2014 09:58:09 +0100 (CET) Subject: [pypy-commit] cffi default: merge heads Message-ID: <20140107085809.DA3E81C0F86@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1449:9941a90948dc Date: 2014-01-07 09:57 +0100 
http://bitbucket.org/cffi/cffi/changeset/9941a90948dc/ Log: merge heads diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -81,29 +81,29 @@ 'long': 'i', 'long long': 'i', 'signed char': 'i', - 'unsigned char': 'u', - 'unsigned short': 'u', - 'unsigned int': 'u', - 'unsigned long': 'u', - 'unsigned long long': 'u', + 'unsigned char': 'i', + 'unsigned short': 'i', + 'unsigned int': 'i', + 'unsigned long': 'i', + 'unsigned long long': 'i', 'float': 'f', 'double': 'f', 'long double': 'f', - '_Bool': 'u', + '_Bool': 'i', # the following types are not primitive in the C sense 'wchar_t': 'c', 'int8_t': 'i', - 'uint8_t': 'u', + 'uint8_t': 'i', 'int16_t': 'i', - 'uint16_t': 'u', + 'uint16_t': 'i', 'int32_t': 'i', - 'uint32_t': 'u', + 'uint32_t': 'i', 'int64_t': 'i', - 'uint64_t': 'u', + 'uint64_t': 'i', 'intptr_t': 'i', - 'uintptr_t': 'u', + 'uintptr_t': 'i', 'ptrdiff_t': 'i', - 'size_t': 'u', + 'size_t': 'i', 'ssize_t': 'i', } @@ -114,12 +114,8 @@ def is_char_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' - def is_signed_type(self): + def is_integer_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' - def is_unsigned_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'u' - def is_integer_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] in 'iu' def is_float_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -214,10 +214,7 @@ extraarg = '' if isinstance(tp, model.PrimitiveType): if tp.is_integer_type() and tp.name != '_Bool': - if tp.is_signed_type(): - converter = '_cffi_to_c_SIGNED' - else: - converter = '_cffi_to_c_UNSIGNED' + converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) @@ -270,10 +267,7 @@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.PrimitiveType): if tp.is_integer_type(): - if tp.is_signed_type(): - return '_cffi_from_c_SIGNED(%s, %s)' % (var, tp.name) - else: - return '_cffi_from_c_UNSIGNED(%s, %s)' % (var, tp.name) + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) else: @@ -801,25 +795,23 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble -#define _cffi_from_c_SIGNED(x, type) \ - (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x)) -#define _cffi_from_c_UNSIGNED(x, type) \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) +#define _cffi_from_c_int(x, type) \ + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ + sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ + PyLong_FromUnsignedLongLong(x)) \ + : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ + PyLong_FromLongLong(x))) -#define _cffi_to_c_SIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_i8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_i16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_i32(o) : \ - sizeof(type) == 8 ? _cffi_to_c_i64(o) : \ - (Py_FatalError("unsupported size for type " #type), 0)) -#define _cffi_to_c_UNSIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_u8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_u16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_u32(o) : \ - sizeof(type) == 8 ? 
_cffi_to_c_u64(o) : \ +#define _cffi_to_c_int(o, type) \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ + : _cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ + : _cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ + : _cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? _cffi_to_c_u64(o) \ + : _cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -148,28 +148,27 @@ all_primitive_types = model.PrimitiveType.ALL_PRIMITIVE_TYPES -all_signed_integer_types = sorted(tp for tp in all_primitive_types - if all_primitive_types[tp] == 'i') -all_unsigned_integer_types = sorted(tp for tp in all_primitive_types - if all_primitive_types[tp] == 'u') +all_integer_types = sorted(tp for tp in all_primitive_types + if all_primitive_types[tp] == 'i') all_float_types = sorted(tp for tp in all_primitive_types if all_primitive_types[tp] == 'f') +def all_signed_integer_types(ffi): + return [x for x in all_integer_types if int(ffi.cast(x, -1)) < 0] + +def all_unsigned_integer_types(ffi): + return [x for x in all_integer_types if int(ffi.cast(x, -1)) > 0] + + def test_primitive_category(): for typename in all_primitive_types: tp = model.PrimitiveType(typename) C = tp.is_char_type() - U = tp.is_unsigned_type() - S = tp.is_signed_type() F = tp.is_float_type() I = tp.is_integer_type() assert C == (typename in ('char', 'wchar_t')) - assert U == (typename.startswith('unsigned ') or - typename == '_Bool' or typename == 'size_t' or - typename == 'uintptr_t' or typename.startswith('uint')) assert F == (typename in ('float', 'double', 'long double')) - assert S + U + F + C == 1 # one and only one of them is true - assert I == (S or U) + assert I + F + C == 1 # one and only one of them is true def test_all_integer_and_float_types(): typenames = [] @@ -207,7 +206,7 @@ def test_var_signed_integer_types(): ffi = FFI() - lst = all_signed_integer_types + lst = all_signed_integer_types(ffi) csource = "\n".join(["%s somevar_%s;" % (tp, tp.replace(' ', '_')) for tp in lst]) ffi.cdef(csource) @@ -226,7 +225,7 @@ def test_var_unsigned_integer_types(): ffi = FFI() - lst = all_unsigned_integer_types + lst = all_unsigned_integer_types(ffi) csource = "\n".join(["%s somevar_%s;" % (tp, tp.replace(' ', '_')) for tp in lst]) ffi.cdef(csource) @@ -247,7 +246,7 @@ def test_fn_signed_integer_types(): ffi = FFI() - lst = all_signed_integer_types + lst = all_signed_integer_types(ffi) cdefsrc = "\n".join(["%s somefn_%s(%s);" % (tp, tp.replace(' ', '_'), tp) for tp in lst]) ffi.cdef(cdefsrc) @@ -267,7 +266,7 @@ def test_fn_unsigned_integer_types(): ffi = FFI() - lst = all_unsigned_integer_types + lst = all_unsigned_integer_types(ffi) cdefsrc = "\n".join(["%s somefn_%s(%s);" % (tp, tp.replace(' ', '_'), tp) for tp in lst]) ffi.cdef(cdefsrc) @@ -464,11 +463,12 @@ def test_struct_float_vs_int(): if sys.platform == 'win32': py.test.skip("XXX fixme: only gives warnings") - for typename in all_signed_integer_types: + ffi = FFI() + for typename in all_signed_integer_types(ffi): for real in all_float_types: _check_field_match(typename, real, expect_mismatch=True) for typename in all_float_types: - for real in all_signed_integer_types: + for real in all_signed_integer_types(ffi): _check_field_match(typename, real, expect_mismatch=True) def test_struct_array_field(): From noreply at buildbot.pypy.org Tue 
Jan 7 09:58:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 7 Jan 2014 09:58:10 +0100 (CET) Subject: [pypy-commit] cffi default: Issue 123: force __thread off on OS/X, for now. Message-ID: <20140107085810.F2BAE1C06A7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1450:3e29d321b1f1 Date: 2014-01-07 09:57 +0100 http://bitbucket.org/cffi/cffi/changeset/3e29d321b1f1/ Log: Issue 123: force __thread off on OS/X, for now. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -42,6 +42,11 @@ resultlist[:] = res def ask_supports_thread(): + if sys.platform == "darwin": + sys.stderr.write("OS/X: confusion between 'cc' versus 'gcc'") + sys.stderr.write(" (see issue 123)\n") + sys.stderr.write("will not use '__thread' in the C code\n") + return import distutils.errors from distutils.ccompiler import new_compiler compiler = new_compiler(force=1) From noreply at buildbot.pypy.org Tue Jan 7 11:21:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 7 Jan 2014 11:21:51 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: Fix my resume Message-ID: <20140107102151.2F25A1C02D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r469:f42fa77b8576 Date: 2014-01-07 11:19 +0100 http://bitbucket.org/pypy/pypy.org/changeset/f42fa77b8576/ Log: Fix my resume diff --git a/people.html b/people.html --- a/people.html +++ b/people.html @@ -48,7 +48,7 @@

Armin Rigo

image/people/arigo.png -

Armin Rigo is a researcher at the Heinrich-Heine Universitat +

Armin Rigo is a former researcher at the Heinrich-Heine Universitat Dusseldorf (Germany). He studied Mathematics at the University of Lausanne (Switzerland), obtained his Ph.D. in Logic and Set Theory at the Free University of Brussels (Belgium) in 2002, and diff --git a/source/people.txt b/source/people.txt --- a/source/people.txt +++ b/source/people.txt @@ -8,7 +8,7 @@ .. image:: image/people/arigo.png -Armin Rigo is a researcher at the Heinrich-Heine Universitat +Armin Rigo is a former researcher at the Heinrich-Heine Universitat Dusseldorf (Germany). He studied Mathematics at the University of Lausanne (Switzerland), obtained his Ph.D. in Logic and Set Theory at the Free University of Brussels (Belgium) in 2002, and From noreply at buildbot.pypy.org Tue Jan 7 14:15:19 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 7 Jan 2014 14:15:19 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add myself to the list of people coming to the Leysin sprint. Message-ID: <20140107131519.9B2B61C00B3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: extradoc Changeset: r5123:73830e218bba Date: 2014-01-07 14:15 +0100 http://bitbucket.org/pypy/extradoc/changeset/73830e218bba/ Log: Add myself to the list of people coming to the Leysin sprint. diff --git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt --- a/sprintinfo/leysin-winter-2014/people.txt +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -17,6 +17,7 @@ Remi Meier 11-19 Ermina Johan Råde 11-18 Ermina Antonio Cuni 14-18 Ermina +Manuel Jacob 12-19 private ==================== ============== ======================= From noreply at buildbot.pypy.org Thu Jan 9 19:19:52 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:52 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: add WITH_ARGS_EXECUTE_METHOD prim, as it doesn't work in Smalltalk Message-ID: <20140109181952.069F21C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r562:151ff8db1a76 Date: 2014-01-09 16:22 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/151ff8db1a76/ Log: add WITH_ARGS_EXECUTE_METHOD prim, as it doesn't work in Smalltalk diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1230,6 +1230,7 @@ RESUME = 87 SUSPEND = 88 FLUSH_CACHE = 89 +WITH_ARGS_EXECUTE_METHOD = 188 @expose_primitive(BLOCK_COPY, unwrap_spec=[object, int]) def func(interp, s_frame, w_context, argcnt): @@ -1341,6 +1342,20 @@ s_frame.pop() return interp.stack_frame(s_new_frame) + at expose_primitive(WITH_ARGS_EXECUTE_METHOD, unwrap_spec=[object, list, object], no_result=True) +def func(interp, s_frame, w_rcvr, args_w, w_cm): + if not isinstance(w_cm, model.W_CompiledMethod): + raise PrimitiveFailedError() + + s_method = w_cm.as_compiledmethod_get_shadow(interp.space) + code = s_method.primitive() + if code: + raise PrimitiveFailedError("withArgs:executeMethod: not support with primitive method") + s_new_frame = s_method.create_frame(interp.space, w_rcvr, args_w, s_frame) + if interp.trace: + print interp.padding() + s_new_frame.short_str() + return interp.stack_frame(s_new_frame) + @expose_primitive(SIGNAL, unwrap_spec=[object], clean_stack=False, no_result=True) def func(interp, s_frame, w_rcvr): # XXX we might want to disable this check From noreply at buildbot.pypy.org Thu Jan 9 19:19:53 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:53 +0100 (CET) Subject: [pypy-commit] lang-smalltalk 
rbitblt: automatically round fractions to ints directly in our bitblt Message-ID: <20140109181953.0BCFE1C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r563:c22d170b585b Date: 2014-01-09 16:55 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/c22d170b585b/ Log: automatically round fractions to ints directly in our bitblt diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -37,6 +37,15 @@ return w_rcvr +def intOrIfNil(space, w_int, i): + if w_int is space.w_nil: + return i + elif isinstance(w_int, model.W_Float): + return intmask(int(space.unwrap_float(w_int))) + else: + return space.unwrap_int(w_int) + + class BitBltShadow(AbstractCachingShadow): WordSize = 32 MaskTable = [r_uint(0)] @@ -48,10 +57,7 @@ pass def intOrIfNil(self, w_int, i): - if w_int is self.space.w_nil: - return i - else: - return self.space.unwrap_int(w_int) + return intOrIfNil(self.space, w_int, i) def loadForm(self, w_form): if not isinstance(w_form, model.W_PointersObject): @@ -716,6 +722,9 @@ AbstractCachingShadow.__init__(self, space, w_self) self.invalid = False + def intOrIfNil(self, w_int, i): + return intOrIfNil(self.space, w_int, i) + def sync_cache(self): self.invalid = True if self.size() < 5: @@ -725,9 +734,9 @@ return if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): return - self.width = self.space.unwrap_int(self.fetch(1)) - self.height = self.space.unwrap_int(self.fetch(2)) - self.depth = self.space.unwrap_int(self.fetch(3)) + self.width = self.intOrIfNil(self.fetch(1), 0) + self.height = self.intOrIfNil(self.fetch(2), 0) + self.depth = self.intOrIfNil(self.fetch(3), 0) if self.width < 0 or self.height < 0: return self.msb = self.depth > 0 @@ -738,8 +747,8 @@ w_offset = self.fetch(4) assert isinstance(w_offset, model.W_PointersObject) if not w_offset is self.space.w_nil: - self.offsetX = self.space.unwrap_int(w_offset._fetch(0)) - self.offsetY = self.space.unwrap_int(w_offset._fetch(1)) + self.offsetX = self.intOrIfNil(w_offset._fetch(0), 0) + self.offsetY = self.intOrIfNil(w_offset._fetch(1), 0) self.pixPerWord = 32 / self.depth self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 if self.w_bits.size() != (self.pitch * self.height): From noreply at buildbot.pypy.org Thu Jan 9 19:19:54 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:54 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: fix drawing of miniimage Message-ID: <20140109181954.0D45B1C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r564:55bea8ee8fbb Date: 2014-01-09 17:33 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/55bea8ee8fbb/ Log: fix drawing of miniimage diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -751,6 +751,6 @@ self.offsetY = self.intOrIfNil(w_offset._fetch(1), 0) self.pixPerWord = 32 / self.depth self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 - if self.w_bits.size() != (self.pitch * self.height): + if self.w_bits.size() < (self.pitch * self.height): return self.invalid = False From noreply at buildbot.pypy.org Thu Jan 9 19:19:55 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:55 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: should fix the sources not found issue i had Message-ID: 
<20140109181955.0DF461C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r565:b8c725eb9faf Date: 2014-01-09 18:23 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/b8c725eb9faf/ Log: should fix the sources not found issue i had diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -191,7 +191,7 @@ if path is None: path = "Squeak.image" - path = os.path.join(os.getcwd(), path) + path = os.path.abspath(path) try: f = open_file_as_stream(path, mode="rb", buffering=0) except OSError as e: From noreply at buildbot.pypy.org Thu Jan 9 19:19:56 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:56 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: fix translation with jit, add some crazy jit hints in bitblt (needs work) Message-ID: <20140109181956.174CE1C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r566:08464f06251d Date: 2014-01-09 18:53 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/08464f06251d/ Log: fix translation with jit, add some crazy jit hints in bitblt (needs work) diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -3,7 +3,7 @@ from spyvm.shadow import AbstractCachingShadow from spyvm.plugins.plugin import Plugin -from rpython.rlib import jit +from rpython.rlib import jit, objectmodel from rpython.rlib.rarithmetic import r_uint, intmask @@ -61,10 +61,10 @@ def loadForm(self, w_form): if not isinstance(w_form, model.W_PointersObject): - raise PrimitiveFailedError("cannot load form from %s" % w_form.as_repr_string()) + raise PrimitiveFailedError("cannot load form") s_form = w_form.as_special_get_shadow(self.space, FormShadow) if s_form.invalid: - raise PrimitiveFailedError("Could not create form shadow for %s" % w_form.as_repr_string()) + raise PrimitiveFailedError("Could not create form shadow") return s_form def loadHalftone(self, w_halftone_form): @@ -253,6 +253,7 @@ self.destIndex = (self.dy * self.dest.pitch) + (self.dx / self.dest.pixPerWord | 0) self.destDelta = (self.dest.pitch * self.vDir) - (self.nWords * self.hDir) + @jit.unroll_safe def copyLoopNoSource(self): halftoneWord = BitBltShadow.AllOnes for i in range(self.bbH): @@ -287,6 +288,7 @@ self.destIndex += 1 self.destIndex += self.destDelta + @jit.unroll_safe def copyLoopPixMap(self): # This version of the inner loop maps source pixels # to a destination form with different depth. Because it is already @@ -362,6 +364,7 @@ self.sourceIndex += self.sourceDelta self.destIndex += self.destDelta + @jit.unroll_safe def pickSourcePixels(self, nPixels, srcMask, dstMask, srcShiftInc, dstShiftInc): # Pick nPix pixels starting at srcBitIndex from the source, map by the # color map, and justify them according to dstBitIndex in the resulting destWord. 
@@ -403,6 +406,7 @@ rotated = rotated | (thisWord & skewMask) << self.skew return rotated + @jit.unroll_safe def copyLoop(self): # self version of the inner loop assumes we do have a source sourceLimit = self.source.w_bits.size() @@ -511,56 +515,58 @@ def mergeFn(self, src, dest): return r_uint(self.merge( r_uint(src), - r_uint(dest) + r_uint(dest), + self.combinationRule )) - def merge(self, source_word, dest_word): + @objectmodel.specialize.arg_or_var(3) + def merge(self, source_word, dest_word, combinationRule): assert isinstance(source_word, r_uint) and isinstance(dest_word, r_uint) - if self.combinationRule == 0: + if combinationRule == 0: return 0 - elif self.combinationRule == 1: + elif combinationRule == 1: return source_word & dest_word - elif self.combinationRule == 2: + elif combinationRule == 2: return source_word & ~dest_word - elif self.combinationRule == 3: + elif combinationRule == 3: return source_word - elif self.combinationRule == 4: + elif combinationRule == 4: return ~source_word & dest_word - elif self.combinationRule == 5: + elif combinationRule == 5: return dest_word - elif self.combinationRule == 6: + elif combinationRule == 6: return source_word ^ dest_word - elif self.combinationRule == 7: + elif combinationRule == 7: return source_word | dest_word - elif self.combinationRule == 8: + elif combinationRule == 8: return ~source_word & ~dest_word - elif self.combinationRule == 9: + elif combinationRule == 9: return ~source_word ^ dest_word - elif self.combinationRule == 10: + elif combinationRule == 10: return ~dest_word - elif self.combinationRule == 11: + elif combinationRule == 11: return source_word | ~dest_word - elif self.combinationRule == 12: + elif combinationRule == 12: return ~source_word - elif self.combinationRule == 13: + elif combinationRule == 13: return ~source_word | dest_word - elif self.combinationRule == 14: + elif combinationRule == 14: return ~source_word | ~dest_word - elif self.combinationRule >= 15 and self.combinationRule <= 17: + elif combinationRule >= 15 and combinationRule <= 17: return dest_word - elif self.combinationRule == 18: + elif combinationRule == 18: return source_word + dest_word - elif self.combinationRule == 19: + elif combinationRule == 19: return source_word - dest_word - elif self.combinationRule == 20: + elif combinationRule == 20: return self.rgbAdd(source_word, dest_word) - elif self.combinationRule == 21: + elif combinationRule == 21: return self.rgbSub(source_word, dest_word) - elif 22 <= self.combinationRule <= 23: - raise PrimitiveFailedError("Tried old rule %d" % self.combinationRule) - elif self.combinationRule == 24: + elif 22 <= combinationRule <= 23: + raise PrimitiveFailedError("Tried old rule %d" % combinationRule) + elif combinationRule == 24: return self.alphaBlendWith(source_word, dest_word) - elif self.combinationRule == 25: + elif combinationRule == 25: if source_word == 0: return dest_word else: @@ -570,17 +576,17 @@ self.dest.depth, self.dest.pixPerWord )) - elif self.combinationRule == 26: + elif combinationRule == 26: return self.partitionedANDtonBitsnPartitions( ~source_word, dest_word, self.dest.depth, self.dest.pixPerWord ) - elif self.combinationRule == 37: + elif combinationRule == 37: return self.alphaBlendScaled(source_word, dest_word) else: - raise PrimitiveFailedError("Not implemented combinationRule %d" % self.combinationRule) + raise PrimitiveFailedError("Not implemented combinationRule %d" % combinationRule) def alphaBlendComponent(self, sourceWord, destinationWord, shift, alpha): 
unAlpha = 255 - alpha @@ -666,6 +672,7 @@ source_word, dest_word, 8, 4 ) + @jit.unroll_safe def partitionedAddTonBitsnPartitions(self, word1, word2, nBits, nParts): # partition mask starts at the right mask = BitBltShadow.MaskTable[nBits] @@ -681,6 +688,7 @@ mask = mask << nBits # slide left to next partition return result + @jit.unroll_safe def partitionedSubTonBitsnPartitions(self, word1, word2, nBits, nParts): # partition mask starts at the right mask = BitBltShadow.MaskTable[nBits] @@ -695,6 +703,7 @@ mask = mask << nBits # slide left to next partition" return result + @jit.unroll_safe def partitionedANDtonBitsnPartitions(self, word1, word2, nBits, nParts): # partition mask starts at the right mask = BitBltShadow.MaskTable[nBits] diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1352,8 +1352,6 @@ if code: raise PrimitiveFailedError("withArgs:executeMethod: not support with primitive method") s_new_frame = s_method.create_frame(interp.space, w_rcvr, args_w, s_frame) - if interp.trace: - print interp.padding() + s_new_frame.short_str() return interp.stack_frame(s_new_frame) @expose_primitive(SIGNAL, unwrap_spec=[object], clean_stack=False, no_result=True) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -191,7 +191,7 @@ if path is None: path = "Squeak.image" - path = os.path.abspath(path) + path = os.path.join(os.getcwd(), path).replace("/", "\\") try: f = open_file_as_stream(path, mode="rb", buffering=0) except OSError as e: From noreply at buildbot.pypy.org Thu Jan 9 19:20:10 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:20:10 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: update Squeak 4 image, this works without balloon Message-ID: <20140109182010.578AC1C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r567:9733ab95cbbe Date: 2014-01-09 19:18 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/9733ab95cbbe/ Log: update Squeak 4 image, this works without balloon diff too long, truncating to 2000 out of 193508 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -36,4 +36,4 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! !Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! !ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! 
!Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! !LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. The nearest even integer is chosen in this case. Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. An even mantissa is rounded upper only if the fraction part is not a tie." "Algorihm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." | mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." 
((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. [(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." ^nil! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anythiEng answer the global, class, or pool association with that nameE, if any, else add it to Undeclared a answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. ^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notiEfying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nilE->Form class "! ! 
!Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. "If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. "Check for gradual underflow, in which case the mantissa will loose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. "Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. ^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version cares of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. ]. self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! 
bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! I encode names and literals into tree nodes with byte codes for the compiler. Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. 
cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! bindingOf: varName ifAbsent: aBlock ^self associationAt: varName ifAbsent: aBlock! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! init: sourceStream cue: aCue failBlock: aBlock self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. The argument noPattern is a Boolean that is true if the the sourceStream does not contain a method header (i.e., for DoIts)." | methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. [repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! 
source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! getClass ^ class! ! !CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! 
!Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the contxt. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. self source: cue source. ^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! setCue: aCue cue := aCue. "Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | aString newTranslations assoc currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. 
aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | newTranslations currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [| aString assoc | Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'! scanFrom: aByteStream environment: anEnvironment "This should probably be reimplemented using an environment for compilation. For now, don't change anything" ^ self scanFrom: aByteStream! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'! bindingOf: varName ifAbsent: aBlock "SystemDictionary includes Symbols only" ^super bindingOf: varName asSymbol ifAbsent: aBlock! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'! undeclared ^ self at: #Undeclared! ! "System"! !ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955! expectedFailures ^ #().! ! "Tests"! ReleaseBuilder subclass: #ReleaseBuilderFor4dot5 instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'ReleaseBuilder'! !ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0! The release builder for Squeak 4.5! !ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'! transferCurrentPackagesAsUser: username password: password "Copy the packages currently loaded in the image from the trunk repository to my releaseRepository." | trunkRep releaseRep | trunkRep := self trunkRepository. releaseRep := self releaseRepository user: username; password: password; yourself. MCWorkingCopy allManagers do: [ : eachWorkingCopy | eachWorkingCopy ancestors do: [ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse: [ (trunkRep versionWithInfo: eachVersionInfo) ifNil: [ Warning signal: eachVersionInfo name , ' not found in ', trunkRep ] ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! openWelcomeWorkspaces TheWorldMainDockingBar instance showWelcomeText: #squeakUserInterface label: 'Squeak User Interface' in: (40 @ 40 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #workingWithSqueak label: 'Working With Squeak' in: (80 @ 80 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #licenseInformation label: 'License Information' in: (120 @ 120 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeFutureDirections label: 'Future Directions' in: (160 @ 160 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeToSqueak label: 'Welcome to Squeak 4.5' in: (200 @ 200 extent: 500 @ 300)! ! 
!ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'! prepareNewBuild super prepareNewBuild. MCMockPackageInfo initialize.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'! releaseRepository "At release time, change 'trunk' to 'squeak45'." ^ MCHttpRepository location: 'http://source.squeak.org/trunk' user: 'squeak' password: 'squeak'! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'! setDisplayExtent: extent "Uncomment next line when the primitives become available in the Squeak VM." " DisplayScreen hostWindowSize: extent." Display extent = extent ifFalse: [ Warning signal: 'Display extent not set to ', extent ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! setPreferences Preferences installBrightWindowColors ; setPreference: #scrollBarsWithoutMenuButton toValue: true ; setPreference: #swapMouseButtons toValue: true ; setPreference: #annotationPanes toValue: true ; setPreference: #showSplitterHandles toValue: false ; setPreference: #showBoundsInHalo toValue: true ; setPreference: #alternateHandlesLook toValue: false ; setPreference: #roundedMenuCorners toValue: false ; setPreference: #roundedWindowCorners toValue: false. PluggableButtonMorph roundedButtonCorners: false. FillInTheBlankMorph roundedDialogCorners: false. Workspace shouldStyle: false. NetNameResolver enableIPv6: true.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! switchToNewRepository | old44Repository | MCMcmUpdater defaultUpdateURL: self releaseRepository description. old44Repository := MCRepositoryGroup default repositories detect: [:each | each description includesSubString: 'squeak44'] ifNone: [nil]. old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository]. MCRepositoryGroup default addRepository: self releaseRepository! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! versionString ^ 'Squeak4.5'.! ! ReleaseBuilder class removeSelector: #transferCurrentPackages! "ReleaseBuilder"! !Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114! initialize self install! ! "Environments"! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878! initPattern: aString notifying: req return: aBlock | result | self init: (ReadStream on: aString asString) cue: (CompilationCue source: aString requestor: req) failBlock: [^nil]. encoder := self. result := aBlock value: (self pattern: false inContext: nil). encoder := failBlock := nil. "break cycles" ^result! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471! parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock | c | c := CompilationCue source: sourceStream context: aContext class: class category: aCategory requestor: req. ^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963! evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted. 
Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | theClass | theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class). self setCue: (CompilationCue source: textOrStream context: aContext receiver: receiver class: theClass environment: theClass environment category: nil requestor: aRequestor). ^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488! from: textOrStream class: aClass classified: aCategory context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass category: aCategory requestor: req)! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309! from: textOrStream class: aClass context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass requestor: req) ! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506! init: aClass context: aContext notifying: anObject | c | c := CompilationCue context: aContext class: aClass requestor: nil. self init: c notifying: anObject! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698! temps: tempVars literals: lits class: cl "Initialize this encoder for decompilation." self setCue: (CompilationCue class: cl). supered := false. nTemps := tempVars size. tempVars do: [:node | scopeTable at: node name put: node]. literalStream := WriteStream on: (Array new: lits size). literalStream nextPutAll: lits. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32.! ! "Compiler"! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010! addClassVarName: aString "Add the argument, aString, as a class variable of the receiver. Signal an error if the first character of aString is not capitalized, or if it is already a variable named in the class." | symbol oldState | oldState := self copy. aString first canBeGlobalVarInitial ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.']. symbol := aString asSymbol. self withAllSubclasses do: [:subclass | (self canFindWithoutEnvironment: symbol) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: aString; signal: aString, ' is already defined']]. classPool == nil ifTrue: [classPool := Dictionary new]. (classPool includesKey: symbol) ifFalse: ["Pick up any refs in Undeclared" classPool declare: symbol from: environment undeclared. SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024! bindingOf: varName ^ self bindingOf: varName environment: self environment! ! !Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804! category "Answer the system organization category for the receiver. First check whether the category name stored in the ivar is still correct and only if this fails look it up (latter is much more expensive)" category ifNotNil: [ :symbol | ((self environment organization listAtCategoryNamed: symbol) includes: self name) ifTrue: [ ^symbol ] ]. category := self environment organization categoryOfElement: self name. ^category! ! 
!Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730! declare: varString "Declare class variables common to all instances. Answer whether recompilation is advisable." | newVars conflicts | newVars := (Scanner new scanFieldNames: varString) collect: [:x | x asSymbol]. newVars do: [:var | var first canBeGlobalVarInitial ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']]. conflicts := false. classPool == nil ifFalse: [(classPool keys reject: [:x | newVars includes: x]) do: [:var | self removeClassVarName: var]]. (newVars reject: [:var | self classPool includesKey: var]) do: [:var | "adding" "check if new vars defined elsewhere" (self canFindWithoutEnvironment: var) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: var; signal: var, ' is already defined'. conflicts := true]]. newVars size > 0 ifTrue: [classPool := self classPool. "in case it was nil" newVars do: [:var | classPool declare: var from: environment undeclared]]. ^conflicts! ! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475! removeClassVarName: aString "Remove the class variable whose name is the argument, aString, from the names defined in the receiver, a class. Create an error notification if aString is not a class variable or if it is still being used in the code of the class." | aSymbol | aSymbol := aString asSymbol. (classPool includesKey: aSymbol) ifFalse: [^self error: aString, ' is not a class variable']. self withAllSubclasses do:[:subclass | (Array with: subclass with: subclass class) do:[:classOrMeta | (classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty ifFalse: [ InMidstOfFileinNotification signal ifTrue: [ Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '. ^ environment undeclared declare: aSymbol from: classPool]. (self confirm: (aString,' is still used in code of class ', classOrMeta name, '.\Is it okay to move it to Undeclared?') withCRs) ifTrue:[^Undeclared declare: aSymbol from: classPool] ifFalse:[^self]]]]. classPool removeKey: aSymbol. classPool isEmpty ifTrue: [classPool := nil]. ! ! !Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206! rename: aString "The new name of the receiver is the argument, aString." | oldName newName | (newName := aString asSymbol) = (oldName := self name) ifTrue: [^ self]. (self environment includesKey: newName) ifTrue: [^ self error: newName , ' already exists']. (environment undeclared includesKey: newName) ifTrue: [self inform: 'There are references to, ' , aString printString , ' from Undeclared. Check them after this change.']. name := newName. self environment renameClass: self from: oldName! ! !ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430! name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe "Define a new class in the given environment. If unsafe is true do not run any validation checks. This facility is provided to implement important system changes." | oldClass instVars classVars copyOfOldClass newClass | environ := env. instVars := Scanner new scanFieldNames: instVarString. classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol]. "Validate the proposed name" unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]]. 
oldClass := env at: className ifAbsent:[nil]. oldClass isBehavior ifFalse: [oldClass := nil] "Already checked in #validateClassName:" ifTrue: [ copyOfOldClass := oldClass copy. copyOfOldClass superclass addSubclass: copyOfOldClass]. [ | newCategory needNew force organization oldCategory | unsafe ifFalse:[ "Run validation checks so we know that we have a good chance for recompilation" (self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil]. (self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]]. "See if we need a new subclass" needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. needNew == nil ifTrue:[^nil]. "some error" (needNew and:[unsafe not]) ifTrue:[ "Make sure we don't redefine any dangerous classes" (self tooDangerousClasses includes: oldClass name) ifTrue:[ self error: oldClass name, ' cannot be changed'. ]. "Check if the receiver should not be redefined" (oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[ self notify: oldClass name asText allBold, ' should not be redefined. \Proceed to store over it.' withCRs]]. needNew ifTrue:[ "Create the new class" newClass := self newSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. newClass == nil ifTrue:[^nil]. "Some error" newClass setName: className. newClass environment: environ. ] ifFalse:[ "Reuse the old class" newClass := oldClass. ]. "Install the class variables and pool dictionaries... " force := (newClass declare: classVarString) | (newClass sharing: poolString). "... classify ..." newCategory := category asSymbol. organization := environ ifNotNil:[environ organization]. oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol]. organization classify: newClass name under: newCategory suppressIfDefault: true. "... recompile ..." newClass := self recompile: force from: oldClass to: newClass mutate: false. "... export if not yet done ..." (environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[ [environ at: newClass name put: newClass] on: AttemptToWriteReadOnlyGlobal do:[:ex| ex resume: true]. environ flushClassNameCache. ]. newClass doneCompiling. "... notify interested clients ..." oldClass isNil ifTrue: [ SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory. ^ newClass]. newCategory ~= oldCategory ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category] ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.]. ] ensure: [copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass]. Behavior flushObsoleteSubclasses. ]. ^newClass! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019! superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class." | env | env := EnvironmentRequest signal ifNil: [newSuper environment]. ^self name: t inEnvironment: env subclassOf: newSuper type: newSuper typeOfClass instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! 
!ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912! superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable byte-sized nonpointer variables." | oldClassOrNil actualType env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields']. (aClass isVariable and: [aClass isWords]) ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields']. oldClassOrNil := aClass environment at: t ifAbsent:[nil]. actualType := (oldClassOrNil notNil and: [oldClassOrNil typeOfClass == #compiledMethod]) ifTrue: [#compiledMethod] ifFalse: [#bytes]. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: actualType instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442! superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #variable instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098! superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable word-sized nonpointer variables." | env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a word subclass of a class with named fields']. (aClass isVariable and: [aClass isBytes]) ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #words instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028! superclass: aClass weakSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class (the receiver) in which the subclass is to have weak indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #weak instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! 
! "Kernel"! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029! ambiguousSelector: aString inRange: anInterval | correctedSelector userSelection offset intervalWithOffset | self interactive ifFalse: [ "In non interactive mode, compile with backward comapatibility: $- is part of literal argument" Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'. ^super ambiguousSelector: aString inRange: anInterval]. "handle the text selection" userSelection := cue requestor selectionInterval. intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset. cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last. cue requestor select. "Build the menu with alternatives" correctedSelector := AmbiguousSelector signalName: aString inRange: intervalWithOffset. correctedSelector ifNil: [^self fail]. "Execute the selected action" offset := self substituteWord: correctedSelector wordInterval: intervalWithOffset offset: 0. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset. token := (correctedSelector readStream upTo: Character space) asSymbol! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136! collectTemporaryDeclarationsFrom: methodNode | tempsMarks str | tempsMarks := OrderedCollection new. str := cue requestor text asString. methodNode accept: (ParseNodeEnumerator ofBlock: [ :aNode | | mark | (aNode class canUnderstand: #tempsMark) ifTrue: [mark := aNode tempsMark. (mark notNil and: [ mark between: 1 and: str size ] and: [ (str at: mark) = $| ]) ifTrue: [ tempsMarks addLast: aNode ]]]). (tempsMark notNil and: [ tempsMark between: 1 and: str size ] and: [ (str at: tempsMark) = $| ]) ifTrue: [ tempsMarks addLast: self ]. ^ tempsMarks sorted: [ :a :b | a tempsMark > b tempsMark ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606! correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction "Correct the proposedKeyword to some selector symbol, correcting the original text if such action is indicated. abortAction is invoked if the proposedKeyword couldn't be converted into a valid selector. Spots is an ordered collection of intervals within the test stream of the for each of the keyword parts." | correctSelector userSelection | "If we can't ask the user, assume that the keyword will be defined later" self interactive ifFalse: [^proposedKeyword asSymbol]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spots first first to: spots last last. cue requestor select. correctSelector := UnknownSelector name: proposedKeyword. correctSelector ifNil: [^abortAction value]. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. self substituteSelector: correctSelector keywords wordIntervals: spots. ^(proposedKeyword last ~~ $: and: [correctSelector last == $:]) ifTrue: [abortAction value] ifFalse: [correctSelector]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242! correctVariable: proposedVariable interval: spot "Correct the proposedVariable to a known variable, or declare it as a new variable if such action is requested. 
We support declaring lowercase variables as temps or inst-vars, and uppercase variables as Globals or ClassVars, depending on whether the context is nil (class=UndefinedObject). Spot is the interval within the test stream of the variable. rr 3/4/2004 10:26 : adds the option to define a new class. " "Check if this is an i-var, that has been corrected already (ugly)" "Display the pop-up menu" | binding userSelection action | (encoder classEncoding instVarNames includes: proposedVariable) ifTrue: [^InstanceVariableNode new name: proposedVariable index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)]. "If we can't ask the user for correction, make it undeclared" self interactive ifFalse: [^encoder undeclared: proposedVariable]. "First check to see if the requestor knows anything about the variable" (binding := cue requestor bindingOf: proposedVariable) ifNotNil: [^encoder global: binding name: proposedVariable]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spot first to: spot last. cue requestor select. "Build the menu with alternatives" action := UndeclaredVariable signalFor: self name: proposedVariable inRange: spot. action ifNil: [^self fail]. "Execute the selected action" cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. ^action value! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921! declareUndeclaredTemps: methodNode "Declare any undeclared temps, declaring them at the smallest enclosing scope." | undeclared userSelection blocksToVars | (undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self]. userSelection := cue requestor selectionInterval. blocksToVars := IdentityDictionary new. undeclared do: [:var| (blocksToVars at: (var tag == #method ifTrue: [methodNode block] ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)]) ifAbsentPut: [SortedCollection new]) add: var name]. (blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil: [:rootVars| rootVars do: [:varName| self pasteTempAtMethodLevel: varName]]. (blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do: [:block| | decl | decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b]. block temporaries isEmpty ifTrue: [self substituteWord: ' | ', decl, ' |' wordInterval: (block tempsMark + 1 to: block tempsMark) offset: requestorOffset] ifFalse: [self substituteWord: decl, ' ' wordInterval: (block tempsMark to: block tempsMark - 1) offset: requestorOffset]]. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset. ReparseAfterSourceEditing signal! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770! defineClass: className "prompts the user to define a new class, asks for it's category, and lets the users edit further the definition" | sym cat def d2 | sym := className asSymbol. cat := UIManager default request: 'Enter class category : ' initialAnswer: self encoder classEncoding theNonMetaClass category. cat ifEmpty: [cat := 'Unknown']. def := 'Object subclass: #' , sym , ' instanceVariableNames: '''' classVariableNames: '''' poolDictionaries: '''' category: ''' , cat , ''''. d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def. d2 ifEmpty: [d2 := def]. Compiler evaluate: d2. ^ encoder global: (cue environment bindingOf: sym) name: sym! ! !Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567! 
externalFunctionDeclaration "Parse the function declaration for a call to an external library." | descriptorClass callType modifier retType externalName args argType module fn | descriptorClass := cue environment valueOf: #ExternalFunction ifAbsent: [^ false]. callType := descriptorClass callingConventionFor: here. callType == nil ifTrue:[^false]. [modifier := descriptorClass callingConventionModifierFor: token. modifier notNil] whileTrue: [self advance. callType := callType bitOr: modifier]. "Parse return type" self advance. retType := self externalType: descriptorClass. retType == nil ifTrue:[^self expected:'return type']. "Parse function name or index" externalName := here. (self match: #string) ifTrue:[externalName := externalName asSymbol] ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']]. (self matchToken: #'(') ifFalse:[^self expected:'argument list']. args := WriteStream on: Array new. [here == #')'] whileFalse:[ argType := self externalType: descriptorClass. argType == nil ifTrue:[^self expected:'argument']. argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType]. ]. (self matchToken: #')') ifFalse:[^self expected:')']. (self matchToken: 'module:') ifTrue:[ module := here. (self match: #string) ifFalse:[^self expected: 'String']. module := module asSymbol]. Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn| fn := xfn name: externalName module: module callType: callType returnType: retType argumentTypes: args contents. self allocateLiteral: fn. ]. (self matchToken: 'error:') ifTrue: [| errorCodeVariable | errorCodeVariable := here. (hereType == #string or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)']. self advance. self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)). fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]] ifFalse: [self addPragma: (Pragma keyword: #primitive: arguments: #(120))]. ^true ! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223! notify: string at: location cue requestor isNil ifTrue: [(encoder == self or: [encoder isNil]) ifTrue: [^ self fail "failure setting up syntax error"]. SyntaxErrorNotification inClass: encoder classEncoding category: cue category withCode: (source contents asText copyReplaceFrom: location to: location - 1 with: ((string , ' ->') asText allBold addAttribute: TextColor red; yourself)) doitFlag: doitFlag errorMessage: string location: location] ifFalse: [cue requestor notify: string , ' ->' at: location in: source]. ^self fail! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108! pasteTempAtMethodLevel: name | insertion delta theTextString characterBeforeMark | theTextString := cue requestor text string. characterBeforeMark := theTextString at: tempsMark-1 ifAbsent: [$ ]. (theTextString at: tempsMark) = $| ifTrue: [ "Paste it before the second vertical bar" insertion := name, ' '. characterBeforeMark isSeparator ifFalse: [ insertion := ' ', insertion]. delta := 0. ] ifFalse: [ "No bars - insert some with CR, tab" insertion := '| ' , name , ' |',String cr. delta := 2. "the bar and CR" characterBeforeMark = Character tab ifTrue: [ insertion := insertion , String tab. delta := delta + 1. "the tab" ]. ]. 
tempsMark := tempsMark + (self substituteWord: insertion wordInterval: (tempsMark to: tempsMark-1) offset: 0) - delta! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305! queryUndefined | varStart varName | varName := parseNode key. varStart := self endOfLastToken + requestorOffset - varName size + 1. cue requestor selectFrom: varStart to: varStart + varName size - 1; select. (UndefinedVariable name: varName) ifFalse: [^ self fail]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341! removeEmptyTempDeclarationsFrom: methodNode | sourceCode madeChanges tempsMarkHolder | sourceCode := cue requestor text asString. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. madeChanges := false. tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start | tempsMarkChar0 := (sourceCode at: currentBlock tempsMark). tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1). tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2). tempsMarkChar0 = $| & tempsMarkChar1 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 1]. tempsMarkChar0 = $| & tempsMarkChar1 = $ & tempsMarkChar2 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 2]. start notNil & end notNil ifTrue: [ | lineStart lineEnd | lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]). lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ]. ((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ]) ifTrue: [ start := lineStart. end := lineEnd ]. cue requestor correctFrom: start to: end with: ''. madeChanges := true. currentBlock tempsMark: nil ] ]. madeChanges ifTrue: [ReparseAfterSourceEditing signal]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281! removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks | start end | end := currentBlock tempsMark - 1. ["Beginning at right temp marker..." start := end - temp size + 1. end < temp size or: [ (str at: start) = $| ] or: [ temp = (str copyFrom: start to: end) and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator) & ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]] whileFalse: [ "Search left for the unused temp" end := cue requestor nextTokenFrom: end direction: -1 ]. (end < temp size or: [ (str at: start) = $| ]) ifFalse: [(str at: start - 1) = $ ifTrue: [ start := start - 1 ]. cue requestor correctFrom: start to: end with: ''. someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)]. ^true ]. ^false! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194! removeUnusedTemps: methodNode "Scan for unused temp names, and prompt the user about the prospect of removing each one found" | madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange | madeChanges := false. tempMarkHoldersToChange := OrderedCollection new. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. unusedTempNames := encoder unusedTempNames select: [ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]]. tempsMarkHolder do: [ :currentBlock | tempMarkHoldersToChange add: currentBlock. 
unusedTempNames do: [ :temp | (self removeUnusedTemporaryNamed: temp from: cue requestor text asString lookingAt: currentBlock movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]]. madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode. ReparseAfterSourceEditing signal ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326! substituteWord: correctWord wordInterval: spot offset: o "Substitute the correctSelector into the (presumed interactive) receiver. Update requestorOffset based on the delta size and answer the updated offset." cue requestor correctFrom: spot first + o to: spot last + o with: correctWord. requestorOffset := requestorOffset + correctWord size - spot size. ^o + correctWord size - spot size! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807! temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863! compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted." | methodNode method theClass | theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class. self from: textOrStream class: theClass context: aContext notifying: aRequestor. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [ methodNode generateWithTempNames ] ifFalse: [methodNode generate]. logFlag ifTrue: [SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext]. ^method! ! 
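As a point of reference for the compiledMethodFor:in:to:notifying:ifFail:logged: method above, here is a minimal workspace sketch of how it can be driven (not part of the changeset; it assumes a stock Squeak image and uses a made-up '3 + 4' doit as input):

| method |
method := Compiler new
    compiledMethodFor: '3 + 4'
    in: nil
    to: nil
    notifying: nil
    ifFail: [nil]
    logged: false.
method valueWithReceiver: nil arguments: #()    "expected to answer 7"

With a nil requestor, interactive answers false and the non-interactive path (methodNode generate) is taken, matching the cue-based code shown above.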
!Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593! format: aStream noPattern: noPattern ifFail: failBlock ^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387! notify: aString at: location "Refer to the comment in Object|notify:." ^ cue requestor == nil ifTrue: [SyntaxErrorNotification inClass: cue getClass category: cue category withCode: (sourceStream contents copyReplaceFrom: location to: location - 1 with: aString) doitFlag: false errorMessage: aString location: location] ifFalse: [cue requestor notify: aString at: location in: sourceStream]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201! parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! !Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! classEncoding "This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view." ^ cue getClass! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819! encodeLiteral: object ^self name: object key: (cue literalScannedAs: object notifying: self) class: LiteralNode type: LdLitType set: litSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010! encodeSelector: aSelector ^self name: aSelector key: aSelector class: SelectorNode type: SendType set: selectorSet! ! 
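A similarly hedged sketch for the parse:in:notifying: entry point above (the source string and the choice of Integer are made up; assumes a stock Squeak image):

| methodNode |
methodNode := Compiler new
    parse: 'double ^ self + self'
    in: Integer
    notifying: nil.
methodNode selector    "expected to answer #double"

Because the requestor is nil here as well, any problem is reported through notify:at: and the compilation simply fails, rather than triggering the interactive correction path described in the error-handling methods above.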
!Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123! environment "Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)" ^cue environment! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497! lookupInPools: varName ifFound: assocBlock ^Symbol hasInterned: varName ifTrue: [:sym| (cue bindingOf: sym) ifNil: [^false] ifNotNil: [:assoc| assocBlock value: assoc]]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306! possibleNamesFor: proposedName | results | results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil. ^ proposedName correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012! possibleVariablesFor: proposedVariable | results | results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil. proposedVariable first canBeGlobalVarInitial ifTrue: [ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ]. ^ proposedVariable correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830! undeclared: name | sym | requestor interactive ifTrue: [requestor requestor == #error: ifTrue: [requestor error: 'Undeclared']. ^self notify: 'Undeclared']. "Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. ***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. "This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. 
newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. "was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. "Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." 
newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). "53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). "Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. "Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! 
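For orientation, the table rebuilt by recreateSpecialObjectsArray above can be inspected directly from a workspace; a small hedged sketch (slot numbers follow the listing above):

Smalltalk specialObjectsArray at: 7.    "ByteString"
Smalltalk specialObjectsArray at: 24.    "the 32 special-bytecode selectors, paired with their argument counts"
Smalltalk specialObjectsArray at: 52.    "the error-code symbols assembled at the start of this chunk"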
!SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. systemOrganizer := environment organization! ! !Browser methodsFor: 'system category list' stamp: 'cwp 6/24/2012 23:06' prior: 36467357! From noreply at buildbot.pypy.org Thu Jan 9 19:19:50 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:50 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: don't raise in sync_cache of BitBlt and Forms Message-ID: <20140109181950.036D61C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r560:9f779d03110f Date: 2014-01-09 16:20 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/9f779d03110f/ Log: don't raise in sync_cache of BitBlt and Forms diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -9,6 +9,7 @@ BitBltPlugin = Plugin() + @BitBltPlugin.expose_primitive(unwrap_spec=[object], clean_stack=True) def primitiveCopyBits(interp, s_frame, w_rcvr): from spyvm.interpreter import Return @@ -22,6 +23,7 @@ space = interp.space s_bitblt = w_rcvr.as_special_get_shadow(space, BitBltShadow) + s_bitblt.loadBitBlt() s_bitblt.copyBits() w_dest_form = w_rcvr.fetch(space, 0) @@ -34,9 +36,6 @@ w_bitmap.flush_to_screen() return w_rcvr - def as_bitblt_get_shadow(self, space): - return - class BitBltShadow(AbstractCachingShadow): WordSize = 32 @@ -46,7 +45,7 @@ AllOnes = r_uint(0xFFFFFFFF) def sync_cache(self): - self.loadBitBlt() + pass def intOrIfNil(self, w_int, i): if w_int is self.space.w_nil: @@ -55,18 +54,12 @@ return self.space.unwrap_int(w_int) def loadForm(self, w_form): - try: - if not isinstance(w_form, model.W_PointersObject): - raise PrimitiveFailedError("cannot load form from %s" % w_form.as_repr_string()) - s_form = w_form.as_special_get_shadow(self.space, FormShadow) - if not isinstance(s_form, FormShadow): - raise PrimitiveFailedError("Could not create form shadow for %s" % w_form.as_repr_string()) - return s_form - except PrimitiveFailedError, e: - w_self = self.w_self() - assert isinstance(w_self, model.W_PointersObject) - w_self._shadow = None - raise e + if not isinstance(w_form, model.W_PointersObject): + raise PrimitiveFailedError("cannot load form from %s" % w_form.as_repr_string()) + s_form = w_form.as_special_get_shadow(self.space, FormShadow) + if s_form.invalid: + raise PrimitiveFailedError("Could not create form shadow for %s" % w_form.as_repr_string()) + return s_form def loadHalftone(self, w_halftone_form): if w_halftone_form is self.space.w_nil: @@ -76,7 +69,10 @@ return w_halftone_form.words 
else: assert isinstance(w_halftone_form, model.W_PointersObject) - w_bits = w_halftone_form.as_special_get_shadow(self.space, FormShadow).w_bits + s_form = w_halftone_form.as_special_get_shadow(self.space, FormShadow) + if s_form.invalid: + raise PrimitiveFailedError("Halftone form is invalid") + w_bits = s_form.w_bits assert isinstance(w_bits, model.W_WordsObject) return w_bits.words @@ -334,9 +330,8 @@ self.dstBitShift = dstShift self.destMask = self.mask1 nPix = startBits - words = self.nWords # Here is the horizontal loop... - for word in range(words): + for word in range(self.nWords): skewWord = self.pickSourcePixels(nPix, sourcePixMask, destPixMask, srcShiftInc, dstShiftInc) # align next word to leftmost pixel self.dstBitShift = dstShiftLeft @@ -352,7 +347,7 @@ self.dest.w_bits.setword(self.destIndex, destWord) self.destIndex += 1 - if (words == 2): # is the next word the last word? + if (self.nWords == 2): # is the next word the last word? self.destMask = self.mask2 nPix = endBits else: # use fullword mask for inner loop @@ -715,32 +710,31 @@ class FormShadow(AbstractCachingShadow): _attrs_ = ["w_bits", "width", "height", "depth", "offsetX", - "offsetY", "msb", "pixPerWord", "pitch"] + "offsetY", "msb", "pixPerWord", "pitch", "invalid"] + + def __init__(self, space, w_self): + AbstractCachingShadow.__init__(self, space, w_self) + self.invalid = False def sync_cache(self): + self.invalid = True if self.size() < 5: - w_self = self.w_self() - assert isinstance(w_self, model.W_PointersObject) - w_self._shadow = None - raise PrimitiveFailedError("Form object too small") + return self.w_bits = self.fetch(0) if self.w_bits is self.space.w_nil: return if not (isinstance(self.w_bits, model.W_WordsObject) or isinstance(self.w_bits, model.W_DisplayBitmap)): - w_self = self.w_self() - assert isinstance(w_self, model.W_PointersObject) - w_self._shadow = None - raise PrimitiveFailedError("Bits in are not words or displaybitmap") + return self.width = self.space.unwrap_int(self.fetch(1)) self.height = self.space.unwrap_int(self.fetch(2)) self.depth = self.space.unwrap_int(self.fetch(3)) if self.width < 0 or self.height < 0: - raise PrimitiveFailedError("Form has negative width or height!") + return self.msb = self.depth > 0 if self.depth < 0: self.depth = -self.depth if self.depth == 0: - raise PrimitiveFailedError("Form depth is 0!") + return w_offset = self.fetch(4) assert isinstance(w_offset, model.W_PointersObject) if not w_offset is self.space.w_nil: @@ -749,6 +743,5 @@ self.pixPerWord = 32 / self.depth self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 if self.w_bits.size() != (self.pitch * self.height): - w_self = self.w_self() - assert isinstance(w_self, model.W_PointersObject) - w_self._shadow = None + return + self.invalid = False From noreply at buildbot.pypy.org Thu Jan 9 19:19:47 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:47 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: implement GET_NEXT_EVENT, DEFER_UPDATES, and FORCE_DISPLAY_UPDATE Message-ID: <20140109181947.E4EBF1C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r558:c8495a907803 Date: 2014-01-09 11:34 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/c8495a907803/ Log: implement GET_NEXT_EVENT, DEFER_UPDATES, and FORCE_DISPLAY_UPDATE diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -6,16 +6,40 @@ from rsdl import RSDL, RSDL_helper -MOUSE_BTN_RIGHT = 1 -MOUSE_BTN_MIDDLE = 
2 -MOUSE_BTN_LEFT = 4 -MOD_SHIFT = 1 -MOD_CONTROL = 2 -MOD_ALT_CMD = 16 | 8 +# EventSensorConstants +RedButtonBit = 4 +BlueButtonBit = 2 +YellowButtonBit = 1 + +ShiftKeyBit = 1 +CtrlKeyBit = 2 +OptionKeyBit = 4 +CommandKeyBit = 8 + +EventTypeNone = 0 +EventTypeMouse = 1 +EventTypeKeyboard = 2 +EventTypeDragDropFiles = 3 +EventTypeMenu = 4 +EventTypeWindow = 5 +EventTypeComplex = 6 + +EventKeyChar = 0 +EventKeyDown = 1 +EventKeyUp = 2 + +WindowEventMetricChange = 1 +WindowEventClose = 2 +WindowEventIconise = 3 +WindowEventActivated = 4 +WindowEventPaint = 5 +WindowEventStinks = 6 + class SDLDisplay(object): _attrs_ = ["screen", "width", "height", "depth", "surface", "has_surface", - "mouse_position", "button", "key", "interrupt_key"] + "mouse_position", "button", "key", "interrupt_key", "_defer_updates", + "_deferred_event"] def __init__(self, title): assert RSDL.Init(RSDL.INIT_VIDEO) >= 0 @@ -27,6 +51,8 @@ self.interrupt_key = 15 << 8 # pushing all four meta keys, of which we support three... self.button = 0 self.key = 0 + self._deferred_event = None + self._defer_updates = False def set_video_mode(self, w, h, d): assert w > 0 and h > 0 @@ -47,8 +73,12 @@ def get_pixelbuffer(self): return rffi.cast(rffi.ULONGP, self.screen.c_pixels) - def flip(self): - RSDL.Flip(self.screen) + def defer_updates(self, flag): + self._defer_updates = flag + + def flip(self, force=False): + if (not self._defer_updates) or force: + RSDL.Flip(self.screen) def set_squeak_colormap(self, screen): # TODO: fix this up from the image @@ -72,11 +102,11 @@ b = rffi.cast(RSDL.MouseButtonEventPtr, event) btn = rffi.getintfield(b, 'c_button') if btn == RSDL.BUTTON_RIGHT: - btn = MOUSE_BTN_RIGHT + btn = YellowButtonBit elif btn == RSDL.BUTTON_MIDDLE: - btn = MOUSE_BTN_MIDDLE + btn = BlueButtonBit elif btn == RSDL.BUTTON_LEFT: - btn = MOUSE_BTN_LEFT + btn = RedButtonBit if c_type == RSDL.MOUSEBUTTONDOWN: self.button |= btn @@ -114,7 +144,75 @@ if (interrupt & 0xFF == self.key and interrupt >> 8 == self.get_modifier_mask(0)): raise KeyboardInterrupt - def get_next_event(self): + def get_next_mouse_event(self, time): + mods = self.get_modifier_mask(3) + btn = self.button + if btn == RedButtonBit: + if mods & CtrlKeyBit: + btn = BlueButtonBit + elif mods & CommandKeyBit: + btn = YellowButtonBit + return [EventTypeMouse, + time, + int(self.mouse_position[0]), + int(self.mouse_position[1]), + btn, + mods, + 0, + 0] + + def get_next_key_event(self, t, time): + mods = self.get_modifier_mask(3) + btn = self.button + return [EventTypeKeyboard, + time, + self.key, + t, + mods, + self.key, + 0, + 0] + + def get_next_event(self, time=0): + if self._deferred_event: + deferred = self._deferred_event + self._deferred_event = None + return deferred + + event = lltype.malloc(RSDL.Event, flavor="raw") + try: + if rffi.cast(lltype.Signed, RSDL.PollEvent(event)) == 1: + c_type = rffi.getintfield(event, 'c_type') + if c_type in [RSDL.MOUSEBUTTONDOWN, RSDL.MOUSEBUTTONUP]: + self.handle_mouse_button(c_type, event) + return self.get_next_mouse_event(time) + elif c_type == RSDL.MOUSEMOTION: + self.handle_mouse_move(c_type, event) + return self.get_next_mouse_event(time) + elif c_type == RSDL.KEYDOWN: + self.handle_keypress(c_type, event) + return self.get_next_key_event(EventKeyDown, time) + elif c_type == RSDL.KEYUP: + self._deferred_event = self.get_next_key_event(EventKeyUp, time) + return self.get_next_key_event(EventKeyChar, time) + elif c_type == RSDL.VIDEORESIZE: + self.screen = RSDL.GetVideoSurface() + self._deferred_event = 
[EventTypeWindow, time, WindowEventPaint, + 0, 0, int(self.screen.c_w), int(self.screen.c_h), 0] + return [EventTypeWindow, time, WindowEventMetricChange, + 0, 0, int(self.screen.c_w), int(self.screen.c_h), 0] + elif c_type == RSDL.VIDEOEXPOSE: + self._deferred_event = [EventTypeWindow, time, WindowEventPaint, + 0, 0, int(self.screen.c_w), int(self.screen.c_h), 0] + return [EventTypeWindow, time, WindowEventActivated, 0, 0, 0, 0, 0] + elif c_type == RSDL.QUIT: + return [EventTypeWindow, time, WindowEventClose, 0, 0, 0, 0, 0] + finally: + lltype.free(event, flavor='raw') + return [EventTypeNone, 0, 0, 0, 0, 0, 0, 0] + + # Old style event handling + def pump_events(self): event = lltype.malloc(RSDL.Event, flavor="raw") try: if rffi.cast(lltype.Signed, RSDL.PollEvent(event)) == 1: @@ -127,8 +225,6 @@ elif c_type == RSDL.KEYDOWN: self.handle_keypress(c_type, event) return - elif c_type == RSDL.VIDEORESIZE: - pass # TODO elif c_type == RSDL.QUIT: from spyvm.error import Exit raise Exit("Window closed..") @@ -140,19 +236,19 @@ mod = RSDL.GetModState() modifier = 0 if mod & RSDL.KMOD_CTRL != 0: - modifier |= MOD_CONTROL + modifier |= CtrlKeyBit if mod & RSDL.KMOD_SHIFT != 0: - modifier |= MOD_SHIFT + modifier |= ShiftKeyBit if mod & RSDL.KMOD_ALT != 0: - modifier |= MOD_ALT_CMD + modifier |= (OptionKeyBit | CommandKeyBit) return modifier << shift def mouse_point(self): - self.get_next_event() + self.pump_events() return self.mouse_position def mouse_button(self): - self.get_next_event() + self.pump_events() mod = self.get_modifier_mask(3) return self.button | mod @@ -162,7 +258,7 @@ return key | self.get_modifier_mask(8) def peek_keycode(self): - self.get_next_event() + self.pump_events() self.key |= self.get_modifier_mask(8) return self.key diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -137,6 +137,8 @@ args += (interp.space.unwrap_array(w_arg), ) elif spec is char: args += (unwrap_char(w_arg), ) + elif spec is bool: + args += (interp.space.w_true is w_arg, ) else: raise NotImplementedError( "unknown unwrap_spec %s" % (spec, )) @@ -625,9 +627,18 @@ w_point.store(interp.space, 1, interp.space.wrap_int(y)) return w_point + at jit.unroll_safe + at jit.look_inside @expose_primitive(GET_NEXT_EVENT, unwrap_spec=[object, object]) def func(interp, s_frame, w_rcvr, w_into): - raise PrimitiveNotYetWrittenError() + ary = interp.space.get_display().get_next_event(time=interp.time_now()) + for i in range(8): + w_into.store(interp.space, i, interp.space.wrap_int(ary[i])) + # XXX - hack + if ary[0] == display.WindowEventMetricChange and ary[4] > 0 and ary[5] > 0: + if interp.image: + interp.image.lastWindowSize = ((ary[4] & 0xffff) << 16) | (ary[5] & 0xffff) + return w_rcvr @expose_primitive(BITBLT_COPY_BITS, clean_stack=False, no_result=True, compiled_method=True) def func(interp, s_frame, argcount, s_method): @@ -908,9 +919,11 @@ # dont know when the space runs out return w_reciver - at expose_primitive(DEFER_UPDATES, unwrap_spec=[object, object]) -def func(interp, s_frame, w_receiver, w_bool): - raise PrimitiveNotYetWrittenError() + at expose_primitive(DEFER_UPDATES, unwrap_spec=[object, bool]) +def func(interp, s_frame, w_receiver, flag): + sdldisplay = interp.space.get_display() + sdldisplay.defer_updates(flag) + return w_receiver @expose_primitive(DRAW_RECTANGLE, unwrap_spec=[object, int, int, int, int]) def func(interp, s_frame, w_rcvr, left, right, top, bottom): @@ -1461,7 +1474,7 @@ @expose_primitive(FORCE_DISPLAY_UPDATE, 
unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - interp.space.get_display().flip() + interp.space.get_display().flip(force=True) return w_rcvr # ___________________________________________________________________________ From noreply at buildbot.pypy.org Thu Jan 9 19:19:44 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:44 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: add beep prim Message-ID: <20140109181944.C448F1C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r555:7fcbfe51e7af Date: 2014-01-08 10:36 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/7fcbfe51e7af/ Log: add beep prim diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1009,12 +1009,17 @@ #____________________________________________________________________________ # Misc Primitives (138 - 149) +BEEP = 140 VM_PATH = 142 SHORT_AT = 143 SHORT_AT_PUT = 144 FILL = 145 CLONE = 148 + at expose_primitive(BEEP, unwrap_spec=[object]) +def func(interp, s_frame, w_receiver): + return w_receiver + @expose_primitive(VM_PATH, unwrap_spec=[object]) def func(interp, s_frame, w_receiver): return interp.space.wrap_string("%s%s" % (interp.space.executable_path(), os.path.sep)) From noreply at buildbot.pypy.org Thu Jan 9 19:19:45 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:45 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: detach form shadow on sync_cache failure Message-ID: <20140109181945.CCE4D1C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r556:03b60ed307da Date: 2014-01-08 10:37 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/03b60ed307da/ Log: detach form shadow on sync_cache failure diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -752,5 +752,6 @@ self.pixPerWord = 32 / self.depth self.pitch = (self.width + (self.pixPerWord - 1)) / self.pixPerWord | 0 if self.w_bits.size() != (self.pitch * self.height): - # raise error.PrimitiveFailedError() - pass # - we'll be updated again + w_self = self.w_self() + assert isinstance(w_self, model.W_PointersObject) + w_self._shadow = None From noreply at buildbot.pypy.org Thu Jan 9 19:19:46 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:46 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: fix a segv, fix translation Message-ID: <20140109181946.DC63B1C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r557:ff0c5aeb1539 Date: 2014-01-08 13:30 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/ff0c5aeb1539/ Log: fix a segv, fix translation diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -9,7 +9,7 @@ BitBltPlugin = Plugin() - at BitBltPlugin.expose_primitive(unwrap_spec=[object], clean_stack=False) + at BitBltPlugin.expose_primitive(unwrap_spec=[object], clean_stack=True) def primitiveCopyBits(interp, s_frame, w_rcvr): from spyvm.interpreter import Return if not isinstance(w_rcvr, model.W_PointersObject) or w_rcvr.size() < 15: @@ -730,10 +730,7 @@ w_self = self.w_self() assert isinstance(w_self, model.W_PointersObject) w_self._shadow = None - raise PrimitiveFailedError("Bits (%s) in %s are not words or displaybitmap" % ( - self.w_bits.as_repr_string(), - w_self.as_repr_string() - )) + raise PrimitiveFailedError("Bits in are not words 
or displaybitmap") self.width = self.space.unwrap_int(self.fetch(1)) self.height = self.space.unwrap_int(self.fetch(2)) self.depth = self.space.unwrap_int(self.fetch(3)) From noreply at buildbot.pypy.org Thu Jan 9 19:19:49 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:49 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: make evented code optional Message-ID: <20140109181949.046F71C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r559:43767dcdfe46 Date: 2014-01-09 12:59 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/43767dcdfe46/ Log: make evented code optional diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -25,7 +25,7 @@ class Interpreter(object): _immutable_fields_ = ["space", "image", "image_name", "max_stack_depth", "interrupt_counter_size", - "startup_time"] + "startup_time", "evented"] _w_last_active_context = None cnt = 0 _last_indent = "" @@ -37,7 +37,8 @@ ) def __init__(self, space, image=None, image_name="", trace=False, - max_stack_depth=constants.MAX_LOOP_DEPTH): + evented=True, + max_stack_depth=constants.MAX_LOOP_DEPTH): import time self.space = space self.image = image @@ -47,6 +48,7 @@ self.remaining_stack_depth = max_stack_depth self._loop = False self.next_wakeup_tick = 0 + self.evented = evented try: self.interrupt_counter_size = int(os.environ["SPY_ICS"]) except KeyError: diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -631,6 +631,8 @@ @jit.look_inside @expose_primitive(GET_NEXT_EVENT, unwrap_spec=[object, object]) def func(interp, s_frame, w_rcvr, w_into): + if not interp.evented: + raise PrimitiveFailedError() ary = interp.space.get_display().get_next_event(time=interp.time_now()) for i in range(8): w_into.store(interp.space, i, interp.space.wrap_int(ary[i])) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -124,6 +124,7 @@ -a|--arg [string argument to #method] -r|--run [code string] -b|--benchmark [code string] + -p|--poll_events [image path, default: Squeak.image] """ % argv[0] @@ -139,6 +140,7 @@ number = 0 benchmark = None trace = False + evented = True stringarg = "" code = None as_benchmark = False @@ -163,6 +165,8 @@ idx += 1 elif arg in ["-t", "--trace"]: trace = True + elif arg in ["-p", "--poll_events"]: + evented = False elif arg in ["-a", "--arg"]: _arg_missing(argv, idx, arg) stringarg = argv[idx + 1] @@ -200,7 +204,7 @@ image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) image = create_image(space, image_reader) - interp = interpreter.Interpreter(space, image, image_name=path, trace=trace) + interp = interpreter.Interpreter(space, image, image_name=path, trace=trace, evented=evented) space.runtime_setup(argv[0]) if benchmark is not None: return _run_benchmark(interp, number, benchmark, stringarg) From noreply at buildbot.pypy.org Thu Jan 9 19:19:51 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 9 Jan 2014 19:19:51 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: add cache flushing, and store selector strings in methoddict Message-ID: <20140109181951.0880D1C0C34@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r561:b804692b36b8 Date: 2014-01-09 16:22 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/b804692b36b8/ Log: add cache flushing, and store 
selector strings in methoddict diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -801,6 +801,7 @@ QUIT = 113 EXIT_TO_DEBUGGER = 114 CHANGE_CLASS = 115 # Blue Book: primitiveOopsLeft +COMPILED_METHOD_FLUSH_CACHE = 116 EXTERNAL_CALL = 117 SYMBOL_FLUSH_CACHE = 119 @@ -886,6 +887,17 @@ return IProxy.call(signature, interp, s_frame, argcount, s_method) raise PrimitiveFailedError + at expose_primitive(COMPILED_METHOD_FLUSH_CACHE, unwrap_spec=[object]) +def func(interp, s_frame, w_rcvr): + if not isinstance(w_rcvr, model.W_CompiledMethod): + raise PrimitiveFailedError() + s_cm = w_rcvr.as_compiledmethod_get_shadow(interp.space) + w_class = s_cm.w_compiledin + if w_class: + assert isinstance(w_class, model.W_PointersObject) + w_class.as_class_get_shadow(interp.space).flush_caches() + return w_rcvr + @expose_primitive(SYMBOL_FLUSH_CACHE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): raise PrimitiveFailedError() @@ -1367,8 +1379,10 @@ @expose_primitive(FLUSH_CACHE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): - # XXX we currently don't care about bad flushes :) XXX - # raise PrimitiveNotYetWrittenError() + if not isinstance(w_rcvr, model.W_PointersObject): + raise PrimitiveFailedError() + s_class = w_rcvr.as_class_get_shadow(interp.space) + s_class.flush_caches() return w_rcvr # ___________________________________________________________________________ diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -169,6 +169,13 @@ self.store_w_superclass(w_superclass) self.changed() + @jit.unroll_safe + def flush_caches(self): + look_in_shadow = self + while look_in_shadow is not None: + s_method = look_in_shadow.s_methoddict().sync_cache() + look_in_shadow = look_in_shadow._s_superclass + def guess_class_name(self): if self.name != '': return self.name @@ -355,8 +362,8 @@ def find_selector(self, w_selector): if self.invalid: - return None - return self.methoddict.get(w_selector, None) + return None # we may be invalid if Smalltalk code did not call flushCache + return self.methoddict.get(self._as_md_entry(w_selector), None) def update(self): return self.sync_cache() @@ -370,6 +377,12 @@ AbstractShadow.store(self, n0, w_value) self.invalid = True + def _as_md_entry(self, w_selector): + if isinstance(w_selector, model.W_BytesObject): + return w_selector.as_string() + else: + return "%r" % w_selector # use the pointer for this + def sync_cache(self): if self.w_self().size() == 0: return @@ -394,11 +407,8 @@ "CompiledMethods only, for now. 
" "If the value observed is nil, our " "invalidating mechanism may be broken.") - self.methoddict[w_selector] = w_compiledmethod.as_compiledmethod_get_shadow(self.space) - if isinstance(w_selector, model.W_BytesObject): - selector = w_selector.as_string() - else: - selector = w_selector.as_repr_string() + selector = self._as_md_entry(w_selector) + self.methoddict[selector] = w_compiledmethod.as_compiledmethod_get_shadow(self.space) w_compiledmethod._likely_methodname = selector if self.s_class: self.s_class.changed() From noreply at buildbot.pypy.org Thu Jan 9 19:53:48 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 9 Jan 2014 19:53:48 +0100 (CET) Subject: [pypy-commit] pypy default: Disable SSLv2 except when a user explicity requests it Message-ID: <20140109185348.AFCC61C01E8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68591:1bf39957a7e8 Date: 2014-01-09 10:53 -0800 http://bitbucket.org/pypy/pypy/changeset/1bf39957a7e8/ Log: Disable SSLv2 except when a user explicity requests it diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -993,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -711,8 +711,12 @@ raise ssl_error(space, "SSL_CTX_use_certificate_chain_file error") # ssl compatibility - libssl_SSL_CTX_set_options(ss.ctx, - SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) + options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS + if protocol != PY_SSL_VERSION_SSL2: + # SSLv2 is extremely broken, don't use it unless a user specifically + # requests it + options |= SSL_OP_NO_SSLv2 + libssl_SSL_CTX_set_options(ss.ctx, options) verification_mode = SSL_VERIFY_NONE if cert_mode == PY_SSL_CERT_OPTIONAL: @@ -724,7 +728,7 @@ libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address # of a str object may be changed by the garbage collector. - libssl_SSL_set_mode(ss.ssl, + libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO From noreply at buildbot.pypy.org Fri Jan 10 12:16:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Jan 2014 12:16:32 +0100 (CET) Subject: [pypy-commit] pypy default: Decrement the raw-memory-pressure counters by 'sizehint' plus a little Message-ID: <20140110111632.061981C02B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68592:5a2d56ec4bac Date: 2014-01-10 11:06 +0100 http://bitbucket.org/pypy/pypy/changeset/5a2d56ec4bac/ Log: Decrement the raw-memory-pressure counters by 'sizehint' plus a little bit extra. This is needed e.g. for _rawffi, which may allocate a lot of tiny arrays. 
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -864,7 +864,10 @@ self.next_major_collection_threshold = self.max_heap_size def raw_malloc_memory_pressure(self, sizehint): - self.next_major_collection_threshold -= sizehint + # Decrement by 'sizehint' plus a very little bit extra. This + # is needed e.g. for _rawffi, which may allocate a lot of tiny + # arrays. + self.next_major_collection_threshold -= (sizehint + 2 * WORD) if self.next_major_collection_threshold < 0: # cannot trigger a full collection now, but we can ensure # that one will occur very soon From noreply at buildbot.pypy.org Fri Jan 10 13:17:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Jan 2014 13:17:28 +0100 (CET) Subject: [pypy-commit] pypy default: Issue1670: Ouch ouch ouch. Message-ID: <20140110121728.175AE1C02B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68593:872f6055d970 Date: 2014-01-10 12:43 +0100 http://bitbucket.org/pypy/pypy/changeset/872f6055d970/ Log: Issue1670: Ouch ouch ouch. diff --git a/pypy/module/_rawffi/tracker.py b/pypy/module/_rawffi/tracker.py --- a/pypy/module/_rawffi/tracker.py +++ b/pypy/module/_rawffi/tracker.py @@ -4,7 +4,7 @@ """ class Tracker(object): - DO_TRACING = True + DO_TRACING = False # make sure this stays False by default! def __init__(self): self.alloced = {} From noreply at buildbot.pypy.org Fri Jan 10 13:17:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Jan 2014 13:17:29 +0100 (CET) Subject: [pypy-commit] pypy default: With DO_TRACING=False, we can now ensure that these finalizers are light. Message-ID: <20140110121729.523E91C02B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68594:d681e26981e8 Date: 2014-01-10 13:16 +0100 http://bitbucket.org/pypy/pypy/changeset/d681e26981e8/ Log: With DO_TRACING=False, we can now ensure that these finalizers are light. 
diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -15,6 +15,7 @@ from pypy.module._rawffi.interp_rawffi import unpack_shape_with_length from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib.rarithmetic import r_uint +from rpython.rlib import rgc class W_Array(W_DataShape): @@ -220,6 +221,7 @@ def __init__(self, space, shape, length): W_ArrayInstance.__init__(self, space, shape, length, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -16,7 +16,7 @@ from pypy.module._rawffi.interp_rawffi import unroll_letters_for_numbers from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr -from rpython.rlib import clibffi +from rpython.rlib import clibffi, rgc from rpython.rlib.rarithmetic import intmask, signedtype, widen from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong @@ -226,6 +226,7 @@ fieldtypes) return self.ffi_struct.ffistruct + @rgc.must_be_light_finalizer def __del__(self): if self.ffi_struct: lltype.free(self.ffi_struct, flavor='raw') @@ -380,6 +381,7 @@ def __init__(self, space, shape): W_StructureInstance.__init__(self, space, shape, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() From noreply at buildbot.pypy.org Fri Jan 10 18:14:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 10 Jan 2014 18:14:19 +0100 (CET) Subject: [pypy-commit] pypy default: Issue1669: fix. No clue about how to write a test; only checking that Message-ID: <20140110171419.4E5751C01E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68595:5ca199444bb9 Date: 2014-01-10 17:12 +0100 http://bitbucket.org/pypy/pypy/changeset/5ca199444bb9/ Log: Issue1669: fix. No clue about how to write a test; only checking that the original issue is fixed. diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -159,17 +159,17 @@ descr, ) elif cache is not None: - if argboxes[2] in self.new_boxes: - try: - idx_cache = cache[dststart + i] - except KeyError: - pass - else: + try: + idx_cache = cache[dststart + i] + except KeyError: + pass + else: + if argboxes[2] in self.new_boxes: for frombox in idx_cache.keys(): if not self.is_unescaped(frombox): del idx_cache[frombox] - else: - cache[dststart + i].clear() + else: + idx_cache.clear() return elif ( argboxes[2] in self.new_boxes and From noreply at buildbot.pypy.org Sat Jan 11 11:21:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 11 Jan 2014 11:21:02 +0100 (CET) Subject: [pypy-commit] stmgc c7: progress Message-ID: <20140111102102.795B91C08B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r601:676e3cb14530 Date: 2014-01-11 11:20 +0100 http://bitbucket.org/pypy/stmgc/changeset/676e3cb14530/ Log: progress diff --git a/c7/README.txt b/c7/README.txt --- a/c7/README.txt +++ b/c7/README.txt @@ -121,14 +121,32 @@ have around several copies of the objects at potentially different versions. + +(The rest of this section defines the "leader". It's a complicated way +to make sure we always have an object to copy back in case this +transaction is aborted. 
At first, what will be implemented in core.c +will simply be waiting if necessary until two threads reach the latest +version; then each thread can use the other's original object.) + + At most one thread is called the "leader" (this is new terminology as -far as I know). The leader is the thread running a transaction whose -start time is higher than the start time of any other running -transaction. If there are several threads with the same highest start -time, we have no leader. Leadership is a temporary condition: it is -acquired (typically) by the thread whose transaction commits and whose -next transaction starts; but it is lost again as soon as any other -thread updates its transaction's start time to match. +far as I know). The leader is: + +- a thread that runs a transaction right now (as opposed to being + in some blocking syscall between two transactions, for example); + +- not alone: there are other threads running a transaction concurrently + (when only one thread is running, there is no leader); + +- finally, the start time of this thread's transaction is strictly + higher than the start time of any other running transaction. (If there + are several threads with the same highest start time, we have no + leader.) + +Leadership is a temporary condition: it is acquired (typically) by the +thread whose transaction commits and whose next transaction starts; but +it is lost again as soon as any other thread updates its transaction's +start time to match. The point of the notion of leadership is that when the leader wants to modify an object, it must first make sure that the original version is @@ -138,11 +156,15 @@ well update all objects to the latest version. And if there are several threads with the same highest start time, we can be sure that the original version of the object is somewhere among them --- this is the -point of detecting write-write conflicts eagerly. The only remaining -case is the one in which there is a leader thread, this leader thread -has the only latest version of an object, and it tries to further modify -this object. To handle this precise case, for now, we simply wait until -another thread updates and we are no longer the leader. (*) +point of detecting write-write conflicts eagerly. Finally, if there is +only one thread running, as soon as it was updated, it cannot abort any +more, so we don't need to record the old version of anything. + +The only remaining case is the one in which there is a leader thread, +this leader thread has the only latest version of an object, and it +tries to further modify this object. To handle this precise case, for +now, we simply wait until another thread updates and we are no longer +the leader. (*) (*) the code in core.c contains, or contained, or will again contain, an explicit undo log that would be filled in this case only. @@ -154,19 +176,21 @@ draft: - pages need to be unshared when they contain already-committed objects - that are then modified. They can remain shared if a fraction of (or all) - their space was not used previously, but is used by new allocations; any - changes to these fresh objects during the same transaction do *not* need - to unshare the page. This should ensure that in the common case the - majority of pages are not unshared. + that are then modified. + +- pages can remain shared if a fraction of (or all) their space was not + used previously, but is used by new allocations; any changes to these + fresh objects during the same transaction do *not* need to unshare the + page. 
This should ensure that in the common case the majority of pages + are not unshared. - minor collection: occurs regularly, and maybe always at the end of transactions (we'll see). Should work by marking the young objects that survive. Non-marked objects are then sweeped lazily by the next allocation requests (as in "mark-and-don't-sweep" GCs, here for the minor collection only). Needs a write barrier to detect - old-objects-pointing-to-young objects (the old object may belong - to the same running transaction, or be already committed). + old-objects-pointing-to-young objects (the old object may be fresh + from the same running transaction as well, or be already committed). - the numbers and flags stored in the objects need to be designed with the above goals in mind. @@ -190,3 +214,10 @@ in shared pages --- while at the same time bounding the number of calls to remap_file_pages() for each page at 2 per major collection cycle. + + +Misc +---- + +Use __builtin_setjmp() and __builtin_longjmp() rather than setjmp() +and longjmp(). From noreply at buildbot.pypy.org Sat Jan 11 11:57:47 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Sat, 11 Jan 2014 11:57:47 +0100 (CET) Subject: [pypy-commit] pypy default: if the zipfile is corrupted we might get an uncaugth interp-level RZlibError, which results in a crash after translation. Catch it and raise the appropriate app-level exception Message-ID: <20140111105747.CC94B1C31E9@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r68596:de464ba9c716 Date: 2014-01-11 11:56 +0100 http://bitbucket.org/pypy/pypy/changeset/de464ba9c716/ Log: if the zipfile is corrupted we might get an uncaugth interp-level RZlibError, which results in a crash after translation. Catch it and raise the appropriate app-level exception diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py --- a/pypy/module/zipimport/interp_zipimport.py +++ b/pypy/module/zipimport/interp_zipimport.py @@ -5,8 +5,10 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.module import Module from pypy.module.imp import importing +from pypy.module.zlib.interp_zlib import zlib_error from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.rzipfile import RZipFile, BadZipfile +from rpython.rlib.rzlib import RZlibError import os import stat @@ -252,6 +254,10 @@ buf = self.zip_file.read(fname) except (KeyError, OSError, BadZipfile): pass + except RZlibError, e: + # in this case, CPython raises the direct exception coming + # from the zlib module: let's to the same + raise zlib_error(space, e.msg) else: if is_package: pkgpath = (self.filename + os.path.sep + diff --git a/pypy/module/zipimport/test/bad.zip b/pypy/module/zipimport/test/bad.zip new file mode 100644 index 0000000000000000000000000000000000000000..1d326a869a409a04dc46475bf157cb6f5bdb0664 GIT binary patch [cut] diff --git a/pypy/module/zipimport/test/test_zipimport_deflated.py b/pypy/module/zipimport/test/test_zipimport_deflated.py --- a/pypy/module/zipimport/test/test_zipimport_deflated.py +++ b/pypy/module/zipimport/test/test_zipimport_deflated.py @@ -3,6 +3,7 @@ from zipfile import ZIP_DEFLATED from pypy.module.zipimport.test.test_zipimport import AppTestZipimport as Base +BAD_ZIP = str(py.path.local(__file__).dirpath('bad.zip')) class AppTestZipimportDeflated(Base): compression = ZIP_DEFLATED @@ -16,3 +17,10 @@ except ImportError: py.test.skip("zlib not available, cannot test compressed zipfiles") cls.make_class() + cls.w_BAD_ZIP 
= cls.space.wrap(BAD_ZIP) + + def test_zlib_error(self): + import zipimport + import zlib + z = zipimport.zipimporter(self.BAD_ZIP) + raises(zlib.error, "z.load_module('mymod')") From noreply at buildbot.pypy.org Sat Jan 11 12:31:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 11 Jan 2014 12:31:43 +0100 (CET) Subject: [pypy-commit] pypy default: Skip these tests if we're running "py.test -A" with a PyPy compiled Message-ID: <20140111113143.C26311C01E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68597:57de6303e2f4 Date: 2014-01-11 12:30 +0100 http://bitbucket.org/pypy/pypy/changeset/57de6303e2f4/ Log: Skip these tests if we're running "py.test -A" with a PyPy compiled without DO_TRACING. diff --git a/pypy/module/_rawffi/test/test_tracker.py b/pypy/module/_rawffi/test/test_tracker.py --- a/pypy/module/_rawffi/test/test_tracker.py +++ b/pypy/module/_rawffi/test/test_tracker.py @@ -1,9 +1,21 @@ +import py +from pypy.conftest import option from pypy.module._rawffi.tracker import Tracker + class AppTestTracker: spaceconfig = dict(usemodules=['_rawffi', 'struct']) def setup_class(cls): + # + # detect if we're running on PyPy with DO_TRACING not compiled in + if option.runappdirect: + try: + import _rawffi + _rawffi._num_of_allocated_objects() + except (ImportError, RuntimeError), e: + py.test.skip(str(e)) + # Tracker.DO_TRACING = True def test_array(self): diff --git a/pypy/module/_rawffi/tracker.py b/pypy/module/_rawffi/tracker.py --- a/pypy/module/_rawffi/tracker.py +++ b/pypy/module/_rawffi/tracker.py @@ -2,6 +2,8 @@ """ The file that keeps track about freed/kept-alive objects allocated by _rawffi. Used for debugging ctypes """ +from pypy.interpreter.error import OperationError + class Tracker(object): DO_TRACING = False # make sure this stays False by default! 
@@ -20,6 +22,9 @@ tracker = Tracker() def num_of_allocated_objects(space): + if not tracker.DO_TRACING: + raise OperationError(space.w_RuntimeError, + space.wrap("DO_TRACING not enabled in this PyPy")) return space.wrap(len(tracker.alloced)) def print_alloced_objects(space): From noreply at buildbot.pypy.org Sat Jan 11 21:27:55 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 11 Jan 2014 21:27:55 +0100 (CET) Subject: [pypy-commit] pypy default: Unroll copying a long string from a constant to a plain virtual Message-ID: <20140111202755.BFA3F1C08E4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68598:64423a7bf04d Date: 2014-01-11 12:27 -0800 http://bitbucket.org/pypy/pypy/changeset/64423a7bf04d/ Log: Unroll copying a long string from a constant to a plain virtual diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5031,6 +5031,19 @@ """ self.optimize_strunicode_loop(ops, expected) + def test_copy_long_string_to_virtual(self): + ops = """ + [] + p0 = newstr(20) + copystrcontent(s"aaaaaaaaaaaaaaaaaaaa", p0, 0, 0, 20) + jump(p0) + """ + expected = """ + [] + jump(s"aaaaaaaaaaaaaaaaaaaa") + """ + self.optimize_strunicode_loop(ops, expected) + def test_ptr_eq_str_constant(self): ops = """ [] diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -520,7 +520,7 @@ elif ((src.is_virtual() or src.is_constant()) and srcstart.is_constant() and dststart.is_constant() and length.is_constant() and - (length.force_box(self).getint() < 20 or (src.is_virtual() and dst_virtual))): + (length.force_box(self).getint() < 20 or ((src.is_virtual() or src.is_constant()) and dst_virtual))): src_start = srcstart.force_box(self).getint() dst_start = dststart.force_box(self).getint() actual_length = length.force_box(self).getint() From noreply at buildbot.pypy.org Sat Jan 11 23:23:37 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 11 Jan 2014 23:23:37 +0100 (CET) Subject: [pypy-commit] pypy default: Look a little bit more into weakref -- now a mapdict lookup is seen by the JIT Message-ID: <20140111222337.814551C02A7@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68599:2f0d7dced731 Date: 2014-01-11 14:23 -0800 http://bitbucket.org/pypy/pypy/changeset/2f0d7dced731/ Log: Look a little bit more into weakref -- now a mapdict lookup is seen by the JIT diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -122,6 +122,7 @@ W_WeakrefBase.activate_callback, 'weakref callback of ') + @jit.dont_look_inside def make_weakref_with_callback(self, w_subtype, w_obj, w_callable): space = self.space w_ref = space.allocate_instance(W_Weakref, w_subtype) @@ -243,11 +244,12 @@ def get_or_make_weakref(space, w_subtype, w_obj): return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj) - at jit.dont_look_inside + def make_weakref_with_callback(space, w_subtype, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) + def descr__new__weakref(space, w_subtype, 
w_obj, w_callable=None, __args__=None): if __args__.arguments_w: diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -0,0 +1,46 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestThread(BaseTestPyPyC): + def test_make_ref_with_callback(self): + log = self.run(""" + import weakref + + class Dummy(object): + pass + + def noop(obj): + pass + + def main(n): + obj = Dummy() + for i in xrange(n): + weakref.ref(obj, noop) + """, [500]) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i58 = getfield_gc(p18, descr=) + i59 = getfield_gc(p18, descr=) + i60 = int_lt(i58, i59) + guard_true(i60, descr=...) + i61 = int_add(i58, 1) + p62 = getfield_gc(ConstPtr(ptr37), descr=) + setfield_gc(p18, i61, descr=) + guard_value(p62, ConstPtr(ptr39), descr=...) + guard_not_invalidated(descr=...) + p64 = getfield_gc(ConstPtr(ptr40), descr=) + guard_value(p64, ConstPtr(ptr42), descr=...) + p65 = getfield_gc(p14, descr=) + guard_value(p65, ConstPtr(ptr45), descr=...) + p66 = getfield_gc(p14, descr=) + guard_nonnull_class(p66, 4315455232, descr=...) + p67 = force_token() + setfield_gc(p0, p67, descr=) + p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) + guard_not_forced(descr=...) + guard_no_exception(descr=...) + guard_nonnull_class(p68, 4315527384, descr=...) + guard_not_invalidated(descr=...) + --TICK-- + jump(..., descr=...) + """) From noreply at buildbot.pypy.org Sat Jan 11 23:59:32 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 11 Jan 2014 23:59:32 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: This is no longer really an issue Message-ID: <20140111225932.B041C1C08B9@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r470:add99aaf7a21 Date: 2014-01-11 14:59 -0800 http://bitbucket.org/pypy/pypy.org/changeset/add99aaf7a21/ Log: This is no longer really an issue diff --git a/performance.html b/performance.html --- a/performance.html +++ b/performance.html @@ -243,8 +243,6 @@

 locals(), globals(), sys._getframe(), sys.exc_info(), and sys.settrace
 work in PyPy, but they incur a performance penalty that can be huge by
 disabling the JIT over the enclosing JIT scope.
-One unobvious case where frame introspection is used is the logging
-module. Don't use the logging module if you need to run fast.
 (Thanks Eric S. Raymond for the text above)
diff --git a/source/performance.txt b/source/performance.txt --- a/source/performance.txt +++ b/source/performance.txt @@ -236,9 +236,6 @@ work in PyPy, but they incur a performance penalty that can be huge by disabling the JIT over the enclosing JIT scope. -One unobvious case where frame introspection is used is the logging -module. Don't use the logging module if you need to run fast. - *(Thanks Eric S. Raymond for the text above)* From noreply at buildbot.pypy.org Sat Jan 11 23:59:33 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 11 Jan 2014 23:59:33 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: merged upstream Message-ID: <20140111225933.E18241C08B9@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r471:0de1b2f3e8b5 Date: 2014-01-11 14:59 -0800 http://bitbucket.org/pypy/pypy.org/changeset/0de1b2f3e8b5/ Log: merged upstream diff --git a/people.html b/people.html --- a/people.html +++ b/people.html @@ -48,7 +48,7 @@

 Armin Rigo
 image/people/arigo.png
-Armin Rigo is a researcher at the Heinrich-Heine Universitat
+
Armin Rigo is a former researcher at the Heinrich-Heine Universitat Dusseldorf (Germany). He studied Mathematics at the University of Lausanne (Switzerland), obtained his Ph.D. in Logic and Set Theory at the Free University of Brussels (Belgium) in 2002, and diff --git a/source/people.txt b/source/people.txt --- a/source/people.txt +++ b/source/people.txt @@ -8,7 +8,7 @@ .. image:: image/people/arigo.png -Armin Rigo is a researcher at the Heinrich-Heine Universitat +Armin Rigo is a former researcher at the Heinrich-Heine Universitat Dusseldorf (Germany). He studied Mathematics at the University of Lausanne (Switzerland), obtained his Ph.D. in Logic and Set Theory at the Free University of Brussels (Belgium) in 2002, and From noreply at buildbot.pypy.org Sun Jan 12 01:49:21 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 12 Jan 2014 01:49:21 +0100 (CET) Subject: [pypy-commit] pypy default: Fix for some platforms Message-ID: <20140112004921.5EFBE1C01E8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68600:e69d5d984588 Date: 2014-01-11 16:48 -0800 http://bitbucket.org/pypy/pypy/changeset/e69d5d984588/ Log: Fix for some platforms diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -33,13 +33,13 @@ p65 = getfield_gc(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) p66 = getfield_gc(p14, descr=) - guard_nonnull_class(p66, 4315455232, descr=...) + guard_nonnull_class(p66, ..., descr=...) p67 = force_token() setfield_gc(p0, p67, descr=) p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) - guard_nonnull_class(p68, 4315527384, descr=...) + guard_nonnull_class(p68, ..., descr=...) guard_not_invalidated(descr=...) --TICK-- jump(..., descr=...) From noreply at buildbot.pypy.org Sun Jan 12 10:44:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 12 Jan 2014 10:44:24 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add initial planning file Message-ID: <20140112094424.495941C0212@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5124:74465af3be71 Date: 2014-01-12 10:44 +0100 http://bitbucket.org/pypy/extradoc/changeset/74465af3be71/ Log: Add initial planning file diff --git a/sprintinfo/leysin-winter-2014/planning.txt b/sprintinfo/leysin-winter-2014/planning.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2014/planning.txt @@ -0,0 +1,32 @@ + +People +------ + +Johan Rade +Remi Meier +Maciej Fijalkowski +Romain Guillebert +Armin Rigo + + +Topics +------ + +* numpy stuff, fix bugs from bug tracker (rguillebert, ?) + +* look at codespeed2 + +* resume-refactor branch (rguillebert, fijal) + +* GC pinning + +* asmgcc bug with greenlets and --shared + +* think about --shared by default + +* CFFI 1.0 + +* STM (remi) + +* discuss about C++ / cppyy (johan, ?) 
+ From noreply at buildbot.pypy.org Sun Jan 12 11:26:29 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 11:26:29 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) fix test_resumebuilder for removal of backend_attach Message-ID: <20140112102629.8DCB51C0212@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68601:95b2f5879a2a Date: 2014-01-12 11:25 +0100 http://bitbucket.org/pypy/pypy/changeset/95b2f5879a2a/ Log: (fijal, rguillebert) fix test_resumebuilder for removal of backend_attach diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -11,7 +11,7 @@ class TempBox(Box): type = 't' # none of the types - + def __init__(self): pass @@ -688,8 +688,18 @@ else: return [self.loc(op.getarg(0))] +def flatten(inputframes): + count = 0 + for frame in inputframes: + count += len(frame) + inputargs = [None] * count + i = 0 + for frame in inputframes: + inputargs[i:i + len(frame)] = frame + i += len(frame) + return inputargs -def compute_vars_longevity(inputargs, operations, descr=None): +def compute_vars_longevity(inputframes, operations, descr=None): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" @@ -701,7 +711,12 @@ last_used = {} last_real_usage = {} frontend_alive = {} - liveness_analyzer = LivenessAnalyzer() + if descr is None: + inputargs = inputframes[0] + liveness_analyzer = LivenessAnalyzer() + else: + inputargs = flatten(inputframes) + liveness_analyzer = LivenessAnalyzer(inputframes) start_pos = 0 for position, op in enumerate(operations): if op.is_guard(): diff --git a/rpython/jit/backend/llsupport/resumebuilder.py b/rpython/jit/backend/llsupport/resumebuilder.py --- a/rpython/jit/backend/llsupport/resumebuilder.py +++ b/rpython/jit/backend/llsupport/resumebuilder.py @@ -4,11 +4,15 @@ from rpython.jit.metainterp.resume2 import ResumeBytecode, AbstractResumeReader class LivenessAnalyzer(AbstractResumeReader): - def __init__(self): + def __init__(self, inputframes=None): self.liveness = {} self.frame_starts = [0] self.framestack = [] self.deps = {} + if inputframes is not None: + for frame in inputframes: + self.frame_starts.append(self.frame_starts[-1] + len(frame)) + self.framestack.append(frame) def enter_frame(self, pc, jitcode): self.frame_starts.append(self.frame_starts[-1] + jitcode.num_regs()) @@ -47,19 +51,33 @@ self.regalloc = regalloc self.current_attachment = {} self.frontend_liveness = frontend_liveness + self.frontend_pos = {} def process(self, op): - self.newops.append(op) + if op.getopnum() == rop.RESUME_PUT: + box = op.getarg(0) + args = op.getarglist() + pos = self.regalloc.loc(box).get_jitframe_position() + self.current_attachment[box] = pos + self.frontend_pos[box] = (args[1], args[2]) + args[0] = ConstInt(pos) + newop = op.copy_and_change(rop.RESUME_PUT, args=args) + else: + newop = op + self.newops.append(newop) def _mark_visited(self, v, loc): pos = loc.get_jitframe_position() if (v not in self.frontend_liveness or self.frontend_liveness[v] < self.regalloc.rm.position): return - if (v not in self.current_attachment or - self.current_attachment[v] != pos): - self.newops.append(ResOperation(rop.BACKEND_ATTACH, [ - v, ConstInt(pos)], None)) + if v not in self.current_attachment: + return + if self.current_attachment[v] != pos: + frame_index, frame_pos = self.frontend_pos[v] + 
self.newops.append(ResOperation(rop.RESUME_PUT, [ + ConstInt(pos), frame_index, frame_pos], + None)) self.current_attachment[v] = pos def mark_resumable_position(self): diff --git a/rpython/jit/backend/llsupport/test/test_resumebuilder.py b/rpython/jit/backend/llsupport/test/test_resumebuilder.py --- a/rpython/jit/backend/llsupport/test/test_resumebuilder.py +++ b/rpython/jit/backend/llsupport/test/test_resumebuilder.py @@ -35,19 +35,15 @@ self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) descr = loop.operations[2].getdescr() - assert descr.rd_bytecode_position == 3 + assert descr.rd_bytecode_position == 2 expected_resume = parse(""" - [i0] + [] enter_frame(-1, descr=jitcode) - resume_put(i0, 0, 2) - backend_attach(i0, 28) + resume_put(28, 0, 2) leave_frame() """, namespace={'jitcode': jitcode}) - i0 = descr.rd_resume_bytecode.opcodes[1].getarg(0) - i0b = expected_resume.inputargs[0] equaloplists(descr.rd_resume_bytecode.opcodes, - expected_resume.operations, - remap={i0b:i0}) + expected_resume.operations) def test_resume_new(self): jitcode = JitCode("name") @@ -76,17 +72,13 @@ enter_frame(-1, descr=jitcode) p0 = resume_new(descr=structdescr) resume_setfield_gc(p0, i0, descr=fielddescr) - resume_put(p0, 0, 0) - backend_attach(i0, 28) + resume_put(30, 0, 0) leave_frame() """, namespace=namespace) descr = loop.operations[-3].getdescr() - assert descr.rd_bytecode_position == 5 - i0 = descr.rd_resume_bytecode.opcodes[2].getarg(1) - i0b = expected_resume.inputargs[0] + assert descr.rd_bytecode_position == 4 equaloplists(descr.rd_resume_bytecode.opcodes, - expected_resume.operations, - remap={i0b:i0}) + expected_resume.operations) def test_spill(self): jitcode = JitCode("name") @@ -111,17 +103,13 @@ expected_resume = parse(""" [i2] enter_frame(-1, descr=jitcode) - resume_put(i2, 0, 1) - backend_attach(i2, 1) - backend_attach(i2, 29) + resume_put(1, 0, 1) + resume_put(29, 0, 1) leave_frame() """, namespace={'jitcode':jitcode}) descr1 = loop.operations[3].getdescr() descr2 = loop.operations[5].getdescr() - assert descr1.rd_bytecode_position == 3 - assert descr2.rd_bytecode_position == 4 - i0 = descr1.rd_resume_bytecode.opcodes[1].getarg(0) - i0b = expected_resume.inputargs[0] + assert descr1.rd_bytecode_position == 2 + assert descr2.rd_bytecode_position == 3 equaloplists(descr1.rd_resume_bytecode.opcodes, - expected_resume.operations, - remap={i0b:i0}) + expected_resume.operations) diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -68,7 +68,7 @@ """ raise NotImplementedError - def compile_bridge(self, logger, faildescr, inputargs, backend_positions, + def compile_bridge(self, logger, faildescr, inputframes, backend_positions, operations, original_loop_token, log=True): """Assemble the bridge. The FailDescr is the descr of the original guard that failed. 
diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -248,7 +248,7 @@ ] locs = rebuild_locs_from_resumedata(faildescr1) - self.cpu.compile_bridge(None, faildescr1, [i1b], locs, bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [[i1b]], locs, bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -294,7 +294,7 @@ descr=BasicFinalDescr(4))) faillocs = rebuild_locs_from_resumedata(faildescr1) - self.cpu.compile_bridge(None, faildescr1, [i0], faillocs, bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [[i0]], faillocs, bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) fail = self.cpu.get_latest_descr(deadframe) @@ -1426,7 +1426,7 @@ ResOperation(rop.JUMP, [f3]+fboxes2[1:], None, descr=targettoken), ] - self.cpu.compile_bridge(None, faildescr1, fboxes2, + self.cpu.compile_bridge(None, faildescr1, [fboxes2], rebuild_locs_from_resumedata(faildescr1), bridge, looptoken) @@ -1491,7 +1491,7 @@ ResOperation(rop.LEAVE_FRAME, [], None), ResOperation(rop.FINISH, [], None, descr=faildescr2), ] - self.cpu.compile_bridge(None, faildescr1, fboxes, + self.cpu.compile_bridge(None, faildescr1, [fboxes], locs, bridgeops, looptoken) args = [1, longlong.getfloatstorage(132.25), @@ -2953,7 +2953,7 @@ ResOperation(rop.GUARD_NOT_INVALIDATED, [],None, descr=faildescr2), ResOperation(rop.FINISH, [i2], None, descr=BasicFinalDescr(3)) ] - self.cpu.compile_bridge(None, faildescr, [i2], locs, ops, looptoken) + self.cpu.compile_bridge(None, faildescr, [[i2]], locs, ops, looptoken) deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) @@ -2993,7 +2993,7 @@ ops2 = [ ResOperation(rop.JUMP, [ConstInt(333)], None, descr=labeldescr), ] - self.cpu.compile_bridge(None, faildescr, [], [], ops2, looptoken) + self.cpu.compile_bridge(None, faildescr, [[]], [], ops2, looptoken) # run: must not be caught in an infinite loop deadframe = self.cpu.execute_token(looptoken, 16) fail = self.cpu.get_latest_descr(deadframe) @@ -3872,7 +3872,7 @@ ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] - self.cpu.compile_bridge(None, faildescr, inputargs2, locs, operations2, looptoken) + self.cpu.compile_bridge(None, faildescr, [inputargs2], locs, operations2, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -3925,7 +3925,7 @@ self.cpu.assembler.set_debug(False) info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) locs = rebuild_locs_from_resumedata(faildescr) - bridge_info = self.cpu.compile_bridge(None, faildescr, bridge.inputargs, + bridge_info = self.cpu.compile_bridge(None, faildescr, [bridge.inputargs], locs, bridge.operations, looptoken) self.cpu.assembler.set_debug(True) # always on untranslated @@ -4027,7 +4027,7 @@ ResOperation(rop.JUMP, [i19], None, descr=targettoken1), ] locs = rebuild_locs_from_resumedata(faildescr1) - self.cpu.compile_bridge(None, faildescr1, inputargs, locs, operations2, looptoken1) + self.cpu.compile_bridge(None, faildescr1, [inputargs], locs, operations2, looptoken1) looptoken2 = JitCellToken() inputargs = [BoxInt()] @@ -4054,7 +4054,7 @@ operations = [ ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(99)) ] - self.cpu.compile_bridge(None, faildescr, [], [], operations, looptoken) + 
self.cpu.compile_bridge(None, faildescr, [[]], [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, null_box.getref_base()) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 99 @@ -4291,8 +4291,6 @@ assert values[0] == 0 def test_compile_bridge_while_running(self): - XXX # it crashes because the regalloc does not inherit liveness - # rules from the parent, while it shoul def func(): jitcode2 = JitCode('name2') @@ -4333,7 +4331,7 @@ 'guarddescr': guarddescr, 'func2_ptr': func2_ptr, 'jitcode2': jitcode2}) locs = rebuild_locs_from_resumedata(faildescr) - self.cpu.compile_bridge(None, faildescr, bridge.inputargs, locs, + self.cpu.compile_bridge(None, faildescr, [bridge.inputargs], locs, bridge.operations, looptoken) cpu = self.cpu diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -3,14 +3,14 @@ from rpython.jit.backend.llsupport import symbolic, jitframe, rewrite from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, - DEBUG_COUNTER, debug_bridge) + debug_bridge) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper -from rpython.jit.backend.llsupport.gcmap import allocate_gcmap +from rpython.jit.backend.llsupport.regalloc import flatten from rpython.jit.metainterp.history import Const, Box, VOID from rpython.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.annlowlevel import llhelper, cast_instance_to_gcref +from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.rlib.jit import AsmInfo from rpython.jit.backend.model import CompiledLoopToken from rpython.jit.backend.x86.regalloc import (RegAlloc, get_ebp_ofs, @@ -29,7 +29,6 @@ from rpython.jit.metainterp.resoperation import rop from rpython.jit.backend.x86 import support from rpython.rlib.debug import debug_print, debug_start, debug_stop -from rpython.rlib import rgc from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.codewriter import longlong from rpython.rlib.rarithmetic import intmask, r_uint @@ -514,11 +513,8 @@ return AsmInfo(ops_offset, rawstart + looppos, size_excluding_failure_stuff - looppos) - def assemble_bridge(self, logger, faildescr, inputargs, backend_positions, + def assemble_bridge(self, logger, faildescr, inputframes, backend_positions, operations, original_loop_token, log): - if not we_are_translated(): - # Arguments should be unique - assert len(set(inputargs)) == len(inputargs) self.setup(original_loop_token) descr_number = compute_unique_id(faildescr) @@ -526,10 +522,11 @@ operations = self._inject_debugging_code(faildescr, operations, 'b', descr_number) + inputargs = flatten(inputframes) arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs, backend_positions) regalloc = RegAlloc(self, self.cpu.translate_support_code) startpos = self.mc.get_relative_pos() - operations = regalloc.prepare_bridge(inputargs, arglocs, + operations = regalloc.prepare_bridge(inputframes, arglocs, operations, self.current_clt.allgcrefs, self.current_clt.frame_info, diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -9,7 +9,8 @@ from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from 
rpython.jit.backend.llsupport.resumebuilder import ResumeBuilder from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, - RegisterManager, TempBox, compute_vars_longevity, is_comparison_or_ovf_op) + RegisterManager, TempBox, compute_vars_longevity, is_comparison_or_ovf_op, + flatten) from rpython.jit.backend.x86 import rx86 from rpython.jit.backend.x86.arch import (WORD, JITFRAME_FIXED_SIZE, IS_X86_32, IS_X86_64) @@ -132,13 +133,13 @@ self.jump_target_descr = None self.final_jump_op = None - def _prepare(self, inputargs, operations, allgcrefs, descr=None): + def _prepare(self, inputframes, operations, allgcrefs, descr=None): cpu = self.assembler.cpu self.fm = X86FrameManager(cpu.get_baseofs_of_frame_field()) operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - x = compute_vars_longevity(inputargs, operations, descr) + x = compute_vars_longevity(inputframes, operations, descr) longevity, last_real_usage, frontend_liveness = x self.resumebuilder = ResumeBuilder(self, frontend_liveness, descr) self.longevity = longevity @@ -151,7 +152,7 @@ return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare(inputargs, operations, allgcrefs) + operations = self._prepare([inputargs], operations, allgcrefs) self._set_initial_bindings(inputargs, looptoken) # note: we need to make a copy of inputargs because possibly_free_vars # is also used on op args, which is a non-resizable list @@ -162,10 +163,10 @@ self.min_bytes_before_label = 13 return operations - def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs, + def prepare_bridge(self, inputframes, arglocs, operations, allgcrefs, frame_info, descr): - operations = self._prepare(inputargs, operations, allgcrefs, descr) - self._update_bindings(arglocs, inputargs) + operations = self._prepare(inputframes, operations, allgcrefs, descr) + self._update_bindings(arglocs, inputframes) self.min_bytes_before_label = 0 return operations @@ -229,10 +230,11 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _update_bindings(self, locs, inputargs): + def _update_bindings(self, locs, inputframes): # XXX this should probably go to llsupport/regalloc.py used = {} i = 0 + inputargs = flatten(inputframes) for loc in locs: if loc is None: # xxx bit kludgy loc = ebp diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -93,11 +93,11 @@ return self.assembler.assemble_loop(logger, name, inputargs, operations, looptoken, log=log) - def compile_bridge(self, logger, faildescr, inputargs, backend_positions, + def compile_bridge(self, logger, faildescr, inputframes, backend_positions, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - return self.assembler.assemble_bridge(logger, faildescr, inputargs, + return self.assembler.assemble_bridge(logger, faildescr, inputframes, backend_positions, operations, original_loop_token, log=log) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -472,10 +472,11 @@ # resume-only operations that never make it to the real assembler 'ENTER_FRAME/1d', 'LEAVE_FRAME/0', - 'RESUME_PUT/3', + 'RESUME_PUT/3', # arguments are as follows - box or position in the backend, + # the 
frame index (counting from top) and position in the + # frontend 'RESUME_NEW/0d', 'RESUME_SETFIELD_GC/2d', - 'BACKEND_ATTACH/2', '_RESUME_LAST', # ----- end of resume only operations ------ '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -41,8 +41,6 @@ elif op.getopnum() == rop.RESUME_SETFIELD_GC: self.resume_setfield_gc(op.getarg(0), op.getarg(1), op.getdescr()) - elif op.getopnum() == rop.BACKEND_ATTACH: - self.resume_backend_attach(op.getarg(0), op.getarg(1).getint()) elif not op.is_resume(): pos += 1 continue @@ -72,9 +70,6 @@ self.deadframe = deadframe self.backend_values = {} - def resume_backend_attach(self, box, position): - self.backend_values[box] = position - def enter_frame(self, pc, jitcode): if pc != -1: self.metainterp.framestack[-1].pc = pc diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -26,7 +26,7 @@ def dump_registers(self, lst, backend_values): lst += [backend_values[x] for x in self.registers_i] lst += [backend_values[x] for x in self.registers_r] - lst += [backend_values[x] for x in self.registers_f] + lst += [backend_values[x] for x in self.registers_f] class MockMetaInterp(object): def __init__(self): From noreply at buildbot.pypy.org Sun Jan 12 12:01:37 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 12:01:37 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) finish resume2.py refactoring for now Message-ID: <20140112110137.51E9E1C039A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68602:6cc1ba9ae765 Date: 2014-01-12 11:44 +0100 http://bitbucket.org/pypy/pypy/changeset/6cc1ba9ae765/ Log: (fijal, rguillebert) finish resume2.py refactoring for now diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -34,7 +34,7 @@ elif op.getopnum() == rop.LEAVE_FRAME: self.leave_frame() elif op.getopnum() == rop.RESUME_PUT: - self.resume_put(op.getarg(0), op.getarg(1).getint(), + self.resume_put(op.getarg(0).getint(), op.getarg(1).getint(), op.getarg(2).getint()) elif op.getopnum() == rop.RESUME_NEW: self.resume_new(op.result, op.getdescr()) @@ -48,27 +48,30 @@ xxx pos += 1 - def resume_put(self, box, depth, frontend_position): + def resume_put(self, jitframe_pos, depth, frontend_position): jitcode = self.metainterp.framestack[-1].jitcode - frame = self.metainterp.framestack[- depth - 1] + frame = self.metainterp.framestack[depth] if frontend_position < jitcode.num_regs_i(): - self.put_box_int(frame, frontend_position, box) + self.put_box_int(frame, frontend_position, jitframe_pos) elif frontend_position < (jitcode.num_regs_r() + jitcode.num_regs_i()): self.put_box_ref(frame, frontend_position - jitcode.num_regs_i(), - box) + jitframe_pos) else: assert frontend_position < jitcode.num_regs() self.put_box_float(frame, frontend_position - jitcode.num_regs_r() - - jitcode.num_regs_i(), box) + - jitcode.num_regs_i(), jitframe_pos) + + def read_int(self, jitframe_pos): + return self.metainterp.cpu.get_int_value(self.deadframe, jitframe_pos) class DirectResumeReader(AbstractResumeReader): - pass + def __init__(self, *args): + xxx class 
BoxResumeReader(AbstractResumeReader): def __init__(self, metainterp, deadframe): self.metainterp = metainterp self.deadframe = deadframe - self.backend_values = {} def enter_frame(self, pc, jitcode): if pc != -1: @@ -78,24 +81,19 @@ def leave_frame(self): self.metainterp.popframe() - def put_box_int(self, frame, position, box): - frame.registers_i[position] = box + def put_box_int(self, frame, position, jitframe_pos): + frame.registers_i[position] = BoxInt(self.read_int(jitframe_pos)) - def put_box_ref(self, frame, position, box): - frame.registers_r[position] = box + def put_box_ref(self, frame, position, jitframe_pos): + xxx + frame.registers_r[position] = self.read_ref(jitframe_pos) - def put_box_float(self, frame, position, box): - frame.registers_f[position] = box + def put_box_float(self, frame, position, jitframe_pos): + xxx + frame.registers_f[position] = self.read_float(jitframe_pos) def finish(self): - cpu = self.metainterp.cpu - for box, position in self.backend_values.iteritems(): - if box.type == 'i': - intval = cpu.get_int_value(self.deadframe, position) - assert isinstance(box, BoxInt) - box.value = intval - else: - xxx + pass def rebuild_from_resumedata(metainterp, deadframe, faildescr): BoxResumeReader(metainterp, deadframe).rebuild(faildescr) diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -62,15 +62,14 @@ jitcode = JitCode("jitcode") jitcode.setup(num_regs_i=13) resume_loop = parse(""" - [i0] + [] enter_frame(-1, descr=jitcode1) - resume_put(i0, 0, 1) - backend_attach(i0, 10) + resume_put(10, 0, 1) leave_frame() """, namespace={'jitcode1': jitcode}) descr = Descr() descr.rd_resume_bytecode = ResumeBytecode(resume_loop.operations) - descr.rd_bytecode_position = 3 + descr.rd_bytecode_position = 2 metainterp = MockMetaInterp() metainterp.cpu = MockCPU() rebuild_from_resumedata(metainterp, "myframe", descr) @@ -84,25 +83,21 @@ jitcode2 = JitCode("jitcode2") jitcode2.setup(num_regs_i=9) resume_loop = parse(""" - [i0, i1, i2, i3] + [] enter_frame(-1, descr=jitcode1) - resume_put(i0, 0, 2) - backend_attach(i0, 11) + resume_put(11, 0, 2) enter_frame(12, descr=jitcode2) - resume_put(i1, 0, 3) - resume_put(i2, 1, 4) - backend_attach(i1, 12) - backend_attach(i2, 8) + resume_put(12, 1, 3) + resume_put(8, 0, 4) leave_frame() - backend_attach(i3, 10) - resume_put(i3, 0, 1) + resume_put(10, 0, 1) leave_frame() """, namespace={'jitcode1': jitcode1, 'jitcode2': jitcode2}) metainterp = MockMetaInterp() metainterp.cpu = MockCPU() descr = Descr() descr.rd_resume_bytecode = ResumeBytecode(resume_loop.operations) - descr.rd_bytecode_position = 8 + descr.rd_bytecode_position = 5 rebuild_from_resumedata(metainterp, "myframe", descr) assert len(metainterp.framestack) == 2 f = metainterp.framestack[-1] @@ -113,7 +108,7 @@ assert f2.registers_i[4].getint() == 8 + 3 assert f2.registers_i[2].getint() == 11 + 3 - descr.rd_bytecode_position = 11 + descr.rd_bytecode_position = 7 metainterp.framestack = [] rebuild_from_resumedata(metainterp, "myframe", descr) assert len(metainterp.framestack) == 1 @@ -127,24 +122,22 @@ jitcode1 = JitCode("jitcode") jitcode1.setup(num_regs_i=13) base = parse(""" - [i0, i1] + [] enter_frame(-1, descr=jitcode1) - resume_put(i0, 0, 0) - backend_attach(i0, 42) + resume_put(42, 0, 0) # here is the split caused by a guard - resume_put(i1, 0, 1) - backend_attach(i1, 1) + resume_put(1, 0, 1) + leave_frame() """, 
namespace={'jitcode1': jitcode1}) bridge = parse(""" - [i2] - resume_put(i2, 0, 1) - backend_attach(i2, 2) + [] + resume_put(2, 0, 1) """) descr = Descr() - descr.rd_bytecode_position = 2 + descr.rd_bytecode_position = 1 parent = ResumeBytecode(base.operations) b = ResumeBytecode(bridge.operations, parent=parent, - parent_position=3) + parent_position=2) descr.rd_resume_bytecode = b metainterp = MockMetaInterp() metainterp.cpu = MockCPU() @@ -168,6 +161,7 @@ """, namespace={'jitcode':jitcode1}) def test_reconstructing_resume_reader(self): + py.test.skip("xxx") jitcode1 = JitCode("jitcode") jitcode1.setup(num_regs_i=2, num_regs_f=0, num_regs_r=0) jitcode2 = JitCode("jitcode2") From noreply at buildbot.pypy.org Sun Jan 12 12:01:38 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 12:01:38 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: fix the reconstructing_resume_reader Message-ID: <20140112110138.A41C01C039A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68603:278fef54424c Date: 2014-01-12 11:59 +0100 http://bitbucket.org/pypy/pypy/changeset/278fef54424c/ Log: fix the reconstructing_resume_reader diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -339,16 +339,13 @@ assert isinstance(res, history.AbstractFailDescr) return res - def get_int_value(self, deadframe, locs, pos): + def get_int_value(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) - if locs is None: - assert pos == 0 - else: - pos = locs[pos] * WORD - return self.read_int_at_mem(deadframe, pos + ofs, WORD, 1) + return self.read_int_at_mem(deadframe, pos * WORD + ofs, WORD, 1) def get_ref_value(self, deadframe, locs, pos): + xxx descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) if locs is None: @@ -357,14 +354,10 @@ pos = locs[pos] * WORD return self.read_ref_at_mem(deadframe, pos + ofs) - def get_float_value(self, deadframe, locs, pos): + def get_float_value(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) - if locs is None: - assert pos == 0 - else: - pos = locs[pos] * WORD - return self.read_float_at_mem(deadframe, pos + ofs) + return self.read_float_at_mem(deadframe, pos * WORD + ofs) # ____________________ RAW PRIMITIVES ________________________ diff --git a/rpython/jit/backend/llsupport/resumebuilder.py b/rpython/jit/backend/llsupport/resumebuilder.py --- a/rpython/jit/backend/llsupport/resumebuilder.py +++ b/rpython/jit/backend/llsupport/resumebuilder.py @@ -19,7 +19,7 @@ self.framestack.append([None] * jitcode.num_regs()) def resume_put(self, box, framepos, frontend_pos): - self.framestack[-framepos - 1][frontend_pos] = box + self.framestack[framepos][frontend_pos] = box def resume_new(self, result, descr): self.deps[result] = {} diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -69,11 +69,11 @@ else: self.guard_failed = True if result_type == 'int': - return BoxInt(self.cpu.get_int_value(deadframe, locs, 0)) + return BoxInt(self.cpu.get_int_value(deadframe, 0)) elif result_type == 'ref': - return BoxPtr(self.cpu.get_ref_value(deadframe, locs, 0)) + return BoxPtr(self.cpu.get_ref_value(deadframe, 
0)) elif result_type == 'float': - return BoxFloat(self.cpu.get_float_value(deadframe, locs, 0)) + return BoxFloat(self.cpu.get_float_value(deadframe, 0)) elif result_type == 'void': return None else: @@ -132,7 +132,7 @@ self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) - res = self.cpu.get_int_value(deadframe, None, 0) + res = self.cpu.get_int_value(deadframe, 0) assert res == 3 assert fail.identifier == 1 @@ -151,7 +151,7 @@ deadframe = self.cpu.execute_token(looptoken, longlong.getfloatstorage(2.8)) fail = self.cpu.get_latest_descr(deadframe) - res = self.cpu.get_float_value(deadframe, None, 0) + res = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(res) == 5.1 fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 @@ -172,7 +172,6 @@ ResOperation(rop.RESUME_PUT, [i1, ConstInt(0), ConstInt(0)], None), ResOperation(rop.GUARD_TRUE, [i2], None, descr=BasicFailDescr(2)), - ResOperation(rop.LEAVE_FRAME, [], None), ResOperation(rop.JUMP, [i1], None, descr=targettoken), ] inputargs = [i0] @@ -181,8 +180,7 @@ deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 - locs = rebuild_locs_from_resumedata(fail) - res = self.cpu.get_int_value(deadframe, locs, 0) + res = self.cpu.get_int_value(deadframe, 0) assert res == 10 def test_backends_dont_keep_loops_alive(self): @@ -357,7 +355,7 @@ deadframe = self.cpu.execute_token(looptoken, value) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr - res = self.cpu.get_float_value(deadframe, None, 0) + res = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(res) == -61.25 looptoken = JitCellToken() @@ -368,7 +366,7 @@ deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr - res = self.cpu.get_float_value(deadframe, None, 0) + res = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(res) == 42.5 def test_execute_operations_in_env(self): diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -34,7 +34,7 @@ elif op.getopnum() == rop.LEAVE_FRAME: self.leave_frame() elif op.getopnum() == rop.RESUME_PUT: - self.resume_put(op.getarg(0).getint(), op.getarg(1).getint(), + self.resume_put(op.getarg(0), op.getarg(1).getint(), op.getarg(2).getint()) elif op.getopnum() == rop.RESUME_NEW: self.resume_new(op.result, op.getdescr()) @@ -48,7 +48,8 @@ xxx pos += 1 - def resume_put(self, jitframe_pos, depth, frontend_position): + def resume_put(self, jitframe_pos_const, depth, frontend_position): + jitframe_pos = jitframe_pos_const.getint() jitcode = self.metainterp.framestack[-1].jitcode frame = self.metainterp.framestack[depth] if frontend_position < jitcode.num_regs_i(): diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -30,6 +30,7 @@ class MockMetaInterp(object): def __init__(self): + self.cpu = MockCPU() self.framestack = [] def newframe(self, jitcode): @@ -45,14 +46,23 @@ class RebuildingResumeReader(BoxResumeReader): def __init__(self): - self.backend_values = {} self.metainterp = MockMetaInterp() + def put_box_int(self, frame, position, jitframe_pos): + frame.registers_i[position] = 
jitframe_pos + + def put_box_float(self, frame, position, jitframe_pos): + xxx + + def put_box_ref(self, frame, position, jitframe_pos): + xxx + def finish(self): - l = [] + framestack = [] for frame in self.metainterp.framestack: - frame.dump_registers(l, self.backend_values) - return l + framestack.append(frame.registers_i + frame.registers_r + + frame.registers_f) + return framestack def rebuild_locs_from_resumedata(faildescr): return RebuildingResumeReader().rebuild(faildescr) @@ -161,27 +171,23 @@ """, namespace={'jitcode':jitcode1}) def test_reconstructing_resume_reader(self): - py.test.skip("xxx") jitcode1 = JitCode("jitcode") jitcode1.setup(num_regs_i=2, num_regs_f=0, num_regs_r=0) jitcode2 = JitCode("jitcode2") jitcode2.setup(num_regs_i=1, num_regs_f=0, num_regs_r=0) resume_loop = parse(""" - [i0, i1, i2, i3] + [] enter_frame(-1, descr=jitcode1) - resume_put(i0, 0, 1) - backend_attach(i0, 11) + resume_put(11, 0, 1) enter_frame(12, descr=jitcode2) - resume_put(i1, 0, 0) - backend_attach(i1, 12) - resume_put(i3, 1, 0) - backend_attach(i3, 8) + resume_put(12, 1, 0) + resume_put(8, 0, 0) leave_frame() leave_frame() """, namespace={'jitcode1': jitcode1, 'jitcode2': jitcode2}) descr = Descr() descr.rd_resume_bytecode = ResumeBytecode(resume_loop.operations) - descr.rd_bytecode_position = 8 + descr.rd_bytecode_position = 5 locs = rebuild_locs_from_resumedata(descr) - assert locs == [8, 11, 12] + assert locs == [[8, 11], [12]] From noreply at buildbot.pypy.org Sun Jan 12 14:11:17 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sun, 12 Jan 2014 14:11:17 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: write a test and fix Message-ID: <20140112131117.4384D1C039A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68605:85b192f1e657 Date: 2014-01-12 12:56 +0100 http://bitbucket.org/pypy/pypy/changeset/85b192f1e657/ Log: write a test and fix diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -201,7 +201,7 @@ funcaddr = llmemory.cast_ptr_to_adr(funcptr) mi._record_helper_nonpure_varargs( rop.CALL, resbox, funcdescr, - [ConstInt(mi.cpu.cast_adr_to_int(funcaddr)),]) + [ConstInt(heaptracker.adr2int(funcaddr)),]) return resbox else: return ConstInt(0) @@ -1502,8 +1502,10 @@ if warmrunnerdesc: self.config = warmrunnerdesc.translator.config else: - from rpython.config.translationoption import get_combined_translation_config - self.config = get_combined_translation_config(translating=True) + self.config = cpu.rtyper.annotator.translator.config + # else: + # from rpython.config.translationoption import get_combined_translation_config + # self.config = get_combined_translation_config(translating=True) backendmodule = self.cpu.__module__ backendmodule = backendmodule.split('.')[-2] @@ -1523,7 +1525,8 @@ self.stm_should_break_transaction = rffi.llexternal( 'stm_should_break_transaction', [], lltype.Bool, - sandboxsafe=True, _nowrapper=True, transactionsafe=True) + sandboxsafe=True, _nowrapper=True, transactionsafe=True, + _callable=lambda : False) FUNC = lltype.typeOf(self.stm_should_break_transaction).TO ei = EffectInfo([], [], [], [], diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -14,6 +14,19 @@ return rstm.jit_stm_should_break_transaction(False) res = self.interp_operations(g, []) assert res == False + 
self.check_operations_history({}) + + def test_not_removed(self): + import time + def g(): + time.sleep(0) + return rstm.jit_stm_should_break_transaction(False) + res = self.interp_operations(g, [], translationoptions={"stm":True}) + assert res == False + self.check_operations_history(call=1, call_may_force=1) + + + class TestLLtype(STMTests, LLJitMixin): pass From noreply at buildbot.pypy.org Sun Jan 12 14:11:15 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sun, 12 Jan 2014 14:11:15 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: WIP: make stm_transaction_break a guard Message-ID: <20140112131115.DBD4B1C039A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68604:55c2df34eca5 Date: 2014-01-12 12:16 +0100 http://bitbucket.org/pypy/pypy/changeset/55c2df34eca5/ Log: WIP: make stm_transaction_break a guard diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -67,7 +67,8 @@ self=self, co_code=co_code, next_instr=next_instr, ec=ec) # nothing inbetween! - rstm.jit_stm_transaction_break_point(False) + if rstm.jit_should_break_transaction(False): + rstm.jit_stm_transaction_break_point() self = self._hints_for_stm() next_instr = self.handle_bytecode(co_code, next_instr, ec) except ExitFrame: diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -70,7 +70,8 @@ frame=self, next_instr=next_instr, pycode=pycode, is_being_profiled=is_being_profiled) # nothing inbetween! - rstm.jit_stm_transaction_break_point(False) + if rstm.jit_should_break_transaction(False): + rstm.jit_stm_transaction_break_point() co_code = pycode.co_code self.valuestackdepth = hint(self.valuestackdepth, promote=True) next_instr = self.handle_bytecode(co_code, next_instr, ec) @@ -96,7 +97,8 @@ self.last_instr = intmask(jumpto) ec.bytecode_trace(self, decr_by) jumpto = r_uint(self.last_instr) - rstm.jit_stm_transaction_break_point(True) + if rstm.jit_should_break_transaction(True): + rstm.jit_stm_transaction_break_point() # pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto, pycode=self.getcode(), diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1341,15 +1341,17 @@ [v], None)) return ops + def rewrite_op_jit_stm_should_break_transaction(self, op): + assert isinstance(op.args[0], Constant) + + arg = int(op.args[0].value) + c_arg = Constant(arg, lltype.Signed) + + return SpaceOperation('stm_should_break_transaction', + [c_arg], op.result) + def rewrite_op_jit_stm_transaction_break_point(self, op): - if isinstance(op.args[0], Constant): - arg = int(op.args[0].value) - c_arg = Constant(arg, lltype.Signed) - else: - log.WARNING("stm_transaction_break_point without const argument, assuming False in %r" % (self.graph,)) - c_arg = Constant(0, lltype.Signed) - - return SpaceOperation('stm_transaction_break', [c_arg], op.result) + return SpaceOperation('stm_transaction_break', [], op.result) def rewrite_op_jit_marker(self, op): key = op.args[0].value diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -893,8 +893,14 @@ def bhimpl_ref_isvirtual(x): return False - @arguments("i") - def bhimpl_stm_transaction_break(if_there_is_no_other): + + 
@arguments("i", returns="i") + def bhimpl_stm_should_break_transaction(if_there_is_no_other): + return False + + + @arguments() + def bhimpl_stm_transaction_break(): pass # ---------- diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -18,8 +18,7 @@ from rpython.rlib.jit import Counters from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper.lltypesystem import lltype, rclass, rffi - +from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory # ____________________________________________________________ @@ -189,13 +188,26 @@ # ------------------------------ @arguments("int") - def opimpl_stm_transaction_break(self, if_there_is_no_other): + def opimpl_stm_should_break_transaction(self, if_there_is_no_other): val = bool(if_there_is_no_other) mi = self.metainterp if (mi.stm_break_wanted or (val and not mi.stm_break_done)): mi.stm_break_done = True mi.stm_break_wanted = False - self.execute(rop.STM_TRANSACTION_BREAK, ConstInt(val)) + # insert a CALL + resbox = history.BoxInt(0) + funcptr = mi.staticdata.stm_should_break_transaction + funcdescr = mi.staticdata.stm_should_break_transaction_descr + funcaddr = llmemory.cast_ptr_to_adr(funcptr) + mi._record_helper_nonpure_varargs( + rop.CALL, resbox, funcdescr, + [ConstInt(mi.cpu.cast_adr_to_int(funcaddr)),]) + return resbox + else: + return ConstInt(0) + + def opimpl_stm_transaction_break(self): + self.execute(rop.STM_TRANSACTION_BREAK) for _opimpl in ['int_add', 'int_sub', 'int_mul', 'int_floordiv', 'int_mod', 'int_lt', 'int_le', 'int_eq', @@ -1507,6 +1519,22 @@ d = self.exit_frame_with_exception_descr_ref self.cpu.exit_frame_with_exception_descr_ref = d + if self.config.translation.stm: + self.stm_should_break_transaction = rffi.llexternal( + 'stm_should_break_transaction', + [], lltype.Bool, + sandboxsafe=True, _nowrapper=True, transactionsafe=True) + FUNC = lltype.typeOf(self.stm_should_break_transaction).TO + + ei = EffectInfo([], [], [], [], + EffectInfo.EF_CANNOT_RAISE, + can_invalidate=False) + + self.stm_should_break_transaction_descr = ( + self.cpu.calldescrof(FUNC, FUNC.ARGS, + FUNC.RESULT, ei)) + + def _freeze_(self): return True @@ -2695,7 +2723,6 @@ # if the codewriter didn't produce any OS_LIBFFI_CALL at all. 
assert self.staticdata.has_libffi_call # - from rpython.rtyper.lltypesystem import llmemory from rpython.rlib.jit_libffi import CIF_DESCRIPTION_P from rpython.jit.backend.llsupport.ffisupport import get_arg_descr # diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/test/test_stm.py @@ -0,0 +1,19 @@ +import py, sys +from rpython.jit.metainterp.test.support import LLJitMixin +from rpython.rlib.jit import JitDriver, dont_look_inside +from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask +from rpython.jit.codewriter.policy import StopAtXPolicy +from rpython.rlib import rstm + + + + +class STMTests: + def test_simple(self): + def g(): + return rstm.jit_stm_should_break_transaction(False) + res = self.interp_operations(g, []) + assert res == False + +class TestLLtype(STMTests, LLJitMixin): + pass diff --git a/rpython/jit/tl/tlc.py b/rpython/jit/tl/tlc.py --- a/rpython/jit/tl/tlc.py +++ b/rpython/jit/tl/tlc.py @@ -250,7 +250,8 @@ myjitdriver.jit_merge_point(frame=frame, code=code, pc=pc, pool=pool) # nothing inbetween! - rstm.jit_stm_transaction_break_point(False) + if rstm.jit_should_break_transaction(False): + rstm.jit_stm_transaction_break_point() opcode = ord(code[pc]) pc += 1 stack = frame.stack @@ -351,7 +352,8 @@ pc += char2int(code[pc]) pc += 1 if jitted and old_pc > pc: - rstm.jit_stm_transaction_break_point(True) + if rstm.jit_should_break_transaction(True): + rstm.jit_stm_transaction_break_point() myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) @@ -361,7 +363,8 @@ old_pc = pc pc += char2int(code[pc]) + 1 if jitted and old_pc > pc: - rstm.jit_stm_transaction_break_point(True) + if rstm.jit_should_break_transaction(True): + rstm.jit_stm_transaction_break_point() myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) else: @@ -373,7 +376,8 @@ old_pc = pc pc += offset if jitted and old_pc > pc: - rstm.jit_stm_transaction_break_point(True) + if rstm.jit_should_break_transaction(True): + rstm.jit_stm_transaction_break_point() myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -29,26 +29,20 @@ addr = llop.stm_get_adr_of_read_barrier_cache(llmemory.Address) return rffi.cast(lltype.Signed, addr) -def jit_stm_transaction_break_point(if_there_is_no_other): +def jit_stm_transaction_break_point(): + if we_are_translated(): + llop.jit_stm_transaction_break_point(lltype.Void) + + +def jit_stm_should_break_transaction(if_there_is_no_other): # if_there_is_no_other means that we use this point only # if there is no other break point in the trace. 
# If it is False, the point may be used if it comes right # a CALL_RELEASE_GIL - pass # specialized below - # llop.jit_stm_transaction_break_point(lltype.Void, - # if_there_is_no_other) + return llop.jit_stm_should_break_transaction(lltype.Bool, + if_there_is_no_other) -class JitSTMTransactionBreakPoint(ExtRegistryEntry): - _about_ = jit_stm_transaction_break_point - def compute_result_annotation(self, arg): - from rpython.annotator import model as annmodel - return annmodel.s_None - def specialize_call(self, hop): - [v_arg] = hop.inputargs(lltype.Bool) - hop.exception_cannot_occur() - return hop.genop('jit_stm_transaction_break_point', [v_arg], - resulttype=lltype.Void) - + @dont_look_inside def become_inevitable(): llop.stm_become_inevitable(lltype.Void) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -525,6 +525,7 @@ canraise=(Exception,), canmallocgc=True), 'jit_stm_transaction_break_point' : LLOp(canmallocgc=True), + 'jit_stm_should_break_transaction' : LLOp(canrun=True), # __________ GC operations __________ diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -701,6 +701,9 @@ def op_jit_assembler_call(funcptr, *args): return funcptr(*args) +def op_jit_stm_should_break_transaction(if_there_is_no_other): + return False + # ____________________________________________________________ def get_op_impl(opname): diff --git a/rpython/translator/stm/jitdriver.py b/rpython/translator/stm/jitdriver.py --- a/rpython/translator/stm/jitdriver.py +++ b/rpython/translator/stm/jitdriver.py @@ -18,12 +18,13 @@ and op.args[0].value == 'jit_merge_point'): jitdriver = op.args[1].value if not jitdriver.autoreds: - if (relaxed - or (i + 1 < len(block.operations) - and block.operations[i+1].opname == 'jit_stm_transaction_break_point')): - found.append((block, i)) - else: - log.WARNING("ignoring jitdriver without a transaction break point in %r" % (graph,)) + # XXX: BUG, redo the below code in order to ensure atomicity of bytecode instrs + # if (relaxed + # or (i + 1 < len(block.operations) + # and block.operations[i+1].opname == 'jit_stm_transaction_break_point')): + found.append((block, i)) + # else: + # log.WARNING("ignoring jitdriver without a transaction break point in %r" % (graph,)) else: log.WARNING("ignoring jitdriver with autoreds in %r" % ( graph,)) # XXX XXX! 
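The changeset above splits the old single hook into two helpers: rstm.jit_stm_should_break_transaction(if_there_is_no_other), which the tracer records as a residual CALL returning a bool, and rstm.jit_stm_transaction_break_point(), which no longer takes an argument. (The tlc.py hunks still spell the query rstm.jit_should_break_transaction; the call sites are aligned with the rstm.py name in r68607 below.) A minimal sketch of the pattern an interpreter main loop is expected to follow; the toy bytecode loop itself is illustrative and not part of any changeset here:

    from rpython.rlib import rstm
    from rpython.rlib.jit import JitDriver

    jitdriver = JitDriver(greens=['pc', 'code'], reds=['acc'])

    def interp(code):
        # toy bytecode: every byte of 'code' is added to an accumulator
        acc = 0
        pc = 0
        while pc < len(code):
            jitdriver.jit_merge_point(pc=pc, code=code, acc=acc)
            # nothing in between: ask whether this is a suitable spot to end
            # the current transaction (False = use it only if the trace has
            # no other break point), then emit the actual break point
            if rstm.jit_stm_should_break_transaction(False):
                rstm.jit_stm_transaction_break_point()
            acc += ord(code[pc])
            pc += 1
        return acc
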
From noreply at buildbot.pypy.org Sun Jan 12 14:11:18 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sun, 12 Jan 2014 14:11:18 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: another test, more fixes Message-ID: <20140112131118.904481C039A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68606:b18d1e39ab92 Date: 2014-01-12 13:06 +0100 http://bitbucket.org/pypy/pypy/changeset/b18d1e39ab92/ Log: another test, more fixes diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -206,8 +206,10 @@ else: return ConstInt(0) + @arguments() def opimpl_stm_transaction_break(self): - self.execute(rop.STM_TRANSACTION_BREAK) + self.metainterp._record_helper_nonpure_varargs( + rop.STM_TRANSACTION_BREAK, None, None, []) for _opimpl in ['int_add', 'int_sub', 'int_mul', 'int_floordiv', 'int_mod', 'int_lt', 'int_le', 'int_eq', diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -510,7 +510,7 @@ 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] 'KEEPALIVE/1', - 'STM_TRANSACTION_BREAK/1', + 'STM_TRANSACTION_BREAK/0', 'STM_SET_REVISION_GC/1d', # not really GC, writes raw to the header '_CANRAISE_FIRST', # ----- start of can_raise operations ----- diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -24,9 +24,23 @@ res = self.interp_operations(g, [], translationoptions={"stm":True}) assert res == False self.check_operations_history(call=1, call_may_force=1) + + def test_transaction_break(self): + def g(): + rstm.jit_stm_transaction_break_point() + return 42 + self.interp_operations(g, [], translationoptions={"stm":True}) + self.check_operations_history({'stm_transaction_break':1}) class TestLLtype(STMTests, LLJitMixin): pass + + + + + + + diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -979,7 +979,6 @@ op_stm_become_inevitable = _stm_not_implemented op_stm_stop_all_other_threads = _stm_not_implemented op_stm_partial_commit_and_resume_other_threads = _stm_not_implemented - op_jit_stm_transaction_break_point = _stm_not_implemented # __________________________________________________________ # operations on addresses diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -524,7 +524,7 @@ 'jit_assembler_call': LLOp(canrun=True, # similar to an 'indirect_call' canraise=(Exception,), canmallocgc=True), - 'jit_stm_transaction_break_point' : LLOp(canmallocgc=True), + 'jit_stm_transaction_break_point' : LLOp(canrun=True,canmallocgc=True), 'jit_stm_should_break_transaction' : LLOp(canrun=True), # __________ GC operations __________ diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -704,6 +704,10 @@ def op_jit_stm_should_break_transaction(if_there_is_no_other): return False +def op_jit_stm_transaction_break_point(): + pass + + # ____________________________________________________________ def 
get_op_impl(opname): From noreply at buildbot.pypy.org Sun Jan 12 14:11:19 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sun, 12 Jan 2014 14:11:19 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: add new llops to funcgen and change code generation in assembler Message-ID: <20140112131119.D699F1C039A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68607:1b0169359e7d Date: 2014-01-12 14:10 +0100 http://bitbucket.org/pypy/pypy/changeset/1b0169359e7d/ Log: add new llops to funcgen and change code generation in assembler diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -67,7 +67,7 @@ self=self, co_code=co_code, next_instr=next_instr, ec=ec) # nothing inbetween! - if rstm.jit_should_break_transaction(False): + if rstm.jit_stm_should_break_transaction(False): rstm.jit_stm_transaction_break_point() self = self._hints_for_stm() next_instr = self.handle_bytecode(co_code, next_instr, ec) diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -70,7 +70,7 @@ frame=self, next_instr=next_instr, pycode=pycode, is_being_profiled=is_being_profiled) # nothing inbetween! - if rstm.jit_should_break_transaction(False): + if rstm.jit_stm_should_break_transaction(False): rstm.jit_stm_transaction_break_point() co_code = pycode.co_code self.valuestackdepth = hint(self.valuestackdepth, promote=True) @@ -97,7 +97,7 @@ self.last_instr = intmask(jumpto) ec.bytecode_trace(self, decr_by) jumpto = r_uint(self.last_instr) - if rstm.jit_should_break_transaction(True): + if rstm.jit_stm_should_break_transaction(True): rstm.jit_stm_transaction_break_point() # pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto, diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -3160,52 +3160,18 @@ return # tests only mc = self.mc - # if stm_should_break_transaction() - fn = stmtlocal.stm_should_break_transaction_fn - mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) - mc.TEST8(eax.lowest8bits(), eax.lowest8bits()) - mc.J_il(rx86.Conditions['Z'], 0xfffff) # patched later - jz_location2 = mc.get_relative_pos() # # call stm_transaction_break() with the address of the # STM_RESUME_BUF and the custom longjmp function self.push_gcmap(mc, gcmap, mov=True) # - # save all registers - base_ofs = self.cpu.get_baseofs_of_frame_field() - for gpr in self._regalloc.rm.reg_bindings.values(): - v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - mc.MOV_br(v * WORD + base_ofs, gpr.value) - if IS_X86_64: - coeff = 1 - else: - coeff = 2 - ofs = len(gpr_reg_mgr_cls.all_regs) - for xr in self._regalloc.xrm.reg_bindings.values(): - mc.MOVSD_bx((ofs + xr.value * coeff) * WORD + base_ofs, xr.value) - # # CALL break function fn = self.stm_transaction_break_path mc.CALL(imm(fn)) # ** HERE ** is the place an aborted transaction retries # ebp/frame reloaded by longjmp callback # - # restore regs - base_ofs = self.cpu.get_baseofs_of_frame_field() - for gpr in self._regalloc.rm.reg_bindings.values(): - v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] - mc.MOV_rb(gpr.value, v * WORD + base_ofs) - if IS_X86_64: - coeff = 1 - else: - coeff = 2 - ofs = len(gpr_reg_mgr_cls.all_regs) - for xr in self._regalloc.xrm.reg_bindings.values(): - mc.MOVSD_xb(xr.value, (ofs + xr.value * coeff) * WORD + base_ofs) - # - # 
patch the JZ above - offset = mc.get_relative_pos() - jz_location2 - mc.overwrite32(jz_location2-4, offset) + diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1278,8 +1278,8 @@ def consider_stm_transaction_break(self, op): # # only save regs for the should_break_transaction call - self.xrm.before_call() - self.rm.before_call() + self.xrm.before_call(save_all_regs=1) + self.rm.before_call(save_all_regs=1) gcmap = self.get_gcmap() # allocate the gcmap *before* # self.assembler.stm_transaction_break(gcmap) diff --git a/rpython/jit/tl/tlc.py b/rpython/jit/tl/tlc.py --- a/rpython/jit/tl/tlc.py +++ b/rpython/jit/tl/tlc.py @@ -250,7 +250,7 @@ myjitdriver.jit_merge_point(frame=frame, code=code, pc=pc, pool=pool) # nothing inbetween! - if rstm.jit_should_break_transaction(False): + if rstm.jit_stm_should_break_transaction(False): rstm.jit_stm_transaction_break_point() opcode = ord(code[pc]) pc += 1 @@ -352,7 +352,7 @@ pc += char2int(code[pc]) pc += 1 if jitted and old_pc > pc: - if rstm.jit_should_break_transaction(True): + if rstm.jit_stm_should_break_transaction(True): rstm.jit_stm_transaction_break_point() myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) @@ -363,7 +363,7 @@ old_pc = pc pc += char2int(code[pc]) + 1 if jitted and old_pc > pc: - if rstm.jit_should_break_transaction(True): + if rstm.jit_stm_should_break_transaction(True): rstm.jit_stm_transaction_break_point() myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) @@ -376,7 +376,7 @@ old_pc = pc pc += offset if jitted and old_pc > pc: - if rstm.jit_should_break_transaction(True): + if rstm.jit_stm_should_break_transaction(True): rstm.jit_stm_transaction_break_point() myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -623,6 +623,7 @@ OP_STM_CLEAR_EXCEPTION_DATA_ON_ABORT= _OP_STM OP_STM_ALLOCATE_NONMOVABLE_INT_ADR = _OP_STM OP_JIT_STM_TRANSACTION_BREAK_POINT = _OP_STM + OP_JIT_STM_SHOULD_BREAK_TRANSACTION = _OP_STM def OP_STM_IGNORED_START(self, op): return '/* stm_ignored_start */' diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -50,6 +50,10 @@ def jit_stm_transaction_break_point(funcgen, op): return '/* jit_stm_transaction_break_point */' + +def jit_stm_should_break_transaction(funcgen, op): + result = funcgen.expr(op.result) + return '%s = 0; /* jit_stm_should_break_transaction */' % (result, ) def stm_finalize(funcgen, op): return 'stm_finalize();' From noreply at buildbot.pypy.org Sun Jan 12 14:16:27 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 14:16:27 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) pass some more runner tests Message-ID: <20140112131627.4234D1C039A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68608:7824261c8a31 Date: 2014-01-12 12:12 +0100 http://bitbucket.org/pypy/pypy/changeset/7824261c8a31/ Log: (fijal, rguillebert) pass some more runner tests diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -110,27 +110,35 @@ 
self._debug = v return r - def rebuild_faillocs_from_descr(self, descr, inputargs, loc_positions): - locs = [None] * len(loc_positions) + def rebuild_faillocs_from_descr(self, descr, inputframes, loc_positions): + lgt = 0 + for frame in inputframes: + lgt += len(frame) + locs = [None] * lgt GPR_REGS = len(self.cpu.gen_regs) XMM_REGS = len(self.cpu.float_regs) - input_i = 0 if self.cpu.IS_64_BIT: coeff = 1 else: coeff = 2 - for item, pos in enumerate(loc_positions): - if pos < GPR_REGS: - locs[item] = self.cpu.gen_regs[pos] - elif pos < (GPR_REGS + XMM_REGS * coeff): - pos = (pos - GPR_REGS) // coeff - locs[item] = self.cpu.float_regs[pos] - else: - i = pos - self.cpu.JITFRAME_FIXED_SIZE - assert i >= 0 - tp = inputargs[input_i].type - locs[item] = self.new_stack_loc(i, pos * WORD, tp) - input_i += 1 + locs_index = 0 + for i, frame in enumerate(inputframes): + inputlocs = loc_positions[i] + assert len(inputlocs) == len(frame) + for j, item in enumerate(frame): + pos = inputlocs[j] + if pos < GPR_REGS: + locs[locs_index] = self.cpu.gen_regs[pos] + elif pos < (GPR_REGS + XMM_REGS * coeff): + pos = (pos - GPR_REGS) // coeff + locs[locs_index] = self.cpu.float_regs[pos] + else: + stack_pos = pos - self.cpu.JITFRAME_FIXED_SIZE + assert stack_pos >= 0 + tp = item.type + locs[locs_index] = self.new_stack_loc(stack_pos, + pos * WORD, tp) + locs_index += 1 return locs def store_info_on_descr(self, startspos, guardtok, resume_bytecode): diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -252,7 +252,7 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 locs = rebuild_locs_from_resumedata(fail) - res = self.cpu.get_int_value(deadframe, locs, 0) + res = self.cpu.get_int_value(deadframe, 0) assert res == 20 assert self.cpu.tracker.total_compiled_loops == 1 @@ -285,7 +285,7 @@ bridge.append(ResOperation(rop.INT_ADD, [iprev, ConstInt(1)], i1)) iprev = i1 for i, i1 in enumerate(i1list): - bridge.append(ResOperation(rop.RESUME_PUT, [i1, ConstInt(0), ConstInt(i)], None)) + bridge.append(ResOperation(rop.RESUME_PUT, [i1, ConstInt(1), ConstInt(i)], None)) bridge.append(ResOperation(rop.GUARD_FALSE, [i0], None, descr=BasicFailDescr(3))) bridge.append(ResOperation(rop.FINISH, [], None, @@ -299,7 +299,7 @@ locs = rebuild_locs_from_resumedata(fail) assert fail.identifier == 3 for i in range(len(i1list)): - res = self.cpu.get_int_value(deadframe, locs, i + 1) + res = self.cpu.get_int_value(deadframe, locs[1][i]) assert res == 2 + i def test_finish(self): @@ -321,7 +321,7 @@ deadframe = self.cpu.execute_token(looptoken, 99) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr - res = self.cpu.get_int_value(deadframe, None, 0) + res = self.cpu.get_int_value(deadframe, 0) assert res == 99 looptoken = JitCellToken() @@ -332,7 +332,7 @@ deadframe = self.cpu.execute_token(looptoken) fail = self.cpu.get_latest_descr(deadframe) assert fail is faildescr - res = self.cpu.get_int_value(deadframe, None, 0) + res = self.cpu.get_int_value(deadframe, 0) assert res == 42 looptoken = JitCellToken() @@ -397,8 +397,8 @@ deadframe = self.cpu.execute_token(looptoken, 0, 10) fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail) - assert self.cpu.get_int_value(deadframe, locs, 0) == 0 - assert self.cpu.get_int_value(deadframe, locs, 1) == 55 + assert self.cpu.get_int_value(deadframe, locs[0][0]) == 0 + assert 
self.cpu.get_int_value(deadframe, locs[0][1]) == 55 def test_int_operations(self): from rpython.jit.metainterp.test.test_executor import get_int_tests @@ -469,12 +469,13 @@ fail = self.cpu.get_latest_descr(deadframe) if (z == boom) ^ reversed: locs = rebuild_locs_from_resumedata(fail) + pos = locs[0][0] assert fail.identifier == 1 else: - locs = None + pos = 0 assert fail.identifier == 2 if z != boom: - assert self.cpu.get_int_value(deadframe, locs, 0) == z + assert self.cpu.get_int_value(deadframe, pos) == z excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -522,8 +522,9 @@ operations = self._inject_debugging_code(faildescr, operations, 'b', descr_number) + arglocs = self.rebuild_faillocs_from_descr(faildescr, inputframes, + backend_positions) inputargs = flatten(inputframes) - arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs, backend_positions) regalloc = RegAlloc(self, self.cpu.translate_support_code) startpos = self.mc.get_relative_pos() operations = regalloc.prepare_bridge(inputframes, arglocs, From noreply at buildbot.pypy.org Sun Jan 12 14:16:28 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 14:16:28 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: fix a case in resumebuilder Message-ID: <20140112131628.768AA1C039A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68609:e239dd41741b Date: 2014-01-12 13:38 +0100 http://bitbucket.org/pypy/pypy/changeset/e239dd41741b/ Log: fix a case in resumebuilder diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -492,7 +492,10 @@ if box in self.bindings_to_frame_reg: return self.frame_reg if must_exist: - return self.frame_manager.bindings[box] + try: + return self.frame_manager.bindings[box] + except KeyError: + raise return self.frame_manager.loc(box) def return_constant(self, v, forbidden_vars=[], selected_reg=None): diff --git a/rpython/jit/backend/llsupport/resumebuilder.py b/rpython/jit/backend/llsupport/resumebuilder.py --- a/rpython/jit/backend/llsupport/resumebuilder.py +++ b/rpython/jit/backend/llsupport/resumebuilder.py @@ -57,7 +57,14 @@ if op.getopnum() == rop.RESUME_PUT: box = op.getarg(0) args = op.getarglist() - pos = self.regalloc.loc(box).get_jitframe_position() + try: + pos = self.regalloc.loc(box, must_exist=True).get_jitframe_position() + except KeyError: + # the thing is not *yet* anywhere, which means we'll record + # we know about it, but not store the resume_put just yet + self.current_attachment[box] = -1 + self.frontend_pos[box] = (args[1], args[2]) + return self.current_attachment[box] = pos self.frontend_pos[box] = (args[1], args[2]) args[0] = ConstInt(pos) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -362,12 +362,12 @@ while mc.get_relative_pos() < self.min_bytes_before_label: mc.NOP() - def loc(self, v): + def loc(self, v, must_exist=False): if v is None: # xxx kludgy return None if v.type == FLOAT: - return self.xrm.loc(v) - return self.rm.loc(v) + return self.xrm.loc(v, must_exist=must_exist) + return self.rm.loc(v, must_exist=must_exist) def _consider_guard(self, 
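With rebuild_faillocs_from_descr reworked to take a list of frames plus per-frame position lists, the runner tests in this changeset read guard-failure values through a two-level structure: rebuild_locs_from_resumedata(faildescr) yields one list of jitframe positions per interpreter frame, and each position is passed straight to get_int_value and friends. A condensed sketch of that reading pattern, assuming integer boxes only (rebuild_locs_from_resumedata is the test helper living in rpython.jit.metainterp.test.test_resume2):

    from rpython.jit.metainterp.test.test_resume2 import rebuild_locs_from_resumedata

    def read_failure_ints(cpu, deadframe):
        # locs is a list of frames; each frame is a list of jitframe
        # positions, one per frontend register recorded by a resume_put
        faildescr = cpu.get_latest_descr(deadframe)
        locs = rebuild_locs_from_resumedata(faildescr)
        return [[cpu.get_int_value(deadframe, pos) for pos in frame_locs]
                for frame_locs in locs]
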
op): loc = self.rm.make_sure_var_in_reg(op.getarg(0)) From noreply at buildbot.pypy.org Sun Jan 12 14:16:29 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 14:16:29 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: Fix tests Message-ID: <20140112131629.F0CBF1C039A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68610:7fe47e3af653 Date: 2014-01-12 14:15 +0100 http://bitbucket.org/pypy/pypy/changeset/7fe47e3af653/ Log: Fix tests diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -344,15 +344,10 @@ ofs = self.unpack_arraydescr(descr) return self.read_int_at_mem(deadframe, pos * WORD + ofs, WORD, 1) - def get_ref_value(self, deadframe, locs, pos): - xxx + def get_ref_value(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) - if locs is None: - assert pos == 0 - else: - pos = locs[pos] * WORD - return self.read_ref_at_mem(deadframe, pos + ofs) + return self.read_ref_at_mem(deadframe, pos * WORD + ofs) def get_float_value(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr diff --git a/rpython/jit/backend/llsupport/resumebuilder.py b/rpython/jit/backend/llsupport/resumebuilder.py --- a/rpython/jit/backend/llsupport/resumebuilder.py +++ b/rpython/jit/backend/llsupport/resumebuilder.py @@ -52,23 +52,31 @@ self.current_attachment = {} self.frontend_liveness = frontend_liveness self.frontend_pos = {} + self.virtuals = {} def process(self, op): if op.getopnum() == rop.RESUME_PUT: box = op.getarg(0) args = op.getarglist() - try: - pos = self.regalloc.loc(box, must_exist=True).get_jitframe_position() - except KeyError: - # the thing is not *yet* anywhere, which means we'll record - # we know about it, but not store the resume_put just yet - self.current_attachment[box] = -1 + if box in self.virtuals: + newop = op + else: + try: + loc = self.regalloc.loc(box, must_exist=True) + pos = loc.get_jitframe_position() + except KeyError: + # the thing is not *yet* anywhere, which means we'll record + # we know about it, but not store the resume_put just yet + self.current_attachment[box] = -1 + self.frontend_pos[box] = (args[1], args[2]) + return + self.current_attachment[box] = pos self.frontend_pos[box] = (args[1], args[2]) - return - self.current_attachment[box] = pos - self.frontend_pos[box] = (args[1], args[2]) - args[0] = ConstInt(pos) - newop = op.copy_and_change(rop.RESUME_PUT, args=args) + args[0] = ConstInt(pos) + newop = op.copy_and_change(rop.RESUME_PUT, args=args) + elif op.getopnum() == rop.RESUME_NEW: + self.virtuals[op.result] = None + newop = op else: newop = op self.newops.append(newop) diff --git a/rpython/jit/backend/llsupport/test/test_resumebuilder.py b/rpython/jit/backend/llsupport/test/test_resumebuilder.py --- a/rpython/jit/backend/llsupport/test/test_resumebuilder.py +++ b/rpython/jit/backend/llsupport/test/test_resumebuilder.py @@ -72,7 +72,7 @@ enter_frame(-1, descr=jitcode) p0 = resume_new(descr=structdescr) resume_setfield_gc(p0, i0, descr=fielddescr) - resume_put(30, 0, 0) + resume_put(p0, 0, 0) leave_frame() """, namespace=namespace) descr = loop.operations[-3].getdescr() @@ -113,3 +113,19 @@ assert descr2.rd_bytecode_position == 3 equaloplists(descr1.rd_resume_bytecode.opcodes, expected_resume.operations) + + def test_bridge(self): + jitcode = JitCode("name") + 
jitcode.setup(num_regs_i=2, num_regs_r=0, num_regs_f=0) + loop = parse(""" + [i0] + enter_frame(-1, descr=jitcode) + resume_put(i0, 0, 0) + i1 = int_lt(i0, 10) + guard_true(i1) + leave_frame() + """, namespace={'jitcode': jitcode}) + + looptoken = JitCellToken() + self.cpu.compile_loop(None, loop.inputargs, loop.operations, + looptoken) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -1248,10 +1248,10 @@ assert fail.identifier == 42 # for k in range(intboxes): - got = self.cpu.get_int_value(deadframe, locs, k) + got = self.cpu.get_int_value(deadframe, locs[0][k]) assert got == expvalues[k] for k in range(floatboxes): - got = self.cpu.get_float_value(deadframe, locs, k + intboxes) + got = self.cpu.get_float_value(deadframe, locs[0][k + intboxes]) assert got == expvalues[k + intboxes] def test_jump(self): @@ -1379,14 +1379,14 @@ else: refvals.append(val) for i, val in enumerate(intvals): - got = self.cpu.get_int_value(deadframe, locs, i) + got = self.cpu.get_int_value(deadframe, locs[0][i]) assert got == val for i, val in enumerate(refvals): - got = self.cpu.get_ref_value(deadframe, locs, i + len(intvals)) + got = self.cpu.get_ref_value(deadframe, locs[0][i + len(intvals)]) assert got == val for i, val in enumerate(floatvals): - got = self.cpu.get_float_value(deadframe, locs, - i + len(intvals) + len(refvals)) + got = self.cpu.get_float_value(deadframe, locs[0][ + i + len(intvals) + len(refvals)]) assert got == val def test_compile_bridge_float(self): @@ -1437,11 +1437,11 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 locs = rebuild_locs_from_resumedata(fail) - res = self.cpu.get_float_value(deadframe, locs, 0) + res = self.cpu.get_float_value(deadframe, locs[0][0]) assert longlong.getrealfloat(res) == 8.5 for i in range(1, len(fboxes)): got = longlong.getrealfloat(self.cpu.get_float_value( - deadframe, locs, i)) + deadframe, locs[0][i])) assert got == 13.5 + 6.73 * i def test_compile_bridge_spilled_float(self): @@ -1474,9 +1474,9 @@ fail = self.cpu.get_latest_descr(deadframe) assert loop.operations[-3].getdescr() is fail is faildescr1 locs = rebuild_locs_from_resumedata(fail) - f1 = self.cpu.get_float_value(deadframe, locs, 0) - f2 = self.cpu.get_float_value(deadframe, locs, 1) - f3 = self.cpu.get_float_value(deadframe, locs, 2) + f1 = self.cpu.get_float_value(deadframe, locs[0][0]) + f2 = self.cpu.get_float_value(deadframe, locs[0][1]) + f3 = self.cpu.get_float_value(deadframe, locs[0][2]) assert longlong.getrealfloat(f1) == 132.25 assert longlong.getrealfloat(f2) == 0.75 assert longlong.getrealfloat(f3) == 133.0 @@ -1499,9 +1499,9 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 103 locs = rebuild_locs_from_resumedata(fail) - f1 = self.cpu.get_float_value(deadframe, locs, 0) - f2 = self.cpu.get_float_value(deadframe, locs, 1) - f3 = self.cpu.get_float_value(deadframe, locs, 2) + f1 = self.cpu.get_float_value(deadframe, locs[0][0]) + f2 = self.cpu.get_float_value(deadframe, locs[0][1]) + f3 = self.cpu.get_float_value(deadframe, locs[0][2]) assert longlong.getrealfloat(f1) == 132.25 assert longlong.getrealfloat(f2) == 0.75 assert longlong.getrealfloat(f3) == 133.0 @@ -2131,12 +2131,12 @@ looptoken = JitCellToken() self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) - assert self.cpu.get_ref_value(deadframe, None, 0) == xptr + assert 
self.cpu.get_ref_value(deadframe, 0) == xptr excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue deadframe = self.cpu.execute_token(looptoken, 0) locs = rebuild_locs_from_resumedata(faildescr) - assert self.cpu.get_int_value(deadframe, locs, 0) == 1 + assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue @@ -2155,7 +2155,7 @@ looptoken = JitCellToken() self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) - assert self.cpu.get_int_value(deadframe, locs, 0) == 1 + assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 excvalue = self.cpu.grab_exc_value(deadframe) assert excvalue == yptr @@ -2176,11 +2176,11 @@ self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) locs = rebuild_locs_from_resumedata(faildescr) - assert self.cpu.get_int_value(deadframe, locs, 0) == 1 + assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 excvalue = self.cpu.grab_exc_value(deadframe) assert excvalue == xptr deadframe = self.cpu.execute_token(looptoken, 0) - assert self.cpu.get_int_value(deadframe, locs, 0) == 0 + assert self.cpu.get_int_value(deadframe, locs[0][0]) == 0 excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue @@ -2377,15 +2377,15 @@ assert not called locs = rebuild_locs_from_resumedata(faildescr) for j in range(5): - assert self.cpu.get_int_value(frame, locs, j) == j - assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs, 6)) == 1.2 - assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs, 7)) == 3.4 + assert self.cpu.get_int_value(frame, locs[0][j]) == j + assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[0][6])) == 1.2 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[0][7])) == 3.4 frame = self.cpu.execute_token(looptoken, 1, 1, 1, 2, 3, 4, 5, f1, f2) assert called == [tuple(range(1, i + 1))] for j in range(4): - assert self.cpu.get_int_value(frame, locs, j + 1) == j + 1 - assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs, 6)) == 1.2 - assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs, 7)) == 3.4 + assert self.cpu.get_int_value(frame, locs[0][j + 1]) == j + 1 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[0][6])) == 1.2 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[0][7])) == 3.4 def test_force_operations_returning_void(self): values = [] @@ -2395,8 +2395,8 @@ fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail) values.append(fail) - values.append(self.cpu.get_int_value(deadframe, locs, 0)) - values.append(self.cpu.get_int_value(deadframe, locs, 1)) + values.append(self.cpu.get_int_value(deadframe, locs[0][0])) + values.append(self.cpu.get_int_value(deadframe, locs[0][1])) self.cpu.set_savedata_ref(deadframe, random_gcref) FUNC = self.FuncType([llmemory.GCREF, lltype.Signed], lltype.Void) @@ -2426,15 +2426,15 @@ deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 - assert self.cpu.get_int_value(deadframe, None, 0) == 20 + assert self.cpu.get_int_value(deadframe, 0) == 20 assert values == [] deadframe = self.cpu.execute_token(looptoken, 10, 1) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 locs = rebuild_locs_from_resumedata(fail) - assert self.cpu.get_int_value(deadframe, locs, 0) == 1 - assert 
self.cpu.get_int_value(deadframe, locs, 1) == 10 + assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 + assert self.cpu.get_int_value(deadframe, locs[0][1]) == 10 assert values == [faildescr, 1, 10] assert self.cpu.get_savedata_ref(deadframe) # not NULL assert self.cpu.get_savedata_ref(deadframe) == random_gcref @@ -2446,8 +2446,8 @@ deadframe = self.cpu.force(token) fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail) - values.append(self.cpu.get_int_value(deadframe, locs, 0)) - values.append(self.cpu.get_int_value(deadframe, locs, 2)) + values.append(self.cpu.get_int_value(deadframe, locs[0][0])) + values.append(self.cpu.get_int_value(deadframe, locs[0][2])) self.cpu.set_savedata_ref(deadframe, random_gcref) return 42 @@ -2480,16 +2480,16 @@ deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 - assert self.cpu.get_int_value(deadframe, None, 0) == 42 + assert self.cpu.get_int_value(deadframe, 0) == 42 assert values == [] deadframe = self.cpu.execute_token(looptoken, 10, 1) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 locs = rebuild_locs_from_resumedata(fail) - assert self.cpu.get_int_value(deadframe, locs, 0) == 1 - assert self.cpu.get_int_value(deadframe, locs, 1) == 42 - assert self.cpu.get_int_value(deadframe, locs, 2) == 10 + assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 + assert self.cpu.get_int_value(deadframe, locs[0][1]) == 42 + assert self.cpu.get_int_value(deadframe, locs[0][2]) == 10 assert values == [1, 10] assert self.cpu.get_savedata_ref(deadframe) == random_gcref @@ -2502,8 +2502,8 @@ deadframe = self.cpu.force(token) fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail) - values.append(self.cpu.get_int_value(deadframe, locs, 0)) - values.append(self.cpu.get_int_value(deadframe, locs, 1)) + values.append(self.cpu.get_int_value(deadframe, locs[0][0])) + values.append(self.cpu.get_int_value(deadframe, locs[0][1])) self.cpu.set_savedata_ref(deadframe, random_gcref) return 42.5 @@ -2536,7 +2536,7 @@ deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 - x = self.cpu.get_float_value(deadframe, None, 0) + x = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(x) == 42.5 assert values == [] @@ -2544,10 +2544,10 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 locs = rebuild_locs_from_resumedata(fail) - assert self.cpu.get_int_value(deadframe, locs, 0) == 1 - x = self.cpu.get_float_value(deadframe, locs, 2) + assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 + x = self.cpu.get_float_value(deadframe, locs[0][2]) assert longlong.getrealfloat(x) == 42.5 - assert self.cpu.get_int_value(deadframe, locs, 1) == 10 + assert self.cpu.get_int_value(deadframe, locs[0][1]) == 10 assert values == [1, 10] assert self.cpu.get_savedata_ref(deadframe) == random_gcref @@ -2578,7 +2578,7 @@ deadframe = self.cpu.execute_token(looptoken, ord('G')) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 - assert self.cpu.get_int_value(deadframe, None, 0) == ord('g') + assert self.cpu.get_int_value(deadframe, 0) == ord('g') def test_call_to_c_function_with_callback(self): from rpython.rlib.libffi import CDLL, types, ArgChain, clibffi @@ -2763,7 +2763,7 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 if isinstance(b3, BoxInt): - r = 
self.cpu.get_int_value(deadframe, None, 0) + r = self.cpu.get_int_value(deadframe, 0) if isinstance(result, r_singlefloat): assert -sys.maxint-1 <= r <= 0xFFFFFFFF r, = struct.unpack("f", struct.pack("I", r & 0xFFFFFFFF)) @@ -2772,7 +2772,7 @@ r = rffi.cast(TP, r) assert r == result elif isinstance(b3, BoxFloat): - r = self.cpu.get_float_value(deadframe, None, 0) + r = self.cpu.get_float_value(deadframe, 0) if isinstance(result, float): r = longlong.getrealfloat(r) else: @@ -2930,7 +2930,7 @@ deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 - assert self.cpu.get_int_value(deadframe, None, 0) == -42 + assert self.cpu.get_int_value(deadframe, 0) == -42 print 'step 1 ok' print '-'*79 @@ -2941,7 +2941,7 @@ fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail) assert fail is faildescr - assert self.cpu.get_int_value(deadframe, locs, 0) == 9 + assert self.cpu.get_int_value(deadframe, locs[0][0]) == 9 print 'step 2 ok' print '-'*79 @@ -2957,7 +2957,7 @@ deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 3 - assert self.cpu.get_int_value(deadframe, None, 0) == 9 + assert self.cpu.get_int_value(deadframe, 0) == 9 print 'step 3 ok' print '-'*79 @@ -2992,12 +2992,12 @@ ops2 = [ ResOperation(rop.JUMP, [ConstInt(333)], None, descr=labeldescr), ] - self.cpu.compile_bridge(None, faildescr, [[]], [], ops2, looptoken) + self.cpu.compile_bridge(None, faildescr, [[]], [[]], ops2, looptoken) # run: must not be caught in an infinite loop deadframe = self.cpu.execute_token(looptoken, 16) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 3 - assert self.cpu.get_int_value(deadframe, None, 0) == 333 + assert self.cpu.get_int_value(deadframe, 0) == 333 # pure do_ / descr features @@ -3170,7 +3170,7 @@ def test_assembler_call(self): called = [] def assembler_helper(deadframe, virtualizable): - assert self.cpu.get_int_value(deadframe, None, 0) == 97 + assert self.cpu.get_int_value(deadframe, 0) == 97 called.append(self.cpu.get_latest_descr(deadframe)) return 4 + 9 @@ -3208,7 +3208,7 @@ EffectInfo.MOST_GENERAL) args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(looptoken, *args) - assert self.cpu.get_int_value(deadframe, None, 0) == 55 + assert self.cpu.get_int_value(deadframe, 0) == 55 ops = ''' [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9] i10 = int_add(i0, 42) @@ -3221,7 +3221,7 @@ self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(othertoken, *args) - assert self.cpu.get_int_value(deadframe, None, 0) == 13 + assert self.cpu.get_int_value(deadframe, 0) == 13 assert called == [finish_descr] # test the fast path, which should not call assembler_helper() @@ -3231,7 +3231,7 @@ self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) args = [i+1 for i in range(10)] deadframe = self.cpu.execute_token(othertoken, *args) - assert self.cpu.get_int_value(deadframe, None, 0) == 97 + assert self.cpu.get_int_value(deadframe, 0) == 97 assert not called def test_assembler_call_propagate_exc(self): @@ -3282,14 +3282,14 @@ othertoken = JitCellToken() self.cpu.compile_loop(None, loop.inputargs, loop.operations, othertoken) deadframe = self.cpu.execute_token(othertoken, sys.maxint - 1) - assert self.cpu.get_int_value(deadframe, None, 0) == 3 + assert self.cpu.get_int_value(deadframe, 0) == 3 def 
test_assembler_call_float(self): if not self.cpu.supports_floats: py.test.skip("requires floats") called = [] def assembler_helper(deadframe, virtualizable): - x = self.cpu.get_float_value(deadframe, None, 0) + x = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(x) == 1.2 + 3.2 called.append(self.cpu.get_latest_descr(deadframe)) print '!' * 30 + 'assembler_helper' @@ -3322,7 +3322,7 @@ args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(2.3)] deadframe = self.cpu.execute_token(looptoken, *args) - x = self.cpu.get_float_value(deadframe, None, 0) + x = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(x) == 1.2 + 2.3 ops = ''' [f4, f5] @@ -3336,7 +3336,7 @@ args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(3.2)] deadframe = self.cpu.execute_token(othertoken, *args) - x = self.cpu.get_float_value(deadframe, None, 0) + x = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(x) == 13.5 assert called == [finish_descr] @@ -3348,7 +3348,7 @@ args = [longlong.getfloatstorage(1.2), longlong.getfloatstorage(4.2)] deadframe = self.cpu.execute_token(othertoken, *args) - x = self.cpu.get_float_value(deadframe, None, 0) + x = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(x) == 1.2 + 4.2 assert not called @@ -3381,7 +3381,7 @@ py.test.skip("requires floats") called = [] def assembler_helper(deadframe, virtualizable): - x = self.cpu.get_float_value(deadframe, None, 0) + x = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(x) == 1.25 + 3.25 called.append(self.cpu.get_latest_descr(deadframe)) return 13.5 @@ -3412,7 +3412,7 @@ args = [longlong.getfloatstorage(1.25), longlong.getfloatstorage(2.35)] deadframe = self.cpu.execute_token(looptoken, *args) - x = self.cpu.get_float_value(deadframe, None, 0) + x = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(x) == 1.25 + 2.35 assert not called @@ -3430,7 +3430,7 @@ args = [longlong.getfloatstorage(1.25), longlong.getfloatstorage(3.25)] deadframe = self.cpu.execute_token(othertoken, *args) - x = self.cpu.get_float_value(deadframe, None, 0) + x = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(x) == 13.5 assert called == [finish_descr] del called[:] @@ -3453,7 +3453,7 @@ args = [longlong.getfloatstorage(6.0), longlong.getfloatstorage(1.5)] # 6.0-1.5 == 1.25+3.25 deadframe = self.cpu.execute_token(othertoken, *args) - x = self.cpu.get_float_value(deadframe, None, 0) + x = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(x) == 13.5 assert called == [finish_descr2] @@ -3863,7 +3863,7 @@ fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail) assert fail.identifier == 2 - res = self.cpu.get_int_value(deadframe, locs, 0) + res = self.cpu.get_int_value(deadframe, locs[0][0]) assert res == 10 inputargs2 = [i0] @@ -3877,7 +3877,7 @@ fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail) assert fail.identifier == 3 - res = self.cpu.get_int_value(deadframe, locs, 0) + res = self.cpu.get_int_value(deadframe, locs[0][0]) assert res == -10 def test_int_force_ge_zero(self): @@ -3892,7 +3892,7 @@ self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) for inp, outp in [(2,2), (-3, 0)]: deadframe = self.cpu.execute_token(looptoken, inp) - assert outp == self.cpu.get_int_value(deadframe, None, 0) + assert outp == self.cpu.get_int_value(deadframe, 0) def test_compile_asmlen(self): from rpython.jit.backend.llsupport.llmodel import 
AbstractLLCPU @@ -4053,7 +4053,7 @@ operations = [ ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(99)) ] - self.cpu.compile_bridge(None, faildescr, [[]], [], operations, looptoken) + self.cpu.compile_bridge(None, faildescr, [[]], [[]], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, null_box.getref_base()) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 99 @@ -4084,7 +4084,7 @@ self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) - result = self.cpu.get_int_value(deadframe, None, 0) + result = self.cpu.get_int_value(deadframe, 0) assert result == rffi.cast(lltype.Signed, value) rawstorage.free_raw_storage(p) @@ -4114,7 +4114,7 @@ self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) - result = self.cpu.get_float_value(deadframe, None, 0) + result = self.cpu.get_float_value(deadframe, 0) result = longlong.getrealfloat(result) assert result == rffi.cast(lltype.Float, value) rawstorage.free_raw_storage(p) @@ -4144,7 +4144,7 @@ self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, rffi.cast(lltype.Signed, p), 16) - result = self.cpu.get_int_value(deadframe, None, 0) + result = self.cpu.get_int_value(deadframe, 0) assert result == longlong.singlefloat2int(value) rawstorage.free_raw_storage(p) @@ -4254,7 +4254,7 @@ deadframe = self.cpu.force(token) fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail) - values.append(self.cpu.get_int_value(deadframe, locs, 0)) + values.append(self.cpu.get_int_value(deadframe, locs[0][0])) return 42 FUNC = self.FuncType([llmemory.GCREF, lltype.Signed], lltype.Signed) @@ -4284,7 +4284,7 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 23 locs = rebuild_locs_from_resumedata(fail) - assert self.cpu.get_int_value(deadframe, locs, 0) == 42 + assert self.cpu.get_int_value(deadframe, locs[0][0]) == 42 # make sure that force reads the registers from a zeroed piece of # memory assert values[0] == 0 @@ -4315,13 +4315,13 @@ force_spill(i9) enter_frame(1, descr=jitcode2) call(ConstClass(func2_ptr), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, descr=calldescr2) - resume_put(i3, 0, 0) - resume_put(i4, 0, 1) - resume_put(i5, 0, 2) - resume_put(i6, 0, 3) - resume_put(i7, 0, 4) - resume_put(i8, 0, 5) - resume_put(i9, 0, 6) + resume_put(i3, 1, 0) + resume_put(i4, 1, 1) + resume_put(i5, 1, 2) + resume_put(i6, 1, 3) + resume_put(i7, 1, 4) + resume_put(i8, 1, 5) + resume_put(i9, 1, 6) guard_true(i1, descr=guarddescr) leave_frame() leave_frame() @@ -4381,7 +4381,7 @@ frame = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, frame) assert len(frame.jf_frame) == frame.jf_frame_info.jfi_frame_depth locs = rebuild_locs_from_resumedata(guarddescr) - ref = self.cpu.get_ref_value(frame, locs, 2) + ref = self.cpu.get_ref_value(frame, locs[0][2]) token = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, ref) assert token != frame token = token.resolve() diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -52,10 +52,10 @@ frame.registers_i[position] = jitframe_pos def put_box_float(self, frame, position, jitframe_pos): - xxx + frame.registers_f[position] = jitframe_pos def put_box_ref(self, frame, 
position, jitframe_pos): - xxx + frame.registers_r[position] = jitframe_pos def finish(self): framestack = [] From noreply at buildbot.pypy.org Sun Jan 12 14:47:08 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 14:47:08 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: fix one missing item for inheriting parent in resumebuilder Message-ID: <20140112134708.1D0601C338D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68611:7f1660207c81 Date: 2014-01-12 14:24 +0100 http://bitbucket.org/pypy/pypy/changeset/7f1660207c81/ Log: fix one missing item for inheriting parent in resumebuilder diff --git a/rpython/jit/backend/llsupport/resumebuilder.py b/rpython/jit/backend/llsupport/resumebuilder.py --- a/rpython/jit/backend/llsupport/resumebuilder.py +++ b/rpython/jit/backend/llsupport/resumebuilder.py @@ -46,13 +46,23 @@ self.framestack.pop() class ResumeBuilder(object): - def __init__(self, regalloc, frontend_liveness, descr): + def __init__(self, regalloc, frontend_liveness, descr, inputframes=None, + inputlocs=None): self.newops = [] self.regalloc = regalloc self.current_attachment = {} self.frontend_liveness = frontend_liveness self.frontend_pos = {} self.virtuals = {} + if inputlocs is not None: + i = 0 + for frame_pos, frame in enumerate(inputframes): + for pos_in_frame, box in enumerate(frame): + loc_pos = inputlocs[i].get_jitframe_position() + self.current_attachment[box] = loc_pos + self.frontend_pos[box] = (ConstInt(frame_pos), + ConstInt(pos_in_frame)) + i += 1 def process(self, op): if op.getopnum() == rop.RESUME_PUT: diff --git a/rpython/jit/backend/llsupport/test/test_resumebuilder.py b/rpython/jit/backend/llsupport/test/test_resumebuilder.py --- a/rpython/jit/backend/llsupport/test/test_resumebuilder.py +++ b/rpython/jit/backend/llsupport/test/test_resumebuilder.py @@ -4,6 +4,7 @@ from rpython.jit.codewriter.jitcode import JitCode from rpython.jit.tool.oparser import parse from rpython.jit.metainterp.optimizeopt.util import equaloplists +from rpython.jit.metainterp.test.test_resume2 import rebuild_locs_from_resumedata from rpython.rtyper.lltypesystem import lltype class MockJitCode(JitCode): @@ -116,12 +117,12 @@ def test_bridge(self): jitcode = JitCode("name") - jitcode.setup(num_regs_i=2, num_regs_r=0, num_regs_f=0) + jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) loop = parse(""" [i0] enter_frame(-1, descr=jitcode) - resume_put(i0, 0, 0) i1 = int_lt(i0, 10) + resume_put(i1, 0, 0) guard_true(i1) leave_frame() """, namespace={'jitcode': jitcode}) @@ -129,3 +130,22 @@ looptoken = JitCellToken() self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) + + descr = loop.operations[3].getdescr() + + bridge = parse(""" + [i0] + force_spill(i0) + guard_false(i0) + """) + locs = rebuild_locs_from_resumedata(descr) + self.cpu.compile_bridge(None, descr, [bridge.inputargs], locs, + bridge.operations, looptoken) + + descr = bridge.operations[-1].getdescr() + expected_resume = parse(""" + [] + resume_put(28, 0, 0) + """) + equaloplists(descr.rd_resume_bytecode.opcodes, + expected_resume.operations) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -133,7 +133,8 @@ self.jump_target_descr = None self.final_jump_op = None - def _prepare(self, inputframes, operations, allgcrefs, descr=None): + def _prepare(self, inputframes, operations, allgcrefs, descr=None, + locs=None): cpu = 
self.assembler.cpu self.fm = X86FrameManager(cpu.get_baseofs_of_frame_field()) operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, @@ -141,7 +142,8 @@ # compute longevity of variables x = compute_vars_longevity(inputframes, operations, descr) longevity, last_real_usage, frontend_liveness = x - self.resumebuilder = ResumeBuilder(self, frontend_liveness, descr) + self.resumebuilder = ResumeBuilder(self, frontend_liveness, descr, + inputframes, locs) self.longevity = longevity self.last_real_usage = last_real_usage self.rm = gpr_reg_mgr_cls(self.longevity, @@ -165,7 +167,8 @@ def prepare_bridge(self, inputframes, arglocs, operations, allgcrefs, frame_info, descr): - operations = self._prepare(inputframes, operations, allgcrefs, descr) + operations = self._prepare(inputframes, operations, allgcrefs, descr, + locs=arglocs) self._update_bindings(arglocs, inputframes) self.min_bytes_before_label = 0 return operations From noreply at buildbot.pypy.org Sun Jan 12 14:47:09 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 14:47:09 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: fix more tests Message-ID: <20140112134709.609921C338D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68612:8f540a68ce91 Date: 2014-01-12 14:38 +0100 http://bitbucket.org/pypy/pypy/changeset/8f540a68ce91/ Log: fix more tests diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4397,7 +4397,8 @@ def raising(): bridge = parse(""" [i1, i2] - px = guard_exception(ConstClass(xtp), descr=faildescr2) [i1, i2] + enter_frame(1, descr=jitcode2) + px = guard_exception(ConstClass(xtp), descr=faildescr2) i3 = int_add(i1, i2) i4 = int_add(i1, i3) i5 = int_add(i1, i4) @@ -4414,13 +4415,22 @@ force_spill(i7) force_spill(i8) force_spill(i9) - guard_true(i9) [i3, i4, i5, i6, i7, i8, i9] + resume_put(i3, 1, 0) + resume_put(i4, 1, 1) + resume_put(i5, 1, 2) + resume_put(i6, 1, 3) + resume_put(i7, 1, 4) + resume_put(i8, 1, 5) + resume_put(i9, 1, 6) + guard_true(i9) + leave_frame() finish(i9, descr=finaldescr) """, namespace={'finaldescr': BasicFinalDescr(42), 'faildescr2': BasicFailDescr(1), - 'xtp': xtp + 'xtp': xtp, 'jitcode2': jitcode2, }) - self.cpu.compile_bridge(None, faildescr, bridge.inputargs, + locs = rebuild_locs_from_resumedata(faildescr) + self.cpu.compile_bridge(None, faildescr, [bridge.inputargs], locs, bridge.operations, looptoken) raise LLException(xtp, xptr) @@ -4431,15 +4441,23 @@ EffectInfo.MOST_GENERAL) looptoken = JitCellToken() + jitcode = JitCode('name') + jitcode.setup(num_regs_i=2, num_regs_r=0, num_regs_f=0) + jitcode2 = JitCode('name2') + jitcode2.setup(num_regs_i=7, num_regs_r=0, num_regs_f=0) loop = parse(""" [i0, i1, i2] + enter_frame(-1, descr=jitcode) call(ConstClass(raising_ptr), descr=calldescr) - guard_no_exception(descr=faildescr) [i1, i2] + resume_put(i1, 0, 0) + resume_put(i2, 0, 1) + guard_no_exception(descr=faildescr) finish(i2, descr=finaldescr2) """, namespace={'raising_ptr': raising_ptr, 'calldescr': calldescr, 'faildescr': faildescr, - 'finaldescr2': BasicFinalDescr(1)}) + 'finaldescr2': BasicFinalDescr(1), + 'jitcode': jitcode}) self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) frame = self.cpu.execute_token(looptoken, 1, 2, 3) diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ 
b/rpython/jit/metainterp/resume2.py @@ -48,10 +48,10 @@ xxx pos += 1 - def resume_put(self, jitframe_pos_const, depth, frontend_position): + def resume_put(self, jitframe_pos_const, frame_no, frontend_position): jitframe_pos = jitframe_pos_const.getint() - jitcode = self.metainterp.framestack[-1].jitcode - frame = self.metainterp.framestack[depth] + jitcode = self.metainterp.framestack[frame_no].jitcode + frame = self.metainterp.framestack[frame_no] if frontend_position < jitcode.num_regs_i(): self.put_box_int(frame, frontend_position, jitframe_pos) elif frontend_position < (jitcode.num_regs_r() + jitcode.num_regs_i()): From noreply at buildbot.pypy.org Sun Jan 12 14:47:10 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 14:47:10 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert, armin lurking) finish porting test_runner Message-ID: <20140112134710.9D3221C338D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68613:bd6abaee785f Date: 2014-01-12 14:46 +0100 http://bitbucket.org/pypy/pypy/changeset/bd6abaee785f/ Log: (fijal, rguillebert, armin lurking) finish porting test_runner diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -9,9 +9,11 @@ from rpython.jit.backend.x86.arch import WORD from rpython.jit.backend.x86.rx86 import fits_in_32bits from rpython.jit.backend.llsupport import symbolic +from rpython.jit.codewriter.jitcode import JitCode from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.executor import execute from rpython.jit.backend.test.runner_test import LLtypeBackendTest +from rpython.jit.metainterp.test.test_resume2 import rebuild_locs_from_resumedata from rpython.jit.tool.oparser import parse import ctypes @@ -266,6 +268,8 @@ p = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(lltype.GcStruct('x'))) nullptr = lltype.nullptr(llmemory.GCREF.TO) + jitcode = JitCode('name') + jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) f = BoxInt() for op in allops: for guard in guards: @@ -278,18 +282,28 @@ for b in (bp, n): i1 = BoxInt(1) ops = [ + ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, + descr=jitcode), ResOperation(rop.SAME_AS, [ConstInt(1)], i1), ResOperation(op, [b], f), + ResOperation(rop.RESUME_PUT, [i1, ConstInt(0), + ConstInt(0)], + None), ResOperation(guard, [f], None, - descr=BasicFailDescr()), + descr=BasicFailDescr(1)), ResOperation(rop.FINISH, [ConstInt(0)], None, - descr=BasicFinalDescr()), + descr=BasicFinalDescr(0)), ] - ops[-2].setfailargs([i1]) looptoken = JitCellToken() self.cpu.compile_loop(None, [b], ops, looptoken) deadframe = self.cpu.execute_token(looptoken, b.value) - result = self.cpu.get_int_value(deadframe, 0) + descr = self.cpu.get_latest_descr(deadframe) + if isinstance(descr, BasicFinalDescr): + pos = 0 + else: + locs = rebuild_locs_from_resumedata(descr) + pos = locs[0][0] + result = self.cpu.get_int_value(deadframe, pos) if guard == rop.GUARD_FALSE: assert result == execute(self.cpu, None, op, None, b).value @@ -317,26 +331,38 @@ guards = [rop.GUARD_FALSE, rop.GUARD_TRUE] all = [rop.INT_EQ, rop.INT_NE, rop.INT_LE, rop.INT_LT, rop.INT_GT, rop.INT_GE, rop.UINT_GT, rop.UINT_LT, rop.UINT_LE, rop.UINT_GE] + jitcode = JitCode('name') + jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) for a, b in boxes: for guard in guards: for op in all: res = BoxInt() i1 = 
BoxInt(1) ops = [ + ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], + None, descr=jitcode), ResOperation(rop.SAME_AS, [ConstInt(1)], i1), ResOperation(op, [a, b], res), + ResOperation(rop.RESUME_PUT, [i1, ConstInt(0), + ConstInt(0)], None), ResOperation(guard, [res], None, - descr=BasicFailDescr()), + descr=BasicFailDescr(1)), ResOperation(rop.FINISH, [ConstInt(0)], None, - descr=BasicFinalDescr()), + descr=BasicFinalDescr(0)), ] - ops[-2].setfailargs([i1]) inputargs = [i for i in (a, b) if isinstance(i, Box)] looptoken = JitCellToken() self.cpu.compile_loop(None, inputargs, ops, looptoken) inputvalues = [box.value for box in inputargs] deadframe = self.cpu.execute_token(looptoken, *inputvalues) - result = self.cpu.get_int_value(deadframe, 0) + + descr = self.cpu.get_latest_descr(deadframe) + if isinstance(descr, BasicFinalDescr): + pos = 0 + else: + locs = rebuild_locs_from_resumedata(descr) + pos = locs[0][0] + result = self.cpu.get_int_value(deadframe, pos) expected = execute(self.cpu, None, op, None, a, b).value if guard == rop.GUARD_FALSE: assert result == expected @@ -535,12 +561,12 @@ debug_merge_point('xyz', 0, 0) i1 = int_add(i0, 1) i2 = int_ge(i1, 10) - guard_false(i2) [] + guard_false(i2) label(i1, descr=targettoken) debug_merge_point('xyz', 0, 0) i11 = int_add(i1, 1) i12 = int_ge(i11, 10) - guard_false(i12) [] + guard_false(i12) jump(i11, descr=targettoken) """ ops = parse(loop, namespace={'targettoken': targettoken, From noreply at buildbot.pypy.org Sun Jan 12 15:56:51 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 15:56:51 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: hack hack hack Message-ID: <20140112145651.BA82C1C0212@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68614:8afdd8b46226 Date: 2014-01-12 15:02 +0100 http://bitbucket.org/pypy/pypy/changeset/8afdd8b46226/ Log: hack hack hack diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -71,6 +71,7 @@ # self.inputargs = map(mapping, inputargs) self.operations = [] + xxxx resumebuilder = LLGraphResumeBuilder() for op in operations: if op.is_resume(): @@ -231,7 +232,7 @@ name=''): clt = model.CompiledLoopToken(self, looptoken.number) looptoken.compiled_loop_token = clt - lltrace = LLTrace(inputargs, operations) + lltrace = LLTrace(inputargs, operations, None) clt._llgraph_loop = lltrace clt._llgraph_alltraces = [lltrace] self._record_labels(lltrace) @@ -240,7 +241,7 @@ original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - lltrace = LLTrace(inputargs, operations) + lltrace = LLTrace(inputargs, operations, faildescr) faildescr._llgraph_bridge = lltrace clt._llgraph_alltraces.append(lltrace) self._record_labels(lltrace) From noreply at buildbot.pypy.org Sun Jan 12 15:56:53 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 15:56:53 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: shuffle stuff around Message-ID: <20140112145653.1ADB81C0212@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68615:19684fcbfad9 Date: 2014-01-12 15:04 +0100 http://bitbucket.org/pypy/pypy/changeset/19684fcbfad9/ Log: shuffle stuff around diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1,7 +1,7 
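Taken together, the tests ported in this series follow one skeleton: wrap the operations in enter_frame/leave_frame, attach the live boxes to frontend registers with resume_put before each guard, then read results either at slot 0 after a FINISH or through the rebuilt resume locations after a guard failure. A condensed sketch of that skeleton; the descr helpers are assumed to be the BasicFailDescr/BasicFinalDescr classes used throughout runner_test.py, and the jitcode name is made up for the example:

    from rpython.jit.codewriter.jitcode import JitCode
    from rpython.jit.metainterp.history import JitCellToken
    from rpython.jit.metainterp.test.test_resume2 import rebuild_locs_from_resumedata
    from rpython.jit.backend.test.runner_test import BasicFailDescr, BasicFinalDescr
    from rpython.jit.tool.oparser import parse

    def run_one(cpu, value):
        jitcode = JitCode('sketch')
        jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0)
        loop = parse("""
        [i0]
        enter_frame(-1, descr=jitcode)
        i1 = int_lt(i0, 10)
        resume_put(i1, 0, 0)
        guard_true(i1, descr=faildescr)
        leave_frame()
        finish(i0, descr=finaldescr)
        """, namespace={'jitcode': jitcode,
                        'faildescr': BasicFailDescr(1),
                        'finaldescr': BasicFinalDescr(0)})
        looptoken = JitCellToken()
        cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken)
        deadframe = cpu.execute_token(looptoken, value)
        descr = cpu.get_latest_descr(deadframe)
        if isinstance(descr, BasicFinalDescr):
            # ran to the FINISH: its single value sits at slot 0
            return descr.identifier, cpu.get_int_value(deadframe, 0)
        # guard failed: recover the jitframe positions from the resume data
        locs = rebuild_locs_from_resumedata(descr)
        return descr.identifier, cpu.get_int_value(deadframe, locs[0][0])
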
@@ import py, weakref from rpython.jit.backend import model from rpython.jit.backend.llgraph import support -from rpython.jit.backend.llsupport.resumebuilder import ResumeBuilder,\ +from rpython.jit.backend.resumebuilder import ResumeBuilder,\ LivenessAnalyzer from rpython.jit.metainterp.history import AbstractDescr from rpython.jit.metainterp.history import Const, getkind diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -2,7 +2,7 @@ from rpython.jit.metainterp.history import Const, Box, REF, JitCellToken from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.jit.metainterp.resoperation import rop -from rpython.jit.backend.llsupport.resumebuilder import LivenessAnalyzer + try: from collections import OrderedDict @@ -691,83 +691,6 @@ else: return [self.loc(op.getarg(0))] -def flatten(inputframes): - count = 0 - for frame in inputframes: - count += len(frame) - inputargs = [None] * count - i = 0 - for frame in inputframes: - inputargs[i:i + len(frame)] = frame - i += len(frame) - return inputargs - -def compute_vars_longevity(inputframes, operations, descr=None): - # compute a dictionary that maps variables to index in - # operations that is a "last-time-seen" - - # returns a pair longevity/useful. Non-useful variables are ones that - # never appear in the assembler or it does not matter if they appear on - # stack or in registers. Main example is loop arguments that go - # only to guard operations or to jump or to finish - produced = {} - last_used = {} - last_real_usage = {} - frontend_alive = {} - if descr is None: - inputargs = inputframes[0] - liveness_analyzer = LivenessAnalyzer() - else: - inputargs = flatten(inputframes) - liveness_analyzer = LivenessAnalyzer(inputframes) - start_pos = 0 - for position, op in enumerate(operations): - if op.is_guard(): - liveness_analyzer.interpret_until(operations, position, start_pos) - start_pos = position - framestack = liveness_analyzer.get_live_info() - for frame in framestack: - for item in liveness_analyzer.all_boxes_from(frame): - if item is not None: - last_used[item] = position - frontend_alive[item] = position - - for i in range(len(operations)-1, -1, -1): - op = operations[i] - if op.result: - if op.result not in last_used and op.has_no_side_effect(): - continue - assert op.result not in produced - produced[op.result] = i - opnum = op.getopnum() - for j in range(op.numargs()): - arg = op.getarg(j) - if not isinstance(arg, Box): - continue - if arg not in last_used: - last_used[arg] = i - else: - last_used[arg] = max(last_used[arg], i) - if opnum != rop.JUMP and opnum != rop.LABEL: - if arg not in last_real_usage: - last_real_usage[arg] = i - # - longevity = {} - for arg in produced: - if arg in last_used: - assert isinstance(arg, Box) - assert produced[arg] < last_used[arg] - longevity[arg] = (produced[arg], last_used[arg]) - del last_used[arg] - for arg in inputargs: - assert isinstance(arg, Box) - if arg not in last_used: - longevity[arg] = (-1, -1) - else: - longevity[arg] = (0, last_used[arg]) - del last_used[arg] - assert len(last_used) == 0 - return longevity, last_real_usage, frontend_alive def is_comparison_or_ovf_op(opnum): from rpython.jit.metainterp.resoperation import opclasses diff --git a/rpython/jit/backend/llsupport/resumebuilder.py b/rpython/jit/backend/resumebuilder.py rename from rpython/jit/backend/llsupport/resumebuilder.py rename to 
rpython/jit/backend/resumebuilder.py --- a/rpython/jit/backend/llsupport/resumebuilder.py +++ b/rpython/jit/backend/resumebuilder.py @@ -1,6 +1,6 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation -from rpython.jit.metainterp.history import ConstInt +from rpython.jit.metainterp.history import ConstInt, Box from rpython.jit.metainterp.resume2 import ResumeBytecode, AbstractResumeReader class LivenessAnalyzer(AbstractResumeReader): @@ -121,3 +121,82 @@ def finish(self, parent, parent_position, clt): return ResumeBytecode(self.newops, parent, parent_position, clt) + +def flatten(inputframes): + count = 0 + for frame in inputframes: + count += len(frame) + inputargs = [None] * count + i = 0 + for frame in inputframes: + inputargs[i:i + len(frame)] = frame + i += len(frame) + return inputargs + + +def compute_vars_longevity(inputframes, operations, descr=None): + # compute a dictionary that maps variables to index in + # operations that is a "last-time-seen" + + # returns a pair longevity/useful. Non-useful variables are ones that + # never appear in the assembler or it does not matter if they appear on + # stack or in registers. Main example is loop arguments that go + # only to guard operations or to jump or to finish + produced = {} + last_used = {} + last_real_usage = {} + frontend_alive = {} + if descr is None: + inputargs = inputframes[0] + liveness_analyzer = LivenessAnalyzer() + else: + inputargs = flatten(inputframes) + liveness_analyzer = LivenessAnalyzer(inputframes) + start_pos = 0 + for position, op in enumerate(operations): + if op.is_guard(): + liveness_analyzer.interpret_until(operations, position, start_pos) + start_pos = position + framestack = liveness_analyzer.get_live_info() + for frame in framestack: + for item in liveness_analyzer.all_boxes_from(frame): + if item is not None: + last_used[item] = position + frontend_alive[item] = position + + for i in range(len(operations)-1, -1, -1): + op = operations[i] + if op.result: + if op.result not in last_used and op.has_no_side_effect(): + continue + assert op.result not in produced + produced[op.result] = i + opnum = op.getopnum() + for j in range(op.numargs()): + arg = op.getarg(j) + if not isinstance(arg, Box): + continue + if arg not in last_used: + last_used[arg] = i + else: + last_used[arg] = max(last_used[arg], i) + if opnum != rop.JUMP and opnum != rop.LABEL: + if arg not in last_real_usage: + last_real_usage[arg] = i + # + longevity = {} + for arg in produced: + if arg in last_used: + assert isinstance(arg, Box) + assert produced[arg] < last_used[arg] + longevity[arg] = (produced[arg], last_used[arg]) + del last_used[arg] + for arg in inputargs: + assert isinstance(arg, Box) + if arg not in last_used: + longevity[arg] = (-1, -1) + else: + longevity[arg] = (0, last_used[arg]) + del last_used[arg] + assert len(last_used) == 0 + return longevity, last_real_usage, frontend_alive diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -5,7 +5,7 @@ from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, debug_bridge) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper -from rpython.jit.backend.llsupport.regalloc import flatten +from rpython.jit.backend.resumebuilder import flatten from rpython.jit.metainterp.history import Const, Box, VOID from rpython.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT from 
rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -7,10 +7,10 @@ from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) from rpython.jit.backend.llsupport.gcmap import allocate_gcmap -from rpython.jit.backend.llsupport.resumebuilder import ResumeBuilder +from rpython.jit.backend.resumebuilder import ResumeBuilder,\ + compute_vars_longevity, flatten from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, - RegisterManager, TempBox, compute_vars_longevity, is_comparison_or_ovf_op, - flatten) + RegisterManager, TempBox, is_comparison_or_ovf_op) from rpython.jit.backend.x86 import rx86 from rpython.jit.backend.x86.arch import (WORD, JITFRAME_FIXED_SIZE, IS_X86_32, IS_X86_64) From noreply at buildbot.pypy.org Sun Jan 12 15:56:54 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 15:56:54 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) finish porting llgraph backend Message-ID: <20140112145654.4AEF81C0212@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68616:003f6f6d91dd Date: 2014-01-12 15:56 +0100 http://bitbucket.org/pypy/pypy/changeset/003f6f6d91dd/ Log: (fijal, rguillebert) finish porting llgraph backend diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -2,7 +2,7 @@ from rpython.jit.backend import model from rpython.jit.backend.llgraph import support from rpython.jit.backend.resumebuilder import ResumeBuilder,\ - LivenessAnalyzer + LivenessAnalyzer, compute_vars_longevity, flatten from rpython.jit.metainterp.history import AbstractDescr from rpython.jit.metainterp.history import Const, getkind from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID @@ -16,47 +16,78 @@ from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_ulonglong from rpython.rlib.rtimer import read_timestamp +class Position(object): + def __init__(self, pos): + self.pos = pos + + def get_jitframe_position(self): + return self.pos + +class ResumeFrame(object): + def __init__(self, num, start_pos): + self.registers = [None] * num + self.start_pos = start_pos + class LLGraphResumeBuilder(ResumeBuilder): - def __init__(self): - ResumeBuilder.__init__(self, None) + def __init__(self, frontend_liveness, descr, inputframes, inputlocs): self.liveness = LivenessAnalyzer() self.numbering = {} + self.framestack = [] + locs = [] + start_pos = 0 + for frame_pos, frame in enumerate(inputframes): + if inputlocs is not None: + self.framestack.append(ResumeFrame(len(frame), start_pos)) + for pos_in_frame, box in enumerate(frame): + if inputlocs is not None: + pos = inputlocs[frame_pos][pos_in_frame] + self.framestack[-1].registers[pos_in_frame] = box + else: + pos = len(self.numbering) + self.numbering[box] = pos + locs.append(Position(pos)) + start_pos += len(frame) + ResumeBuilder.__init__(self, self, frontend_liveness, descr, + inputframes, locs) + + def loc(self, box, must_exist=True): + return Position(self.numbering[box]) def process(self, op): - func = getattr(self, 'process_' + op.getopname(), None) - if func is not None: - func(op) + getattr(self, 'process_' + op.getopname())(op) + 
ResumeBuilder.process(self, op) def process_enter_frame(self, op): - self.liveness.enter_frame(op.getdescr()) - ResumeBuilder.process_enter_frame(self, op) + if self.framestack: + prev_frame = self.framestack[-1] + start_pos = prev_frame.start_pos + len(prev_frame.registers) + else: + start_pos = 0 + self.framestack.append(ResumeFrame(op.getdescr().num_regs(), start_pos)) def process_leave_frame(self, op): - self.liveness.leave_frame() - ResumeBuilder.process_leave_frame(self, op) + self.framestack.pop() def process_resume_put(self, op): - self.liveness.put(op.getarg(0), op.getarg(1).getint(), - op.getarg(2).getint()) - ResumeBuilder.process_resume_put(self, op) - - def _find_position_for_box(self, v): - if v not in self.numbering: - self.numbering[v] = len(self.numbering) - return self.numbering[v] + box = op.getarg(0) + frame_pos = op.getarg(1).getint() + pos_in_frame = op.getarg(2).getint() + i = self.framestack[frame_pos].start_pos + pos_in_frame + self.numbering[box] = i + self.framestack[frame_pos].registers[pos_in_frame] = box def get_numbering(self, mapping, op): - numbering = [] - for f in self.liveness.framestack: - for v in f: - numbering.append(mapping(v)) - return numbering - + lst = [] + for frame in self.framestack: + for reg in frame.registers: + lst.append(mapping(reg)) + return lst + class LLTrace(object): has_been_freed = False invalid = False - def __init__(self, inputargs, operations): + def __init__(self, inputframes, operations, descr, locs=None): # We need to clone the list of operations because the # front-end will mutate them under our feet again. We also # need to make sure things get freed. @@ -69,10 +100,13 @@ newbox = _cache[box] = box.__class__() return newbox # - self.inputargs = map(mapping, inputargs) + self.inputargs = map(mapping, flatten(inputframes)) self.operations = [] - xxxx - resumebuilder = LLGraphResumeBuilder() + x = compute_vars_longevity(inputframes, operations, descr) + longevity, last_real_usage, frontend_liveness = x + + resumebuilder = LLGraphResumeBuilder(frontend_liveness, descr, + inputframes, locs) for op in operations: if op.is_resume(): resumebuilder.process(op) @@ -90,10 +124,20 @@ newdescr) if op.is_guard(): newop.failargs = resumebuilder.get_numbering(mapping, op) - newop.failarg_numbers = resumebuilder.get_numbering( - lambda x: resumebuilder.numbering[x], op) + newop.getdescr().rd_bytecode_position = len(resumebuilder.newops) self.operations.append(newop) + if descr is None: + parent = None + parent_position = 0 + else: + parent = descr.rd_resume_bytecode + parent_position = descr.rd_bytecode_position + bytecode = resumebuilder.finish(parent, parent_position, self) + for op in operations: + if op.is_guard(): + op.getdescr().rd_resume_bytecode = bytecode + class WeakrefDescr(AbstractDescr): def __init__(self, realdescr): self.realdescrref = weakref.ref(realdescr) @@ -232,16 +276,16 @@ name=''): clt = model.CompiledLoopToken(self, looptoken.number) looptoken.compiled_loop_token = clt - lltrace = LLTrace(inputargs, operations, None) + lltrace = LLTrace([inputargs], operations, None) clt._llgraph_loop = lltrace clt._llgraph_alltraces = [lltrace] self._record_labels(lltrace) - def compile_bridge(self, logger, faildescr, inputargs, operations, + def compile_bridge(self, logger, faildescr, inputframes, locs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - lltrace = LLTrace(inputargs, operations, faildescr) + lltrace = LLTrace(inputframes, operations, faildescr, locs) 
faildescr._llgraph_bridge = lltrace clt._llgraph_alltraces.append(lltrace) self._record_labels(lltrace) @@ -317,7 +361,7 @@ assert isinstance(frame, LLFrame) assert frame.forced_deadframe is None values = [] - for box in frame.force_guard_op.getfailargs(): + for box in frame.force_guard_op.failargs: if box is not None: if box is not frame.current_op.result: value = frame.env[box] @@ -739,12 +783,11 @@ # ----------------------------------------------------- def fail_guard(self, descr, saved_data=None): - values = {} + values = [] for i in range(len(self.current_op.failargs)): arg = self.current_op.failargs[i] value = self.env[arg] - index = self.current_op.failarg_numbers[i] - values[index] = value + values.append(value) if hasattr(descr, '_llgraph_bridge'): target = (descr._llgraph_bridge, -1) raise Jump(target, values) From noreply at buildbot.pypy.org Sun Jan 12 16:04:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 12 Jan 2014 16:04:25 +0100 (CET) Subject: [pypy-commit] pypy default: Fix in trackgcroot: support another style of "jmp *addr" to do jumptables Message-ID: <20140112150425.B720F1C039A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68617:3c795daae3eb Date: 2014-01-12 15:53 +0100 http://bitbucket.org/pypy/pypy/changeset/3c795daae3eb/ Log: Fix in trackgcroot: support another style of "jmp *addr" to do jumptables diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -31,7 +31,7 @@ cls.r_binaryinsn = re.compile(r"\t[a-z]\w*\s+(?P"+cls.OPERAND+"),\s*(?P"+cls.OPERAND+")\s*$") cls.r_jump = re.compile(r"\tj\w+\s+"+cls.LABEL+"\s*" + cls.COMMENT + "$") - cls.r_jmp_switch = re.compile(r"\tjmp\t[*]"+cls.LABEL+"[(]") + cls.r_jmp_switch = re.compile(r"\tjmp\t[*]") cls.r_jmp_source = re.compile(r"\d*[(](%[\w]+)[,)]") def __init__(self, funcname, lines, filetag=0): @@ -697,10 +697,22 @@ tablelabels = [] match = self.r_jmp_switch.match(line) if match: - # this is a jmp *Label(%index), used for table-based switches. - # Assume that the table is just a list of lines looking like - # .long LABEL or .long 0, ending in a .text or .section .text.hot. - tablelabels.append(match.group(1)) + # this is a jmp *Label(%index) or jmp *%addr, used for + # table-based switches. Assume that the table is coming + # after a .section .rodata and a label, and is a list of + # lines looking like .long LABEL or .long 0 or .long L2-L1, + # ending in a .text or .section .text.hot. + lineno = self.currentlineno + 1 + if '.section' not in self.lines[lineno]: + pass # bah, probably a tail-optimized indirect call... + else: + assert '.rodata' in self.lines[lineno] + lineno += 1 + while '.align' in self.lines[lineno]: + lineno += 1 + match = self.r_label.match(self.lines[lineno]) + assert match, repr(self.lines[lineno]) + tablelabels.append(match.group(1)) elif self.r_unaryinsn_star.match(line): # maybe a jmp similar to the above, but stored in a # registry: From noreply at buildbot.pypy.org Sun Jan 12 16:04:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 12 Jan 2014 16:04:26 +0100 (CET) Subject: [pypy-commit] pypy default: Disable running tests of asmgcc for OS/X. Don't use asmgcc there anyway! 
Message-ID: <20140112150426.DFD661C039A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68618:73c2132b571d Date: 2014-01-12 16:00 +0100 http://bitbucket.org/pypy/pypy/changeset/73c2132b571d/ Log: Disable running tests of asmgcc for OS/X. Don't use asmgcc there anyway! diff --git a/rpython/translator/c/gcc/test/test_trackgcroot.py b/rpython/translator/c/gcc/test/test_trackgcroot.py --- a/rpython/translator/c/gcc/test/test_trackgcroot.py +++ b/rpython/translator/c/gcc/test/test_trackgcroot.py @@ -127,6 +127,8 @@ def check_computegcmaptable(format, path): if format == 'msvc': r_globallabel = re.compile(r"([\w]+)::") + elif format == 'darwin' or format == 'darwin64': + py.test.skip("disabled on OS/X's terribly old gcc") else: r_globallabel = re.compile(r"([\w]+)=[.]+") print From noreply at buildbot.pypy.org Sun Jan 12 16:10:17 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 16:10:17 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) hack at scaffolding until we pass the first test Message-ID: <20140112151017.F17AE1C039A@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68619:df624ee695ee Date: 2014-01-12 16:09 +0100 http://bitbucket.org/pypy/pypy/changeset/df624ee695ee/ Log: (fijal, rguillebert) hack at scaffolding until we pass the first test diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -168,14 +168,20 @@ def test_simple(self): ops = """ [i] + enter_frame(-1, descr=jitcode) i0 = int_sub(i, 1) - guard_value(i0, 0) [i0] + resume_put(i0, 0, 0) + guard_value(i0, 0) + leave_frame() jump(i) """ expected = """ [i] + enter_frame(-1, descr=jitcode) i0 = int_sub(i, 1) - guard_value(i0, 0) [i0] + resume_put(i0, 0, 0) + guard_value(i0, 0) + leave_frame() jump(1) """ self.optimize_loop(ops, expected) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -19,6 +19,7 @@ from rpython.config.translationoption import get_combined_translation_config from rpython.jit.metainterp.resoperation import rop, opname, ResOperation from rpython.jit.metainterp.optimizeopt.unroll import Inliner +from rpython.jit.codewriter.jitcode import JitCode def test_sort_descrs(): class PseudoDescr(AbstractDescr): @@ -50,26 +51,6 @@ py.test.raises(AssertionError, "equaloplists(loop1.operations, loop3.operations)") -def test_equaloplists_fail_args(): - ops = """ - [i0] - i1 = int_add(i0, 1) - i2 = int_add(i1, 1) - guard_true(i1) [i2, i1] - jump(i1) - """ - namespace = {} - loop1 = pure_parse(ops, namespace=namespace) - loop2 = pure_parse(ops.replace("[i2, i1]", "[i1, i2]"), - namespace=namespace) - py.test.raises(AssertionError, - "equaloplists(loop1.operations, loop2.operations)") - assert equaloplists(loop1.operations, loop2.operations, - strict_fail_args=False) - loop3 = pure_parse(ops.replace("[i2, i1]", "[i2, i0]"), - namespace=namespace) - py.test.raises(AssertionError, - "equaloplists(loop1.operations, loop3.operations)") # ____________________________________________________________ @@ -278,6 +259,9 @@ register_known_gctype(cpu, intobj_immut_vtable, INTOBJ_IMMUT) register_known_gctype(cpu, 
ptrobj_immut_vtable, PTROBJ_IMMUT) + jitcode = JitCode('name') + jitcode.setup(num_regs_i=2, num_regs_r=0, num_regs_f=0) + namespace = locals() # ____________________________________________________________ @@ -325,21 +309,20 @@ _kind2count = {history.INT: 1, history.REF: 2, history.FLOAT: 3} return sorted(boxes, key=lambda box: _kind2count[box.type]) +fail_descr = history.BasicFailDescr(1) +final_descr = history.BasicFinalDescr(2) + +def invent_fail_descr(model, opnum): + if opnum == rop.FINISH: + return final_descr + return fail_descr + class BaseTest(object): def parse(self, s, boxkinds=None): return parse(s, self.cpu, self.namespace, type_system=self.type_system, - boxkinds=boxkinds, - invent_fail_descr=self.invent_fail_descr) - - def invent_fail_descr(self, model, opnum, fail_args): - if fail_args is None: - return None - descr = Storage() - descr.rd_frame_info_list = resume.FrameInfo(None, "code", 11) - descr.rd_snapshot = resume.Snapshot(None, _sortboxes(fail_args)) - return descr + boxkinds=boxkinds, invent_fail_descr=invent_fail_descr) def assert_equal(self, optimized, expected, text_right=None): from rpython.jit.metainterp.optimizeopt.util import equaloplists @@ -349,7 +332,7 @@ assert box1.__class__ == box2.__class__ remap[box2] = box1 assert equaloplists(optimized.operations, - expected.operations, False, remap, text_right) + expected.operations, remap, text_right) def _do_optimize_loop(self, loop, call_pure_results): from rpython.jit.metainterp.optimizeopt import optimize_trace From noreply at buildbot.pypy.org Sun Jan 12 16:17:52 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 12 Jan 2014 16:17:52 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) Hack test_optimizebasic a little Message-ID: <20140112151752.2B8A21C338D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68620:70d01a5dd15d Date: 2014-01-12 16:17 +0100 http://bitbucket.org/pypy/pypy/changeset/70d01a5dd15d/ Log: (fijal, rguillebert) Hack test_optimizebasic a little diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -11,31 +11,6 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.rlib.rarithmetic import LONG_BIT -def test_store_final_boxes_in_guard(): - from rpython.jit.metainterp.compile import ResumeGuardDescr - from rpython.jit.metainterp.resume import tag, TAGBOX - b0 = BoxInt() - b1 = BoxInt() - opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(LLtypeMixin.cpu), - None) - fdescr = ResumeGuardDescr() - op = ResOperation(rop.GUARD_TRUE, ['dummy'], None, descr=fdescr) - # setup rd data - fi0 = resume.FrameInfo(None, "code0", 11) - fdescr.rd_frame_info_list = resume.FrameInfo(fi0, "code1", 33) - snapshot0 = resume.Snapshot(None, [b0]) - fdescr.rd_snapshot = resume.Snapshot(snapshot0, [b1]) - # - opt.store_final_boxes_in_guard(op, []) - if op.getfailargs() == [b0, b1]: - assert list(fdescr.rd_numb.nums) == [tag(1, TAGBOX)] - assert list(fdescr.rd_numb.prev.nums) == [tag(0, TAGBOX)] - else: - assert op.getfailargs() == [b1, b0] - assert list(fdescr.rd_numb.nums) == [tag(0, TAGBOX)] - assert list(fdescr.rd_numb.prev.nums) == [tag(1, TAGBOX)] - assert fdescr.rd_virtuals is None - assert fdescr.rd_consts == [] def test_sharing_field_lists_of_virtual(): class FakeOptimizer(object): @@ 
-189,12 +164,34 @@ def test_constant_propagate(self): ops = """ [] + enter_frame(-1, descr=jitcode) i0 = int_add(2, 3) i1 = int_is_true(i0) - guard_true(i1) [] + guard_true(i1) i2 = int_is_zero(i1) - guard_false(i2) [] - guard_value(i0, 5) [] + guard_false(i2) + guard_value(i0, 5) + leave_frame() + jump() + """ + expected = """ + [] + enter_frame(-1, descr=jitcode) + leave_frame() + jump() + """ + self.optimize_loop(ops, expected) + + def test_constant_propagate_ovf(self): + ops = """ + [] + i0 = int_add_ovf(2, 3) + guard_no_overflow() + i1 = int_is_true(i0) + guard_true(i1) + i2 = int_is_zero(i1) + guard_false(i2) + guard_value(i0, 5) jump() """ expected = """ @@ -203,36 +200,18 @@ """ self.optimize_loop(ops, expected) - def test_constant_propagate_ovf(self): - ops = """ - [] - i0 = int_add_ovf(2, 3) - guard_no_overflow() [] - i1 = int_is_true(i0) - guard_true(i1) [] - i2 = int_is_zero(i1) - guard_false(i2) [] - guard_value(i0, 5) [] - jump() - """ - expected = """ - [] - jump() - """ - self.optimize_loop(ops, expected) - # ---------- def test_remove_guard_class_1(self): ops = """ [p0] - guard_class(p0, ConstClass(node_vtable)) [] - guard_class(p0, ConstClass(node_vtable)) [] + guard_class(p0, ConstClass(node_vtable)) + guard_class(p0, ConstClass(node_vtable)) jump(p0) """ expected = """ [p0] - guard_class(p0, ConstClass(node_vtable)) [] + guard_class(p0, ConstClass(node_vtable)) jump(p0) """ self.optimize_loop(ops, expected) @@ -242,7 +221,7 @@ [i0] p0 = new_with_vtable(ConstClass(node_vtable)) escape(p0) - guard_class(p0, ConstClass(node_vtable)) [] + guard_class(p0, ConstClass(node_vtable)) jump(i0) """ expected = """ @@ -257,7 +236,7 @@ ops = """ [i0] p0 = same_as(ConstPtr(myptr)) - guard_class(p0, ConstClass(node_vtable)) [] + guard_class(p0, ConstClass(node_vtable)) jump(i0) """ expected = """ @@ -270,15 +249,15 @@ ops = """ [i0] i1 = int_lt(i0, 0) - guard_true(i1) [] + guard_true(i1) i2 = int_ge(i0, 0) - guard_false(i2) [] + guard_false(i2) jump(i0) """ expected = """ [i0] i1 = int_lt(i0, 0) - guard_true(i1) [] + guard_true(i1) jump(i0) """ self.optimize_loop(ops, expected) @@ -287,15 +266,15 @@ ops = """ [i0] i1 = int_gt(i0, 0) - guard_true(i1) [] + guard_true(i1) i2 = int_le(i0, 0) - guard_false(i2) [] + guard_false(i2) jump(i0) """ expected = """ [i0] i1 = int_gt(i0, 0) - guard_true(i1) [] + guard_true(i1) jump(i0) """ self.optimize_loop(ops, expected) @@ -304,15 +283,15 @@ ops = """ [i0] i1 = int_gt(i0, 0) - guard_true(i1) [] + guard_true(i1) i2 = int_lt(0, i0) - guard_true(i2) [] + guard_true(i2) jump(i0) """ expected = """ [i0] i1 = int_gt(i0, 0) - guard_true(i1) [] + guard_true(i1) jump(i0) """ self.optimize_loop(ops, expected) @@ -321,15 +300,15 @@ ops = """ [i0] i1 = int_gt(i0, 0) - guard_true(i1) [] + guard_true(i1) i2 = int_ge(0, i0) - guard_false(i2) [] + guard_false(i2) jump(i0) """ expected = """ [i0] i1 = int_gt(i0, 0) - guard_true(i1) [] + guard_true(i1) jump(i0) """ self.optimize_loop(ops, expected) @@ -338,9 +317,9 @@ ops = """ [] i0 = escape() - guard_value(i0, 0) [] + guard_value(i0, 0) i1 = int_add(i0, 1) - guard_value(i1, 1) [] + guard_value(i1, 1) i2 = int_add(i1, 2) escape(i2) jump() @@ -348,7 +327,7 @@ expected = """ [] i0 = escape() - guard_value(i0, 0) [] + guard_value(i0, 0) escape(3) jump() """ @@ -357,7 +336,7 @@ def test_remove_guard_value_if_constant(self): ops = """ [p1] - guard_value(p1, ConstPtr(myptr)) [] + guard_value(p1, ConstPtr(myptr)) jump(ConstPtr(myptr)) """ expected = """ @@ -370,13 +349,13 @@ def test_ooisnull_oononnull_1(self): ops = 
""" [p0] - guard_class(p0, ConstClass(node_vtable)) [] - guard_nonnull(p0) [] + guard_class(p0, ConstClass(node_vtable)) + guard_nonnull(p0) jump(p0) """ expected = """ [p0] - guard_class(p0, ConstClass(node_vtable)) [] + guard_class(p0, ConstClass(node_vtable)) jump(p0) """ self.optimize_loop(ops, expected) @@ -385,9 +364,27 @@ ops = """ [i0] i1 = int_is_true(i0) - guard_true(i1) [] + guard_true(i1) i2 = int_is_true(i0) - guard_true(i2) [] + guard_true(i2) + jump(i0) + """ + expected = """ + [i0] + i1 = int_is_true(i0) + guard_true(i1) + jump(i0) + """ + self.optimize_loop(ops, expected) + + def test_int_is_true_is_zero(self): + py.test.skip("XXX implement me") + ops = """ + [i0] + i1 = int_is_true(i0) + guard_true(i1) + i2 = int_is_zero(i0) + guard_false(i2) [] jump(i0) """ expected = """ @@ -398,37 +395,19 @@ """ self.optimize_loop(ops, expected) - def test_int_is_true_is_zero(self): - py.test.skip("XXX implement me") - ops = """ - [i0] - i1 = int_is_true(i0) - guard_true(i1) [] - i2 = int_is_zero(i0) - guard_false(i2) [] - jump(i0) - """ - expected = """ - [i0] - i1 = int_is_true(i0) - guard_true(i1) [] - jump(i0) - """ - self.optimize_loop(ops, expected) - def test_int_is_zero_int_is_true(self): ops = """ [i0] i1 = int_is_zero(i0) - guard_true(i1) [] + guard_true(i1) i2 = int_is_true(i0) - guard_false(i2) [] + guard_false(i2) jump(i0) """ expected = """ [i0] i1 = int_is_zero(i0) - guard_true(i1) [] + guard_true(i1) jump(0) """ self.optimize_loop(ops, expected) @@ -436,13 +415,13 @@ def test_ooisnull_oononnull_2(self): ops = """ [p0] - guard_nonnull(p0) [] - guard_nonnull(p0) [] + guard_nonnull(p0) + guard_nonnull(p0) jump(p0) """ expected = """ [p0] - guard_nonnull(p0) [] + guard_nonnull(p0) jump(p0) """ self.optimize_loop(ops, expected) @@ -451,14 +430,14 @@ ops = """ [] p0 = escape() - guard_isnull(p0) [] - guard_isnull(p0) [] + guard_isnull(p0) + guard_isnull(p0) jump() """ expected = """ [] p0 = escape() - guard_isnull(p0) [] + guard_isnull(p0) jump() """ self.optimize_loop(ops, expected) @@ -466,16 +445,25 @@ def test_ooisnull_oononnull_via_virtual(self): ops = """ [p0] + enter_frame(-1, descr=jitcode) pv = new_with_vtable(ConstClass(node_vtable)) setfield_gc(pv, p0, descr=valuedescr) - guard_nonnull(p0) [] + resume_put(p0, 0, 2) + guard_nonnull(p0) p1 = getfield_gc(pv, descr=valuedescr) - guard_nonnull(p1) [] + guard_nonnull(p1) + leave_frame() jump(p0) """ + xxx expected = """ [p0] - guard_nonnull(p0) [] + enter_frame(-1, descr=jitcode) + resume_put(p0, 0, 2) + pv = resume_new_with_vtable(ConstClass(node_vtable)) + resume_setfield_gc(p0, pv, descr=...) + guard_nonnull(p0) + leave_frame() jump(p0) """ self.optimize_loop(ops, expected) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -260,7 +260,7 @@ register_known_gctype(cpu, ptrobj_immut_vtable, PTROBJ_IMMUT) jitcode = JitCode('name') - jitcode.setup(num_regs_i=2, num_regs_r=0, num_regs_f=0) + jitcode.setup(num_regs_i=2, num_regs_r=1, num_regs_f=0) namespace = locals() From noreply at buildbot.pypy.org Sun Jan 12 17:38:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 12 Jan 2014 17:38:33 +0100 (CET) Subject: [pypy-commit] pypy default: Explicitly disallow asmgcc on OS/X. 
Message-ID: <20140112163833.0E5C01C07AC@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: 
Changeset: r68621:0feb0b82e686
Date: 2014-01-12 17:37 +0100
http://bitbucket.org/pypy/pypy/changeset/0feb0b82e686/

Log: Explicitly disallow asmgcc on OS/X.

diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py
--- a/rpython/config/translationoption.py
+++ b/rpython/config/translationoption.py
@@ -363,6 +363,10 @@
     # if we have specified strange inconsistent settings.
     config.translation.gc = config.translation.gc
+    # disallow asmgcc on OS/X
+    if config.translation.gcrootfinder == "asmgcc":
+        assert sys.platform != "darwin"
+
 # ----------------------------------------------------------------
 def set_platform(config):

From noreply at buildbot.pypy.org Sun Jan 12 18:45:23 2014
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Sun, 12 Jan 2014 18:45:23 +0100 (CET)
Subject: [pypy-commit] pypy default: Apply the same logic here as on str_decode_ascii
Message-ID: <20140112174523.3743B1D2410@cobra.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: 
Changeset: r68622:fe10412ef1c3
Date: 2014-01-12 09:43 -0800
http://bitbucket.org/pypy/pypy/changeset/fe10412ef1c3/

Log: Apply the same logic here as on str_decode_ascii

diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py
--- a/rpython/rlib/runicode.py
+++ b/rpython/rlib/runicode.py
@@ -988,6 +988,8 @@
     return result.build(), pos
+# Specialize on the errorhandler when it's a constant
+ at specialize.arg_or_var(3)
 def unicode_encode_ucs1_helper(p, size, errors, errorhandler=None, limit=256):
     if errorhandler is None:

From noreply at buildbot.pypy.org Sun Jan 12 18:45:24 2014
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Sun, 12 Jan 2014 18:45:24 +0100 (CET)
Subject: [pypy-commit] pypy default: Move dont_look_insides around to expose more mapdict logic to the JIT
Message-ID: <20140112174524.61B3D1D2411@cobra.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: 
Changeset: r68623:e50bdf0e8179
Date: 2014-01-12 09:44 -0800
http://bitbucket.org/pypy/pypy/changeset/e50bdf0e8179/

Log: Move dont_look_insides around to expose more mapdict logic to the JIT

diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py
--- a/pypy/module/_weakref/interp__weakref.py
+++ b/pypy/module/_weakref/interp__weakref.py
@@ -52,6 +52,7 @@
     # weakref callbacks are not invoked eagerly here. They are
    # invoked by self.__del__() anyway.
+ @jit.dont_look_inside def get_or_make_weakref(self, w_subtype, w_obj): space = self.space w_weakreftype = space.gettypeobject(W_Weakref.typedef) @@ -70,6 +71,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def get_or_make_proxy(self, w_obj): space = self.space if self.cached_proxy is not None: @@ -130,6 +132,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def make_proxy_with_callback(self, w_obj, w_callable): space = self.space if space.is_true(space.callable(w_obj)): @@ -240,7 +243,7 @@ w_obj.setweakref(space, lifeline) return lifeline - at jit.dont_look_inside + def get_or_make_weakref(space, w_subtype, w_obj): return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj) @@ -314,15 +317,16 @@ w_obj = force(space, self) return space.call_args(w_obj, __args__) - at jit.dont_look_inside + def get_or_make_proxy(space, w_obj): return getlifeline(space, w_obj).get_or_make_proxy(w_obj) - at jit.dont_look_inside + def make_proxy_with_callback(space, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_proxy_with_callback(w_obj, w_callable) + def proxy(space, w_obj, w_callable=None): """Create a proxy object that weakly references 'obj'. 'callback', if given, is called with the proxy as an argument when 'obj' From noreply at buildbot.pypy.org Sun Jan 12 18:45:25 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 12 Jan 2014 18:45:25 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20140112174525.911051D2411@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68624:690573814782 Date: 2014-01-12 09:44 -0800 http://bitbucket.org/pypy/pypy/changeset/690573814782/ Log: merged upstream diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -363,6 +363,10 @@ # if we have specified strange inconsistent settings. config.translation.gc = config.translation.gc + # disallow asmgcc on OS/X + if config.translation.gcrootfinder == "asmgcc": + assert sys.platform != "darwin" + # ---------------------------------------------------------------- def set_platform(config): diff --git a/rpython/translator/c/gcc/test/test_trackgcroot.py b/rpython/translator/c/gcc/test/test_trackgcroot.py --- a/rpython/translator/c/gcc/test/test_trackgcroot.py +++ b/rpython/translator/c/gcc/test/test_trackgcroot.py @@ -127,6 +127,8 @@ def check_computegcmaptable(format, path): if format == 'msvc': r_globallabel = re.compile(r"([\w]+)::") + elif format == 'darwin' or format == 'darwin64': + py.test.skip("disabled on OS/X's terribly old gcc") else: r_globallabel = re.compile(r"([\w]+)=[.]+") print diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -31,7 +31,7 @@ cls.r_binaryinsn = re.compile(r"\t[a-z]\w*\s+(?P"+cls.OPERAND+"),\s*(?P"+cls.OPERAND+")\s*$") cls.r_jump = re.compile(r"\tj\w+\s+"+cls.LABEL+"\s*" + cls.COMMENT + "$") - cls.r_jmp_switch = re.compile(r"\tjmp\t[*]"+cls.LABEL+"[(]") + cls.r_jmp_switch = re.compile(r"\tjmp\t[*]") cls.r_jmp_source = re.compile(r"\d*[(](%[\w]+)[,)]") def __init__(self, funcname, lines, filetag=0): @@ -697,10 +697,22 @@ tablelabels = [] match = self.r_jmp_switch.match(line) if match: - # this is a jmp *Label(%index), used for table-based switches. 
- # Assume that the table is just a list of lines looking like - # .long LABEL or .long 0, ending in a .text or .section .text.hot. - tablelabels.append(match.group(1)) + # this is a jmp *Label(%index) or jmp *%addr, used for + # table-based switches. Assume that the table is coming + # after a .section .rodata and a label, and is a list of + # lines looking like .long LABEL or .long 0 or .long L2-L1, + # ending in a .text or .section .text.hot. + lineno = self.currentlineno + 1 + if '.section' not in self.lines[lineno]: + pass # bah, probably a tail-optimized indirect call... + else: + assert '.rodata' in self.lines[lineno] + lineno += 1 + while '.align' in self.lines[lineno]: + lineno += 1 + match = self.r_label.match(self.lines[lineno]) + assert match, repr(self.lines[lineno]) + tablelabels.append(match.group(1)) elif self.r_unaryinsn_star.match(line): # maybe a jmp similar to the above, but stored in a # registry: From noreply at buildbot.pypy.org Sun Jan 12 21:54:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 12 Jan 2014 21:54:09 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: add an easy topic from pypy-dev discussion Message-ID: <20140112205409.0A8421C07AC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5125:ea05f6e425ee Date: 2014-01-12 21:53 +0100 http://bitbucket.org/pypy/extradoc/changeset/ea05f6e425ee/ Log: add an easy topic from pypy-dev discussion diff --git a/sprintinfo/leysin-winter-2014/planning.txt b/sprintinfo/leysin-winter-2014/planning.txt --- a/sprintinfo/leysin-winter-2014/planning.txt +++ b/sprintinfo/leysin-winter-2014/planning.txt @@ -30,3 +30,4 @@ * discuss about C++ / cppyy (johan, ?) +* ctypes: "array_of_char.value = string": speed up this case From noreply at buildbot.pypy.org Sun Jan 12 22:08:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 12 Jan 2014 22:08:50 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Some other easy or easyish tasks Message-ID: <20140112210850.97AD51C07AC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5126:337e2c0e73aa Date: 2014-01-12 22:08 +0100 http://bitbucket.org/pypy/extradoc/changeset/337e2c0e73aa/ Log: Some other easy or easyish tasks diff --git a/sprintinfo/leysin-winter-2014/planning.txt b/sprintinfo/leysin-winter-2014/planning.txt --- a/sprintinfo/leysin-winter-2014/planning.txt +++ b/sprintinfo/leysin-winter-2014/planning.txt @@ -31,3 +31,7 @@ * discuss about C++ / cppyy (johan, ?) 
* ctypes: "array_of_char.value = string": speed up this case + +* ctypes: https://bugs.pypy.org/issue1671 + +* longs multiplication: patch at https://bugs.pypy.org/issue892 From noreply at buildbot.pypy.org Sun Jan 12 23:01:52 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Sun, 12 Jan 2014 23:01:52 +0100 (CET) Subject: [pypy-commit] pypy default: bah, I don't know how to produce bogus zip files for also these cases, but we need to catch RZlibError here as well Message-ID: <20140112220152.B08E11C33D6@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r68625:d38e836c3945 Date: 2014-01-12 22:00 +0000 http://bitbucket.org/pypy/pypy/changeset/d38e836c3945/ Log: bah, I don't know how to produce bogus zip files for also these cases, but we need to catch RZlibError here as well diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py --- a/pypy/module/zipimport/interp_zipimport.py +++ b/pypy/module/zipimport/interp_zipimport.py @@ -289,6 +289,10 @@ return w(data) except (KeyError, OSError, BadZipfile): raise OperationError(space.w_IOError, space.wrap("Error reading file")) + except RZlibError, e: + # in this case, CPython raises the direct exception coming + # from the zlib module: let's to the same + raise zlib_error(space, e.msg) @unwrap_spec(fullname=str) def get_code(self, space, fullname): @@ -387,6 +391,11 @@ except (BadZipfile, OSError): raise operationerrfmt(get_error(space), "%s seems not to be a zipfile", filename) + except RZlibError, e: + # in this case, CPython raises the direct exception coming + # from the zlib module: let's to the same + raise zlib_error(space, e.msg) + prefix = name[len(filename):] if prefix.startswith(os.path.sep) or prefix.startswith(ZIPSEP): prefix = prefix[1:] From noreply at buildbot.pypy.org Sun Jan 12 23:36:45 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 12 Jan 2014 23:36:45 +0100 (CET) Subject: [pypy-commit] pypy default: Also specialize unicode(s, 'unicode_escape') it shows up with things like six.u() Message-ID: <20140112223645.443241C3544@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68626:5b68b750f853 Date: 2014-01-12 14:34 -0800 http://bitbucket.org/pypy/pypy/changeset/5b68b750f853/ Log: Also specialize unicode(s, 'unicode_escape') it shows up with things like six.u() diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -1154,6 +1154,8 @@ builder.append(res) return pos +# Specialize on the errorhandler when it's a constant + at specialize.arg_or_var(5) def str_decode_unicode_escape(s, size, errors, final=False, errorhandler=False, unicodedata_handler=None): From noreply at buildbot.pypy.org Sun Jan 12 23:36:46 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 12 Jan 2014 23:36:46 +0100 (CET) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20140112223646.85B581C3544@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68627:750c75d2a07b Date: 2014-01-12 14:35 -0800 http://bitbucket.org/pypy/pypy/changeset/750c75d2a07b/ Log: merged upstream diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py --- a/pypy/module/zipimport/interp_zipimport.py +++ b/pypy/module/zipimport/interp_zipimport.py @@ -289,6 +289,10 @@ return w(data) except (KeyError, OSError, BadZipfile): raise OperationError(space.w_IOError, space.wrap("Error reading file")) + except RZlibError, e: + # in this case, 
CPython raises the direct exception coming + # from the zlib module: let's to the same + raise zlib_error(space, e.msg) @unwrap_spec(fullname=str) def get_code(self, space, fullname): @@ -387,6 +391,11 @@ except (BadZipfile, OSError): raise operationerrfmt(get_error(space), "%s seems not to be a zipfile", filename) + except RZlibError, e: + # in this case, CPython raises the direct exception coming + # from the zlib module: let's to the same + raise zlib_error(space, e.msg) + prefix = name[len(filename):] if prefix.startswith(os.path.sep) or prefix.startswith(ZIPSEP): prefix = prefix[1:] From noreply at buildbot.pypy.org Sun Jan 12 23:38:01 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 12 Jan 2014 23:38:01 +0100 (CET) Subject: [pypy-commit] pypy default: Typo fix Message-ID: <20140112223801.E91711C3544@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68628:66480af0be6b Date: 2014-01-12 14:37 -0800 http://bitbucket.org/pypy/pypy/changeset/66480af0be6b/ Log: Typo fix diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -1155,7 +1155,7 @@ return pos # Specialize on the errorhandler when it's a constant - at specialize.arg_or_var(5) + at specialize.arg_or_var(4) def str_decode_unicode_escape(s, size, errors, final=False, errorhandler=False, unicodedata_handler=None): From noreply at buildbot.pypy.org Sun Jan 12 23:39:38 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 12 Jan 2014 23:39:38 +0100 (CET) Subject: [pypy-commit] pypy default: Use the correct default here, right now if you tried to use the default it owuld explode Message-ID: <20140112223938.F1BB71C0212@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68629:a19e8cb2a479 Date: 2014-01-12 14:38 -0800 http://bitbucket.org/pypy/pypy/changeset/a19e8cb2a479/ Log: Use the correct default here, right now if you tried to use the default it owuld explode diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -1157,7 +1157,7 @@ # Specialize on the errorhandler when it's a constant @specialize.arg_or_var(4) def str_decode_unicode_escape(s, size, errors, final=False, - errorhandler=False, + errorhandler=None, unicodedata_handler=None): if errorhandler is None: errorhandler = default_unicode_error_decode From noreply at buildbot.pypy.org Mon Jan 13 01:41:57 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Mon, 13 Jan 2014 01:41:57 +0100 (CET) Subject: [pypy-commit] pypy default: Use integer constants instead of strings in sqlite3 Message-ID: <20140113004157.5C6301C039A@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68630:ed9720ebf38a Date: 2014-01-12 16:41 -0800 http://bitbucket.org/pypy/pypy/changeset/ed9720ebf38a/ Log: Use integer constants instead of strings in sqlite3 diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -330,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -992,13 +1000,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", 
"INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1021,12 +1034,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1176,11 +1194,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1193,7 +1219,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1312,7 +1338,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): From noreply at buildbot.pypy.org Mon Jan 13 10:55:38 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 13 Jan 2014 10:55:38 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: (everyone) planning for today Message-ID: <20140113095538.293B91D233E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5127:afe69e674f95 Date: 2014-01-13 10:55 +0100 http://bitbucket.org/pypy/extradoc/changeset/afe69e674f95/ Log: (everyone) planning for today diff --git a/sprintinfo/leysin-winter-2014/planning.txt b/sprintinfo/leysin-winter-2014/planning.txt --- a/sprintinfo/leysin-winter-2014/planning.txt +++ b/sprintinfo/leysin-winter-2014/planning.txt @@ -6,7 +6,8 @@ Remi Meier Maciej Fijalkowski Romain Guillebert -Armin Rigo +Armin Rigo (late) +Manuel Jacob Topics @@ -16,22 +17,24 @@ * look at codespeed2 -* resume-refactor branch (rguillebert, fijal) +* resume-refactor branch (rguillebert, fijal) (PROGRESS) * GC pinning -* asmgcc bug with greenlets and 
--shared +* asmgcc bug with greenlets and --shared (FIXED) * think about --shared by default * CFFI 1.0 -* STM (remi) +* STM (remi, armin) SOME PROGRESS in transaction breaks -* discuss about C++ / cppyy (johan, ?) +* discuss about C++ / cppyy, look into importing pyshiboken (johan pessimistic, ?) * ctypes: "array_of_char.value = string": speed up this case * ctypes: https://bugs.pypy.org/issue1671 * longs multiplication: patch at https://bugs.pypy.org/issue892 + +* look into merging refactor-str-types (johan, mjacob) From noreply at buildbot.pypy.org Mon Jan 13 12:11:32 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 13 Jan 2014 12:11:32 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20140113111132.172D11C019D@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r68631:1dfbad2cbd56 Date: 2014-01-13 11:05 +0100 http://bitbucket.org/pypy/pypy/changeset/1dfbad2cbd56/ Log: hg merge default diff too long, truncating to 2000 out of 12023 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -993,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- 
a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ 
-564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. 
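The _sha.py hunk above corrects the class docstring (the file implements SHA-1, not MD5) and reports SHA-1's real 512-bit block through block_size instead of 1, which matters to callers such as hmac that pad keys to the hash's block length. A minimal sketch of driving the module, assuming it keeps the stdlib-style update()/hexdigest() methods referred to in the comment above:

    import _sha                      # pure-Python fallback from lib_pypy
    h = _sha.sha()
    h.update("some data")
    assert h.digest_size == 20       # SHA-1 digests are 20 bytes
    assert h.block_size == 512 // 8  # 64-byte blocks after the change above
    print h.hexdigest()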
@@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -330,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -992,13 +1000,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1021,12 +1034,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1176,11 +1194,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1193,7 +1219,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1312,7 +1338,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- 
/dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,29 @@ + +import struct + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + if size == 1: + return struct.unpack_from("B", buffer(cp)[i:])[0] + elif size == 2: + return struct.unpack_from("H", buffer(cp)[i * 2:])[0] + elif size == 4: + return struct.unpack_from("I", buffer(cp)[i * 4:])[0] diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -878,7 +878,6 @@ month = self._month if day is None: day = self._day - year, month, day = _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. @@ -1389,8 +1388,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __nonzero__(self): @@ -1608,9 +1605,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py --- a/lib_pypy/pyrepl/simple_interact.py +++ b/lib_pypy/pyrepl/simple_interact.py @@ -63,3 +63,6 @@ except KeyboardInterrupt: console.write("\nKeyboardInterrupt\n") console.resetbuffer() + except MemoryError: + console.write("\nMemoryError\n") + console.resetbuffer() diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -38,7 +38,7 @@ # General information about the project. project = u'PyPy' -copyright = u'2013, The PyPy Project' +copyright = u'2014, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.2.0' +release = '2.2.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -83,7 +83,7 @@ _winreg -* Supported by being rewritten in pure Python (possibly using ``ctypes``): +* Supported by being rewritten in pure Python (possibly using ``cffi``): see the `lib_pypy/`_ directory. Examples of modules that we support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``... 
Note that some modules are both in there and in the list above; @@ -316,5 +316,4 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). - .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,7 +72,13 @@ Here is a list of the limitations and missing features of the current implementation: -* No support for ``PyXxx`` functions from ``libpython``, for obvious reasons. +* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer + of PyPy, at your own risks and without doing anything sensible about + the GIL. Since PyPy 2.3, these functions are also named with an extra + "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, + but it might more or less work in simple cases if you do. (Obviously, + assuming the PyObject pointers you get have any particular fields in + any particular order is just going to crash.) * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,119 +5,68 @@ This document tries to explain how to interface the PyPy python interpreter with any external library. -Note: We try to describe state-of-the art, but it -might fade out of date as this is the front on which things are changing -in pypy rapidly. +Right now, there are the following possibilities of providing +third-party modules for the PyPy python interpreter (in order of +usefulness): -Possibilities -============= +* Write them in pure Python and use CFFI_. -Right now, there are three possibilities of providing third-party modules -for the PyPy python interpreter (in order of usefulness): +* Write them in pure Python and use ctypes_. -* Write them in pure python and use ctypes, see ctypes_ - section +* Write them in C++ and bind them through Reflex_. -* Write them in pure python and use direct libffi low-level bindings, See - \_ffi_ module description. +* Write them in as `RPython mixed modules`_. -* Write them in RPython as mixedmodule_, using *rffi* as bindings. -* Write them in C++ and bind them through Reflex_ +CFFI +==== -.. _ctypes: #CTypes -.. _\_ffi: #LibFFI -.. _mixedmodule: #Mixed Modules +CFFI__ is the recommended way. It is a way to write pure Python code +that accesses C libraries. The idea is to support either ABI- or +API-level access to C --- so that you can sanely access C libraries +without depending on details like the exact field order in the C +structures or the numerical value of all the constants. It works on +both CPython (as a separate ``pip install cffi``) and on PyPy, where it +is included by default. + +PyPy's JIT does a quite reasonable job on the Python code that call C +functions or manipulate C pointers with CFFI. (As of PyPy 2.2.1, it +could still be improved, but is already good.) + +See the documentation here__. + +.. __: http://cffi.readthedocs.org/ +.. __: http://cffi.readthedocs.org/ + CTypes ====== -The ctypes module in PyPy is ready to use. -It's goal is to be as-compatible-as-possible with the -`CPython ctypes`_ version. Right now it's able to support large examples, -such as pyglet. 
PyPy is planning to have a 100% compatible ctypes -implementation, without the CPython C-level API bindings (so it is very -unlikely that direct object-manipulation trickery through this API will work). +The goal of the ctypes module of PyPy is to be as compatible as possible +with the `CPython ctypes`_ version. It works for large examples, such +as pyglet. PyPy's implementation is not strictly 100% compatible with +CPython, but close enough for most cases. -We also provide a `ctypes-configure`_ for overcoming the platform dependencies, -not relying on the ctypes codegen. This tool works by querying gcc about -platform-dependent details (compiling small snippets of C code and running -them), so it'll benefit not pypy-related ctypes-based modules as well. +We also used to provide ``ctypes-configure`` for some API-level access. +This is now viewed as a precursor of CFFI, which you should use instead. +More (but older) information is available here__. +Also, ctypes' performance is not as good as CFFI's. -ctypes call are optimized by the JIT and the resulting machine code contains a -direct call to the target C function. However, due to the very dynamic nature -of ctypes, some overhead over a bare C call is still present, in particular to -check/convert the types of the parameters. Moreover, even if most calls are -optimized, some cannot and thus need to follow the slow path, not optimized by -the JIT. +.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +.. __: ctypes-implementation.html -.. _`ctypes-configure`: ctypes-implementation.html#ctypes-configure -.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +PyPy implements ctypes as pure Python code around two built-in modules +called ``_ffi`` and ``_rawffi``, which give a very low-level binding to +the C library libffi_. Nowadays it is not recommended to use directly +these two modules. -Pros ----- +.. _libffi: http://sourceware.org/libffi/ -Stable, CPython-compatible API. Most calls are fast, optimized by JIT. - -Cons ----- - -Problems with platform-dependency (although we partially solve -those). Although the JIT optimizes ctypes calls, some overhead is still -present. The slow-path is very slow. - - -LibFFI -====== - -Mostly in order to be able to write a ctypes module, we developed a very -low-level libffi bindings called ``_ffi``. (libffi is a C-level library for dynamic calling, -which is used by CPython ctypes). This library provides stable and usable API, -although it's API is a very low-level one. It does not contain any -magic. It is also optimized by the JIT, but has much less overhead than ctypes. - -Pros ----- - -It Works. Probably more suitable for a delicate code where ctypes magic goes -in a way. All calls are optimized by the JIT, there is no slow path as in -ctypes. - -Cons ----- - -It combines disadvantages of using ctypes with disadvantages of using mixed -modules. CPython-incompatible API, very rough and low-level. - -Mixed Modules -============= - -This is the most advanced and powerful way of writing extension modules. -It has some serious disadvantages: - -* a mixed module needs to be written in RPython, which is far more - complicated than Python (XXX link) - -* due to lack of separate compilation (as of July 2011), each - compilation-check requires to recompile whole PyPy python interpreter, - which takes 0.5-1h. We plan to solve this at some point in near future. 
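For readers of the rewritten extending.rst above, here is a minimal sketch of the ABI-level CFFI mode it recommends. It assumes a POSIX system where dlopen(None) gives access to the standard C library; the declaration is typed in by hand rather than taken from a header:

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("double cos(double x);")   # declare just what we call, ABI level
    C = ffi.dlopen(None)                # None opens the standard C library
    print C.cos(0.0)                    # 1.0

The same cdef() text can instead be handed to ffi.verify() for API-level access, which compiles a small C stub against the real headers and so does not rely on guessing structure layouts or constant values.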
- -* although rpython is a garbage-collected language, the border between - C and RPython needs to be managed by hand (each object that goes into the - C level must be explicitly freed). - -Some documentation is available `here`_ - -.. _`here`: rffi.html - -XXX we should provide detailed docs about lltype and rffi, especially if we - want people to follow that way. Reflex ====== -This method is still experimental and is being exercised on a branch, -`reflex-support`_, which adds the `cppyy`_ module. +This method is still experimental. It adds the `cppyy`_ module. The method works by using the `Reflex package`_ to provide reflection information of the C++ code, which is then used to automatically generate bindings at runtime. @@ -168,3 +117,15 @@ to work around it in python or with a C++ helper function. Although Reflex works on various platforms, the bindings with PyPy have only been tested on Linux. + + +RPython Mixed Modules +===================== + +This is the internal way to write built-in extension modules in PyPy. +It cannot be used by any 3rd-party module: the extension modules are +*built-in*, not independently loadable DLLs. + +This is reserved for special cases: it gives direct access to e.g. the +details of the JIT, allowing us to tweak its interaction with user code. +This is how the numpy module is being developed. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -72,13 +72,13 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf -.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf -.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf +.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. 
_`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -444,7 +444,7 @@ So the position of the core PyPy developers is that if anyone wants to make an N+1'th attempt with LLVM, they are welcome, and will be happy to provide help in the IRC channel, but they are left with the burden of proof -that it works. +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.2.0`_: the latest official release +* `Release 2.2.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.0`: http://pypy.org/download.html +.. _`Release 2.2.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.2.1.rst b/pypy/doc/release-2.2.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.2.1.rst @@ -0,0 +1,47 @@ +======================================= +PyPy 2.2.1 - Incrementalism.1 +======================================= + +We're pleased to announce PyPy 2.2.1, which targets version 2.7.3 of the Python +language. This is a bugfix release over 2.2. + +You can download the PyPy 2.2.1 release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +This is a bugfix release. The most important bugs fixed are: + +* an issue in sockets' reference counting emulation, showing up + notably when using the ssl module and calling ``makefile()``. + +* Tkinter support on Windows. + +* If sys.maxunicode==65535 (on Windows and maybe OS/X), the json + decoder incorrectly decoded surrogate pairs. + +* some FreeBSD fixes. + +Note that CFFI 0.8.1 was released. Both versions 0.8 and 0.8.1 are +compatible with both PyPy 2.2 and 2.2.1. + + +Cheers, +Armin Rigo & everybody diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,3 +15,29 @@ .. branch: armhf-singlefloat JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. 
branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -47,6 +47,9 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): + return False + # __________ app-level support __________ def descr_len(self, space): @@ -135,6 +138,9 @@ __slots__ = () # no extra slot here + def is_writable(self): + return True + def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -806,8 +806,8 @@ raise TypeError("Varargs and keywords not supported in unwrap_spec") argspec = ', '.join([arg for arg in args.args[1:]]) func_code = py.code.Source(""" - def f(w_obj, %(args)s): - return w_obj.%(func_name)s(%(args)s) + def f(self, %(args)s): + return self.%(func_name)s(%(args)s) """ % {'args': argspec, 'func_name': func.func_name}) d = {} exec func_code.compile() in d @@ -822,7 +822,7 @@ else: assert isinstance(unwrap_spec, dict) unwrap_spec = unwrap_spec.copy() - unwrap_spec['w_obj'] = base_cls + unwrap_spec['self'] = base_cls return interp2app(globals()['unwrap_spec'](**unwrap_spec)(f)) class interp2app(W_Root): diff --git a/pypy/module/__builtin__/app_operation.py b/pypy/module/__builtin__/app_operation.py --- a/pypy/module/__builtin__/app_operation.py +++ b/pypy/module/__builtin__/app_operation.py @@ -1,4 +1,5 @@ +import operator + def bin(x): - if not isinstance(x, (int, long)): - raise TypeError("must be int or long") - return x.__format__("#b") + value = operator.index(x) + return value.__format__("#b") diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -46,6 +46,15 @@ assert bin(2L) == "0b10" assert bin(-2L) == "-0b10" raises(TypeError, bin, 0.) 
+ class C(object): + def __index__(self): + return 42 + assert bin(C()) == bin(42) + class D(object): + def __int__(self): + return 42 + exc = raises(TypeError, bin, D()) + assert "index" in exc.value.message def test_unichr(self): import sys diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -5,7 +5,9 @@ from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw class LLBuffer(RWBuffer): @@ -34,8 +36,7 @@ def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) - for i in range(len(string)): - raw_cdata[i] = string[i] + copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) class MiniBuffer(W_Root): @@ -59,6 +60,9 @@ def descr__buffer__(self, space): return self.buffer.descr__buffer__(space) + def descr_str(self, space): + return space.wrap(self.buffer.as_str()) + MiniBuffer.typedef = TypeDef( "buffer", @@ -68,6 +72,7 @@ __setitem__ = interp2app(MiniBuffer.descr_setitem), __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), + __str__ = interp2app(MiniBuffer.descr_str), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr -from rpython.rlib import objectmodel, rgc +from rpython.rlib import rgc from rpython.rlib.objectmodel import keepalive_until_here, specialize from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name @@ -206,8 +206,7 @@ w_value.get_array_length() == length): # fast path: copying from exactly the correct type s = w_value._cdata - for i in range(ctitemsize * length): - cdata[i] = s[i] + rffi.c_memcpy(cdata, s, ctitemsize * length) keepalive_until_here(w_value) return # @@ -259,7 +258,6 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray - from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr @@ -424,6 +422,12 @@ def get_array_length(self): return self.length + def _sizeof(self): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + return self.length * ctype.ctitem.size + class W_CDataHandle(W_CData): _attrs_ = ['w_keepalive'] diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -42,7 +42,7 @@ if self.handles[d]() is None: self.look_distance = d + 1 return d - # full! extend, but don't use '!=' here + # full! 
extend, but don't use '+=' here self.handles = self.handles + [dead_ref] * (length // 3 + 5) self.look_distance = length + 1 return length diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2143,7 +2143,13 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert str(buf).startswith('<_cffi_backend.buffer object at 0x') + assert repr(buf).startswith('<_cffi_backend.buffer object at 0x') + assert bytes(buf) == b"hi there\x00" + if sys.version_info < (3,): + assert str(buf) == "hi there\x00" + assert unicode(buf) == u+"hi there\x00" + else: + assert str(buf) == repr(buf) # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- @@ -3125,6 +3131,12 @@ py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") +def test_sizeof_sliced_array(): + BInt = new_primitive_type("int") + BArray = new_array_type(new_pointer_type(BInt), 10) + p = newp(BArray, None) + assert sizeof(p[2:9]) == 7 * sizeof(BInt) + def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -15,6 +15,7 @@ from pypy.module._rawffi.interp_rawffi import unpack_shape_with_length from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib.rarithmetic import r_uint +from rpython.rlib import rgc class W_Array(W_DataShape): @@ -220,6 +221,7 @@ def __init__(self, space, shape, length): W_ArrayInstance.__init__(self, space, shape, length, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -16,7 +16,7 @@ from pypy.module._rawffi.interp_rawffi import unroll_letters_for_numbers from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr -from rpython.rlib import clibffi +from rpython.rlib import clibffi, rgc from rpython.rlib.rarithmetic import intmask, signedtype, widen from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong @@ -226,6 +226,7 @@ fieldtypes) return self.ffi_struct.ffistruct + @rgc.must_be_light_finalizer def __del__(self): if self.ffi_struct: lltype.free(self.ffi_struct, flavor='raw') @@ -380,6 +381,7 @@ def __init__(self, space, shape): W_StructureInstance.__init__(self, space, shape, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() diff --git a/pypy/module/_rawffi/test/test_tracker.py b/pypy/module/_rawffi/test/test_tracker.py --- a/pypy/module/_rawffi/test/test_tracker.py +++ b/pypy/module/_rawffi/test/test_tracker.py @@ -1,9 +1,21 @@ +import py +from pypy.conftest import option from pypy.module._rawffi.tracker import Tracker + class AppTestTracker: spaceconfig = dict(usemodules=['_rawffi', 'struct']) def setup_class(cls): + # + # detect if we're running on PyPy with DO_TRACING not compiled in + if option.runappdirect: + try: + import _rawffi + _rawffi._num_of_allocated_objects() + except (ImportError, RuntimeError), e: + py.test.skip(str(e)) + # Tracker.DO_TRACING = True def test_array(self): diff --git a/pypy/module/_rawffi/tracker.py b/pypy/module/_rawffi/tracker.py --- 
a/pypy/module/_rawffi/tracker.py +++ b/pypy/module/_rawffi/tracker.py @@ -2,9 +2,11 @@ """ The file that keeps track about freed/kept-alive objects allocated by _rawffi. Used for debugging ctypes """ +from pypy.interpreter.error import OperationError + class Tracker(object): - DO_TRACING = True + DO_TRACING = False # make sure this stays False by default! def __init__(self): self.alloced = {} @@ -20,6 +22,9 @@ tracker = Tracker() def num_of_allocated_objects(space): + if not tracker.DO_TRACING: + raise OperationError(space.w_RuntimeError, + space.wrap("DO_TRACING not enabled in this PyPy")) return space.wrap(len(tracker.alloced)) def print_alloced_objects(space): diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -415,7 +415,13 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + try: + w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + raise OperationError(space.w_IndexError, + space.wrap("no such group")) groupnum = space.int_w(w_groupnum) if groupnum == 0: return self.ctx.match_start, self.ctx.match_end diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -172,6 +172,9 @@ assert ("1", "1", None) == m.group(1, 2, 3) assert ("1", None) == m.group("first", "second") raises(IndexError, m.group, 1, 4) + assert ("1", None) == m.group(1, "second") + raises(IndexError, m.group, 'foobarbaz') + raises(IndexError, m.group, 'first', 'foobarbaz') def test_expand(self): import re diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -711,8 +711,12 @@ raise ssl_error(space, "SSL_CTX_use_certificate_chain_file error") # ssl compatibility - libssl_SSL_CTX_set_options(ss.ctx, - SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) + options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS + if protocol != PY_SSL_VERSION_SSL2: + # SSLv2 is extremely broken, don't use it unless a user specifically + # requests it + options |= SSL_OP_NO_SSLv2 + libssl_SSL_CTX_set_options(ss.ctx, options) verification_mode = SSL_VERIFY_NONE if cert_mode == PY_SSL_CERT_OPTIONAL: @@ -724,7 +728,7 @@ libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address # of a str object may be changed by the garbage collector. - libssl_SSL_set_mode(ss.ssl, + libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -52,6 +52,7 @@ # weakref callbacks are not invoked eagerly here. They are # invoked by self.__del__() anyway. 
+ @jit.dont_look_inside def get_or_make_weakref(self, w_subtype, w_obj): space = self.space w_weakreftype = space.gettypeobject(W_Weakref.typedef) @@ -70,6 +71,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def get_or_make_proxy(self, w_obj): space = self.space if self.cached_proxy is not None: @@ -122,6 +124,7 @@ W_WeakrefBase.activate_callback, 'weakref callback of ') + @jit.dont_look_inside def make_weakref_with_callback(self, w_subtype, w_obj, w_callable): space = self.space w_ref = space.allocate_instance(W_Weakref, w_subtype) @@ -129,6 +132,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def make_proxy_with_callback(self, w_obj, w_callable): space = self.space if space.is_true(space.callable(w_obj)): @@ -239,15 +243,16 @@ w_obj.setweakref(space, lifeline) return lifeline - at jit.dont_look_inside + def get_or_make_weakref(space, w_subtype, w_obj): return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj) - at jit.dont_look_inside + def make_weakref_with_callback(space, w_subtype, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) + def descr__new__weakref(space, w_subtype, w_obj, w_callable=None, __args__=None): if __args__.arguments_w: @@ -312,15 +317,16 @@ w_obj = force(space, self) return space.call_args(w_obj, __args__) - at jit.dont_look_inside + def get_or_make_proxy(space, w_obj): return getlifeline(space, w_obj).get_or_make_proxy(w_obj) - at jit.dont_look_inside + def make_proxy_with_callback(space, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_proxy_with_callback(w_obj, w_callable) + def proxy(space, w_obj, w_callable=None): """Create a proxy object that weakly references 'obj'. 
'callback', if given, is called with the proxy as an argument when 'obj' diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -26,6 +26,7 @@ from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject from pypy.module.__builtin__.interp_memoryview import W_MemoryView +from pypy.module.micronumpy.base import W_NDimArray from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable @@ -399,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', 'init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', 'init_pycobject', + 'PyCObject_Type', '_Py_init_pycobject', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', 'init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -469,6 +470,7 @@ "Complex": "space.w_complex", "ByteArray": "space.w_bytearray", "MemoryView": "space.gettypeobject(W_MemoryView.typedef)", + "Array": "space.gettypeobject(W_NDimArray.typedef)", "BaseObject": "space.w_object", 'None': 'space.type(space.w_None)', 'NotImplemented': 'space.type(space.w_NotImplemented)', @@ -685,11 +687,15 @@ globals()['va_get_%s' % name_no_star] = func def setup_init_functions(eci, translating): - init_buffer = rffi.llexternal('init_bufferobject', [], lltype.Void, + if translating: + prefix = 'PyPy' + else: + prefix = 'cpyexttest' + init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_pycobject = rffi.llexternal('init_pycobject', [], lltype.Void, + init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_capsule = rffi.llexternal('init_capsule', [], lltype.Void, + init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) INIT_FUNCTIONS.extend([ lambda space: init_buffer(), @@ -697,12 +703,8 @@ lambda space: init_capsule(), ]) from pypy.module.posix.interp_posix import add_fork_hook - if translating: - reinit_tls = rffi.llexternal('PyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) - else: - reinit_tls = rffi.llexternal('PyPyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) + reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, + compilation_info=eci) add_fork_hook('child', reinit_tls) def init_function(func): @@ -744,7 +746,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=True, do_deref=True) + generate_macros(export_symbols, prefix='cpyexttest') # Structure declaration code members = [] @@ -810,7 +812,7 @@ 
INTERPLEVEL_API[name] = w_obj - name = name.replace('Py', 'PyPy') + name = name.replace('Py', 'cpyexttest') if isptr: ptr = ctypes.c_void_p.in_dll(bridge, name) if typ == 'PyObject*': @@ -822,7 +824,7 @@ ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value elif typ in ('PyObject*', 'PyTypeObject*'): - if name.startswith('PyPyExc_'): + if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, in_dll) @@ -857,28 +859,27 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') -def generate_macros(export_symbols, rename=True, do_deref=True): +def mangle_name(prefix, name): + if name.startswith('Py'): + return prefix + name[2:] + elif name.startswith('_Py'): + return '_' + prefix + name[3:] + else: + return None + +def generate_macros(export_symbols, prefix): "NOT_RPYTHON" pypy_macros = [] renamed_symbols = [] for name in export_symbols: - if name.startswith("PyPy"): - renamed_symbols.append(name) - continue - if not rename: - continue name = name.replace("#", "") - newname = name.replace('Py', 'PyPy') - if not rename: - newname = name + newname = mangle_name(prefix, name) + assert newname, name pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) - if rename: - export_symbols[:] = renamed_symbols - else: - export_symbols[:] = [sym.replace("#", "") for sym in export_symbols] + export_symbols[:] = renamed_symbols # Generate defines for macro_name, size in [ @@ -1039,7 +1040,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=False, do_deref=False) + generate_macros(export_symbols, prefix='PyPy') functions = generate_decls_and_callbacks(db, [], api_struct=False) code = "#include \n" + "\n".join(functions) @@ -1069,7 +1070,8 @@ export_struct(name, struct) for name, func in FUNCTIONS.iteritems(): - deco = entrypoint_lowlevel("cpyext", func.argtypes, name, relax=True) + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -37,7 +37,7 @@ PyObject* PyBuffer_New(Py_ssize_t size); -void init_bufferobject(void); +void _Py_init_bufferobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void init_capsule(void); +void _Py_init_capsule(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void init_pycobject(void); +void _Py_init_pycobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ 
-25,6 +25,22 @@ #define Py_UNICODE_SIZE 2 #endif +#ifndef Py_BUILD_CORE /* not building the core - must be an ext */ +# if defined(_MSC_VER) + /* So MSVC users need not specify the .lib file in + * their Makefile (other compilers are generally + * taken care of by distutils.) */ +# ifdef _DEBUG +# error("debug first with cpython") +# pragma comment(lib,"python27.lib") +# else +# pragma comment(lib,"python27.lib") +# endif /* _DEBUG */ +# endif +#endif /* _MSC_VER */ + + + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,7 +783,7 @@ return size; } -void init_bufferobject(void) +void _Py_init_bufferobject(void) { PyType_Ready(&PyBuffer_Type); } diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,7 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void init_capsule() +void _Py_init_capsule() { PyType_Ready(&PyCapsule_Type); } diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void init_pycobject() +void _Py_init_pycobject() { PyType_Ready(&PyCObject_Type); } diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -3,8 +3,6 @@ #include "numpy/arrayobject.h" #include /* memset, memcpy */ -PyTypeObject PyArray_Type; - void _PyArray_FILLWBYTE(PyObject* obj, int val) { memset(PyArray_DATA(obj), val, PyArray_NBYTES(obj)); diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -286,3 +286,19 @@ assert dt.num == 11 + def test_pass_ndarray_object_to_c(self): + from _numpypy.multiarray import ndarray + mod = self.import_extension('foo', [ + ("check_array", "METH_VARARGS", + ''' + PyObject* obj; + if (!PyArg_ParseTuple(args, "O!", &PyArray_Type, &obj)) + return NULL; + Py_INCREF(obj); + return obj; + '''), + ], prologue='#include ') + array = ndarray((3, 4), dtype='d') + assert mod.check_array(array) is array + raises(TypeError, "mod.check_array(42)") + diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -8,8 +8,10 @@ module = self.import_extension('foo', [ ("get_thread_ident", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - return PyInt_FromLong(PyPyThread_get_thread_ident()); +#ifndef PyThread_get_thread_ident +#error "seems we are not accessing PyPy's functions" +#endif + return PyInt_FromLong(PyThread_get_thread_ident()); """), ]) import thread, threading @@ -32,17 +34,19 @@ module = self.import_extension('foo', [ ("test_acquire_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - if (PyPyThread_acquire_lock(lock, 1) != 1) { +#ifndef PyThread_allocate_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + if (PyThread_acquire_lock(lock, 1) != 1) { 
PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - if (PyPyThread_acquire_lock(lock, 0) != 0) { + if (PyThread_acquire_lock(lock, 0) != 0) { PyErr_SetString(PyExc_AssertionError, "second acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), @@ -53,15 +57,17 @@ module = self.import_extension('foo', [ ("test_release_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - PyPyThread_acquire_lock(lock, 1); - PyPyThread_release_lock(lock); - if (PyPyThread_acquire_lock(lock, 0) != 1) { +#ifndef PyThread_release_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + PyThread_acquire_lock(lock, 1); + PyThread_release_lock(lock); + if (PyThread_acquire_lock(lock, 0) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -342,10 +342,8 @@ if space.is_w(w_startstop, space.w_None): start = 0 else: - start = space.int_w(w_startstop) - if start < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Indicies for islice() must be non-negative integers.")) + start = self.arg_int_w(w_startstop, 0, + "Indicies for islice() must be None or non-negative integers") w_stop = args_w[0] else: raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " given)")) @@ -353,10 +351,8 @@ if space.is_w(w_stop, space.w_None): stop = -1 else: - stop = space.int_w(w_stop) - if stop < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Stop argument must be a non-negative integer or None.")) + stop = self.arg_int_w(w_stop, 0, + "Stop argument must be a non-negative integer or None.") stop = max(start, stop) # for obscure CPython compatibility if num_args == 2: @@ -364,10 +360,8 @@ if space.is_w(w_step, space.w_None): step = 1 else: - step = space.int_w(w_step) - if step < 1: - raise OperationError(space.w_ValueError, space.wrap( - "Step must be one or lager for islice().")) + step = self.arg_int_w(w_step, 1, + "Step for islice() must be a positive integer or None") else: step = 1 @@ -375,6 +369,18 @@ self.start = start self.stop = stop + def arg_int_w(self, w_obj, minimum, errormsg): + space = self.space + try: + result = space.int_w(w_obj) + except OperationError, e: + if e.async(space): + raise + result = -1 + if result < minimum: + raise OperationError(space.w_ValueError, space.wrap(errormsg)) + return result + def iter_w(self): return self.space.wrap(self) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -304,6 +304,11 @@ raises(TypeError, itertools.islice, [], 0, 0, 0, 0) + # why not TypeError? 
Because CPython + raises(ValueError, itertools.islice, [], "a", 1, 2) + raises(ValueError, itertools.islice, [], 0, "a", 2) + raises(ValueError, itertools.islice, [], 0, 1, "a") + def test_chain(self): import itertools diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py --- a/pypy/module/math/app_math.py +++ b/pypy/module/math/app_math.py @@ -1,13 +1,40 @@ def factorial(x): - """Find x!.""" + """factorial(x) -> Integral + + "Find x!. Raise a ValueError if x is negative or non-integral.""" if isinstance(x, float): fl = int(x) if fl != x: raise ValueError("float arguments must be integral") x = fl - if x < 0: - raise ValueError("x must be >= 0") - res = 1 - for i in range(1, x + 1): - res *= i - return res + + if x <= 100: + if x < 0: + raise ValueError("x must be >= 0") + res = 1 + for i in range(2, x + 1): + res *= i + return res + + # Experimentally this gap seems good + gap = max(100, x >> 7) + def _fac_odd(low, high): + if low + gap >= high: + t = 1 + for i in range(low, high, 2): + t *= i + return t + + mid = ((low + high) >> 1) | 1 + return _fac_odd(low, mid) * _fac_odd(mid, high) + + def _fac1(x): + if x <= 2: + return 1, 1, x - 1 + x2 = x >> 1 + f, g, shift = _fac1(x2) + g *= _fac_odd((x2 + 1) | 1, x + 1) + return (f * g, g, shift + x2) + + res, _, shift = _fac1(x) + return res << shift diff --git a/pypy/module/math/test/test_factorial.py b/pypy/module/math/test/test_factorial.py new file mode 100644 --- /dev/null +++ b/pypy/module/math/test/test_factorial.py @@ -0,0 +1,29 @@ +import py +import math +from pypy.module.math import app_math + +def test_factorial_extra(): + for x in range(1000): + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + assert type(r1) == type(r2) + +def test_timing(): + py.test.skip("for manual running only") + import time + x = 5000 + repeat = 1000 + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + t1 = time.time() + for i in range(repeat): + app_math.factorial(x) + t2 = time.time() + for i in range(repeat): + math.factorial(x) + t3 = time.time() + assert r1 == r2 + print (t2 - t1) / repeat + print (t3 - t2) / repeat diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -10,7 +10,7 @@ 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', - 'ones': 'interp_numarray.ones', + 'empty_like': 'interp_numarray.empty_like', '_reconstruct' : 'interp_numarray._reconstruct', 'scalar' : 'interp_numarray.build_scalar', 'dot': 'interp_arrayops.dot', @@ -106,8 +106,6 @@ ('logaddexp2', 'logaddexp2'), ('real', 'real'), ('imag', 'imag'), - ('ones_like', 'ones_like'), - ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -12,7 +12,9 @@ if dtype is None: test = _numpypy.multiarray.array([start, stop, step, 0]) dtype = test.dtype - arr = _numpypy.multiarray.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + length = math.ceil((float(stop) - start) / step) + length = int(length) + arr = _numpypy.multiarray.zeros(length, dtype=dtype) i = start for j in range(arr.size): arr[j] = i diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- 
a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -47,7 +47,7 @@ def setslice(self, space, arr): impl = arr.implementation if impl.is_scalar(): - self.fill(impl.get_scalar_value()) + self.fill(space, impl.get_scalar_value()) return shape = shape_agreement(space, self.get_shape(), arr) if impl.storage == self.storage: @@ -100,7 +100,7 @@ tmp = self.get_real(orig_array) tmp.setslice(space, convert_to_array(space, w_value)) - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() if self.dtype.is_complex_type(): @@ -110,11 +110,11 @@ impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) if not self.dtype.is_flexible_type(): - impl.fill(self.dtype.box(0)) + impl.fill(space, self.dtype.box(0)) return impl def set_imag(self, space, orig_array, w_value): - tmp = self.get_imag(orig_array) + tmp = self.get_imag(space, orig_array) tmp.setslice(space, convert_to_array(space, w_value)) # -------------------- applevel get/setitem ----------------------- @@ -357,7 +357,7 @@ self.get_backstrides(), self.get_shape()) - def fill(self, box): + def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), box, 0, self.size, 0) @@ -392,6 +392,21 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) +class ConcreteArrayWithBase(ConcreteArrayNotOwning): + def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): + ConcreteArrayNotOwning.__init__(self, shape, dtype, order, + strides, backstrides, storage) + self.orig_base = orig_base + + def base(self): + return self.orig_base + + +class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) + class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): @@ -420,8 +435,8 @@ def base(self): return self.orig_arr - def fill(self, box): - loop.fill(self, box.convert_to(self.dtype)) + def fill(self, space, box): + loop.fill(self, box.convert_to(space, self.dtype)) def create_iter(self, shape=None, backward_broadcast=False, require_index=False): if shape is not None and \ @@ -487,3 +502,6 @@ def getlength(self): return self.impl.size + + def get_raw_address(self): + return self.impl.storage diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -54,8 +54,7 @@ return self.value def set_scalar_value(self, w_val): - assert isinstance(w_val, W_GenericBox) - self.value = w_val.convert_to(self.dtype) + self.value = w_val def copy(self, space): scalar = Scalar(self.dtype) @@ -96,12 +95,12 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) if self.dtype.is_complex_type(): self.value = self.dtype.itemtype.composite( - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), self.value.convert_imag_to(dtype)) else: self.value = w_arr.get_scalar_value() - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): if self.dtype.is_complex_type(): scalar = Scalar(self.dtype.float_type) scalar.value = self.value.convert_imag_to(scalar.dtype) @@ -125,13 +124,23 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) self.value = 
self.dtype.itemtype.composite( self.value.convert_real_to(dtype), - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), ) def descr_getitem(self, space, _, w_idx): if space.isinstance_w(w_idx, space.w_tuple): if space.len_w(w_idx) == 0: return self.get_scalar_value() + elif space.isinstance_w(w_idx, space.w_str): + if self.dtype.is_record_type(): + w_val = self.value.descr_getitem(space, w_idx) + return convert_to_array(space, w_val) + elif space.is_none(w_idx): + new_shape = [1] + arr = W_NDimArray.from_shape(space, new_shape, self.dtype) + arr_iter = arr.create_iter(new_shape) + arr_iter.setitem(self.value) + return arr From noreply at buildbot.pypy.org Mon Jan 13 12:11:34 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 13 Jan 2014 12:11:34 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: hg merge default Message-ID: <20140113111134.663DB1C019D@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68632:6795adf72b9c Date: 2014-01-13 12:08 +0100 http://bitbucket.org/pypy/pypy/changeset/6795adf72b9c/ Log: hg merge default diff too long, truncating to 2000 out of 12354 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -993,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', 
usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. 
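Illustrative sketch of the user-level 'errcheck' protocol handled above (assuming a Unix C library can be loaded; not part of the changeset):

    import ctypes

    libc = ctypes.CDLL(None)
    strlen = libc.strlen
    strlen.argtypes = [ctypes.c_char_p]
    strlen.restype = ctypes.c_size_t

    def check(result, func, args):
        # returning 'args' unchanged lets normal processing continue;
        # returning anything else replaces the call's result
        if result > 100:
            raise ValueError("suspiciously long string")
        return args

    strlen.errcheck = check
    assert strlen(b"hello") == 5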
if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). 
It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. 
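Side note, as an illustration only: block_size is part of the hashlib-style interface (64 bytes for SHA-1), and callers such as the pure-Python hmac module consult it when padding the key, which is why the change from 1 to 512 // 8 above matters:

    import hmac, hashlib

    # hmac pads the key to the digest's 64-byte input block size
    h = hmac.new(b'key', b'message', hashlib.sha1)
    print h.hexdigest()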
@@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -330,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -992,13 +1000,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1021,12 +1034,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1176,11 +1194,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1193,7 +1219,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1312,7 +1338,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- 
/dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,29 @@ + +import struct + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + if size == 1: + return struct.unpack_from("B", buffer(cp)[i:])[0] + elif size == 2: + return struct.unpack_from("H", buffer(cp)[i * 2:])[0] + elif size == 4: + return struct.unpack_from("I", buffer(cp)[i * 4:])[0] diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -878,7 +878,6 @@ month = self._month if day is None: day = self._day - year, month, day = _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. @@ -1389,8 +1388,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __nonzero__(self): @@ -1608,9 +1605,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py --- a/lib_pypy/pyrepl/simple_interact.py +++ b/lib_pypy/pyrepl/simple_interact.py @@ -63,3 +63,6 @@ except KeyboardInterrupt: console.write("\nKeyboardInterrupt\n") console.resetbuffer() + except MemoryError: + console.write("\nMemoryError\n") + console.resetbuffer() diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -38,7 +38,7 @@ # General information about the project. project = u'PyPy' -copyright = u'2013, The PyPy Project' +copyright = u'2014, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.2.0' +release = '2.2.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -83,7 +83,7 @@ _winreg -* Supported by being rewritten in pure Python (possibly using ``ctypes``): +* Supported by being rewritten in pure Python (possibly using ``cffi``): see the `lib_pypy/`_ directory. Examples of modules that we support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``... 
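audioop (added above) is one such pure-Python module; as a hedged illustration of its minimal getsample(), which reads the i-th sample out of a raw byte string:

    import struct, audioop

    frames = struct.pack('4H', 10, 20, 30, 40)   # four 16-bit samples
    assert audioop.getsample(frames, 2, 1) == 20
    assert audioop.getsample(frames, 2, 3) == 40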
Note that some modules are both in there and in the list above; @@ -316,5 +316,4 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). - .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,7 +72,13 @@ Here is a list of the limitations and missing features of the current implementation: -* No support for ``PyXxx`` functions from ``libpython``, for obvious reasons. +* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer + of PyPy, at your own risks and without doing anything sensible about + the GIL. Since PyPy 2.3, these functions are also named with an extra + "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, + but it might more or less work in simple cases if you do. (Obviously, + assuming the PyObject pointers you get have any particular fields in + any particular order is just going to crash.) * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,119 +5,68 @@ This document tries to explain how to interface the PyPy python interpreter with any external library. -Note: We try to describe state-of-the art, but it -might fade out of date as this is the front on which things are changing -in pypy rapidly. +Right now, there are the following possibilities of providing +third-party modules for the PyPy python interpreter (in order of +usefulness): -Possibilities -============= +* Write them in pure Python and use CFFI_. -Right now, there are three possibilities of providing third-party modules -for the PyPy python interpreter (in order of usefulness): +* Write them in pure Python and use ctypes_. -* Write them in pure python and use ctypes, see ctypes_ - section +* Write them in C++ and bind them through Reflex_. -* Write them in pure python and use direct libffi low-level bindings, See - \_ffi_ module description. +* Write them in as `RPython mixed modules`_. -* Write them in RPython as mixedmodule_, using *rffi* as bindings. -* Write them in C++ and bind them through Reflex_ +CFFI +==== -.. _ctypes: #CTypes -.. _\_ffi: #LibFFI -.. _mixedmodule: #Mixed Modules +CFFI__ is the recommended way. It is a way to write pure Python code +that accesses C libraries. The idea is to support either ABI- or +API-level access to C --- so that you can sanely access C libraries +without depending on details like the exact field order in the C +structures or the numerical value of all the constants. It works on +both CPython (as a separate ``pip install cffi``) and on PyPy, where it +is included by default. + +PyPy's JIT does a quite reasonable job on the Python code that call C +functions or manipulate C pointers with CFFI. (As of PyPy 2.2.1, it +could still be improved, but is already good.) + +See the documentation here__. + +.. __: http://cffi.readthedocs.org/ +.. __: http://cffi.readthedocs.org/ + CTypes ====== -The ctypes module in PyPy is ready to use. -It's goal is to be as-compatible-as-possible with the -`CPython ctypes`_ version. Right now it's able to support large examples, -such as pyglet. 
PyPy is planning to have a 100% compatible ctypes -implementation, without the CPython C-level API bindings (so it is very -unlikely that direct object-manipulation trickery through this API will work). +The goal of the ctypes module of PyPy is to be as compatible as possible +with the `CPython ctypes`_ version. It works for large examples, such +as pyglet. PyPy's implementation is not strictly 100% compatible with +CPython, but close enough for most cases. -We also provide a `ctypes-configure`_ for overcoming the platform dependencies, -not relying on the ctypes codegen. This tool works by querying gcc about -platform-dependent details (compiling small snippets of C code and running -them), so it'll benefit not pypy-related ctypes-based modules as well. +We also used to provide ``ctypes-configure`` for some API-level access. +This is now viewed as a precursor of CFFI, which you should use instead. +More (but older) information is available here__. +Also, ctypes' performance is not as good as CFFI's. -ctypes call are optimized by the JIT and the resulting machine code contains a -direct call to the target C function. However, due to the very dynamic nature -of ctypes, some overhead over a bare C call is still present, in particular to -check/convert the types of the parameters. Moreover, even if most calls are -optimized, some cannot and thus need to follow the slow path, not optimized by -the JIT. +.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +.. __: ctypes-implementation.html -.. _`ctypes-configure`: ctypes-implementation.html#ctypes-configure -.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +PyPy implements ctypes as pure Python code around two built-in modules +called ``_ffi`` and ``_rawffi``, which give a very low-level binding to +the C library libffi_. Nowadays it is not recommended to use directly +these two modules. -Pros ----- +.. _libffi: http://sourceware.org/libffi/ -Stable, CPython-compatible API. Most calls are fast, optimized by JIT. - -Cons ----- - -Problems with platform-dependency (although we partially solve -those). Although the JIT optimizes ctypes calls, some overhead is still -present. The slow-path is very slow. - - -LibFFI -====== - -Mostly in order to be able to write a ctypes module, we developed a very -low-level libffi bindings called ``_ffi``. (libffi is a C-level library for dynamic calling, -which is used by CPython ctypes). This library provides stable and usable API, -although it's API is a very low-level one. It does not contain any -magic. It is also optimized by the JIT, but has much less overhead than ctypes. - -Pros ----- - -It Works. Probably more suitable for a delicate code where ctypes magic goes -in a way. All calls are optimized by the JIT, there is no slow path as in -ctypes. - -Cons ----- - -It combines disadvantages of using ctypes with disadvantages of using mixed -modules. CPython-incompatible API, very rough and low-level. - -Mixed Modules -============= - -This is the most advanced and powerful way of writing extension modules. -It has some serious disadvantages: - -* a mixed module needs to be written in RPython, which is far more - complicated than Python (XXX link) - -* due to lack of separate compilation (as of July 2011), each - compilation-check requires to recompile whole PyPy python interpreter, - which takes 0.5-1h. We plan to solve this at some point in near future. 
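As a hedged illustration of the ABI-level CFFI route the rewritten page recommends (assuming a Unix system where dlopen(None) gives access to the standard C library):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("size_t strlen(const char *);")
    C = ffi.dlopen(None)          # the standard C library
    assert C.strlen("hello, world") == 12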
- -* although rpython is a garbage-collected language, the border between - C and RPython needs to be managed by hand (each object that goes into the - C level must be explicitly freed). - -Some documentation is available `here`_ - -.. _`here`: rffi.html - -XXX we should provide detailed docs about lltype and rffi, especially if we - want people to follow that way. Reflex ====== -This method is still experimental and is being exercised on a branch, -`reflex-support`_, which adds the `cppyy`_ module. +This method is still experimental. It adds the `cppyy`_ module. The method works by using the `Reflex package`_ to provide reflection information of the C++ code, which is then used to automatically generate bindings at runtime. @@ -168,3 +117,15 @@ to work around it in python or with a C++ helper function. Although Reflex works on various platforms, the bindings with PyPy have only been tested on Linux. + + +RPython Mixed Modules +===================== + +This is the internal way to write built-in extension modules in PyPy. +It cannot be used by any 3rd-party module: the extension modules are +*built-in*, not independently loadable DLLs. + +This is reserved for special cases: it gives direct access to e.g. the +details of the JIT, allowing us to tweak its interaction with user code. +This is how the numpy module is being developed. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -72,13 +72,13 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf -.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf -.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf +.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. 
_`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -444,7 +444,7 @@ So the position of the core PyPy developers is that if anyone wants to make an N+1'th attempt with LLVM, they are welcome, and will be happy to provide help in the IRC channel, but they are left with the burden of proof -that it works. +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.2.0`_: the latest official release +* `Release 2.2.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.0`: http://pypy.org/download.html +.. _`Release 2.2.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.2.1.rst b/pypy/doc/release-2.2.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.2.1.rst @@ -0,0 +1,47 @@ +======================================= +PyPy 2.2.1 - Incrementalism.1 +======================================= + +We're pleased to announce PyPy 2.2.1, which targets version 2.7.3 of the Python +language. This is a bugfix release over 2.2. + +You can download the PyPy 2.2.1 release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +This is a bugfix release. The most important bugs fixed are: + +* an issue in sockets' reference counting emulation, showing up + notably when using the ssl module and calling ``makefile()``. + +* Tkinter support on Windows. + +* If sys.maxunicode==65535 (on Windows and maybe OS/X), the json + decoder incorrectly decoded surrogate pairs. + +* some FreeBSD fixes. + +Note that CFFI 0.8.1 was released. Both versions 0.8 and 0.8.1 are +compatible with both PyPy 2.2 and 2.2.1. + + +Cheers, +Armin Rigo & everybody diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,3 +15,29 @@ .. branch: armhf-singlefloat JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. 
branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -47,6 +47,9 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): + return False + # __________ app-level support __________ def descr_len(self, space): @@ -135,6 +138,9 @@ __slots__ = () # no extra slot here + def is_writable(self): + return True + def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -805,8 +805,8 @@ raise TypeError("Varargs and keywords not supported in unwrap_spec") argspec = ', '.join([arg for arg in args.args[1:]]) func_code = py.code.Source(""" - def f(w_obj, %(args)s): - return w_obj.%(func_name)s(%(args)s) + def f(self, %(args)s): + return self.%(func_name)s(%(args)s) """ % {'args': argspec, 'func_name': func.func_name}) d = {} exec func_code.compile() in d @@ -821,7 +821,7 @@ else: assert isinstance(unwrap_spec, dict) unwrap_spec = unwrap_spec.copy() - unwrap_spec['w_obj'] = base_cls + unwrap_spec['self'] = base_cls return interp2app(globals()['unwrap_spec'](**unwrap_spec)(f)) class interp2app(W_Root): diff --git a/pypy/module/__builtin__/app_operation.py b/pypy/module/__builtin__/app_operation.py --- a/pypy/module/__builtin__/app_operation.py +++ b/pypy/module/__builtin__/app_operation.py @@ -1,4 +1,5 @@ +import operator + def bin(x): - if not isinstance(x, (int, long)): - raise TypeError("must be int or long") - return x.__format__("#b") + value = operator.index(x) + return value.__format__("#b") diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -46,6 +46,15 @@ assert bin(2L) == "0b10" assert bin(-2L) == "-0b10" raises(TypeError, bin, 0.) 
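Illustration only (not part of the changeset): with the app_operation change above, bin() now goes through operator.index(), so any object implementing __index__ is accepted while floats and __int__-only objects are rejected, as the added test just below checks:

    import operator

    class C(object):
        def __index__(self):
            return 42

    assert operator.index(C()) == 42
    assert bin(C()) == bin(42) == '0b101010'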
+ class C(object): + def __index__(self): + return 42 + assert bin(C()) == bin(42) + class D(object): + def __int__(self): + return 42 + exc = raises(TypeError, bin, D()) + assert "index" in exc.value.message def test_unichr(self): import sys diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -5,7 +5,9 @@ from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw class LLBuffer(RWBuffer): @@ -34,8 +36,7 @@ def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) - for i in range(len(string)): - raw_cdata[i] = string[i] + copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) class MiniBuffer(W_Root): @@ -59,6 +60,9 @@ def descr__buffer__(self, space): return self.buffer.descr__buffer__(space) + def descr_str(self, space): + return space.wrap(self.buffer.as_str()) + MiniBuffer.typedef = TypeDef( "buffer", @@ -68,6 +72,7 @@ __setitem__ = interp2app(MiniBuffer.descr_setitem), __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), + __str__ = interp2app(MiniBuffer.descr_str), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr -from rpython.rlib import objectmodel, rgc +from rpython.rlib import rgc from rpython.rlib.objectmodel import keepalive_until_here, specialize from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name @@ -206,8 +206,7 @@ w_value.get_array_length() == length): # fast path: copying from exactly the correct type s = w_value._cdata - for i in range(ctitemsize * length): - cdata[i] = s[i] + rffi.c_memcpy(cdata, s, ctitemsize * length) keepalive_until_here(w_value) return # @@ -259,7 +258,6 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray - from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr @@ -424,6 +422,12 @@ def get_array_length(self): return self.length + def _sizeof(self): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + return self.length * ctype.ctitem.size + class W_CDataHandle(W_CData): _attrs_ = ['w_keepalive'] diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -42,7 +42,7 @@ if self.handles[d]() is None: self.look_distance = d + 1 return d - # full! extend, but don't use '!=' here + # full! 
extend, but don't use '+=' here self.handles = self.handles + [dead_ref] * (length // 3 + 5) self.look_distance = length + 1 return length diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2143,7 +2143,13 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert str(buf).startswith('<_cffi_backend.buffer object at 0x') + assert repr(buf).startswith('<_cffi_backend.buffer object at 0x') + assert bytes(buf) == b"hi there\x00" + if sys.version_info < (3,): + assert str(buf) == "hi there\x00" + assert unicode(buf) == u+"hi there\x00" + else: + assert str(buf) == repr(buf) # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- @@ -3125,6 +3131,12 @@ py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") +def test_sizeof_sliced_array(): + BInt = new_primitive_type("int") + BArray = new_array_type(new_pointer_type(BInt), 10) + p = newp(BArray, None) + assert sizeof(p[2:9]) == 7 * sizeof(BInt) + def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -15,6 +15,7 @@ from pypy.module._rawffi.interp_rawffi import unpack_shape_with_length from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib.rarithmetic import r_uint +from rpython.rlib import rgc class W_Array(W_DataShape): @@ -220,6 +221,7 @@ def __init__(self, space, shape, length): W_ArrayInstance.__init__(self, space, shape, length, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -16,7 +16,7 @@ from pypy.module._rawffi.interp_rawffi import unroll_letters_for_numbers from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr -from rpython.rlib import clibffi +from rpython.rlib import clibffi, rgc from rpython.rlib.rarithmetic import intmask, signedtype, widen from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong @@ -226,6 +226,7 @@ fieldtypes) return self.ffi_struct.ffistruct + @rgc.must_be_light_finalizer def __del__(self): if self.ffi_struct: lltype.free(self.ffi_struct, flavor='raw') @@ -380,6 +381,7 @@ def __init__(self, space, shape): W_StructureInstance.__init__(self, space, shape, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() diff --git a/pypy/module/_rawffi/test/test_tracker.py b/pypy/module/_rawffi/test/test_tracker.py --- a/pypy/module/_rawffi/test/test_tracker.py +++ b/pypy/module/_rawffi/test/test_tracker.py @@ -1,9 +1,21 @@ +import py +from pypy.conftest import option from pypy.module._rawffi.tracker import Tracker + class AppTestTracker: spaceconfig = dict(usemodules=['_rawffi', 'struct']) def setup_class(cls): + # + # detect if we're running on PyPy with DO_TRACING not compiled in + if option.runappdirect: + try: + import _rawffi + _rawffi._num_of_allocated_objects() + except (ImportError, RuntimeError), e: + py.test.skip(str(e)) + # Tracker.DO_TRACING = True def test_array(self): diff --git a/pypy/module/_rawffi/tracker.py b/pypy/module/_rawffi/tracker.py --- 
a/pypy/module/_rawffi/tracker.py +++ b/pypy/module/_rawffi/tracker.py @@ -2,9 +2,11 @@ """ The file that keeps track about freed/kept-alive objects allocated by _rawffi. Used for debugging ctypes """ +from pypy.interpreter.error import OperationError + class Tracker(object): - DO_TRACING = True + DO_TRACING = False # make sure this stays False by default! def __init__(self): self.alloced = {} @@ -20,6 +22,9 @@ tracker = Tracker() def num_of_allocated_objects(space): + if not tracker.DO_TRACING: + raise OperationError(space.w_RuntimeError, + space.wrap("DO_TRACING not enabled in this PyPy")) return space.wrap(len(tracker.alloced)) def print_alloced_objects(space): diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -415,7 +415,13 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + try: + w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + raise OperationError(space.w_IndexError, + space.wrap("no such group")) groupnum = space.int_w(w_groupnum) if groupnum == 0: return self.ctx.match_start, self.ctx.match_end diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -172,6 +172,9 @@ assert ("1", "1", None) == m.group(1, 2, 3) assert ("1", None) == m.group("first", "second") raises(IndexError, m.group, 1, 4) + assert ("1", None) == m.group(1, "second") + raises(IndexError, m.group, 'foobarbaz') + raises(IndexError, m.group, 'first', 'foobarbaz') def test_expand(self): import re diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -711,8 +711,12 @@ raise ssl_error(space, "SSL_CTX_use_certificate_chain_file error") # ssl compatibility - libssl_SSL_CTX_set_options(ss.ctx, - SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) + options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS + if protocol != PY_SSL_VERSION_SSL2: + # SSLv2 is extremely broken, don't use it unless a user specifically + # requests it + options |= SSL_OP_NO_SSLv2 + libssl_SSL_CTX_set_options(ss.ctx, options) verification_mode = SSL_VERIFY_NONE if cert_mode == PY_SSL_CERT_OPTIONAL: @@ -724,7 +728,7 @@ libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address # of a str object may be changed by the garbage collector. - libssl_SSL_set_mode(ss.ssl, + libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -52,6 +52,7 @@ # weakref callbacks are not invoked eagerly here. They are # invoked by self.__del__() anyway. 
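Illustration only: the interp_sre change above makes unknown group names raise IndexError (matching CPython) instead of leaking the KeyError from the groupindex lookup:

    import re

    m = re.match(r'(?P<first>\d+)', '123abc')
    assert m.group('first') == '123'
    try:
        m.group('foobarbaz')
    except IndexError:
        pass          # "no such group"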
+ @jit.dont_look_inside def get_or_make_weakref(self, w_subtype, w_obj): space = self.space w_weakreftype = space.gettypeobject(W_Weakref.typedef) @@ -70,6 +71,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def get_or_make_proxy(self, w_obj): space = self.space if self.cached_proxy is not None: @@ -122,6 +124,7 @@ W_WeakrefBase.activate_callback, 'weakref callback of ') + @jit.dont_look_inside def make_weakref_with_callback(self, w_subtype, w_obj, w_callable): space = self.space w_ref = space.allocate_instance(W_Weakref, w_subtype) @@ -129,6 +132,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def make_proxy_with_callback(self, w_obj, w_callable): space = self.space if space.is_true(space.callable(w_obj)): @@ -239,15 +243,16 @@ w_obj.setweakref(space, lifeline) return lifeline - at jit.dont_look_inside + def get_or_make_weakref(space, w_subtype, w_obj): return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj) - at jit.dont_look_inside + def make_weakref_with_callback(space, w_subtype, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) + def descr__new__weakref(space, w_subtype, w_obj, w_callable=None, __args__=None): if __args__.arguments_w: @@ -312,15 +317,16 @@ w_obj = force(space, self) return space.call_args(w_obj, __args__) - at jit.dont_look_inside + def get_or_make_proxy(space, w_obj): return getlifeline(space, w_obj).get_or_make_proxy(w_obj) - at jit.dont_look_inside + def make_proxy_with_callback(space, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_proxy_with_callback(w_obj, w_callable) + def proxy(space, w_obj, w_callable=None): """Create a proxy object that weakly references 'obj'. 
'callback', if given, is called with the proxy as an argument when 'obj' diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -26,6 +26,7 @@ from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject from pypy.module.__builtin__.interp_memoryview import W_MemoryView +from pypy.module.micronumpy.base import W_NDimArray from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable @@ -399,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', 'init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', 'init_pycobject', + 'PyCObject_Type', '_Py_init_pycobject', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', 'init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -469,6 +470,7 @@ "Complex": "space.w_complex", "ByteArray": "space.w_bytearray", "MemoryView": "space.gettypeobject(W_MemoryView.typedef)", + "Array": "space.gettypeobject(W_NDimArray.typedef)", "BaseObject": "space.w_object", 'None': 'space.type(space.w_None)', 'NotImplemented': 'space.type(space.w_NotImplemented)', @@ -685,11 +687,15 @@ globals()['va_get_%s' % name_no_star] = func def setup_init_functions(eci, translating): - init_buffer = rffi.llexternal('init_bufferobject', [], lltype.Void, + if translating: + prefix = 'PyPy' + else: + prefix = 'cpyexttest' + init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_pycobject = rffi.llexternal('init_pycobject', [], lltype.Void, + init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_capsule = rffi.llexternal('init_capsule', [], lltype.Void, + init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) INIT_FUNCTIONS.extend([ lambda space: init_buffer(), @@ -697,12 +703,8 @@ lambda space: init_capsule(), ]) from pypy.module.posix.interp_posix import add_fork_hook - if translating: - reinit_tls = rffi.llexternal('PyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) - else: - reinit_tls = rffi.llexternal('PyPyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) + reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, + compilation_info=eci) add_fork_hook('child', reinit_tls) def init_function(func): @@ -744,7 +746,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=True, do_deref=True) + generate_macros(export_symbols, prefix='cpyexttest') # Structure declaration code members = [] @@ -810,7 +812,7 @@ 
INTERPLEVEL_API[name] = w_obj - name = name.replace('Py', 'PyPy') + name = name.replace('Py', 'cpyexttest') if isptr: ptr = ctypes.c_void_p.in_dll(bridge, name) if typ == 'PyObject*': @@ -822,7 +824,7 @@ ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value elif typ in ('PyObject*', 'PyTypeObject*'): - if name.startswith('PyPyExc_'): + if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, in_dll) @@ -857,28 +859,27 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') -def generate_macros(export_symbols, rename=True, do_deref=True): +def mangle_name(prefix, name): + if name.startswith('Py'): + return prefix + name[2:] + elif name.startswith('_Py'): + return '_' + prefix + name[3:] + else: + return None + +def generate_macros(export_symbols, prefix): "NOT_RPYTHON" pypy_macros = [] renamed_symbols = [] for name in export_symbols: - if name.startswith("PyPy"): - renamed_symbols.append(name) - continue - if not rename: - continue name = name.replace("#", "") - newname = name.replace('Py', 'PyPy') - if not rename: - newname = name + newname = mangle_name(prefix, name) + assert newname, name pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) - if rename: - export_symbols[:] = renamed_symbols - else: - export_symbols[:] = [sym.replace("#", "") for sym in export_symbols] + export_symbols[:] = renamed_symbols # Generate defines for macro_name, size in [ @@ -1039,7 +1040,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=False, do_deref=False) + generate_macros(export_symbols, prefix='PyPy') functions = generate_decls_and_callbacks(db, [], api_struct=False) code = "#include \n" + "\n".join(functions) @@ -1069,7 +1070,8 @@ export_struct(name, struct) for name, func in FUNCTIONS.iteritems(): - deco = entrypoint_lowlevel("cpyext", func.argtypes, name, relax=True) + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -37,7 +37,7 @@ PyObject* PyBuffer_New(Py_ssize_t size); -void init_bufferobject(void); +void _Py_init_bufferobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void init_capsule(void); +void _Py_init_capsule(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void init_pycobject(void); +void _Py_init_pycobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ 
-25,6 +25,22 @@ #define Py_UNICODE_SIZE 2 #endif +#ifndef Py_BUILD_CORE /* not building the core - must be an ext */ +# if defined(_MSC_VER) + /* So MSVC users need not specify the .lib file in + * their Makefile (other compilers are generally + * taken care of by distutils.) */ +# ifdef _DEBUG +# error("debug first with cpython") +# pragma comment(lib,"python27.lib") +# else +# pragma comment(lib,"python27.lib") +# endif /* _DEBUG */ +# endif +#endif /* _MSC_VER */ + + + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,7 +783,7 @@ return size; } -void init_bufferobject(void) +void _Py_init_bufferobject(void) { PyType_Ready(&PyBuffer_Type); } diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,7 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void init_capsule() +void _Py_init_capsule() { PyType_Ready(&PyCapsule_Type); } diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void init_pycobject() +void _Py_init_pycobject() { PyType_Ready(&PyCObject_Type); } diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -3,8 +3,6 @@ #include "numpy/arrayobject.h" #include /* memset, memcpy */ -PyTypeObject PyArray_Type; - void _PyArray_FILLWBYTE(PyObject* obj, int val) { memset(PyArray_DATA(obj), val, PyArray_NBYTES(obj)); diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -286,3 +286,19 @@ assert dt.num == 11 + def test_pass_ndarray_object_to_c(self): + from _numpypy.multiarray import ndarray + mod = self.import_extension('foo', [ + ("check_array", "METH_VARARGS", + ''' + PyObject* obj; + if (!PyArg_ParseTuple(args, "O!", &PyArray_Type, &obj)) + return NULL; + Py_INCREF(obj); + return obj; + '''), + ], prologue='#include ') + array = ndarray((3, 4), dtype='d') + assert mod.check_array(array) is array + raises(TypeError, "mod.check_array(42)") + diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -8,8 +8,10 @@ module = self.import_extension('foo', [ ("get_thread_ident", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - return PyInt_FromLong(PyPyThread_get_thread_ident()); +#ifndef PyThread_get_thread_ident +#error "seems we are not accessing PyPy's functions" +#endif + return PyInt_FromLong(PyThread_get_thread_ident()); """), ]) import thread, threading @@ -32,17 +34,19 @@ module = self.import_extension('foo', [ ("test_acquire_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - if (PyPyThread_acquire_lock(lock, 1) != 1) { +#ifndef PyThread_allocate_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + if (PyThread_acquire_lock(lock, 1) != 1) { 
PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - if (PyPyThread_acquire_lock(lock, 0) != 0) { + if (PyThread_acquire_lock(lock, 0) != 0) { PyErr_SetString(PyExc_AssertionError, "second acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), @@ -53,15 +57,17 @@ module = self.import_extension('foo', [ ("test_release_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - PyPyThread_acquire_lock(lock, 1); - PyPyThread_release_lock(lock); - if (PyPyThread_acquire_lock(lock, 0) != 1) { +#ifndef PyThread_release_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + PyThread_acquire_lock(lock, 1); + PyThread_release_lock(lock); + if (PyThread_acquire_lock(lock, 0) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -342,10 +342,8 @@ if space.is_w(w_startstop, space.w_None): start = 0 else: - start = space.int_w(w_startstop) - if start < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Indicies for islice() must be non-negative integers.")) + start = self.arg_int_w(w_startstop, 0, + "Indicies for islice() must be None or non-negative integers") w_stop = args_w[0] else: raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " given)")) @@ -353,10 +351,8 @@ if space.is_w(w_stop, space.w_None): stop = -1 else: - stop = space.int_w(w_stop) - if stop < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Stop argument must be a non-negative integer or None.")) + stop = self.arg_int_w(w_stop, 0, + "Stop argument must be a non-negative integer or None.") stop = max(start, stop) # for obscure CPython compatibility if num_args == 2: @@ -364,10 +360,8 @@ if space.is_w(w_step, space.w_None): step = 1 else: - step = space.int_w(w_step) - if step < 1: - raise OperationError(space.w_ValueError, space.wrap( - "Step must be one or lager for islice().")) + step = self.arg_int_w(w_step, 1, + "Step for islice() must be a positive integer or None") else: step = 1 @@ -375,6 +369,18 @@ self.start = start self.stop = stop + def arg_int_w(self, w_obj, minimum, errormsg): + space = self.space + try: + result = space.int_w(w_obj) + except OperationError, e: + if e.async(space): + raise + result = -1 + if result < minimum: + raise OperationError(space.w_ValueError, space.wrap(errormsg)) + return result + def iter_w(self): return self.space.wrap(self) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -304,6 +304,11 @@ raises(TypeError, itertools.islice, [], 0, 0, 0, 0) + # why not TypeError? 
Because CPython + raises(ValueError, itertools.islice, [], "a", 1, 2) + raises(ValueError, itertools.islice, [], 0, "a", 2) + raises(ValueError, itertools.islice, [], 0, 1, "a") + def test_chain(self): import itertools diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py --- a/pypy/module/math/app_math.py +++ b/pypy/module/math/app_math.py @@ -1,13 +1,40 @@ def factorial(x): - """Find x!.""" + """factorial(x) -> Integral + + "Find x!. Raise a ValueError if x is negative or non-integral.""" if isinstance(x, float): fl = int(x) if fl != x: raise ValueError("float arguments must be integral") x = fl - if x < 0: - raise ValueError("x must be >= 0") - res = 1 - for i in range(1, x + 1): - res *= i - return res + + if x <= 100: + if x < 0: + raise ValueError("x must be >= 0") + res = 1 + for i in range(2, x + 1): + res *= i + return res + + # Experimentally this gap seems good + gap = max(100, x >> 7) + def _fac_odd(low, high): + if low + gap >= high: + t = 1 + for i in range(low, high, 2): + t *= i + return t + + mid = ((low + high) >> 1) | 1 + return _fac_odd(low, mid) * _fac_odd(mid, high) + + def _fac1(x): + if x <= 2: + return 1, 1, x - 1 + x2 = x >> 1 + f, g, shift = _fac1(x2) + g *= _fac_odd((x2 + 1) | 1, x + 1) + return (f * g, g, shift + x2) + + res, _, shift = _fac1(x) + return res << shift diff --git a/pypy/module/math/test/test_factorial.py b/pypy/module/math/test/test_factorial.py new file mode 100644 --- /dev/null +++ b/pypy/module/math/test/test_factorial.py @@ -0,0 +1,29 @@ +import py +import math +from pypy.module.math import app_math + +def test_factorial_extra(): + for x in range(1000): + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + assert type(r1) == type(r2) + +def test_timing(): + py.test.skip("for manual running only") + import time + x = 5000 + repeat = 1000 + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + t1 = time.time() + for i in range(repeat): + app_math.factorial(x) + t2 = time.time() + for i in range(repeat): + math.factorial(x) + t3 = time.time() + assert r1 == r2 + print (t2 - t1) / repeat + print (t3 - t2) / repeat diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -10,7 +10,7 @@ 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', - 'ones': 'interp_numarray.ones', + 'empty_like': 'interp_numarray.empty_like', '_reconstruct' : 'interp_numarray._reconstruct', 'scalar' : 'interp_numarray.build_scalar', 'dot': 'interp_arrayops.dot', @@ -106,8 +106,6 @@ ('logaddexp2', 'logaddexp2'), ('real', 'real'), ('imag', 'imag'), - ('ones_like', 'ones_like'), - ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -12,7 +12,9 @@ if dtype is None: test = _numpypy.multiarray.array([start, stop, step, 0]) dtype = test.dtype - arr = _numpypy.multiarray.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + length = math.ceil((float(stop) - start) / step) + length = int(length) + arr = _numpypy.multiarray.zeros(length, dtype=dtype) i = start for j in range(arr.size): arr[j] = i diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- 
a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -47,7 +47,7 @@ def setslice(self, space, arr): impl = arr.implementation if impl.is_scalar(): - self.fill(impl.get_scalar_value()) + self.fill(space, impl.get_scalar_value()) return shape = shape_agreement(space, self.get_shape(), arr) if impl.storage == self.storage: @@ -100,7 +100,7 @@ tmp = self.get_real(orig_array) tmp.setslice(space, convert_to_array(space, w_value)) - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() if self.dtype.is_complex_type(): @@ -110,11 +110,11 @@ impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) if not self.dtype.is_flexible_type(): - impl.fill(self.dtype.box(0)) + impl.fill(space, self.dtype.box(0)) return impl def set_imag(self, space, orig_array, w_value): - tmp = self.get_imag(orig_array) + tmp = self.get_imag(space, orig_array) tmp.setslice(space, convert_to_array(space, w_value)) # -------------------- applevel get/setitem ----------------------- @@ -357,7 +357,7 @@ self.get_backstrides(), self.get_shape()) - def fill(self, box): + def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), box, 0, self.size, 0) @@ -392,6 +392,21 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) +class ConcreteArrayWithBase(ConcreteArrayNotOwning): + def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): + ConcreteArrayNotOwning.__init__(self, shape, dtype, order, + strides, backstrides, storage) + self.orig_base = orig_base + + def base(self): + return self.orig_base + + +class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) + class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): @@ -420,8 +435,8 @@ def base(self): return self.orig_arr - def fill(self, box): - loop.fill(self, box.convert_to(self.dtype)) + def fill(self, space, box): + loop.fill(self, box.convert_to(space, self.dtype)) def create_iter(self, shape=None, backward_broadcast=False, require_index=False): if shape is not None and \ @@ -487,3 +502,6 @@ def getlength(self): return self.impl.size + + def get_raw_address(self): + return self.impl.storage diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -54,8 +54,7 @@ return self.value def set_scalar_value(self, w_val): - assert isinstance(w_val, W_GenericBox) - self.value = w_val.convert_to(self.dtype) + self.value = w_val def copy(self, space): scalar = Scalar(self.dtype) @@ -96,12 +95,12 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) if self.dtype.is_complex_type(): self.value = self.dtype.itemtype.composite( - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), self.value.convert_imag_to(dtype)) else: self.value = w_arr.get_scalar_value() - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): if self.dtype.is_complex_type(): scalar = Scalar(self.dtype.float_type) scalar.value = self.value.convert_imag_to(scalar.dtype) @@ -125,13 +124,23 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) self.value = 
self.dtype.itemtype.composite( self.value.convert_real_to(dtype), - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), ) From noreply at buildbot.pypy.org Mon Jan 13 12:58:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 13 Jan 2014 12:58:00 +0100 (CET) Subject: [pypy-commit] pypy default: Kill dead code Message-ID: <20140113115800.C66D31C10A8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68633:1e10a4e76eff Date: 2014-01-13 12:57 +0100 http://bitbucket.org/pypy/pypy/changeset/1e10a4e76eff/ Log: Kill dead code diff --git a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py --- a/rpython/jit/metainterp/optimizeopt/earlyforce.py +++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py @@ -28,8 +28,5 @@ value.force_box(self) self.emit_operation(op) - def new(self): - return OptEarlyForce() - def setup(self): self.optimizer.optearlyforce = self diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -184,9 +184,6 @@ self.postponed_op = None self.next_optimization.propagate_forward(postponed_op) - def new(self): - return OptHeap() - def produce_potential_short_preamble_ops(self, sb): descrkeys = self.cached_fields.keys() if not we_are_translated(): diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -13,9 +13,6 @@ """Keeps track of the bounds placed on integers by guards and remove redundant guards""" - def new(self): - return OptIntBounds() - def propagate_forward(self, op): dispatch_opt(self, op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -323,10 +323,6 @@ def force_at_end_of_preamble(self): pass - # It is too late to force stuff here, it must be done in force_at_end_of_preamble - def new(self): - raise NotImplementedError - # Called after last operation has been propagated to flush out any posponed ops def flush(self): pass @@ -390,16 +386,6 @@ for o in self.optimizations: o.flush() - def new(self): - new = Optimizer(self.metainterp_sd, self.loop) - return self._new(new) - - def _new(self, new): - optimizations = [o.new() for o in self.optimizations] - new.set_optimizations(optimizations) - new.quasi_immutable_deps = self.quasi_immutable_deps - return new - def produce_potential_short_preamble_ops(self, sb): for opt in self.optimizations: opt.produce_potential_short_preamble_ops(sb) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -85,10 +85,6 @@ def flush(self): assert self.postponed_op is None - def new(self): - assert self.postponed_op is None - return OptPure() - def setup(self): self.optimizer.optpure = self diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -20,9 +20,6 @@ self.loop_invariant_results = {} self.loop_invariant_producer = {} - def new(self): - return 
OptRewrite() - def produce_potential_short_preamble_ops(self, sb): for op in self.loop_invariant_producer.values(): sb.add_potential(op) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -41,10 +41,6 @@ self.emitted_guards += 1 # FIXME: can we use counter in self._emit_operation? self._emit_operation(op) - def new(self): - new = UnrollableOptimizer(self.metainterp_sd, self.loop) - return self._new(new) - class UnrollOptimizer(Optimization): """Unroll the loop into two iterations. The first one will diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -497,9 +497,6 @@ _last_guard_not_forced_2 = None - def new(self): - return OptVirtualize() - def make_virtual(self, known_class, box, source_op=None): vvalue = VirtualValue(self.optimizer.cpu, known_class, box, source_op) self.make_equal_to(box, vvalue) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -388,8 +388,6 @@ class OptString(optimizer.Optimization): "Handling of strings and unicodes." - def new(self): - return OptString() def make_vstring_plain(self, box, source_op, mode): vvalue = VStringPlainValue(box, source_op, mode) From noreply at buildbot.pypy.org Mon Jan 13 14:20:50 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 13 Jan 2014 14:20:50 +0100 (CET) Subject: [pypy-commit] pypy refactor-translator: hg merge default Message-ID: <20140113132050.E36B31D23C0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-translator Changeset: r68634:56ad7eafa31c Date: 2014-01-13 13:30 +0100 http://bitbucket.org/pypy/pypy/changeset/56ad7eafa31c/ Log: hg merge default diff too long, truncating to 2000 out of 12117 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. 
-PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -993,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, 
self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. 
All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. 
@@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -330,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -992,13 +1000,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1021,12 +1034,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1176,11 +1194,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1193,7 +1219,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1312,7 +1338,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- 
/dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,29 @@ + +import struct + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + if size == 1: + return struct.unpack_from("B", buffer(cp)[i:])[0] + elif size == 2: + return struct.unpack_from("H", buffer(cp)[i * 2:])[0] + elif size == 4: + return struct.unpack_from("I", buffer(cp)[i * 4:])[0] diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -878,7 +878,6 @@ month = self._month if day is None: day = self._day - year, month, day = _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. @@ -1389,8 +1388,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __nonzero__(self): @@ -1608,9 +1605,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py --- a/lib_pypy/pyrepl/simple_interact.py +++ b/lib_pypy/pyrepl/simple_interact.py @@ -63,3 +63,6 @@ except KeyboardInterrupt: console.write("\nKeyboardInterrupt\n") console.resetbuffer() + except MemoryError: + console.write("\nMemoryError\n") + console.resetbuffer() diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -38,7 +38,7 @@ # General information about the project. project = u'PyPy' -copyright = u'2013, The PyPy Project' +copyright = u'2014, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.2.0' +release = '2.2.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -83,7 +83,7 @@ _winreg -* Supported by being rewritten in pure Python (possibly using ``ctypes``): +* Supported by being rewritten in pure Python (possibly using ``cffi``): see the `lib_pypy/`_ directory. Examples of modules that we support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``... 
Note that some modules are both in there and in the list above; @@ -316,5 +316,4 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). - .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,7 +72,13 @@ Here is a list of the limitations and missing features of the current implementation: -* No support for ``PyXxx`` functions from ``libpython``, for obvious reasons. +* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer + of PyPy, at your own risks and without doing anything sensible about + the GIL. Since PyPy 2.3, these functions are also named with an extra + "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, + but it might more or less work in simple cases if you do. (Obviously, + assuming the PyObject pointers you get have any particular fields in + any particular order is just going to crash.) * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,119 +5,68 @@ This document tries to explain how to interface the PyPy python interpreter with any external library. -Note: We try to describe state-of-the art, but it -might fade out of date as this is the front on which things are changing -in pypy rapidly. +Right now, there are the following possibilities of providing +third-party modules for the PyPy python interpreter (in order of +usefulness): -Possibilities -============= +* Write them in pure Python and use CFFI_. -Right now, there are three possibilities of providing third-party modules -for the PyPy python interpreter (in order of usefulness): +* Write them in pure Python and use ctypes_. -* Write them in pure python and use ctypes, see ctypes_ - section +* Write them in C++ and bind them through Reflex_. -* Write them in pure python and use direct libffi low-level bindings, See - \_ffi_ module description. +* Write them in as `RPython mixed modules`_. -* Write them in RPython as mixedmodule_, using *rffi* as bindings. -* Write them in C++ and bind them through Reflex_ +CFFI +==== -.. _ctypes: #CTypes -.. _\_ffi: #LibFFI -.. _mixedmodule: #Mixed Modules +CFFI__ is the recommended way. It is a way to write pure Python code +that accesses C libraries. The idea is to support either ABI- or +API-level access to C --- so that you can sanely access C libraries +without depending on details like the exact field order in the C +structures or the numerical value of all the constants. It works on +both CPython (as a separate ``pip install cffi``) and on PyPy, where it +is included by default. + +PyPy's JIT does a quite reasonable job on the Python code that call C +functions or manipulate C pointers with CFFI. (As of PyPy 2.2.1, it +could still be improved, but is already good.) + +See the documentation here__. + +.. __: http://cffi.readthedocs.org/ +.. __: http://cffi.readthedocs.org/ + CTypes ====== -The ctypes module in PyPy is ready to use. -It's goal is to be as-compatible-as-possible with the -`CPython ctypes`_ version. Right now it's able to support large examples, -such as pyglet. 
PyPy is planning to have a 100% compatible ctypes -implementation, without the CPython C-level API bindings (so it is very -unlikely that direct object-manipulation trickery through this API will work). +The goal of the ctypes module of PyPy is to be as compatible as possible +with the `CPython ctypes`_ version. It works for large examples, such +as pyglet. PyPy's implementation is not strictly 100% compatible with +CPython, but close enough for most cases. -We also provide a `ctypes-configure`_ for overcoming the platform dependencies, -not relying on the ctypes codegen. This tool works by querying gcc about -platform-dependent details (compiling small snippets of C code and running -them), so it'll benefit not pypy-related ctypes-based modules as well. +We also used to provide ``ctypes-configure`` for some API-level access. +This is now viewed as a precursor of CFFI, which you should use instead. +More (but older) information is available here__. +Also, ctypes' performance is not as good as CFFI's. -ctypes call are optimized by the JIT and the resulting machine code contains a -direct call to the target C function. However, due to the very dynamic nature -of ctypes, some overhead over a bare C call is still present, in particular to -check/convert the types of the parameters. Moreover, even if most calls are -optimized, some cannot and thus need to follow the slow path, not optimized by -the JIT. +.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +.. __: ctypes-implementation.html -.. _`ctypes-configure`: ctypes-implementation.html#ctypes-configure -.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +PyPy implements ctypes as pure Python code around two built-in modules +called ``_ffi`` and ``_rawffi``, which give a very low-level binding to +the C library libffi_. Nowadays it is not recommended to use directly +these two modules. -Pros ----- +.. _libffi: http://sourceware.org/libffi/ -Stable, CPython-compatible API. Most calls are fast, optimized by JIT. - -Cons ----- - -Problems with platform-dependency (although we partially solve -those). Although the JIT optimizes ctypes calls, some overhead is still -present. The slow-path is very slow. - - -LibFFI -====== - -Mostly in order to be able to write a ctypes module, we developed a very -low-level libffi bindings called ``_ffi``. (libffi is a C-level library for dynamic calling, -which is used by CPython ctypes). This library provides stable and usable API, -although it's API is a very low-level one. It does not contain any -magic. It is also optimized by the JIT, but has much less overhead than ctypes. - -Pros ----- - -It Works. Probably more suitable for a delicate code where ctypes magic goes -in a way. All calls are optimized by the JIT, there is no slow path as in -ctypes. - -Cons ----- - -It combines disadvantages of using ctypes with disadvantages of using mixed -modules. CPython-incompatible API, very rough and low-level. - -Mixed Modules -============= - -This is the most advanced and powerful way of writing extension modules. -It has some serious disadvantages: - -* a mixed module needs to be written in RPython, which is far more - complicated than Python (XXX link) - -* due to lack of separate compilation (as of July 2011), each - compilation-check requires to recompile whole PyPy python interpreter, - which takes 0.5-1h. We plan to solve this at some point in near future. 
- -* although rpython is a garbage-collected language, the border between - C and RPython needs to be managed by hand (each object that goes into the - C level must be explicitly freed). - -Some documentation is available `here`_ - -.. _`here`: rffi.html - -XXX we should provide detailed docs about lltype and rffi, especially if we - want people to follow that way. Reflex ====== -This method is still experimental and is being exercised on a branch, -`reflex-support`_, which adds the `cppyy`_ module. +This method is still experimental. It adds the `cppyy`_ module. The method works by using the `Reflex package`_ to provide reflection information of the C++ code, which is then used to automatically generate bindings at runtime. @@ -168,3 +117,15 @@ to work around it in python or with a C++ helper function. Although Reflex works on various platforms, the bindings with PyPy have only been tested on Linux. + + +RPython Mixed Modules +===================== + +This is the internal way to write built-in extension modules in PyPy. +It cannot be used by any 3rd-party module: the extension modules are +*built-in*, not independently loadable DLLs. + +This is reserved for special cases: it gives direct access to e.g. the +details of the JIT, allowing us to tweak its interaction with user code. +This is how the numpy module is being developed. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -72,13 +72,13 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf -.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf -.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf +.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. 
_`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -444,7 +444,7 @@ So the position of the core PyPy developers is that if anyone wants to make an N+1'th attempt with LLVM, they are welcome, and will be happy to provide help in the IRC channel, but they are left with the burden of proof -that it works. +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.2.0`_: the latest official release +* `Release 2.2.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.0`: http://pypy.org/download.html +.. _`Release 2.2.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.2.1.rst b/pypy/doc/release-2.2.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.2.1.rst @@ -0,0 +1,47 @@ +======================================= +PyPy 2.2.1 - Incrementalism.1 +======================================= + +We're pleased to announce PyPy 2.2.1, which targets version 2.7.3 of the Python +language. This is a bugfix release over 2.2. + +You can download the PyPy 2.2.1 release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +This is a bugfix release. The most important bugs fixed are: + +* an issue in sockets' reference counting emulation, showing up + notably when using the ssl module and calling ``makefile()``. + +* Tkinter support on Windows. + +* If sys.maxunicode==65535 (on Windows and maybe OS/X), the json + decoder incorrectly decoded surrogate pairs. + +* some FreeBSD fixes. + +Note that CFFI 0.8.1 was released. Both versions 0.8 and 0.8.1 are +compatible with both PyPy 2.2 and 2.2.1. + + +Cheers, +Armin Rigo & everybody diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,3 +15,29 @@ .. branch: armhf-singlefloat JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. 
branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -47,6 +47,9 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): + return False + # __________ app-level support __________ def descr_len(self, space): @@ -135,6 +138,9 @@ __slots__ = () # no extra slot here + def is_writable(self): + return True + def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -805,8 +805,8 @@ raise TypeError("Varargs and keywords not supported in unwrap_spec") argspec = ', '.join([arg for arg in args.args[1:]]) func_code = py.code.Source(""" - def f(w_obj, %(args)s): - return w_obj.%(func_name)s(%(args)s) + def f(self, %(args)s): + return self.%(func_name)s(%(args)s) """ % {'args': argspec, 'func_name': func.func_name}) d = {} exec func_code.compile() in d @@ -821,7 +821,7 @@ else: assert isinstance(unwrap_spec, dict) unwrap_spec = unwrap_spec.copy() - unwrap_spec['w_obj'] = base_cls + unwrap_spec['self'] = base_cls return interp2app(globals()['unwrap_spec'](**unwrap_spec)(f)) class interp2app(W_Root): diff --git a/pypy/module/__builtin__/app_operation.py b/pypy/module/__builtin__/app_operation.py --- a/pypy/module/__builtin__/app_operation.py +++ b/pypy/module/__builtin__/app_operation.py @@ -1,4 +1,5 @@ +import operator + def bin(x): - if not isinstance(x, (int, long)): - raise TypeError("must be int or long") - return x.__format__("#b") + value = operator.index(x) + return value.__format__("#b") diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -46,6 +46,15 @@ assert bin(2L) == "0b10" assert bin(-2L) == "-0b10" raises(TypeError, bin, 0.) 
+ class C(object): + def __index__(self): + return 42 + assert bin(C()) == bin(42) + class D(object): + def __int__(self): + return 42 + exc = raises(TypeError, bin, D()) + assert "index" in exc.value.message def test_unichr(self): import sys diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -5,7 +5,9 @@ from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw class LLBuffer(RWBuffer): @@ -34,8 +36,7 @@ def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) - for i in range(len(string)): - raw_cdata[i] = string[i] + copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) class MiniBuffer(W_Root): @@ -59,6 +60,9 @@ def descr__buffer__(self, space): return self.buffer.descr__buffer__(space) + def descr_str(self, space): + return space.wrap(self.buffer.as_str()) + MiniBuffer.typedef = TypeDef( "buffer", @@ -68,6 +72,7 @@ __setitem__ = interp2app(MiniBuffer.descr_setitem), __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), + __str__ = interp2app(MiniBuffer.descr_str), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr -from rpython.rlib import objectmodel, rgc +from rpython.rlib import rgc from rpython.rlib.objectmodel import keepalive_until_here, specialize from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name @@ -206,8 +206,7 @@ w_value.get_array_length() == length): # fast path: copying from exactly the correct type s = w_value._cdata - for i in range(ctitemsize * length): - cdata[i] = s[i] + rffi.c_memcpy(cdata, s, ctitemsize * length) keepalive_until_here(w_value) return # @@ -259,7 +258,6 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray - from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr @@ -424,6 +422,12 @@ def get_array_length(self): return self.length + def _sizeof(self): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + return self.length * ctype.ctitem.size + class W_CDataHandle(W_CData): _attrs_ = ['w_keepalive'] diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -42,7 +42,7 @@ if self.handles[d]() is None: self.look_distance = d + 1 return d - # full! extend, but don't use '!=' here + # full! 
extend, but don't use '+=' here self.handles = self.handles + [dead_ref] * (length // 3 + 5) self.look_distance = length + 1 return length diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2143,7 +2143,13 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert str(buf).startswith('<_cffi_backend.buffer object at 0x') + assert repr(buf).startswith('<_cffi_backend.buffer object at 0x') + assert bytes(buf) == b"hi there\x00" + if sys.version_info < (3,): + assert str(buf) == "hi there\x00" + assert unicode(buf) == u+"hi there\x00" + else: + assert str(buf) == repr(buf) # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- @@ -3125,6 +3131,12 @@ py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") +def test_sizeof_sliced_array(): + BInt = new_primitive_type("int") + BArray = new_array_type(new_pointer_type(BInt), 10) + p = newp(BArray, None) + assert sizeof(p[2:9]) == 7 * sizeof(BInt) + def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -15,6 +15,7 @@ from pypy.module._rawffi.interp_rawffi import unpack_shape_with_length from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib.rarithmetic import r_uint +from rpython.rlib import rgc class W_Array(W_DataShape): @@ -220,6 +221,7 @@ def __init__(self, space, shape, length): W_ArrayInstance.__init__(self, space, shape, length, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -16,7 +16,7 @@ from pypy.module._rawffi.interp_rawffi import unroll_letters_for_numbers from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr -from rpython.rlib import clibffi +from rpython.rlib import clibffi, rgc from rpython.rlib.rarithmetic import intmask, signedtype, widen from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong @@ -226,6 +226,7 @@ fieldtypes) return self.ffi_struct.ffistruct + @rgc.must_be_light_finalizer def __del__(self): if self.ffi_struct: lltype.free(self.ffi_struct, flavor='raw') @@ -380,6 +381,7 @@ def __init__(self, space, shape): W_StructureInstance.__init__(self, space, shape, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() diff --git a/pypy/module/_rawffi/test/test_tracker.py b/pypy/module/_rawffi/test/test_tracker.py --- a/pypy/module/_rawffi/test/test_tracker.py +++ b/pypy/module/_rawffi/test/test_tracker.py @@ -1,9 +1,21 @@ +import py +from pypy.conftest import option from pypy.module._rawffi.tracker import Tracker + class AppTestTracker: spaceconfig = dict(usemodules=['_rawffi', 'struct']) def setup_class(cls): + # + # detect if we're running on PyPy with DO_TRACING not compiled in + if option.runappdirect: + try: + import _rawffi + _rawffi._num_of_allocated_objects() + except (ImportError, RuntimeError), e: + py.test.skip(str(e)) + # Tracker.DO_TRACING = True def test_array(self): diff --git a/pypy/module/_rawffi/tracker.py b/pypy/module/_rawffi/tracker.py --- 
a/pypy/module/_rawffi/tracker.py +++ b/pypy/module/_rawffi/tracker.py @@ -2,9 +2,11 @@ """ The file that keeps track about freed/kept-alive objects allocated by _rawffi. Used for debugging ctypes """ +from pypy.interpreter.error import OperationError + class Tracker(object): - DO_TRACING = True + DO_TRACING = False # make sure this stays False by default! def __init__(self): self.alloced = {} @@ -20,6 +22,9 @@ tracker = Tracker() def num_of_allocated_objects(space): + if not tracker.DO_TRACING: + raise OperationError(space.w_RuntimeError, + space.wrap("DO_TRACING not enabled in this PyPy")) return space.wrap(len(tracker.alloced)) def print_alloced_objects(space): diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -415,7 +415,13 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + try: + w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + raise OperationError(space.w_IndexError, + space.wrap("no such group")) groupnum = space.int_w(w_groupnum) if groupnum == 0: return self.ctx.match_start, self.ctx.match_end diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -172,6 +172,9 @@ assert ("1", "1", None) == m.group(1, 2, 3) assert ("1", None) == m.group("first", "second") raises(IndexError, m.group, 1, 4) + assert ("1", None) == m.group(1, "second") + raises(IndexError, m.group, 'foobarbaz') + raises(IndexError, m.group, 'first', 'foobarbaz') def test_expand(self): import re diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -711,8 +711,12 @@ raise ssl_error(space, "SSL_CTX_use_certificate_chain_file error") # ssl compatibility - libssl_SSL_CTX_set_options(ss.ctx, - SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) + options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS + if protocol != PY_SSL_VERSION_SSL2: + # SSLv2 is extremely broken, don't use it unless a user specifically + # requests it + options |= SSL_OP_NO_SSLv2 + libssl_SSL_CTX_set_options(ss.ctx, options) verification_mode = SSL_VERIFY_NONE if cert_mode == PY_SSL_CERT_OPTIONAL: @@ -724,7 +728,7 @@ libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address # of a str object may be changed by the garbage collector. - libssl_SSL_set_mode(ss.ssl, + libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -52,6 +52,7 @@ # weakref callbacks are not invoked eagerly here. They are # invoked by self.__del__() anyway. 
+ @jit.dont_look_inside def get_or_make_weakref(self, w_subtype, w_obj): space = self.space w_weakreftype = space.gettypeobject(W_Weakref.typedef) @@ -70,6 +71,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def get_or_make_proxy(self, w_obj): space = self.space if self.cached_proxy is not None: @@ -122,6 +124,7 @@ W_WeakrefBase.activate_callback, 'weakref callback of ') + @jit.dont_look_inside def make_weakref_with_callback(self, w_subtype, w_obj, w_callable): space = self.space w_ref = space.allocate_instance(W_Weakref, w_subtype) @@ -129,6 +132,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def make_proxy_with_callback(self, w_obj, w_callable): space = self.space if space.is_true(space.callable(w_obj)): @@ -239,15 +243,16 @@ w_obj.setweakref(space, lifeline) return lifeline - at jit.dont_look_inside + def get_or_make_weakref(space, w_subtype, w_obj): return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj) - at jit.dont_look_inside + def make_weakref_with_callback(space, w_subtype, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) + def descr__new__weakref(space, w_subtype, w_obj, w_callable=None, __args__=None): if __args__.arguments_w: @@ -312,15 +317,16 @@ w_obj = force(space, self) return space.call_args(w_obj, __args__) - at jit.dont_look_inside + def get_or_make_proxy(space, w_obj): return getlifeline(space, w_obj).get_or_make_proxy(w_obj) - at jit.dont_look_inside + def make_proxy_with_callback(space, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_proxy_with_callback(w_obj, w_callable) + def proxy(space, w_obj, w_callable=None): """Create a proxy object that weakly references 'obj'. 
'callback', if given, is called with the proxy as an argument when 'obj' diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -26,6 +26,7 @@ from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject from pypy.module.__builtin__.interp_memoryview import W_MemoryView +from pypy.module.micronumpy.base import W_NDimArray from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable @@ -399,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', 'init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', 'init_pycobject', + 'PyCObject_Type', '_Py_init_pycobject', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', 'init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -469,6 +470,7 @@ "Complex": "space.w_complex", "ByteArray": "space.w_bytearray", "MemoryView": "space.gettypeobject(W_MemoryView.typedef)", + "Array": "space.gettypeobject(W_NDimArray.typedef)", "BaseObject": "space.w_object", 'None': 'space.type(space.w_None)', 'NotImplemented': 'space.type(space.w_NotImplemented)', @@ -685,11 +687,15 @@ globals()['va_get_%s' % name_no_star] = func def setup_init_functions(eci, translating): - init_buffer = rffi.llexternal('init_bufferobject', [], lltype.Void, + if translating: + prefix = 'PyPy' + else: + prefix = 'cpyexttest' + init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_pycobject = rffi.llexternal('init_pycobject', [], lltype.Void, + init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_capsule = rffi.llexternal('init_capsule', [], lltype.Void, + init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) INIT_FUNCTIONS.extend([ lambda space: init_buffer(), @@ -697,12 +703,8 @@ lambda space: init_capsule(), ]) from pypy.module.posix.interp_posix import add_fork_hook - if translating: - reinit_tls = rffi.llexternal('PyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) - else: - reinit_tls = rffi.llexternal('PyPyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) + reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, + compilation_info=eci) add_fork_hook('child', reinit_tls) def init_function(func): @@ -744,7 +746,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=True, do_deref=True) + generate_macros(export_symbols, prefix='cpyexttest') # Structure declaration code members = [] @@ -810,7 +812,7 @@ 
INTERPLEVEL_API[name] = w_obj - name = name.replace('Py', 'PyPy') + name = name.replace('Py', 'cpyexttest') if isptr: ptr = ctypes.c_void_p.in_dll(bridge, name) if typ == 'PyObject*': @@ -822,7 +824,7 @@ ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value elif typ in ('PyObject*', 'PyTypeObject*'): - if name.startswith('PyPyExc_'): + if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, in_dll) @@ -857,28 +859,27 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') -def generate_macros(export_symbols, rename=True, do_deref=True): +def mangle_name(prefix, name): + if name.startswith('Py'): + return prefix + name[2:] + elif name.startswith('_Py'): + return '_' + prefix + name[3:] + else: + return None + +def generate_macros(export_symbols, prefix): "NOT_RPYTHON" pypy_macros = [] renamed_symbols = [] for name in export_symbols: - if name.startswith("PyPy"): - renamed_symbols.append(name) - continue - if not rename: - continue name = name.replace("#", "") - newname = name.replace('Py', 'PyPy') - if not rename: - newname = name + newname = mangle_name(prefix, name) + assert newname, name pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) - if rename: - export_symbols[:] = renamed_symbols - else: - export_symbols[:] = [sym.replace("#", "") for sym in export_symbols] + export_symbols[:] = renamed_symbols # Generate defines for macro_name, size in [ @@ -1039,7 +1040,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=False, do_deref=False) + generate_macros(export_symbols, prefix='PyPy') functions = generate_decls_and_callbacks(db, [], api_struct=False) code = "#include <Python.h>\n" + "\n".join(functions) @@ -1069,7 +1070,8 @@ export_struct(name, struct) for name, func in FUNCTIONS.iteritems(): - deco = entrypoint_lowlevel("cpyext", func.argtypes, name, relax=True) + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -37,7 +37,7 @@ PyObject* PyBuffer_New(Py_ssize_t size); -void init_bufferobject(void); +void _Py_init_bufferobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void init_capsule(void); +void _Py_init_capsule(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void init_pycobject(void); +void _Py_init_pycobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ 
-25,6 +25,22 @@ #define Py_UNICODE_SIZE 2 #endif +#ifndef Py_BUILD_CORE /* not building the core - must be an ext */ +# if defined(_MSC_VER) + /* So MSVC users need not specify the .lib file in + * their Makefile (other compilers are generally + * taken care of by distutils.) */ +# ifdef _DEBUG +# error("debug first with cpython") +# pragma comment(lib,"python27.lib") +# else +# pragma comment(lib,"python27.lib") +# endif /* _DEBUG */ +# endif +#endif /* _MSC_VER */ + + + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,7 +783,7 @@ return size; } -void init_bufferobject(void) +void _Py_init_bufferobject(void) { PyType_Ready(&PyBuffer_Type); } diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,7 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void init_capsule() +void _Py_init_capsule() { PyType_Ready(&PyCapsule_Type); } diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void init_pycobject() +void _Py_init_pycobject() { PyType_Ready(&PyCObject_Type); } diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -3,8 +3,6 @@ #include "numpy/arrayobject.h" #include <string.h> /* memset, memcpy */ -PyTypeObject PyArray_Type; - void _PyArray_FILLWBYTE(PyObject* obj, int val) { memset(PyArray_DATA(obj), val, PyArray_NBYTES(obj)); diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -286,3 +286,19 @@ assert dt.num == 11 + def test_pass_ndarray_object_to_c(self): + from _numpypy.multiarray import ndarray + mod = self.import_extension('foo', [ + ("check_array", "METH_VARARGS", + ''' + PyObject* obj; + if (!PyArg_ParseTuple(args, "O!", &PyArray_Type, &obj)) + return NULL; + Py_INCREF(obj); + return obj; + '''), + ], prologue='#include <numpy/arrayobject.h>') + array = ndarray((3, 4), dtype='d') + assert mod.check_array(array) is array + raises(TypeError, "mod.check_array(42)") + diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -8,8 +8,10 @@ module = self.import_extension('foo', [ ("get_thread_ident", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - return PyInt_FromLong(PyPyThread_get_thread_ident()); +#ifndef PyThread_get_thread_ident +#error "seems we are not accessing PyPy's functions" +#endif + return PyInt_FromLong(PyThread_get_thread_ident()); """), ]) import thread, threading @@ -32,17 +34,19 @@ module = self.import_extension('foo', [ ("test_acquire_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - if (PyPyThread_acquire_lock(lock, 1) != 1) { +#ifndef PyThread_allocate_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + if (PyThread_acquire_lock(lock, 1) != 1) { 
PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - if (PyPyThread_acquire_lock(lock, 0) != 0) { + if (PyThread_acquire_lock(lock, 0) != 0) { PyErr_SetString(PyExc_AssertionError, "second acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), @@ -53,15 +57,17 @@ module = self.import_extension('foo', [ ("test_release_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - PyPyThread_acquire_lock(lock, 1); - PyPyThread_release_lock(lock); - if (PyPyThread_acquire_lock(lock, 0) != 1) { +#ifndef PyThread_release_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + PyThread_acquire_lock(lock, 1); + PyThread_release_lock(lock); + if (PyThread_acquire_lock(lock, 0) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -342,10 +342,8 @@ if space.is_w(w_startstop, space.w_None): start = 0 else: - start = space.int_w(w_startstop) - if start < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Indicies for islice() must be non-negative integers.")) + start = self.arg_int_w(w_startstop, 0, + "Indicies for islice() must be None or non-negative integers") w_stop = args_w[0] else: raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " given)")) @@ -353,10 +351,8 @@ if space.is_w(w_stop, space.w_None): stop = -1 else: - stop = space.int_w(w_stop) - if stop < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Stop argument must be a non-negative integer or None.")) + stop = self.arg_int_w(w_stop, 0, + "Stop argument must be a non-negative integer or None.") stop = max(start, stop) # for obscure CPython compatibility if num_args == 2: @@ -364,10 +360,8 @@ if space.is_w(w_step, space.w_None): step = 1 else: - step = space.int_w(w_step) - if step < 1: - raise OperationError(space.w_ValueError, space.wrap( - "Step must be one or lager for islice().")) + step = self.arg_int_w(w_step, 1, + "Step for islice() must be a positive integer or None") else: step = 1 @@ -375,6 +369,18 @@ self.start = start self.stop = stop + def arg_int_w(self, w_obj, minimum, errormsg): + space = self.space + try: + result = space.int_w(w_obj) + except OperationError, e: + if e.async(space): + raise + result = -1 + if result < minimum: + raise OperationError(space.w_ValueError, space.wrap(errormsg)) + return result + def iter_w(self): return self.space.wrap(self) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -304,6 +304,11 @@ raises(TypeError, itertools.islice, [], 0, 0, 0, 0) + # why not TypeError? 
Because CPython + raises(ValueError, itertools.islice, [], "a", 1, 2) + raises(ValueError, itertools.islice, [], 0, "a", 2) + raises(ValueError, itertools.islice, [], 0, 1, "a") + def test_chain(self): import itertools diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py --- a/pypy/module/math/app_math.py +++ b/pypy/module/math/app_math.py @@ -1,13 +1,40 @@ def factorial(x): - """Find x!.""" + """factorial(x) -> Integral + + "Find x!. Raise a ValueError if x is negative or non-integral.""" if isinstance(x, float): fl = int(x) if fl != x: raise ValueError("float arguments must be integral") x = fl - if x < 0: - raise ValueError("x must be >= 0") - res = 1 - for i in range(1, x + 1): - res *= i - return res + + if x <= 100: + if x < 0: + raise ValueError("x must be >= 0") + res = 1 + for i in range(2, x + 1): + res *= i + return res + + # Experimentally this gap seems good + gap = max(100, x >> 7) + def _fac_odd(low, high): + if low + gap >= high: + t = 1 + for i in range(low, high, 2): + t *= i + return t + + mid = ((low + high) >> 1) | 1 + return _fac_odd(low, mid) * _fac_odd(mid, high) + + def _fac1(x): + if x <= 2: + return 1, 1, x - 1 + x2 = x >> 1 + f, g, shift = _fac1(x2) + g *= _fac_odd((x2 + 1) | 1, x + 1) + return (f * g, g, shift + x2) + + res, _, shift = _fac1(x) + return res << shift diff --git a/pypy/module/math/test/test_factorial.py b/pypy/module/math/test/test_factorial.py new file mode 100644 --- /dev/null +++ b/pypy/module/math/test/test_factorial.py @@ -0,0 +1,29 @@ +import py +import math +from pypy.module.math import app_math + +def test_factorial_extra(): + for x in range(1000): + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + assert type(r1) == type(r2) + +def test_timing(): + py.test.skip("for manual running only") + import time + x = 5000 + repeat = 1000 + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + t1 = time.time() + for i in range(repeat): + app_math.factorial(x) + t2 = time.time() + for i in range(repeat): + math.factorial(x) + t3 = time.time() + assert r1 == r2 + print (t2 - t1) / repeat + print (t3 - t2) / repeat diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -10,7 +10,7 @@ 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', - 'ones': 'interp_numarray.ones', + 'empty_like': 'interp_numarray.empty_like', '_reconstruct' : 'interp_numarray._reconstruct', 'scalar' : 'interp_numarray.build_scalar', 'dot': 'interp_arrayops.dot', @@ -106,8 +106,6 @@ ('logaddexp2', 'logaddexp2'), ('real', 'real'), ('imag', 'imag'), - ('ones_like', 'ones_like'), - ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -12,7 +12,9 @@ if dtype is None: test = _numpypy.multiarray.array([start, stop, step, 0]) dtype = test.dtype - arr = _numpypy.multiarray.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + length = math.ceil((float(stop) - start) / step) + length = int(length) + arr = _numpypy.multiarray.zeros(length, dtype=dtype) i = start for j in range(arr.size): arr[j] = i diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- 
a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -47,7 +47,7 @@ def setslice(self, space, arr): impl = arr.implementation if impl.is_scalar(): - self.fill(impl.get_scalar_value()) + self.fill(space, impl.get_scalar_value()) return shape = shape_agreement(space, self.get_shape(), arr) if impl.storage == self.storage: @@ -100,7 +100,7 @@ tmp = self.get_real(orig_array) tmp.setslice(space, convert_to_array(space, w_value)) - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() if self.dtype.is_complex_type(): @@ -110,11 +110,11 @@ impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) if not self.dtype.is_flexible_type(): - impl.fill(self.dtype.box(0)) + impl.fill(space, self.dtype.box(0)) return impl def set_imag(self, space, orig_array, w_value): - tmp = self.get_imag(orig_array) + tmp = self.get_imag(space, orig_array) tmp.setslice(space, convert_to_array(space, w_value)) # -------------------- applevel get/setitem ----------------------- @@ -357,7 +357,7 @@ self.get_backstrides(), self.get_shape()) - def fill(self, box): + def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), box, 0, self.size, 0) @@ -392,6 +392,21 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) +class ConcreteArrayWithBase(ConcreteArrayNotOwning): + def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): + ConcreteArrayNotOwning.__init__(self, shape, dtype, order, + strides, backstrides, storage) + self.orig_base = orig_base + + def base(self): + return self.orig_base + + +class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) + class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): @@ -420,8 +435,8 @@ def base(self): return self.orig_arr - def fill(self, box): - loop.fill(self, box.convert_to(self.dtype)) + def fill(self, space, box): + loop.fill(self, box.convert_to(space, self.dtype)) def create_iter(self, shape=None, backward_broadcast=False, require_index=False): if shape is not None and \ @@ -487,3 +502,6 @@ def getlength(self): return self.impl.size + + def get_raw_address(self): + return self.impl.storage diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -54,8 +54,7 @@ return self.value def set_scalar_value(self, w_val): - assert isinstance(w_val, W_GenericBox) - self.value = w_val.convert_to(self.dtype) + self.value = w_val def copy(self, space): scalar = Scalar(self.dtype) @@ -96,12 +95,12 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) if self.dtype.is_complex_type(): self.value = self.dtype.itemtype.composite( - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), self.value.convert_imag_to(dtype)) else: self.value = w_arr.get_scalar_value() - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): if self.dtype.is_complex_type(): scalar = Scalar(self.dtype.float_type) scalar.value = self.value.convert_imag_to(scalar.dtype) @@ -125,13 +124,23 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) self.value = 
self.dtype.itemtype.composite( self.value.convert_real_to(dtype), - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), ) From noreply at buildbot.pypy.org Mon Jan 13 14:20:52 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 13 Jan 2014 14:20:52 +0100 (CET) Subject: [pypy-commit] pypy default: Remove dead code. Message-ID: <20140113132052.345271D23C0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68635:42c053667504 Date: 2014-01-13 14:18 +0100 http://bitbucket.org/pypy/pypy/changeset/42c053667504/ Log: Remove dead code. diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -132,11 +132,6 @@ block.exitswitch = None return link -def split_block_at_start(annotator, block): - # split before the first op, preserve order and inputargs - # in the second block! - return split_block(annotator, block, 0, _forcelink=block.inputargs) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from rpython.annotator import model as annmodel From noreply at buildbot.pypy.org Mon Jan 13 14:20:53 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 13 Jan 2014 14:20:53 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge Message-ID: <20140113132053.73FBE1D23C0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68636:37f4aea427ce Date: 2014-01-13 14:20 +0100 http://bitbucket.org/pypy/pypy/changeset/37f4aea427ce/ Log: hg merge diff --git a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py --- a/rpython/jit/metainterp/optimizeopt/earlyforce.py +++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py @@ -28,8 +28,5 @@ value.force_box(self) self.emit_operation(op) - def new(self): - return OptEarlyForce() - def setup(self): self.optimizer.optearlyforce = self diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -184,9 +184,6 @@ self.postponed_op = None self.next_optimization.propagate_forward(postponed_op) - def new(self): - return OptHeap() - def produce_potential_short_preamble_ops(self, sb): descrkeys = self.cached_fields.keys() if not we_are_translated(): diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -13,9 +13,6 @@ """Keeps track of the bounds placed on integers by guards and remove redundant guards""" - def new(self): - return OptIntBounds() - def propagate_forward(self, op): dispatch_opt(self, op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -323,10 +323,6 @@ def force_at_end_of_preamble(self): pass - # It is too late to force stuff here, it must be done in force_at_end_of_preamble - def new(self): - raise NotImplementedError - # Called after last operation has been propagated to flush out any posponed ops def flush(self): pass @@ -390,16 +386,6 @@ for o in self.optimizations: o.flush() - def new(self): - new = Optimizer(self.metainterp_sd, self.loop) - return 
self._new(new) - - def _new(self, new): - optimizations = [o.new() for o in self.optimizations] - new.set_optimizations(optimizations) - new.quasi_immutable_deps = self.quasi_immutable_deps - return new - def produce_potential_short_preamble_ops(self, sb): for opt in self.optimizations: opt.produce_potential_short_preamble_ops(sb) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -85,10 +85,6 @@ def flush(self): assert self.postponed_op is None - def new(self): - assert self.postponed_op is None - return OptPure() - def setup(self): self.optimizer.optpure = self diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -20,9 +20,6 @@ self.loop_invariant_results = {} self.loop_invariant_producer = {} - def new(self): - return OptRewrite() - def produce_potential_short_preamble_ops(self, sb): for op in self.loop_invariant_producer.values(): sb.add_potential(op) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -41,10 +41,6 @@ self.emitted_guards += 1 # FIXME: can we use counter in self._emit_operation? self._emit_operation(op) - def new(self): - new = UnrollableOptimizer(self.metainterp_sd, self.loop) - return self._new(new) - class UnrollOptimizer(Optimization): """Unroll the loop into two iterations. The first one will diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -497,9 +497,6 @@ _last_guard_not_forced_2 = None - def new(self): - return OptVirtualize() - def make_virtual(self, known_class, box, source_op=None): vvalue = VirtualValue(self.optimizer.cpu, known_class, box, source_op) self.make_equal_to(box, vvalue) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -388,8 +388,6 @@ class OptString(optimizer.Optimization): "Handling of strings and unicodes." 
- def new(self): - return OptString() def make_vstring_plain(self, box, source_op, mode): vvalue = VStringPlainValue(box, source_op, mode) From noreply at buildbot.pypy.org Mon Jan 13 14:20:54 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 13 Jan 2014 14:20:54 +0100 (CET) Subject: [pypy-commit] pypy refactor-translator: hg merge default Message-ID: <20140113132054.AA9E21D23C0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-translator Changeset: r68637:5a9b55c64601 Date: 2014-01-13 14:21 +0100 http://bitbucket.org/pypy/pypy/changeset/5a9b55c64601/ Log: hg merge default diff --git a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py --- a/rpython/jit/metainterp/optimizeopt/earlyforce.py +++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py @@ -28,8 +28,5 @@ value.force_box(self) self.emit_operation(op) - def new(self): - return OptEarlyForce() - def setup(self): self.optimizer.optearlyforce = self diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -184,9 +184,6 @@ self.postponed_op = None self.next_optimization.propagate_forward(postponed_op) - def new(self): - return OptHeap() - def produce_potential_short_preamble_ops(self, sb): descrkeys = self.cached_fields.keys() if not we_are_translated(): diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -13,9 +13,6 @@ """Keeps track of the bounds placed on integers by guards and remove redundant guards""" - def new(self): - return OptIntBounds() - def propagate_forward(self, op): dispatch_opt(self, op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -323,10 +323,6 @@ def force_at_end_of_preamble(self): pass - # It is too late to force stuff here, it must be done in force_at_end_of_preamble - def new(self): - raise NotImplementedError - # Called after last operation has been propagated to flush out any posponed ops def flush(self): pass @@ -390,16 +386,6 @@ for o in self.optimizations: o.flush() - def new(self): - new = Optimizer(self.metainterp_sd, self.loop) - return self._new(new) - - def _new(self, new): - optimizations = [o.new() for o in self.optimizations] - new.set_optimizations(optimizations) - new.quasi_immutable_deps = self.quasi_immutable_deps - return new - def produce_potential_short_preamble_ops(self, sb): for opt in self.optimizations: opt.produce_potential_short_preamble_ops(sb) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -85,10 +85,6 @@ def flush(self): assert self.postponed_op is None - def new(self): - assert self.postponed_op is None - return OptPure() - def setup(self): self.optimizer.optpure = self diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -20,9 +20,6 @@ self.loop_invariant_results = {} self.loop_invariant_producer = {} - def 
new(self): - return OptRewrite() - def produce_potential_short_preamble_ops(self, sb): for op in self.loop_invariant_producer.values(): sb.add_potential(op) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -41,10 +41,6 @@ self.emitted_guards += 1 # FIXME: can we use counter in self._emit_operation? self._emit_operation(op) - def new(self): - new = UnrollableOptimizer(self.metainterp_sd, self.loop) - return self._new(new) - class UnrollOptimizer(Optimization): """Unroll the loop into two iterations. The first one will diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -497,9 +497,6 @@ _last_guard_not_forced_2 = None - def new(self): - return OptVirtualize() - def make_virtual(self, known_class, box, source_op=None): vvalue = VirtualValue(self.optimizer.cpu, known_class, box, source_op) self.make_equal_to(box, vvalue) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -388,8 +388,6 @@ class OptString(optimizer.Optimization): "Handling of strings and unicodes." - def new(self): - return OptString() def make_vstring_plain(self, box, source_op, mode): vvalue = VStringPlainValue(box, source_op, mode) diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -132,11 +132,6 @@ block.exitswitch = None return link -def split_block_at_start(annotator, block): - # split before the first op, preserve order and inputargs - # in the second block! 
- return split_block(annotator, block, 0, _forcelink=block.inputargs) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from rpython.annotator import model as annmodel From noreply at buildbot.pypy.org Mon Jan 13 14:43:52 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 13 Jan 2014 14:43:52 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: hack enough to get the first frontend test passing Message-ID: <20140113134352.899F91C019D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68638:7c8b3cf8c3b5 Date: 2014-01-12 17:37 +0100 http://bitbucket.org/pypy/pypy/changeset/7c8b3cf8c3b5/ Log: hack enough to get the first frontend test passing diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -68,6 +68,9 @@ def process_leave_frame(self, op): self.framestack.pop() + def process_resume_set_pc(self, op): + pass + def process_resume_put(self, op): box = op.getarg(0) frame_pos = op.getarg(1).getint() diff --git a/rpython/jit/backend/resumebuilder.py b/rpython/jit/backend/resumebuilder.py --- a/rpython/jit/backend/resumebuilder.py +++ b/rpython/jit/backend/resumebuilder.py @@ -27,6 +27,9 @@ def resume_setfield_gc(self, arg0, arg1, descr): self.deps[arg0][descr] = arg1 + def resume_set_pc(self, pc): + pass + def _track(self, allboxes, box): if box in self.deps: for dep in self.deps[box].values(): diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -2,7 +2,7 @@ from rpython.jit.codewriter.jitcode import JitCode, SwitchDictDescr from rpython.jit.metainterp.compile import ResumeAtPositionDescr from rpython.jit.metainterp.jitexc import get_llexception, reraise -from rpython.jit.metainterp import jitexc +from rpython.jit.metainterp import jitexc, resume2 from rpython.rlib import longlong2float from rpython.rlib.debug import ll_assert, make_sure_not_resized from rpython.rlib.objectmodel import we_are_translated @@ -1608,11 +1608,10 @@ def resume_in_blackhole(metainterp_sd, jitdriver_sd, resumedescr, deadframe, all_virtuals=None): - from rpython.jit.metainterp.resume import blackhole_from_resumedata #debug_start('jit-blackhole') - blackholeinterp = blackhole_from_resumedata( + blackholeinterp = resume2.blackhole_from_resumedata( metainterp_sd.blackholeinterpbuilder, - jitdriver_sd, + metainterp_sd, resumedescr, deadframe, all_virtuals) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -133,14 +133,6 @@ def repr_of_descr(self): return '%r' % (self,) - def _clone_if_mutable(self): - return self - def clone_if_mutable(self): - clone = self._clone_if_mutable() - if not we_are_translated(): - assert clone.__class__ is self.__class__ - return clone - def hide(self, cpu): descr_ptr = cpu.ts.cast_instance_to_base_ref(self) return cpu.ts.cast_to_ref(descr_ptr) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -291,20 +291,6 @@ retrace_limit = 5 max_retrace_guards = 15 -class Storage(compile.ResumeGuardDescr): - "for tests." 
- def __init__(self, metainterp_sd=None, original_greenkey=None): - self.metainterp_sd = metainterp_sd - self.original_greenkey = original_greenkey - def store_final_boxes(self, op, boxes): - op.setfailargs(boxes) - def __eq__(self, other): - return type(self) is type(other) # xxx obscure - def clone_if_mutable(self): - res = Storage(self.metainterp_sd, self.original_greenkey) - self.copy_all_attributes_into(res) - return res - def _sortboxes(boxes): _kind2count = {history.INT: 1, history.REF: 2, history.FLOAT: 3} return sorted(boxes, key=lambda box: _kind2count[box.type]) @@ -399,23 +385,6 @@ return preamble -class FakeDescr(compile.ResumeGuardDescr): - def clone_if_mutable(self): - return FakeDescr() - def __eq__(self, other): - return isinstance(other, FakeDescr) - -class FakeDescrWithSnapshot(compile.ResumeGuardDescr): - class rd_snapshot: - class prev: - prev = None - boxes = [] - boxes = [] - def clone_if_mutable(self): - return FakeDescrWithSnapshot() - def __eq__(self, other): - return isinstance(other, Storage) or isinstance(other, FakeDescrWithSnapshot) - def convert_old_style_to_targets(loop, jump): newloop = TreeLoop(loop.name) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -5,7 +5,7 @@ from rpython.jit.codewriter import heaptracker from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.codewriter.jitcode import JitCode, SwitchDictDescr -from rpython.jit.metainterp import history, compile, resume, executor, jitexc +from rpython.jit.metainterp import history, compile, resume2, executor, jitexc from rpython.jit.metainterp.heapcache import HeapCache from rpython.jit.metainterp.history import (Const, ConstInt, ConstPtr, ConstFloat, Box, TargetToken) @@ -41,9 +41,11 @@ self.registers_i = [None] * 256 self.registers_r = [None] * 256 self.registers_f = [None] * 256 + self.resume_cache = {} def setup(self, jitcode, greenkey=None): assert isinstance(jitcode, JitCode) + self.resume_cache = {} self.jitcode = jitcode self.bytecode = jitcode.code # this is not None for frames that are recursive portal calls @@ -1458,6 +1460,31 @@ # but we should not follow calls to that graph return self.do_residual_call(funcbox, argboxes, calldescr, pc) + def emit_resume_data(self, pos): + i = 0 + history = self.metainterp.history + for i in range(self.jitcode.num_regs_i()): + box = self.registers_i[i] + if box is not None and box not in self.resume_cache: + history.record(rop.RESUME_PUT, + [box, ConstInt(pos), ConstInt(i)], None) + self.resume_cache[box] = None + start = self.jitcode.num_regs_i() + for i in range(self.jitcode.num_regs_r()): + box = self.registers_r[i] + if box is not None and box not in self.resume_cache: + history.record(rop.RESUME_PUT, + [box, ConstInt(pos), ConstInt(i + start)], None) + self.resume_cache[box] = None + start = self.jitcode.num_regs_i() + self.jitcode.num_regs_r() + for i in range(self.jitcode.num_regs_f()): + box = self.registers_f[i] + if box is not None and box not in self.resume_cache: + history.record(rop.RESUME_PUT, + [box, ConstInt(pos), ConstInt(i + start)], None) + self.resume_cache[box] = None + history.record(rop.RESUME_SET_PC, [ConstInt(self.pc)], None) + # ____________________________________________________________ class MetaInterpStaticData(object): @@ -1672,6 +1699,12 @@ return self.jitdriver_sd is not None and jitcode is self.jitdriver_sd.mainjitcode def newframe(self, jitcode, greenkey=None): + if self.framestack: + 
pc = self.framestack[-1].pc + else: + pc = -1 + self.history.record(rop.ENTER_FRAME, [ConstInt(pc)], None, + descr=jitcode) if jitcode.is_portal: self.portal_call_depth += 1 self.call_ids.append(self.current_call_id) @@ -1688,6 +1721,7 @@ return f def popframe(self): + self.history.record(rop.LEAVE_FRAME, [], None) frame = self.framestack.pop() jitcode = frame.jitcode if jitcode.is_portal: @@ -1786,15 +1820,21 @@ resumedescr = compile.ResumeGuardNotInvalidated() else: resumedescr = compile.ResumeGuardDescr() + resumedescr.guard_opnum = opnum # XXX kill me + self.sync_resume_data(resumedescr, resumepc) guard_op = self.history.record(opnum, moreargs, None, descr=resumedescr) - #self.capture_resumedata(resumedescr, resumepc) self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) # count self.attach_debug_info(guard_op) return guard_op - + + def sync_resume_data(self, resumedescr, resumepc): + for i, frame in enumerate(self.framestack): + frame.emit_resume_data(i) + def capture_resumedata(self, resumedescr, resumepc=-1): + XXXX virtualizable_boxes = None if (self.jitdriver_sd.virtualizable_info is not None or self.jitdriver_sd.greenfield_info is not None): @@ -1805,6 +1845,7 @@ saved_pc = frame.pc if resumepc >= 0: frame.pc = resumepc + xxx resume.capture_resumedata(self.framestack, virtualizable_boxes, self.virtualref_boxes, resumedescr) if self.framestack: @@ -2490,6 +2531,7 @@ vinfo = self.jitdriver_sd.virtualizable_info ginfo = self.jitdriver_sd.greenfield_info self.framestack = [] + xxx boxlists = resume.rebuild_from_resumedata(self, resumedescr, deadframe, vinfo, ginfo) inputargs_and_holes, virtualizable_boxes, virtualref_boxes = boxlists diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -80,8 +80,6 @@ def clone(self): args = self.getarglist() descr = self.getdescr() - if descr is not None: - descr = descr.clone_if_mutable() op = ResOperation(self.getopnum(), args[:], self.result, descr) if not we_are_translated(): op.name = self.name @@ -477,6 +475,7 @@ # frontend 'RESUME_NEW/0d', 'RESUME_SETFIELD_GC/2d', + 'RESUME_SET_PC/1', '_RESUME_LAST', # ----- end of resume only operations ------ '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -2,6 +2,7 @@ from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.history import BoxInt from rpython.jit.codewriter.jitcode import JitCode +from rpython.rlib import rstack class ResumeBytecode(object): def __init__(self, opcodes, parent=None, parent_position=-1, loop=None): @@ -41,6 +42,8 @@ elif op.getopnum() == rop.RESUME_SETFIELD_GC: self.resume_setfield_gc(op.getarg(0), op.getarg(1), op.getdescr()) + elif op.getopnum() == rop.RESUME_SET_PC: + self.resume_set_pc(op.getarg(0).getint()) elif not op.is_resume(): pos += 1 continue @@ -50,24 +53,56 @@ def resume_put(self, jitframe_pos_const, frame_no, frontend_position): jitframe_pos = jitframe_pos_const.getint() - jitcode = self.metainterp.framestack[frame_no].jitcode - frame = self.metainterp.framestack[frame_no] + jitcode = self.get_jitcode(frame_no) if frontend_position < jitcode.num_regs_i(): - self.put_box_int(frame, frontend_position, jitframe_pos) + self.put_box_int(frame_no, frontend_position, jitframe_pos) elif frontend_position < (jitcode.num_regs_r() 
+ jitcode.num_regs_i()): - self.put_box_ref(frame, frontend_position - jitcode.num_regs_i(), + self.put_box_ref(frame_no, frontend_position - jitcode.num_regs_i(), jitframe_pos) else: assert frontend_position < jitcode.num_regs() - self.put_box_float(frame, frontend_position - jitcode.num_regs_r() + self.put_box_float(frame_no, + frontend_position - jitcode.num_regs_r() - jitcode.num_regs_i(), jitframe_pos) def read_int(self, jitframe_pos): return self.metainterp.cpu.get_int_value(self.deadframe, jitframe_pos) class DirectResumeReader(AbstractResumeReader): - def __init__(self, *args): - xxx + def __init__(self, binterpbuilder, cpu, deadframe): + self.bhinterpbuilder = binterpbuilder + self.cpu = cpu + self.deadframe = deadframe + self.bhinterp_stack = [] + + def get_jitcode(self, no): + return self.bhinterp_stack[no].jitcode + + def get_frame(self, no): + return self.bhinterp_stack[no] + + def enter_frame(self, pc, jitcode): + if pc != -1: + self.bhinterp_stack[-1].position = pc + self.bhinterp_stack.append(self.bhinterpbuilder.acquire_interp()) + self.bhinterp_stack[-1].setposition(jitcode, 0) + + def put_box_int(self, frame_no, frontend_position, jitframe_pos): + val = self.cpu.get_int_value(self.deadframe, jitframe_pos) + self.bhinterp_stack[frame_no].registers_i[frontend_position] = val + + def resume_set_pc(self, pc): + self.bhinterp_stack[-1].position = pc + + def leave_frame(self): + bhinterp = self.bhinterp_stack.pop() + self.bhinterpbuilder.release_interp(bhinterp) + + def finish(self): + self.bhinterp_stack[0].nextblackholeinterp = None + for i in range(1, len(self.bhinterp_stack)): + self.bhinterp_stack[i].nextblackholeinterp = self.bhinterp_stack[i - 1] + return self.bhinterp_stack[-1] class BoxResumeReader(AbstractResumeReader): def __init__(self, metainterp, deadframe): @@ -82,14 +117,17 @@ def leave_frame(self): self.metainterp.popframe() - def put_box_int(self, frame, position, jitframe_pos): + def put_box_int(self, frame_no, position, jitframe_pos): + frame = self.metainterp.framestack[frame_no] frame.registers_i[position] = BoxInt(self.read_int(jitframe_pos)) - def put_box_ref(self, frame, position, jitframe_pos): + def put_box_ref(self, frame_no, position, jitframe_pos): + frame = self.metainterp.framestack[frame_no] xxx frame.registers_r[position] = self.read_ref(jitframe_pos) - def put_box_float(self, frame, position, jitframe_pos): + def put_box_float(self, frame_no, position, jitframe_pos): + frame = self.metainterp.framestack[frame_no] xxx frame.registers_f[position] = self.read_float(jitframe_pos) @@ -99,3 +137,16 @@ def rebuild_from_resumedata(metainterp, deadframe, faildescr): BoxResumeReader(metainterp, deadframe).rebuild(faildescr) +def blackhole_from_resumedata(interpbuilder, metainterp_sd, faildescr, + deadframe, all_virtuals=None): + assert all_virtuals is None + #rstack._stack_criticalcode_start() + #try: + # xxx consume vrefs and random shit + #finally: + # rstack._stack_criticalcode_stop() + cpu = metainterp_sd.cpu + last_bhinterp = DirectResumeReader(interpbuilder, cpu, + deadframe).rebuild(faildescr) + + return last_bhinterp From noreply at buildbot.pypy.org Mon Jan 13 14:43:54 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 13 Jan 2014 14:43:54 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: hack some more Message-ID: <20140113134354.0207A1C019D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68639:86df5551ea2e Date: 2014-01-12 17:42 +0100 
http://bitbucket.org/pypy/pypy/changeset/86df5551ea2e/ Log: hack some more diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -789,8 +789,11 @@ values = [] for i in range(len(self.current_op.failargs)): arg = self.current_op.failargs[i] - value = self.env[arg] - values.append(value) + if arg is None: + values.append(None) + else: + value = self.env[arg] + values.append(value) if hasattr(descr, '_llgraph_bridge'): target = (descr._llgraph_bridge, -1) raise Jump(target, values) diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -91,6 +91,10 @@ val = self.cpu.get_int_value(self.deadframe, jitframe_pos) self.bhinterp_stack[frame_no].registers_i[frontend_position] = val + def put_box_ref(self, frame_no, frontend_position, jitframe_pos): + val = self.cpu.get_ref_value(self.deadframe, jitframe_pos) + self.bhinterp_stack[frame_no].registers_r[frontend_position] = val + def resume_set_pc(self, pc): self.bhinterp_stack[-1].position = pc From noreply at buildbot.pypy.org Mon Jan 13 14:43:55 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 13 Jan 2014 14:43:55 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) don't eagerly create frames, but instead do the Message-ID: <20140113134355.460F11C019D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68640:c2ad1730a302 Date: 2014-01-13 11:53 +0100 http://bitbucket.org/pypy/pypy/changeset/c2ad1730a302/ Log: (fijal, rguillebert) don't eagerly create frames, but instead do the computations symbolically diff --git a/rpython/jit/backend/resumebuilder.py b/rpython/jit/backend/resumebuilder.py --- a/rpython/jit/backend/resumebuilder.py +++ b/rpython/jit/backend/resumebuilder.py @@ -48,6 +48,9 @@ def leave_frame(self): self.framestack.pop() + def rebuild(self, faildescr): + raise Exception("should not be called") + class ResumeBuilder(object): def __init__(self, regalloc, frontend_liveness, descr, inputframes=None, inputlocs=None): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2409,8 +2409,8 @@ try: self.portal_call_depth = -1 # always one portal around self.history = history.History() - inputargs_and_holes = self.rebuild_state_after_failure(resumedescr, - deadframe) + self.rebuild_state_after_failure(resumedescr, deadframe) + xxx self.history.inputargs = [box for box in inputargs_and_holes if box] finally: rstack._stack_criticalcode_stop() @@ -2531,10 +2531,10 @@ vinfo = self.jitdriver_sd.virtualizable_info ginfo = self.jitdriver_sd.greenfield_info self.framestack = [] - xxx - boxlists = resume.rebuild_from_resumedata(self, resumedescr, deadframe, - vinfo, ginfo) - inputargs_and_holes, virtualizable_boxes, virtualref_boxes = boxlists + inputframes = resume2.rebuild_from_resumedata(self, deadframe, + resumedescr) + virtualizable_boxes = None + virtualref_boxes = None # # virtual refs: make the vrefs point to the freshly allocated virtuals self.virtualref_boxes = virtualref_boxes @@ -2565,7 +2565,7 @@ else: assert not virtualizable_boxes # - return inputargs_and_holes + return inputframes def check_synchronized_virtualizable(self): if not we_are_translated(): diff --git a/rpython/jit/metainterp/resume2.py 
b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -1,6 +1,6 @@ from rpython.jit.metainterp.resoperation import rop -from rpython.jit.metainterp.history import BoxInt +from rpython.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat from rpython.jit.codewriter.jitcode import JitCode from rpython.rlib import rstack @@ -11,7 +11,16 @@ self.parent_position = parent_position self.loop = loop +class ResumeFrame(object): + def __init__(self, jitcode): + self.registers = [-1] * jitcode.num_regs() + self.jitcode = jitcode + self.pc = -1 + class AbstractResumeReader(object): + def __init__(self): + self.framestack = [] + def rebuild(self, faildescr): self._rebuild_until(faildescr.rd_resume_bytecode, faildescr.rd_bytecode_position) @@ -20,6 +29,22 @@ def finish(self): pass + def enter_frame(self, pc, jitcode): + if self.framestack: + assert pc != -1 + self.framestack[-1].pc = pc + self.framestack.append(ResumeFrame(jitcode)) + + def resume_put(self, jitframe_pos_const, frame_no, frontend_position): + jitframe_pos = jitframe_pos_const.getint() + self.framestack[frame_no].registers[frontend_position] = jitframe_pos + + def resume_set_pc(self, pc): + self.framestack[-1].pc = pc + + def leave_frame(self): + self.framestack.pop() + def _rebuild_until(self, rb, position): if rb.parent is not None: self._rebuild_until(rb.parent, rb.parent_position) @@ -36,7 +61,7 @@ self.leave_frame() elif op.getopnum() == rop.RESUME_PUT: self.resume_put(op.getarg(0), op.getarg(1).getint(), - op.getarg(2).getint()) + op.getarg(2).getint()) elif op.getopnum() == rop.RESUME_NEW: self.resume_new(op.result, op.getdescr()) elif op.getopnum() == rop.RESUME_SETFIELD_GC: @@ -51,20 +76,6 @@ xxx pos += 1 - def resume_put(self, jitframe_pos_const, frame_no, frontend_position): - jitframe_pos = jitframe_pos_const.getint() - jitcode = self.get_jitcode(frame_no) - if frontend_position < jitcode.num_regs_i(): - self.put_box_int(frame_no, frontend_position, jitframe_pos) - elif frontend_position < (jitcode.num_regs_r() + jitcode.num_regs_i()): - self.put_box_ref(frame_no, frontend_position - jitcode.num_regs_i(), - jitframe_pos) - else: - assert frontend_position < jitcode.num_regs() - self.put_box_float(frame_no, - frontend_position - jitcode.num_regs_r() - - jitcode.num_regs_i(), jitframe_pos) - def read_int(self, jitframe_pos): return self.metainterp.cpu.get_int_value(self.deadframe, jitframe_pos) @@ -75,12 +86,6 @@ self.deadframe = deadframe self.bhinterp_stack = [] - def get_jitcode(self, no): - return self.bhinterp_stack[no].jitcode - - def get_frame(self, no): - return self.bhinterp_stack[no] - def enter_frame(self, pc, jitcode): if pc != -1: self.bhinterp_stack[-1].position = pc @@ -112,34 +117,37 @@ def __init__(self, metainterp, deadframe): self.metainterp = metainterp self.deadframe = deadframe + AbstractResumeReader.__init__(self) - def enter_frame(self, pc, jitcode): - if pc != -1: - self.metainterp.framestack[-1].pc = pc - self.metainterp.newframe(jitcode) + def get_int_box(self, pos): + return BoxInt(self.metainterp.cpu.get_int_value(self.deadframe, pos)) - def leave_frame(self): - self.metainterp.popframe() + def get_ref_box(self, pos): + return BoxPtr(self.metainterp.cpu.get_ref_value(self.deadframe, pos)) - def put_box_int(self, frame_no, position, jitframe_pos): - frame = self.metainterp.framestack[frame_no] - frame.registers_i[position] = BoxInt(self.read_int(jitframe_pos)) - - def put_box_ref(self, frame_no, position, jitframe_pos): - frame = 
self.metainterp.framestack[frame_no] - xxx - frame.registers_r[position] = self.read_ref(jitframe_pos) - - def put_box_float(self, frame_no, position, jitframe_pos): - frame = self.metainterp.framestack[frame_no] - xxx - frame.registers_f[position] = self.read_float(jitframe_pos) + def get_float_box(self, pos): + return BoxFloat(self.metainterp.cpu.get_float_value(self.deadframe, + pos)) def finish(self): - pass - + for frame in self.framestack: + jitcode = frame.jitcode + miframe = self.metainterp.newframe(jitcode) + miframe.pc = frame.pc + pos = 0 + for i in range(jitcode.num_regs_i()): + miframe.registers_i[i] = self.get_int_box(frame.registers[pos]) + pos += 1 + for i in range(jitcode.num_regs_r()): + miframe.registers_r[i] = self.get_ref_box(frame.registers[pos]) + pos += 1 + for i in range(jitcode.num_regs_f()): + jitframe_pos = frame.registers[pos] + miframe.registers_f[i] = self.get_float_box(jitframe_pos) + pos += 1 + def rebuild_from_resumedata(metainterp, deadframe, faildescr): - BoxResumeReader(metainterp, deadframe).rebuild(faildescr) + return BoxResumeReader(metainterp, deadframe).rebuild(faildescr) def blackhole_from_resumedata(interpbuilder, metainterp_sd, faildescr, deadframe, all_virtuals=None): diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -4,7 +4,7 @@ from rpython.jit.codewriter.jitcode import JitCode from rpython.jit.metainterp.history import AbstractDescr from rpython.jit.metainterp.resume2 import rebuild_from_resumedata,\ - ResumeBytecode, BoxResumeReader + ResumeBytecode, AbstractResumeReader class Descr(AbstractDescr): @@ -21,7 +21,7 @@ self.registers_f = [None] * jitcode.num_regs_f() def num_nonempty_regs(self): - return len(filter(bool, self.registers_i)) + return len([i for i in self.registers_i if i.getint() != 2]) def dump_registers(self, lst, backend_values): lst += [backend_values[x] for x in self.registers_i] @@ -34,7 +34,9 @@ self.framestack = [] def newframe(self, jitcode): - self.framestack.append(Frame(jitcode)) + f = Frame(jitcode) + self.framestack.append(f) + return f def popframe(self): self.framestack.pop() @@ -44,25 +46,9 @@ assert frame == "myframe" return index + 3 -class RebuildingResumeReader(BoxResumeReader): - def __init__(self): - self.metainterp = MockMetaInterp() - - def put_box_int(self, frame, position, jitframe_pos): - frame.registers_i[position] = jitframe_pos - - def put_box_float(self, frame, position, jitframe_pos): - frame.registers_f[position] = jitframe_pos - - def put_box_ref(self, frame, position, jitframe_pos): - frame.registers_r[position] = jitframe_pos - +class RebuildingResumeReader(AbstractResumeReader): def finish(self): - framestack = [] - for frame in self.metainterp.framestack: - framestack.append(frame.registers_i + frame.registers_r + - frame.registers_f) - return framestack + return [f.registers for f in self.framestack] def rebuild_locs_from_resumedata(faildescr): return RebuildingResumeReader().rebuild(faildescr) @@ -70,7 +56,7 @@ class TestResumeDirect(object): def test_box_resume_reader(self): jitcode = JitCode("jitcode") - jitcode.setup(num_regs_i=13) + jitcode.setup(num_regs_i=13, num_regs_r=0, num_regs_f=0) resume_loop = parse(""" [] enter_frame(-1, descr=jitcode1) @@ -89,9 +75,9 @@ def test_nested_call(self): jitcode1 = JitCode("jitcode") - jitcode1.setup(num_regs_i=13) + jitcode1.setup(num_regs_i=13, num_regs_r=0, num_regs_f=0) jitcode2 = 
JitCode("jitcode2") - jitcode2.setup(num_regs_i=9) + jitcode2.setup(num_regs_i=9, num_regs_r=0, num_regs_f=0) resume_loop = parse(""" [] enter_frame(-1, descr=jitcode1) @@ -130,7 +116,7 @@ def test_bridge(self): jitcode1 = JitCode("jitcode") - jitcode1.setup(num_regs_i=13) + jitcode1.setup(num_regs_i=13, num_regs_r=0, num_regs_f=0) base = parse(""" [] enter_frame(-1, descr=jitcode1) @@ -160,7 +146,7 @@ def test_new(self): py.test.skip("finish") jitcode1 = JitCode("jitcode") - jitcode1.setup(num_regs_i=1) + jitcode1.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) base = parse(""" [] enter_frame(-1, descr=jitcode) From noreply at buildbot.pypy.org Mon Jan 13 14:43:56 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 13 Jan 2014 14:43:56 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) finish the blackhole refactor Message-ID: <20140113134356.7310E1C019D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68641:07d4cfa59ad7 Date: 2014-01-13 12:11 +0100 http://bitbucket.org/pypy/pypy/changeset/07d4cfa59ad7/ Log: (fijal, rguillebert) finish the blackhole refactor diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -84,34 +84,36 @@ self.bhinterpbuilder = binterpbuilder self.cpu = cpu self.deadframe = deadframe - self.bhinterp_stack = [] - - def enter_frame(self, pc, jitcode): - if pc != -1: - self.bhinterp_stack[-1].position = pc - self.bhinterp_stack.append(self.bhinterpbuilder.acquire_interp()) - self.bhinterp_stack[-1].setposition(jitcode, 0) - - def put_box_int(self, frame_no, frontend_position, jitframe_pos): - val = self.cpu.get_int_value(self.deadframe, jitframe_pos) - self.bhinterp_stack[frame_no].registers_i[frontend_position] = val - - def put_box_ref(self, frame_no, frontend_position, jitframe_pos): - val = self.cpu.get_ref_value(self.deadframe, jitframe_pos) - self.bhinterp_stack[frame_no].registers_r[frontend_position] = val - - def resume_set_pc(self, pc): - self.bhinterp_stack[-1].position = pc - - def leave_frame(self): - bhinterp = self.bhinterp_stack.pop() - self.bhinterpbuilder.release_interp(bhinterp) + AbstractResumeReader.__init__(self) def finish(self): - self.bhinterp_stack[0].nextblackholeinterp = None - for i in range(1, len(self.bhinterp_stack)): - self.bhinterp_stack[i].nextblackholeinterp = self.bhinterp_stack[i - 1] - return self.bhinterp_stack[-1] + nextbh = None + for frame in self.framestack: + curbh = self.bhinterpbuilder.acquire_interp() + curbh.nextblackholeinterp = nextbh + nextbh = curbh + jitcode = frame.jitcode + curbh.setposition(jitcode, frame.pc) + pos = 0 + for i in range(jitcode.num_regs_i()): + jitframe_pos = frame.registers[pos] + if jitframe_pos != -1: + curbh.registers_i[i] = self.cpu.get_int_value( + self.deadframe, jitframe_pos) + pos += 1 + for i in range(jitcode.num_regs_r()): + jitframe_pos = frame.registers[pos] + if jitframe_pos != -1: + curbh.registers_r[i] = self.cpu.get_ref_value( + self.deadframe, jitframe_pos) + pos += 1 + for i in range(jitcode.num_regs_f()): + jitframe_pos = frame.registers[pos] + if jitframe_pos != -1: + curbh.registers_f[i] = self.cpu.get_float_value( + self.deadframe, jitframe_pos) + pos += 1 + return curbh class BoxResumeReader(AbstractResumeReader): def __init__(self, metainterp, deadframe): @@ -136,13 +138,21 @@ miframe.pc = frame.pc pos = 0 for i in range(jitcode.num_regs_i()): - miframe.registers_i[i] = 
self.get_int_box(frame.registers[pos]) + jitframe_pos = frame.registers[pos] + if jitframe_pos == -1: + continue + miframe.registers_i[i] = self.get_int_box(jitframe_pos) pos += 1 for i in range(jitcode.num_regs_r()): - miframe.registers_r[i] = self.get_ref_box(frame.registers[pos]) + jitframe_pos = frame.registers[pos] + if jitframe_pos == -1: + continue + miframe.registers_r[i] = self.get_ref_box(jitframe_pos) pos += 1 for i in range(jitcode.num_regs_f()): jitframe_pos = frame.registers[pos] + if jitframe_pos == -1: + continue miframe.registers_f[i] = self.get_float_box(jitframe_pos) pos += 1 From noreply at buildbot.pypy.org Mon Jan 13 14:43:57 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 13 Jan 2014 14:43:57 +0100 (CET) Subject: [pypy-commit] pypy default: port this to python 2/3 (sigh) Message-ID: <20140113134357.A48061C019D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68642:c4fb947e9488 Date: 2014-01-13 14:42 +0100 http://bitbucket.org/pypy/pypy/changeset/c4fb947e9488/ Log: port this to python 2/3 (sigh) diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -38,9 +38,9 @@ if len(names) == 1: return val[names[0]] elif len(names) == 0: - raise KeyError, "cannot find field *%s" % suffix + raise KeyError("cannot find field *%s" % suffix) else: - raise KeyError, "too many matching fields: %s" % ', '.join(names) + raise KeyError("too many matching fields: %s" % ', '.join(names)) def lookup(val, suffix): """ @@ -76,10 +76,14 @@ def invoke(self, arg, from_tty): # some magic code to automatically reload the python file while developing from pypy.tool import gdb_pypy - reload(gdb_pypy) + try: + reload(gdb_pypy) + except: + import imp + imp.reload(gdb_pypy) gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache self.__class__ = gdb_pypy.RPyType - print self.do_invoke(arg, from_tty) + print (self.do_invoke(arg, from_tty).decode('latin-1')) def do_invoke(self, arg, from_tty): try: @@ -88,7 +92,7 @@ obj = self.gdb.parse_and_eval(arg) hdr = lookup(obj, '_gcheader') tid = hdr['h_tid'] - if sys.maxint < 2**32: + if sys.maxsize < 2**32: offset = tid & 0xFFFF # 32bit else: offset = tid & 0xFFFFFFFF # 64bit @@ -147,13 +151,13 @@ if linenum in self.line2offset: return self.line2offset[linenum] line = self.lines[linenum] - member, descr = map(str.strip, line.split(None, 1)) - if sys.maxint < 2**32: + member, descr = [x.strip() for x in line.split(None, 1)] + if sys.maxsize < 2**32: TIDT = "int*" else: TIDT = "char*" expr = ("((%s)(&pypy_g_typeinfo.%s)) - (%s)&pypy_g_typeinfo" - % (TIDT, member, TIDT)) + % (TIDT, member.decode("latin-1"), TIDT)) offset = int(self.gdb.parse_and_eval(expr)) self.line2offset[linenum] = offset self.offset2descr[offset] = descr @@ -164,7 +168,7 @@ # binary search through the lines, asking gdb to parse stuff lazily if offset in self.offset2descr: return self.offset2descr[offset] - if not (0 < offset < sys.maxint): + if not (0 < offset < sys.maxsize): return None linerange = (0, len(self.lines)) while linerange[0] < linerange[1]: From noreply at buildbot.pypy.org Mon Jan 13 14:43:58 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 13 Jan 2014 14:43:58 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20140113134358.D409B1C019D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68643:25070f2267c4 Date: 2014-01-13 14:43 +0100 http://bitbucket.org/pypy/pypy/changeset/25070f2267c4/ Log: merge diff --git 
a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py --- a/rpython/jit/metainterp/optimizeopt/earlyforce.py +++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py @@ -28,8 +28,5 @@ value.force_box(self) self.emit_operation(op) - def new(self): - return OptEarlyForce() - def setup(self): self.optimizer.optearlyforce = self diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -184,9 +184,6 @@ self.postponed_op = None self.next_optimization.propagate_forward(postponed_op) - def new(self): - return OptHeap() - def produce_potential_short_preamble_ops(self, sb): descrkeys = self.cached_fields.keys() if not we_are_translated(): diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -13,9 +13,6 @@ """Keeps track of the bounds placed on integers by guards and remove redundant guards""" - def new(self): - return OptIntBounds() - def propagate_forward(self, op): dispatch_opt(self, op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -323,10 +323,6 @@ def force_at_end_of_preamble(self): pass - # It is too late to force stuff here, it must be done in force_at_end_of_preamble - def new(self): - raise NotImplementedError - # Called after last operation has been propagated to flush out any posponed ops def flush(self): pass @@ -390,16 +386,6 @@ for o in self.optimizations: o.flush() - def new(self): - new = Optimizer(self.metainterp_sd, self.loop) - return self._new(new) - - def _new(self, new): - optimizations = [o.new() for o in self.optimizations] - new.set_optimizations(optimizations) - new.quasi_immutable_deps = self.quasi_immutable_deps - return new - def produce_potential_short_preamble_ops(self, sb): for opt in self.optimizations: opt.produce_potential_short_preamble_ops(sb) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -85,10 +85,6 @@ def flush(self): assert self.postponed_op is None - def new(self): - assert self.postponed_op is None - return OptPure() - def setup(self): self.optimizer.optpure = self diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -20,9 +20,6 @@ self.loop_invariant_results = {} self.loop_invariant_producer = {} - def new(self): - return OptRewrite() - def produce_potential_short_preamble_ops(self, sb): for op in self.loop_invariant_producer.values(): sb.add_potential(op) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -41,10 +41,6 @@ self.emitted_guards += 1 # FIXME: can we use counter in self._emit_operation? 
self._emit_operation(op) - def new(self): - new = UnrollableOptimizer(self.metainterp_sd, self.loop) - return self._new(new) - class UnrollOptimizer(Optimization): """Unroll the loop into two iterations. The first one will diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -497,9 +497,6 @@ _last_guard_not_forced_2 = None - def new(self): - return OptVirtualize() - def make_virtual(self, known_class, box, source_op=None): vvalue = VirtualValue(self.optimizer.cpu, known_class, box, source_op) self.make_equal_to(box, vvalue) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -388,8 +388,6 @@ class OptString(optimizer.Optimization): "Handling of strings and unicodes." - def new(self): - return OptString() def make_vstring_plain(self, box, source_op, mode): vvalue = VStringPlainValue(box, source_op, mode) diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -132,11 +132,6 @@ block.exitswitch = None return link -def split_block_at_start(annotator, block): - # split before the first op, preserve order and inputargs - # in the second block! - return split_block(annotator, block, 0, _forcelink=block.inputargs) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from rpython.annotator import model as annmodel From noreply at buildbot.pypy.org Mon Jan 13 16:13:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 13 Jan 2014 16:13:16 +0100 (CET) Subject: [pypy-commit] cffi default: Issue 134: add #ifdef _AIX Message-ID: <20140113151316.27D9F1C019D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1452:5336c0aa38f9 Date: 2014-01-13 16:12 +0100 http://bitbucket.org/cffi/cffi/changeset/5336c0aa38f9/ Log: Issue 134: add #ifdef _AIX diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -13,7 +13,7 @@ #include #include #include -#if defined (__SVR4) && defined (__sun) +#if (defined (__SVR4) && defined (__sun)) || defined(_AIX) # include #endif #endif From noreply at buildbot.pypy.org Mon Jan 13 16:29:38 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 13 Jan 2014 16:29:38 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: Merge default Message-ID: <20140113152938.25BCE1C153F@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68644:29f861b7d557 Date: 2014-01-13 16:28 +0100 http://bitbucket.org/pypy/pypy/changeset/29f861b7d557/ Log: Merge default diff too long, truncating to 2000 out of 12363 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. 
-PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -993,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, 
self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. 
All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. 
@@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -330,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -992,13 +1000,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1021,12 +1034,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1176,11 +1194,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1193,7 +1219,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1312,7 +1338,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- 
/dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,29 @@ + +import struct + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + if size == 1: + return struct.unpack_from("B", buffer(cp)[i:])[0] + elif size == 2: + return struct.unpack_from("H", buffer(cp)[i * 2:])[0] + elif size == 4: + return struct.unpack_from("I", buffer(cp)[i * 4:])[0] diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -878,7 +878,6 @@ month = self._month if day is None: day = self._day - year, month, day = _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. @@ -1389,8 +1388,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __nonzero__(self): @@ -1608,9 +1605,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py --- a/lib_pypy/pyrepl/simple_interact.py +++ b/lib_pypy/pyrepl/simple_interact.py @@ -63,3 +63,6 @@ except KeyboardInterrupt: console.write("\nKeyboardInterrupt\n") console.resetbuffer() + except MemoryError: + console.write("\nMemoryError\n") + console.resetbuffer() diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -38,7 +38,7 @@ # General information about the project. project = u'PyPy' -copyright = u'2013, The PyPy Project' +copyright = u'2014, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.2.0' +release = '2.2.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -83,7 +83,7 @@ _winreg -* Supported by being rewritten in pure Python (possibly using ``ctypes``): +* Supported by being rewritten in pure Python (possibly using ``cffi``): see the `lib_pypy/`_ directory. Examples of modules that we support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``... 
Note that some modules are both in there and in the list above; @@ -316,5 +316,4 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). - .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,7 +72,13 @@ Here is a list of the limitations and missing features of the current implementation: -* No support for ``PyXxx`` functions from ``libpython``, for obvious reasons. +* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer + of PyPy, at your own risks and without doing anything sensible about + the GIL. Since PyPy 2.3, these functions are also named with an extra + "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, + but it might more or less work in simple cases if you do. (Obviously, + assuming the PyObject pointers you get have any particular fields in + any particular order is just going to crash.) * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,119 +5,68 @@ This document tries to explain how to interface the PyPy python interpreter with any external library. -Note: We try to describe state-of-the art, but it -might fade out of date as this is the front on which things are changing -in pypy rapidly. +Right now, there are the following possibilities of providing +third-party modules for the PyPy python interpreter (in order of +usefulness): -Possibilities -============= +* Write them in pure Python and use CFFI_. -Right now, there are three possibilities of providing third-party modules -for the PyPy python interpreter (in order of usefulness): +* Write them in pure Python and use ctypes_. -* Write them in pure python and use ctypes, see ctypes_ - section +* Write them in C++ and bind them through Reflex_. -* Write them in pure python and use direct libffi low-level bindings, See - \_ffi_ module description. +* Write them in as `RPython mixed modules`_. -* Write them in RPython as mixedmodule_, using *rffi* as bindings. -* Write them in C++ and bind them through Reflex_ +CFFI +==== -.. _ctypes: #CTypes -.. _\_ffi: #LibFFI -.. _mixedmodule: #Mixed Modules +CFFI__ is the recommended way. It is a way to write pure Python code +that accesses C libraries. The idea is to support either ABI- or +API-level access to C --- so that you can sanely access C libraries +without depending on details like the exact field order in the C +structures or the numerical value of all the constants. It works on +both CPython (as a separate ``pip install cffi``) and on PyPy, where it +is included by default. + +PyPy's JIT does a quite reasonable job on the Python code that call C +functions or manipulate C pointers with CFFI. (As of PyPy 2.2.1, it +could still be improved, but is already good.) + +See the documentation here__. + +.. __: http://cffi.readthedocs.org/ +.. __: http://cffi.readthedocs.org/ + CTypes ====== -The ctypes module in PyPy is ready to use. -It's goal is to be as-compatible-as-possible with the -`CPython ctypes`_ version. Right now it's able to support large examples, -such as pyglet. 
PyPy is planning to have a 100% compatible ctypes -implementation, without the CPython C-level API bindings (so it is very -unlikely that direct object-manipulation trickery through this API will work). +The goal of the ctypes module of PyPy is to be as compatible as possible +with the `CPython ctypes`_ version. It works for large examples, such +as pyglet. PyPy's implementation is not strictly 100% compatible with +CPython, but close enough for most cases. -We also provide a `ctypes-configure`_ for overcoming the platform dependencies, -not relying on the ctypes codegen. This tool works by querying gcc about -platform-dependent details (compiling small snippets of C code and running -them), so it'll benefit not pypy-related ctypes-based modules as well. +We also used to provide ``ctypes-configure`` for some API-level access. +This is now viewed as a precursor of CFFI, which you should use instead. +More (but older) information is available here__. +Also, ctypes' performance is not as good as CFFI's. -ctypes call are optimized by the JIT and the resulting machine code contains a -direct call to the target C function. However, due to the very dynamic nature -of ctypes, some overhead over a bare C call is still present, in particular to -check/convert the types of the parameters. Moreover, even if most calls are -optimized, some cannot and thus need to follow the slow path, not optimized by -the JIT. +.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +.. __: ctypes-implementation.html -.. _`ctypes-configure`: ctypes-implementation.html#ctypes-configure -.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +PyPy implements ctypes as pure Python code around two built-in modules +called ``_ffi`` and ``_rawffi``, which give a very low-level binding to +the C library libffi_. Nowadays it is not recommended to use directly +these two modules. -Pros ----- +.. _libffi: http://sourceware.org/libffi/ -Stable, CPython-compatible API. Most calls are fast, optimized by JIT. - -Cons ----- - -Problems with platform-dependency (although we partially solve -those). Although the JIT optimizes ctypes calls, some overhead is still -present. The slow-path is very slow. - - -LibFFI -====== - -Mostly in order to be able to write a ctypes module, we developed a very -low-level libffi bindings called ``_ffi``. (libffi is a C-level library for dynamic calling, -which is used by CPython ctypes). This library provides stable and usable API, -although it's API is a very low-level one. It does not contain any -magic. It is also optimized by the JIT, but has much less overhead than ctypes. - -Pros ----- - -It Works. Probably more suitable for a delicate code where ctypes magic goes -in a way. All calls are optimized by the JIT, there is no slow path as in -ctypes. - -Cons ----- - -It combines disadvantages of using ctypes with disadvantages of using mixed -modules. CPython-incompatible API, very rough and low-level. - -Mixed Modules -============= - -This is the most advanced and powerful way of writing extension modules. -It has some serious disadvantages: - -* a mixed module needs to be written in RPython, which is far more - complicated than Python (XXX link) - -* due to lack of separate compilation (as of July 2011), each - compilation-check requires to recompile whole PyPy python interpreter, - which takes 0.5-1h. We plan to solve this at some point in near future. 
- -* although rpython is a garbage-collected language, the border between - C and RPython needs to be managed by hand (each object that goes into the - C level must be explicitly freed). - -Some documentation is available `here`_ - -.. _`here`: rffi.html - -XXX we should provide detailed docs about lltype and rffi, especially if we - want people to follow that way. Reflex ====== -This method is still experimental and is being exercised on a branch, -`reflex-support`_, which adds the `cppyy`_ module. +This method is still experimental. It adds the `cppyy`_ module. The method works by using the `Reflex package`_ to provide reflection information of the C++ code, which is then used to automatically generate bindings at runtime. @@ -168,3 +117,15 @@ to work around it in python or with a C++ helper function. Although Reflex works on various platforms, the bindings with PyPy have only been tested on Linux. + + +RPython Mixed Modules +===================== + +This is the internal way to write built-in extension modules in PyPy. +It cannot be used by any 3rd-party module: the extension modules are +*built-in*, not independently loadable DLLs. + +This is reserved for special cases: it gives direct access to e.g. the +details of the JIT, allowing us to tweak its interaction with user code. +This is how the numpy module is being developed. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -72,13 +72,13 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf -.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf -.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf +.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. 
_`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -444,7 +444,7 @@ So the position of the core PyPy developers is that if anyone wants to make an N+1'th attempt with LLVM, they are welcome, and will be happy to provide help in the IRC channel, but they are left with the burden of proof -that it works. +that (a) it works and (b) it gives important benefits. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.2.0`_: the latest official release +* `Release 2.2.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.0`: http://pypy.org/download.html +.. _`Release 2.2.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/release-2.2.1.rst b/pypy/doc/release-2.2.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.2.1.rst @@ -0,0 +1,47 @@ +======================================= +PyPy 2.2.1 - Incrementalism.1 +======================================= + +We're pleased to announce PyPy 2.2.1, which targets version 2.7.3 of the Python +language. This is a bugfix release over 2.2. + +You can download the PyPy 2.2.1 release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +This is a bugfix release. The most important bugs fixed are: + +* an issue in sockets' reference counting emulation, showing up + notably when using the ssl module and calling ``makefile()``. + +* Tkinter support on Windows. + +* If sys.maxunicode==65535 (on Windows and maybe OS/X), the json + decoder incorrectly decoded surrogate pairs. + +* some FreeBSD fixes. + +Note that CFFI 0.8.1 was released. Both versions 0.8 and 0.8.1 are +compatible with both PyPy 2.2 and 2.2.1. + + +Cheers, +Armin Rigo & everybody diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,3 +15,29 @@ .. branch: armhf-singlefloat JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. 
branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -47,6 +47,9 @@ def get_raw_address(self): raise ValueError("no raw buffer") + def is_writable(self): + return False + # __________ app-level support __________ def descr_len(self, space): @@ -135,6 +138,9 @@ __slots__ = () # no extra slot here + def is_writable(self): + return True + def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -805,8 +805,8 @@ raise TypeError("Varargs and keywords not supported in unwrap_spec") argspec = ', '.join([arg for arg in args.args[1:]]) func_code = py.code.Source(""" - def f(w_obj, %(args)s): - return w_obj.%(func_name)s(%(args)s) + def f(self, %(args)s): + return self.%(func_name)s(%(args)s) """ % {'args': argspec, 'func_name': func.func_name}) d = {} exec func_code.compile() in d @@ -821,7 +821,7 @@ else: assert isinstance(unwrap_spec, dict) unwrap_spec = unwrap_spec.copy() - unwrap_spec['w_obj'] = base_cls + unwrap_spec['self'] = base_cls return interp2app(globals()['unwrap_spec'](**unwrap_spec)(f)) class interp2app(W_Root): diff --git a/pypy/module/__builtin__/app_operation.py b/pypy/module/__builtin__/app_operation.py --- a/pypy/module/__builtin__/app_operation.py +++ b/pypy/module/__builtin__/app_operation.py @@ -1,4 +1,5 @@ +import operator + def bin(x): - if not isinstance(x, (int, long)): - raise TypeError("must be int or long") - return x.__format__("#b") + value = operator.index(x) + return value.__format__("#b") diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -46,6 +46,15 @@ assert bin(2L) == "0b10" assert bin(-2L) == "-0b10" raises(TypeError, bin, 0.) 
+ class C(object): + def __index__(self): + return 42 + assert bin(C()) == bin(42) + class D(object): + def __int__(self): + return 42 + exc = raises(TypeError, bin, D()) + assert "index" in exc.value.message def test_unichr(self): import sys diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -5,7 +5,9 @@ from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray +from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw class LLBuffer(RWBuffer): @@ -34,8 +36,7 @@ def setslice(self, start, string): raw_cdata = rffi.ptradd(self.raw_cdata, start) - for i in range(len(string)): - raw_cdata[i] = string[i] + copy_string_to_raw(llstr(string), raw_cdata, 0, len(string)) class MiniBuffer(W_Root): @@ -59,6 +60,9 @@ def descr__buffer__(self, space): return self.buffer.descr__buffer__(space) + def descr_str(self, space): + return space.wrap(self.buffer.as_str()) + MiniBuffer.typedef = TypeDef( "buffer", @@ -68,6 +72,7 @@ __setitem__ = interp2app(MiniBuffer.descr_setitem), __buffer__ = interp2app(MiniBuffer.descr__buffer__), __weakref__ = make_weakref_descr(MiniBuffer), + __str__ = interp2app(MiniBuffer.descr_str), ) MiniBuffer.typedef.acceptable_as_base_class = False diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr -from rpython.rlib import objectmodel, rgc +from rpython.rlib import rgc from rpython.rlib.objectmodel import keepalive_until_here, specialize from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name @@ -206,8 +206,7 @@ w_value.get_array_length() == length): # fast path: copying from exactly the correct type s = w_value._cdata - for i in range(ctitemsize * length): - cdata[i] = s[i] + rffi.c_memcpy(cdata, s, ctitemsize * length) keepalive_until_here(w_value) return # @@ -259,7 +258,6 @@ space = self.space if isinstance(w_other, W_CData): from pypy.module._cffi_backend import ctypeptr, ctypearray - from pypy.module._cffi_backend import ctypevoid ct = w_other.ctype if isinstance(ct, ctypearray.W_CTypeArray): ct = ct.ctptr @@ -424,6 +422,12 @@ def get_array_length(self): return self.length + def _sizeof(self): + from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray + ctype = self.ctype + assert isinstance(ctype, W_CTypePtrOrArray) + return self.length * ctype.ctitem.size + class W_CDataHandle(W_CData): _attrs_ = ['w_keepalive'] diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -42,7 +42,7 @@ if self.handles[d]() is None: self.look_distance = d + 1 return d - # full! extend, but don't use '!=' here + # full! 
extend, but don't use '+=' here self.handles = self.handles + [dead_ref] * (length // 3 + 5) self.look_distance = length + 1 return length diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2143,7 +2143,13 @@ c = newp(BCharArray, b"hi there") # buf = buffer(c) - assert str(buf).startswith('<_cffi_backend.buffer object at 0x') + assert repr(buf).startswith('<_cffi_backend.buffer object at 0x') + assert bytes(buf) == b"hi there\x00" + if sys.version_info < (3,): + assert str(buf) == "hi there\x00" + assert unicode(buf) == u+"hi there\x00" + else: + assert str(buf) == repr(buf) # --mb_length-- assert len(buf) == len(b"hi there\x00") # --mb_item-- @@ -3125,6 +3131,12 @@ py.test.raises(TypeError, "p + cast(new_primitive_type('int'), 42)") py.test.raises(TypeError, "p - cast(new_primitive_type('int'), 42)") +def test_sizeof_sliced_array(): + BInt = new_primitive_type("int") + BArray = new_array_type(new_pointer_type(BInt), 10) + p = newp(BArray, None) + assert sizeof(p[2:9]) == 7 * sizeof(BInt) + def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -15,6 +15,7 @@ from pypy.module._rawffi.interp_rawffi import unpack_shape_with_length from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib.rarithmetic import r_uint +from rpython.rlib import rgc class W_Array(W_DataShape): @@ -220,6 +221,7 @@ def __init__(self, space, shape, length): W_ArrayInstance.__init__(self, space, shape, length, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -16,7 +16,7 @@ from pypy.module._rawffi.interp_rawffi import unroll_letters_for_numbers from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr -from rpython.rlib import clibffi +from rpython.rlib import clibffi, rgc from rpython.rlib.rarithmetic import intmask, signedtype, widen from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong @@ -226,6 +226,7 @@ fieldtypes) return self.ffi_struct.ffistruct + @rgc.must_be_light_finalizer def __del__(self): if self.ffi_struct: lltype.free(self.ffi_struct, flavor='raw') @@ -380,6 +381,7 @@ def __init__(self, space, shape): W_StructureInstance.__init__(self, space, shape, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() diff --git a/pypy/module/_rawffi/test/test_tracker.py b/pypy/module/_rawffi/test/test_tracker.py --- a/pypy/module/_rawffi/test/test_tracker.py +++ b/pypy/module/_rawffi/test/test_tracker.py @@ -1,9 +1,21 @@ +import py +from pypy.conftest import option from pypy.module._rawffi.tracker import Tracker + class AppTestTracker: spaceconfig = dict(usemodules=['_rawffi', 'struct']) def setup_class(cls): + # + # detect if we're running on PyPy with DO_TRACING not compiled in + if option.runappdirect: + try: + import _rawffi + _rawffi._num_of_allocated_objects() + except (ImportError, RuntimeError), e: + py.test.skip(str(e)) + # Tracker.DO_TRACING = True def test_array(self): diff --git a/pypy/module/_rawffi/tracker.py b/pypy/module/_rawffi/tracker.py --- 
a/pypy/module/_rawffi/tracker.py +++ b/pypy/module/_rawffi/tracker.py @@ -2,9 +2,11 @@ """ The file that keeps track about freed/kept-alive objects allocated by _rawffi. Used for debugging ctypes """ +from pypy.interpreter.error import OperationError + class Tracker(object): - DO_TRACING = True + DO_TRACING = False # make sure this stays False by default! def __init__(self): self.alloced = {} @@ -20,6 +22,9 @@ tracker = Tracker() def num_of_allocated_objects(space): + if not tracker.DO_TRACING: + raise OperationError(space.w_RuntimeError, + space.wrap("DO_TRACING not enabled in this PyPy")) return space.wrap(len(tracker.alloced)) def print_alloced_objects(space): diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -415,7 +415,13 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + try: + w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + raise OperationError(space.w_IndexError, + space.wrap("no such group")) groupnum = space.int_w(w_groupnum) if groupnum == 0: return self.ctx.match_start, self.ctx.match_end diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -172,6 +172,9 @@ assert ("1", "1", None) == m.group(1, 2, 3) assert ("1", None) == m.group("first", "second") raises(IndexError, m.group, 1, 4) + assert ("1", None) == m.group(1, "second") + raises(IndexError, m.group, 'foobarbaz') + raises(IndexError, m.group, 'first', 'foobarbaz') def test_expand(self): import re diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -711,8 +711,12 @@ raise ssl_error(space, "SSL_CTX_use_certificate_chain_file error") # ssl compatibility - libssl_SSL_CTX_set_options(ss.ctx, - SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) + options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS + if protocol != PY_SSL_VERSION_SSL2: + # SSLv2 is extremely broken, don't use it unless a user specifically + # requests it + options |= SSL_OP_NO_SSLv2 + libssl_SSL_CTX_set_options(ss.ctx, options) verification_mode = SSL_VERIFY_NONE if cert_mode == PY_SSL_CERT_OPTIONAL: @@ -724,7 +728,7 @@ libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address # of a str object may be changed by the garbage collector. - libssl_SSL_set_mode(ss.ssl, + libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -52,6 +52,7 @@ # weakref callbacks are not invoked eagerly here. They are # invoked by self.__del__() anyway. 
+ @jit.dont_look_inside def get_or_make_weakref(self, w_subtype, w_obj): space = self.space w_weakreftype = space.gettypeobject(W_Weakref.typedef) @@ -70,6 +71,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def get_or_make_proxy(self, w_obj): space = self.space if self.cached_proxy is not None: @@ -122,6 +124,7 @@ W_WeakrefBase.activate_callback, 'weakref callback of ') + @jit.dont_look_inside def make_weakref_with_callback(self, w_subtype, w_obj, w_callable): space = self.space w_ref = space.allocate_instance(W_Weakref, w_subtype) @@ -129,6 +132,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def make_proxy_with_callback(self, w_obj, w_callable): space = self.space if space.is_true(space.callable(w_obj)): @@ -239,15 +243,16 @@ w_obj.setweakref(space, lifeline) return lifeline - at jit.dont_look_inside + def get_or_make_weakref(space, w_subtype, w_obj): return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj) - at jit.dont_look_inside + def make_weakref_with_callback(space, w_subtype, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) + def descr__new__weakref(space, w_subtype, w_obj, w_callable=None, __args__=None): if __args__.arguments_w: @@ -312,15 +317,16 @@ w_obj = force(space, self) return space.call_args(w_obj, __args__) - at jit.dont_look_inside + def get_or_make_proxy(space, w_obj): return getlifeline(space, w_obj).get_or_make_proxy(w_obj) - at jit.dont_look_inside + def make_proxy_with_callback(space, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_proxy_with_callback(w_obj, w_callable) + def proxy(space, w_obj, w_callable=None): """Create a proxy object that weakly references 'obj'. 
'callback', if given, is called with the proxy as an argument when 'obj' diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -26,6 +26,7 @@ from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject from pypy.module.__builtin__.interp_memoryview import W_MemoryView +from pypy.module.micronumpy.base import W_NDimArray from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable @@ -399,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', 'init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', 'init_pycobject', + 'PyCObject_Type', '_Py_init_pycobject', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', 'init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -469,6 +470,7 @@ "Complex": "space.w_complex", "ByteArray": "space.w_bytearray", "MemoryView": "space.gettypeobject(W_MemoryView.typedef)", + "Array": "space.gettypeobject(W_NDimArray.typedef)", "BaseObject": "space.w_object", 'None': 'space.type(space.w_None)', 'NotImplemented': 'space.type(space.w_NotImplemented)', @@ -685,11 +687,15 @@ globals()['va_get_%s' % name_no_star] = func def setup_init_functions(eci, translating): - init_buffer = rffi.llexternal('init_bufferobject', [], lltype.Void, + if translating: + prefix = 'PyPy' + else: + prefix = 'cpyexttest' + init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_pycobject = rffi.llexternal('init_pycobject', [], lltype.Void, + init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) - init_capsule = rffi.llexternal('init_capsule', [], lltype.Void, + init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, compilation_info=eci, _nowrapper=True) INIT_FUNCTIONS.extend([ lambda space: init_buffer(), @@ -697,12 +703,8 @@ lambda space: init_capsule(), ]) from pypy.module.posix.interp_posix import add_fork_hook - if translating: - reinit_tls = rffi.llexternal('PyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) - else: - reinit_tls = rffi.llexternal('PyPyThread_ReInitTLS', [], lltype.Void, - compilation_info=eci) + reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, + compilation_info=eci) add_fork_hook('child', reinit_tls) def init_function(func): @@ -744,7 +746,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=True, do_deref=True) + generate_macros(export_symbols, prefix='cpyexttest') # Structure declaration code members = [] @@ -810,7 +812,7 @@ 
INTERPLEVEL_API[name] = w_obj - name = name.replace('Py', 'PyPy') + name = name.replace('Py', 'cpyexttest') if isptr: ptr = ctypes.c_void_p.in_dll(bridge, name) if typ == 'PyObject*': @@ -822,7 +824,7 @@ ptr.value = ctypes.cast(ll2ctypes.lltype2ctypes(value), ctypes.c_void_p).value elif typ in ('PyObject*', 'PyTypeObject*'): - if name.startswith('PyPyExc_'): + if name.startswith('PyPyExc_') or name.startswith('cpyexttestExc_'): # we already have the pointer in_dll = ll2ctypes.get_ctypes_type(PyObject).in_dll(bridge, name) py_obj = ll2ctypes.ctypes2lltype(PyObject, in_dll) @@ -857,28 +859,27 @@ setup_init_functions(eci, translating=False) return modulename.new(ext='') -def generate_macros(export_symbols, rename=True, do_deref=True): +def mangle_name(prefix, name): + if name.startswith('Py'): + return prefix + name[2:] + elif name.startswith('_Py'): + return '_' + prefix + name[3:] + else: + return None + +def generate_macros(export_symbols, prefix): "NOT_RPYTHON" pypy_macros = [] renamed_symbols = [] for name in export_symbols: - if name.startswith("PyPy"): - renamed_symbols.append(name) - continue - if not rename: - continue name = name.replace("#", "") - newname = name.replace('Py', 'PyPy') - if not rename: - newname = name + newname = mangle_name(prefix, name) + assert newname, name pypy_macros.append('#define %s %s' % (name, newname)) if name.startswith("PyExc_"): pypy_macros.append('#define _%s _%s' % (name, newname)) renamed_symbols.append(newname) - if rename: - export_symbols[:] = renamed_symbols - else: - export_symbols[:] = [sym.replace("#", "") for sym in export_symbols] + export_symbols[:] = renamed_symbols # Generate defines for macro_name, size in [ @@ -1039,7 +1040,7 @@ from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() - generate_macros(export_symbols, rename=False, do_deref=False) + generate_macros(export_symbols, prefix='PyPy') functions = generate_decls_and_callbacks(db, [], api_struct=False) code = "#include \n" + "\n".join(functions) @@ -1069,7 +1070,8 @@ export_struct(name, struct) for name, func in FUNCTIONS.iteritems(): - deco = entrypoint_lowlevel("cpyext", func.argtypes, name, relax=True) + newname = mangle_name('PyPy', name) or name + deco = entrypoint_lowlevel("cpyext", func.argtypes, newname, relax=True) deco(func.get_wrapper(space)) setup_init_functions(eci, translating=True) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -37,7 +37,7 @@ PyObject* PyBuffer_New(Py_ssize_t size); -void init_bufferobject(void); +void _Py_init_bufferobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void init_capsule(void); +void _Py_init_capsule(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void init_pycobject(void); +void _Py_init_pycobject(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ 
-25,6 +25,22 @@ #define Py_UNICODE_SIZE 2 #endif +#ifndef Py_BUILD_CORE /* not building the core - must be an ext */ +# if defined(_MSC_VER) + /* So MSVC users need not specify the .lib file in + * their Makefile (other compilers are generally + * taken care of by distutils.) */ +# ifdef _DEBUG +# error("debug first with cpython") +# pragma comment(lib,"python27.lib") +# else +# pragma comment(lib,"python27.lib") +# endif /* _DEBUG */ +# endif +#endif /* _MSC_VER */ + + + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,7 +783,7 @@ return size; } -void init_bufferobject(void) +void _Py_init_bufferobject(void) { PyType_Ready(&PyBuffer_Type); } diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,7 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void init_capsule() +void _Py_init_capsule() { PyType_Ready(&PyCapsule_Type); } diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void init_pycobject() +void _Py_init_pycobject() { PyType_Ready(&PyCObject_Type); } diff --git a/pypy/module/cpyext/src/ndarrayobject.c b/pypy/module/cpyext/src/ndarrayobject.c --- a/pypy/module/cpyext/src/ndarrayobject.c +++ b/pypy/module/cpyext/src/ndarrayobject.c @@ -3,8 +3,6 @@ #include "numpy/arrayobject.h" #include /* memset, memcpy */ -PyTypeObject PyArray_Type; - void _PyArray_FILLWBYTE(PyObject* obj, int val) { memset(PyArray_DATA(obj), val, PyArray_NBYTES(obj)); diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -286,3 +286,19 @@ assert dt.num == 11 + def test_pass_ndarray_object_to_c(self): + from _numpypy.multiarray import ndarray + mod = self.import_extension('foo', [ + ("check_array", "METH_VARARGS", + ''' + PyObject* obj; + if (!PyArg_ParseTuple(args, "O!", &PyArray_Type, &obj)) + return NULL; + Py_INCREF(obj); + return obj; + '''), + ], prologue='#include ') + array = ndarray((3, 4), dtype='d') + assert mod.check_array(array) is array + raises(TypeError, "mod.check_array(42)") + diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -8,8 +8,10 @@ module = self.import_extension('foo', [ ("get_thread_ident", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - return PyInt_FromLong(PyPyThread_get_thread_ident()); +#ifndef PyThread_get_thread_ident +#error "seems we are not accessing PyPy's functions" +#endif + return PyInt_FromLong(PyThread_get_thread_ident()); """), ]) import thread, threading @@ -32,17 +34,19 @@ module = self.import_extension('foo', [ ("test_acquire_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - if (PyPyThread_acquire_lock(lock, 1) != 1) { +#ifndef PyThread_allocate_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + if (PyThread_acquire_lock(lock, 1) != 1) { 
PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - if (PyPyThread_acquire_lock(lock, 0) != 0) { + if (PyThread_acquire_lock(lock, 0) != 0) { PyErr_SetString(PyExc_AssertionError, "second acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), @@ -53,15 +57,17 @@ module = self.import_extension('foo', [ ("test_release_lock", "METH_NOARGS", """ - /* Use the 'PyPy' prefix to ensure we access our functions */ - PyThread_type_lock lock = PyPyThread_allocate_lock(); - PyPyThread_acquire_lock(lock, 1); - PyPyThread_release_lock(lock); - if (PyPyThread_acquire_lock(lock, 0) != 1) { +#ifndef PyThread_release_lock +#error "seems we are not accessing PyPy's functions" +#endif + PyThread_type_lock lock = PyThread_allocate_lock(); + PyThread_acquire_lock(lock, 1); + PyThread_release_lock(lock); + if (PyThread_acquire_lock(lock, 0) != 1) { PyErr_SetString(PyExc_AssertionError, "first acquire"); return NULL; } - PyPyThread_free_lock(lock); + PyThread_free_lock(lock); Py_RETURN_NONE; """), diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -342,10 +342,8 @@ if space.is_w(w_startstop, space.w_None): start = 0 else: - start = space.int_w(w_startstop) - if start < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Indicies for islice() must be non-negative integers.")) + start = self.arg_int_w(w_startstop, 0, + "Indicies for islice() must be None or non-negative integers") w_stop = args_w[0] else: raise OperationError(space.w_TypeError, space.wrap("islice() takes at most 4 arguments (" + str(num_args) + " given)")) @@ -353,10 +351,8 @@ if space.is_w(w_stop, space.w_None): stop = -1 else: - stop = space.int_w(w_stop) - if stop < 0: - raise OperationError(space.w_ValueError, space.wrap( - "Stop argument must be a non-negative integer or None.")) + stop = self.arg_int_w(w_stop, 0, + "Stop argument must be a non-negative integer or None.") stop = max(start, stop) # for obscure CPython compatibility if num_args == 2: @@ -364,10 +360,8 @@ if space.is_w(w_step, space.w_None): step = 1 else: - step = space.int_w(w_step) - if step < 1: - raise OperationError(space.w_ValueError, space.wrap( - "Step must be one or lager for islice().")) + step = self.arg_int_w(w_step, 1, + "Step for islice() must be a positive integer or None") else: step = 1 @@ -375,6 +369,18 @@ self.start = start self.stop = stop + def arg_int_w(self, w_obj, minimum, errormsg): + space = self.space + try: + result = space.int_w(w_obj) + except OperationError, e: + if e.async(space): + raise + result = -1 + if result < minimum: + raise OperationError(space.w_ValueError, space.wrap(errormsg)) + return result + def iter_w(self): return self.space.wrap(self) diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -304,6 +304,11 @@ raises(TypeError, itertools.islice, [], 0, 0, 0, 0) + # why not TypeError? 
Because CPython + raises(ValueError, itertools.islice, [], "a", 1, 2) + raises(ValueError, itertools.islice, [], 0, "a", 2) + raises(ValueError, itertools.islice, [], 0, 1, "a") + def test_chain(self): import itertools diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py --- a/pypy/module/math/app_math.py +++ b/pypy/module/math/app_math.py @@ -1,13 +1,40 @@ def factorial(x): - """Find x!.""" + """factorial(x) -> Integral + + "Find x!. Raise a ValueError if x is negative or non-integral.""" if isinstance(x, float): fl = int(x) if fl != x: raise ValueError("float arguments must be integral") x = fl - if x < 0: - raise ValueError("x must be >= 0") - res = 1 - for i in range(1, x + 1): - res *= i - return res + + if x <= 100: + if x < 0: + raise ValueError("x must be >= 0") + res = 1 + for i in range(2, x + 1): + res *= i + return res + + # Experimentally this gap seems good + gap = max(100, x >> 7) + def _fac_odd(low, high): + if low + gap >= high: + t = 1 + for i in range(low, high, 2): + t *= i + return t + + mid = ((low + high) >> 1) | 1 + return _fac_odd(low, mid) * _fac_odd(mid, high) + + def _fac1(x): + if x <= 2: + return 1, 1, x - 1 + x2 = x >> 1 + f, g, shift = _fac1(x2) + g *= _fac_odd((x2 + 1) | 1, x + 1) + return (f * g, g, shift + x2) + + res, _, shift = _fac1(x) + return res << shift diff --git a/pypy/module/math/test/test_factorial.py b/pypy/module/math/test/test_factorial.py new file mode 100644 --- /dev/null +++ b/pypy/module/math/test/test_factorial.py @@ -0,0 +1,29 @@ +import py +import math +from pypy.module.math import app_math + +def test_factorial_extra(): + for x in range(1000): + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + assert type(r1) == type(r2) + +def test_timing(): + py.test.skip("for manual running only") + import time + x = 5000 + repeat = 1000 + r1 = app_math.factorial(x) + r2 = math.factorial(x) + assert r1 == r2 + t1 = time.time() + for i in range(repeat): + app_math.factorial(x) + t2 = time.time() + for i in range(repeat): + math.factorial(x) + t3 = time.time() + assert r1 == r2 + print (t2 - t1) / repeat + print (t3 - t2) / repeat diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -10,7 +10,7 @@ 'array': 'interp_numarray.array', 'zeros': 'interp_numarray.zeros', 'empty': 'interp_numarray.zeros', - 'ones': 'interp_numarray.ones', + 'empty_like': 'interp_numarray.empty_like', '_reconstruct' : 'interp_numarray._reconstruct', 'scalar' : 'interp_numarray.build_scalar', 'dot': 'interp_arrayops.dot', @@ -106,8 +106,6 @@ ('logaddexp2', 'logaddexp2'), ('real', 'real'), ('imag', 'imag'), - ('ones_like', 'ones_like'), - ('zeros_like', 'zeros_like'), ]: interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py --- a/pypy/module/micronumpy/app_numpy.py +++ b/pypy/module/micronumpy/app_numpy.py @@ -12,7 +12,9 @@ if dtype is None: test = _numpypy.multiarray.array([start, stop, step, 0]) dtype = test.dtype - arr = _numpypy.multiarray.zeros(int(math.ceil((stop - start) / step)), dtype=dtype) + length = math.ceil((float(stop) - start) / step) + length = int(length) + arr = _numpypy.multiarray.zeros(length, dtype=dtype) i = start for j in range(arr.size): arr[j] = i diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- 
a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -47,7 +47,7 @@ def setslice(self, space, arr): impl = arr.implementation if impl.is_scalar(): - self.fill(impl.get_scalar_value()) + self.fill(space, impl.get_scalar_value()) return shape = shape_agreement(space, self.get_shape(), arr) if impl.storage == self.storage: @@ -100,7 +100,7 @@ tmp = self.get_real(orig_array) tmp.setslice(space, convert_to_array(space, w_value)) - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() if self.dtype.is_complex_type(): @@ -110,11 +110,11 @@ impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) if not self.dtype.is_flexible_type(): - impl.fill(self.dtype.box(0)) + impl.fill(space, self.dtype.box(0)) return impl def set_imag(self, space, orig_array, w_value): - tmp = self.get_imag(orig_array) + tmp = self.get_imag(space, orig_array) tmp.setslice(space, convert_to_array(space, w_value)) # -------------------- applevel get/setitem ----------------------- @@ -357,7 +357,7 @@ self.get_backstrides(), self.get_shape()) - def fill(self, box): + def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), box, 0, self.size, 0) @@ -392,6 +392,21 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) +class ConcreteArrayWithBase(ConcreteArrayNotOwning): + def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): + ConcreteArrayNotOwning.__init__(self, shape, dtype, order, + strides, backstrides, storage) + self.orig_base = orig_base + + def base(self): + return self.orig_base + + +class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase): + def descr_setitem(self, space, orig_array, w_index, w_value): + raise OperationError(space.w_ValueError, space.wrap( + "assignment destination is read-only")) + class NonWritableArray(ConcreteArray): def descr_setitem(self, space, orig_array, w_index, w_value): @@ -420,8 +435,8 @@ def base(self): return self.orig_arr - def fill(self, box): - loop.fill(self, box.convert_to(self.dtype)) + def fill(self, space, box): + loop.fill(self, box.convert_to(space, self.dtype)) def create_iter(self, shape=None, backward_broadcast=False, require_index=False): if shape is not None and \ @@ -487,3 +502,6 @@ def getlength(self): return self.impl.size + + def get_raw_address(self): + return self.impl.storage diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -54,8 +54,7 @@ return self.value def set_scalar_value(self, w_val): - assert isinstance(w_val, W_GenericBox) - self.value = w_val.convert_to(self.dtype) + self.value = w_val def copy(self, space): scalar = Scalar(self.dtype) @@ -96,12 +95,12 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) if self.dtype.is_complex_type(): self.value = self.dtype.itemtype.composite( - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), self.value.convert_imag_to(dtype)) else: self.value = w_arr.get_scalar_value() - def get_imag(self, orig_array): + def get_imag(self, space, orig_array): if self.dtype.is_complex_type(): scalar = Scalar(self.dtype.float_type) scalar.value = self.value.convert_imag_to(scalar.dtype) @@ -125,13 +124,23 @@ ','.join([str(x) for x in w_arr.get_shape()],)))) self.value = 
self.dtype.itemtype.composite( self.value.convert_real_to(dtype), - w_arr.get_scalar_value().convert_to(dtype), + w_arr.get_scalar_value().convert_to(space, dtype), ) From noreply at buildbot.pypy.org Mon Jan 13 16:39:51 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 13 Jan 2014 16:39:51 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: revert changes to assembler.py and start support of transaction breaks in optimizeopt Message-ID: <20140113153951.655001C0282@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68645:195b15f42c66 Date: 2014-01-13 12:15 +0100 http://bitbucket.org/pypy/pypy/changeset/195b15f42c66/ Log: revert changes to assembler.py and start support of transaction breaks in optimizeopt diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -3160,17 +3160,52 @@ return # tests only mc = self.mc + # if stm_should_break_transaction() + fn = stmtlocal.stm_should_break_transaction_fn + mc.CALL(imm(self.cpu.cast_ptr_to_int(fn))) + mc.TEST8(eax.lowest8bits(), eax.lowest8bits()) + mc.J_il(rx86.Conditions['Z'], 0xfffff) # patched later + jz_location2 = mc.get_relative_pos() # # call stm_transaction_break() with the address of the # STM_RESUME_BUF and the custom longjmp function self.push_gcmap(mc, gcmap, mov=True) # + # save all registers + base_ofs = self.cpu.get_baseofs_of_frame_field() + for gpr in self._regalloc.rm.reg_bindings.values(): + v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + mc.MOV_br(v * WORD + base_ofs, gpr.value) + if IS_X86_64: + coeff = 1 + else: + coeff = 2 + ofs = len(gpr_reg_mgr_cls.all_regs) + for xr in self._regalloc.xrm.reg_bindings.values(): + mc.MOVSD_bx((ofs + xr.value * coeff) * WORD + base_ofs, xr.value) + # # CALL break function fn = self.stm_transaction_break_path mc.CALL(imm(fn)) # ** HERE ** is the place an aborted transaction retries # ebp/frame reloaded by longjmp callback # + # restore regs + base_ofs = self.cpu.get_baseofs_of_frame_field() + for gpr in self._regalloc.rm.reg_bindings.values(): + v = gpr_reg_mgr_cls.all_reg_indexes[gpr.value] + mc.MOV_rb(gpr.value, v * WORD + base_ofs) + if IS_X86_64: + coeff = 1 + else: + coeff = 2 + ofs = len(gpr_reg_mgr_cls.all_regs) + for xr in self._regalloc.xrm.reg_bindings.values(): + mc.MOVSD_xb(xr.value, (ofs + xr.value * coeff) * WORD + base_ofs) + # + # patch the JZ above + offset = mc.get_relative_pos() - jz_location2 + mc.overwrite32(jz_location2-4, offset) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1278,8 +1278,8 @@ def consider_stm_transaction_break(self, op): # # only save regs for the should_break_transaction call - self.xrm.before_call(save_all_regs=1) - self.rm.before_call(save_all_regs=1) + self.xrm.before_call() + self.rm.before_call() gcmap = self.get_gcmap() # allocate the gcmap *before* # self.assembler.stm_transaction_break(gcmap) diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1347,11 +1347,13 @@ arg = int(op.args[0].value) c_arg = Constant(arg, lltype.Signed) - return SpaceOperation('stm_should_break_transaction', - [c_arg], op.result) + return [SpaceOperation('stm_should_break_transaction', + [c_arg], op.result), + SpaceOperation('-live-', [], None),] def 
rewrite_op_jit_stm_transaction_break_point(self, op): - return SpaceOperation('stm_transaction_break', [], op.result) + return [SpaceOperation('stm_transaction_break', [], op.result), + SpaceOperation('-live-', [], None),] def rewrite_op_jit_marker(self, op): key = op.args[0].value diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -8,6 +8,7 @@ from rpython.jit.metainterp.optimizeopt.simplify import OptSimplify from rpython.jit.metainterp.optimizeopt.pure import OptPure from rpython.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce +from rpython.jit.metainterp.optimizeopt.stm import OptSTM from rpython.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.debug import debug_start, debug_stop, debug_print @@ -35,6 +36,9 @@ config = metainterp_sd.config optimizations = [] unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict + if config.translation.stm: + optimizations.append(OptSTM()) + for name, opt in unroll_all_opts: if name in enable_opts: if opt is not None: diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -0,0 +1,36 @@ +from rpython.jit.metainterp.history import (Const, ConstInt, BoxInt, BoxFloat, + BoxPtr, make_hashable_int) +from rpython.jit.metainterp.optimizeopt.optimizer import (Optimization, REMOVED, + CONST_0, CONST_1) +from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method +from rpython.jit.metainterp.resoperation import (opboolinvers, opboolreflex, rop, + ResOperation) + + +class OptSTM(Optimization): + """ + For now only changes some guarded transaction breaks + to unconditional ones. 
+ """ + def __init__(self): + pass + + def new(self): + return OptSTM() + + def propagate_forward(self, op): + dispatch_opt(self, op) + + def optimize_CALL(self, op): + self.emit_operation(op) + + def optimize_STM_TRANSACTION_BREAK(self, op): + self.emit_operation(op) + +dispatch_opt = make_dispatcher_method(OptSTM, 'optimize_', + default=OptSTM.emit_operation) + + + + + diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -0,0 +1,26 @@ +from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import ( + BaseTestBasic,) +from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin + + + + +class BaseTestSTM(BaseTestBasic): + stm = True + + def test_simple(self): + ops = """ + [] + stm_transaction_break() + guard_not_forced() [] + jump() + """ + expected = ops + self.optimize_loop(ops, expected) + + + +class TestLLtype(BaseTestSTM, LLtypeMixin): + pass + + diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -365,6 +365,8 @@ metainterp_sd.virtualref_info = self.vrefinfo if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection + if hasattr(self, 'stm'): + metainterp_sd.config.translation.stm = self.stm # optimize_trace(metainterp_sd, loop, self.enable_opts) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -187,14 +187,46 @@ raise AssertionError("bad result box type") # ------------------------------ + def _record_stm_transaction_break(self): + # records an unconditional stm_transaction_break + mi = self.metainterp + mi.vable_and_vrefs_before_residual_call() + mi._record_helper_nonpure_varargs( + rop.STM_TRANSACTION_BREAK, None, None, []) + mi.vrefs_after_residual_call() + mi.vable_after_residual_call() + mi.generate_guard(rop.GUARD_NOT_FORCED, None) + + @arguments("int") def opimpl_stm_should_break_transaction(self, if_there_is_no_other): val = bool(if_there_is_no_other) mi = self.metainterp - if (mi.stm_break_wanted or (val and not mi.stm_break_done)): - mi.stm_break_done = True + if mi.stm_break_wanted: + # after call_release_gil and similar we want to have + # stm_transaction_break that may disable optimizations, + # but they would have been disabled anyways by the call + self._record_stm_transaction_break() mi.stm_break_wanted = False - # insert a CALL + return ConstInt(0) + elif val: + # Never ignore these. 
As long as we don't track the info + # if we are atomic, this could be the only possible + # transaction break in the loop (they are the most + # likely ones): + # loop: stmts -> inc_atomic -> stmts -> dec_atomic -> + # transaction_break -> loop_end + # + # we insert: + # i0 = call(should_break_transaction) + # stm_transaction_break() + # guard_not_forced() + # guard_false(i0) + # + # the stm_transaction_break() and its guard, + # OR + # the call(should_break_transaction) and its guard, + # or both are going to be removed by optimizeopt resbox = history.BoxInt(0) funcptr = mi.staticdata.stm_should_break_transaction funcdescr = mi.staticdata.stm_should_break_transaction_descr @@ -202,14 +234,21 @@ mi._record_helper_nonpure_varargs( rop.CALL, resbox, funcdescr, [ConstInt(heaptracker.adr2int(funcaddr)),]) + # + # ALSO generate an stm_transaction_break + # This is needed to be able to transform the guard + # into an unconditional TB during optimizeopt + # if wanted... + self._record_stm_transaction_break() + # return resbox else: + # we ignore this one. return ConstInt(0) @arguments() def opimpl_stm_transaction_break(self): - self.metainterp._record_helper_nonpure_varargs( - rop.STM_TRANSACTION_BREAK, None, None, []) + self._record_stm_transaction_break() for _opimpl in ['int_add', 'int_sub', 'int_mul', 'int_floordiv', 'int_mod', 'int_lt', 'int_le', 'int_eq', @@ -1703,7 +1742,7 @@ # for stm: placement of stm_break_point, used by MIFrame self.stm_break_wanted = False - self.stm_break_done = False + self.stm_insert_first_break = True diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -23,14 +23,22 @@ return rstm.jit_stm_should_break_transaction(False) res = self.interp_operations(g, [], translationoptions={"stm":True}) assert res == False - self.check_operations_history(call=1, call_may_force=1) + self.check_operations_history(stm_transaction_break=1, call_may_force=1) + + def test_not_removed2(self): + def g(): + return rstm.jit_stm_should_break_transaction(True) + res = self.interp_operations(g, [], translationoptions={"stm":True}) + assert res == False + self.check_operations_history(call=1, stm_transaction_break=1) def test_transaction_break(self): def g(): rstm.jit_stm_transaction_break_point() return 42 self.interp_operations(g, [], translationoptions={"stm":True}) - self.check_operations_history({'stm_transaction_break':1}) + self.check_operations_history({'stm_transaction_break':1, + 'guard_not_forced':1}) From noreply at buildbot.pypy.org Mon Jan 13 16:39:52 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 13 Jan 2014 16:39:52 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: test and fix transaction break removal Message-ID: <20140113153952.A47181C0282@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68646:f7e20f997e43 Date: 2014-01-13 14:30 +0100 http://bitbucket.org/pypy/pypy/changeset/f7e20f997e43/ Log: test and fix transaction break removal diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -85,6 +85,8 @@ OS_JIT_FORCE_VIRTUAL = 120 OS_JIT_FORCE_VIRTUALIZABLE = 121 + OS_JIT_STM_SHOULD_BREAK_TRANSACTION = 130 + # for debugging: _OS_CANRAISE = set([ OS_NONE, OS_STR2UNICODE, OS_LIBFFI_CALL, OS_RAW_MALLOC_VARSIZE_CHAR, diff --git a/rpython/jit/metainterp/compile.py 
b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -45,6 +45,7 @@ name = metainterp.staticdata.stats.name_for_new_loop() loop = TreeLoop(name_prefix + name) loop.call_pure_results = metainterp.call_pure_results + loop.stm_info = {} return loop diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -634,6 +634,7 @@ inputargs = None operations = None call_pure_results = None + stm_info = None logops = None quasi_immutable_deps = None resume_at_jump_descr = None diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -362,6 +362,7 @@ self.optearlyforce = None if loop is not None: self.call_pure_results = loop.call_pure_results + self.stm_info = loop.stm_info self.set_optimizations(optimizations) self.setup() diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -5,7 +5,7 @@ from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import (opboolinvers, opboolreflex, rop, ResOperation) - +from rpython.jit.codewriter.effectinfo import EffectInfo class OptSTM(Optimization): """ @@ -13,19 +13,43 @@ to unconditional ones. """ def __init__(self): - pass - - def new(self): - return OptSTM() + self.remove_next_break = False + self.remove_next_gnf = False def propagate_forward(self, op): dispatch_opt(self, op) + + def _seen_unconditional_break(self): + return self.optimizer.stm_info.get('seen_unconditional_break', False) def optimize_CALL(self, op): + effectinfo = op.getdescr().get_extra_info() + oopspecindex = effectinfo.oopspecindex + if oopspecindex == EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION: + if not self._seen_unconditional_break(): + self.make_constant_int(op.result, False) + return + else: + self.remove_next_break = True self.emit_operation(op) + def optimize_STM_TRANSACTION_BREAK(self, op): - self.emit_operation(op) + self.optimizer.stm_info['seen_unconditional_break'] = True + + if self.remove_next_break: + self.remove_next_break = False + self.remove_next_gnf = True + else: + self.emit_operation(op) + + def optimize_GUARD_NOT_FORCED(self, op): + if self.remove_next_gnf: + self.remove_next_gnf = False + else: + self.emit_operation(op) + + dispatch_opt = make_dispatcher_method(OptSTM, 'optimize_', default=OptSTM.emit_operation) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -1,12 +1,24 @@ -from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import ( - BaseTestBasic,) +from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import ( + BaseTestWithUnroll,) from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin +from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory +class TestSTM(BaseTestWithUnroll, LLtypeMixin): + stm = True - -class BaseTestSTM(BaseTestBasic): - stm = True + FUNC = lltype.FuncType([], lltype.Signed) + sbtdescr = 
LLtypeMixin.cpu.calldescrof( + FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], [], + EffectInfo.EF_CANNOT_RAISE, + oopspecindex=EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION, + can_invalidate=False) + ) + namespace = LLtypeMixin.namespace.copy() + namespace.update(locals()) + def test_simple(self): ops = """ @@ -18,9 +30,99 @@ expected = ops self.optimize_loop(ops, expected) + def test_unrolled_loop(self): + ops = """ + [] + i0 = call(123, descr=sbtdescr) + stm_transaction_break() + guard_not_forced() [] + guard_false(i0) [] + jump() + """ + preamble = """ + [] + stm_transaction_break() + guard_not_forced() [] + jump() + """ + expected = """ + [] + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + jump() + """ + self.optimize_loop(ops, expected, expected_preamble=preamble) + def test_unrolled_loop2(self): + ops = """ + [] + stm_transaction_break() + guard_not_forced() [] -class TestLLtype(BaseTestSTM, LLtypeMixin): - pass + i0 = call(123, descr=sbtdescr) + stm_transaction_break() + guard_not_forced() [] + guard_false(i0) [] + jump() + """ + preamble = """ + [] + stm_transaction_break() + guard_not_forced() [] + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + + jump() + """ + expected = """ + [] + stm_transaction_break() + guard_not_forced() [] + + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + jump() + """ + self.optimize_loop(ops, expected, expected_preamble=preamble) + + def test_not_disable_opt(self): + ops = """ + [p1] + i0 = call(123, descr=sbtdescr) + stm_transaction_break() + guard_not_forced() [] + guard_false(i0) [] + + i1 = getfield_gc(p1, descr=adescr) + + jump(p1) + """ + preamble = """ + [p1] + stm_transaction_break() + guard_not_forced() [] + + i1 = getfield_gc(p1, descr=adescr) + + jump(p1) + """ + expected = """ + [p1] + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + + jump(p1) + """ + self.optimize_loop(ops, expected, expected_preamble=preamble) + + + + + + + + + + diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -351,12 +351,14 @@ assert equaloplists(optimized.operations, expected.operations, False, remap, text_right) - def _do_optimize_loop(self, loop, call_pure_results): + def _do_optimize_loop(self, loop, call_pure_results, + stm_info): from rpython.jit.metainterp.optimizeopt import optimize_trace from rpython.jit.metainterp.optimizeopt.util import args_dict self.loop = loop loop.call_pure_results = args_dict() + loop.stm_info = stm_info if call_pure_results is not None: for k, v in call_pure_results.items(): loop.call_pure_results[list(k)] = v @@ -388,7 +390,8 @@ preamble.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(token))] + \ operations + \ [ResOperation(rop.LABEL, jump_args, None, descr=token)] - self._do_optimize_loop(preamble, call_pure_results) + stm_info = {} + self._do_optimize_loop(preamble, call_pure_results, stm_info) assert preamble.operations[-1].getopnum() == rop.LABEL @@ -403,7 +406,7 @@ assert loop.operations[0].getopnum() == rop.LABEL loop.inputargs = loop.operations[0].getarglist() - self._do_optimize_loop(loop, call_pure_results) + self._do_optimize_loop(loop, call_pure_results, stm_info) extra_same_as = [] while loop.operations[0].getopnum() != rop.LABEL: extra_same_as.append(loop.operations[0]) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- 
a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1572,6 +1572,7 @@ ei = EffectInfo([], [], [], [], EffectInfo.EF_CANNOT_RAISE, + oopspecindex=EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION, can_invalidate=False) self.stm_should_break_transaction_descr = ( From noreply at buildbot.pypy.org Mon Jan 13 16:39:53 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 13 Jan 2014 16:39:53 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: partly moved transaction break logic from pyjitpl to optimizeopt Message-ID: <20140113153953.C863C1C0282@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68647:da15e05409ad Date: 2014-01-13 14:56 +0100 http://bitbucket.org/pypy/pypy/changeset/da15e05409ad/ Log: partly moved transaction break logic from pyjitpl to optimizeopt diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -13,35 +13,33 @@ to unconditional ones. """ def __init__(self): - self.remove_next_break = False - self.remove_next_gnf = False + self.remove_next_gnf = False # guard_not_forced def propagate_forward(self, op): dispatch_opt(self, op) - def _seen_unconditional_break(self): - return self.optimizer.stm_info.get('seen_unconditional_break', False) + def _break_wanted(self): + return self.optimizer.stm_info.get('break_wanted', True) + + def _set_break_wanted(self, val): + self.optimizer.stm_info['break_wanted'] = val def optimize_CALL(self, op): effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex if oopspecindex == EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION: - if not self._seen_unconditional_break(): - self.make_constant_int(op.result, False) - return - else: - self.remove_next_break = True + self._set_break_wanted(False) self.emit_operation(op) def optimize_STM_TRANSACTION_BREAK(self, op): - self.optimizer.stm_info['seen_unconditional_break'] = True + assert not self.remove_next_gnf - if self.remove_next_break: - self.remove_next_break = False + if self._break_wanted(): + self._set_break_wanted(False) + self.emit_operation(op) + else: self.remove_next_gnf = True - else: - self.emit_operation(op) def optimize_GUARD_NOT_FORCED(self, op): if self.remove_next_gnf: diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -20,38 +20,14 @@ namespace.update(locals()) - def test_simple(self): - ops = """ - [] - stm_transaction_break() - guard_not_forced() [] - jump() - """ - expected = ops - self.optimize_loop(ops, expected) - def test_unrolled_loop(self): ops = """ [] i0 = call(123, descr=sbtdescr) - stm_transaction_break() - guard_not_forced() [] - guard_false(i0) [] - jump() - """ - preamble = """ - [] - stm_transaction_break() - guard_not_forced() [] - jump() - """ - expected = """ - [] - i0 = call(123, descr=sbtdescr) guard_false(i0) [] jump() """ - self.optimize_loop(ops, expected, expected_preamble=preamble) + self.optimize_loop(ops, ops, expected_preamble=ops) def test_unrolled_loop2(self): ops = """ @@ -60,8 +36,6 @@ guard_not_forced() [] i0 = call(123, descr=sbtdescr) - stm_transaction_break() - guard_not_forced() [] guard_false(i0) [] jump() @@ -78,9 +52,6 @@ """ expected = """ [] - stm_transaction_break() - guard_not_forced() [] - i0 = call(123, descr=sbtdescr) 
guard_false(i0) [] jump() @@ -90,22 +61,18 @@ def test_not_disable_opt(self): ops = """ [p1] + i1 = getfield_gc(p1, descr=adescr) + i0 = call(123, descr=sbtdescr) - stm_transaction_break() - guard_not_forced() [] guard_false(i0) [] - - i1 = getfield_gc(p1, descr=adescr) - jump(p1) """ preamble = """ [p1] - stm_transaction_break() - guard_not_forced() [] - i1 = getfield_gc(p1, descr=adescr) - + + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] jump(p1) """ expected = """ @@ -117,7 +84,12 @@ """ self.optimize_loop(ops, expected, expected_preamble=preamble) - + # def test_dont_remove_first_tb(self): + # ops = """ + # [] + # stm_transaction_break() + # guard_not_forced() [] + diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -202,31 +202,7 @@ def opimpl_stm_should_break_transaction(self, if_there_is_no_other): val = bool(if_there_is_no_other) mi = self.metainterp - if mi.stm_break_wanted: - # after call_release_gil and similar we want to have - # stm_transaction_break that may disable optimizations, - # but they would have been disabled anyways by the call - self._record_stm_transaction_break() - mi.stm_break_wanted = False - return ConstInt(0) - elif val: - # Never ignore these. As long as we don't track the info - # if we are atomic, this could be the only possible - # transaction break in the loop (they are the most - # likely ones): - # loop: stmts -> inc_atomic -> stmts -> dec_atomic -> - # transaction_break -> loop_end - # - # we insert: - # i0 = call(should_break_transaction) - # stm_transaction_break() - # guard_not_forced() - # guard_false(i0) - # - # the stm_transaction_break() and its guard, - # OR - # the call(should_break_transaction) and its guard, - # or both are going to be removed by optimizeopt + if val: resbox = history.BoxInt(0) funcptr = mi.staticdata.stm_should_break_transaction funcdescr = mi.staticdata.stm_should_break_transaction_descr @@ -235,15 +211,9 @@ rop.CALL, resbox, funcdescr, [ConstInt(heaptracker.adr2int(funcaddr)),]) # - # ALSO generate an stm_transaction_break - # This is needed to be able to transform the guard - # into an unconditional TB during optimizeopt - # if wanted... - self._record_stm_transaction_break() - # return resbox else: - # we ignore this one. 
+ self._record_stm_transaction_break() return ConstInt(0) @arguments() @@ -1469,7 +1439,6 @@ # XXX refactor: direct_libffi_call() is a hack if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: self.metainterp.direct_libffi_call() - self.metainterp.stm_break_wanted = True return resbox else: effect = effectinfo.extraeffect @@ -1740,11 +1709,6 @@ self.call_ids = [] self.current_call_id = 0 - - # for stm: placement of stm_break_point, used by MIFrame - self.stm_break_wanted = False - self.stm_insert_first_break = True - def retrace_needed(self, trace): diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -12,9 +12,9 @@ def test_simple(self): def g(): return rstm.jit_stm_should_break_transaction(False) - res = self.interp_operations(g, []) + res = self.interp_operations(g, [], translationoptions={"stm":True}) assert res == False - self.check_operations_history({}) + self.check_operations_history(stm_transaction_break=1) def test_not_removed(self): import time @@ -30,7 +30,7 @@ return rstm.jit_stm_should_break_transaction(True) res = self.interp_operations(g, [], translationoptions={"stm":True}) assert res == False - self.check_operations_history(call=1, stm_transaction_break=1) + self.check_operations_history(call=1, stm_transaction_break=0) def test_transaction_break(self): def g(): From noreply at buildbot.pypy.org Mon Jan 13 16:39:54 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 13 Jan 2014 16:39:54 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: another test Message-ID: <20140113153954.F33291C0282@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68648:3cee8a6b2cd2 Date: 2014-01-13 14:58 +0100 http://bitbucket.org/pypy/pypy/changeset/3cee8a6b2cd2/ Log: another test diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -84,13 +84,35 @@ """ self.optimize_loop(ops, expected, expected_preamble=preamble) - # def test_dont_remove_first_tb(self): - # ops = """ - # [] - # stm_transaction_break() - # guard_not_forced() [] - + def test_dont_remove_first_tb(self): + ops = """ + [] + stm_transaction_break() + guard_not_forced() [] + stm_transaction_break() + guard_not_forced() [] + stm_transaction_break() + guard_not_forced() [] + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + jump() + """ + preamble = """ + [] + stm_transaction_break() + guard_not_forced() [] + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + jump() + """ + expected = """ + [] + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + jump() + """ + self.optimize_loop(ops, expected, expected_preamble=preamble) From noreply at buildbot.pypy.org Mon Jan 13 16:39:56 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 13 Jan 2014 16:39:56 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: add transaction breaks after guard_not_forced Message-ID: <20140113153956.3B9C71C0282@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68649:8a7060e399a8 Date: 2014-01-13 15:05 +0100 http://bitbucket.org/pypy/pypy/changeset/8a7060e399a8/ Log: add transaction breaks after guard_not_forced diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ 
b/rpython/jit/metainterp/optimizeopt/stm.py @@ -14,6 +14,7 @@ """ def __init__(self): self.remove_next_gnf = False # guard_not_forced + self.keep_but_ignore_gnf = False def propagate_forward(self, op): dispatch_opt(self, op) @@ -38,6 +39,7 @@ if self._break_wanted(): self._set_break_wanted(False) self.emit_operation(op) + self.keep_but_ignore_gnf = True else: self.remove_next_gnf = True @@ -45,6 +47,9 @@ if self.remove_next_gnf: self.remove_next_gnf = False else: + if not self.keep_but_ignore_gnf: + self._set_break_wanted(True) + self.keep_but_ignore_gnf = False self.emit_operation(op) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -114,6 +114,53 @@ """ self.optimize_loop(ops, expected, expected_preamble=preamble) + def test_add_tb_after_guard_not_forced(self): + ops = """ + [] + stm_transaction_break() + guard_not_forced() [] + + escape() # e.g. like a call_release_gil + guard_not_forced() [] + + stm_transaction_break() + guard_not_forced() [] + stm_transaction_break() + guard_not_forced() [] + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + jump() + """ + preamble = """ + [] + stm_transaction_break() + guard_not_forced() [] + + escape() + guard_not_forced() [] + + stm_transaction_break() + guard_not_forced() [] + + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + jump() + """ + expected = """ + [] + escape() + guard_not_forced() [] + + stm_transaction_break() + guard_not_forced() [] + + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + jump() + """ + self.optimize_loop(ops, expected, expected_preamble=preamble) + + From noreply at buildbot.pypy.org Mon Jan 13 16:39:57 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 13 Jan 2014 16:39:57 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: connect guard_not_forced with stm_transaction_break in regalloc/assembler Message-ID: <20140113153957.775DB1C0282@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68650:79f98636eaf7 Date: 2014-01-13 15:47 +0100 http://bitbucket.org/pypy/pypy/changeset/79f98636eaf7/ Log: connect guard_not_forced with stm_transaction_break in regalloc/assembler diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -656,7 +656,8 @@ def can_merge_with_next_guard(self, op, i, operations): if (op.getopnum() == rop.CALL_MAY_FORCE or op.getopnum() == rop.CALL_ASSEMBLER or - op.getopnum() == rop.CALL_RELEASE_GIL): + op.getopnum() == rop.CALL_RELEASE_GIL or + op.getopnum() == rop.STM_TRANSACTION_BREAK): assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED return True if not op.is_comparison(): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -3153,12 +3153,15 @@ dest_addr = AddressLoc(base_loc, ofs_loc) mc.MOV(dest_addr, X86_64_SCRATCH_REG) - - def stm_transaction_break(self, gcmap): + def genop_guard_stm_transaction_break(self, op, guard_op, guard_token, + arglocs, result_loc): assert self.cpu.gc_ll_descr.stm if not we_are_translated(): return # tests only + gcmap = self._regalloc.get_gcmap() + self._store_force_index(guard_op) + mc = self.mc # if stm_should_break_transaction() fn = 
stmtlocal.stm_should_break_transaction_fn @@ -3207,6 +3210,8 @@ offset = mc.get_relative_pos() - jz_location2 mc.overwrite32(jz_location2-4, offset) + self._emit_guard_not_forced(guard_token) + diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1275,14 +1275,13 @@ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.perform_discard(op, [base_loc, ofs_loc, size_loc]) - def consider_stm_transaction_break(self, op): + def consider_stm_transaction_break(self, op, guard_op): # # only save regs for the should_break_transaction call self.xrm.before_call() self.rm.before_call() - gcmap = self.get_gcmap() # allocate the gcmap *before* # - self.assembler.stm_transaction_break(gcmap) + self.perform_with_guard(op, guard_op, [], None) def consider_jump(self, op): @@ -1423,7 +1422,8 @@ if (is_comparison_or_ovf_op(num) or num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER - or num == rop.CALL_RELEASE_GIL): + or num == rop.CALL_RELEASE_GIL + or num == rop.STM_TRANSACTION_BREAK): oplist_with_guard[num] = value oplist[num] = add_none_argument(value) else: diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -20,7 +20,8 @@ dispatch_opt(self, op) def _break_wanted(self): - return self.optimizer.stm_info.get('break_wanted', True) + is_loop = self.optimizer.loop.operations[0].getopnum() == rop.LABEL + return self.optimizer.stm_info.get('break_wanted', is_loop) def _set_break_wanted(self, val): self.optimizer.stm_info['break_wanted'] = val From noreply at buildbot.pypy.org Mon Jan 13 16:39:58 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 13 Jan 2014 16:39:58 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: fix placing TB in bridge Message-ID: <20140113153958.A1B9B1C0282@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68651:205454cb7542 Date: 2014-01-13 16:02 +0100 http://bitbucket.org/pypy/pypy/changeset/205454cb7542/ Log: fix placing TB in bridge diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1274,6 +1274,7 @@ args = op.getarglist() base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args) self.perform_discard(op, [base_loc, ofs_loc, size_loc]) + def consider_stm_transaction_break(self, op, guard_op): # diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -899,8 +899,8 @@ return False - @arguments() - def bhimpl_stm_transaction_break(): + @arguments("i") + def bhimpl_stm_transaction_break(really_wanted): pass # ---------- diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -635,6 +635,7 @@ operations = None call_pure_results = None stm_info = None + is_really_loop = False logops = None quasi_immutable_deps = None resume_at_jump_descr = None diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -20,7 +20,7 @@ dispatch_opt(self, op) def _break_wanted(self): - is_loop = 
self.optimizer.loop.operations[0].getopnum() == rop.LABEL + is_loop = self.optimizer.loop.is_really_loop return self.optimizer.stm_info.get('break_wanted', is_loop) def _set_break_wanted(self, val): @@ -36,8 +36,8 @@ def optimize_STM_TRANSACTION_BREAK(self, op): assert not self.remove_next_gnf - - if self._break_wanted(): + really_wanted = op.getarg(0).getint() + if really_wanted or self._break_wanted(): self._set_break_wanted(False) self.emit_operation(op) self.keep_but_ignore_gnf = True diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -29,10 +29,41 @@ """ self.optimize_loop(ops, ops, expected_preamble=ops) + def test_really_wanted_tb(self): + ops = """ + [] + stm_transaction_break(0) + guard_not_forced() [] + + stm_transaction_break(1) + guard_not_forced() [] + + jump() + """ + preamble = """ + [] + stm_transaction_break(0) + guard_not_forced() [] + + stm_transaction_break(1) + guard_not_forced() [] + + jump() + """ + expected = """ + [] + stm_transaction_break(1) + guard_not_forced() [] + + jump() + """ + self.optimize_loop(ops, expected, expected_preamble=preamble) + + def test_unrolled_loop2(self): ops = """ [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] i0 = call(123, descr=sbtdescr) @@ -42,7 +73,7 @@ """ preamble = """ [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] i0 = call(123, descr=sbtdescr) @@ -87,11 +118,11 @@ def test_dont_remove_first_tb(self): ops = """ [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] i0 = call(123, descr=sbtdescr) guard_false(i0) [] @@ -99,7 +130,7 @@ """ preamble = """ [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] i0 = call(123, descr=sbtdescr) @@ -117,15 +148,15 @@ def test_add_tb_after_guard_not_forced(self): ops = """ [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] escape() # e.g. 
like a call_release_gil guard_not_forced() [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] i0 = call(123, descr=sbtdescr) guard_false(i0) [] @@ -133,13 +164,13 @@ """ preamble = """ [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] escape() guard_not_forced() [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] i0 = call(123, descr=sbtdescr) @@ -151,7 +182,7 @@ escape() guard_not_forced() [] - stm_transaction_break() + stm_transaction_break(0) guard_not_forced() [] i0 = call(123, descr=sbtdescr) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -74,6 +74,7 @@ start_label = loop.operations[0] if start_label.getopnum() == rop.LABEL: + loop.is_really_loop = True loop.operations = loop.operations[1:] # We need to emit the label op before import_state() as emitting it # will clear heap caches diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -187,12 +187,13 @@ raise AssertionError("bad result box type") # ------------------------------ - def _record_stm_transaction_break(self): + def _record_stm_transaction_break(self, really_wanted): # records an unconditional stm_transaction_break mi = self.metainterp mi.vable_and_vrefs_before_residual_call() mi._record_helper_nonpure_varargs( - rop.STM_TRANSACTION_BREAK, None, None, []) + rop.STM_TRANSACTION_BREAK, None, None, + [history.ConstInt(really_wanted)]) mi.vrefs_after_residual_call() mi.vable_after_residual_call() mi.generate_guard(rop.GUARD_NOT_FORCED, None) @@ -213,12 +214,12 @@ # return resbox else: - self._record_stm_transaction_break() + self._record_stm_transaction_break(False) return ConstInt(0) @arguments() def opimpl_stm_transaction_break(self): - self._record_stm_transaction_break() + self._record_stm_transaction_break(True) for _opimpl in ['int_add', 'int_sub', 'int_mul', 'int_floordiv', 'int_mod', 'int_lt', 'int_le', 'int_eq', diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -510,7 +510,7 @@ 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] 'KEEPALIVE/1', - 'STM_TRANSACTION_BREAK/0', + 'STM_TRANSACTION_BREAK/1', 'STM_SET_REVISION_GC/1d', # not really GC, writes raw to the header '_CANRAISE_FIRST', # ----- start of can_raise operations ----- From noreply at buildbot.pypy.org Mon Jan 13 16:39:59 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 13 Jan 2014 16:39:59 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: it was the impl in runner.py that was actually missing Message-ID: <20140113153959.CFE751C0282@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68652:59e483bddcfb Date: 2014-01-13 16:10 +0100 http://bitbucket.org/pypy/pypy/changeset/59e483bddcfb/ Log: it was the impl in runner.py that was actually missing diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -969,6 +969,9 @@ def execute_cond_call_stm_b(self, descr, a): py.test.skip("cond_call_stm_b not 
supported") + def execute_stm_transaction_break(self, _, really_wanted): + pass + def execute_increment_debug_counter(self, descr, a): pass diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -899,8 +899,8 @@ return False - @arguments("i") - def bhimpl_stm_transaction_break(really_wanted): + @arguments() + def bhimpl_stm_transaction_break(): pass # ---------- From noreply at buildbot.pypy.org Mon Jan 13 16:42:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 13 Jan 2014 16:42:17 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: another possible topic Message-ID: <20140113154217.134851C0282@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5128:c64366b7ae96 Date: 2014-01-13 16:42 +0100 http://bitbucket.org/pypy/extradoc/changeset/c64366b7ae96/ Log: another possible topic diff --git a/sprintinfo/leysin-winter-2014/planning.txt b/sprintinfo/leysin-winter-2014/planning.txt --- a/sprintinfo/leysin-winter-2014/planning.txt +++ b/sprintinfo/leysin-winter-2014/planning.txt @@ -38,3 +38,5 @@ * longs multiplication: patch at https://bugs.pypy.org/issue892 * look into merging refactor-str-types (johan, mjacob) + +* tweaking ast classes: https://bugs.pypy.org/issue1673 From noreply at buildbot.pypy.org Mon Jan 13 18:07:19 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 13 Jan 2014 18:07:19 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: add a medium slowpath for write-barriers on private_from_protected objects Message-ID: <20140113170719.B88AF1C050C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68653:c9086eff1e22 Date: 2014-01-13 18:05 +0100 http://bitbucket.org/pypy/pypy/changeset/c9086eff1e22/ Log: add a medium slowpath for write-barriers on private_from_protected objects diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -529,6 +529,18 @@ mc = codebuf.MachineCodeBlockWrapper() # if not for_frame: + if descr.stmcat in ['A2W', 'A2V']: + # slow fastpath + flag = StmGC.GCFLAG_PRIVATE_FROM_PROTECTED >> 40 + off = 5 + assert 0 < flag < 256 + mc.MOV_rs(X86_64_SCRATCH_REG.value, WORD) + mc.TEST8_mi((X86_64_SCRATCH_REG.value, off), flag) + mc.J_il8(rx86.Conditions['Z'], 0) + jz = mc.get_relative_pos() + mc.RET() + mc.overwrite(jz - 1, chr(mc.get_relative_pos() - jz)) + self._push_all_regs_to_frame(mc, [], withfloats, callee_only=True) if IS_X86_32: # we have 2 extra words on stack for retval and we pass 1 extra From noreply at buildbot.pypy.org Tue Jan 14 07:12:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 14 Jan 2014 07:12:27 +0100 (CET) Subject: [pypy-commit] pypy default: Fix (found by targetgcbench) Message-ID: <20140114061227.4B2401C0500@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68655:e90073a83e63 Date: 2014-01-14 07:11 +0100 http://bitbucket.org/pypy/pypy/changeset/e90073a83e63/ Log: Fix (found by targetgcbench) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -624,6 +624,7 @@ i += 1 return count + @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find(s1, s2, start, end): if start < 0: start = 0 @@ -638,6 +639,7 @@ return LLHelpers.ll_search(s1, s2, start, end, 
FAST_FIND) + @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_rfind(s1, s2, start, end): if start < 0: start = 0 From noreply at buildbot.pypy.org Tue Jan 14 09:06:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 14 Jan 2014 09:06:23 +0100 (CET) Subject: [pypy-commit] cffi default: Issue 131: support ffi.cdef("...", packed=True) Message-ID: <20140114080623.607EA1C30AC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1453:c5e17441bc96 Date: 2014-01-14 09:06 +0100 http://bitbucket.org/cffi/cffi/changeset/c5e17441bc96/ Log: Issue 131: support ffi.cdef("...", packed=True) diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3586,6 +3586,7 @@ #define SF_MSVC_BITFIELDS 1 #define SF_GCC_ARM_BITFIELDS 2 #define SF_GCC_BIG_ENDIAN 4 +#define SF_PACKED 8 static PyObject *b_complete_struct_or_union(PyObject *self, PyObject *args) { @@ -3671,8 +3672,8 @@ boffset = 0; /* reset each field at offset 0 */ /* update the total alignment requirement, but skip it if the - field is an anonymous bitfield */ - falign = get_alignment(ftype); + field is an anonymous bitfield or if SF_PACKED */ + falign = (sflags & SF_PACKED) ? 1 : get_alignment(ftype); if (falign < 0) goto error; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3148,6 +3148,34 @@ p = newp(BArray, None) assert sizeof(p[2:9]) == 7 * sizeof(BInt) +def test_packed(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BShort = new_primitive_type("short") + BStruct = new_struct_type("struct foo") + complete_struct_or_union(BStruct, [('a1', BLong, -1), + ('a2', BChar, -1), + ('a3', BShort, -1)], + None, -1, -1, 8) # SF_PACKED==8 + d = BStruct.fields + assert len(d) == 3 + assert d[0][0] == 'a1' + assert d[0][1].type is BLong + assert d[0][1].offset == 0 + assert d[0][1].bitshift == -1 + assert d[0][1].bitsize == -1 + assert d[1][0] == 'a2' + assert d[1][1].type is BChar + assert d[1][1].offset == sizeof(BLong) + assert d[1][1].bitshift == -1 + assert d[1][1].bitsize == -1 + assert d[2][0] == 'a3' + assert d[2][1].type is BShort + assert d[2][1].offset == sizeof(BLong) + sizeof(BChar) + assert d[2][1].bitshift == -1 + assert d[2][1].bitsize == -1 + assert sizeof(BStruct) == sizeof(BLong) + sizeof(BChar) + sizeof(BShort) + assert alignof(BStruct) == 1 def test_version(): # this test is here mostly for PyPy diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -88,18 +88,20 @@ self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() - def cdef(self, csource, override=False): + def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, types, and global variables. The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. The types can be used in 'ffi.new()' and other functions. + If 'packed' is specified as True, all structs declared inside this + cdef are packed, i.e. laid out without any field alignment at all. 
""" if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: - self._parser.parse(csource, override=override) + self._parser.parse(csource, override=override, packed=packed) self._cdefsources.append(csource) if override: for cache in self._function_caches: diff --git a/cffi/backend_ctypes.py b/cffi/backend_ctypes.py --- a/cffi/backend_ctypes.py +++ b/cffi/backend_ctypes.py @@ -720,7 +720,7 @@ return self._new_struct_or_union('union', name, ctypes.Union) def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, - totalsize=-1, totalalignment=-1): + totalsize=-1, totalalignment=-1, sflags=0): if totalsize >= 0 or totalalignment >= 0: raise NotImplementedError("the ctypes backend of CFFI does not support " "structures completed by verify(); please " @@ -739,6 +739,8 @@ else: cfields.append((fname, BField._ctype, bitsize)) bfield_types[fname] = Ellipsis + if sflags & 8: + struct_or_union._pack_ = 1 struct_or_union._fields_ = cfields CTypesStructOrUnion._bfield_types = bfield_types # diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -98,6 +98,7 @@ self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() self._override = False + self._packed = False def _parse(self, csource): csource, macros = _preprocess(csource) @@ -147,13 +148,16 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False): + def parse(self, csource, override=False, packed=False): prev_override = self._override + prev_packed = self._packed try: self._override = override + self._packed = packed self._internal_parse(csource) finally: self._override = prev_override + self._packed = prev_packed def _internal_parse(self, csource): ast, macros = self._parse(csource) @@ -476,6 +480,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) + tp.packed = self._packed return tp def _make_partial(self, tp, nested): diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -255,6 +255,7 @@ fixedlayout = None completed = False partial = False + packed = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -311,7 +312,11 @@ fldtypes = [tp.get_cached_btype(ffi, finishlist) for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) - ffi._backend.complete_struct_or_union(BType, lst, self) + sflags = 0 + if self.packed: + sflags = 8 # SF_PACKED + ffi._backend.complete_struct_or_union(BType, lst, self, + -1, -1, sflags) # else: fldtypes = [] diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -851,6 +851,14 @@ ``ffi`` normally caches the string ``"int[]"`` to not need to re-parse it all the time. +.. versionadded:: 0.9 + The ``ffi.cdef()`` call takes an optional argument ``packed``: if + True, then all structs declared within this cdef are "packed". This + has a meaning similar to ``__attribute__((packed))`` in GCC. It + specifies that all structure fields should have an alignment of one + byte. (Note that the packed attribute has no effect on bit fields so + far, which mean that they may be packed differently than on GCC.) 
+ Python 3 support ---------------- diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1549,3 +1549,21 @@ ffi2.include(ffi1) p = ffi2.new("foo_p", [142]) assert p.x == 142 + + def test_struct_packed(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct nonpacked { char a; int b; };") + ffi.cdef("struct is_packed { char a; int b; };", packed=True) + assert ffi.sizeof("struct nonpacked") == 8 + assert ffi.sizeof("struct is_packed") == 5 + assert ffi.alignof("struct nonpacked") == 4 + assert ffi.alignof("struct is_packed") == 1 + s = ffi.new("struct is_packed[2]") + s[0].b = 42623381 + s[0].a = 'X' + s[1].b = -4892220 + s[1].a = 'Y' + assert s[0].b == 42623381 + assert s[0].a == 'X' + assert s[1].b == -4892220 + assert s[1].a == 'Y' From noreply at buildbot.pypy.org Tue Jan 14 09:08:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 14 Jan 2014 09:08:57 +0100 (CET) Subject: [pypy-commit] cffi default: Fix test Message-ID: <20140114080857.8ACBA1C30AC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1454:1974af91fdb2 Date: 2014-01-14 09:08 +0100 http://bitbucket.org/cffi/cffi/changeset/1974af91fdb2/ Log: Fix test diff --git a/testing/test_parsing.py b/testing/test_parsing.py --- a/testing/test_parsing.py +++ b/testing/test_parsing.py @@ -32,7 +32,8 @@ def new_struct_type(self, name): return FakeStruct(name) - def complete_struct_or_union(self, s, fields, tp=None): + def complete_struct_or_union(self, s, fields, tp=None, + totalsize=-1, totalalignment=-1, sflags=0): assert isinstance(s, FakeStruct) s.fields = fields From noreply at buildbot.pypy.org Tue Jan 14 09:43:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 14 Jan 2014 09:43:31 +0100 (CET) Subject: [pypy-commit] pypy default: Fix test Message-ID: <20140114084331.435881C0500@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68657:5c1e61b82376 Date: 2014-01-14 09:36 +0100 http://bitbucket.org/pypy/pypy/changeset/5c1e61b82376/ Log: Fix test diff --git a/pypy/module/test_lib_pypy/ctypes_tests/support.py b/pypy/module/test_lib_pypy/ctypes_tests/support.py --- a/pypy/module/test_lib_pypy/ctypes_tests/support.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/support.py @@ -31,8 +31,10 @@ import gc for _ in range(4): gc.collect() - cls.old_num = _rawffi._num_of_allocated_objects() - + try: + cls.old_num = _rawffi._num_of_allocated_objects() + except RuntimeError: + pass def teardown_class(cls): if sys.pypy_translation_info['translation.gc'] == 'boehm': From noreply at buildbot.pypy.org Tue Jan 14 09:43:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 14 Jan 2014 09:43:29 +0100 (CET) Subject: [pypy-commit] pypy default: Add rawstring2charp. Message-ID: <20140114084329.E63AA1C0500@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68656:55aed8609dfd Date: 2014-01-14 08:21 +0100 http://bitbucket.org/pypy/pypy/changeset/55aed8609dfd/ Log: Add rawstring2charp. 
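The new ``rawstring2charp()`` copies a whole Python string into a raw buffer in a
single call, instead of one byte at a time. A usage sketch, taken almost verbatim
from the ``test_rawstring2charp`` test added in the diff below (PyPy-only:
``_rawffi`` is an interpreter-level module):

    import _rawffi

    A = _rawffi.Array('c')
    a = A(10, 'x' * 10)                           # raw buffer holding ten 'x' bytes
    _rawffi.rawstring2charp(a.buffer, "foobar")   # bulk-copy six bytes into it
    assert ''.join([a[i] for i in range(10)]) == "foobarxxxx"
    a.free()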
diff --git a/pypy/module/_rawffi/__init__.py b/pypy/module/_rawffi/__init__.py --- a/pypy/module/_rawffi/__init__.py +++ b/pypy/module/_rawffi/__init__.py @@ -19,6 +19,7 @@ 'wcharp2unicode' : 'interp_rawffi.wcharp2unicode', 'charp2rawstring' : 'interp_rawffi.charp2rawstring', 'wcharp2rawunicode' : 'interp_rawffi.wcharp2rawunicode', + 'rawstring2charp' : 'interp_rawffi.rawstring2charp', 'CallbackPtr' : 'callback.W_CallbackPtr', '_num_of_allocated_objects' : 'tracker.num_of_allocated_objects', 'get_libc' : 'interp_rawffi.get_libc', diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -579,6 +579,13 @@ s = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, address), maxlength) return space.wrap(s) + at unwrap_spec(address=r_uint, newcontent=str) +def rawstring2charp(space, address, newcontent): + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + array = rffi.cast(rffi.CCHARP, address) + copy_string_to_raw(llstr(newcontent), array, 0, len(newcontent)) + if _MS_WINDOWS: @unwrap_spec(code=int) def FormatError(space, code): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -323,6 +323,14 @@ assert res == u'xx' a.free() + def test_rawstring2charp(self): + import _rawffi + A = _rawffi.Array('c') + a = A(10, 'x'*10) + _rawffi.rawstring2charp(a.buffer, "foobar") + assert ''.join([a[i] for i in range(10)]) == "foobarxxxx" + a.free() + def test_raw_callable(self): import _rawffi lib = _rawffi.CDLL(self.lib_name) From noreply at buildbot.pypy.org Tue Jan 14 09:43:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 14 Jan 2014 09:43:32 +0100 (CET) Subject: [pypy-commit] pypy default: Use rawstring2charp() in the ctypes module. Message-ID: <20140114084332.7A40C1C0500@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68658:28108772614a Date: 2014-01-14 09:42 +0100 http://bitbucket.org/pypy/pypy/changeset/28108772614a/ Log: Use rawstring2charp() in the ctypes module. 
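The change below switches the ctypes ``c_char`` array setters from a per-character
Python loop to a single ``rawstring2charp()`` call whenever the new value is a
plain ``str``. A sketch of the user-visible code path that this speeds up (plain
ctypes; the example itself is illustrative and not part of the commit):

    import ctypes

    buf = ctypes.create_string_buffer(16)
    buf.value = "hello"            # on PyPy this is now one bulk copy, not a loop
    assert buf.value == "hello"
    assert buf.raw[:6] == "hello\x00"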
diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: From noreply at buildbot.pypy.org Tue Jan 14 10:06:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 14 Jan 2014 10:06:37 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: did this, this morning Message-ID: <20140114090637.487CF1C0C35@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5129:1797ac776753 Date: 2014-01-14 10:06 +0100 http://bitbucket.org/pypy/extradoc/changeset/1797ac776753/ Log: did this, this morning diff --git a/sprintinfo/leysin-winter-2014/planning.txt b/sprintinfo/leysin-winter-2014/planning.txt --- a/sprintinfo/leysin-winter-2014/planning.txt +++ b/sprintinfo/leysin-winter-2014/planning.txt @@ -31,8 +31,6 @@ * discuss about C++ / cppyy, look into importing pyshiboken (johan pessimistic, ?) 
-* ctypes: "array_of_char.value = string": speed up this case - * ctypes: https://bugs.pypy.org/issue1671 * longs multiplication: patch at https://bugs.pypy.org/issue892 From noreply at buildbot.pypy.org Tue Jan 14 10:13:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 14 Jan 2014 10:13:39 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: I'm no longer late today (at least not by more than 5 minutes) Message-ID: <20140114091339.42B461C050C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5130:3163c7adb9ec Date: 2014-01-14 10:13 +0100 http://bitbucket.org/pypy/extradoc/changeset/3163c7adb9ec/ Log: I'm no longer late today (at least not by more than 5 minutes) diff --git a/sprintinfo/leysin-winter-2014/planning.txt b/sprintinfo/leysin-winter-2014/planning.txt --- a/sprintinfo/leysin-winter-2014/planning.txt +++ b/sprintinfo/leysin-winter-2014/planning.txt @@ -6,7 +6,7 @@ Remi Meier Maciej Fijalkowski Romain Guillebert -Armin Rigo (late) +Armin Rigo Manuel Jacob From noreply at buildbot.pypy.org Tue Jan 14 10:44:18 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 10:44:18 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: reintroduce the medium fastpath for PRIVATE_FROM_PROTECTED objects Message-ID: <20140114094418.A272D1C050C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68660:1f646fa9a6ac Date: 2014-01-14 10:28 +0100 http://bitbucket.org/pypy/pypy/changeset/1f646fa9a6ac/ Log: reintroduce the medium fastpath for PRIVATE_FROM_PROTECTED objects diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -529,6 +529,31 @@ mc = codebuf.MachineCodeBlockWrapper() # if not for_frame: + if descr.stmcat in ['A2W', 'A2V']: + # slow fastpath + # check if PRIV_FROM_PROT is set, but not + # WRITE_BARRIER + mc.MOV_rs(X86_64_SCRATCH_REG.value, WORD) + + flag = StmGC.GCFLAG_WRITE_BARRIER >> 32 + off = 4 + assert 0 < flag < 256 + mc.TEST8_mi((X86_64_SCRATCH_REG.value, off), flag) + mc.J_il8(rx86.Conditions['NZ'], 0) + jz1 = mc.get_relative_pos() + # if flag set, jump over the next check & RET + + flag = StmGC.GCFLAG_PRIVATE_FROM_PROTECTED >> 40 + off = 5 + assert 0 < flag < 256 + mc.TEST8_mi((X86_64_SCRATCH_REG.value, off), flag) + mc.J_il8(rx86.Conditions['Z'], 0) + jz2 = mc.get_relative_pos() + # if PRIV_F_PROT, RET + mc.RET() + mc.overwrite(jz2 - 1, chr(mc.get_relative_pos() - jz2)) + mc.overwrite(jz1 - 1, chr(mc.get_relative_pos() - jz1)) + self._push_all_regs_to_frame(mc, [], withfloats, callee_only=True) if IS_X86_32: # we have 2 extra words on stack for retval and we pass 1 extra From noreply at buildbot.pypy.org Tue Jan 14 10:44:17 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 10:44:17 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: add a flag to jitdriver to mark it as transaction-break friendly -> reintroduces atomicity of bytecode instructions Message-ID: <20140114094417.499711C050C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68659:e21f53a50e0d Date: 2014-01-14 10:17 +0100 http://bitbucket.org/pypy/pypy/changeset/e21f53a50e0d/ Log: add a flag to jitdriver to mark it as transaction-break friendly -> reintroduces atomicity of bytecode instructions diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -42,7 +42,8 @@ stmonly_jitdriver = 
jit.JitDriver(greens=[], reds=['next_instr', 'ec', - 'self', 'co_code']) + 'self', 'co_code'], + stm_do_transaction_breaks=True) opcodedesc = bytecode_spec.opcodedesc HAVE_ARGUMENT = bytecode_spec.HAVE_ARGUMENT diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -56,7 +56,8 @@ set_jitcell_at = set_jitcell_at, should_unroll_one_iteration = should_unroll_one_iteration, - name='pypyjit') + name='pypyjit', + stm_do_transaction_breaks=True) class __extend__(PyFrame): diff --git a/rpython/jit/tl/tlc.py b/rpython/jit/tl/tlc.py --- a/rpython/jit/tl/tlc.py +++ b/rpython/jit/tl/tlc.py @@ -229,7 +229,8 @@ def make_interp(supports_call, jitted=True): myjitdriver = JitDriver(greens = ['pc', 'code'], - reds = ['frame', 'pool']) + reds = ['frame', 'pool'], + stm_do_transaction_breaks=True) def interp(code='', pc=0, inputarg=0, pool=None): if not isinstance(code,str): diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -479,12 +479,14 @@ name = 'jitdriver' inline_jit_merge_point = False _store_last_enter_jit = None + stm_do_transaction_breaks = False def __init__(self, greens=None, reds=None, virtualizables=None, get_jitcell_at=None, set_jitcell_at=None, get_printable_location=None, confirm_enter_jit=None, can_never_inline=None, should_unroll_one_iteration=None, - name='jitdriver', check_untranslated=True): + name='jitdriver', check_untranslated=True, + stm_do_transaction_breaks=False): if greens is not None: self.greens = greens self.name = name @@ -520,6 +522,7 @@ self.can_never_inline = can_never_inline self.should_unroll_one_iteration = should_unroll_one_iteration self.check_untranslated = check_untranslated + self.stm_do_transaction_breaks = stm_do_transaction_breaks def _freeze_(self): return True diff --git a/rpython/translator/stm/jitdriver.py b/rpython/translator/stm/jitdriver.py --- a/rpython/translator/stm/jitdriver.py +++ b/rpython/translator/stm/jitdriver.py @@ -18,13 +18,11 @@ and op.args[0].value == 'jit_merge_point'): jitdriver = op.args[1].value if not jitdriver.autoreds: - # XXX: BUG, redo the below code in order to ensure atomicity of bytecode instrs - # if (relaxed - # or (i + 1 < len(block.operations) - # and block.operations[i+1].opname == 'jit_stm_transaction_break_point')): - found.append((block, i)) - # else: - # log.WARNING("ignoring jitdriver without a transaction break point in %r" % (graph,)) + if jitdriver.stm_do_transaction_breaks: + found.append((block, i)) + else: + log.WARNING("ignoring non-stm jitdriver in %r" % ( + graph,)) else: log.WARNING("ignoring jitdriver with autoreds in %r" % ( graph,)) # XXX XXX! 
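Only jit drivers that explicitly pass ``stm_do_transaction_breaks=True`` now get
transaction break points; the interpreter loop is then expected to poll for a break
right after the merge point, as in the test that follows. A condensed sketch of the
intended pattern (the ``interp_step`` helper and the particular green/red variables
are made up for illustration; ``rstm`` is only available on the stmgc-c4 branch):

    from rpython.rlib.jit import JitDriver
    from rpython.rlib import rstm

    driver = JitDriver(greens=['pc', 'code'], reds=['frame'],
                       stm_do_transaction_breaks=True)

    def interp_loop(pc, code, frame):
        while pc < len(code):
            driver.jit_merge_point(pc=pc, code=code, frame=frame)
            if rstm.jit_stm_should_break_transaction(False):
                rstm.jit_stm_transaction_break_point()
            pc = interp_step(pc, code, frame)    # hypothetical: run one bytecode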
diff --git a/rpython/translator/stm/test/test_jitdriver.py b/rpython/translator/stm/test/test_jitdriver.py --- a/rpython/translator/stm/test/test_jitdriver.py +++ b/rpython/translator/stm/test/test_jitdriver.py @@ -1,6 +1,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.stm.test.transform_support import BaseTestTransform from rpython.rlib.jit import JitDriver +from rpython.rlib import rstm class TestJitDriver(BaseTestTransform): @@ -10,11 +11,14 @@ class X: counter = 10 x = X() - myjitdriver = JitDriver(greens=[], reds=[]) + myjitdriver = JitDriver(greens=[], reds=[], + stm_do_transaction_breaks=True) def f1(): while x.counter > 0: myjitdriver.jit_merge_point() + if rstm.jit_stm_should_break_transaction(False): + rstm.jit_stm_transaction_break_point() x.counter -= 1 return 'X' From noreply at buildbot.pypy.org Tue Jan 14 10:44:20 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 10:44:20 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: treat OS_JIT_STM_SHOULD_BREAK_TRANSACTION as a non-collecting function in the jit Message-ID: <20140114094420.038BB1C050C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68661:162a671c4f65 Date: 2014-01-14 10:43 +0100 http://bitbucket.org/pypy/pypy/changeset/162a671c4f65/ Log: treat OS_JIT_STM_SHOULD_BREAK_TRANSACTION as a non-collecting function in the jit diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -105,6 +105,14 @@ continue # ---------- calls ---------- if op.is_call(): + if opnum == rop.CALL and op.getdescr(): + d = op.getdescr() + assert isinstance(d, CallDescr) + ei = d.get_extra_info() + if ei and ei.oopspecindex == EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION: + self.newops.append(op) + continue + self.emitting_an_operation_that_can_collect() self.next_op_may_be_in_new_transaction() diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2278,6 +2278,7 @@ cb = callbuilder.CallBuilder(self, arglocs[2], arglocs[3:], resloc) descr = op.getdescr() + effectinfo = descr.get_extra_info() assert isinstance(descr, CallDescr) cb.callconv = descr.get_call_conv() cb.argtypes = descr.get_arg_types() @@ -2289,7 +2290,9 @@ assert isinstance(signloc, ImmedLoc) cb.ressign = signloc.value - if is_call_release_gil: + if effectinfo and effectinfo.oopspecindex == EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION: + cb.emit_no_collect() + elif is_call_release_gil: cb.emit_call_release_gil() else: cb.emit() From noreply at buildbot.pypy.org Tue Jan 14 10:45:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 14 Jan 2014 10:45:43 +0100 (CET) Subject: [pypy-commit] stmgc c7: in-progress Message-ID: <20140114094543.2BA661C050C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r602:4561d50017f8 Date: 2014-01-14 10:45 +0100 http://bitbucket.org/pypy/stmgc/changeset/4561d50017f8/ Log: in-progress diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -23,7 +23,6 @@ # define HAVE_FULL_EXCHANGE_INSN #endif - typedef TLPREFIX char localchar_t; typedef TLPREFIX struct alloc_for_size_s alloc_for_size_t; typedef TLPREFIX struct _thread_local2_s _thread_local2_t; @@ -38,6 +37,7 @@ struct _thread_local2_s { struct _thread_local1_s _tl1; int thread_num; + bool running_transaction; 
char *thread_base; struct stm_list_s *modified_objects; struct stm_list_s *new_object_ranges; @@ -49,15 +49,9 @@ static char *object_pages; -static char *undo_log_pages; -static char *undo_log_current; -static int num_threads_started, leader_thread_num; +static int num_threads_started; static uintptr_t index_page_never_used; -static int next_write_version; -static int undo_lock; -static struct stm_list_s *global_history; -static uint16_t gh_write_version_first; -static uint16_t gh_write_version_last; +static struct stm_list_s *volatile pending_updates; static uint8_t flag_page_private[NB_PAGES]; /* xxx_PAGE constants above */ @@ -146,6 +140,9 @@ void *localpg = object_pages + localpgoff * 4096UL; void *otherpg = object_pages + otherpgoff * 4096UL; + // XXX should not use pgoff2, but instead the next unused page in + // thread 2, so that after major GCs the next dirty pages are the + // same as the old ones int res = remap_file_pages(localpg, 4096, 0, pgoff2, 0); if (res < 0) { perror("remap_file_pages"); @@ -174,87 +171,58 @@ enum detect_conflicts_e { CANNOT_CONFLICT, CAN_CONFLICT }; -/* XXX this can be done by acquiring the undo_lock for much less time, - but it needs to be carefully synchronized with _stm_write_slowpath(). - For now it must be called with the undo_lock acquired. */ static void update_to_current_version(enum detect_conflicts_e check_conflict) { - /* Loop over objects in 'global_history': if they have been + /* Loop over objects in 'pending_updates': if they have been read by the current transaction, the current transaction must - abort; then copy them out of the leader's object space --- - which may have been modified by the leader's uncommitted - transaction; this case will be fixed afterwards. + abort; then copy them out of the other thread's object space, + which is not modified so far (the other thread just committed + and will wait until we are done here before it starts the + next transaction). */ bool conflict_found_or_dont_check = (check_conflict == CANNOT_CONFLICT); char *local_base = _STM_TL2->thread_base; char *remote_base = get_thread_base(1 - _STM_TL2->thread_num); - struct stm_list_s *gh, *gh_next; + struct stm_list_s *pu = pending_updates; - assert(leader_thread_num != _STM_TL2->thread_num); + assert(pu != _STM_TL2->modified_objects); - for (gh = global_history; gh != NULL; gh = gh_next) { + STM_LIST_FOREACH(pu, ({ - STM_LIST_FOREACH(gh, ({ + if (!conflict_found_or_dont_check) + conflict_found_or_dont_check = _stm_was_read(item); - if (!conflict_found_or_dont_check) - conflict_found_or_dont_check = _stm_was_read(item); - - char *dst = REAL_ADDRESS(local_base, item); - char *src = REAL_ADDRESS(remote_base, item); - char *src_rebased = src - (uintptr_t)local_base; - size_t size = stm_object_size_rounded_up((object_t *)src_rebased); - - memcpy(dst + sizeof(char *), - src + sizeof(char *), - size - sizeof(char *)); - })); - - gh_next = gh->nextlist; - stm_list_free(gh); - } - global_history = NULL; - gh_write_version_first = 0xffff; - gh_write_version_last = 0; - - /* Finally, loop over objects modified by the leader, - and copy them out of the undo log. 
- */ - char *undo = undo_log_pages; - char *undo_end = undo_log_current; - - while (undo < undo_end) { - - char *src = undo; - char *dst = *(char **)src; + char *dst = REAL_ADDRESS(local_base, item); + char *src = REAL_ADDRESS(remote_base, item); char *src_rebased = src - (uintptr_t)local_base; - - *(char **)src = *(char **)dst; /* fix the first word of the object in - the undo log, for stm_object_size() */ size_t size = stm_object_size_rounded_up((object_t *)src_rebased); memcpy(dst + sizeof(char *), src + sizeof(char *), size - sizeof(char *)); + })); - undo += size; - } - undo_log_current = undo_log_pages; /* make empty again */ + write_fence(); + pending_updates = NULL; if (conflict_found_or_dont_check && check_conflict == CAN_CONFLICT) { - release_lock(&undo_lock); stm_abort_transaction(); } } static void maybe_update(enum detect_conflicts_e check_conflict) { - if (leader_thread_num != _STM_TL2->thread_num && global_history != NULL) { - acquire_lock(&undo_lock); + if (pending_updates != NULL) { update_to_current_version(check_conflict); - release_lock(&undo_lock); } } +static void wait_until_updated(void) +{ + while (pending_updates == _STM_TL2->modified_objects) + spin_loop(); +} + void _stm_write_slowpath(object_t *obj) { @@ -263,43 +231,10 @@ _stm_privatize(((uintptr_t)obj) / 4096); stm_read(obj); + obj->write_version = _STM_TL1->transaction_write_version; _STM_TL2->modified_objects = stm_list_append( _STM_TL2->modified_objects, obj); - - uint16_t wv = obj->write_version; - obj->write_version = _STM_TL1->transaction_write_version; - - /* We only need to store a copy of the current version of the object if: - - we are the leader; - - the object is present in the global_history. - The second condition is approximated by the following range check. - Storing a few more objects than strictly needed is not really a problem. - */ - /* XXX this can be done without acquiring the undo_lock at all, - but we need more care in update_to_current_version(). */ - - /* XXX can we avoid writing an unbounded number of copies of the - same object in case we run a lot of transactions while the other - thread is busy? Unlikely case but in theory annoying. Should - we anyway bound the undo log's size to much less than NB_PAGES, - and if full here, sleep? Should the bound also count the size - taken by the global_history lists? 
*/ - if (ACQUIRE_LOCK_IF(&undo_lock, - wv <= gh_write_version_last && wv >= gh_write_version_first - && leader_thread_num == _STM_TL2->thread_num)) { - /* record in the undo log a copy of the content of the object */ - size_t size = stm_object_size_rounded_up(obj); - char *source = real_address((uintptr_t)obj); - char *undo = undo_log_current; - *((object_t **)undo) = obj; - memcpy(undo + sizeof(object_t *), - source + sizeof(object_t *), - size - sizeof(object_t *)); - /*write_fence();*/ - undo_log_current = undo + size; - release_lock(&undo_lock); - } } @@ -384,7 +319,7 @@ } -#define TOTAL_MEMORY (NB_PAGES * 4096UL * (NB_THREADS + 1)) +#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_THREADS) #define READMARKER_END ((NB_PAGES * 4096UL) >> 4) #define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) #define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) @@ -443,22 +378,12 @@ } } - undo_log_pages = get_thread_base(NB_THREADS); - mprotect(undo_log_pages, 4096, PROT_NONE); - mprotect(undo_log_pages + (NB_PAGES - 1) * 4096UL, 4096, PROT_NONE); - undo_log_pages += 4096; - undo_log_current = undo_log_pages; - num_threads_started = 0; index_page_never_used = FIRST_OBJECT_PAGE; - next_write_version = 1; - leader_thread_num = 0; - global_history = NULL; - gh_write_version_first = 0xffff; - gh_write_version_last = 0; + pending_updates = NULL; } -#define INVALID_GS_VALUE 0xDDDDDDDDDDDDDDDDUL +#define INVALID_GS_VALUE 0x6D6D6D6D static void set_gs_register(uint64_t value) { @@ -478,10 +403,13 @@ assert(_STM_TL2->thread_base == thread_base); _STM_TL2->modified_objects = stm_list_create(); + assert(!_STM_TL2->running_transaction); } void _stm_teardown_thread(void) { + assert(!_STM_TL2->running_transaction); + wait_until_updated(); stm_list_free(_STM_TL2->modified_objects); _STM_TL2->modified_objects = NULL; @@ -492,8 +420,6 @@ { munmap(object_pages, TOTAL_MEMORY); object_pages = NULL; - undo_log_pages = NULL; - undo_log_current = NULL; } @@ -519,39 +445,45 @@ perror("madvise"); abort(); } - _STM_TL1->transaction_read_version = 0; + _STM_TL1->transaction_read_version = 1; } void stm_major_collection(void) { + assert(_STM_TL2->running_transaction); abort(); } -void stm_start_transaction(jmp_buf *jmpbufptr) +void stm_start_transaction(jmpbufptr_t *jmpbufptr) { - if (_STM_TL1->transaction_read_version == 0xff) + assert(!_STM_TL2->running_transaction); + + uint8_t old_rv = _STM_TL1->transaction_read_version; + _STM_TL1->transaction_read_version = old_rv + 1; + if (UNLIKELY(old_rv == 0xff)) reset_transaction_read_version(); - _STM_TL1->transaction_read_version++; - _STM_TL1->jmpbufptr = NULL; - while (1) { - int wv = __sync_fetch_and_add(&next_write_version, 1); - if (LIKELY(wv <= 0xffff)) { - _STM_TL1->transaction_write_version = wv; - break; - } + int old_wv = _STM_TL1->transaction_write_version; + _STM_TL1->transaction_write_version = old_wv + 1; + if (UNLIKELY(old_wv == 0xffff)) { /* We run out of 16-bit numbers before we do the next major collection, which resets it. XXX This case seems unlikely for now, but check if it could become a bottleneck at some point. 
*/ stm_major_collection(); } - assert(stm_list_is_empty(_STM_TL2->modified_objects)); + + wait_until_updated(); + stm_list_clear(_STM_TL2->modified_objects); assert(stm_list_is_empty(_STM_TL2->new_object_ranges)); + /* check that there is no stm_abort() in the following maybe_update() */ + _STM_TL1->jmpbufptr = NULL; + maybe_update(CANNOT_CONFLICT); /* no read object: cannot conflict */ _STM_TL1->jmpbufptr = jmpbufptr; + _STM_TL2->running_transaction = 1; } static void update_new_objects_in_other_threads(uintptr_t pagenum, @@ -567,10 +499,13 @@ char *src = REAL_ADDRESS(_STM_TL2->thread_base, local_src); memcpy(dst, src, size); + ...; } void stm_stop_transaction(void) { + assert(_STM_TL2->running_transaction); + write_fence(); /* see later in this function for why */ acquire_lock(&undo_lock); @@ -596,8 +531,7 @@ _STM_TL2->modified_objects = stm_list_create(); uint16_t wv = _STM_TL1->transaction_write_version; - if (wv < gh_write_version_last) gh_write_version_last = wv; - if (wv > gh_write_version_first) gh_write_version_first = wv; + if (gh_write_version_first < wv) gh_write_version_first = wv; /* walk the new_object_ranges and manually copy the new objects to the other thread's pages in the (hopefully rare) case that @@ -664,11 +598,14 @@ } } + _STM_TL2->running_transaction = 0; release_lock(&undo_lock); } void stm_abort_transaction(void) { + assert(_STM_TL2->running_transaction); + // XXX copy back the modified objects!! long j; for (j = 2; j < LARGE_OBJECT_WORDS; j++) { alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; @@ -678,6 +615,7 @@ stm_list_clear(_STM_TL2->new_object_ranges); stm_list_clear(_STM_TL2->modified_objects); assert(_STM_TL1->jmpbufptr != NULL); - assert(_STM_TL1->jmpbufptr != (jmp_buf *)-1); /* for tests only */ - longjmp(*_STM_TL1->jmpbufptr, 1); + assert(_STM_TL1->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ + _STM_TL2->running_transaction = 0; + __builtin_longjmp(*_STM_TL1->jmpbufptr, 1); } diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -3,7 +3,6 @@ #include #include -#include #define TLPREFIX __attribute__((address_space(256))) @@ -31,8 +30,7 @@ */ struct object_s { - uint16_t write_version; /* reserved for the STM library */ - /*uint8_t stm_flags;*/ + uint8_t stm_flags; /* reserved for the STM library */ uint32_t header; /* for the user program -- only write in newly allocated objects */ }; @@ -41,8 +39,10 @@ uint8_t rm; }; +typedef intptr_t jmpbufptr_t[5]; /* for use with __builtin_setjmp() */ + struct _thread_local1_s { - jmp_buf *jmpbufptr; + jmpbufptr_t *jmpbufptr; uint8_t transaction_read_version; uint16_t transaction_write_version; }; diff --git a/c7/list.h b/c7/list.h --- a/c7/list.h +++ b/c7/list.h @@ -8,7 +8,7 @@ uintptr_t count; union { uintptr_t last_allocated; /* always odd */ - struct stm_list_s *nextlist; /* always even */ + //struct stm_list_s *nextlist; /* always even */ }; object_t *items[]; }; From noreply at buildbot.pypy.org Tue Jan 14 10:54:02 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 14 Jan 2014 10:54:02 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: Fix annotation. Message-ID: <20140114095402.317FC1C050C@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r68662:85197e03b1f7 Date: 2014-01-14 10:44 +0100 http://bitbucket.org/pypy/pypy/changeset/85197e03b1f7/ Log: Fix annotation. 
diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -4,15 +4,24 @@ from rpython.annotator.model import (SomeObject, SomeString, s_None, SomeChar, SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC) +from rpython.rlib import jit from rpython.rlib.objectmodel import newlist_hint, specialize from rpython.rlib.rarithmetic import ovfcheck +from rpython.rlib.unicodedata import unicodedb_5_2_0 as unicodedb from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.tool.pairtype import pairtype -from rpython.rlib import jit # -------------- public API for string functions ----------------------- +def _isspace(char): + if isinstance(char, str): + return char.isspace() + else: + assert isinstance(char, unicode) + return unicodedb.isspace(ord(char)) + + @specialize.argtype(0) def split(value, by=None, maxsplit=-1): if by is None: @@ -22,7 +31,7 @@ while True: # find the beginning of the next word while i < length: - if not value[i].isspace(): + if not _isspace(value[i]): break # found i += 1 else: @@ -33,7 +42,7 @@ j = length # take all the rest of the string else: j = i + 1 - while j < length and not value[j].isspace(): + while j < length and not _isspace(value[j]): j += 1 maxsplit -= 1 # NB. if it's already < 0, it stays < 0 @@ -95,7 +104,7 @@ while True: # starting from the end, find the end of the next word while i >= 0: - if not value[i].isspace(): + if not _isspace(value[i]): break # found i -= 1 else: @@ -107,7 +116,7 @@ j = -1 # take all the rest of the string else: j = i - 1 - while j >= 0 and not value[j].isspace(): + while j >= 0 and not _isspace(value[j]): j -= 1 maxsplit -= 1 # NB. if it's already < 0, it stays < 0 From noreply at buildbot.pypy.org Tue Jan 14 11:16:07 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 11:16:07 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: add import.. Message-ID: <20140114101607.61E251C0500@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68663:0ff0e88dff93 Date: 2014-01-14 11:15 +0100 http://bitbucket.org/pypy/pypy/changeset/0ff0e88dff93/ Log: add import.. 
diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -7,6 +7,7 @@ from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.debug import (have_debug_prints, debug_start, debug_stop, debug_print) +from rpython.jit.codewriter.effectinfo import EffectInfo # # STM Support From noreply at buildbot.pypy.org Tue Jan 14 12:17:13 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 14 Jan 2014 12:17:13 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: Fix the problem with gaps and start writing a test for consts Message-ID: <20140114111713.1E1B31C0500@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68664:78ad080697be Date: 2014-01-13 15:08 +0100 http://bitbucket.org/pypy/pypy/changeset/78ad080697be/ Log: Fix the problem with gaps and start writing a test for consts diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -18,14 +18,15 @@ class Position(object): def __init__(self, pos): + assert pos != -1 self.pos = pos def get_jitframe_position(self): return self.pos class ResumeFrame(object): - def __init__(self, num, start_pos): - self.registers = [None] * num + def __init__(self, no, start_pos): + self.registers = [None] * no self.start_pos = start_pos class LLGraphResumeBuilder(ResumeBuilder): @@ -33,20 +34,20 @@ self.liveness = LivenessAnalyzer() self.numbering = {} self.framestack = [] - locs = [] + locs = None start_pos = 0 - for frame_pos, frame in enumerate(inputframes): - if inputlocs is not None: + if inputlocs is not None: + locs = [] + for frame_pos, frame in enumerate(inputframes): self.framestack.append(ResumeFrame(len(frame), start_pos)) - for pos_in_frame, box in enumerate(frame): - if inputlocs is not None: + for pos_in_frame, box in enumerate(frame): + if box is None: + continue pos = inputlocs[frame_pos][pos_in_frame] self.framestack[-1].registers[pos_in_frame] = box - else: - pos = len(self.numbering) - self.numbering[box] = pos - locs.append(Position(pos)) - start_pos += len(frame) + self.numbering[box] = pos + locs.append(Position(pos)) + start_pos += 1 ResumeBuilder.__init__(self, self, frontend_liveness, descr, inputframes, locs) @@ -83,7 +84,10 @@ lst = [] for frame in self.framestack: for reg in frame.registers: - lst.append(mapping(reg)) + if reg is None: + lst.append(None) + else: + lst.append(mapping(reg)) return lst class LLTrace(object): @@ -365,13 +369,12 @@ assert frame.forced_deadframe is None values = [] for box in frame.force_guard_op.failargs: - if box is not None: - if box is not frame.current_op.result: - value = frame.env[box] - else: - value = box.value # 0 or 0.0 or NULL + if box is None: + value = None + elif box is not frame.current_op.result: + value = frame.env[box] else: - value = None + value = box.value # 0 or 0.0 or NULL values.append(value) frame.forced_deadframe = LLDeadFrame( _getdescr(frame.force_guard_op), values) @@ -777,11 +780,14 @@ i += 1 def do_renaming(self, newargs, newvalues): - assert len(newargs) == len(newvalues) self.env = {} self.framecontent = {} - for new, newvalue in zip(newargs, newvalues): - self.setenv(new, newvalue) + i = 0 + for value in newvalues: + if value is None: + continue + self.setenv(newargs[i], value) + i += 1 # ----------------------------------------------------- @@ 
-790,10 +796,10 @@ for i in range(len(self.current_op.failargs)): arg = self.current_op.failargs[i] if arg is None: - values.append(None) + value = None else: value = self.env[arg] - values.append(value) + values.append(value) if hasattr(descr, '_llgraph_bridge'): target = (descr._llgraph_bridge, -1) raise Jump(target, values) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -111,35 +111,32 @@ return r def rebuild_faillocs_from_descr(self, descr, inputframes, loc_positions): - lgt = 0 - for frame in inputframes: - lgt += len(frame) - locs = [None] * lgt + locs = [] GPR_REGS = len(self.cpu.gen_regs) XMM_REGS = len(self.cpu.float_regs) if self.cpu.IS_64_BIT: coeff = 1 else: coeff = 2 - locs_index = 0 for i, frame in enumerate(inputframes): inputlocs = loc_positions[i] assert len(inputlocs) == len(frame) for j, item in enumerate(frame): + if item is None: + continue pos = inputlocs[j] if pos < GPR_REGS: - locs[locs_index] = self.cpu.gen_regs[pos] + locs.append(self.cpu.gen_regs[pos]) elif pos < (GPR_REGS + XMM_REGS * coeff): pos = (pos - GPR_REGS) // coeff - locs[locs_index] = self.cpu.float_regs[pos] + locs.append(self.cpu.float_regs[pos]) else: stack_pos = pos - self.cpu.JITFRAME_FIXED_SIZE assert stack_pos >= 0 tp = item.type - locs[locs_index] = self.new_stack_loc(stack_pos, - pos * WORD, tp) - locs_index += 1 - return locs + locs.append(self.new_stack_loc(stack_pos, + pos * WORD, tp)) + return locs[:] def store_info_on_descr(self, startspos, guardtok, resume_bytecode): withfloats = guardtok.has_floats diff --git a/rpython/jit/backend/resumebuilder.py b/rpython/jit/backend/resumebuilder.py --- a/rpython/jit/backend/resumebuilder.py +++ b/rpython/jit/backend/resumebuilder.py @@ -12,7 +12,7 @@ if inputframes is not None: for frame in inputframes: self.frame_starts.append(self.frame_starts[-1] + len(frame)) - self.framestack.append(frame) + self.framestack.append(frame[:]) def enter_frame(self, pc, jitcode): self.frame_starts.append(self.frame_starts[-1] + jitcode.num_regs()) @@ -64,11 +64,14 @@ i = 0 for frame_pos, frame in enumerate(inputframes): for pos_in_frame, box in enumerate(frame): - loc_pos = inputlocs[i].get_jitframe_position() + if box is None: + loc_pos = -1 + else: + loc_pos = inputlocs[i].get_jitframe_position() + i += 1 + self.frontend_pos[box] = (ConstInt(frame_pos), + ConstInt(pos_in_frame)) self.current_attachment[box] = loc_pos - self.frontend_pos[box] = (ConstInt(frame_pos), - ConstInt(pos_in_frame)) - i += 1 def process(self, op): if op.getopnum() == rop.RESUME_PUT: @@ -131,12 +134,16 @@ def flatten(inputframes): count = 0 for frame in inputframes: - count += len(frame) + for x in frame: + if x is not None: + count += 1 inputargs = [None] * count - i = 0 + pos = 0 for frame in inputframes: - inputargs[i:i + len(frame)] = frame - i += len(frame) + for item in frame: + if item is not None: + inputargs[pos] = item + pos += 1 return inputargs diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -259,6 +259,52 @@ assert self.cpu.tracker.total_compiled_bridges == 1 return looptoken + def test_compile_bridge_with_holes(self): + i0 = BoxInt() + i1 = BoxInt() + i2 = BoxInt() + i3 = BoxInt() + faildescr1 = BasicFailDescr(1) + faildescr2 = BasicFailDescr(2) + looptoken = JitCellToken() + 
targettoken = TargetToken() + jitcode = JitCode('name') + jitcode.setup(num_regs_i=3, num_regs_r=0, num_regs_f=0) + operations = [ + ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), + ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), + ResOperation(rop.LABEL, [i0], None, descr=targettoken), + ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1), + ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2), + ResOperation(rop.RESUME_PUT, [i1, ConstInt(0), ConstInt(1)], + None), + ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1), + ResOperation(rop.JUMP, [i1], None, descr=targettoken), + ] + inputargs = [i3] + self.cpu.compile_loop(None, inputargs, operations, looptoken) + + i1b = BoxInt() + i3 = BoxInt() + bridge = [ + ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), + ResOperation(rop.RESUME_PUT, [i3, ConstInt(0), ConstInt(2)], + None), + ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), + ResOperation(rop.JUMP, [i1b], None, descr=targettoken), + ] + + locs = rebuild_locs_from_resumedata(faildescr1) + self.cpu.compile_bridge(None, faildescr1, [[None, i1b, None]], + locs, bridge, looptoken) + + deadframe = self.cpu.execute_token(looptoken, 2) + fail = self.cpu.get_latest_descr(deadframe) + assert fail.identifier == 2 + locs = rebuild_locs_from_resumedata(fail) + res = self.cpu.get_int_value(deadframe, locs[0][1]) + assert res == 20 + def test_compile_big_bridge_out_of_small_loop(self): jitcode = JitCode("name") jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -238,6 +238,7 @@ used = {} i = 0 inputargs = flatten(inputframes) + assert len(inputargs) == len(locs) for loc in locs: if loc is None: # xxx bit kludgy loc = ebp diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -13,7 +13,7 @@ class CodeWriter(object): callcontrol = None # for tests - debug = False + debug = True def __init__(self, cpu=None, jitdrivers_sd=[]): self.cpu = cpu diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -16,7 +16,7 @@ from rpython.jit.metainterp.inliner import Inliner from rpython.jit.metainterp.resume import NUMBERING, PENDINGFIELDSP, ResumeDataDirectReader from rpython.jit.codewriter import heaptracker, longlong - +from rpython.jit.backend.resumebuilder import flatten def giveup(): from rpython.jit.metainterp.pyjitpl import SwitchToBlackhole @@ -306,12 +306,14 @@ inputargs, operations, looptoken, log=log, name=name) -def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, - original_loop_token, log=True): - metainterp_sd.logger_ops.log_bridge(inputargs, operations, "compiling") +def do_compile_bridge(metainterp_sd, faildescr, inputframes, + inputlocs, operations, original_loop_token, log=True): + metainterp_sd.logger_ops.log_bridge(flatten(inputframes), operations, + "compiling") assert isinstance(faildescr, AbstractFailDescr) return metainterp_sd.cpu.compile_bridge(metainterp_sd.logger_ops, - faildescr, inputargs, operations, + faildescr, inputframes, + inputlocs, operations, original_loop_token, log=log) def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type): @@ -367,11 +369,11 @@ if metainterp_sd.warmrunnerdesc is not None: # for 
tests metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) -def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, - operations, original_loop_token): +def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputframes, + inputlocs, operations, original_loop_token): if not we_are_translated(): show_procedures(metainterp_sd) - seen = dict.fromkeys(inputargs) + seen = dict.fromkeys(flatten(inputframes)) TreeLoop.check_consistency_of_branch(operations, seen) if metainterp_sd.warmrunnerdesc is not None: hooks = metainterp_sd.warmrunnerdesc.hooks @@ -386,8 +388,8 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs, - operations, + asminfo = do_compile_bridge(metainterp_sd, faildescr, inputframes, + inputlocs, operations, original_loop_token) finally: debug_stop("jit-backend") @@ -403,8 +405,8 @@ ops_offset = asminfo.ops_offset else: ops_offset = None - metainterp_sd.logger_ops.log_bridge(inputargs, operations, None, faildescr, - ops_offset) + metainterp_sd.logger_ops.log_bridge(flatten(inputframes), operations, + None, faildescr, ops_offset) # #if metainterp_sd.warmrunnerdesc is not None: # for tests # metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive( @@ -612,12 +614,14 @@ # to the corresponding guard_op and compile from there assert metainterp.resumekey_original_loop_token is not None new_loop.original_jitcell_token = metainterp.resumekey_original_loop_token - inputargs = metainterp.history.inputargs + inputframes = metainterp.history.inputframes + inputlocs = metainterp.history.inputlocs if not we_are_translated(): self._debug_suboperations = new_loop.operations propagate_original_jitcell_token(new_loop) send_bridge_to_backend(metainterp.jitdriver_sd, metainterp.staticdata, - self, inputargs, new_loop.operations, + self, inputframes, inputlocs, + new_loop.operations, new_loop.original_jitcell_token) class ResumeGuardNotInvalidated(ResumeGuardDescr): @@ -804,7 +808,8 @@ # Attempt to use optimize_bridge(). This may return None in case # it does not work -- i.e. none of the existing old_loop_tokens match. 
new_trace = create_empty_loop(metainterp) - new_trace.inputargs = metainterp.history.inputargs[:] + new_trace.inputframes = metainterp.history.inputframes[:] + new_trace.inputlocs = metainterp.history.inputlocs[:] # clone ops, as optimize_bridge can mutate the ops new_trace.operations = [op.clone() for op in metainterp.history.operations] diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -167,7 +167,7 @@ jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] assert self.optimizer.loop.resume_at_jump_descr - resume_at_jump_descr = self.optimizer.loop.resume_at_jump_descr.clone_if_mutable() + resume_at_jump_descr = self.optimizer.loop.resume_at_jump_descr assert isinstance(resume_at_jump_descr, ResumeGuardDescr) resume_at_jump_descr.rd_snapshot = self.fix_snapshot(jump_args, resume_at_jump_descr.rd_snapshot) @@ -421,7 +421,7 @@ if op.is_guard(): op = op.clone() op.setfailargs(None) - descr = target_token.resume_at_jump_descr.clone_if_mutable() + descr = target_token.resume_at_jump_descr op.setdescr(descr) short[i] = op @@ -444,7 +444,7 @@ if op.result and op.result in self.short_boxes.assumed_classes: target_token.assumed_classes[newop.result] = self.short_boxes.assumed_classes[op.result] short[i] = newop - target_token.resume_at_jump_descr = target_token.resume_at_jump_descr.clone_if_mutable() + target_token.resume_at_jump_descr = target_token.resume_at_jump_descr inliner.inline_descr_inplace(target_token.resume_at_jump_descr) # Forget the values to allow them to be freed @@ -489,7 +489,7 @@ if not isinstance(a, Const) and a not in self.short_seen: self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): - descr = self.short_resume_at_jump_descr.clone_if_mutable() + descr = self.short_resume_at_jump_descr op.setdescr(descr) if guards_needed and self.short_boxes.has_producer(op.result): @@ -588,7 +588,7 @@ for guard in extra_guards: if guard.is_guard(): - descr = target.resume_at_jump_descr.clone_if_mutable() + descr = target.resume_at_jump_descr inliner.inline_descr_inplace(descr) guard.setdescr(descr) self.optimizer.send_extra_operation(guard) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1698,13 +1698,14 @@ def is_main_jitcode(self, jitcode): return self.jitdriver_sd is not None and jitcode is self.jitdriver_sd.mainjitcode - def newframe(self, jitcode, greenkey=None): + def newframe(self, jitcode, greenkey=None, record_resume=True): if self.framestack: pc = self.framestack[-1].pc else: pc = -1 - self.history.record(rop.ENTER_FRAME, [ConstInt(pc)], None, - descr=jitcode) + if record_resume: + self.history.record(rop.ENTER_FRAME, [ConstInt(pc)], None, + descr=jitcode) if jitcode.is_portal: self.portal_call_depth += 1 self.call_ids.append(self.current_call_id) @@ -2409,9 +2410,8 @@ try: self.portal_call_depth = -1 # always one portal around self.history = history.History() - self.rebuild_state_after_failure(resumedescr, deadframe) - xxx - self.history.inputargs = [box for box in inputargs_and_holes if box] + state = self.rebuild_state_after_failure(resumedescr, deadframe) + self.history.inputframes, self.history.inputlocs = state finally: rstack._stack_criticalcode_stop() @@ -2531,10 +2531,10 @@ vinfo = self.jitdriver_sd.virtualizable_info ginfo = 
self.jitdriver_sd.greenfield_info self.framestack = [] - inputframes = resume2.rebuild_from_resumedata(self, deadframe, - resumedescr) - virtualizable_boxes = None - virtualref_boxes = None + inputlocs = resume2.rebuild_from_resumedata(self, deadframe, + resumedescr) + virtualizable_boxes = [] + virtualref_boxes = [] # # virtual refs: make the vrefs point to the freshly allocated virtuals self.virtualref_boxes = virtualref_boxes @@ -2565,7 +2565,7 @@ else: assert not virtualizable_boxes # - return inputframes + return inputlocs def check_synchronized_virtualizable(self): if not we_are_translated(): diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -132,29 +132,35 @@ pos)) def finish(self): + res = [] for frame in self.framestack: jitcode = frame.jitcode - miframe = self.metainterp.newframe(jitcode) + res.append([None] * jitcode.num_regs()) + miframe = self.metainterp.newframe(jitcode, record_resume=False) miframe.pc = frame.pc pos = 0 for i in range(jitcode.num_regs_i()): jitframe_pos = frame.registers[pos] - if jitframe_pos == -1: - continue - miframe.registers_i[i] = self.get_int_box(jitframe_pos) + if jitframe_pos != -1: + box = self.get_int_box(jitframe_pos) + miframe.registers_i[i] = box + res[-1][pos] = box pos += 1 for i in range(jitcode.num_regs_r()): jitframe_pos = frame.registers[pos] - if jitframe_pos == -1: - continue - miframe.registers_r[i] = self.get_ref_box(jitframe_pos) + if jitframe_pos != -1: + box = self.get_int_box(jitframe_pos) + res[-1][pos] = box + miframe.registers_r[i] = box pos += 1 for i in range(jitcode.num_regs_f()): jitframe_pos = frame.registers[pos] - if jitframe_pos == -1: - continue - miframe.registers_f[i] = self.get_float_box(jitframe_pos) + if jitframe_pos != -1: + box = self.get_int_box(jitframe_pos) + res[-1][pos] = box + miframe.registers_f[i] = box pos += 1 + return res, [f.registers for f in self.framestack] def rebuild_from_resumedata(metainterp, deadframe, faildescr): return BoxResumeReader(metainterp, deadframe).rebuild(faildescr) diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -21,7 +21,7 @@ self.registers_f = [None] * jitcode.num_regs_f() def num_nonempty_regs(self): - return len([i for i in self.registers_i if i.getint() != 2]) + return len([i for i in self.registers_i if i is not None]) def dump_registers(self, lst, backend_values): lst += [backend_values[x] for x in self.registers_i] @@ -33,7 +33,7 @@ self.cpu = MockCPU() self.framestack = [] - def newframe(self, jitcode): + def newframe(self, jitcode, record_resume=False): f = Frame(jitcode) self.framestack.append(f) return f @@ -94,7 +94,7 @@ descr = Descr() descr.rd_resume_bytecode = ResumeBytecode(resume_loop.operations) descr.rd_bytecode_position = 5 - rebuild_from_resumedata(metainterp, "myframe", descr) + state = rebuild_from_resumedata(metainterp, "myframe", descr) assert len(metainterp.framestack) == 2 f = metainterp.framestack[-1] f2 = metainterp.framestack[0] @@ -177,3 +177,6 @@ descr.rd_bytecode_position = 5 locs = rebuild_locs_from_resumedata(descr) assert locs == [[8, 11], [12]] + + def test_resume_put_const(self): + xxx From noreply at buildbot.pypy.org Tue Jan 14 12:17:15 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 14 Jan 2014 12:17:15 +0100 (CET) Subject: [pypy-commit] pypy 
resume-refactor: a TODO file Message-ID: <20140114111715.5BE6F1C0500@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68665:2c6b29d5d290 Date: 2014-01-13 16:28 +0100 http://bitbucket.org/pypy/pypy/changeset/2c6b29d5d290/ Log: a TODO file diff --git a/TODO b/TODO new file mode 100644 --- /dev/null +++ b/TODO @@ -0,0 +1,3 @@ + +* kill resumedescr.guard_opnum and replace by classes + From noreply at buildbot.pypy.org Tue Jan 14 12:17:16 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 14 Jan 2014 12:17:16 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) make resume2 emit RESUME_CLEAR and cleanup resume recording Message-ID: <20140114111716.AA1081C0500@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68666:8d4029b5e97f Date: 2014-01-14 12:16 +0100 http://bitbucket.org/pypy/pypy/changeset/8d4029b5e97f/ Log: (fijal, rguillebert) make resume2 emit RESUME_CLEAR and cleanup resume recording Additionally write unit tests diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -74,12 +74,19 @@ def process_resume_put(self, op): box = op.getarg(0) + if isinstance(box, Const): + return frame_pos = op.getarg(1).getint() pos_in_frame = op.getarg(2).getint() i = self.framestack[frame_pos].start_pos + pos_in_frame self.numbering[box] = i self.framestack[frame_pos].registers[pos_in_frame] = box + def process_resume_clear(self, op): + frame_pos = op.getarg(0).getint() + frontend_pos = op.getarg(1).getint() + self.framestack[frame_pos].registers[frontend_pos] = None + def get_numbering(self, mapping, op): lst = [] for frame in self.framestack: @@ -371,6 +378,8 @@ for box in frame.force_guard_op.failargs: if box is None: value = None + elif isinstance(box, Const): + xxx elif box is not frame.current_op.result: value = frame.env[box] else: @@ -784,7 +793,7 @@ self.framecontent = {} i = 0 for value in newvalues: - if value is None: + if value is None or isinstance(value, Const): continue self.setenv(newargs[i], value) i += 1 @@ -797,6 +806,8 @@ arg = self.current_op.failargs[i] if arg is None: value = None + elif isinstance(arg, Const): + value = arg else: value = self.env[arg] values.append(value) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -2,7 +2,7 @@ from rpython.jit.backend.llsupport.memcpy import memcpy_fn from rpython.jit.backend.llsupport.symbolic import WORD from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken, - ConstInt, BoxInt, AbstractFailDescr) + ConstInt, BoxInt, AbstractFailDescr, Const) from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.rlib import rgc from rpython.rlib.debug import (debug_start, debug_stop, have_debug_prints, @@ -122,7 +122,7 @@ inputlocs = loc_positions[i] assert len(inputlocs) == len(frame) for j, item in enumerate(frame): - if item is None: + if item is None or isinstance(item, Const): continue pos = inputlocs[j] if pos < GPR_REGS: diff --git a/rpython/jit/backend/llsupport/test/test_resumebuilder.py b/rpython/jit/backend/llsupport/test/test_resumebuilder.py --- a/rpython/jit/backend/llsupport/test/test_resumebuilder.py +++ b/rpython/jit/backend/llsupport/test/test_resumebuilder.py @@ -29,18 +29,20 @@ [i0] enter_frame(-1, 
descr=jitcode) resume_put(i0, 0, 2) + resume_put(1, 0, 1) guard_true(i0) leave_frame() """, namespace={'jitcode': jitcode}) looptoken = JitCellToken() self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) - descr = loop.operations[2].getdescr() - assert descr.rd_bytecode_position == 2 + descr = loop.operations[3].getdescr() + assert descr.rd_bytecode_position == 3 expected_resume = parse(""" [] enter_frame(-1, descr=jitcode) resume_put(28, 0, 2) + resume_put_const(1, 0, 1) leave_frame() """, namespace={'jitcode': jitcode}) equaloplists(descr.rd_resume_bytecode.opcodes, diff --git a/rpython/jit/backend/resumebuilder.py b/rpython/jit/backend/resumebuilder.py --- a/rpython/jit/backend/resumebuilder.py +++ b/rpython/jit/backend/resumebuilder.py @@ -1,6 +1,6 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation -from rpython.jit.metainterp.history import ConstInt, Box +from rpython.jit.metainterp.history import ConstInt, Box, Const from rpython.jit.metainterp.resume2 import ResumeBytecode, AbstractResumeReader class LivenessAnalyzer(AbstractResumeReader): @@ -19,8 +19,16 @@ self.framestack.append([None] * jitcode.num_regs()) def resume_put(self, box, framepos, frontend_pos): + if isinstance(box, Const): + return self.framestack[framepos][frontend_pos] = box + def resume_clear(self, framepos, frontend_pos): + self.framestack[framepos][frontend_pos] = None + + def resume_put_const(self, box, framepos, frontend_pos): + xxx + def resume_new(self, result, descr): self.deps[result] = {} @@ -34,7 +42,8 @@ if box in self.deps: for dep in self.deps[box].values(): self._track(allboxes, dep) - allboxes.append(box) + if not isinstance(box, Const) and box is not None: + allboxes.append(box) def all_boxes_from(self, frame): allboxes = [] @@ -77,7 +86,9 @@ if op.getopnum() == rop.RESUME_PUT: box = op.getarg(0) args = op.getarglist() - if box in self.virtuals: + if isinstance(box, Const): + newop = op.copy_and_change(rop.RESUME_PUT_CONST) + elif box in self.virtuals: newop = op else: try: @@ -135,13 +146,13 @@ count = 0 for frame in inputframes: for x in frame: - if x is not None: + if x is not None and not isinstance(x, Const): count += 1 inputargs = [None] * count pos = 0 for frame in inputframes: for item in frame: - if item is not None: + if item is not None and not isinstance(item, Const): inputargs[pos] = item pos += 1 return inputargs @@ -173,9 +184,8 @@ framestack = liveness_analyzer.get_live_info() for frame in framestack: for item in liveness_analyzer.all_boxes_from(frame): - if item is not None: - last_used[item] = position - frontend_alive[item] = position + last_used[item] = position + frontend_alive[item] = position for i in range(len(operations)-1, -1, -1): op = operations[i] diff --git a/rpython/jit/codewriter/format.py b/rpython/jit/codewriter/format.py --- a/rpython/jit/codewriter/format.py +++ b/rpython/jit/codewriter/format.py @@ -98,7 +98,7 @@ raise AssertionError('\n'.join(msg)) assert len(asmlines) == len(explines) -def unformat_assembler(text, registers=None): +def unformat_assembler(text, registers=None, name=None): # XXX limited to simple assembler right now # def unformat_arg(s): @@ -161,6 +161,8 @@ extra = [] insn = [opname] + [unformat_arg(s) for s in words] + extra ssarepr.insns.append(tuple(insn)) + if name is not None: + ssarepr.name = name return ssarepr diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -125,7 +125,7 @@ 
jitcell_token = make_jitcell_token(jitdriver_sd) part = create_empty_loop(metainterp) - part.inputargs = inputargs[:] + part.inputframes = [inputargs[:]] h_ops = history.operations part.resume_at_jump_descr = resume_at_jump_descr part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ @@ -141,7 +141,7 @@ all_target_tokens = [target_token] loop = create_empty_loop(metainterp) - loop.inputargs = part.inputargs + loop.inputframes = part.inputframes loop.operations = part.operations loop.quasi_immutable_deps = {} if part.quasi_immutable_deps: @@ -170,8 +170,9 @@ if not loop.quasi_immutable_deps: loop.quasi_immutable_deps = None - for box in loop.inputargs: - assert isinstance(box, Box) + for frame in loop.inputframes: + for box in frame: + assert isinstance(box, Box) loop.original_jitcell_token = jitcell_token for label in all_target_tokens: @@ -201,7 +202,7 @@ assert partial_trace.operations[-1].getopnum() == rop.LABEL part = create_empty_loop(metainterp) - part.inputargs = inputargs[:] + part.inputframes = [inputargs[:]] part.resume_at_jump_descr = resume_at_jump_descr h_ops = history.operations @@ -245,8 +246,9 @@ if quasi_immutable_deps: loop.quasi_immutable_deps = quasi_immutable_deps - for box in loop.inputargs: - assert isinstance(box, Box) + for frame in loop.inputframes: + for box in frame: + assert isinstance(box, Box) target_token = loop.operations[-1].getdescr() resumekey.compile_and_attach(metainterp, loop) @@ -259,10 +261,10 @@ def patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd): vinfo = jitdriver_sd.virtualizable_info extra_ops = [] - inputargs = loop.inputargs + inputargs = loop.inputframes[0] vable_box = inputargs[jitdriver_sd.index_of_virtualizable] i = jitdriver_sd.num_red_args - loop.inputargs = inputargs[:i] + loop.inputframes = [inputargs[:i]] for descr in vinfo.static_field_descrs: assert i < len(inputargs) box = inputargs[i] @@ -344,7 +346,8 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = do_compile_loop(metainterp_sd, loop.inputargs, + assert len(loop.inputframes) == 1 + asminfo = do_compile_loop(metainterp_sd, loop.inputframes[0], operations, original_jitcell_token, name=loopname) finally: @@ -362,7 +365,7 @@ ops_offset = asminfo.ops_offset else: ops_offset = None - metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, + metainterp_sd.logger_ops.log_loop(loop.inputframes[0], loop.operations, n, type, ops_offset, name=loopname) # @@ -809,7 +812,8 @@ # it does not work -- i.e. none of the existing old_loop_tokens match. 
new_trace = create_empty_loop(metainterp) new_trace.inputframes = metainterp.history.inputframes[:] - new_trace.inputlocs = metainterp.history.inputlocs[:] + if metainterp.history.inputlocs is not None: + new_trace.inputlocs = metainterp.history.inputlocs[:] # clone ops, as optimize_bridge can mutate the ops new_trace.operations = [op.clone() for op in metainterp.history.operations] diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -615,7 +615,8 @@ return 'TargetToken(%d)' % compute_unique_id(self) class TreeLoop(object): - inputargs = None + inputframes = None + inputlocs = None operations = None call_pure_results = None logops = None @@ -655,7 +656,7 @@ return self.operations def get_display_text(self): # for graphpage.py - return self.name + '\n' + repr(self.inputargs) + return self.name + '\n' + repr(self.inputframes) def show(self, errmsg=None): "NOT_RPYTHON" @@ -664,7 +665,7 @@ def check_consistency(self): # for testing "NOT_RPYTHON" - self.check_consistency_of(self.inputargs, self.operations) + self.check_consistency_of(self.inputframes, self.operations) for op in self.operations: descr = op.getdescr() if op.getopnum() == rop.LABEL and isinstance(descr, TargetToken): @@ -672,10 +673,14 @@ @staticmethod def check_consistency_of(inputargs, operations): - for box in inputargs: - assert isinstance(box, Box), "Loop.inputargs contains %r" % (box,) - seen = dict.fromkeys(inputargs) - assert len(seen) == len(inputargs), ( + seen = {} + all = 0 + for frame in inputargs: + for box in frame: + assert isinstance(box, Box), "Loop.inputargs contains %r" % (box,) + seen[box] = None + all += 1 + assert len(seen) == all, ( "duplicate Box in the Loop.inputargs") TreeLoop.check_consistency_of_branch(operations, seen) @@ -713,7 +718,7 @@ def dump(self): # RPython-friendly - print '%r: inputargs =' % self, self._dump_args(self.inputargs) + print '%r: inputargs =' % self, self._dump_args(self.inputframes) for op in self.operations: args = op.getarglist() print '\t', op.getopname(), self._dump_args(args), \ @@ -735,7 +740,7 @@ if omit_finish and operations[-1].getopnum() == rop.FINISH: # xxx obscure return - result.extend(operations) + result.extend([op for op in operations if not op.is_resume()]) for op in operations: if op.is_guard() and op.getdescr(): if hasattr(op.getdescr(), '_debug_suboperations'): @@ -747,7 +752,8 @@ class History(object): def __init__(self): - self.inputargs = None + self.inputframes = None + self.inputlocs = None self.operations = [] def record(self, opnum, argboxes, resbox, descr=None): diff --git a/rpython/jit/metainterp/jitdriver.py b/rpython/jit/metainterp/jitdriver.py --- a/rpython/jit/metainterp/jitdriver.py +++ b/rpython/jit/metainterp/jitdriver.py @@ -3,16 +3,21 @@ class JitDriverStaticData(object): """There is one instance of this class per JitDriver used in the program. """ + virtualizable_info = None + greenfield_info = None + + def __init__(self, jitdriver, portal_graph, result_type): + self.jitdriver = jitdriver + self.portal_graph = portal_graph + self.result_type = result_type + self.num_green_args = len(jitdriver.greens) + self.num_red_args = len(jitdriver.reds) + # This is just a container with the following attributes (... set by): - # self.jitdriver ... rpython.jit.metainterp.warmspot - # self.portal_graph ... rpython.jit.metainterp.warmspot # self.portal_runner_ptr ... rpython.jit.metainterp.warmspot # self.portal_runner_adr ... 
rpython.jit.metainterp.warmspot # self.portal_calldescr ... rpython.jit.metainterp.warmspot - # self.num_green_args ... rpython.jit.metainterp.warmspot - # self.num_red_args ... rpython.jit.metainterp.warmspot # self.red_args_types ... rpython.jit.metainterp.warmspot - # self.result_type ... rpython.jit.metainterp.warmspot # self.virtualizable_info... rpython.jit.metainterp.warmspot # self.greenfield_info ... rpython.jit.metainterp.warmspot # self.warmstate ... rpython.jit.metainterp.warmspot diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -51,11 +51,13 @@ def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): """Optimize loop.operations to remove internal overheadish operations. """ + from rpython.jit.backend.resumebuilder import flatten debug_start("jit-optimize") try: - loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs, - loop.operations) + loop.logops = metainterp_sd.logger_noopt.log_loop( + flatten(loop.inputframes), + loop.operations) optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts) if unroll: optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -125,11 +125,14 @@ # ____________________________________________________________ -def equaloplists(oplist1, oplist2, remap={}, text_right=None): +def equaloplists(oplist1, oplist2, remap=None, text_right=None, + cache=True): # try to use the full width of the terminal to display the list # unfortunately, does not work with the default capture method of py.test # (which is fd), you you need to use either -s or --capture=sys, else you # get the standard 80 columns width + if remap is None: + remap = {} totwidth = py.io.get_terminal_width() width = totwidth / 2 - 1 print ' Comparing lists '.center(totwidth, '-') @@ -147,11 +150,15 @@ for i in range(op1.numargs()): x = op1.getarg(i) y = op2.getarg(i) + if cache and y not in remap: + remap[y] = x assert x.same_box(remap.get(y, y)) if op2.result in remap: if op2.result is None: assert op1.result == remap[op2.result] else: + if cache and op2.result not in remap: + remap[op2.result] = op1.result assert op1.result.same_box(remap[op2.result]) else: remap[op2.result] = op1.result diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -13,6 +13,7 @@ from rpython.jit.metainterp.logger import Logger from rpython.jit.metainterp.optimizeopt.util import args_dict_box from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resume2 import ResumeRecorder from rpython.rlib import nonconst, rstack from rpython.rlib.debug import debug_start, debug_stop, debug_print, make_sure_not_resized from rpython.rlib.jit import Counters @@ -111,39 +112,6 @@ def get_current_position_info(self): return self.jitcode.get_live_vars_info(self.pc) - def get_list_of_active_boxes(self, in_a_call): - if in_a_call: - # If we are not the topmost frame, self._result_argcode contains - # the type of the result of the call instruction in the bytecode. - # We use it to clear the box that will hold the result: this box - # is not defined yet. 
- argcode = self._result_argcode - index = ord(self.bytecode[self.pc - 1]) - if argcode == 'i': self.registers_i[index] = history.CONST_FALSE - elif argcode == 'r': self.registers_r[index] = history.CONST_NULL - elif argcode == 'f': self.registers_f[index] = history.CONST_FZERO - self._result_argcode = '?' # done - # - info = self.get_current_position_info() - start_i = 0 - start_r = start_i + info.get_register_count_i() - start_f = start_r + info.get_register_count_r() - total = start_f + info.get_register_count_f() - # allocate a list of the correct size - env = [None] * total - make_sure_not_resized(env) - # fill it now - for i in range(info.get_register_count_i()): - index = info.get_register_index_i(i) - env[start_i + i] = self.registers_i[index] - for i in range(info.get_register_count_r()): - index = info.get_register_index_r(i) - env[start_r + i] = self.registers_r[index] - for i in range(info.get_register_count_f()): - index = info.get_register_index_f(i) - env[start_f + i] = self.registers_f[index] - return env - def replace_active_box_in_frame(self, oldbox, newbox): if isinstance(oldbox, history.BoxInt): count = self.jitcode.num_regs_i() @@ -1460,29 +1428,32 @@ # but we should not follow calls to that graph return self.do_residual_call(funcbox, argboxes, calldescr, pc) - def emit_resume_data(self, pos): + def emit_resume_data(self, pos, in_call): i = 0 history = self.metainterp.history + boxes = self.get_list_of_active_boxes(in_call) + #xxx + #xxx for i in range(self.jitcode.num_regs_i()): box = self.registers_i[i] - if box is not None and box not in self.resume_cache: + if box is not None and (box, pos, i) not in self.resume_cache: history.record(rop.RESUME_PUT, [box, ConstInt(pos), ConstInt(i)], None) - self.resume_cache[box] = None + self.resume_cache[(box, pos, i)] = None start = self.jitcode.num_regs_i() for i in range(self.jitcode.num_regs_r()): box = self.registers_r[i] - if box is not None and box not in self.resume_cache: + if box is not None and (box, pos, i) not in self.resume_cache: history.record(rop.RESUME_PUT, [box, ConstInt(pos), ConstInt(i + start)], None) - self.resume_cache[box] = None + self.resume_cache[(box, pos, i)] = None start = self.jitcode.num_regs_i() + self.jitcode.num_regs_r() for i in range(self.jitcode.num_regs_f()): box = self.registers_f[i] - if box is not None and box not in self.resume_cache: + if box is not None and (box, pos, i) not in self.resume_cache: history.record(rop.RESUME_PUT, [box, ConstInt(pos), ConstInt(i + start)], None) - self.resume_cache[box] = None + self.resume_cache[(box, pos, i)] = None history.record(rop.RESUME_SET_PC, [ConstInt(self.pc)], None) # ____________________________________________________________ @@ -1679,6 +1650,7 @@ self.retracing_from = -1 self.call_pure_results = args_dict_box() self.heapcache = HeapCache() + self.resumerecorder = ResumeRecorder(self) self.call_ids = [] self.current_call_id = 0 @@ -1704,8 +1676,7 @@ else: pc = -1 if record_resume: - self.history.record(rop.ENTER_FRAME, [ConstInt(pc)], None, - descr=jitcode) + self.resumerecorder.enter_frame(pc, jitcode) if jitcode.is_portal: self.portal_call_depth += 1 self.call_ids.append(self.current_call_id) @@ -1722,7 +1693,7 @@ return f def popframe(self): - self.history.record(rop.LEAVE_FRAME, [], None) + self.resumerecorder.leave_frame() frame = self.framestack.pop() jitcode = frame.jitcode if jitcode.is_portal: @@ -1822,18 +1793,14 @@ else: resumedescr = compile.ResumeGuardDescr() resumedescr.guard_opnum = opnum # XXX kill me - 
self.sync_resume_data(resumedescr, resumepc) + self.resumerecorder.resume_point(resumedescr, resumepc) guard_op = self.history.record(opnum, moreargs, None, descr=resumedescr) self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) # count self.attach_debug_info(guard_op) return guard_op - - def sync_resume_data(self, resumedescr, resumepc): - for i, frame in enumerate(self.framestack): - frame.emit_resume_data(i) - + def capture_resumedata(self, resumedescr, resumepc=-1): XXXX virtualizable_boxes = None @@ -2034,7 +2001,7 @@ num_green_args = self.jitdriver_sd.num_green_args original_greenkey = original_boxes[:num_green_args] self.resumekey = compile.ResumeFromInterpDescr(original_greenkey) - self.history.inputargs = original_boxes[num_green_args:] + self.history.inputframes = [original_boxes[num_green_args:]] self.seen_loop_header_for_jdindex = -1 try: self.interpret() @@ -2175,7 +2142,8 @@ return ints[:], refs[:], floats[:] def raise_continue_running_normally(self, live_arg_boxes, loop_token): - self.history.inputargs = None + self.history.inputframes = None + self.history.inputlocs = None self.history.operations = None # For simplicity, we just raise ContinueRunningNormally here and # ignore the loop_token passed in. It means that we go back to diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -473,9 +473,11 @@ 'RESUME_PUT/3', # arguments are as follows - box or position in the backend, # the frame index (counting from top) and position in the # frontend + 'RESUME_PUT_CONST/3', # the same but for a constant 'RESUME_NEW/0d', 'RESUME_SETFIELD_GC/2d', 'RESUME_SET_PC/1', + 'RESUME_CLEAR/2', '_RESUME_LAST', # ----- end of resume only operations ------ '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -1,6 +1,7 @@ from rpython.jit.metainterp.resoperation import rop -from rpython.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat +from rpython.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, ConstInt +from rpython.jit.metainterp import history from rpython.jit.codewriter.jitcode import JitCode from rpython.rlib import rstack @@ -18,8 +19,13 @@ self.pc = -1 class AbstractResumeReader(object): + """ A resume reader that can follow resume until given point. Consult + the concrete classes for details + """ + def __init__(self): self.framestack = [] + self.consts = [] # XXX cache? 
def rebuild(self, faildescr): self._rebuild_until(faildescr.rd_resume_bytecode, @@ -39,6 +45,13 @@ jitframe_pos = jitframe_pos_const.getint() self.framestack[frame_no].registers[frontend_position] = jitframe_pos + def resume_clear(self, frame_no, frontend_position): + self.framestack[frame_no].registers[frontend_position] = -1 + + def resume_put_const(self, const, frame_no, frontend_position): + self.framestack[frame_no].registers[frontend_position] = - 2 - len(self.consts) + self.consts.append(const) + def resume_set_pc(self, pc): self.framestack[-1].pc = pc @@ -62,6 +75,9 @@ elif op.getopnum() == rop.RESUME_PUT: self.resume_put(op.getarg(0), op.getarg(1).getint(), op.getarg(2).getint()) + elif op.getopnum() == rop.RESUME_PUT_CONST: + self.resume_put_const(op.getarg(0), op.getarg(1).getint(), + op.getarg(2).getint()) elif op.getopnum() == rop.RESUME_NEW: self.resume_new(op.result, op.getdescr()) elif op.getopnum() == rop.RESUME_SETFIELD_GC: @@ -69,6 +85,9 @@ op.getdescr()) elif op.getopnum() == rop.RESUME_SET_PC: self.resume_set_pc(op.getarg(0).getint()) + elif op.getopnum() == rop.RESUME_CLEAR: + self.resume_clear(op.getarg(0).getint(), + op.getarg(1).getint()) elif not op.is_resume(): pos += 1 continue @@ -80,6 +99,10 @@ return self.metainterp.cpu.get_int_value(self.deadframe, jitframe_pos) class DirectResumeReader(AbstractResumeReader): + """ Directly read values from the jitframe and put them in the blackhole + interpreter + """ + def __init__(self, binterpbuilder, cpu, deadframe): self.bhinterpbuilder = binterpbuilder self.cpu = cpu @@ -96,40 +119,79 @@ curbh.setposition(jitcode, frame.pc) pos = 0 for i in range(jitcode.num_regs_i()): - jitframe_pos = frame.registers[pos] - if jitframe_pos != -1: - curbh.registers_i[i] = self.cpu.get_int_value( - self.deadframe, jitframe_pos) + self.store_int_value(curbh, i, frame.registers[pos]) pos += 1 for i in range(jitcode.num_regs_r()): - jitframe_pos = frame.registers[pos] - if jitframe_pos != -1: - curbh.registers_r[i] = self.cpu.get_ref_value( - self.deadframe, jitframe_pos) + self.store_ref_value(curbh, i, frame.registers[pos]) pos += 1 for i in range(jitcode.num_regs_f()): - jitframe_pos = frame.registers[pos] - if jitframe_pos != -1: - curbh.registers_f[i] = self.cpu.get_float_value( - self.deadframe, jitframe_pos) + self.store_float_value(curbh, i, frame.registers[pos]) pos += 1 return curbh + def store_int_value(self, curbh, i, jitframe_pos): + if jitframe_pos >= 0: + curbh.registers_i[i] = self.cpu.get_int_value( + self.deadframe, jitframe_pos) + elif jitframe_pos < -1: + curbh.registers_i[i] = self.consts[-jitframe_pos - 2].getint() + + def store_ref_value(self, curbh, i, jitframe_pos): + if jitframe_pos >= 0: + curbh.registers_r[i] = self.cpu.get_ref_value( + self.deadframe, jitframe_pos) + elif jitframe_pos < -1: + curbh.registers_r[i] = self.consts[-jitframe_pos - 2].getref_base() + + def store_float_value(self, curbh, i, jitframe_pos): + if jitframe_pos >= 0: + curbh.registers_f[i] = self.cpu.get_float_value( + self.deadframe, jitframe_pos) + elif jitframe_pos < -1: + curbh.registers_f[i] = self.consts[-jitframe_pos - 2].getfloat() + class BoxResumeReader(AbstractResumeReader): + """ Create boxes corresponding to the resume and store them in + the metainterp + """ + def __init__(self, metainterp, deadframe): self.metainterp = metainterp self.deadframe = deadframe AbstractResumeReader.__init__(self) - def get_int_box(self, pos): - return BoxInt(self.metainterp.cpu.get_int_value(self.deadframe, pos)) + def store_int_box(self, res, 
pos, miframe, i, jitframe_pos): + if jitframe_pos == -1: + return + if jitframe_pos >= 0: + box = BoxInt(self.metainterp.cpu.get_int_value(self.deadframe, + jitframe_pos)) + elif jitframe_pos <= -2: + box = self.consts[-jitframe_pos - 2] + miframe.registers_i[i] = box + res[-1][pos] = box - def get_ref_box(self, pos): - return BoxPtr(self.metainterp.cpu.get_ref_value(self.deadframe, pos)) + def store_ref_box(self, res, pos, miframe, i, jitframe_pos): + if jitframe_pos == -1: + return + if jitframe_pos >= 0: + box = BoxPtr(self.metainterp.cpu.get_ref_value(self.deadframe, + jitframe_pos)) + elif jitframe_pos <= -2: + box = self.consts[-jitframe_pos - 2] + miframe.registers_r[i] = box + res[-1][pos] = box - def get_float_box(self, pos): - return BoxFloat(self.metainterp.cpu.get_float_value(self.deadframe, - pos)) + def store_float_box(self, res, pos, miframe, i, jitframe_pos): + if jitframe_pos == -1: + return + if jitframe_pos >= 0: + box = BoxFloat(self.metainterp.cpu.get_float_value(self.deadframe, + jitframe_pos)) + elif jitframe_pos <= -2: + box = self.consts[-jitframe_pos - 2] + miframe.registers_f[i] = box + res[-1][pos] = box def finish(self): res = [] @@ -140,33 +202,25 @@ miframe.pc = frame.pc pos = 0 for i in range(jitcode.num_regs_i()): - jitframe_pos = frame.registers[pos] - if jitframe_pos != -1: - box = self.get_int_box(jitframe_pos) - miframe.registers_i[i] = box - res[-1][pos] = box + self.store_int_box(res, pos, miframe, i, frame.registers[pos]) pos += 1 for i in range(jitcode.num_regs_r()): - jitframe_pos = frame.registers[pos] - if jitframe_pos != -1: - box = self.get_int_box(jitframe_pos) - res[-1][pos] = box - miframe.registers_r[i] = box + self.store_ref_box(res, pos, miframe, i, frame.registers[pos]) pos += 1 for i in range(jitcode.num_regs_f()): - jitframe_pos = frame.registers[pos] - if jitframe_pos != -1: - box = self.get_int_box(jitframe_pos) - res[-1][pos] = box - miframe.registers_f[i] = box + self.store_float_box(res, pos, miframe, i, frame.registers[pos]) pos += 1 return res, [f.registers for f in self.framestack] def rebuild_from_resumedata(metainterp, deadframe, faildescr): + """ Reconstruct metainterp frames from the resumedata + """ return BoxResumeReader(metainterp, deadframe).rebuild(faildescr) def blackhole_from_resumedata(interpbuilder, metainterp_sd, faildescr, deadframe, all_virtuals=None): + """ Reconstruct the blackhole interpreter from the resume data + """ assert all_virtuals is None #rstack._stack_criticalcode_start() #try: @@ -178,3 +232,75 @@ deadframe).rebuild(faildescr) return last_bhinterp + +class ResumeRecorder(object): + """ Created by metainterp to record the resume as we record operations + """ + def __init__(self, metainterp): + self.metainterp = metainterp + self.cachestack = [] + + def enter_frame(self, pc, jitcode): + self.metainterp.history.record(rop.ENTER_FRAME, [ConstInt(pc)], None, + descr=jitcode) + self.cachestack.append([None] * jitcode.num_regs()) + + def leave_frame(self): + self.metainterp.history.record(rop.LEAVE_FRAME, [], None) + self.cachestack.pop() + + def resume_point(self, resumedescr, resumepc): + framestack = self.metainterp.framestack + for i, frame in enumerate(framestack): + self._emit_resume_data(resumepc, frame, i, not i == len(framestack)) + + def process_box(self, index_in_frontend, frame_pos, box): + cache = self.cachestack[frame_pos] + self.marked[index_in_frontend] = box + if cache[index_in_frontend] is box: + return + cache[index_in_frontend] = box + self.metainterp.history.record(rop.RESUME_PUT, + 
[box, ConstInt(frame_pos), + ConstInt(index_in_frontend)], None) + + def _emit_resume_data(self, resume_pc, frame, frame_pos, in_a_call): + self.marked = [None] * len(self.cachestack[frame_pos]) + if in_a_call: + # If we are not the topmost frame, frame._result_argcode contains + # the type of the result of the call instruction in the bytecode. + # We use it to clear the box that will hold the result: this box + # is not defined yet. + argcode = frame._result_argcode + index = ord(frame.bytecode[frame.pc - 1]) + if argcode == 'i': frame.registers_i[index] = history.CONST_FALSE + elif argcode == 'r': frame.registers_r[index] = history.CONST_NULL + elif argcode == 'f': frame.registers_f[index] = history.CONST_FZERO + frame._result_argcode = '?' # done + # + info = frame.get_current_position_info() + start_i = 0 + start_r = start_i + info.get_register_count_i() + start_f = start_r + info.get_register_count_r() + # fill it now + for i in range(info.get_register_count_i()): + index = info.get_register_index_i(i) + self.process_box(index, frame_pos, frame.registers_i[index]) + for i in range(info.get_register_count_r()): + index = info.get_register_index_r(i) + self.process_box(index + start_r, frame_pos, + frame.registers_i[index]) + for i in range(info.get_register_count_f()): + index = info.get_register_index_f(i) + self.process_box(index + start_f, frame_pos, + frame.registers_i[index]) + + history = self.metainterp.history + cache = self.cachestack[frame_pos] + for i in range(len(self.marked)): + if self.marked[i] is None and cache[i] is not None: + cache[i] = None + history.record(rop.RESUME_CLEAR, [ConstInt(frame_pos), + ConstInt(i)], None) + history.record(rop.RESUME_SET_PC, [ConstInt(resume_pc)], None) + self.marked = None diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -199,6 +199,7 @@ def meta_interp(self, *args, **kwds): kwds['CPUClass'] = self.CPUClass kwds['type_system'] = self.type_system + kwds['enable_opts'] = '' if "backendopt" not in kwds: kwds["backendopt"] = False old = codewriter.CodeWriter.debug diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -189,10 +189,6 @@ found = 0 for op in get_stats().loops[0]._all_operations(): if op.getopname() == 'guard_true': - liveboxes = op.getfailargs() - assert len(liveboxes) == 2 # x, y (in some order) - assert isinstance(liveboxes[0], history.BoxInt) - assert isinstance(liveboxes[1], history.BoxInt) found += 1 if 'unroll' in self.enable_opts: assert found == 2 diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -2,9 +2,18 @@ import py from rpython.jit.tool.oparser import parse from rpython.jit.codewriter.jitcode import JitCode -from rpython.jit.metainterp.history import AbstractDescr +from rpython.jit.metainterp.history import AbstractDescr, Const, INT, Stats from rpython.jit.metainterp.resume2 import rebuild_from_resumedata,\ ResumeBytecode, AbstractResumeReader +from rpython.jit.codewriter.format import unformat_assembler +from rpython.jit.codewriter.codewriter import CodeWriter +from rpython.jit.backend.llgraph.runner import LLGraphCPU +from rpython.jit.metainterp.pyjitpl import MetaInterp, 
MetaInterpStaticData +from rpython.jit.metainterp.jitdriver import JitDriverStaticData +from rpython.jit.metainterp.warmstate import JitCell +from rpython.jit.metainterp.jitexc import DoneWithThisFrameInt +from rpython.jit.metainterp.optimizeopt.util import equaloplists +from rpython.rlib.jit import JitDriver class Descr(AbstractDescr): @@ -61,17 +70,20 @@ [] enter_frame(-1, descr=jitcode1) resume_put(10, 0, 1) + resume_put_const(1, 0, 2) leave_frame() - """, namespace={'jitcode1': jitcode}) + """, namespace= {'jitcode1': jitcode}) descr = Descr() descr.rd_resume_bytecode = ResumeBytecode(resume_loop.operations) - descr.rd_bytecode_position = 2 + descr.rd_bytecode_position = 3 metainterp = MockMetaInterp() metainterp.cpu = MockCPU() rebuild_from_resumedata(metainterp, "myframe", descr) assert len(metainterp.framestack) == 1 f = metainterp.framestack[-1] assert f.registers_i[1].getint() == 13 + assert isinstance(f.registers_i[2], Const) + assert f.registers_i[2].getint() == 1 def test_nested_call(self): jitcode1 = JitCode("jitcode") @@ -94,7 +106,7 @@ descr = Descr() descr.rd_resume_bytecode = ResumeBytecode(resume_loop.operations) descr.rd_bytecode_position = 5 - state = rebuild_from_resumedata(metainterp, "myframe", descr) + rebuild_from_resumedata(metainterp, "myframe", descr) assert len(metainterp.framestack) == 2 f = metainterp.framestack[-1] f2 = metainterp.framestack[0] @@ -178,5 +190,147 @@ locs = rebuild_locs_from_resumedata(descr) assert locs == [[8, 11], [12]] - def test_resume_put_const(self): - xxx +class AssemblerExecuted(Exception): + pass + +class FakeWarmstate(object): + enable_opts = [] + + def __init__(self): + self.jitcell = JitCell() + + def get_location_str(self, greenkey): + return "foo" + + def jit_cell_at_key(self, greenkey): + return self.jitcell + + def attach_procedure_to_interp(self, *args): + pass + + def execute_assembler(self, token, *args): + raise AssemblerExecuted(*args) + +def get_metainterp(assembler, no_reds=0): + codewriter = CodeWriter() + ssarepr = unformat_assembler(assembler, name='one') + jitcode = codewriter.assembler.assemble(ssarepr) + jitcode.is_portal = True + reds = ['v' + str(i) for i in range(no_reds)] + jitdriver_sd = JitDriverStaticData(JitDriver(greens = [], + reds = reds), + None, INT) + jitdriver_sd.mainjitcode = jitcode + jitdriver_sd.warmstate = FakeWarmstate() + jitdriver_sd.no_loop_header = False + jitdriver_sd._get_printable_location_ptr = None + codewriter.setup_jitdriver(jitdriver_sd) + stats = Stats() + cpu = LLGraphCPU(None, stats) + metainterp_sd = MetaInterpStaticData(cpu, None) + metainterp_sd.finish_setup(codewriter) + return MetaInterp(metainterp_sd, jitdriver_sd), stats, jitdriver_sd + +class TestResumeRecorder(object): + def test_simple(self): + assembler = """ + L1: + -live- %i0, %i1, %i2 + jit_merge_point $0, I[], R[], F[], I[%i0, %i1, %i2], R[], F[] + -live- %i0, %i1, %i2 + int_add %i2, %i0 -> %i2 + int_sub %i1, $1 -> %i1 + goto_if_not_int_gt %i1, $0, L2 + -live- %i0, %i1, %i2, L2 + loop_header $0 + goto L1 + --- + L2: + int_mul %i2, $2 -> %i0 + int_return %i0 + """ + metainterp, stats, jitdriver_sd = get_metainterp(assembler, no_reds=3) + jitcode = jitdriver_sd.mainjitcode + try: + metainterp.compile_and_run_once(jitdriver_sd, 6, 7, 0) + except AssemblerExecuted, e: + assert e.args == (6, 6, 6) + else: + raise Exception("did not exit") + resume_ops = [o for o in stats.operations if o.is_resume()] + expected = parse(""" + [i0, i1, i2] + enter_frame(-1, descr=jitcode) + resume_put(i0, 0, 0) + resume_put(i1, 0, 1) + 
resume_put(i2, 0, 2) + resume_set_pc(24) + """, namespace={'jitcode': jitcode}) + equaloplists(resume_ops, expected.operations, cache=True) + + def test_live_boxes(self): + assembler = """ + L1: + -live- %i0, %i1, %i2 + jit_merge_point $0, I[], R[], F[], I[%i0, %i1, %i2], R[], F[] + -live- %i0, %i1, %i2 + goto_if_not_int_gt %i1, $0, L2 + -live- %i0, %i1, L2 + loop_header $0 + goto L1 + --- + L2: + int_return %i0 + """ + metainterp, stats, jitdriver_sd = get_metainterp(assembler, no_reds=3) + jitcode = jitdriver_sd.mainjitcode + try: + metainterp.compile_and_run_once(jitdriver_sd, -1, -1, 0) + except DoneWithThisFrameInt: + pass + resume_ops = [o for o in stats.operations if o.is_resume()] + expected = parse(""" + [i0, i1, i2] + enter_frame(-1, descr=jitcode) + resume_put(i0, 0, 0) + resume_put(i1, 0, 1) + resume_set_pc(16) + leave_frame() + """, namespace={'jitcode': jitcode}) + equaloplists(resume_ops, expected.operations, cache=True) + + def test_live_boxes_2(self): + assembler = """ + L1: + -live- %i0, %i1, %i2 + jit_merge_point $0, I[], R[], F[], I[%i0, %i1, %i2], R[], F[] + -live- %i0, %i1, %i2 + goto_if_not_int_gt %i1, $0, L2 + -live- %i0, %i1, %i2, L2 + goto_if_not_int_gt %i2, $0, L2 + -live- %i0, %i2, L2 + loop_header $0 + goto L1 + --- + L2: + int_return %i0 + """ + metainterp, stats, jitdriver_sd = get_metainterp(assembler, no_reds=3) + jitcode = jitdriver_sd.mainjitcode + try: + metainterp.compile_and_run_once(jitdriver_sd, -1, 13, -1) + except DoneWithThisFrameInt: + pass + resume_ops = [o for o in stats.operations if o.is_resume()] + expected = parse(""" + [i0, i1, i2] + enter_frame(-1, descr=jitcode) + resume_put(i0, 0, 0) + resume_put(i1, 0, 1) + resume_put(i2, 0, 2) + resume_set_pc(-1) + resume_clear(0, 1) + resume_set_pc(-1) + leave_frame() + """, namespace={'jitcode': jitcode}) + equaloplists(resume_ops, expected.operations, cache=True) diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -350,11 +350,8 @@ def split_graph_and_record_jitdriver(self, graph, block, pos): op = block.operations[pos] - jd = JitDriverStaticData() - jd._jit_merge_point_in = graph args = op.args[2:] s_binding = self.translator.annotator.binding - jd._portal_args_s = [s_binding(v) for v in args] graph = copygraph(graph) [jmpp] = find_jit_merge_points([graph]) graph.startblock = support.split_before_jit_merge_point(*jmpp) @@ -370,15 +367,16 @@ assert isinstance(v, Variable) assert len(dict.fromkeys(graph.getargs())) == len(graph.getargs()) self.translator.graphs.append(graph) - jd.portal_graph = graph # it's a bit unbelievable to have a portal without func assert hasattr(graph, "func") graph.func._dont_inline_ = True graph.func._jit_unroll_safe_ = True - jd.jitdriver = block.operations[pos].args[1].value + result_type = history.getkind(graph.getreturnvar().concretetype)[0] + jd = JitDriverStaticData(block.operations[pos].args[1].value, graph, + result_type) + jd._portal_args_s = [s_binding(v) for v in args] + jd._jit_merge_point_in = graph jd.portal_runner_ptr = "" - jd.result_type = history.getkind(jd.portal_graph.getreturnvar() - .concretetype)[0] self.jitdrivers_sd.append(jd) def check_access_directly_sanity(self, graphs): @@ -567,8 +565,6 @@ ALLARGS = [v.concretetype for v in (greens_v + reds_v)] jd._green_args_spec = [v.concretetype for v in greens_v] jd.red_args_types = [history.getkind(v.concretetype) for v in reds_v] - jd.num_green_args = len(jd._green_args_spec) - 
jd.num_red_args = len(jd.red_args_types) RESTYPE = graph.getreturnvar().concretetype (jd._JIT_ENTER_FUNCTYPE, jd._PTR_JIT_ENTER_FUNCTYPE) = self.cpu.ts.get_FuncType(ALLARGS, lltype.Void) From noreply at buildbot.pypy.org Tue Jan 14 12:19:16 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 12:19:16 +0100 (CET) Subject: [pypy-commit] stmgc c7: start adding a nursery and redo barriers without the leader-model Message-ID: <20140114111916.D31801C0500@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r603:6de71cb85ddb Date: 2014-01-14 12:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/6de71cb85ddb/ Log: start adding a nursery and redo barriers without the leader-model diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -18,6 +18,7 @@ #define NB_THREADS 2 #define MAP_PAGES_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE) #define LARGE_OBJECT_WORDS 36 +#define NB_NURSERY_PAGES 1024 #if defined(__i386__) || defined(__x86_64__) # define HAVE_FULL_EXCHANGE_INSN @@ -42,6 +43,7 @@ struct stm_list_s *modified_objects; struct stm_list_s *new_object_ranges; struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; + localchar_t *nursery_current; }; #define _STM_TL2 ((_thread_local2_t *)_STM_TL1) @@ -226,12 +228,20 @@ void _stm_write_slowpath(object_t *obj) { - maybe_update(CAN_CONFLICT); - _stm_privatize(((uintptr_t)obj) / 4096); + uintptr_t t0_offset = (uintptr_t)obj; + char* t0_addr = get_thread_base(0) + t0_offset; + struct object_s *t0_obj = (struct object_s *)t0_addr; + + + int previous = __sync_lock_test_and_set(&t0_obj->stm_write_lock, 1); + if (previous) + abort(); /* XXX */ + + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + stm_read(obj); - obj->write_version = _STM_TL1->transaction_write_version; _STM_TL2->modified_objects = stm_list_append( _STM_TL2->modified_objects, obj); @@ -306,24 +316,29 @@ assert(size % 8 == 0); size_t i = size / 8; assert(2 <= i && i < LARGE_OBJECT_WORDS);//XXX - alloc_for_size_t *alloc = &_STM_TL2->alloc[i]; - localchar_t *p = alloc->next; - alloc->next = p + size; - if ((uint16_t)(uintptr_t)p == alloc->stop) - p = _stm_alloc_next_page(i); + localchar_t *current = _STM_TL2->nursery_current; + localchar_t *new_current = current + size; + if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { + /* XXX: do minor collection */ + abort(); + } - object_t *result = (object_t *)p; - result->write_version = _STM_TL1->transaction_write_version; + object_t *result = (object_t *)current; return result; } + + + + #define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_THREADS) #define READMARKER_END ((NB_PAGES * 4096UL) >> 4) #define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) #define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) +#define FIRST_AFTER_NURSERY_PAGE (FIRST_OBJECT_PAGE + NB_NURSERY_PAGES) void stm_setup(void) { @@ -368,9 +383,12 @@ if (i > 0) { int res; - res = remap_file_pages(thread_base + FIRST_OBJECT_PAGE * 4096UL, - (NB_PAGES - FIRST_OBJECT_PAGE) * 4096UL, - 0, FIRST_OBJECT_PAGE, 0); + + res = remap_file_pages( + thread_base + FIRST_AFTER_NURSERY_PAGE * 4096UL, + (NB_PAGES - FIRST_AFTER_NURSERY_PAGE) * 4096UL, + 0, FIRST_AFTER_NURSERY_PAGE, 0); + if (res != 0) { perror("remap_file_pages"); abort(); @@ -379,7 +397,7 @@ } num_threads_started = 0; - index_page_never_used = FIRST_OBJECT_PAGE; + index_page_never_used = FIRST_AFTER_NURSERY_PAGE; pending_updates = NULL; } diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -29,8 
+29,13 @@ newly allocated objects. */ +enum { + GCFLAG_WRITE_BARRIER = (1 << 0), +}; + struct object_s { uint8_t stm_flags; /* reserved for the STM library */ + uint8_t stm_write_lock; /* 1 if writeable by some thread */ uint32_t header; /* for the user program -- only write in newly allocated objects */ }; @@ -65,7 +70,7 @@ static inline void stm_write(object_t *obj) { - if (UNLIKELY(obj->write_version != _STM_TL1->transaction_write_version)) + if (UNLIKELY(obj->stm_flags & GCFLAG_WRITE_BARRIER)) _stm_write_slowpath(obj); } From noreply at buildbot.pypy.org Tue Jan 14 14:12:10 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 14 Jan 2014 14:12:10 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) fix enough to pass the first test of test_loop (again) Message-ID: <20140114131210.9334E1C050C@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68667:1f547589daea Date: 2014-01-14 14:11 +0100 http://bitbucket.org/pypy/pypy/changeset/1f547589daea/ Log: (fijal, rguillebert) fix enough to pass the first test of test_loop (again) diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -3,6 +3,7 @@ from rpython.rlib.rarithmetic import intmask from rpython.rlib.unroll import unrolling_iterable from rpython.jit.metainterp import resoperation +from rpython.jit.metainterp.history import Box from rpython.rlib.debug import make_sure_not_resized from rpython.jit.metainterp.resoperation import rop from rpython.rlib.objectmodel import we_are_translated @@ -150,7 +151,7 @@ for i in range(op1.numargs()): x = op1.getarg(i) y = op2.getarg(i) - if cache and y not in remap: + if cache and isinstance(y, Box) and y not in remap: remap[y] = x assert x.same_box(remap.get(y, y)) if op2.result in remap: diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1428,34 +1428,6 @@ # but we should not follow calls to that graph return self.do_residual_call(funcbox, argboxes, calldescr, pc) - def emit_resume_data(self, pos, in_call): - i = 0 - history = self.metainterp.history - boxes = self.get_list_of_active_boxes(in_call) - #xxx - #xxx - for i in range(self.jitcode.num_regs_i()): - box = self.registers_i[i] - if box is not None and (box, pos, i) not in self.resume_cache: - history.record(rop.RESUME_PUT, - [box, ConstInt(pos), ConstInt(i)], None) - self.resume_cache[(box, pos, i)] = None - start = self.jitcode.num_regs_i() - for i in range(self.jitcode.num_regs_r()): - box = self.registers_r[i] - if box is not None and (box, pos, i) not in self.resume_cache: - history.record(rop.RESUME_PUT, - [box, ConstInt(pos), ConstInt(i + start)], None) - self.resume_cache[(box, pos, i)] = None - start = self.jitcode.num_regs_i() + self.jitcode.num_regs_r() - for i in range(self.jitcode.num_regs_f()): - box = self.registers_f[i] - if box is not None and (box, pos, i) not in self.resume_cache: - history.record(rop.RESUME_PUT, - [box, ConstInt(pos), ConstInt(i + start)], None) - self.resume_cache[(box, pos, i)] = None - history.record(rop.RESUME_SET_PC, [ConstInt(self.pc)], None) - # ____________________________________________________________ class MetaInterpStaticData(object): diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ 
b/rpython/jit/metainterp/resume2.py @@ -295,12 +295,14 @@ self.process_box(index + start_f, frame_pos, frame.registers_i[index]) - history = self.metainterp.history + mi_history = self.metainterp.history cache = self.cachestack[frame_pos] for i in range(len(self.marked)): if self.marked[i] is None and cache[i] is not None: cache[i] = None - history.record(rop.RESUME_CLEAR, [ConstInt(frame_pos), + mi_history.record(rop.RESUME_CLEAR, [ConstInt(frame_pos), ConstInt(i)], None) - history.record(rop.RESUME_SET_PC, [ConstInt(resume_pc)], None) + if resume_pc == -1: + resume_pc = self.metainterp.framestack[-1].pc + mi_history.record(rop.RESUME_SET_PC, [ConstInt(resume_pc)], None) self.marked = None diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -261,9 +261,9 @@ expected = parse(""" [i0, i1, i2] enter_frame(-1, descr=jitcode) - resume_put(i0, 0, 0) + resume_put(i0, 0, 2) resume_put(i1, 0, 1) - resume_put(i2, 0, 2) + resume_put(i2, 0, 0) resume_set_pc(24) """, namespace={'jitcode': jitcode}) equaloplists(resume_ops, expected.operations, cache=True) @@ -292,8 +292,8 @@ expected = parse(""" [i0, i1, i2] enter_frame(-1, descr=jitcode) - resume_put(i0, 0, 0) - resume_put(i1, 0, 1) + resume_put(i0, 0, 1) + resume_put(i1, 0, 0) resume_set_pc(16) leave_frame() """, namespace={'jitcode': jitcode}) @@ -325,12 +325,12 @@ expected = parse(""" [i0, i1, i2] enter_frame(-1, descr=jitcode) - resume_put(i0, 0, 0) + resume_put(i0, 0, 2) resume_put(i1, 0, 1) - resume_put(i2, 0, 2) - resume_set_pc(-1) + resume_put(i2, 0, 0) + resume_set_pc(16) resume_clear(0, 1) - resume_set_pc(-1) + resume_set_pc(21) leave_frame() """, namespace={'jitcode': jitcode}) equaloplists(resume_ops, expected.operations, cache=True) diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -348,11 +348,11 @@ len(self.jitdrivers_sd)), \ "there are multiple jit_merge_points with the same jitdriver" - def split_graph_and_record_jitdriver(self, graph, block, pos): + def split_graph_and_record_jitdriver(self, orig_graph, block, pos): op = block.operations[pos] args = op.args[2:] s_binding = self.translator.annotator.binding - graph = copygraph(graph) + graph = copygraph(orig_graph) [jmpp] = find_jit_merge_points([graph]) graph.startblock = support.split_before_jit_merge_point(*jmpp) # XXX this is incredibly obscure, but this is sometiems necessary @@ -371,11 +371,11 @@ assert hasattr(graph, "func") graph.func._dont_inline_ = True graph.func._jit_unroll_safe_ = True - result_type = history.getkind(graph.getreturnvar().concretetype)[0] + result_type = history.getkind(orig_graph.getreturnvar().concretetype)[0] jd = JitDriverStaticData(block.operations[pos].args[1].value, graph, result_type) jd._portal_args_s = [s_binding(v) for v in args] - jd._jit_merge_point_in = graph + jd._jit_merge_point_in = orig_graph jd.portal_runner_ptr = "" self.jitdrivers_sd.append(jd) From noreply at buildbot.pypy.org Tue Jan 14 14:36:17 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 14 Jan 2014 14:36:17 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) few fixes to pass test_loop Message-ID: <20140114133617.63EDB1C0500@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68668:47e28c4e00ed Date: 2014-01-14 14:35 +0100 
http://bitbucket.org/pypy/pypy/changeset/47e28c4e00ed/ Log: (fijal, rguillebert) few fixes to pass test_loop diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -756,6 +756,12 @@ self.inputlocs = None self.operations = [] + def any_operation(self): + for op in self.operations: + if not op.is_resume(): + return True + return False + def record(self, opnum, argboxes, resbox, descr=None): op = ResOperation(opnum, argboxes, resbox, descr) self.operations.append(op) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -109,7 +109,9 @@ elif box.type == history.FLOAT: self.registers_f[index] = box else: raise AssertionError(box.type) - def get_current_position_info(self): + def get_current_position_info(self, resumepc=-1): + if resumepc != -1: + return self.jitcode.get_live_vars_info(resumepc) return self.jitcode.get_live_vars_info(self.pc) def replace_active_box_in_frame(self, oldbox, newbox): @@ -982,7 +984,7 @@ @arguments("int", "boxes3", "jitcode_position", "boxes3", "orgpc") def opimpl_jit_merge_point(self, jdindex, greenboxes, jcposition, redboxes, orgpc): - any_operation = len(self.metainterp.history.operations) > 0 + any_operation = self.metainterp.history.any_operation() jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex] self.verify_green_args(jitdriver_sd, greenboxes) self.debug_merge_point(jitdriver_sd, jdindex, @@ -1622,7 +1624,6 @@ self.retracing_from = -1 self.call_pure_results = args_dict_box() self.heapcache = HeapCache() - self.resumerecorder = ResumeRecorder(self) self.call_ids = [] self.current_call_id = 0 @@ -2038,6 +2039,7 @@ duplicates[box] = None def reached_loop_header(self, greenboxes, redboxes, resumedescr): + self.resumerecorder.leave_frame() self.heapcache.reset() duplicates = {} @@ -2102,6 +2104,7 @@ # Otherwise, no loop found so far, so continue tracing. 
start = len(self.history.operations) self.current_merge_points.append((live_arg_boxes, start)) + self.resumerecorder.enter_frame(-1, self.jitdriver_sd.mainjitcode) def _unpack_boxes(self, boxes, start, stop): ints = []; refs = []; floats = [] @@ -2335,6 +2338,7 @@ # ----- make a new frame ----- self.portal_call_depth = -1 # always one portal around self.framestack = [] + self.resumerecorder = ResumeRecorder(self, False) f = self.newframe(self.jitdriver_sd.mainjitcode) f.setup_call(original_boxes) assert self.portal_call_depth == 0 @@ -2352,6 +2356,7 @@ self.history = history.History() state = self.rebuild_state_after_failure(resumedescr, deadframe) self.history.inputframes, self.history.inputlocs = state + self.resumerecorder = ResumeRecorder(self, True) finally: rstack._stack_criticalcode_stop() diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -236,9 +236,12 @@ class ResumeRecorder(object): """ Created by metainterp to record the resume as we record operations """ - def __init__(self, metainterp): + def __init__(self, metainterp, is_bridge=False): self.metainterp = metainterp self.cachestack = [] + if is_bridge: + for frame in metainterp.framestack: + self.cachestack.append([None] * frame.jitcode.num_regs()) def enter_frame(self, pc, jitcode): self.metainterp.history.record(rop.ENTER_FRAME, [ConstInt(pc)], None, @@ -252,7 +255,8 @@ def resume_point(self, resumedescr, resumepc): framestack = self.metainterp.framestack for i, frame in enumerate(framestack): - self._emit_resume_data(resumepc, frame, i, not i == len(framestack)) + in_a_call = not i == len(framestack) - 1 + self._emit_resume_data(resumepc, frame, i, in_a_call) def process_box(self, index_in_frontend, frame_pos, box): cache = self.cachestack[frame_pos] @@ -278,10 +282,13 @@ elif argcode == 'f': frame.registers_f[index] = history.CONST_FZERO frame._result_argcode = '?' 
# done # - info = frame.get_current_position_info() + if not in_a_call and resume_pc != -1: + info = frame.get_current_position_info(resume_pc) + else: + info = frame.get_current_position_info() start_i = 0 - start_r = start_i + info.get_register_count_i() - start_f = start_r + info.get_register_count_r() + start_r = start_i + frame.jitcode.num_regs_i() + start_f = start_r + frame.jitcode.num_regs_r() # fill it now for i in range(info.get_register_count_i()): index = info.get_register_index_i(i) @@ -289,11 +296,11 @@ for i in range(info.get_register_count_r()): index = info.get_register_index_r(i) self.process_box(index + start_r, frame_pos, - frame.registers_i[index]) + frame.registers_r[index]) for i in range(info.get_register_count_f()): index = info.get_register_index_f(i) self.process_box(index + start_f, frame_pos, - frame.registers_i[index]) + frame.registers_f[index]) mi_history = self.metainterp.history cache = self.cachestack[frame_pos] From noreply at buildbot.pypy.org Tue Jan 14 14:57:16 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 14:57:16 +0100 (CET) Subject: [pypy-commit] stmgc c7: add tests Message-ID: <20140114135716.871BA1C30CA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r604:75dfcdb650aa Date: 2014-01-14 14:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/75dfcdb650aa/ Log: add tests diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -16,10 +16,20 @@ #define NB_PAGES (256*256) // 256MB #define NB_THREADS 2 -#define MAP_PAGES_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE) +#define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define LARGE_OBJECT_WORDS 36 #define NB_NURSERY_PAGES 1024 + +#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_THREADS) +#define READMARKER_END ((NB_PAGES * 4096UL) >> 4) +#define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) +#define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) +#define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) +#define FIRST_AFTER_NURSERY_PAGE (FIRST_OBJECT_PAGE + NB_NURSERY_PAGES) + + + #if defined(__i386__) || defined(__x86_64__) # define HAVE_FULL_EXCHANGE_INSN #endif @@ -64,6 +74,7 @@ asm("pause" : : : "memory"); } +#if 0 static void acquire_lock(int *lock) { while (__sync_lock_test_and_set(lock, 1) != 0) { @@ -92,6 +103,7 @@ { __sync_lock_release(lock); } +#endif static void write_fence(void) { @@ -102,12 +114,18 @@ #endif } -static bool _stm_was_read(object_t *obj) +bool _stm_was_read(object_t *obj) { read_marker_t *marker = (read_marker_t *)(((uintptr_t)obj) >> 4); return (marker->rm == _STM_TL1->transaction_read_version); } +bool _stm_was_written(object_t *obj) +{ + return obj->stm_flags & GCFLAG_WRITE_BARRIER; +} + + static void _stm_privatize(uintptr_t pagenum) { @@ -234,7 +252,6 @@ char* t0_addr = get_thread_base(0) + t0_offset; struct object_s *t0_obj = (struct object_s *)t0_addr; - int previous = __sync_lock_test_and_set(&t0_obj->stm_write_lock, 1); if (previous) abort(); /* XXX */ @@ -331,15 +348,6 @@ - - -#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_THREADS) -#define READMARKER_END ((NB_PAGES * 4096UL) >> 4) -#define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) -#define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) -#define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) -#define FIRST_AFTER_NURSERY_PAGE (FIRST_OBJECT_PAGE + NB_NURSERY_PAGES) - void stm_setup(void) { /* Check that some values are acceptable */ @@ -376,14 +384,16 @@ /* Pages in range(2, FIRST_READMARKER_PAGE) are never used 
*/ if (FIRST_READMARKER_PAGE > 2) mprotect(thread_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, - PROT_NONE); + PROT_NONE); - _STM_TL2->thread_num = i; - _STM_TL2->thread_base = thread_base; + struct _thread_local2_s *th = + (struct _thread_local2_s *)REAL_ADDRESS(thread_base, _STM_TL2); + + th->thread_num = i; + th->thread_base = thread_base; if (i > 0) { int res; - res = remap_file_pages( thread_base + FIRST_AFTER_NURSERY_PAGE * 4096UL, (NB_PAGES - FIRST_AFTER_NURSERY_PAGE) * 4096UL, @@ -414,12 +424,8 @@ int thread_num = __sync_fetch_and_add(&num_threads_started, 1); assert(thread_num < 2); /* only 2 threads for now */ - char *thread_base = get_thread_base(thread_num); - set_gs_register((uintptr_t)thread_base); - - assert(_STM_TL2->thread_num == thread_num); - assert(_STM_TL2->thread_base == thread_base); - + _stm_restore_local_state(thread_num); + _STM_TL2->modified_objects = stm_list_create(); assert(!_STM_TL2->running_transaction); } @@ -440,6 +446,14 @@ object_pages = NULL; } +void _stm_restore_local_state(int thread_num) +{ + char *thread_base = get_thread_base(thread_num); + set_gs_register((uintptr_t)thread_base); + + assert(_STM_TL2->thread_num == thread_num); + assert(_STM_TL2->thread_base == thread_base); +} static void reset_transaction_read_version(void) { @@ -504,6 +518,7 @@ _STM_TL2->running_transaction = 1; } +#if 0 static void update_new_objects_in_other_threads(uintptr_t pagenum, uint16_t start, uint16_t stop) { @@ -517,16 +532,17 @@ char *src = REAL_ADDRESS(_STM_TL2->thread_base, local_src); memcpy(dst, src, size); - ...; + abort(); } +#endif void stm_stop_transaction(void) { +#if 0 assert(_STM_TL2->running_transaction); write_fence(); /* see later in this function for why */ - acquire_lock(&undo_lock); if (leader_thread_num != _STM_TL2->thread_num) { /* non-leader thread */ @@ -617,7 +633,7 @@ } _STM_TL2->running_transaction = 0; - release_lock(&undo_lock); +#endif } void stm_abort_transaction(void) diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -44,7 +44,7 @@ uint8_t rm; }; -typedef intptr_t jmpbufptr_t[5]; /* for use with __builtin_setjmp() */ +typedef void* jmpbufptr_t[5]; /* for use with __builtin_setjmp() */ struct _thread_local1_s { jmpbufptr_t *jmpbufptr; @@ -78,5 +78,17 @@ /* must be provided by the user of this library */ extern size_t stm_object_size_rounded_up(object_t *); +void _stm_restore_local_state(int thread_num); +void _stm_teardown(void); +void _stm_teardown_thread(void); + +bool _stm_was_read(object_t *obj); +bool _stm_was_written(object_t *obj); + +object_t *stm_allocate(size_t size); +void stm_setup(void); +void stm_setup_thread(void); +void stm_start_transaction(jmpbufptr_t *jmpbufptr); +void stm_stop_transaction(void); #endif diff --git a/c7/list.c b/c7/list.c --- a/c7/list.c +++ b/c7/list.c @@ -1,6 +1,7 @@ #include #include #include +#include #include "list.h" diff --git a/c7/test/support.py b/c7/test/support.py new file mode 100644 --- /dev/null +++ b/c7/test/support.py @@ -0,0 +1,119 @@ +import os +import cffi + +# ---------- + +parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +header_files = [os.path.join(parent_dir, _n) for _n in + "core.h pagecopy.h list.h".split()] +source_files = [os.path.join(parent_dir, _n) for _n in + "core.c pagecopy.c list.c".split()] + +_pycache_ = os.path.join(parent_dir, 'test', '__pycache__') +if os.path.exists(_pycache_): + _fs = [_f for _f in os.listdir(_pycache_) if _f.startswith('_cffi_')] + if _fs: + _fsmtime = min(os.stat(os.path.join(_pycache_, 
_f)).st_mtime + for _f in _fs) + if any(os.stat(src).st_mtime >= _fsmtime + for src in header_files + source_files): + import shutil + shutil.rmtree(_pycache_) + +# ---------- + +ffi = cffi.FFI() +ffi.cdef(""" +typedef ... object_t; +typedef ... jmpbufptr_t; + +void stm_setup(void); +void stm_setup_thread(void); + +void stm_start_transaction(jmpbufptr_t *); +void stm_stop_transaction(void); +object_t *stm_allocate(size_t size); + +void stm_read(object_t *object); +void stm_write(object_t *object); +_Bool _stm_was_read(object_t *object); +_Bool _stm_was_written(object_t *object); + +void _stm_restore_local_state(int thread_num); +void _stm_teardown(void); +void _stm_teardown_thread(void); + + +void *memset(void *s, int c, size_t n); +""") + +lib = ffi.verify(''' +#include +#include "core.h" + +size_t stm_object_size_rounded_up(object_t * obj) { + return 16; +} + +''', sources=source_files, + define_macros=[('STM_TESTS', '1')], + undef_macros=['NDEBUG'], + include_dirs=[parent_dir], + extra_compile_args=['-g', '-O0', '-Werror'], + force_generic_engine=True) + +def intptr(p): + return int(ffi.cast("intptr_t", p)) + +def stm_allocate(size): + return ffi.cast("char *", lib.stm_allocate(size)) + +def stm_read(ptr): + lib.stm_read(ffi.cast("struct object_s *", ptr)) + +def stm_write(ptr): + lib.stm_write(ffi.cast("struct object_s *", ptr)) + +def _stm_was_read(ptr): + return lib._stm_was_read(ffi.cast("struct object_s *", ptr)) + +def _stm_was_written(ptr): + return lib._stm_was_written(ffi.cast("struct object_s *", ptr)) + +def stm_start_transaction(): + lib.stm_start_transaction() + +def stm_stop_transaction(expected_conflict): + res = lib.stm_stop_transaction() + if expected_conflict: + assert res == 0 + else: + assert res == 1 + + +class BaseTest(object): + + def setup_method(self, meth): + lib.stm_setup() + lib.stm_setup_thread() + self.saved_states = {} + self.current_proc = "main" + + def teardown_method(self, meth): + lib._stm_teardown_thread() + for saved_state in self.saved_states.values(): + lib._stm_restore_local_state(saved_state) + lib._stm_teardown_thread() + del self.saved_states + lib._stm_teardown() + + def switch(self, process_name): + self.saved_states[self.current_proc] = lib._stm_save_local_state() + try: + target_saved_state = self.saved_states.pop(process_name) + except KeyError: + lib.stm_setup_thread() + else: + lib._stm_restore_local_state(target_saved_state) + self.current_proc = process_name diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py new file mode 100644 --- /dev/null +++ b/c7/test/test_basic.py @@ -0,0 +1,250 @@ +from support import * + + +class TestBasic(BaseTest): + + def test_empty(self): + pass + + def test_thread_local_allocations(self): + p1 = stm_allocate(16) + p2 = stm_allocate(16) + assert intptr(p2) - intptr(p1) == 16 + p3 = stm_allocate(16) + assert intptr(p3) - intptr(p2) == 16 + # + self.switch("sub1") + p1s = stm_allocate(16) + assert abs(intptr(p1s) - intptr(p3)) >= 4000 + # + self.switch("main") + p4 = stm_allocate(16) + assert intptr(p4) - intptr(p3) == 16 + + def test_read_write_1(self): + stm_start_transaction() + p1 = stm_allocate(16) + p1[8] = 'a' + stm_stop_transaction(False) + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + assert p1[8] == 'a' + p1[8] = 'b' + # + self.switch("main") + stm_start_transaction() + stm_read(p1) + assert p1[8] == 'a' + # + self.switch("sub1") + stm_stop_transaction(False) + # + self.switch("main") + assert p1[8] == 'a' + + def test_start_transaction_updates(self): + 
stm_start_transaction() + p1 = stm_allocate(16) + p1[8] = 'a' + stm_stop_transaction(False) + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + assert p1[8] == 'a' + p1[8] = 'b' + stm_stop_transaction(False) + # + self.switch("main") + assert p1[8] == 'a' + stm_start_transaction() + assert p1[8] == 'b' + + def test_resolve_no_conflict_empty(self): + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_stop_transaction(False) + # + self.switch("main") + stm_stop_transaction(False) + + def test_resolve_no_conflict_write_only_in_already_committed(self): + stm_start_transaction() + p1 = stm_allocate(16) + p1[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + p1[8] = 'b' + stm_stop_transaction(False) + # + self.switch("main") + assert p1[8] == 'a' + stm_stop_transaction(False) + assert p1[8] == 'b' + + def test_resolve_write_read_conflict(self): + stm_start_transaction() + p1 = stm_allocate(16) + p1[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + p1[8] = 'b' + stm_stop_transaction(False) + # + self.switch("main") + stm_read(p1) + assert p1[8] == 'a' + stm_stop_transaction(expected_conflict=True) + assert p1[8] in ('a', 'b') + stm_start_transaction() + assert p1[8] == 'b' + + def test_resolve_write_write_conflict(self): + stm_start_transaction() + p1 = stm_allocate(16) + p1[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + p1[8] = 'b' + stm_stop_transaction(False) + # + self.switch("main") + assert p1[8] == 'a' + stm_write(p1) + p1[8] = 'c' + stm_stop_transaction(expected_conflict=True) + assert p1[8] in ('a', 'b') + stm_start_transaction() + assert p1[8] == 'b' + + def test_resolve_write_write_no_conflict(self): + stm_start_transaction() + p1 = stm_allocate(16) + p2 = stm_allocate(16) + p1[8] = 'a' + p2[8] = 'A' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + p1[8] = 'b' + stm_stop_transaction(False) + # + self.switch("main") + stm_write(p2) + p2[8] = 'C' + stm_stop_transaction(False) + assert p1[8] == 'b' + assert p2[8] == 'C' + + def test_page_extra_malloc_unchanged_page(self): + stm_start_transaction() + p1 = stm_allocate(16) + p2 = stm_allocate(16) + p1[8] = 'A' + p2[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + assert p1[8] == 'A' + p1[8] = 'B' + stm_stop_transaction(False) + # + self.switch("main") + stm_read(p2) + assert p2[8] == 'a' + p3 = stm_allocate(16) # goes into the same page, which is + p3[8] = ':' # not otherwise modified + stm_stop_transaction(False) + # + assert p1[8] == 'B' + assert p2[8] == 'a' + assert p3[8] == ':' + + def test_page_extra_malloc_changed_page_before(self): + stm_start_transaction() + p1 = stm_allocate(16) + p2 = stm_allocate(16) + p1[8] = 'A' + p2[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + assert p1[8] == 'A' + p1[8] = 'B' + stm_stop_transaction(False) + # + self.switch("main") + stm_write(p2) + assert p2[8] == 'a' + p2[8] = 'b' + p3 = stm_allocate(16) # goes into the same page, which I already + p3[8] = ':' # modified just above + stm_stop_transaction(False) + # + assert p1[8] == 'B' + assert p2[8] == 'b' 
+ assert p3[8] == ':' + + def test_page_extra_malloc_changed_page_after(self): + stm_start_transaction() + p1 = stm_allocate(16) + p2 = stm_allocate(16) + p1[8] = 'A' + p2[8] = 'a' + stm_stop_transaction(False) + stm_start_transaction() + # + self.switch("sub1") + stm_start_transaction() + stm_write(p1) + assert p1[8] == 'A' + p1[8] = 'B' + stm_stop_transaction(False) + # + self.switch("main") + p3 = stm_allocate(16) # goes into the same page, which I will + p3[8] = ':' # modify just below + stm_write(p2) + assert p2[8] == 'a' + p2[8] = 'b' + stm_stop_transaction(False) + # + assert p1[8] == 'B' + assert p2[8] == 'b' + assert p3[8] == ':' + + def test_overflow_write_history(self): + stm_start_transaction() + plist = [stm_allocate(n) for n in range(16, 256, 8)] + stm_stop_transaction(False) + # + for i in range(20): + stm_start_transaction() + for p in plist: + stm_write(p) + stm_stop_transaction(False) diff --git a/c7/test/test_bug.py b/c7/test/test_bug.py new file mode 100644 --- /dev/null +++ b/c7/test/test_bug.py @@ -0,0 +1,429 @@ +from support import * + + +class TestBug(BaseTest): + + def test_bug1(self): + stm_start_transaction() + p8 = stm_allocate(16) + p8[8] = '\x08' + stm_stop_transaction(False) + # + self.switch("sub1") + self.switch("main") + stm_start_transaction() + stm_write(p8) + p8[8] = '\x97' + # + self.switch("sub1") + stm_start_transaction() + stm_read(p8) + assert p8[8] == '\x08' + + def test_bug2(self): + stm_start_transaction() + p0 = stm_allocate(16) + p1 = stm_allocate(16) + p2 = stm_allocate(16) + p3 = stm_allocate(16) + p4 = stm_allocate(16) + p5 = stm_allocate(16) + p6 = stm_allocate(16) + p7 = stm_allocate(16) + p8 = stm_allocate(16) + p9 = stm_allocate(16) + p0[8] = '\x00' + p1[8] = '\x01' + p2[8] = '\x02' + p3[8] = '\x03' + p4[8] = '\x04' + p5[8] = '\x05' + p6[8] = '\x06' + p7[8] = '\x07' + p8[8] = '\x08' + p9[8] = '\t' + stm_stop_transaction(False) + self.switch(0) + self.switch(1) + self.switch(2) + # + self.switch(1) + stm_start_transaction() + stm_read(p7) + assert p7[8] == '\x07' + # + self.switch(1) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(1) + stm_read(p4) + assert p4[8] == '\x04' + # + self.switch(0) + stm_start_transaction() + stm_read(p3) + assert p3[8] == '\x03' + # + self.switch(2) + stm_start_transaction() + stm_read(p8) + assert p8[8] == '\x08' + stm_write(p8) + p8[8] = '\x08' + # + self.switch(0) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(0) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(1) + stm_read(p2) + assert p2[8] == '\x02' + # + self.switch(2) + stm_read(p2) + assert p2[8] == '\x02' + # + self.switch(2) + stm_read(p2) + assert p2[8] == '\x02' + stm_write(p2) + p2[8] = 'm' + # + self.switch(0) + stm_read(p4) + assert p4[8] == '\x04' + stm_write(p4) + p4[8] = '\xc5' + # + self.switch(2) + stm_read(p1) + assert p1[8] == '\x01' + # + self.switch(2) + stm_stop_transaction(False) #1 + # ['\x00', '\x01', 'm', '\x03', '\x04', '\x05', '\x06', '\x07', '\x08', '\t'] + # log: [8, 2] + # + self.switch(0) + stm_stop_transaction(False) #2 + # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] + # log: [4] + # + self.switch(0) + stm_start_transaction() + stm_read(p6) + assert p6[8] == '\x06' + # + self.switch(0) + stm_read(p4) + assert p4[8] == '\xc5' + # + self.switch(0) + stm_read(p4) + assert p4[8] == '\xc5' + # + self.switch(1) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(1) + stm_stop_transaction(True) #3 + # conflict: 0xdf0a8028 + # + self.switch(2) + 
stm_start_transaction() + stm_read(p6) + assert p6[8] == '\x06' + # + self.switch(1) + stm_start_transaction() + stm_read(p1) + assert p1[8] == '\x01' + # + self.switch(0) + stm_read(p4) + assert p4[8] == '\xc5' + stm_write(p4) + p4[8] = '\x0c' + # + self.switch(2) + stm_read(p2) + assert p2[8] == 'm' + stm_write(p2) + p2[8] = '\x81' + # + self.switch(2) + stm_read(p7) + assert p7[8] == '\x07' + # + self.switch(0) + stm_read(p5) + assert p5[8] == '\x05' + stm_write(p5) + p5[8] = 'Z' + # + self.switch(1) + stm_stop_transaction(False) #4 + # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] + # log: [] + # + self.switch(2) + stm_read(p8) + assert p8[8] == '\x08' + # + self.switch(0) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(1) + stm_start_transaction() + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(2) + stm_read(p9) + assert p9[8] == '\t' + stm_write(p9) + p9[8] = '\x81' + # + self.switch(0) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(1) + stm_read(p2) + assert p2[8] == 'm' + # + self.switch(2) + stm_read(p9) + assert p9[8] == '\x81' + stm_write(p9) + p9[8] = 'g' + # + self.switch(1) + stm_read(p3) + assert p3[8] == '\x03' + # + self.switch(2) + stm_read(p7) + assert p7[8] == '\x07' + # + self.switch(1) + stm_read(p1) + assert p1[8] == '\x01' + # + self.switch(0) + stm_read(p2) + assert p2[8] == 'm' + stm_write(p2) + p2[8] = 'T' + # + self.switch(2) + stm_read(p4) + assert p4[8] == '\xc5' + # + self.switch(2) + stm_read(p9) + assert p9[8] == 'g' + # + self.switch(2) + stm_read(p1) + assert p1[8] == '\x01' + stm_write(p1) + p1[8] = 'L' + # + self.switch(0) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(2) + stm_read(p0) + assert p0[8] == '\x00' + stm_write(p0) + p0[8] = '\xf3' + # + self.switch(1) + stm_stop_transaction(False) #5 + # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] + # log: [] + # + self.switch(0) + stm_read(p1) + assert p1[8] == '\x01' + stm_write(p1) + p1[8] = '*' + # + self.switch(1) + stm_start_transaction() + stm_read(p3) + assert p3[8] == '\x03' + stm_write(p3) + p3[8] = '\xd2' + # + self.switch(0) + stm_stop_transaction(False) #6 + # ['\x00', '*', 'T', '\x03', '\x0c', 'Z', '\x06', '\x07', '\x08', '\t'] + # log: [1, 2, 4, 5] + # + self.switch(1) + stm_read(p7) + assert p7[8] == '\x07' + stm_write(p7) + p7[8] = '.' 
+ # + self.switch(0) + stm_start_transaction() + stm_read(p7) + assert p7[8] == '\x07' + # + self.switch(1) + stm_read(p2) + assert p2[8] == 'm' + stm_write(p2) + p2[8] = '\xe9' + # + self.switch(1) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(0) + stm_read(p1) + assert p1[8] == '*' + # + self.switch(0) + stm_read(p8) + assert p8[8] == '\x08' + stm_write(p8) + p8[8] = 'X' + # + self.switch(2) + stm_stop_transaction(True) #7 + # conflict: 0xdf0a8018 + # + self.switch(1) + stm_read(p9) + assert p9[8] == '\t' + # + self.switch(0) + stm_read(p8) + assert p8[8] == 'X' + # + self.switch(1) + stm_read(p4) + assert p4[8] == '\xc5' + stm_write(p4) + p4[8] = '\xb2' + # + self.switch(0) + stm_read(p9) + assert p9[8] == '\t' + # + self.switch(2) + stm_start_transaction() + stm_read(p5) + assert p5[8] == 'Z' + stm_write(p5) + p5[8] = '\xfa' + # + self.switch(2) + stm_read(p3) + assert p3[8] == '\x03' + # + self.switch(1) + stm_read(p9) + assert p9[8] == '\t' + # + self.switch(1) + stm_read(p8) + assert p8[8] == '\x08' + stm_write(p8) + p8[8] = 'g' + # + self.switch(1) + stm_read(p8) + assert p8[8] == 'g' + # + self.switch(2) + stm_read(p5) + assert p5[8] == '\xfa' + stm_write(p5) + p5[8] = '\x86' + # + self.switch(2) + stm_read(p6) + assert p6[8] == '\x06' + # + self.switch(1) + stm_read(p4) + assert p4[8] == '\xb2' + stm_write(p4) + p4[8] = '\xce' + # + self.switch(2) + stm_read(p2) + assert p2[8] == 'T' + stm_write(p2) + p2[8] = 'Q' + # + self.switch(1) + stm_stop_transaction(True) #8 + # conflict: 0xdf0a8028 + # + self.switch(2) + stm_stop_transaction(False) #9 + # ['\x00', '*', 'Q', '\x03', '\x0c', '\x86', '\x06', '\x07', '\x08', '\t'] + # log: [2, 5] + # + self.switch(0) + stm_read(p0) + assert p0[8] == '\x00' + # + self.switch(1) + stm_start_transaction() + stm_read(p3) + assert p3[8] == '\x03' + # + self.switch(1) + stm_read(p5) + assert p5[8] == '\x86' + # + self.switch(2) + stm_start_transaction() + stm_read(p4) + assert p4[8] == '\x0c' + stm_write(p4) + p4[8] = '{' + # + self.switch(1) + stm_read(p2) + assert p2[8] == 'Q' + # + self.switch(2) + stm_read(p3) + assert p3[8] == '\x03' + stm_write(p3) + p3[8] = 'V' + # + self.switch(1) + stm_stop_transaction(False) #10 + # ['\x00', '*', 'Q', '\x03', '\x0c', '\x86', '\x06', '\x07', '\x08', '\t'] + # log: [] + # + self.switch(1) + stm_start_transaction() + stm_read(p7) + assert p7[8] == '\x07' + # + self.switch(2) + stm_read(p0) + assert p0[8] == '\x00' + stm_write(p0) + p0[8] = 'P' + # + self.switch(0) + stm_stop_transaction(False) #11 diff --git a/c7/test/test_largemalloc.py b/c7/test/test_largemalloc.py new file mode 100644 --- /dev/null +++ b/c7/test/test_largemalloc.py @@ -0,0 +1,114 @@ +from support import * +import sys, random + + +class TestLargeMalloc(object): + + def setup_method(self, meth): + size = 1024 * 1024 # 1MB + self.rawmem = ffi.new("char[]", size) + self.size = size + lib.memset(self.rawmem, 0xcd, size) + lib.stm_largemalloc_init(self.rawmem, size) + + def test_simple(self): + d1 = lib.stm_large_malloc(7000) + d2 = lib.stm_large_malloc(8000) + assert d2 - d1 == 7016 + d3 = lib.stm_large_malloc(9000) + assert d3 - d2 == 8016 + # + lib.stm_large_free(d1) + lib.stm_large_free(d2) + # + d4 = lib.stm_large_malloc(600) + assert d4 == d1 + d5 = lib.stm_large_malloc(600) + assert d5 == d4 + 616 + # + lib.stm_large_free(d5) + # + d6 = lib.stm_large_malloc(600) + assert d6 == d5 + # + lib.stm_large_free(d4) + # + d7 = lib.stm_large_malloc(608) + assert d7 == d6 + 616 + d8 = lib.stm_large_malloc(600) + assert d8 == d4 + # + 
lib._stm_large_dump() + + def test_overflow_1(self): + d = lib.stm_large_malloc(self.size - 32) + assert d == self.rawmem + 16 + lib._stm_large_dump() + + def test_overflow_2(self): + d = lib.stm_large_malloc(self.size - 16) + assert d == ffi.NULL + lib._stm_large_dump() + + def test_overflow_3(self): + d = lib.stm_large_malloc(sys.maxint & ~7) + assert d == ffi.NULL + lib._stm_large_dump() + + def test_resize_arena_reduce_1(self): + r = lib.stm_largemalloc_resize_arena(self.size - 32) + assert r == 1 + d = lib.stm_large_malloc(self.size - 32) + assert d == ffi.NULL + lib._stm_large_dump() + + def test_resize_arena_reduce_2(self): + lib.stm_large_malloc(self.size // 2 - 64) + r = lib.stm_largemalloc_resize_arena(self.size // 2) + assert r == 1 + lib._stm_large_dump() + + def test_resize_arena_reduce_3(self): + d1 = lib.stm_large_malloc(128) + r = lib.stm_largemalloc_resize_arena(self.size // 2) + assert r == 1 + d2 = lib.stm_large_malloc(128) + assert d1 == self.rawmem + 16 + assert d2 == d1 + 128 + 16 + lib._stm_large_dump() + + def test_resize_arena_cannot_reduce_1(self): + lib.stm_large_malloc(self.size // 2) + r = lib.stm_largemalloc_resize_arena(self.size // 2) + assert r == 0 + lib._stm_large_dump() + + def test_resize_arena_cannot_reduce_2(self): + lib.stm_large_malloc(self.size // 2 - 56) + r = lib.stm_largemalloc_resize_arena(self.size // 2) + assert r == 0 + lib._stm_large_dump() + + def test_random(self): + r = random.Random(1007) + p = [] + for i in range(100000): + if len(p) != 0 and (len(p) > 100 or r.randrange(0, 5) < 2): + index = r.randrange(0, len(p)) + d, length, content1, content2 = p.pop(index) + print ' free %5d (%s)' % (length, d) + assert d[0] == content1 + assert d[length - 1] == content2 + lib.stm_large_free(d) + else: + sz = r.randrange(8, 160) * 8 + d = lib.stm_large_malloc(sz) + print 'alloc %5d (%s)' % (sz, d) + assert d != ffi.NULL + lib.memset(d, 0xdd, sz) + content1 = chr(r.randrange(0, 256)) + content2 = chr(r.randrange(0, 256)) + d[0] = content1 + d[sz - 1] = content2 + p.append((d, sz, content1, content2)) + lib._stm_large_dump() diff --git a/c7/test/test_random.py b/c7/test/test_random.py new file mode 100644 --- /dev/null +++ b/c7/test/test_random.py @@ -0,0 +1,85 @@ +from support import * +import sys, random + + +class TestRandom(BaseTest): + + def test_fixed_16_bytes_objects(self): + rnd = random.Random(1010) + + N_OBJECTS = 10 + N_THREADS = 3 + print >> sys.stderr, 'stm_start_transaction()' + stm_start_transaction() + plist = [stm_allocate(16) for i in range(N_OBJECTS)] + read_sets = [{} for i in range(N_THREADS)] + write_sets = [{} for i in range(N_THREADS)] + active_transactions = {} + + for i in range(N_OBJECTS): + print >> sys.stderr, 'p%d = stm_allocate(16)' % i + for i in range(N_OBJECTS): + print >> sys.stderr, 'p%d[8] = %r' % (i, chr(i)) + plist[i][8] = chr(i) + head_state = [[chr(i) for i in range(N_OBJECTS)]] + commit_log = [] + print >> sys.stderr, 'stm_stop_transaction(False)' + stm_stop_transaction(False) + + for i in range(N_THREADS): + print >> sys.stderr, 'self.switch(%d)' % i + self.switch(i) + stop_count = 1 + + for i in range(10000): + n_thread = rnd.randrange(0, N_THREADS) + print >> sys.stderr, '#\nself.switch(%d)' % n_thread + self.switch(n_thread) + if n_thread not in active_transactions: + print >> sys.stderr, 'stm_start_transaction()' + stm_start_transaction() + active_transactions[n_thread] = len(commit_log) + + action = rnd.randrange(0, 7) + if action < 6: + is_write = action >= 4 + i = rnd.randrange(0, N_OBJECTS) + print 
>> sys.stderr, "stm_read(p%d)" % i + stm_read(plist[i]) + got = plist[i][8] + print >> sys.stderr, "assert p%d[8] ==" % i, + my_head_state = head_state[active_transactions[n_thread]] + prev = read_sets[n_thread].setdefault(i, my_head_state[i]) + print >> sys.stderr, "%r" % (prev,) + assert got == prev + # + if is_write: + print >> sys.stderr, 'stm_write(p%d)' % i + stm_write(plist[i]) + newval = chr(rnd.randrange(0, 256)) + print >> sys.stderr, 'p%d[8] = %r' % (i, newval) + plist[i][8] = newval + read_sets[n_thread][i] = write_sets[n_thread][i] = newval + else: + src_index = active_transactions.pop(n_thread) + conflict = False + for i in range(src_index, len(commit_log)): + for j in commit_log[i]: + if j in read_sets[n_thread]: + conflict = True + print >> sys.stderr, "stm_stop_transaction(%r) #%d" % ( + conflict, stop_count) + stop_count += 1 + stm_stop_transaction(conflict) + # + if not conflict: + hs = head_state[-1][:] + for i, newval in write_sets[n_thread].items(): + hs[i] = newval + assert plist[i][8] == newval + head_state.append(hs) + commit_log.append(write_sets[n_thread].keys()) + print >> sys.stderr, '#', head_state[-1] + print >> sys.stderr, '# log:', commit_log[-1] + write_sets[n_thread].clear() + read_sets[n_thread].clear() From noreply at buildbot.pypy.org Tue Jan 14 15:16:33 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 15:16:33 +0100 (CET) Subject: [pypy-commit] stmgc c7: address conversion functions Message-ID: <20140114141633.4C0C71C0500@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r605:0789b61edcca Date: 2014-01-14 15:16 +0100 http://bitbucket.org/pypy/stmgc/changeset/0789b61edcca/ Log: address conversion functions diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -182,11 +182,33 @@ return REAL_ADDRESS(_STM_TL2->thread_base, src); } + static char *get_thread_base(long thread_num) { return object_pages + thread_num * (NB_PAGES * 4096UL); } + +char *_stm_real_address(object_t *o) +{ + if (o == NULL) + return NULL; + assert(FIRST_OBJECT_PAGE * 4096 <= (uintptr_t)o + && (uintptr_t)o < NB_PAGES * 4096); + return real_address((uintptr_t)o); +} + +object_t *_stm_tl_address(char *ptr) +{ + if (ptr == NULL) + return NULL; + + uintptr_t res = ptr - _STM_TL2->thread_base; + assert(FIRST_OBJECT_PAGE * 4096 <= res + && res < NB_PAGES * 4096); + return (object_t*)res; +} + void stm_abort_transaction(void); enum detect_conflicts_e { CANNOT_CONFLICT, CAN_CONFLICT }; diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -90,5 +90,6 @@ void stm_setup_thread(void); void stm_start_transaction(jmpbufptr_t *jmpbufptr); void stm_stop_transaction(void); - +char *_stm_real_address(object_t *o); +object_t *_stm_tl_address(char *ptr); #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -2,6 +2,7 @@ import cffi # ---------- +os.environ['CC'] = 'clang' parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) @@ -44,6 +45,8 @@ void _stm_teardown(void); void _stm_teardown_thread(void); +char *_stm_real_address(object_t *o); +object_t *_stm_tl_address(char *ptr); void *memset(void *s, int c, size_t n); """) @@ -63,23 +66,21 @@ extra_compile_args=['-g', '-O0', '-Werror'], force_generic_engine=True) -def intptr(p): - return int(ffi.cast("intptr_t", p)) def stm_allocate(size): - return ffi.cast("char *", lib.stm_allocate(size)) + return lib._stm_real_address(lib.stm_allocate(size)) def stm_read(ptr): - lib.stm_read(ffi.cast("struct object_s 
*", ptr)) + lib.stm_read(lib._stm_tl_address(ptr)) def stm_write(ptr): - lib.stm_write(ffi.cast("struct object_s *", ptr)) + lib.stm_write(lib._stm_tl_address(ptr)) def _stm_was_read(ptr): - return lib._stm_was_read(ffi.cast("struct object_s *", ptr)) + return lib._stm_was_read(lib._stm_tl_address(ptr)) def _stm_was_written(ptr): - return lib._stm_was_written(ffi.cast("struct object_s *", ptr)) + return lib._stm_was_written(lib._stm_tl_address(ptr)) def stm_start_transaction(): lib.stm_start_transaction() From noreply at buildbot.pypy.org Tue Jan 14 15:24:05 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 14 Jan 2014 15:24:05 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) Deal with duplicates in inputframes Message-ID: <20140114142405.355241C0500@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68669:3d4afd8a844a Date: 2014-01-14 15:23 +0100 http://bitbucket.org/pypy/pypy/changeset/3d4afd8a844a/ Log: (fijal, rguillebert) Deal with duplicates in inputframes diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -118,12 +118,14 @@ coeff = 1 else: coeff = 2 + all = {} for i, frame in enumerate(inputframes): inputlocs = loc_positions[i] assert len(inputlocs) == len(frame) for j, item in enumerate(frame): - if item is None or isinstance(item, Const): + if item is None or isinstance(item, Const) or item in all: continue + all[item] = None pos = inputlocs[j] if pos < GPR_REGS: locs.append(self.cpu.gen_regs[pos]) diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -646,6 +646,7 @@ """ locs = [] base_ofs = self.assembler.cpu.get_baseofs_of_frame_field() + for box in inputargs: assert isinstance(box, Box) loc = self.fm.get_new_loc(box) diff --git a/rpython/jit/backend/resumebuilder.py b/rpython/jit/backend/resumebuilder.py --- a/rpython/jit/backend/resumebuilder.py +++ b/rpython/jit/backend/resumebuilder.py @@ -71,16 +71,19 @@ self.virtuals = {} if inputlocs is not None: i = 0 + all = {} for frame_pos, frame in enumerate(inputframes): for pos_in_frame, box in enumerate(frame): - if box is None: + if box is None or isinstance(box, Const) or box in all: loc_pos = -1 else: loc_pos = inputlocs[i].get_jitframe_position() i += 1 self.frontend_pos[box] = (ConstInt(frame_pos), ConstInt(pos_in_frame)) - self.current_attachment[box] = loc_pos + all[box] = None + if box not in self.current_attachment: + self.current_attachment[box] = loc_pos def process(self, op): if op.getopnum() == rop.RESUME_PUT: @@ -144,16 +147,21 @@ def flatten(inputframes): count = 0 + all = {} for frame in inputframes: for x in frame: - if x is not None and not isinstance(x, Const): + if x is not None and not isinstance(x, Const) and x not in all: count += 1 + all[x] = None inputargs = [None] * count pos = 0 + all = {} for frame in inputframes: for item in frame: - if item is not None and not isinstance(item, Const): + if (item is not None and not isinstance(item, Const) and + item not in all): inputargs[pos] = item + all[item] = None pos += 1 return inputargs diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -161,40 +161,50 @@ 
AbstractResumeReader.__init__(self) def store_int_box(self, res, pos, miframe, i, jitframe_pos): - if jitframe_pos == -1: + if jitframe_pos in self.cache: + box = self.cache[jitframe_pos] + elif jitframe_pos == -1: return - if jitframe_pos >= 0: + elif jitframe_pos >= 0: box = BoxInt(self.metainterp.cpu.get_int_value(self.deadframe, jitframe_pos)) elif jitframe_pos <= -2: box = self.consts[-jitframe_pos - 2] miframe.registers_i[i] = box + self.cache[jitframe_pos] = box res[-1][pos] = box def store_ref_box(self, res, pos, miframe, i, jitframe_pos): - if jitframe_pos == -1: + if jitframe_pos in self.cache: + box = self.cache[jitframe_pos] + elif jitframe_pos == -1: return - if jitframe_pos >= 0: + elif jitframe_pos >= 0: box = BoxPtr(self.metainterp.cpu.get_ref_value(self.deadframe, jitframe_pos)) elif jitframe_pos <= -2: box = self.consts[-jitframe_pos - 2] miframe.registers_r[i] = box + self.cache[jitframe_pos] = box res[-1][pos] = box def store_float_box(self, res, pos, miframe, i, jitframe_pos): - if jitframe_pos == -1: + if jitframe_pos in self.cache: + box = self.cache[jitframe_pos] + elif jitframe_pos == -1: return - if jitframe_pos >= 0: + elif jitframe_pos >= 0: box = BoxFloat(self.metainterp.cpu.get_float_value(self.deadframe, jitframe_pos)) elif jitframe_pos <= -2: box = self.consts[-jitframe_pos - 2] miframe.registers_f[i] = box + self.cache[jitframe_pos] = box res[-1][pos] = box def finish(self): res = [] + self.cache = {} for frame in self.framestack: jitcode = frame.jitcode res.append([None] * jitcode.num_regs()) @@ -210,6 +220,7 @@ for i in range(jitcode.num_regs_f()): self.store_float_box(res, pos, miframe, i, frame.registers[pos]) pos += 1 + self.cache = None return res, [f.registers for f in self.framestack] def rebuild_from_resumedata(metainterp, deadframe, faildescr): From noreply at buildbot.pypy.org Tue Jan 14 15:34:31 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 15:34:31 +0100 (CET) Subject: [pypy-commit] stmgc c7: first allocs working Message-ID: <20140114143431.D7EB51C0500@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r606:22e3df474bf9 Date: 2014-01-14 15:28 +0100 http://bitbucket.org/pypy/stmgc/changeset/22e3df474bf9/ Log: first allocs working diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -188,6 +188,12 @@ return object_pages + thread_num * (NB_PAGES * 4096UL); } +bool _stm_is_in_nursery(char *ptr) +{ + object_t * o = _stm_tl_address(ptr); + assert(o); + return (uintptr_t)o < FIRST_AFTER_NURSERY_PAGE * 4096; +} char *_stm_real_address(object_t *o) { @@ -358,6 +364,7 @@ localchar_t *current = _STM_TL2->nursery_current; localchar_t *new_current = current + size; + _STM_TL2->nursery_current = new_current; if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { /* XXX: do minor collection */ abort(); @@ -447,6 +454,8 @@ assert(thread_num < 2); /* only 2 threads for now */ _stm_restore_local_state(thread_num); + + _STM_TL2->nursery_current = (localchar_t*)(FIRST_OBJECT_PAGE * 4096); _STM_TL2->modified_objects = stm_list_create(); assert(!_STM_TL2->running_transaction); diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -92,4 +92,6 @@ void stm_stop_transaction(void); char *_stm_real_address(object_t *o); object_t *_stm_tl_address(char *ptr); + +bool _stm_is_in_nursery(char *ptr); #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -47,6 +47,7 @@ char *_stm_real_address(object_t *o); object_t 
*_stm_tl_address(char *ptr); +bool _stm_is_in_nursery(char *ptr); void *memset(void *s, int c, size_t n); """) @@ -67,6 +68,9 @@ force_generic_engine=True) +def is_in_nursery(ptr): + return lib._stm_is_in_nursery(ptr) + def stm_allocate(size): return lib._stm_real_address(lib.stm_allocate(size)) diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -5,21 +5,23 @@ def test_empty(self): pass - + def test_thread_local_allocations(self): p1 = stm_allocate(16) p2 = stm_allocate(16) - assert intptr(p2) - intptr(p1) == 16 + assert is_in_nursery(p1) + assert is_in_nursery(p2) + assert p2 - p1 == 16 p3 = stm_allocate(16) - assert intptr(p3) - intptr(p2) == 16 + assert p3 - p2 == 16 # self.switch("sub1") p1s = stm_allocate(16) - assert abs(intptr(p1s) - intptr(p3)) >= 4000 + assert abs(p1s - p3) >= 4000 # self.switch("main") p4 = stm_allocate(16) - assert intptr(p4) - intptr(p3) == 16 + assert p4 - p3 == 16 def test_read_write_1(self): stm_start_transaction() From noreply at buildbot.pypy.org Tue Jan 14 15:34:33 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 15:34:33 +0100 (CET) Subject: [pypy-commit] stmgc c7: re-implement thread switching Message-ID: <20140114143433.0EA531C0500@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r607:3b6bf19bff12 Date: 2014-01-14 15:34 +0100 http://bitbucket.org/pypy/stmgc/changeset/3b6bf19bff12/ Log: re-implement thread switching diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -102,23 +102,19 @@ def setup_method(self, meth): lib.stm_setup() lib.stm_setup_thread() - self.saved_states = {} - self.current_proc = "main" + lib.stm_setup_thread() + lib._stm_restore_local_state(0) + self.current_thread = 0 def teardown_method(self, meth): + lib._stm_restore_local_state(1) lib._stm_teardown_thread() - for saved_state in self.saved_states.values(): - lib._stm_restore_local_state(saved_state) - lib._stm_teardown_thread() - del self.saved_states + lib._stm_restore_local_state(0) + lib._stm_teardown_thread() lib._stm_teardown() - def switch(self, process_name): - self.saved_states[self.current_proc] = lib._stm_save_local_state() - try: - target_saved_state = self.saved_states.pop(process_name) - except KeyError: - lib.stm_setup_thread() - else: - lib._stm_restore_local_state(target_saved_state) - self.current_proc = process_name + def switch(self, thread_num): + assert thread_num != self.current_thread + lib._stm_restore_local_state(thread_num) + self.current_thread = thread_num + diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -15,11 +15,11 @@ p3 = stm_allocate(16) assert p3 - p2 == 16 # - self.switch("sub1") + self.switch(1) p1s = stm_allocate(16) assert abs(p1s - p3) >= 4000 # - self.switch("main") + self.switch(0) p4 = stm_allocate(16) assert p4 - p3 == 16 From noreply at buildbot.pypy.org Tue Jan 14 16:15:20 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 16:15:20 +0100 (CET) Subject: [pypy-commit] stmgc c7: stop_transaction hacking for tests Message-ID: <20140114151520.216A21C0500@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r608:cb41605afd90 Date: 2014-01-14 16:15 +0100 http://bitbucket.org/pypy/stmgc/changeset/cb41605afd90/ Log: stop_transaction hacking for tests diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -463,7 +463,6 @@ void _stm_teardown_thread(void) { - 
assert(!_STM_TL2->running_transaction); wait_until_updated(); stm_list_free(_STM_TL2->modified_objects); _STM_TL2->modified_objects = NULL; @@ -538,7 +537,6 @@ wait_until_updated(); stm_list_clear(_STM_TL2->modified_objects); - assert(stm_list_is_empty(_STM_TL2->new_object_ranges)); /* check that there is no stm_abort() in the following maybe_update() */ _STM_TL1->jmpbufptr = NULL; @@ -569,8 +567,8 @@ void stm_stop_transaction(void) { + assert(_STM_TL2->running_transaction); #if 0 - assert(_STM_TL2->running_transaction); write_fence(); /* see later in this function for why */ @@ -662,9 +660,8 @@ } } } - +#endif _STM_TL2->running_transaction = 0; -#endif } void stm_abort_transaction(void) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -33,7 +33,7 @@ void stm_setup_thread(void); void stm_start_transaction(jmpbufptr_t *); -void stm_stop_transaction(void); +bool _stm_stop_transaction(void); object_t *stm_allocate(size_t size); void stm_read(object_t *object); @@ -54,12 +54,27 @@ lib = ffi.verify(''' #include +#include + #include "core.h" size_t stm_object_size_rounded_up(object_t * obj) { return 16; } +bool _stm_stop_transaction(void) { + jmpbufptr_t here; + if (__builtin_setjmp(here) == 0) { + assert(_STM_TL1->jmpbufptr == (jmpbufptr_t*)-1); + _STM_TL1->jmpbufptr = &here; + stm_stop_transaction(); + _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + return 0; + } + _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + return 1; +} + ''', sources=source_files, define_macros=[('STM_TESTS', '1')], undef_macros=['NDEBUG'], @@ -87,14 +102,14 @@ return lib._stm_was_written(lib._stm_tl_address(ptr)) def stm_start_transaction(): - lib.stm_start_transaction() + lib.stm_start_transaction(ffi.cast("jmpbufptr_t*", -1)) -def stm_stop_transaction(expected_conflict): - res = lib.stm_stop_transaction() +def stm_stop_transaction(expected_conflict=False): + res = lib._stm_stop_transaction() if expected_conflict: + assert res == 1 + else: assert res == 0 - else: - assert res == 1 class BaseTest(object): diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -23,6 +23,14 @@ p4 = stm_allocate(16) assert p4 - p3 == 16 + def test_simple(self): + stm_start_transaction() + self.switch(1) + stm_start_transaction() + stm_stop_transaction() + self.switch(0) + stm_stop_transaction() + def test_read_write_1(self): stm_start_transaction() p1 = stm_allocate(16) From noreply at buildbot.pypy.org Tue Jan 14 16:22:41 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 16:22:41 +0100 (CET) Subject: [pypy-commit] stmgc c7: simple tests Message-ID: <20140114152241.630B41C0500@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r609:a0a36a7daa1f Date: 2014-01-14 16:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/a0a36a7daa1f/ Log: simple tests diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -122,7 +122,9 @@ bool _stm_was_written(object_t *obj) { - return obj->stm_flags & GCFLAG_WRITE_BARRIER; + /* if the obj was written to in the current transaction + and doesn't trigger the write-barrier slowpath */ + return !(obj->stm_flags & GCFLAG_WRITE_BARRIER); } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -95,10 +95,10 @@ def stm_write(ptr): lib.stm_write(lib._stm_tl_address(ptr)) -def _stm_was_read(ptr): +def stm_was_read(ptr): return lib._stm_was_read(lib._stm_tl_address(ptr)) -def _stm_was_written(ptr): +def 
stm_was_written(ptr): return lib._stm_was_written(lib._stm_tl_address(ptr)) def stm_start_transaction(): diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -23,7 +23,7 @@ p4 = stm_allocate(16) assert p4 - p3 == 16 - def test_simple(self): + def test_transaction_start_stop(self): stm_start_transaction() self.switch(1) stm_start_transaction() @@ -31,6 +31,19 @@ self.switch(0) stm_stop_transaction() + def test_simple_read(self): + stm_start_transaction() + p1 = stm_allocate(16) + stm_read(p1) + assert stm_was_read(p1) + + def test_simple_write(self): + stm_start_transaction() + p1 = stm_allocate(16) + assert stm_was_written(p1) + stm_write(p1) + assert stm_was_written(p1) + def test_read_write_1(self): stm_start_transaction() p1 = stm_allocate(16) From noreply at buildbot.pypy.org Tue Jan 14 16:48:58 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 14 Jan 2014 16:48:58 +0100 (CET) Subject: [pypy-commit] stmgc c7: stm_alloc_old Message-ID: <20140114154858.E33601C0632@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r610:607e5b19ffe2 Date: 2014-01-14 16:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/607e5b19ffe2/ Log: stm_alloc_old diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -68,6 +68,7 @@ /************************************************************/ +uintptr_t _stm_reserve_page(void); static void spin_loop(void) { @@ -128,6 +129,15 @@ } +object_t *_stm_allocate_old(size_t size) +{ + assert(size <= 4096); + localchar_t* addr = (localchar_t*)(_stm_reserve_page() * 4096); + object_t* o = (object_t*)addr; + o->stm_flags |= GCFLAG_WRITE_BARRIER; + return o; +} + static void _stm_privatize(uintptr_t pagenum) { diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -31,6 +31,9 @@ enum { GCFLAG_WRITE_BARRIER = (1 << 0), + /* set if the write-barrier slowpath needs to trigger. set on all + old objects if there was no write-barrier on it in the same + transaction and no collection inbetween. 
*/ }; struct object_s { @@ -94,4 +97,5 @@ object_t *_stm_tl_address(char *ptr); bool _stm_is_in_nursery(char *ptr); +object_t *_stm_allocate_old(size_t size); #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -48,6 +48,7 @@ char *_stm_real_address(object_t *o); object_t *_stm_tl_address(char *ptr); bool _stm_is_in_nursery(char *ptr); +object_t *_stm_allocate_old(size_t size); void *memset(void *s, int c, size_t n); """) @@ -86,9 +87,15 @@ def is_in_nursery(ptr): return lib._stm_is_in_nursery(ptr) +def stm_allocate_old(size): + return lib._stm_real_address(lib._stm_allocate_old(size)) + def stm_allocate(size): return lib._stm_real_address(lib.stm_allocate(size)) +def stm_get_tl_address(ptr): + return int(ffi.cast('uintptr_t', lib._stm_tl_address(ptr))) + def stm_read(ptr): lib.stm_read(lib._stm_tl_address(ptr)) diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -44,6 +44,18 @@ stm_write(p1) assert stm_was_written(p1) + def test_write_on_old(self): + p1 = stm_allocate_old(16) + p1tl = stm_get_tl_address(p1) + self.switch(1) + p2 = stm_allocate_old(16) + p2tl = stm_get_tl_address(p2) + assert p1tl != p2tl + + + + + def test_read_write_1(self): stm_start_transaction() p1 = stm_allocate(16) From noreply at buildbot.pypy.org Tue Jan 14 16:55:51 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 14 Jan 2014 16:55:51 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: disable unrolling for now + fixes Message-ID: <20140114155551.70F561C0632@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68670:6debbdda22a5 Date: 2014-01-14 16:55 +0100 http://bitbucket.org/pypy/pypy/changeset/6debbdda22a5/ Log: disable unrolling for now + fixes diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -170,9 +170,6 @@ if not loop.quasi_immutable_deps: loop.quasi_immutable_deps = None - for frame in loop.inputframes: - for box in frame: - assert isinstance(box, Box) loop.original_jitcell_token = jitcell_token for label in all_target_tokens: @@ -246,10 +243,6 @@ if quasi_immutable_deps: loop.quasi_immutable_deps = quasi_immutable_deps - for frame in loop.inputframes: - for box in frame: - assert isinstance(box, Box) - target_token = loop.operations[-1].getdescr() resumekey.compile_and_attach(metainterp, loop) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -616,6 +616,7 @@ class TreeLoop(object): inputframes = None + inputargs = None inputlocs = None operations = None call_pure_results = None diff --git a/rpython/jit/metainterp/inliner.py b/rpython/jit/metainterp/inliner.py --- a/rpython/jit/metainterp/inliner.py +++ b/rpython/jit/metainterp/inliner.py @@ -20,38 +20,16 @@ args = newop.getarglist() newop.initarglist([self.inline_arg(a) for a in args]) - if newop.is_guard(): - args = newop.getfailargs() - if args and not ignore_failargs: - newop.setfailargs([self.inline_arg(a) for a in args]) - else: - newop.setfailargs([]) - if newop.result and not ignore_result: old_result = newop.result newop.result = newop.result.clonebox() self.argmap[old_result] = newop.result - self.inline_descr_inplace(newop.getdescr()) - return newop - def inline_descr_inplace(self, descr): - from rpython.jit.metainterp.compile import 
ResumeGuardDescr - if isinstance(descr, ResumeGuardDescr): - descr.rd_snapshot = self.inline_snapshot(descr.rd_snapshot) - def inline_arg(self, arg): if arg is None: return None if isinstance(arg, Const): return arg return self.argmap[arg] - - def inline_snapshot(self, snapshot): - if snapshot in self.snapshot_map: - return self.snapshot_map[snapshot] - boxes = [self.inline_arg(a) for a in snapshot.boxes] - new_snapshot = Snapshot(self.inline_snapshot(snapshot.prev), boxes) - self.snapshot_map[snapshot] = new_snapshot - return new_snapshot diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -19,8 +19,7 @@ ('string', OptString), ('earlyforce', OptEarlyForce), ('pure', OptPure), - ('heap', OptHeap), - ('unroll', None)] + ('heap', OptHeap)] # no direct instantiation of unroll unroll_all_opts = unrolling_iterable(ALL_OPTS) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -670,6 +670,9 @@ value = self.getvalue(op.getarg(0)) self.optimizer.opaque_pointers[value] = True + def optimize_ENTER_FRAME(self, op): + self.optimize_default(op) + dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_', default=Optimizer.optimize_default) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -169,7 +169,6 @@ assert self.optimizer.loop.resume_at_jump_descr resume_at_jump_descr = self.optimizer.loop.resume_at_jump_descr assert isinstance(resume_at_jump_descr, ResumeGuardDescr) - resume_at_jump_descr.rd_snapshot = self.fix_snapshot(jump_args, resume_at_jump_descr.rd_snapshot) modifier = VirtualStateAdder(self.optimizer) virtual_state = modifier.get_virtual_state(jump_args) @@ -293,8 +292,6 @@ op = newoperations[i] self.boxes_created_this_iteration[op.result] = None args = op.getarglist() - if op.is_guard(): - args = args + op.getfailargs() for a in args: self.import_box(a, inputargs, short_jumpargs, []) i += 1 @@ -365,8 +362,6 @@ self.boxes_created_this_iteration[op.result] = None args = op.getarglist() - if op.is_guard(): - args = args + op.getfailargs() #if self.optimizer.loop.logops: # debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op)) @@ -445,7 +440,6 @@ target_token.assumed_classes[newop.result] = self.short_boxes.assumed_classes[op.result] short[i] = newop target_token.resume_at_jump_descr = target_token.resume_at_jump_descr - inliner.inline_descr_inplace(target_token.resume_at_jump_descr) # Forget the values to allow them to be freed for box in short[0].getarglist(): @@ -604,8 +598,8 @@ 'of the bridge does not mach the class ' + 'it has at the start of the target loop') except InvalidLoop: - #debug_print("Inlining failed unexpectedly", - # "jumping to preamble instead") + debug_print("Inlining failed unexpectedly", + "jumping to preamble instead") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) self.optimizer.send_extra_operation(jumpop) diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -60,7 +60,7 @@ 
assert res == f(6, 13) self.check_trace_count(1) if self.enable_opts: - self.check_resops(setfield_gc=2, getfield_gc=0) + self.check_resops(setfield_gc=1, getfield_gc=0) def test_loop_with_two_paths(self): @@ -107,10 +107,10 @@ pattern >>= 1 return 42 self.meta_interp(f, [0xF0F0F0]) - if self.enable_opts: - self.check_trace_count(3) - else: - self.check_trace_count(2) + #if self.enable_opts: + # self.check_trace_count(3) + #else: + self.check_trace_count(2) def test_interp_simple(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y']) diff --git a/rpython/jit/metainterp/test/test_loop_unroll.py b/rpython/jit/metainterp/test/test_loop_unroll.py --- a/rpython/jit/metainterp/test/test_loop_unroll.py +++ b/rpython/jit/metainterp/test/test_loop_unroll.py @@ -8,7 +8,7 @@ enable_opts = ALL_OPTS_NAMES automatic_promotion_result = { - 'int_gt': 2, 'guard_false': 2, 'jump': 1, 'int_add': 6, + 'int_gt': 1, 'guard_false': 1, 'jump': 1, 'int_add': 3, 'guard_value': 1 } diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -437,7 +437,7 @@ """Inconsistency in the JIT hints.""" ENABLE_ALL_OPTS = ( - 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll') + 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap') PARAMETER_DOCS = { 'threshold': 'number of times a loop has to run for it to become hot', From noreply at buildbot.pypy.org Tue Jan 14 20:55:25 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 14 Jan 2014 20:55:25 +0100 (CET) Subject: [pypy-commit] buildbot default: run the incremental benchmark nightly Message-ID: <20140114195525.C5B551C0632@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r905:efa01db4e858 Date: 2014-01-14 20:55 +0100 http://bitbucket.org/pypy/buildbot/changeset/efa01db4e858/ Log: run the incremental benchmark nightly diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -230,6 +230,7 @@ # XXX maybe use a trigger instead? 
JITBENCH, # on tannit32, uses 1 core (in part exclusively) JITBENCH64, # on tannit64, uses 1 core (in part exclusively) + JITBENCH64_NEW, # on speed64, uses 1 core (in part exclusively) ], branch=None, hour=2, minute=0), From noreply at buildbot.pypy.org Wed Jan 15 00:24:06 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 15 Jan 2014 00:24:06 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: prefer decorators Message-ID: <20140114232406.DA0511C0500@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r68671:c39305287536 Date: 2014-01-14 13:09 -0800 http://bitbucket.org/pypy/pypy/changeset/c39305287536/ Log: prefer decorators diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -2,6 +2,7 @@ import functools +from rpython.rlib.objectmodel import specialize from rpython.rlib.rbigint import rbigint from rpython.rlib.rstring import ParseStringError from rpython.tool.sourcetools import func_renamer, func_with_new_name @@ -214,9 +215,9 @@ def __init__(self, num): self.num = num # instance of rbigint + @staticmethod def fromint(space, intval): return W_LongObject(rbigint.fromint(intval)) - fromint = staticmethod(fromint) def longval(self): return self.num.tolong() @@ -231,18 +232,18 @@ def toint(self): return self.num.toint() + @staticmethod def fromfloat(space, f): return newlong(space, rbigint.fromfloat(f)) - fromfloat = staticmethod(fromfloat) + @staticmethod def fromlong(l): return W_LongObject(rbigint.fromlong(l)) - fromlong = staticmethod(fromlong) + @staticmethod + @specialize.argtype(0) def fromrarith_int(i): return W_LongObject(rbigint.fromrarith_int(i)) - fromrarith_int._annspecialcase_ = "specialize:argtype(0)" - fromrarith_int = staticmethod(fromrarith_int) def int_w(self, space): try: From noreply at buildbot.pypy.org Wed Jan 15 00:24:09 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 15 Jan 2014 00:24:09 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140114232409.130DF1C0500@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68672:60112abc6c12 Date: 2014-01-14 13:36 -0800 http://bitbucket.org/pypy/pypy/changeset/60112abc6c12/ Log: merge default diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -993,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -107,7 +107,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bigaddrspace.py'), diff --git 
a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -21,10 +21,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = b'\x00' + self._buffer[len(val)] = b'\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -34,8 +37,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -46,10 +48,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, str): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = '\x00' res.value = property(getvalue, setvalue) res._ffishape = (ffiarray, res._length_) diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -330,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -1004,13 +1012,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1033,12 +1046,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1188,11 +1206,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + 
self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1205,7 +1231,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1324,7 +1350,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- /dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,29 @@ + +import struct + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + if size == 1: + return struct.unpack_from("B", buffer(cp)[i:])[0] + elif size == 2: + return struct.unpack_from("H", buffer(cp)[i * 2:])[0] + elif size == 4: + return struct.unpack_from("I", buffer(cp)[i * 4:])[0] diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr -from rpython.rlib import objectmodel, rgc +from rpython.rlib import rgc from rpython.rlib.objectmodel import keepalive_until_here, specialize from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name diff --git a/pypy/module/_rawffi/__init__.py b/pypy/module/_rawffi/__init__.py --- a/pypy/module/_rawffi/__init__.py +++ b/pypy/module/_rawffi/__init__.py @@ -19,6 +19,7 @@ 'wcharp2unicode' : 'interp_rawffi.wcharp2unicode', 'charp2rawstring' : 'interp_rawffi.charp2rawstring', 'wcharp2rawunicode' : 'interp_rawffi.wcharp2rawunicode', + 'rawstring2charp' : 'interp_rawffi.rawstring2charp', 'CallbackPtr' : 'callback.W_CallbackPtr', '_num_of_allocated_objects' : 'tracker.num_of_allocated_objects', 'get_libc' : 'interp_rawffi.get_libc', diff --git a/pypy/module/_rawffi/array.py b/pypy/module/_rawffi/array.py --- a/pypy/module/_rawffi/array.py +++ b/pypy/module/_rawffi/array.py @@ -15,6 +15,7 @@ from pypy.module._rawffi.interp_rawffi import unpack_shape_with_length from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr from rpython.rlib.rarithmetic import r_uint +from rpython.rlib import rgc class W_Array(W_DataShape): @@ -220,6 +221,7 @@ def __init__(self, space, shape, length): W_ArrayInstance.__init__(self, space, shape, length, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- 
a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -589,6 +589,13 @@ s = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, address), maxlength) return space.wrap(s) + at unwrap_spec(address=r_uint, newcontent=str) +def rawstring2charp(space, address, newcontent): + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + array = rffi.cast(rffi.CCHARP, address) + copy_string_to_raw(llstr(newcontent), array, 0, len(newcontent)) + if _MS_WINDOWS: @unwrap_spec(code=int) def FormatError(space, code): diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -16,7 +16,7 @@ from pypy.module._rawffi.interp_rawffi import unroll_letters_for_numbers from pypy.module._rawffi.interp_rawffi import size_alignment from pypy.module._rawffi.interp_rawffi import read_ptr, write_ptr -from rpython.rlib import clibffi +from rpython.rlib import clibffi, rgc from rpython.rlib.rarithmetic import intmask, signedtype, widen from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong @@ -226,6 +226,7 @@ fieldtypes) return self.ffi_struct.ffistruct + @rgc.must_be_light_finalizer def __del__(self): if self.ffi_struct: lltype.free(self.ffi_struct, flavor='raw') @@ -380,6 +381,7 @@ def __init__(self, space, shape): W_StructureInstance.__init__(self, space, shape, 0) + @rgc.must_be_light_finalizer def __del__(self): if self.ll_buffer: self._free() diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -324,6 +324,14 @@ assert res == 'xx' a.free() + def test_rawstring2charp(self): + import _rawffi + A = _rawffi.Array('c') + a = A(10, 'x'*10) + _rawffi.rawstring2charp(a.buffer, "foobar") + assert ''.join([a[i] for i in range(10)]) == "foobarxxxx" + a.free() + def test_raw_callable(self): import _rawffi lib = _rawffi.CDLL(self.lib_name) diff --git a/pypy/module/_rawffi/test/test_tracker.py b/pypy/module/_rawffi/test/test_tracker.py --- a/pypy/module/_rawffi/test/test_tracker.py +++ b/pypy/module/_rawffi/test/test_tracker.py @@ -1,9 +1,21 @@ +import py +from pypy.conftest import option from pypy.module._rawffi.tracker import Tracker + class AppTestTracker: spaceconfig = dict(usemodules=['_rawffi', 'struct']) def setup_class(cls): + # + # detect if we're running on PyPy with DO_TRACING not compiled in + if option.runappdirect: + try: + import _rawffi + _rawffi._num_of_allocated_objects() + except (ImportError, RuntimeError), e: + py.test.skip(str(e)) + # Tracker.DO_TRACING = True def test_array(self): diff --git a/pypy/module/_rawffi/tracker.py b/pypy/module/_rawffi/tracker.py --- a/pypy/module/_rawffi/tracker.py +++ b/pypy/module/_rawffi/tracker.py @@ -2,9 +2,11 @@ """ The file that keeps track about freed/kept-alive objects allocated by _rawffi. Used for debugging ctypes """ +from pypy.interpreter.error import OperationError + class Tracker(object): - DO_TRACING = True + DO_TRACING = False # make sure this stays False by default! 
def __init__(self): self.alloced = {} @@ -20,6 +22,9 @@ tracker = Tracker() def num_of_allocated_objects(space): + if not tracker.DO_TRACING: + raise OperationError(space.w_RuntimeError, + space.wrap("DO_TRACING not enabled in this PyPy")) return space.wrap(len(tracker.alloced)) def print_alloced_objects(space): diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -859,7 +859,7 @@ libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address # of a str object may be changed by the garbage collector. - libssl_SSL_set_mode(ss.ssl, + libssl_SSL_set_mode(ss.ssl, SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) if server_hostname: diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -52,6 +52,7 @@ # weakref callbacks are not invoked eagerly here. They are # invoked by self.__del__() anyway. + @jit.dont_look_inside def get_or_make_weakref(self, w_subtype, w_obj): space = self.space w_weakreftype = space.gettypeobject(W_Weakref.typedef) @@ -70,6 +71,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def get_or_make_proxy(self, w_obj): space = self.space if self.cached_proxy is not None: @@ -122,6 +124,7 @@ W_WeakrefBase.activate_callback, 'weakref callback of ') + @jit.dont_look_inside def make_weakref_with_callback(self, w_subtype, w_obj, w_callable): space = self.space w_ref = space.allocate_instance(W_Weakref, w_subtype) @@ -129,6 +132,7 @@ self.append_wref_to(w_ref) return w_ref + @jit.dont_look_inside def make_proxy_with_callback(self, w_obj, w_callable): space = self.space if space.is_true(space.callable(w_obj)): @@ -239,15 +243,16 @@ w_obj.setweakref(space, lifeline) return lifeline - at jit.dont_look_inside + def get_or_make_weakref(space, w_subtype, w_obj): return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj) - at jit.dont_look_inside + def make_weakref_with_callback(space, w_subtype, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) + def descr__new__weakref(space, w_subtype, w_obj, w_callable=None, __args__=None): if __args__.arguments_w: @@ -312,15 +317,16 @@ w_obj = force(space, self) return space.call_args(w_obj, __args__) - at jit.dont_look_inside + def get_or_make_proxy(space, w_obj): return getlifeline(space, w_obj).get_or_make_proxy(w_obj) - at jit.dont_look_inside + def make_proxy_with_callback(space, w_obj, w_callable): lifeline = getlifelinewithcallbacks(space, w_obj) return lifeline.make_proxy_with_callback(w_obj, w_callable) + def proxy(space, w_obj, w_callable=None): """Create a proxy object that weakly references 'obj'. 
'callback', if given, is called with the proxy as an argument when 'obj' diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -20,7 +20,10 @@ assert math.isnan(np.complex_(None)) for c in ['i', 'I', 'l', 'L', 'q', 'Q']: assert np.dtype(c).type().dtype.char == c - assert np.dtype('L').type(sys.maxint + 42) == sys.maxint + 42 + for c in ['l', 'q']: + assert np.dtype(c).type(sys.maxint) == sys.maxint + for c in ['L', 'Q']: + assert np.dtype(c).type(sys.maxint + 42) == sys.maxint + 42 def test_builtin(self): import numpy as np diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -586,7 +586,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" - _coerce = func_with_new_name(_int64_coerce, '_coerce') + if LONG_BIT == 32: + _coerce = func_with_new_name(_int64_coerce, '_coerce') def _uint64_coerce(self, space, w_item): try: @@ -613,16 +614,25 @@ BoxType = interp_boxes.W_LongBox format_code = "l" - if LONG_BIT == 64: - _coerce = func_with_new_name(_int64_coerce, '_coerce') +def _ulong_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.touint() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) class ULong(BaseType, Integer): T = rffi.ULONG BoxType = interp_boxes.W_ULongBox format_code = "L" - if LONG_BIT == 64: - _coerce = func_with_new_name(_uint64_coerce, '_coerce') + _coerce = func_with_new_name(_ulong_coerce, '_coerce') class Float(Primitive): _mixin_ = True diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py new file mode 100644 --- /dev/null +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -0,0 +1,46 @@ +from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC + + +class TestThread(BaseTestPyPyC): + def test_make_ref_with_callback(self): + log = self.run(""" + import weakref + + class Dummy(object): + pass + + def noop(obj): + pass + + def main(n): + obj = Dummy() + for i in xrange(n): + weakref.ref(obj, noop) + """, [500]) + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i58 = getfield_gc(p18, descr=) + i59 = getfield_gc(p18, descr=) + i60 = int_lt(i58, i59) + guard_true(i60, descr=...) + i61 = int_add(i58, 1) + p62 = getfield_gc(ConstPtr(ptr37), descr=) + setfield_gc(p18, i61, descr=) + guard_value(p62, ConstPtr(ptr39), descr=...) + guard_not_invalidated(descr=...) + p64 = getfield_gc(ConstPtr(ptr40), descr=) + guard_value(p64, ConstPtr(ptr42), descr=...) + p65 = getfield_gc(p14, descr=) + guard_value(p65, ConstPtr(ptr45), descr=...) + p66 = getfield_gc(p14, descr=) + guard_nonnull_class(p66, ..., descr=...) + p67 = force_token() + setfield_gc(p0, p67, descr=) + p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) + guard_not_forced(descr=...) + guard_no_exception(descr=...) + guard_nonnull_class(p68, ..., descr=...) + guard_not_invalidated(descr=...) + --TICK-- + jump(..., descr=...) 
+ """) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/support.py b/pypy/module/test_lib_pypy/ctypes_tests/support.py --- a/pypy/module/test_lib_pypy/ctypes_tests/support.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/support.py @@ -31,8 +31,10 @@ import gc for _ in range(4): gc.collect() - cls.old_num = _rawffi._num_of_allocated_objects() - + try: + cls.old_num = _rawffi._num_of_allocated_objects() + except RuntimeError: + pass def teardown_class(cls): if sys.pypy_translation_info['translation.gc'] == 'boehm': diff --git a/pypy/module/zipimport/interp_zipimport.py b/pypy/module/zipimport/interp_zipimport.py --- a/pypy/module/zipimport/interp_zipimport.py +++ b/pypy/module/zipimport/interp_zipimport.py @@ -5,8 +5,10 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.module import Module from pypy.module.imp import importing +from pypy.module.zlib.interp_zlib import zlib_error from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.rzipfile import RZipFile, BadZipfile +from rpython.rlib.rzlib import RZlibError import os import stat @@ -252,6 +254,10 @@ buf = self.zip_file.read(fname) except (KeyError, OSError, BadZipfile): pass + except RZlibError, e: + # in this case, CPython raises the direct exception coming + # from the zlib module: let's to the same + raise zlib_error(space, e.msg) else: if is_package: pkgpath = (self.filename + os.path.sep + @@ -282,6 +288,10 @@ return space.wrapbytes(data) except (KeyError, OSError, BadZipfile): raise OperationError(space.w_IOError, space.wrap("Error reading file")) + except RZlibError, e: + # in this case, CPython raises the direct exception coming + # from the zlib module: let's to the same + raise zlib_error(space, e.msg) @unwrap_spec(fullname='str0') def get_code(self, space, fullname): @@ -384,6 +394,11 @@ except (BadZipfile, OSError): raise operationerrfmt(get_error(space), "%s seems not to be a zipfile", filename) + except RZlibError, e: + # in this case, CPython raises the direct exception coming + # from the zlib module: let's to the same + raise zlib_error(space, e.msg) + prefix = name[len(filename):] if prefix.startswith(os.path.sep) or prefix.startswith(ZIPSEP): prefix = prefix[1:] diff --git a/pypy/module/zipimport/test/bad.zip b/pypy/module/zipimport/test/bad.zip new file mode 100644 index 0000000000000000000000000000000000000000..1d326a869a409a04dc46475bf157cb6f5bdb0664 GIT binary patch [cut] diff --git a/pypy/module/zipimport/test/test_zipimport_deflated.py b/pypy/module/zipimport/test/test_zipimport_deflated.py --- a/pypy/module/zipimport/test/test_zipimport_deflated.py +++ b/pypy/module/zipimport/test/test_zipimport_deflated.py @@ -3,6 +3,7 @@ from zipfile import ZIP_DEFLATED from pypy.module.zipimport.test.test_zipimport import AppTestZipimport as Base +BAD_ZIP = str(py.path.local(__file__).dirpath('bad.zip')) class AppTestZipimportDeflated(Base): compression = ZIP_DEFLATED @@ -16,3 +17,10 @@ except ImportError: py.test.skip("zlib not available, cannot test compressed zipfiles") cls.make_class() + cls.w_BAD_ZIP = cls.space.wrap(BAD_ZIP) + + def test_zlib_error(self): + import zipimport + import zlib + z = zipimport.zipimporter(self.BAD_ZIP) + raises(zlib.error, "z.load_module('mymod')") diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -38,9 +38,9 @@ if len(names) == 1: return val[names[0]] elif len(names) == 0: - raise KeyError, "cannot find field *%s" % suffix + raise KeyError("cannot find field *%s" % suffix) 
else: - raise KeyError, "too many matching fields: %s" % ', '.join(names) + raise KeyError("too many matching fields: %s" % ', '.join(names)) def lookup(val, suffix): """ @@ -76,10 +76,14 @@ def invoke(self, arg, from_tty): # some magic code to automatically reload the python file while developing from pypy.tool import gdb_pypy - reload(gdb_pypy) + try: + reload(gdb_pypy) + except: + import imp + imp.reload(gdb_pypy) gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache self.__class__ = gdb_pypy.RPyType - print self.do_invoke(arg, from_tty) + print (self.do_invoke(arg, from_tty).decode('latin-1')) def do_invoke(self, arg, from_tty): try: @@ -88,7 +92,7 @@ obj = self.gdb.parse_and_eval(arg) hdr = lookup(obj, '_gcheader') tid = hdr['h_tid'] - if sys.maxint < 2**32: + if sys.maxsize < 2**32: offset = tid & 0xFFFF # 32bit else: offset = tid & 0xFFFFFFFF # 64bit @@ -147,13 +151,13 @@ if linenum in self.line2offset: return self.line2offset[linenum] line = self.lines[linenum] - member, descr = map(str.strip, line.split(None, 1)) - if sys.maxint < 2**32: + member, descr = [x.strip() for x in line.split(None, 1)] + if sys.maxsize < 2**32: TIDT = "int*" else: TIDT = "char*" expr = ("((%s)(&pypy_g_typeinfo.%s)) - (%s)&pypy_g_typeinfo" - % (TIDT, member, TIDT)) + % (TIDT, member.decode("latin-1"), TIDT)) offset = int(self.gdb.parse_and_eval(expr)) self.line2offset[linenum] = offset self.offset2descr[offset] = descr @@ -164,7 +168,7 @@ # binary search through the lines, asking gdb to parse stuff lazily if offset in self.offset2descr: return self.offset2descr[offset] - if not (0 < offset < sys.maxint): + if not (0 < offset < sys.maxsize): return None linerange = (0, len(self.lines)) while linerange[0] < linerange[1]: diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -363,6 +363,10 @@ # if we have specified strange inconsistent settings. 
config.translation.gc = config.translation.gc + # disallow asmgcc on OS/X + if config.translation.gcrootfinder == "asmgcc": + assert sys.platform != "darwin" + # ---------------------------------------------------------------- def set_platform(config): diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -159,17 +159,17 @@ descr, ) elif cache is not None: - if argboxes[2] in self.new_boxes: - try: - idx_cache = cache[dststart + i] - except KeyError: - pass - else: + try: + idx_cache = cache[dststart + i] + except KeyError: + pass + else: + if argboxes[2] in self.new_boxes: for frombox in idx_cache.keys(): if not self.is_unescaped(frombox): del idx_cache[frombox] - else: - cache[dststart + i].clear() + else: + idx_cache.clear() return elif ( argboxes[2] in self.new_boxes and diff --git a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py --- a/rpython/jit/metainterp/optimizeopt/earlyforce.py +++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py @@ -28,8 +28,5 @@ value.force_box(self) self.emit_operation(op) - def new(self): - return OptEarlyForce() - def setup(self): self.optimizer.optearlyforce = self diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -184,9 +184,6 @@ self.postponed_op = None self.next_optimization.propagate_forward(postponed_op) - def new(self): - return OptHeap() - def produce_potential_short_preamble_ops(self, sb): descrkeys = self.cached_fields.keys() if not we_are_translated(): diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -13,9 +13,6 @@ """Keeps track of the bounds placed on integers by guards and remove redundant guards""" - def new(self): - return OptIntBounds() - def propagate_forward(self, op): dispatch_opt(self, op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -323,10 +323,6 @@ def force_at_end_of_preamble(self): pass - # It is too late to force stuff here, it must be done in force_at_end_of_preamble - def new(self): - raise NotImplementedError - # Called after last operation has been propagated to flush out any posponed ops def flush(self): pass @@ -390,16 +386,6 @@ for o in self.optimizations: o.flush() - def new(self): - new = Optimizer(self.metainterp_sd, self.loop) - return self._new(new) - - def _new(self, new): - optimizations = [o.new() for o in self.optimizations] - new.set_optimizations(optimizations) - new.quasi_immutable_deps = self.quasi_immutable_deps - return new - def produce_potential_short_preamble_ops(self, sb): for opt in self.optimizations: opt.produce_potential_short_preamble_ops(sb) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -85,10 +85,6 @@ def flush(self): assert self.postponed_op is None - def new(self): - assert self.postponed_op is None - return OptPure() - def setup(self): self.optimizer.optpure = self diff --git 
a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -20,9 +20,6 @@ self.loop_invariant_results = {} self.loop_invariant_producer = {} - def new(self): - return OptRewrite() - def produce_potential_short_preamble_ops(self, sb): for op in self.loop_invariant_producer.values(): sb.add_potential(op) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5031,6 +5031,19 @@ """ self.optimize_strunicode_loop(ops, expected) + def test_copy_long_string_to_virtual(self): + ops = """ + [] + p0 = newstr(20) + copystrcontent(s"aaaaaaaaaaaaaaaaaaaa", p0, 0, 0, 20) + jump(p0) + """ + expected = """ + [] + jump(s"aaaaaaaaaaaaaaaaaaaa") + """ + self.optimize_strunicode_loop(ops, expected) + def test_ptr_eq_str_constant(self): ops = """ [] diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -41,10 +41,6 @@ self.emitted_guards += 1 # FIXME: can we use counter in self._emit_operation? self._emit_operation(op) - def new(self): - new = UnrollableOptimizer(self.metainterp_sd, self.loop) - return self._new(new) - class UnrollOptimizer(Optimization): """Unroll the loop into two iterations. The first one will diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -497,9 +497,6 @@ _last_guard_not_forced_2 = None - def new(self): - return OptVirtualize() - def make_virtual(self, known_class, box, source_op=None): vvalue = VirtualValue(self.optimizer.cpu, known_class, box, source_op) self.make_equal_to(box, vvalue) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -388,8 +388,6 @@ class OptString(optimizer.Optimization): "Handling of strings and unicodes." - def new(self): - return OptString() def make_vstring_plain(self, box, source_op, mode): vvalue = VStringPlainValue(box, source_op, mode) @@ -520,7 +518,7 @@ elif ((src.is_virtual() or src.is_constant()) and srcstart.is_constant() and dststart.is_constant() and length.is_constant() and - (length.force_box(self).getint() < 20 or (src.is_virtual() and dst_virtual))): + (length.force_box(self).getint() < 20 or ((src.is_virtual() or src.is_constant()) and dst_virtual))): src_start = srcstart.force_box(self).getint() dst_start = dststart.force_box(self).getint() actual_length = length.force_box(self).getint() diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -864,7 +864,10 @@ self.next_major_collection_threshold = self.max_heap_size def raw_malloc_memory_pressure(self, sizehint): - self.next_major_collection_threshold -= sizehint + # Decrement by 'sizehint' plus a very little bit extra. This + # is needed e.g. for _rawffi, which may allocate a lot of tiny + # arrays. 
+ self.next_major_collection_threshold -= (sizehint + 2 * WORD) if self.next_major_collection_threshold < 0: # cannot trigger a full collection now, but we can ensure # that one will occur very soon diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -988,6 +988,8 @@ return result.build(), pos +# Specialize on the errorhandler when it's a constant + at specialize.arg_or_var(3) def unicode_encode_ucs1_helper(p, size, errors, errorhandler=None, limit=256): if errorhandler is None: @@ -1152,8 +1154,10 @@ builder.append(res) return pos +# Specialize on the errorhandler when it's a constant + at specialize.arg_or_var(4) def str_decode_unicode_escape(s, size, errors, final=False, - errorhandler=False, + errorhandler=None, unicodedata_handler=None): if errorhandler is None: errorhandler = default_unicode_error_decode diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -624,6 +624,7 @@ i += 1 return count + @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find(s1, s2, start, end): if start < 0: start = 0 @@ -638,6 +639,7 @@ return LLHelpers.ll_search(s1, s2, start, end, FAST_FIND) + @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_rfind(s1, s2, start, end): if start < 0: start = 0 diff --git a/rpython/translator/c/gcc/test/test_trackgcroot.py b/rpython/translator/c/gcc/test/test_trackgcroot.py --- a/rpython/translator/c/gcc/test/test_trackgcroot.py +++ b/rpython/translator/c/gcc/test/test_trackgcroot.py @@ -127,6 +127,8 @@ def check_computegcmaptable(format, path): if format == 'msvc': r_globallabel = re.compile(r"([\w]+)::") + elif format == 'darwin' or format == 'darwin64': + py.test.skip("disabled on OS/X's terribly old gcc") else: r_globallabel = re.compile(r"([\w]+)=[.]+") print diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -31,7 +31,7 @@ cls.r_binaryinsn = re.compile(r"\t[a-z]\w*\s+(?P"+cls.OPERAND+"),\s*(?P"+cls.OPERAND+")\s*$") cls.r_jump = re.compile(r"\tj\w+\s+"+cls.LABEL+"\s*" + cls.COMMENT + "$") - cls.r_jmp_switch = re.compile(r"\tjmp\t[*]"+cls.LABEL+"[(]") + cls.r_jmp_switch = re.compile(r"\tjmp\t[*]") cls.r_jmp_source = re.compile(r"\d*[(](%[\w]+)[,)]") def __init__(self, funcname, lines, filetag=0): @@ -697,10 +697,22 @@ tablelabels = [] match = self.r_jmp_switch.match(line) if match: - # this is a jmp *Label(%index), used for table-based switches. - # Assume that the table is just a list of lines looking like - # .long LABEL or .long 0, ending in a .text or .section .text.hot. - tablelabels.append(match.group(1)) + # this is a jmp *Label(%index) or jmp *%addr, used for + # table-based switches. Assume that the table is coming + # after a .section .rodata and a label, and is a list of + # lines looking like .long LABEL or .long 0 or .long L2-L1, + # ending in a .text or .section .text.hot. + lineno = self.currentlineno + 1 + if '.section' not in self.lines[lineno]: + pass # bah, probably a tail-optimized indirect call... 
+ else: + assert '.rodata' in self.lines[lineno] + lineno += 1 + while '.align' in self.lines[lineno]: + lineno += 1 + match = self.r_label.match(self.lines[lineno]) + assert match, repr(self.lines[lineno]) + tablelabels.append(match.group(1)) elif self.r_unaryinsn_star.match(line): # maybe a jmp similar to the above, but stored in a # registry: diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -132,11 +132,6 @@ block.exitswitch = None return link -def split_block_at_start(annotator, block): - # split before the first op, preserve order and inputargs - # in the second block! - return split_block(annotator, block, 0, _forcelink=block.inputargs) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from rpython.annotator import model as annmodel From noreply at buildbot.pypy.org Wed Jan 15 00:24:10 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 15 Jan 2014 00:24:10 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt 1bf39957a7e8 from default Message-ID: <20140114232410.558091C0500@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68673:30065a062e2c Date: 2014-01-14 15:22 -0800 http://bitbucket.org/pypy/pypy/changeset/30065a062e2c/ Log: adapt 1bf39957a7e8 from default diff --git a/lib-python/3/test/test_ssl.py b/lib-python/3/test/test_ssl.py --- a/lib-python/3/test/test_ssl.py +++ b/lib-python/3/test/test_ssl.py @@ -1259,7 +1259,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) # SSLv23 client with specific SSL options diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1,6 +1,7 @@ from __future__ import with_statement from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.error import OperationError, wrap_oserror +from pypy.interpreter.error import ( + OperationError, operationerrfmt, wrap_oserror) from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -91,13 +92,26 @@ class SSLContext(W_Root): - def __init__(self, method): + def __init__(self, protocol): + if protocol == PY_SSL_VERSION_TLS1: + method = libssl_TLSv1_method() + elif protocol == PY_SSL_VERSION_SSL3: + method = libssl_SSLv3_method() + elif protocol == PY_SSL_VERSION_SSL2 and not OPENSSL_NO_SSL2: + method = libssl_SSLv2_method() + elif protocol == PY_SSL_VERSION_SSL23: + method = libssl_SSLv23_method() + else: + raise operationerrfmt(space.w_ValueError, + "invalid protocol version") self.ctx = libssl_SSL_CTX_new(method) # Defaults libssl_SSL_CTX_set_verify(self.ctx, SSL_VERIFY_NONE, None) - libssl_SSL_CTX_set_options( - self.ctx, SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) + options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS + if protocol != PY_SSL_VERSION_SSL2: + options |= SSL_OP_NO_SSLv2 + 
libssl_SSL_CTX_set_options(self.ctx, options) libssl_SSL_CTX_set_session_id_context(self.ctx, "Python", len("Python")) def __del__(self): @@ -107,18 +121,7 @@ @unwrap_spec(protocol=int) def descr_new(space, w_subtype, protocol=PY_SSL_VERSION_SSL23): self = space.allocate_instance(SSLContext, w_subtype) - if protocol == PY_SSL_VERSION_TLS1: - method = libssl_TLSv1_method() - elif protocol == PY_SSL_VERSION_SSL3: - method = libssl_SSLv3_method() - elif protocol == PY_SSL_VERSION_SSL2 and not OPENSSL_NO_SSL2: - method = libssl_SSLv2_method() - elif protocol == PY_SSL_VERSION_SSL23: - method = libssl_SSLv23_method() - else: - raise OperationError( - space.w_ValueError, space.wrap("invalid protocol version")) - self.__init__(method) + self.__init__(protocol) if not self.ctx: raise ssl_error(space, "failed to allocate SSL context") return space.wrap(self) From noreply at buildbot.pypy.org Wed Jan 15 00:24:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 15 Jan 2014 00:24:11 +0100 (CET) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20140114232411.8D07E1C0500@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68674:70bdd46f5d7c Date: 2014-01-14 15:22 -0800 http://bitbucket.org/pypy/pypy/changeset/70bdd46f5d7c/ Log: 2to3 diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -22,8 +22,8 @@ if not (0 <= i < len(cp) / size): raise error("Index out of range") if size == 1: - return struct.unpack_from("B", buffer(cp)[i:])[0] + return struct.unpack_from("B", memoryview(cp)[i:])[0] elif size == 2: - return struct.unpack_from("H", buffer(cp)[i * 2:])[0] + return struct.unpack_from("H", memoryview(cp)[i * 2:])[0] elif size == 4: - return struct.unpack_from("I", buffer(cp)[i * 4:])[0] + return struct.unpack_from("I", memoryview(cp)[i * 4:])[0] From noreply at buildbot.pypy.org Wed Jan 15 03:12:34 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 15 Jan 2014 03:12:34 +0100 (CET) Subject: [pypy-commit] pypy py3k: fix for 30065a062e2c Message-ID: <20140115021234.A06A31D23B0@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68675:0f695956f1f6 Date: 2014-01-14 18:11 -0800 http://bitbucket.org/pypy/pypy/changeset/0f695956f1f6/ Log: fix for 30065a062e2c diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -92,7 +92,7 @@ class SSLContext(W_Root): - def __init__(self, protocol): + def __init__(self, space, protocol): if protocol == PY_SSL_VERSION_TLS1: method = libssl_TLSv1_method() elif protocol == PY_SSL_VERSION_SSL3: @@ -121,7 +121,7 @@ @unwrap_spec(protocol=int) def descr_new(space, w_subtype, protocol=PY_SSL_VERSION_SSL23): self = space.allocate_instance(SSLContext, w_subtype) - self.__init__(protocol) + self.__init__(space, protocol) if not self.ctx: raise ssl_error(space, "failed to allocate SSL context") return space.wrap(self) diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -215,7 +215,7 @@ def test_options(self): import _ssl ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1) - assert _ssl.OP_ALL == ctx.options + assert _ssl.OP_ALL | _ssl.OP_NO_SSLv2 == ctx.options From noreply at buildbot.pypy.org Wed Jan 15 09:43:31 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 15 Jan 2014 09:43:31 +0100 (CET) Subject: [pypy-commit] pypy 
llvm-translation-backend: hg merge default Message-ID: <20140115084331.448B71C338D@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68676:a6fecf78f85c Date: 2014-01-14 17:15 +0100 http://bitbucket.org/pypy/pypy/changeset/a6fecf78f85c/ Log: hg merge default diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: diff --git a/pypy/module/_rawffi/__init__.py b/pypy/module/_rawffi/__init__.py --- a/pypy/module/_rawffi/__init__.py +++ b/pypy/module/_rawffi/__init__.py @@ -19,6 +19,7 @@ 'wcharp2unicode' : 'interp_rawffi.wcharp2unicode', 'charp2rawstring' : 'interp_rawffi.charp2rawstring', 'wcharp2rawunicode' : 'interp_rawffi.wcharp2rawunicode', + 'rawstring2charp' : 'interp_rawffi.rawstring2charp', 'CallbackPtr' : 'callback.W_CallbackPtr', '_num_of_allocated_objects' : 'tracker.num_of_allocated_objects', 'get_libc' : 'interp_rawffi.get_libc', diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -579,6 +579,13 @@ s = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, address), maxlength) return space.wrap(s) + at unwrap_spec(address=r_uint, newcontent=str) +def rawstring2charp(space, address, newcontent): + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + array = rffi.cast(rffi.CCHARP, address) + copy_string_to_raw(llstr(newcontent), array, 0, len(newcontent)) + if _MS_WINDOWS: @unwrap_spec(code=int) def FormatError(space, code): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -323,6 +323,14 @@ assert res == u'xx' a.free() + def test_rawstring2charp(self): + import _rawffi + A = _rawffi.Array('c') + a = A(10, 'x'*10) + _rawffi.rawstring2charp(a.buffer, "foobar") + assert ''.join([a[i] for i in range(10)]) == "foobarxxxx" + a.free() + def test_raw_callable(self): import _rawffi lib = _rawffi.CDLL(self.lib_name) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/support.py b/pypy/module/test_lib_pypy/ctypes_tests/support.py --- a/pypy/module/test_lib_pypy/ctypes_tests/support.py +++ 
b/pypy/module/test_lib_pypy/ctypes_tests/support.py @@ -31,8 +31,10 @@ import gc for _ in range(4): gc.collect() - cls.old_num = _rawffi._num_of_allocated_objects() - + try: + cls.old_num = _rawffi._num_of_allocated_objects() + except RuntimeError: + pass def teardown_class(cls): if sys.pypy_translation_info['translation.gc'] == 'boehm': diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -38,9 +38,9 @@ if len(names) == 1: return val[names[0]] elif len(names) == 0: - raise KeyError, "cannot find field *%s" % suffix + raise KeyError("cannot find field *%s" % suffix) else: - raise KeyError, "too many matching fields: %s" % ', '.join(names) + raise KeyError("too many matching fields: %s" % ', '.join(names)) def lookup(val, suffix): """ @@ -76,10 +76,14 @@ def invoke(self, arg, from_tty): # some magic code to automatically reload the python file while developing from pypy.tool import gdb_pypy - reload(gdb_pypy) + try: + reload(gdb_pypy) + except: + import imp + imp.reload(gdb_pypy) gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache self.__class__ = gdb_pypy.RPyType - print self.do_invoke(arg, from_tty) + print (self.do_invoke(arg, from_tty).decode('latin-1')) def do_invoke(self, arg, from_tty): try: @@ -88,7 +92,7 @@ obj = self.gdb.parse_and_eval(arg) hdr = lookup(obj, '_gcheader') tid = hdr['h_tid'] - if sys.maxint < 2**32: + if sys.maxsize < 2**32: offset = tid & 0xFFFF # 32bit else: offset = tid & 0xFFFFFFFF # 64bit @@ -147,13 +151,13 @@ if linenum in self.line2offset: return self.line2offset[linenum] line = self.lines[linenum] - member, descr = map(str.strip, line.split(None, 1)) - if sys.maxint < 2**32: + member, descr = [x.strip() for x in line.split(None, 1)] + if sys.maxsize < 2**32: TIDT = "int*" else: TIDT = "char*" expr = ("((%s)(&pypy_g_typeinfo.%s)) - (%s)&pypy_g_typeinfo" - % (TIDT, member, TIDT)) + % (TIDT, member.decode("latin-1"), TIDT)) offset = int(self.gdb.parse_and_eval(expr)) self.line2offset[linenum] = offset self.offset2descr[offset] = descr @@ -164,7 +168,7 @@ # binary search through the lines, asking gdb to parse stuff lazily if offset in self.offset2descr: return self.offset2descr[offset] - if not (0 < offset < sys.maxint): + if not (0 < offset < sys.maxsize): return None linerange = (0, len(self.lines)) while linerange[0] < linerange[1]: diff --git a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py --- a/rpython/jit/metainterp/optimizeopt/earlyforce.py +++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py @@ -28,8 +28,5 @@ value.force_box(self) self.emit_operation(op) - def new(self): - return OptEarlyForce() - def setup(self): self.optimizer.optearlyforce = self diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -184,9 +184,6 @@ self.postponed_op = None self.next_optimization.propagate_forward(postponed_op) - def new(self): - return OptHeap() - def produce_potential_short_preamble_ops(self, sb): descrkeys = self.cached_fields.keys() if not we_are_translated(): diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -13,9 +13,6 @@ """Keeps track of the bounds placed on integers by guards and remove redundant guards""" - 
def new(self): - return OptIntBounds() - def propagate_forward(self, op): dispatch_opt(self, op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -323,10 +323,6 @@ def force_at_end_of_preamble(self): pass - # It is too late to force stuff here, it must be done in force_at_end_of_preamble - def new(self): - raise NotImplementedError - # Called after last operation has been propagated to flush out any posponed ops def flush(self): pass @@ -390,16 +386,6 @@ for o in self.optimizations: o.flush() - def new(self): - new = Optimizer(self.metainterp_sd, self.loop) - return self._new(new) - - def _new(self, new): - optimizations = [o.new() for o in self.optimizations] - new.set_optimizations(optimizations) - new.quasi_immutable_deps = self.quasi_immutable_deps - return new - def produce_potential_short_preamble_ops(self, sb): for opt in self.optimizations: opt.produce_potential_short_preamble_ops(sb) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -85,10 +85,6 @@ def flush(self): assert self.postponed_op is None - def new(self): - assert self.postponed_op is None - return OptPure() - def setup(self): self.optimizer.optpure = self diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -20,9 +20,6 @@ self.loop_invariant_results = {} self.loop_invariant_producer = {} - def new(self): - return OptRewrite() - def produce_potential_short_preamble_ops(self, sb): for op in self.loop_invariant_producer.values(): sb.add_potential(op) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -41,10 +41,6 @@ self.emitted_guards += 1 # FIXME: can we use counter in self._emit_operation? self._emit_operation(op) - def new(self): - new = UnrollableOptimizer(self.metainterp_sd, self.loop) - return self._new(new) - class UnrollOptimizer(Optimization): """Unroll the loop into two iterations. The first one will diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -497,9 +497,6 @@ _last_guard_not_forced_2 = None - def new(self): - return OptVirtualize() - def make_virtual(self, known_class, box, source_op=None): vvalue = VirtualValue(self.optimizer.cpu, known_class, box, source_op) self.make_equal_to(box, vvalue) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -388,8 +388,6 @@ class OptString(optimizer.Optimization): "Handling of strings and unicodes." 
- def new(self): - return OptString() def make_vstring_plain(self, box, source_op, mode): vvalue = VStringPlainValue(box, source_op, mode) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -624,6 +624,7 @@ i += 1 return count + @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find(s1, s2, start, end): if start < 0: start = 0 @@ -638,6 +639,7 @@ return LLHelpers.ll_search(s1, s2, start, end, FAST_FIND) + @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_rfind(s1, s2, start, end): if start < 0: start = 0 diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -132,11 +132,6 @@ block.exitswitch = None return link -def split_block_at_start(annotator, block): - # split before the first op, preserve order and inputargs - # in the second block! - return split_block(annotator, block, 0, _forcelink=block.inputargs) - def remove_double_links(annotator, graph): """This can be useful for code generators: it ensures that no block has more than one incoming links from one and the same other block. It allows From noreply at buildbot.pypy.org Wed Jan 15 10:02:44 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 15 Jan 2014 10:02:44 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: planning for today Message-ID: <20140115090244.5D88D1C338D@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5131:70dc63c3ff08 Date: 2014-01-15 10:02 +0100 http://bitbucket.org/pypy/extradoc/changeset/70dc63c3ff08/ Log: planning for today diff --git a/sprintinfo/leysin-winter-2014/planning.txt b/sprintinfo/leysin-winter-2014/planning.txt --- a/sprintinfo/leysin-winter-2014/planning.txt +++ b/sprintinfo/leysin-winter-2014/planning.txt @@ -8,16 +8,16 @@ Romain Guillebert Armin Rigo Manuel Jacob - +Antonio Cuni Topics ------ -* numpy stuff, fix bugs from bug tracker (rguillebert, ?) +* numpy stuff, fix bugs from bug tracker (rguillebert, antocuni around) * look at codespeed2 -* resume-refactor branch (rguillebert, fijal) (PROGRESS) +* resume-refactor branch (MORE PROGRESS) * GC pinning @@ -27,14 +27,18 @@ * CFFI 1.0 -* STM (remi, armin) SOME PROGRESS in transaction breaks +* STM (remi, armin) DONE in transaction breaks, started c7 * discuss about C++ / cppyy, look into importing pyshiboken (johan pessimistic, ?) 
-* ctypes: https://bugs.pypy.org/issue1671 +* try cppyy to run on windows (johan) IN PROGRESS + +* ctypes: https://bugs.pypy.org/issue1671 DONE * longs multiplication: patch at https://bugs.pypy.org/issue892 -* look into merging refactor-str-types (johan, mjacob) +* look into merging refactor-str-types (johan, mjacob, antocuni) FIX TRANSLATION -* tweaking ast classes: https://bugs.pypy.org/issue1673 +* tweaking ast classes: https://bugs.pypy.org/issue1673 (mjacob) + +* skiing (fijal) From noreply at buildbot.pypy.org Wed Jan 15 10:22:53 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 15 Jan 2014 10:22:53 +0100 (CET) Subject: [pypy-commit] stmgc c7: tests Message-ID: <20140115092253.960E51C0100@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r611:8a471acbcc36 Date: 2014-01-15 10:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/8a471acbcc36/ Log: tests diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -88,25 +88,30 @@ return lib._stm_is_in_nursery(ptr) def stm_allocate_old(size): - return lib._stm_real_address(lib._stm_allocate_old(size)) + o = lib._stm_allocate_old(size) + return o, lib._stm_real_address(o) def stm_allocate(size): - return lib._stm_real_address(lib.stm_allocate(size)) + o = lib.stm_allocate(size) + return o, lib._stm_real_address(o) +def stm_get_real_address(obj): + return lib._stm_real_address(ffi.cast('object_t*', obj)) + def stm_get_tl_address(ptr): return int(ffi.cast('uintptr_t', lib._stm_tl_address(ptr))) -def stm_read(ptr): - lib.stm_read(lib._stm_tl_address(ptr)) +def stm_read(o): + lib.stm_read(o) -def stm_write(ptr): - lib.stm_write(lib._stm_tl_address(ptr)) +def stm_write(o): + lib.stm_write(o) -def stm_was_read(ptr): - return lib._stm_was_read(lib._stm_tl_address(ptr)) +def stm_was_read(o): + return lib._stm_was_read(o) -def stm_was_written(ptr): - return lib._stm_was_written(lib._stm_tl_address(ptr)) +def stm_was_written(o): + return lib._stm_was_written(o) def stm_start_transaction(): lib.stm_start_transaction(ffi.cast("jmpbufptr_t*", -1)) diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -7,20 +7,20 @@ pass def test_thread_local_allocations(self): - p1 = stm_allocate(16) - p2 = stm_allocate(16) + lp1, p1 = stm_allocate(16) + lp2, p2 = stm_allocate(16) assert is_in_nursery(p1) assert is_in_nursery(p2) assert p2 - p1 == 16 - p3 = stm_allocate(16) + lp3, p3 = stm_allocate(16) assert p3 - p2 == 16 # self.switch(1) - p1s = stm_allocate(16) + lp1s, p1s = stm_allocate(16) assert abs(p1s - p3) >= 4000 # self.switch(0) - p4 = stm_allocate(16) + lp4, p4 = stm_allocate(16) assert p4 - p3 == 16 def test_transaction_start_stop(self): @@ -33,26 +33,33 @@ def test_simple_read(self): stm_start_transaction() - p1 = stm_allocate(16) - stm_read(p1) - assert stm_was_read(p1) + lp1, _ = stm_allocate(16) + stm_read(lp1) + assert stm_was_read(lp1) def test_simple_write(self): stm_start_transaction() - p1 = stm_allocate(16) - assert stm_was_written(p1) - stm_write(p1) - assert stm_was_written(p1) + lp1, _ = stm_allocate(16) + assert stm_was_written(lp1) + stm_write(lp1) + assert stm_was_written(lp1) + def test_allocate_old(self): + lp1, _ = stm_allocate_old(16) + self.switch(1) + lp2, _ = stm_allocate_old(16) + assert lp1 != lp2 + def test_write_on_old(self): - p1 = stm_allocate_old(16) - p1tl = stm_get_tl_address(p1) + lp1, p1 = stm_allocate_old(16) + stm_start_transaction() + stm_write(lp1) + p1[15] = 'a' self.switch(1) - p2 
= stm_allocate_old(16) - p2tl = stm_get_tl_address(p2) - assert p1tl != p2tl - - + stm_start_transaction() + stm_read(lp1) + tp1 = stm_get_real_address(lp1) + assert tp1[15] == '\0' From noreply at buildbot.pypy.org Wed Jan 15 11:56:26 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 15 Jan 2014 11:56:26 +0100 (CET) Subject: [pypy-commit] stmgc c7: try introducing safe-points Message-ID: <20140115105626.E292C1C338D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r612:53b7a774625c Date: 2014-01-15 11:56 +0100 http://bitbucket.org/pypy/stmgc/changeset/53b7a774625c/ Log: try introducing safe-points diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "core.h" #include "list.h" @@ -49,6 +50,7 @@ struct _thread_local1_s _tl1; int thread_num; bool running_transaction; + bool need_abort; char *thread_base; struct stm_list_s *modified_objects; struct stm_list_s *new_object_ranges; @@ -69,6 +71,7 @@ /************************************************************/ uintptr_t _stm_reserve_page(void); +void stm_abort_transaction(void); static void spin_loop(void) { @@ -115,6 +118,59 @@ #endif } +/************************************************************/ + +/* a multi-reader, single-writer lock: transactions normally take a reader + lock, so don't conflict with each other; when we need to do a global GC, + we take a writer lock to "stop the world". Note the initializer here, + which should give the correct priority for stm_possible_safe_point(). */ +static pthread_rwlock_t rwlock_shared = + PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP; + +struct tx_descriptor *in_single_thread = NULL; + +void stm_start_sharedlock(void) +{ + int err = pthread_rwlock_rdlock(&rwlock_shared); + if (err != 0) + abort(); +} + +void stm_stop_sharedlock(void) +{ + int err = pthread_rwlock_unlock(&rwlock_shared); + if (err != 0) + abort(); +} + +static void start_exclusivelock(void) +{ + int err = pthread_rwlock_wrlock(&rwlock_shared); + if (err != 0) + abort(); +} + +static void stop_exclusivelock(void) +{ + int err = pthread_rwlock_unlock(&rwlock_shared); + if (err != 0) + abort(); +} + +void _stm_start_safe_point(void) +{ + stm_stop_sharedlock(); +} + +void _stm_stop_safe_point(void) +{ + stm_start_sharedlock(); + if (_STM_TL2->need_abort) + stm_abort_transaction(); +} + + + bool _stm_was_read(object_t *obj) { read_marker_t *marker = (read_marker_t *)(((uintptr_t)obj) >> 4); @@ -227,9 +283,8 @@ return (object_t*)res; } -void stm_abort_transaction(void); -enum detect_conflicts_e { CANNOT_CONFLICT, CAN_CONFLICT }; +enum detect_conflicts_e { CANNOT_CONFLICT, CAN_CONFLICT, CHECK_CONFLICT }; static void update_to_current_version(enum detect_conflicts_e check_conflict) { @@ -265,8 +320,12 @@ write_fence(); pending_updates = NULL; - if (conflict_found_or_dont_check && check_conflict == CAN_CONFLICT) { - stm_abort_transaction(); + if (conflict_found_or_dont_check) { + if (check_conflict == CAN_CONFLICT) { + stm_abort_transaction(); + } else { /* CHECK_CONFLICT */ + _STM_TL2->need_abort = 1; + } } } @@ -532,6 +591,8 @@ { assert(!_STM_TL2->running_transaction); + stm_start_sharedlock(); + uint8_t old_rv = _STM_TL1->transaction_read_version; _STM_TL1->transaction_read_version = old_rv + 1; if (UNLIKELY(old_rv == 0xff)) @@ -557,6 +618,7 @@ _STM_TL1->jmpbufptr = jmpbufptr; _STM_TL2->running_transaction = 1; + _STM_TL2->need_abort = 0; } #if 0 @@ -580,100 +642,87 @@ void stm_stop_transaction(void) { 
assert(_STM_TL2->running_transaction); -#if 0 + stm_stop_sharedlock(); + start_exclusivelock(); - write_fence(); /* see later in this function for why */ + _STM_TL1->jmpbufptr = NULL; /* cannot abort any more */ + + /* copy modified object versions to other threads */ + pending_updates = _STM_TL2->modified_objects; + int my_thread_num = _STM_TL2->thread_num; + int other_thread_num = 1 - my_thread_num; + _stm_restore_local_state(other_thread_num); + update_to_current_version(CHECK_CONFLICT); /* sets need_abort */ + _stm_restore_local_state(my_thread_num); + + + /* /\* walk the new_object_ranges and manually copy the new objects */ + /* to the other thread's pages in the (hopefully rare) case that */ + /* the page they belong to is already unshared *\/ */ + /* long i; */ + /* struct stm_list_s *lst = _STM_TL2->new_object_ranges; */ + /* for (i = stm_list_count(lst); i > 0; ) { */ + /* i -= 2; */ + /* uintptr_t pagenum = (uintptr_t)stm_list_item(lst, i); */ + /* /\* NB. the read next line should work even against a parallel */ + /* thread, thanks to the lock acquisition we do earlier (see the */ + /* beginning of this function). Indeed, if this read returns */ + /* SHARED_PAGE, then we know that the real value in memory was */ + /* actually SHARED_PAGE at least at the time of the */ + /* acquire_lock(). It may have been modified afterwards by a */ + /* compare_and_swap() in the other thread, but then we know for */ + /* sure that the other thread is seeing the last, up-to-date */ + /* version of our data --- this is the reason of the */ + /* write_fence() just before the acquire_lock(). */ + /* *\/ */ + /* if (flag_page_private[pagenum] != SHARED_PAGE) { */ + /* object_t *range = stm_list_item(lst, i + 1); */ + /* uint16_t start, stop; */ + /* FROM_RANGE(start, stop, range); */ + /* update_new_objects_in_other_threads(pagenum, start, stop); */ + /* } */ + /* } */ + + /* /\* do the same for the partially-allocated pages *\/ */ + /* long j; */ + /* for (j = 2; j < LARGE_OBJECT_WORDS; j++) { */ + /* alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; */ + /* uint16_t start = alloc->start; */ + /* uint16_t cur = (uintptr_t)alloc->next; */ - if (leader_thread_num != _STM_TL2->thread_num) { - /* non-leader thread */ - if (global_history != NULL) { - update_to_current_version(CAN_CONFLICT); - assert(global_history == NULL); - } + /* if (start == cur) { */ + /* /\* nothing to do: this page (or fraction thereof) was left */ + /* empty by the previous transaction, and starts empty as */ + /* well in the new transaction. 'flag_partial_page' is */ + /* unchanged. *\/ */ + /* } */ + /* else { */ + /* uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; */ + /* /\* for the new transaction, it will start here: *\/ */ + /* alloc->start = cur; */ - /* steal leadership now */ - leader_thread_num = _STM_TL2->thread_num; - } + /* if (alloc->flag_partial_page) { */ + /* if (flag_page_private[pagenum] != SHARED_PAGE) { */ + /* update_new_objects_in_other_threads(pagenum, start, cur); */ + /* } */ + /* } */ + /* else { */ + /* /\* we can skip checking flag_page_private[] in non-debug */ + /* builds, because the whole page can only contain */ + /* objects made by the just-finished transaction. *\/ */ + /* assert(flag_page_private[pagenum] == SHARED_PAGE); */ - /* now we are the leader thread. 
the leader can always commit */ - _STM_TL1->jmpbufptr = NULL; /* cannot abort any more */ - undo_log_current = undo_log_pages; /* throw away the content */ + /* /\* the next transaction will start with this page */ + /* containing objects that are now committed, so */ + /* we need to set this flag now *\/ */ + /* alloc->flag_partial_page = true; */ + /* } */ + /* } */ + /* } */ - /* add these objects to the global_history */ - _STM_TL2->modified_objects->nextlist = global_history; - global_history = _STM_TL2->modified_objects; - _STM_TL2->modified_objects = stm_list_create(); - - uint16_t wv = _STM_TL1->transaction_write_version; - if (gh_write_version_first < wv) gh_write_version_first = wv; - - /* walk the new_object_ranges and manually copy the new objects - to the other thread's pages in the (hopefully rare) case that - the page they belong to is already unshared */ - long i; - struct stm_list_s *lst = _STM_TL2->new_object_ranges; - for (i = stm_list_count(lst); i > 0; ) { - i -= 2; - uintptr_t pagenum = (uintptr_t)stm_list_item(lst, i); - - /* NB. the read next line should work even against a parallel - thread, thanks to the lock acquisition we do earlier (see the - beginning of this function). Indeed, if this read returns - SHARED_PAGE, then we know that the real value in memory was - actually SHARED_PAGE at least at the time of the - acquire_lock(). It may have been modified afterwards by a - compare_and_swap() in the other thread, but then we know for - sure that the other thread is seeing the last, up-to-date - version of our data --- this is the reason of the - write_fence() just before the acquire_lock(). - */ - if (flag_page_private[pagenum] != SHARED_PAGE) { - object_t *range = stm_list_item(lst, i + 1); - uint16_t start, stop; - FROM_RANGE(start, stop, range); - update_new_objects_in_other_threads(pagenum, start, stop); - } - } - - /* do the same for the partially-allocated pages */ - long j; - for (j = 2; j < LARGE_OBJECT_WORDS; j++) { - alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; - uint16_t start = alloc->start; - uint16_t cur = (uintptr_t)alloc->next; - - if (start == cur) { - /* nothing to do: this page (or fraction thereof) was left - empty by the previous transaction, and starts empty as - well in the new transaction. 'flag_partial_page' is - unchanged. */ - } - else { - uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; - /* for the new transaction, it will start here: */ - alloc->start = cur; - - if (alloc->flag_partial_page) { - if (flag_page_private[pagenum] != SHARED_PAGE) { - update_new_objects_in_other_threads(pagenum, start, cur); - } - } - else { - /* we can skip checking flag_page_private[] in non-debug - builds, because the whole page can only contain - objects made by the just-finished transaction. 
*/ - assert(flag_page_private[pagenum] == SHARED_PAGE); - - /* the next transaction will start with this page - containing objects that are now committed, so - we need to set this flag now */ - alloc->flag_partial_page = true; - } - } - } -#endif _STM_TL2->running_transaction = 0; + stop_exclusivelock(); } void stm_abort_transaction(void) @@ -686,10 +735,11 @@ uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; alloc->next -= num_allocated; } - stm_list_clear(_STM_TL2->new_object_ranges); + /* stm_list_clear(_STM_TL2->new_object_ranges); */ stm_list_clear(_STM_TL2->modified_objects); assert(_STM_TL1->jmpbufptr != NULL); assert(_STM_TL1->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ _STM_TL2->running_transaction = 0; + stm_stop_sharedlock(); __builtin_longjmp(*_STM_TL1->jmpbufptr, 1); } diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -98,4 +98,10 @@ bool _stm_is_in_nursery(char *ptr); object_t *_stm_allocate_old(size_t size); + +void _stm_start_safe_point(void); +void _stm_stop_safe_point(void); #endif + + + diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -50,6 +50,10 @@ bool _stm_is_in_nursery(char *ptr); object_t *_stm_allocate_old(size_t size); +void _stm_start_safe_point(void); +void _stm_stop_safe_point(void); +bool _stm_check_stop_safe_point(void); + void *memset(void *s, int c, size_t n); """) @@ -65,7 +69,7 @@ bool _stm_stop_transaction(void) { jmpbufptr_t here; - if (__builtin_setjmp(here) == 0) { + if (__builtin_setjmp(here) == 0) { // returned directly assert(_STM_TL1->jmpbufptr == (jmpbufptr_t*)-1); _STM_TL1->jmpbufptr = &here; stm_stop_transaction(); @@ -76,6 +80,20 @@ return 1; } +bool _stm_check_stop_safe_point(void) { + jmpbufptr_t here; + if (__builtin_setjmp(here) == 0) { // returned directly + assert(_STM_TL1->jmpbufptr == (jmpbufptr_t*)-1); + _STM_TL1->jmpbufptr = &here; + _stm_stop_safe_point(); + _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + return 0; + } + _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + return 1; +} + + ''', sources=source_files, define_macros=[('STM_TESTS', '1')], undef_macros=['NDEBUG'], @@ -123,6 +141,16 @@ else: assert res == 0 +def stm_start_safe_point(): + lib._stm_start_safe_point() + +def stm_stop_safe_point(expected_conflict=False): + res = lib._stm_check_stop_safe_point() + if expected_conflict: + assert res == 1 + else: + assert res == 0 + class BaseTest(object): diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -25,10 +25,12 @@ def test_transaction_start_stop(self): stm_start_transaction() + stm_start_safe_point() self.switch(1) stm_start_transaction() stm_stop_transaction() self.switch(0) + stm_stop_safe_point() stm_stop_transaction() def test_simple_read(self): @@ -54,52 +56,85 @@ lp1, p1 = stm_allocate_old(16) stm_start_transaction() stm_write(lp1) + assert stm_was_written(lp1) p1[15] = 'a' self.switch(1) stm_start_transaction() stm_read(lp1) + assert stm_was_read(lp1) tp1 = stm_get_real_address(lp1) assert tp1[15] == '\0' + def test_read_write_1(self): + lp1, p1 = stm_allocate_old(16) + p1[8] = 'a' + stm_start_transaction() + stm_stop_transaction() + # + self.switch(1) + stm_start_transaction() + stm_write(lp1) + p1 = stm_get_real_address(lp1) + assert p1[8] == 'a' + p1[8] = 'b' + stm_start_safe_point() + # + self.switch(0) + stm_start_transaction() + stm_read(lp1) + p1 = stm_get_real_address(lp1) + assert p1[8] == 'a' + stm_start_safe_point() + # + self.switch(1) + 
stm_stop_safe_point() + stm_stop_transaction(False) + # + self.switch(0) + stm_stop_safe_point(True) # detects rw conflict - - def test_read_write_1(self): + + def test_read_write_2(self): stm_start_transaction() - p1 = stm_allocate(16) + lp1, p1 = stm_allocate(16) p1[8] = 'a' stm_stop_transaction(False) # - self.switch("sub1") + self.switch(1) stm_start_transaction() - stm_write(p1) + stm_write(lp1) + p1 = stm_get_real_address(lp1) assert p1[8] == 'a' p1[8] = 'b' # - self.switch("main") + self.switch(0) stm_start_transaction() - stm_read(p1) + stm_read(lp1) + p1 = stm_get_real_address(lp1) assert p1[8] == 'a' # - self.switch("sub1") + self.switch(1) stm_stop_transaction(False) # - self.switch("main") + self.switch(0) + p1 = stm_get_real_address(lp1) assert p1[8] == 'a' + def test_start_transaction_updates(self): stm_start_transaction() p1 = stm_allocate(16) p1[8] = 'a' stm_stop_transaction(False) # - self.switch("sub1") + self.switch(1) stm_start_transaction() stm_write(p1) assert p1[8] == 'a' p1[8] = 'b' stm_stop_transaction(False) # - self.switch("main") + self.switch(0) assert p1[8] == 'a' stm_start_transaction() assert p1[8] == 'b' @@ -107,11 +142,11 @@ def test_resolve_no_conflict_empty(self): stm_start_transaction() # - self.switch("sub1") + self.switch(1) stm_start_transaction() stm_stop_transaction(False) # - self.switch("main") + self.switch(0) stm_stop_transaction(False) def test_resolve_no_conflict_write_only_in_already_committed(self): @@ -121,13 +156,13 @@ stm_stop_transaction(False) stm_start_transaction() # - self.switch("sub1") + self.switch(1) stm_start_transaction() stm_write(p1) p1[8] = 'b' stm_stop_transaction(False) # - self.switch("main") + self.switch(0) assert p1[8] == 'a' stm_stop_transaction(False) assert p1[8] == 'b' @@ -139,13 +174,13 @@ stm_stop_transaction(False) stm_start_transaction() # - self.switch("sub1") + self.switch(1) stm_start_transaction() stm_write(p1) p1[8] = 'b' stm_stop_transaction(False) # - self.switch("main") + self.switch(0) stm_read(p1) assert p1[8] == 'a' stm_stop_transaction(expected_conflict=True) @@ -160,13 +195,13 @@ stm_stop_transaction(False) stm_start_transaction() # - self.switch("sub1") + self.switch(1) stm_start_transaction() stm_write(p1) p1[8] = 'b' stm_stop_transaction(False) # - self.switch("main") + self.switch(0) assert p1[8] == 'a' stm_write(p1) p1[8] = 'c' @@ -184,13 +219,13 @@ stm_stop_transaction(False) stm_start_transaction() # - self.switch("sub1") + self.switch(1) stm_start_transaction() stm_write(p1) p1[8] = 'b' stm_stop_transaction(False) # - self.switch("main") + self.switch(0) stm_write(p2) p2[8] = 'C' stm_stop_transaction(False) @@ -206,14 +241,14 @@ stm_stop_transaction(False) stm_start_transaction() # - self.switch("sub1") + self.switch(1) stm_start_transaction() stm_write(p1) assert p1[8] == 'A' p1[8] = 'B' stm_stop_transaction(False) # - self.switch("main") + self.switch(0) stm_read(p2) assert p2[8] == 'a' p3 = stm_allocate(16) # goes into the same page, which is @@ -233,14 +268,14 @@ stm_stop_transaction(False) stm_start_transaction() # - self.switch("sub1") + self.switch(1) stm_start_transaction() stm_write(p1) assert p1[8] == 'A' p1[8] = 'B' stm_stop_transaction(False) # - self.switch("main") + self.switch(0) stm_write(p2) assert p2[8] == 'a' p2[8] = 'b' @@ -261,14 +296,14 @@ stm_stop_transaction(False) stm_start_transaction() # - self.switch("sub1") + self.switch(1) stm_start_transaction() stm_write(p1) assert p1[8] == 'A' p1[8] = 'B' stm_stop_transaction(False) # - self.switch("main") + self.switch(0) 
p3 = stm_allocate(16) # goes into the same page, which I will p3[8] = ':' # modify just below stm_write(p2) From noreply at buildbot.pypy.org Wed Jan 15 13:46:30 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 15 Jan 2014 13:46:30 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: remove the forcing of virtualizables when we remove the stm_transaction_break Message-ID: <20140115124630.79D7F1C051C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68677:7a522e1c61ab Date: 2014-01-15 13:45 +0100 http://bitbucket.org/pypy/pypy/changeset/7a522e1c61ab/ Log: remove the forcing of virtualizables when we remove the stm_transaction_break diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -1,10 +1,5 @@ -from rpython.jit.metainterp.history import (Const, ConstInt, BoxInt, BoxFloat, - BoxPtr, make_hashable_int) -from rpython.jit.metainterp.optimizeopt.optimizer import (Optimization, REMOVED, - CONST_0, CONST_1) +from rpython.jit.metainterp.optimizeopt.optimizer import (Optimization, ) from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method -from rpython.jit.metainterp.resoperation import (opboolinvers, opboolreflex, rop, - ResOperation) from rpython.jit.codewriter.effectinfo import EffectInfo class OptSTM(Optimization): @@ -15,36 +10,54 @@ def __init__(self): self.remove_next_gnf = False # guard_not_forced self.keep_but_ignore_gnf = False + self.cached_ops = [] def propagate_forward(self, op): dispatch_opt(self, op) + def flush_cached(self): + while self.cached_ops: + self.emit_operation(self.cached_ops.pop(0)) + + def default_emit(self, op): + self.flush_cached() + self.emit_operation(op) + def _break_wanted(self): is_loop = self.optimizer.loop.is_really_loop return self.optimizer.stm_info.get('break_wanted', is_loop) def _set_break_wanted(self, val): self.optimizer.stm_info['break_wanted'] = val + + def optimize_FORCE_TOKEN(self, op): + self.cached_ops.append(op) + + def optimize_SETFIELD_GC(self, op): + self.cached_ops.append(op) def optimize_CALL(self, op): + self.flush_cached() effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex if oopspecindex == EffectInfo.OS_JIT_STM_SHOULD_BREAK_TRANSACTION: self._set_break_wanted(False) self.emit_operation(op) - def optimize_STM_TRANSACTION_BREAK(self, op): assert not self.remove_next_gnf really_wanted = op.getarg(0).getint() if really_wanted or self._break_wanted(): + self.flush_cached() self._set_break_wanted(False) self.emit_operation(op) self.keep_but_ignore_gnf = True else: + self.cached_ops = [] self.remove_next_gnf = True def optimize_GUARD_NOT_FORCED(self, op): + self.flush_cached() if self.remove_next_gnf: self.remove_next_gnf = False else: @@ -56,7 +69,7 @@ dispatch_opt = make_dispatcher_method(OptSTM, 'optimize_', - default=OptSTM.emit_operation) + default=OptSTM.default_emit) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -191,6 +191,63 @@ """ self.optimize_loop(ops, expected, expected_preamble=preamble) + def test_remove_force_token(self): + ops = """ + [p0] + p1 = force_token() + setfield_gc(p0, p1, descr=adescr) + stm_transaction_break(0) + guard_not_forced() [] + + p2 = force_token() + setfield_gc(p0, p2, descr=adescr) + 
stm_transaction_break(0) + guard_not_forced() [] + + p3 = force_token() + setfield_gc(p0, p3, descr=adescr) + stm_transaction_break(0) + guard_not_forced() [] + + escape() + + p4 = force_token() + setfield_gc(p0, p4, descr=adescr) + stm_transaction_break(0) + guard_not_forced() [] + + p6 = force_token() # not removed! + + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + jump(p0) + """ + preamble = """ + [p0] + p1 = force_token() + setfield_gc(p0, p1, descr=adescr) + stm_transaction_break(0) + guard_not_forced() [] + + escape() + + p6 = force_token() # not removed! + + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + jump(p0) + """ + expected = """ + [p0] + escape() + + p6 = force_token() # not removed! + + i0 = call(123, descr=sbtdescr) + guard_false(i0) [] + jump(p0) + """ + self.optimize_loop(ops, expected, expected_preamble=preamble) From noreply at buildbot.pypy.org Wed Jan 15 14:09:09 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 15 Jan 2014 14:09:09 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: also define flush() to maybe be more robust Message-ID: <20140115130909.6B5331C0100@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68678:9af6ff8ea976 Date: 2014-01-15 14:08 +0100 http://bitbucket.org/pypy/pypy/changeset/9af6ff8ea976/ Log: also define flush() to maybe be more robust diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -18,6 +18,10 @@ def flush_cached(self): while self.cached_ops: self.emit_operation(self.cached_ops.pop(0)) + + def flush(self): + # just in case. it shouldn't be necessary + self.flush_cached() def default_emit(self, op): self.flush_cached() diff --git a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py --- a/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_multilabel.py @@ -31,6 +31,7 @@ if op.getopnum()==rop.LABEL] prv = 0 last_label = [] + stm_info = {} for nxt in labels + [len(loop.operations)]: assert prv != nxt operations = last_label + loop.operations[prv:nxt] @@ -42,7 +43,7 @@ operations.append(label) part.operations = operations - self._do_optimize_loop(part, None) + self._do_optimize_loop(part, None, stm_info) if part.operations[-1].getopnum() == rop.LABEL: last_label = [part.operations.pop()] else: @@ -494,7 +495,7 @@ class BaseTestOptimizerRenamingBoxes(BaseTestMultiLabel): - def _do_optimize_loop(self, loop, call_pure_results): + def _do_optimize_loop(self, loop, call_pure_results, stminfo): from rpython.jit.metainterp.optimizeopt.unroll import optimize_unroll from rpython.jit.metainterp.optimizeopt.util import args_dict from rpython.jit.metainterp.optimizeopt.pure import OptPure diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -122,7 +122,7 @@ if loop.operations[-1].getopnum() == rop.JUMP: loop.operations[-1].setdescr(token) expected = convert_old_style_to_targets(self.parse(optops), jump=True) - self._do_optimize_loop(loop, call_pure_results) + self._do_optimize_loop(loop, call_pure_results, {}) print '\n'.join([str(o) for o in loop.operations]) self.assert_equal(loop, expected) From noreply at 
buildbot.pypy.org Wed Jan 15 14:21:32 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 15 Jan 2014 14:21:32 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: more correct removal of force_tokens and setfields Message-ID: <20140115132132.1F8AD1C0100@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68679:931e649743aa Date: 2014-01-15 14:20 +0100 http://bitbucket.org/pypy/pypy/changeset/931e649743aa/ Log: more correct removal of force_tokens and setfields diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -1,6 +1,7 @@ from rpython.jit.metainterp.optimizeopt.optimizer import (Optimization, ) from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.jit.metainterp.resoperation import rop class OptSTM(Optimization): """ @@ -35,10 +36,18 @@ self.optimizer.stm_info['break_wanted'] = val def optimize_FORCE_TOKEN(self, op): + # if we have cached stuff, flush it. Not our case + self.flush_cached() self.cached_ops.append(op) def optimize_SETFIELD_GC(self, op): - self.cached_ops.append(op) + if not self.cached_ops: + # setfield not for force_token + self.emit_operation(op) + else: + assert len(self.cached_ops) == 1 + assert self.cached_ops[0].getopnum() == rop.FORCE_TOKEN + self.cached_ops.append(op) def optimize_CALL(self, op): self.flush_cached() diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_stm.py @@ -249,6 +249,34 @@ """ self.optimize_loop(ops, expected, expected_preamble=preamble) + def test_not_remove_setfield(self): + ops = """ + [p0, p1] + setfield_gc(p0, p1, descr=adescr) + stm_transaction_break(0) + + p2 = force_token() + p3 = force_token() + jump(p0, p1) + """ + preamble = """ + [p0, p1] + setfield_gc(p0, p1, descr=adescr) + stm_transaction_break(0) + + p2 = force_token() + p3 = force_token() + jump(p0, p1) + """ + expected = """ + [p0, p1] + p2 = force_token() + p3 = force_token() + + setfield_gc(p0, p1, descr=adescr) # moved here by other stuff... 
+ jump(p0, p1) + """ + self.optimize_loop(ops, expected, expected_preamble=preamble) From noreply at buildbot.pypy.org Wed Jan 15 14:24:14 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 15 Jan 2014 14:24:14 +0100 (CET) Subject: [pypy-commit] pypy default: add the possibility to download -notjit versions Message-ID: <20140115132414.AB61E1C0100@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r68680:6e4d0f5f1039 Date: 2014-01-15 14:16 +0100 http://bitbucket.org/pypy/pypy/changeset/6e4d0f5f1039/ Log: add the possibility to download -notjit versions diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -26,7 +26,12 @@ if branch == 'default': branch = 'trunk' -filename = 'pypy-c-jit-latest-%s.tar.bz2' % arch +if '--nojit' in sys.argv: + kind = 'nojit' +else: + kind = 'jit' + +filename = 'pypy-c-%s-latest-%s.tar.bz2' % (kind, arch) url = 'http://buildbot.pypy.org/nightly/%s/%s' % (branch, filename) tmp = py.path.local.mkdtemp() mydir = tmp.chdir() From noreply at buildbot.pypy.org Wed Jan 15 14:24:15 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 15 Jan 2014 14:24:15 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140115132415.F16291C0100@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r68681:ce6ebed2b074 Date: 2014-01-15 14:23 +0100 http://bitbucket.org/pypy/pypy/changeset/ce6ebed2b074/ Log: merge heads diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -26,7 +26,12 @@ if branch == 'default': branch = 'trunk' -filename = 'pypy-c-jit-latest-%s.tar.bz2' % arch +if '--nojit' in sys.argv: + kind = 'nojit' +else: + kind = 'jit' + +filename = 'pypy-c-%s-latest-%s.tar.bz2' % (kind, arch) url = 'http://buildbot.pypy.org/nightly/%s/%s' % (branch, filename) tmp = py.path.local.mkdtemp() mydir = tmp.chdir() From noreply at buildbot.pypy.org Wed Jan 15 14:33:08 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 15 Jan 2014 14:33:08 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: updated comment Message-ID: <20140115133308.9FF241C0100@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68682:f8476923f8b9 Date: 2014-01-15 14:32 +0100 http://bitbucket.org/pypy/pypy/changeset/f8476923f8b9/ Log: updated comment diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ b/rpython/jit/metainterp/optimizeopt/stm.py @@ -5,8 +5,16 @@ class OptSTM(Optimization): """ - For now only changes some guarded transaction breaks - to unconditional ones. + This step removes a lot of uncecessary transaction_breaks (TBs) + emitted by pyjitpl from traces. We only want to keep these + unconditional TBs after external calls (identified by GUARD_NOT_FORCED) + because they are likely to return as inevitable transactions which + we want to break ASAP. + Guarded TBs are left in place, as they represent app-level loops + and are likely points to break between atomic transactions. + + The cached_ops is here to remove the virtualizable-forcing added + by pyjitpl before unconditional TBs. See tests. 
""" def __init__(self): self.remove_next_gnf = False # guard_not_forced From noreply at buildbot.pypy.org Wed Jan 15 14:39:28 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 15 Jan 2014 14:39:28 +0100 (CET) Subject: [pypy-commit] pypy default: add the --lldebug0 option, which is like lldebug but in addition compiles C files with -O0, useful when gdb keeps on saying '' when you want to look at a variable Message-ID: <20140115133928.BE7531C051C@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r68683:4987a52c6a7b Date: 2014-01-15 14:38 +0100 http://bitbucket.org/pypy/pypy/changeset/4987a52c6a7b/ Log: add the --lldebug0 option, which is like lldebug but in addition compiles C files with -O0, useful when gdb keeps on saying '' when you want to look at a variable diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -177,6 +177,9 @@ BoolOption("lldebug", "If true, makes an lldebug build", default=False, cmdline="--lldebug"), + BoolOption("lldebug0", + "If true, makes an lldebug0 build", default=False, + cmdline="--lldebug0"), OptionDescription("backendopt", "Backend Optimization Options", [ # control inlining diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -364,6 +364,8 @@ extra_opts += ['-j', str(self.config.translation.make_jobs)] if self.config.translation.lldebug: extra_opts += ["lldebug"] + elif self.config.translation.lldebug0: + extra_opts += ["lldebug0"] self.translator.platform.execute_makefile(self.targetdir, extra_opts) if shared: @@ -398,6 +400,7 @@ ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPPY_USE_LINUXMEMCHK" debug_target'), ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'), ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), + ('lldebug0','', '$(MAKE) CFLAGS="-O0 $(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), ('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(TARGET)'), ] if self.has_profopt(): From noreply at buildbot.pypy.org Wed Jan 15 17:39:48 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 15 Jan 2014 17:39:48 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: Add failing test. Message-ID: <20140115163948.5508B1C0100@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r68685:5625a1371090 Date: 2014-01-15 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/5625a1371090/ Log: Add failing test. 
diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -466,3 +466,7 @@ def test_partition_return_copy(self): b = bytearray(b'foo') assert b.partition(b'x')[0] is not b + + def test_split_whitespace(self): + b = bytearray(b'\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F') + assert b.split() == [b'\x1c\x1d\x1e\x1f'] From noreply at buildbot.pypy.org Wed Jan 15 17:39:47 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 15 Jan 2014 17:39:47 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20140115163947.1A1C11C0100@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r68684:0dcdafb5d639 Date: 2014-01-15 13:59 +0100 http://bitbucket.org/pypy/pypy/changeset/0dcdafb5d639/ Log: hg merge default diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: diff --git a/pypy/module/_rawffi/__init__.py b/pypy/module/_rawffi/__init__.py --- a/pypy/module/_rawffi/__init__.py +++ b/pypy/module/_rawffi/__init__.py @@ -19,6 +19,7 @@ 'wcharp2unicode' : 'interp_rawffi.wcharp2unicode', 'charp2rawstring' : 'interp_rawffi.charp2rawstring', 'wcharp2rawunicode' : 'interp_rawffi.wcharp2rawunicode', + 'rawstring2charp' : 'interp_rawffi.rawstring2charp', 'CallbackPtr' : 'callback.W_CallbackPtr', '_num_of_allocated_objects' : 'tracker.num_of_allocated_objects', 'get_libc' : 'interp_rawffi.get_libc', diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -579,6 +579,13 @@ s = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, address), maxlength) return space.wrap(s) + at unwrap_spec(address=r_uint, newcontent=str) +def rawstring2charp(space, address, newcontent): + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + array = rffi.cast(rffi.CCHARP, address) + copy_string_to_raw(llstr(newcontent), array, 0, len(newcontent)) + if _MS_WINDOWS: @unwrap_spec(code=int) def FormatError(space, code): diff --git a/pypy/module/_rawffi/test/test__rawffi.py 
b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -323,6 +323,14 @@ assert res == u'xx' a.free() + def test_rawstring2charp(self): + import _rawffi + A = _rawffi.Array('c') + a = A(10, 'x'*10) + _rawffi.rawstring2charp(a.buffer, "foobar") + assert ''.join([a[i] for i in range(10)]) == "foobarxxxx" + a.free() + def test_raw_callable(self): import _rawffi lib = _rawffi.CDLL(self.lib_name) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/support.py b/pypy/module/test_lib_pypy/ctypes_tests/support.py --- a/pypy/module/test_lib_pypy/ctypes_tests/support.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/support.py @@ -31,8 +31,10 @@ import gc for _ in range(4): gc.collect() - cls.old_num = _rawffi._num_of_allocated_objects() - + try: + cls.old_num = _rawffi._num_of_allocated_objects() + except RuntimeError: + pass def teardown_class(cls): if sys.pypy_translation_info['translation.gc'] == 'boehm': diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -38,9 +38,9 @@ if len(names) == 1: return val[names[0]] elif len(names) == 0: - raise KeyError, "cannot find field *%s" % suffix + raise KeyError("cannot find field *%s" % suffix) else: - raise KeyError, "too many matching fields: %s" % ', '.join(names) + raise KeyError("too many matching fields: %s" % ', '.join(names)) def lookup(val, suffix): """ @@ -76,10 +76,14 @@ def invoke(self, arg, from_tty): # some magic code to automatically reload the python file while developing from pypy.tool import gdb_pypy - reload(gdb_pypy) + try: + reload(gdb_pypy) + except: + import imp + imp.reload(gdb_pypy) gdb_pypy.RPyType.prog2typeids = self.prog2typeids # persist the cache self.__class__ = gdb_pypy.RPyType - print self.do_invoke(arg, from_tty) + print (self.do_invoke(arg, from_tty).decode('latin-1')) def do_invoke(self, arg, from_tty): try: @@ -88,7 +92,7 @@ obj = self.gdb.parse_and_eval(arg) hdr = lookup(obj, '_gcheader') tid = hdr['h_tid'] - if sys.maxint < 2**32: + if sys.maxsize < 2**32: offset = tid & 0xFFFF # 32bit else: offset = tid & 0xFFFFFFFF # 64bit @@ -147,13 +151,13 @@ if linenum in self.line2offset: return self.line2offset[linenum] line = self.lines[linenum] - member, descr = map(str.strip, line.split(None, 1)) - if sys.maxint < 2**32: + member, descr = [x.strip() for x in line.split(None, 1)] + if sys.maxsize < 2**32: TIDT = "int*" else: TIDT = "char*" expr = ("((%s)(&pypy_g_typeinfo.%s)) - (%s)&pypy_g_typeinfo" - % (TIDT, member, TIDT)) + % (TIDT, member.decode("latin-1"), TIDT)) offset = int(self.gdb.parse_and_eval(expr)) self.line2offset[linenum] = offset self.offset2descr[offset] = descr @@ -164,7 +168,7 @@ # binary search through the lines, asking gdb to parse stuff lazily if offset in self.offset2descr: return self.offset2descr[offset] - if not (0 < offset < sys.maxint): + if not (0 < offset < sys.maxsize): return None linerange = (0, len(self.lines)) while linerange[0] < linerange[1]: diff --git a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py --- a/rpython/jit/metainterp/optimizeopt/earlyforce.py +++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py @@ -28,8 +28,5 @@ value.force_box(self) self.emit_operation(op) - def new(self): - return OptEarlyForce() - def setup(self): self.optimizer.optearlyforce = self diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- 
a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -184,9 +184,6 @@ self.postponed_op = None self.next_optimization.propagate_forward(postponed_op) - def new(self): - return OptHeap() - def produce_potential_short_preamble_ops(self, sb): descrkeys = self.cached_fields.keys() if not we_are_translated(): diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -13,9 +13,6 @@ """Keeps track of the bounds placed on integers by guards and remove redundant guards""" - def new(self): - return OptIntBounds() - def propagate_forward(self, op): dispatch_opt(self, op) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -323,10 +323,6 @@ def force_at_end_of_preamble(self): pass - # It is too late to force stuff here, it must be done in force_at_end_of_preamble - def new(self): - raise NotImplementedError - # Called after last operation has been propagated to flush out any posponed ops def flush(self): pass @@ -390,16 +386,6 @@ for o in self.optimizations: o.flush() - def new(self): - new = Optimizer(self.metainterp_sd, self.loop) - return self._new(new) - - def _new(self, new): - optimizations = [o.new() for o in self.optimizations] - new.set_optimizations(optimizations) - new.quasi_immutable_deps = self.quasi_immutable_deps - return new - def produce_potential_short_preamble_ops(self, sb): for opt in self.optimizations: opt.produce_potential_short_preamble_ops(sb) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -85,10 +85,6 @@ def flush(self): assert self.postponed_op is None - def new(self): - assert self.postponed_op is None - return OptPure() - def setup(self): self.optimizer.optpure = self diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -20,9 +20,6 @@ self.loop_invariant_results = {} self.loop_invariant_producer = {} - def new(self): - return OptRewrite() - def produce_potential_short_preamble_ops(self, sb): for op in self.loop_invariant_producer.values(): sb.add_potential(op) diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -41,10 +41,6 @@ self.emitted_guards += 1 # FIXME: can we use counter in self._emit_operation? self._emit_operation(op) - def new(self): - new = UnrollableOptimizer(self.metainterp_sd, self.loop) - return self._new(new) - class UnrollOptimizer(Optimization): """Unroll the loop into two iterations. 
The first one will diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -497,9 +497,6 @@ _last_guard_not_forced_2 = None - def new(self): - return OptVirtualize() - def make_virtual(self, known_class, box, source_op=None): vvalue = VirtualValue(self.optimizer.cpu, known_class, box, source_op) self.make_equal_to(box, vvalue) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -388,8 +388,6 @@ class OptString(optimizer.Optimization): "Handling of strings and unicodes." - def new(self): - return OptString() def make_vstring_plain(self, box, source_op, mode): vvalue = VStringPlainValue(box, source_op, mode) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -624,6 +624,7 @@ i += 1 return count + @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_find(s1, s2, start, end): if start < 0: start = 0 @@ -638,6 +639,7 @@ return LLHelpers.ll_search(s1, s2, start, end, FAST_FIND) + @signature(types.any(), types.any(), types.int(), types.int(), returns=types.int()) def ll_rfind(s1, s2, start, end): if start < 0: start = 0 diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -132,11 +132,6 @@ block.exitswitch = None return link -def split_block_at_start(annotator, block): - # split before the first op, preserve order and inputargs - # in the second block! - return split_block(annotator, block, 0, _forcelink=block.inputargs) - def call_initial_function(translator, initial_func, annhelper=None): """Before the program starts, call 'initial_func()'.""" from rpython.annotator import model as annmodel From noreply at buildbot.pypy.org Wed Jan 15 18:49:11 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 15 Jan 2014 18:49:11 +0100 (CET) Subject: [pypy-commit] pypy default: Don't allow mixing of SomeChar and SomeUnicodeCodePoint. Message-ID: <20140115174911.A053B1C0100@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68686:61380dc9c4fb Date: 2014-01-15 18:48 +0100 http://bitbucket.org/pypy/pypy/changeset/61380dc9c4fb/ Log: Don't allow mixing of SomeChar and SomeUnicodeCodePoint. 
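A quick illustration of the change below (hypothetical RPython-style snippet,
not taken from the repository): removing the SomeChar/SomeUnicodeCodePoint
union means the annotator no longer widens a byte char to a unicode char when
the two meet, so code like this is flagged instead of silently going unicode:

    def pick(use_unicode):
        if use_unicode:
            c = u'a'    # annotated as SomeUnicodeCodePoint
        else:
            c = 'a'     # annotated as SomeChar
        return c        # the two branches no longer unify to a unicode char
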
diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -435,11 +435,6 @@ return SomeChar(no_nul=no_nul) -class __extend__(pairtype(SomeChar, SomeUnicodeCodePoint), - pairtype(SomeUnicodeCodePoint, SomeChar)): - def union((uchr1, uchr2)): - return SomeUnicodeCodePoint() - class __extend__(pairtype(SomeUnicodeCodePoint, SomeUnicodeCodePoint)): def union((uchr1, uchr2)): return SomeUnicodeCodePoint() From noreply at buildbot.pypy.org Wed Jan 15 18:50:22 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 15 Jan 2014 18:50:22 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20140115175022.381EF1C0100@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r68687:eb520c9ebbd8 Date: 2014-01-15 18:50 +0100 http://bitbucket.org/pypy/pypy/changeset/eb520c9ebbd8/ Log: hg merge default diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -26,7 +26,12 @@ if branch == 'default': branch = 'trunk' -filename = 'pypy-c-jit-latest-%s.tar.bz2' % arch +if '--nojit' in sys.argv: + kind = 'nojit' +else: + kind = 'jit' + +filename = 'pypy-c-%s-latest-%s.tar.bz2' % (kind, arch) url = 'http://buildbot.pypy.org/nightly/%s/%s' % (branch, filename) tmp = py.path.local.mkdtemp() mydir = tmp.chdir() diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -435,11 +435,6 @@ return SomeChar(no_nul=no_nul) -class __extend__(pairtype(SomeChar, SomeUnicodeCodePoint), - pairtype(SomeUnicodeCodePoint, SomeChar)): - def union((uchr1, uchr2)): - return SomeUnicodeCodePoint() - class __extend__(pairtype(SomeUnicodeCodePoint, SomeUnicodeCodePoint)): def union((uchr1, uchr2)): return SomeUnicodeCodePoint() diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -177,6 +177,9 @@ BoolOption("lldebug", "If true, makes an lldebug build", default=False, cmdline="--lldebug"), + BoolOption("lldebug0", + "If true, makes an lldebug0 build", default=False, + cmdline="--lldebug0"), OptionDescription("backendopt", "Backend Optimization Options", [ # control inlining diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -364,6 +364,8 @@ extra_opts += ['-j', str(self.config.translation.make_jobs)] if self.config.translation.lldebug: extra_opts += ["lldebug"] + elif self.config.translation.lldebug0: + extra_opts += ["lldebug0"] self.translator.platform.execute_makefile(self.targetdir, extra_opts) if shared: @@ -398,6 +400,7 @@ ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPPY_USE_LINUXMEMCHK" debug_target'), ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'), ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), + ('lldebug0','', '$(MAKE) CFLAGS="-O0 $(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), ('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(TARGET)'), ] if self.has_profopt(): From noreply at buildbot.pypy.org Wed Jan 15 20:56:38 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 15 Jan 2014 20:56:38 +0100 (CET) Subject: [pypy-commit] pypy 
refactor-str-types: (antocuni, mjacob, arigo): we need to specialize _isspace too, else 'char' is promoted to unicode and we always hit the unicode path Message-ID: <20140115195638.C09AC1C0500@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: refactor-str-types Changeset: r68688:4316d9dd77e0 Date: 2014-01-15 18:46 +0100 http://bitbucket.org/pypy/pypy/changeset/4316d9dd77e0/ Log: (antocuni, mjacob, arigo): we need to specialize _isspace too, else 'char' is promoted to unicode and we always hit the unicode path diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -14,6 +14,7 @@ # -------------- public API for string functions ----------------------- + at specialize.argtype(0) def _isspace(char): if isinstance(char, str): return char.isspace() From noreply at buildbot.pypy.org Wed Jan 15 21:14:48 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 15 Jan 2014 21:14:48 +0100 (CET) Subject: [pypy-commit] pypy default: Backed out changeset 61380dc9c4fb Message-ID: <20140115201448.6A07D1C33CF@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68689:14ff99b7167b Date: 2014-01-15 21:14 +0100 http://bitbucket.org/pypy/pypy/changeset/14ff99b7167b/ Log: Backed out changeset 61380dc9c4fb diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -435,6 +435,11 @@ return SomeChar(no_nul=no_nul) +class __extend__(pairtype(SomeChar, SomeUnicodeCodePoint), + pairtype(SomeUnicodeCodePoint, SomeChar)): + def union((uchr1, uchr2)): + return SomeUnicodeCodePoint() + class __extend__(pairtype(SomeUnicodeCodePoint, SomeUnicodeCodePoint)): def union((uchr1, uchr2)): return SomeUnicodeCodePoint() From noreply at buildbot.pypy.org Wed Jan 15 21:49:13 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 15 Jan 2014 21:49:13 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: completeness Message-ID: <20140115204913.D88A91C0500@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68690:756ea867e0d1 Date: 2014-01-15 21:48 +0100 http://bitbucket.org/pypy/pypy/changeset/756ea867e0d1/ Log: completeness diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -95,6 +95,7 @@ @dont_look_inside def before_external_call(): if we_are_translated(): + # this tries to commit, or becomes inevitable if atomic llop.stm_commit_transaction(lltype.Void) before_external_call._dont_reach_me_in_del_ = True before_external_call._transaction_break_ = True @@ -102,6 +103,7 @@ @dont_look_inside def after_external_call(): if we_are_translated(): + # starts a new transaction if we are not atomic already llop.stm_begin_inevitable_transaction(lltype.Void) after_external_call._dont_reach_me_in_del_ = True after_external_call._transaction_break_ = True diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py --- a/rpython/translator/stm/breakfinder.py +++ b/rpython/translator/stm/breakfinder.py @@ -8,6 +8,8 @@ 'stm_partial_commit_and_resume_other_threads', # new priv_revision 'jit_assembler_call', 'jit_stm_transaction_break_point', + 'stm_enter_callback_call', + 'stm_leave_callback_call', ]) diff --git a/rpython/translator/stm/test/test_jitdriver.py b/rpython/translator/stm/test/test_jitdriver.py --- a/rpython/translator/stm/test/test_jitdriver.py +++ b/rpython/translator/stm/test/test_jitdriver.py @@ -44,11 +44,15 @@ class X: 
counter = 10 x = X() - myjitdriver = JitDriver(greens=[], reds=[]) + myjitdriver = JitDriver(greens=[], reds=[], + stm_do_transaction_breaks=True) def f1(): while x.counter > 0: myjitdriver.jit_merge_point() + if rstm.jit_stm_should_break_transaction(False): + rstm.jit_stm_transaction_break_point() + x.counter -= 1 res = self.interpret(f1, []) From noreply at buildbot.pypy.org Wed Jan 15 22:21:18 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 15 Jan 2014 22:21:18 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: fix some stmrewrite things (mostly tests) Message-ID: <20140115212118.AB6471C33CF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68691:d2b0b0428c8c Date: 2014-01-15 22:20 +0100 http://bitbucket.org/pypy/pypy/changeset/d2b0b0428c8c/ Log: fix some stmrewrite things (mostly tests) diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -9,6 +9,12 @@ debug_print) from rpython.jit.codewriter.effectinfo import EffectInfo +### XXX: +### we changed some 'x2I' barriers to 'x2R' since +### obj initialization may happen in 2 different transactions. +### check and fix this assumption + + # # STM Support # ----------- @@ -83,7 +89,8 @@ # e.g. getting inst_intval of a W_IntObject that is # currently only a stub needs to first resolve to a # real object - self.handle_category_operations(op, 'I') + # XXX: 'I' enough? + self.handle_category_operations(op, 'R') continue # ---------- pure operations, guards ---------- if op.is_always_pure() or op.is_guard() or op.is_ovf(): @@ -315,7 +322,8 @@ lst[1] = self.gen_barrier(lst[1], 'W') op = op.copy_and_change(op.getopnum(), args=lst) # then an immutable read barrier the source string - self.handle_category_operations(op, 'I') + # XXX: 'I' enough? 
+ self.handle_category_operations(op, 'R') @specialize.arg(1) def _do_stm_call(self, funcname, args, result): diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -68,6 +68,7 @@ U = lltype.GcStruct('U', ('x', lltype.Signed)) for inev in (True, False): class fakeextrainfo: + oopspecindex = 0 def call_needs_inevitable(self): return inev @@ -82,7 +83,6 @@ [] %s call(123, descr=cd) - stm_transaction_break(1) jump() """ % ("$INEV" if inev else "",), cd=calldescr) @@ -95,7 +95,7 @@ [p1, p2] cond_call_stm_b(p1, descr=A2Wdescr) setfield_gc(p1, p2, descr=tzdescr) - stm_transaction_break(1) + jump() """) @@ -111,7 +111,7 @@ p3 = same_as(ConstPtr(t)) cond_call_stm_b(p3, descr=A2Wdescr) setfield_gc(p3, p2, descr=tzdescr) - stm_transaction_break(1) + jump() """, t=NULL) @@ -136,7 +136,7 @@ setfield_gc(p2, p0, descr=tzdescr) cond_call_stm_b(p1, descr=Q2Rdescr) p4 = getfield_gc(p1, descr=tzdescr) - stm_transaction_break(1) + jump() """, t=NULL) @@ -159,7 +159,7 @@ setfield_gc(p2, p0, descr=tzdescr) cond_call_stm_b(p1, descr=Q2Rdescr) p5 = getfield_gc(p1, descr=tzdescr) - stm_transaction_break(1) + jump() """) @@ -183,7 +183,7 @@ setfield_gc(p2, p0, descr=tzdescr) cond_call_stm_b(p1, descr=Q2Rdescr) p4 = getfield_gc(p1, descr=tzdescr) - stm_transaction_break(1) + jump() """) @@ -210,7 +210,7 @@ setfield_gc(p2, p0, descr=tzdescr) cond_call_stm_b(p1, descr=Q2Rdescr) p4 = getfield_gc(p1, descr=tzdescr) - stm_transaction_break(1) + jump() """ for op, descr in ops: @@ -233,7 +233,7 @@ stm_set_revision_gc(p2, descr=revdescr) cond_call_stm_b(p3, descr=V2Wdescr) setfield_gc(p3, p1, descr=tzdescr) - stm_transaction_break(1) + jump(p2) """) @@ -252,7 +252,7 @@ setfield_gc(p3, %(tdescr.tid)d, descr=tiddescr) stm_set_revision_gc(p3, descr=revdescr) p4 = getfield_gc(p1, descr=tzdescr) - stm_transaction_break(1) + jump(p2) """) @@ -268,7 +268,7 @@ setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) stm_set_revision_gc(p2, descr=revdescr) setfield_gc(p2, p1, descr=tzdescr) - stm_transaction_break(1) + jump(p2) """) @@ -284,7 +284,7 @@ setfield_gc(p1, p2, descr=tzdescr) cond_call_stm_b(p3, descr=A2Wdescr) setfield_gc(p3, p4, descr=tzdescr) - stm_transaction_break(1) + jump() """) @@ -299,7 +299,7 @@ cond_call_stm_b(p1, descr=A2Wdescr) setfield_gc(p1, p2, descr=tzdescr) setfield_gc(p1, i3, descr=tydescr) - stm_transaction_break(1) + jump() """) @@ -317,18 +317,19 @@ label(p1, i3) cond_call_stm_b(p1, descr=A2Vdescr) # noptr setfield_gc(p1, i3, descr=tydescr) - stm_transaction_break(1) + jump(p1) """) def test_remove_debug_merge_point(self): + py.test.skip("why??") self.check_rewrite(""" [i1, i2] debug_merge_point(i1, i2) jump() """, """ [i1, i2] - stm_transaction_break(1) + jump() """) @@ -361,7 +362,7 @@ [p1] cond_call_stm_b(p1, descr=A2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) - stm_transaction_break(1) + jump(p2) """) @@ -377,7 +378,7 @@ p3 = same_as(ConstPtr(t)) cond_call_stm_b(p3, descr=A2Rdescr) p2 = getfield_gc(p3, descr=tzdescr) - stm_transaction_break(1) + jump(p2) """, t=NULL) # XXX could do better: G2Rdescr @@ -391,7 +392,7 @@ [p1, i2] cond_call_stm_b(p1, descr=A2Rdescr) i3 = getarrayitem_gc(p1, i2, descr=adescr) - stm_transaction_break(1) + jump(i3) """) @@ -404,7 +405,7 @@ [p1, i2] cond_call_stm_b(p1, descr=A2Rdescr) i3 = getinteriorfield_gc(p1, i2, descr=intzdescr) - stm_transaction_break(1) + jump(i3) """) @@ -419,7 
+420,7 @@ cond_call_stm_b(p1, descr=A2Rdescr) p2 = getfield_gc(p1, descr=tzdescr) i2 = getfield_gc(p1, descr=tydescr) - stm_transaction_break(1) + jump(p2, i2) """) @@ -435,7 +436,7 @@ p2 = getfield_gc(p1, descr=tzdescr) cond_call_stm_b(p2, descr=A2Rdescr) i2 = getfield_gc(p2, descr=tydescr) - stm_transaction_break(1) + jump(p2, i2) """) @@ -455,7 +456,7 @@ i2 = int_add(i1, 1) cond_call_stm_b(p1, descr=A2Vdescr) setfield_gc(p1, i2, descr=tydescr) - stm_transaction_break(1) + jump(p1) """) @@ -470,7 +471,7 @@ cond_call_stm_b(p1, descr=A2Vdescr) setfield_gc(p1, 123, descr=tydescr) p2 = getfield_gc(p1, descr=tzdescr) - stm_transaction_break(1) + jump(p2) """) @@ -486,7 +487,7 @@ setfield_gc(p1, %(tdescr.tid)d, descr=tiddescr) stm_set_revision_gc(p1, descr=revdescr) p2 = getfield_gc(p1, descr=tzdescr) - stm_transaction_break(1) + jump(p2) """) @@ -494,6 +495,7 @@ # XXX could detect CALLs that cannot interrupt the transaction # and/or could use the L category class fakeextrainfo: + oopspecindex = 0 def call_needs_inevitable(self): return False T = rffi.CArrayPtr(rffi.TIME_T) @@ -512,7 +514,7 @@ call(p2, descr=calldescr1) cond_call_stm_b(p1, descr=A2Vdescr) setfield_gc(p1, 5, descr=tydescr) - stm_transaction_break(1) + jump(p2) """, calldescr1=calldescr1) @@ -529,7 +531,7 @@ i3 = getfield_raw(i1, descr=tydescr) keepalive(i3) i4 = getfield_raw(i2, descr=tydescr) - stm_transaction_break(1) + jump(i3, i4) """) @@ -545,7 +547,7 @@ """, """ [i1] i2 = getfield_raw(i1, descr=fdescr) - stm_transaction_break(1) + jump(i2) """, fdescr=fdescr) @@ -563,7 +565,7 @@ label(i1, i2, i3) $INEV i4 = getfield_raw(i2, descr=tydescr) - stm_transaction_break(1) + jump(i3, i4) """) @@ -578,7 +580,7 @@ $INEV i3 = getarrayitem_raw(i1, 5, descr=adescr) i4 = getarrayitem_raw(i2, i3, descr=adescr) - stm_transaction_break(1) + jump(i3, i4) """) @@ -594,7 +596,7 @@ setarrayitem_gc(p1, i1, p2, descr=adescr) cond_call_stm_b(p3, descr=A2Vdescr) setarrayitem_gc(p3, i3, p4, descr=adescr) - stm_transaction_break(1) + jump() """) @@ -611,7 +613,7 @@ setarrayitem_gc(p1, i2, p2, descr=adescr) i4 = read_timestamp() setarrayitem_gc(p1, i3, p3, descr=adescr) - stm_transaction_break(1) + jump() """) @@ -628,7 +630,7 @@ setinteriorfield_gc(p1, i2, p2, descr=intzdescr) i4 = read_timestamp() setinteriorfield_gc(p1, i3, p3, descr=intzdescr) - stm_transaction_break(1) + jump() """) @@ -643,7 +645,7 @@ cond_call_stm_b(p1, descr=A2Vdescr) strsetitem(p1, i2, i3) unicodesetitem(p1, i2, i3) - stm_transaction_break(1) + jump() """) # py.test.skip("XXX not really right: should instead be an assert " @@ -657,10 +659,10 @@ jump() """, """ [p1, i2, i3] - cond_call_stm_b(p1, descr=A2Idescr) + cond_call_stm_b(p1, descr=A2Rdescr) i4=strgetitem(p1, i2) i5=unicodegetitem(p1, i2) - stm_transaction_break(1) + jump() """) @@ -681,10 +683,10 @@ setfield_gc(p7, 10, descr=tydescr) call_release_gil(123, descr=calldescr2) guard_not_forced() [] - stm_transaction_break(0) + cond_call_stm_b(p7, descr=A2Vdescr) setfield_gc(p7, 20, descr=tydescr) - stm_transaction_break(1) + jump(i2, p7) """, calldescr2=calldescr2) @@ -712,7 +714,7 @@ %s cond_call_stm_b(p7, descr=A2Vdescr) setfield_gc(p7, 20, descr=tydescr) - stm_transaction_break(1) + jump(i2, p7) """ % op, calldescr2=calldescr2) @@ -726,7 +728,7 @@ cond_call_stm_b(p2, descr=A2Wdescr) cond_call_stm_b(p1, descr=A2Rdescr) copystrcontent(p1, p2, i1, i2, i3) - stm_transaction_break(1) + jump() """) @@ -748,12 +750,13 @@ setfield_gc(p1, 10, descr=tydescr) %s setfield_gc(p1, 20, descr=tydescr) - stm_transaction_break(1) + 
jump(p1) """ % op) def test_call_force(self): class fakeextrainfo: + oopspecindex=0 def call_needs_inevitable(self): return False T = rffi.CArrayPtr(rffi.TIME_T) @@ -765,7 +768,7 @@ ("call_loopinvariant(123, descr=calldescr2)", False), ]: guard = "guard_not_forced() []" if guarded else "" - tr_break = "stm_transaction_break(0)" if guarded else "" + tr_break = "" if guarded else "" self.check_rewrite(""" [p1] setfield_gc(p1, 10, descr=tydescr) @@ -782,7 +785,7 @@ %s cond_call_stm_b(p1, descr=A2Vdescr) setfield_gc(p1, 20, descr=tydescr) - stm_transaction_break(1) + jump(p1) """ % (op, guard, tr_break), calldescr2=calldescr2) @@ -806,7 +809,7 @@ setarrayitem_gc(p1, 1, f0, descr=floatframedescr) i3 = call_assembler(p1, descr=casmdescr) guard_not_forced() [] - stm_transaction_break(0) + """) def test_ptr_eq_null(self): @@ -817,7 +820,7 @@ """, """ [p1, p2] i1 = ptr_eq(p1, NULL) - stm_transaction_break(1) + jump(i1) """) @@ -829,7 +832,7 @@ """, """ [p1, p2] i1 = ptr_eq(p1, p2) - stm_transaction_break(1) + jump(i1) """) @@ -841,7 +844,7 @@ """, """ [p1, p2] i1 = instance_ptr_eq(p1, p2) - stm_transaction_break(1) + jump(i1) """) @@ -853,7 +856,7 @@ """, """ [p1, p2] i1 = ptr_ne(p1, p2) - stm_transaction_break(1) + jump(i1) """) @@ -865,7 +868,7 @@ """, """ [p1, p2] i1 = instance_ptr_ne(p1, p2) - stm_transaction_break(1) + jump(i1) """) @@ -999,7 +1002,7 @@ [i0] p0 = call_malloc_nursery_varsize(0, 1, i0, descr=bdescr) setfield_gc(p0, i0, descr=blendescr) - stm_transaction_break(1) + jump(i0) """) @@ -1012,7 +1015,7 @@ [i0] p0 = call_malloc_nursery_varsize(1, 1, i0, descr=strdescr) setfield_gc(p0, i0, descr=strlendescr) - stm_transaction_break(1) + jump(i0) """) @@ -1036,7 +1039,7 @@ %(nonstd_descr.lendescr.offset)d, \ 6464, i0, \ descr=malloc_array_nonstandard_descr) - stm_transaction_break(1) + jump(i0) """, nonstd_descr=nonstd_descr) @@ -1051,7 +1054,7 @@ p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ %(bdescr.tid)d, 103, \ descr=malloc_array_descr) - stm_transaction_break(1) + jump() """) @@ -1091,7 +1094,7 @@ p0 = call_malloc_gc(ConstClass(malloc_array), 1, \ %(bdescr.tid)d, 20000000, \ descr=malloc_array_descr) - stm_transaction_break(1) + jump() """) @@ -1177,8 +1180,7 @@ [i0, f0] p0 = new_array(5, descr=bdescr) p1 = new_array(5, descr=bdescr) - call_may_force(123, descr=calldescr2) - guard_not_forced() [] + stm_transaction_break(1) p2 = new_array(5, descr=bdescr) """, """ [i0, f0] @@ -1191,11 +1193,9 @@ setfield_gc(p1, 8765, descr=tiddescr) stm_set_revision_gc(p1, descr=revdescr) setfield_gc(p1, 5, descr=blendescr) - - call_may_force(123, descr=calldescr2) - guard_not_forced() [] - stm_transaction_break(0) - + + stm_transaction_break(1) + p2 = call_malloc_nursery( \ %(bdescr.basesize + 8)d) setfield_gc(p2, 8765, descr=tiddescr) @@ -1203,45 +1203,6 @@ setfield_gc(p2, 5, descr=blendescr) """, calldescr2=calldescr2) - def test_no_transactionbreak_in_loop_body(self): - py.test.skip("actually not good") - - class fakeextrainfo: - def call_needs_inevitable(self): - return False - T = rffi.CArrayPtr(rffi.TIME_T) - calldescr2 = get_call_descr(self.gc_ll_descr, [T], rffi.TIME_T, - fakeextrainfo()) - - self.check_rewrite(""" - [] - call_may_force(123, descr=calldescr2) - guard_not_forced() [] - - label() - - call_may_force(123, descr=calldescr2) - guard_not_forced() [] - - i0 = int_add(1, 2) - - jump() - """, """ - [] - call_may_force(123, descr=calldescr2) - guard_not_forced() [] - stm_transaction_break(0) - - label() - - call_may_force(123, descr=calldescr2) - guard_not_forced() [] - - i0 = 
int_add(1, 2) - - stm_transaction_break(1) - jump() - """, calldescr2=calldescr2) def test_immutable_getfields(self): for imm_hint in [{}, {'immutable':True}]: @@ -1260,7 +1221,8 @@ vdescr.tid = 1233 vzdescr = get_interiorfield_descr(self.gc_ll_descr, V, 'z') - barr = "A2Idescr" if imm_hint else "A2Rdescr" + # XXX: "A2Idescr" if imm_hint else "A2Rdescr" + barr = "A2Rdescr" if imm_hint else "A2Rdescr" self.check_rewrite(""" [p1, p3, i1, p4] p2 = getfield_gc(p1, descr=uxdescr) @@ -1275,7 +1237,7 @@ i3 = getinteriorfield_gc(p3, i1, descr=vzdescr) cond_call_stm_b(p4, descr=%s) i4 = getarrayitem_gc(p4, i3, descr=vdescr) - stm_transaction_break(1) + jump(p2) """ % (barr, barr, barr), uxdescr=uxdescr, vzdescr=vzdescr, vdescr=vdescr) @@ -1309,7 +1271,7 @@ setinteriorfield_gc(p3, i1, 1, descr=vzdescr) cond_call_stm_b(p4, descr=A2Vdescr) setarrayitem_gc(p4, i1, 1, descr=vdescr) - stm_transaction_break(1) + jump(p3) """, uxdescr=uxdescr, vzdescr=vzdescr, vdescr=vdescr) From noreply at buildbot.pypy.org Thu Jan 16 10:31:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 16 Jan 2014 10:31:05 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Today's plannings Message-ID: <20140116093105.72DF91C0291@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5132:db3921fe306f Date: 2014-01-16 10:30 +0100 http://bitbucket.org/pypy/extradoc/changeset/db3921fe306f/ Log: Today's plannings diff --git a/sprintinfo/leysin-winter-2014/planning.txt b/sprintinfo/leysin-winter-2014/planning.txt --- a/sprintinfo/leysin-winter-2014/planning.txt +++ b/sprintinfo/leysin-winter-2014/planning.txt @@ -17,7 +17,7 @@ * look at codespeed2 -* resume-refactor branch (MORE PROGRESS) +* resume-refactor branch (fijal, rguillebert) MORE PROGRESS * GC pinning @@ -37,8 +37,10 @@ * longs multiplication: patch at https://bugs.pypy.org/issue892 -* look into merging refactor-str-types (johan, mjacob, antocuni) FIX TRANSLATION +* look into merging refactor-str-types (mjacob, antocuni) FIX TRANSLATION * tweaking ast classes: https://bugs.pypy.org/issue1673 (mjacob) -* skiing (fijal) +* skiing (fijal, DONE) + +* add jit_merge_point to tuple_contains (anybody) From noreply at buildbot.pypy.org Thu Jan 16 10:50:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 16 Jan 2014 10:50:47 +0100 (CET) Subject: [pypy-commit] pypy default: Update 'Optimized Unicode Representation' Message-ID: <20140116095047.A83C21C0291@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68692:b65f1d056fa3 Date: 2014-01-16 10:50 +0100 http://bitbucket.org/pypy/pypy/changeset/b65f1d056fa3/ Log: Update 'Optimized Unicode Representation' diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -74,6 +74,10 @@ The actual details would be rather differen in PyPy, but we would like to have the same optimization implemented. +Or maybe not. We can also play around with the idea of using a single +representation: as a byte string in utf-8. (This idea needs some extra logic +for efficient indexing, like a cache.) + .. 
_`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/ Translation Toolchain From noreply at buildbot.pypy.org Thu Jan 16 11:01:37 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Thu, 16 Jan 2014 11:01:37 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: Change the way ufuncs work on scalar to make it possible to call __array_prepare__ Message-ID: <20140116100137.5F4451C0291@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68693:a9b32d23096f Date: 2014-01-16 11:00 +0100 http://bitbucket.org/pypy/pypy/changeset/a9b32d23096f/ Log: Change the way ufuncs work on scalar to make it possible to call __array_prepare__ diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -10,6 +10,7 @@ from pypy.module.micronumpy.strides import shape_agreement from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy.arrayimpl.scalar import Scalar def done_if_true(dtype, val): return dtype.itemtype.bool(val) @@ -383,8 +384,8 @@ w_ldtype = w_rdtype if (self.int_only and (not w_ldtype.is_int_type() or not w_rdtype.is_int_type()) or - not self.allow_bool and (w_ldtype.is_bool_type() or w_rdtype.is_bool_type()) or - not self.allow_complex and (w_ldtype.is_complex_type() or w_rdtype.is_complex_type())): + not self.allow_bool and (w_ldtype.is_bool_type() or w_rdtype.is_bool_type()) or + not self.allow_complex and (w_ldtype.is_complex_type() or w_rdtype.is_complex_type())): raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) calc_dtype = find_binop_result_dtype(space, @@ -418,12 +419,16 @@ w_rhs.get_scalar_value().convert_to(space, calc_dtype) ) if isinstance(out, W_NDimArray): + # TODO: Call __array_prepare__ if out.is_scalar(): out.set_scalar_value(arr) else: out.fill(space, arr) else: - out = arr + # TODO: Call __array_prepare__ + out = W_NDimArray(Scalar(res_dtype, res_dtype.box(0))) + out.set_scalar_value(arr) + return out new_shape = shape_agreement(space, w_lhs.get_shape(), w_rhs) new_shape = shape_agreement(space, new_shape, out, broadcast_down=False) From noreply at buildbot.pypy.org Thu Jan 16 11:59:53 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 16 Jan 2014 11:59:53 +0100 (CET) Subject: [pypy-commit] pypy stmgc-c4: add failing test for possible optimization Message-ID: <20140116105953.3BCD21C0291@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c4 Changeset: r68694:f7c148ced74c Date: 2014-01-16 11:59 +0100 http://bitbucket.org/pypy/pypy/changeset/f7c148ced74c/ Log: add failing test for possible optimization diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1275,6 +1275,42 @@ jump(p3) """, uxdescr=uxdescr, vzdescr=vzdescr, vdescr=vdescr) - + def test_weaken_previous_barrier(self): + class fakeextrainfo: + oopspecindex=0 + def call_needs_inevitable(self): + return False + T = rffi.CArrayPtr(rffi.TIME_T) + calldescr2 = get_call_descr(self.gc_ll_descr, [T], rffi.TIME_T, + fakeextrainfo()) + # True: weaken previous barrier + # False: do not weaken + ops = [("stm_transaction_break(1)", False), + ("call(123, descr=cd)", False), + 
("label()", False), + ("i2 = int_add(i1, 1)", True) + ] + for op, weaken in ops: + b1 = ("cond_call_stm_b(p1, descr=A2Vdescr)" if weaken + else "cond_call_stm_b(p1, descr=A2Rdescr)") + b2 = ("" if weaken + else "cond_call_stm_b(p1, descr=A2Vdescr)") + self.check_rewrite(""" + [p1, i3] + i1 = getfield_gc(p1, descr=tydescr) # noptr + %s + setfield_gc(p1, i3, descr=tydescr) # noptr + jump(p1) + """ % (op,), """ + [p1, i3] + %s + i1 = getfield_gc(p1, descr=tydescr) + %s + %s + setfield_gc(p1, i3, descr=tydescr) + + jump(p1) + """ % (b1, op, b2), cd=calldescr2) + From noreply at buildbot.pypy.org Thu Jan 16 12:18:14 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 16 Jan 2014 12:18:14 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20140116111814.B7A9C1C0500@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r68695:e32ce09e64c1 Date: 2014-01-16 12:18 +0100 http://bitbucket.org/pypy/pypy/changeset/e32ce09e64c1/ Log: hg merge default diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -74,6 +74,10 @@ The actual details would be rather differen in PyPy, but we would like to have the same optimization implemented. +Or maybe not. We can also play around with the idea of using a single +representation: as a byte string in utf-8. (This idea needs some extra logic +for efficient indexing, like a cache.) + .. _`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/ Translation Toolchain diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -435,6 +435,11 @@ return SomeChar(no_nul=no_nul) +class __extend__(pairtype(SomeChar, SomeUnicodeCodePoint), + pairtype(SomeUnicodeCodePoint, SomeChar)): + def union((uchr1, uchr2)): + return SomeUnicodeCodePoint() + class __extend__(pairtype(SomeUnicodeCodePoint, SomeUnicodeCodePoint)): def union((uchr1, uchr2)): return SomeUnicodeCodePoint() From noreply at buildbot.pypy.org Thu Jan 16 12:19:54 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 16 Jan 2014 12:19:54 +0100 (CET) Subject: [pypy-commit] pypy default: add a jitdriver to tuple.__contains__ Message-ID: <20140116111954.C70201C0500@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68696:198338979947 Date: 2014-01-16 12:09 +0100 http://bitbucket.org/pypy/pypy/changeset/198338979947/ Log: add a jitdriver to tuple.__contains__ diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -27,6 +27,9 @@ jit.loop_unrolling_heuristic(other, other.length(), UNROLL_CUTOFF)) +contains_jmp = jit.JitDriver(greens = [], reds = 'auto', + name = 'tuple.contains') + class W_AbstractTupleObject(W_Root): __slots__ = () @@ -119,13 +122,26 @@ descr_gt = _make_tuple_comparison('gt') descr_ge = _make_tuple_comparison('ge') - @jit.look_inside_iff(lambda self, _1, _2: _unroll_condition(self)) def descr_contains(self, space, w_obj): + if _unroll_condition(self): + return self._descr_contains_unroll_safe(space, w_obj) + else: + return self._descr_contains_jmp(space, w_obj) + + @jit.unroll_safe + def _descr_contains_unroll_safe(self, space, w_obj): for w_item in self.tolist(): if space.eq_w(w_item, w_obj): return space.w_True return space.w_False + def _descr_contains_jmp(self, space, w_obj): + for w_item in 
self.tolist(): + contains_jmp.jit_merge_point() + if space.eq_w(w_item, w_obj): + return space.w_True + return space.w_False + def descr_add(self, space, w_other): if not isinstance(w_other, W_AbstractTupleObject): return space.w_NotImplemented From noreply at buildbot.pypy.org Thu Jan 16 12:19:56 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 16 Jan 2014 12:19:56 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20140116111956.22EE91C0500@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r68697:156ddeb302d1 Date: 2014-01-16 12:19 +0100 http://bitbucket.org/pypy/pypy/changeset/156ddeb302d1/ Log: merge diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -74,6 +74,10 @@ The actual details would be rather differen in PyPy, but we would like to have the same optimization implemented. +Or maybe not. We can also play around with the idea of using a single +representation: as a byte string in utf-8. (This idea needs some extra logic +for efficient indexing, like a cache.) + .. _`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/ Translation Toolchain From noreply at buildbot.pypy.org Thu Jan 16 12:55:35 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 16 Jan 2014 12:55:35 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: basic stuff with resume and optimizeopt Message-ID: <20140116115535.786191C0291@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68698:1e684994fc01 Date: 2014-01-16 12:54 +0100 http://bitbucket.org/pypy/pypy/changeset/1e684994fc01/ Log: basic stuff with resume and optimizeopt diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -55,8 +55,7 @@ debug_start("jit-optimize") try: loop.logops = metainterp_sd.logger_noopt.log_loop( - flatten(loop.inputframes), - loop.operations) + flatten(loop.inputframes), loop.operations) optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts) if unroll: optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -347,7 +347,6 @@ self.values = {} self.interned_refs = self.cpu.ts.new_ref_dict() self.interned_ints = {} - self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd) self.bool_boxes = {} self.producer = {} self.pendingfields = None # set temporarily to a list, normally by @@ -364,6 +363,7 @@ self.call_pure_results = loop.call_pure_results self.set_optimizations(optimizations) + self.resume_stack = [] self.setup() def set_optimizations(self, optimizations): @@ -387,6 +387,7 @@ o.force_at_end_of_preamble() def flush(self): + self.resume_flush() for o in self.optimizations: o.flush() @@ -410,7 +411,7 @@ def forget_numberings(self, virtualbox): self.metainterp_sd.profiler.count(jitprof.Counters.OPT_FORCINGS) - self.resumedata_memo.forget_numberings(virtualbox) + #self.resumedata_memo.forget_numberings(virtualbox) def getinterned(self, box): constbox = self.get_constant_box(box) @@ -517,7 +518,7 @@ self.loop.operations = self.get_newoperations() self.loop.quasi_immutable_deps = self.quasi_immutable_deps # 
accumulate counters - self.resumedata_memo.update_counters(self.metainterp_sd.profiler) + #self.resumedata_memo.update_counters(self.metainterp_sd.profiler) def send_extra_operation(self, op): self.first_optimization.propagate_forward(op) @@ -571,6 +572,7 @@ assert False def store_final_boxes_in_guard(self, op, pendingfields): + xxx assert pendingfields is not None descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) @@ -670,9 +672,27 @@ value = self.getvalue(op.getarg(0)) self.optimizer.opaque_pointers[value] = True + # the following stuff should go to the default Optimization thing, + # pending refactor + def optimize_ENTER_FRAME(self, op): + self.resume_stack.append(op) + + def optimize_LEAVE_FRAME(self, op): + if self.resume_stack: + self.resume_stack.pop() + else: + self.emit_operation(op) + + def optimize_RESUME_PUT(self, op): + self.resume_flush() self.optimize_default(op) + def resume_flush(self): + for op in self.resume_stack: + self.emit_operation(op) + self.resume_stack = [] + dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_', default=Optimizer.optimize_default) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -92,7 +92,8 @@ def optimize_loop(self, ops, optops, call_pure_results=None): loop = self.parse(ops) token = JitCellToken() - loop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=TargetToken(token))] + \ + loop.operations = [ResOperation(rop.LABEL, loop.inputframes[0], None, + descr=TargetToken(token))] + \ loop.operations if loop.operations[-1].getopnum() == rop.JUMP: loop.operations[-1].setdescr(token) @@ -152,8 +153,8 @@ """ expected = """ [i] + i0 = int_sub(i, 1) enter_frame(-1, descr=jitcode) - i0 = int_sub(i, 1) resume_put(i0, 0, 0) guard_value(i0, 0) leave_frame() @@ -176,8 +177,6 @@ """ expected = """ [] - enter_frame(-1, descr=jitcode) - leave_frame() jump() """ self.optimize_loop(ops, expected) @@ -442,6 +441,19 @@ """ self.optimize_loop(ops, expected) + def test_remove_enter_leave_frame(self): + ops = """ + [] + enter_frame(-1, descr=jitcode) + leave_frame() + finish() + """ + expected = """ + [] + finish() + """ + self.optimize_loop(ops, expected) + def test_ooisnull_oononnull_via_virtual(self): ops = """ [p0] @@ -455,13 +467,12 @@ leave_frame() jump(p0) """ - xxx expected = """ [p0] enter_frame(-1, descr=jitcode) resume_put(p0, 0, 2) pv = resume_new_with_vtable(ConstClass(node_vtable)) - resume_setfield_gc(p0, pv, descr=...) 
+ resume_setfield_gc(p0, pv, descr=valuedescr) guard_nonnull(p0) leave_frame() jump(p0) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -312,9 +312,10 @@ def assert_equal(self, optimized, expected, text_right=None): from rpython.jit.metainterp.optimizeopt.util import equaloplists - assert len(optimized.inputargs) == len(expected.inputargs) + assert len(optimized.inputframes[0]) == len(expected.inputframes[0]) remap = {} - for box1, box2 in zip(optimized.inputargs, expected.inputargs): + for box1, box2 in zip(optimized.inputframes[0], + expected.inputframes[0]): assert box1.__class__ == box2.__class__ remap[box2] = box1 assert equaloplists(optimized.operations, @@ -341,7 +342,7 @@ operations = loop.operations jumpop = operations[-1] assert jumpop.getopnum() == rop.JUMP - inputargs = loop.inputargs + inputargs = loop.inputframes[0] jump_args = jumpop.getarglist()[:] operations = operations[:-1] @@ -368,7 +369,7 @@ #[inliner.inline_op(jumpop)] assert loop.operations[-1].getopnum() == rop.JUMP assert loop.operations[0].getopnum() == rop.LABEL - loop.inputargs = loop.operations[0].getarglist() + loop.inputframes = [loop.operations[0].getarglist()] self._do_optimize_loop(loop, call_pure_results) extra_same_as = [] @@ -384,13 +385,17 @@ return preamble - +class FakeDescr(compile.ResumeGuardDescr): + def __eq__(self, other): + return isinstance(other, FakeDescr) def convert_old_style_to_targets(loop, jump): newloop = TreeLoop(loop.name) - newloop.inputargs = loop.inputargs - newloop.operations = [ResOperation(rop.LABEL, loop.inputargs, None, descr=FakeDescr())] + \ - loop.operations + newloop.inputframes = loop.inputframes + label = ResOperation(rop.LABEL, loop.inputframes[0], None, + descr=FakeDescr()) + newloop.operations = [label] + newloop.operations += loop.operations if not jump: assert newloop.operations[-1].getopnum() == rop.JUMP newloop.operations[-1] = ResOperation(rop.LABEL, newloop.operations[-1].getarglist(), None, descr=FakeDescr()) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -475,6 +475,10 @@ # frontend 'RESUME_PUT_CONST/3', # the same but for a constant 'RESUME_NEW/0d', + 'RESUME_NEW_WITH_VTABLE/1', + 'RESUME_NEW_ARRAY/1d', + 'RESUME_NEWSTR/1', + 'RESUME_NEWUNICODE/1', 'RESUME_SETFIELD_GC/2d', 'RESUME_SET_PC/1', 'RESUME_CLEAR/2', diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -299,7 +299,7 @@ loop.comment = first_comment loop.original_jitcell_token = self.original_jitcell_token loop.operations = ops - loop.inputargs = inpargs + loop.inputframes = [inpargs] loop.last_offset = last_offset return loop From noreply at buildbot.pypy.org Thu Jan 16 15:04:31 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:31 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: stupid me ... only replace slashes on windows Message-ID: <20140116140431.EA9BA1C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r568:d70d89b47bf9 Date: 2014-01-10 11:22 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/d70d89b47bf9/ Log: stupid me ... 
only replace slashes on windows diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -191,7 +191,9 @@ if path is None: path = "Squeak.image" - path = os.path.join(os.getcwd(), path).replace("/", "\\") + path = os.path.join(os.getcwd(), path) + if os.name == "nt": + path = path.replace("/", "\\") try: f = open_file_as_stream(path, mode="rb", buffering=0) except OSError as e: From noreply at buildbot.pypy.org Thu Jan 16 15:04:33 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:33 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: use rpath Message-ID: <20140116140433.116281C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r569:66cea7643c37 Date: 2014-01-10 11:36 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/66cea7643c37/ Log: use rpath diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -2,7 +2,7 @@ import os from rpython.rlib.streamio import open_file_as_stream -from rpython.rlib import jit +from rpython.rlib import jit, rpath from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ error, shadow @@ -191,9 +191,7 @@ if path is None: path = "Squeak.image" - path = os.path.join(os.getcwd(), path) - if os.name == "nt": - path = path.replace("/", "\\") + path = rpath.rabspath(path) try: f = open_file_as_stream(path, mode="rb", buffering=0) except OSError as e: From noreply at buildbot.pypy.org Thu Jan 16 15:04:34 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:34 +0100 (CET) Subject: [pypy-commit] lang-smalltalk rbitblt: improve traces of byteobject comparisons Message-ID: <20140116140434.248F21C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: rbitblt Changeset: r570:1670a39ac8e7 Date: 2014-01-10 15:16 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/1670a39ac8e7/ Log: improve traces of byteobject comparisons diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -661,6 +661,7 @@ self._vars, w_other._vars = w_other._vars, self._vars return W_AbstractPointersObject.become(self, w_other) + @jit.unroll_safe def clone(self, space): w_result = W_PointersObject(self.space, self.getclass(space), len(self._vars)) @@ -703,6 +704,7 @@ self._weakvars, w_other._weakvars = w_other._weakvars, self._weakvars return W_AbstractPointersObject.become(self, w_other) + @jit.unroll_safe def clone(self, space): w_result = W_WeakPointersObject(self.space, self.getclass(space), len(self._weakvars)) @@ -715,7 +717,7 @@ class W_BytesObject(W_AbstractObjectWithClassReference): _attrs_ = ['bytes', 'c_bytes', '_size'] - _immutable_fields_ = ['_size'] + _immutable_fields_ = ['_size', 'bytes[*]?'] def __init__(self, space, w_class, size): W_AbstractObjectWithClassReference.__init__(self, space, w_class) @@ -798,16 +800,20 @@ # XXX this sounds very wrong to me if not isinstance(other, W_BytesObject): return False - if self.bytes is not None and other.bytes is not None: + size = self.size() + if size != other.size(): + return False + if size > 256 and self.bytes is not None and other.bytes is not None: return self.bytes == other.bytes else: - size = self.size() - if size != other.size(): + return self.has_same_chars(other, size) + + @jit.look_inside_iff(lambda self, other, size: size < 256) + def has_same_chars(self, other, size): + for i in 
range(size): + if self.getchar(i) != other.getchar(i): return False - for i in range(size): - if self.getchar(i) != other.getchar(i): - return False - return True + return True def clone(self, space): size = self.size() diff --git a/spyvm/test/jittest/test_basic.py b/spyvm/test/jittest/test_basic.py --- a/spyvm/test/jittest/test_basic.py +++ b/spyvm/test/jittest/test_basic.py @@ -56,28 +56,77 @@ ^ i """) self.assert_matches(traces[0].loop, """ - label(p0, p3, i58, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i65, descr=TargetToken(153187472)) - debug_merge_point(0, 0, '2: [0x10]pushTemporaryVariableBytecode (codeTest1387373494)') guard_not_invalidated(descr=) - debug_merge_point(0, 0, '3: [0x21]pushLiteralConstantBytecode (codeTest1387373494)') - debug_merge_point(0, 0, '4: [0xb4]bytecodePrimLessOrEqual (codeTest1387373494)') i68 = int_le(i58, 10000) guard_true(i68, descr=) - debug_merge_point(0, 0, '5: [0x9e]shortConditionalJump (codeTest1387373494)') - debug_merge_point(0, 0, '6: [0x10]pushTemporaryVariableBytecode (codeTest1387373494)') - debug_merge_point(0, 0, '7: [0x20]pushLiteralConstantBytecode (codeTest1387373494)') - debug_merge_point(0, 0, '8: [0xc2]bytecodePrimSize (codeTest1387373494)') - debug_merge_point(0, 0, '9: [0xb0]bytecodePrimAdd (codeTest1387373494)') i69 = int_add(i58, 1) i70 = int_sub(i69, -1073741824) i71 = uint_lt(i70, -2147483648) guard_true(i71, descr=) - debug_merge_point(0, 0, '10: [0x68]storeAndPopTemporaryVariableBytecode (codeTest1387373494)') - debug_merge_point(0, 0, '11: [0xa3]longUnconditionalJump (codeTest1387373494)') i72 = int_sub(i65, 1) setfield_gc(ConstPtr(ptr55), i72, descr=) i73 = int_le(i72, 0) guard_false(i73, descr=) - debug_merge_point(0, 0, '2: [0x10]pushTemporaryVariableBytecode (codeTest1387373494)') jump(p0, p3, i69, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i72, descr=TargetToken(153187472)) """) + + def test_constant_string_equal2(self, spy, tmpdir): + # This used to have a call to array comparison in it + traces = self.run(spy, tmpdir, """ + | i | + i := 0. + [i <= 100000] whileTrue: [ + 'a' == 'ab'. + 'cde' == 'efg'. + 'hij' == 'hij'. + i := i + 1]. + ^ i + """) + self.assert_matches(traces[0].loop, """ + guard_not_invalidated(descr=), + i76 = int_le(i65, 100000), + guard_true(i76, descr=), + i77 = int_add(i65, 1), + i78 = int_sub(i77, -1073741824), + i79 = uint_lt(i78, -2147483648), + guard_true(i79, descr=), + i80 = int_sub(i73, 2), + setfield_gc(ConstPtr(ptr70), i80, descr=), + i81 = int_le(i80, 0), + guard_false(i81, descr=), + i83 = arraylen_gc(p49, descr=), + i84 = arraylen_gc(p53, descr=), + i85 = arraylen_gc(p57, descr=), + jump(p0, p3, i77, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i80, p49, p53, p57, descr=TargetToken(163738864)) + """) + + def test_constant_string_var_equal(self, spy, tmpdir): + # This used to have a call to array comparison in it + traces = self.run(spy, tmpdir, """ + | i a b c d | + i := 0. + a = 'a'. + b = 'bc'. + c = 'cd'. + d = 'bc'. + [i <= 100000] whileTrue: [ + a == b. + b == c. + b == d. + i := i + 1]. 
+ ^ i + """) + self.assert_matches(traces[0].loop, """ + guard_not_invalidated(descr=), + i73 = int_le(i62, 100000), + guard_true(i73, descr=), + i74 = int_add(i62, 1), + i75 = int_sub(i74, -1073741824), + i76 = uint_lt(i75, -2147483648), + guard_true(i76, descr=), + i77 = int_sub(i70, 1), + setfield_gc(ConstPtr(ptr67), i77, descr=), + i78 = int_le(i77, 0), + guard_false(i78, descr=), + jump(p0, p3, i74, p8, p10, p12, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, i77, descr=TargetToken(157713840)) + """) From noreply at buildbot.pypy.org Thu Jan 16 15:04:48 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:48 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: merge rbitblt Message-ID: <20140116140448.7FB041C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r571:7d034b319f2b Date: 2014-01-10 17:11 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/7d034b319f2b/ Log: merge rbitblt diff too long, truncating to 2000 out of 195554 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes --- a/images/Squeak4.5-12568.changes +++ b/images/Squeak4.5-12568.changes @@ -36,4 +36,4 @@ Workspace allInstances do: [:w | w topView delete]. ReleaseBuilderFor4dot4 prepareNewBuild. Smalltalk snapshot: true andQuit: true. -! ----End fileIn of a stream----! ----SNAPSHOT----{31 March 2013 . 3:27:34 pm} Squeak4.5-12327.image priorSource: 7430688! !Installer methodsFor: 'squeakmap' stamp: 'fbs 1/28/2013 19:25' prior: 57597950! packageAndVersionFrom: pkg | p | p := ReadStream on: pkg . ^{(p upTo: $(). p upTo: $)} collect: [:s | s withBlanksTrimmed].! ! "Installer-Core"! !Categorizer methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 16:58'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !ClassCategoryReader methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:21'! scanFrom: aStream environment: anEnvironment "File in methods from the stream, aStream." | methodText | [methodText := aStream nextChunkText. methodText size > 0] whileTrue: [class compile: methodText environment: anEnvironment classified: category withStamp: changeStamp notifying: nil]! ! !ClassCommentReader methodsFor: 'as yet unclassified' stamp: 'cwp 6/20/2012 17:22'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !Metaclass methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:29'! bindingOf: varName environment: anEnvironment ^ thisClass classBindingOf: varName environment: anEnvironment! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 20:03' prior: 22505876! \\ aNumber "Primitive. Take the receiver modulo the argument. The result is the remainder rounded towards negative infinity, of the receiver divided by the argument. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." aNumber isInteger ifTrue: [| neg qr q r | neg := self negative == aNumber negative == false. qr := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: neg). q := qr first normalize. r := qr last normalize. ^(q negative ifTrue: [r isZero not] ifFalse: [q isZero and: [neg]]) ifTrue: [r + aNumber] ifFalse: [r]]. ^super \\ aNumber ! ! !LargePositiveInteger methodsFor: 'converting' stamp: 'nice 1/27/2012 22:41' prior: 37616324! asFloat "Answer a Float that best approximates the value of the receiver. 
This algorithm is optimized to process only the significant digits of a LargeInteger. And it does honour IEEE 754 round to nearest even mode in case of excess precision (see details below)." "How numbers are rounded in IEEE 754 default rounding mode: A shift is applied so that the highest 53 bits are placed before the floating point to form a mantissa. The trailing bits form the fraction part placed after the floating point. This fractional number must be rounded to the nearest integer. If fraction part is 2r0.1, exactly between two consecutive integers, there is a tie. The nearest even integer is chosen in this case. Examples (First 52bits of mantissa are omitted for brevity): 2r0.00001 is rounded downward to 2r0 2r1.00001 is rounded downward to 2r1 2r0.1 is a tie and rounded to 2r0 (nearest even) 2r1.1 is a tie and rounded to 2r10 (nearest even) 2r0.10001 is rounded upward to 2r1 2r1.10001 is rounded upward to 2r10 Thus, if the next bit after floating point is 0, the mantissa is left unchanged. If next bit after floating point is 1, an odd mantissa is always rounded upper. An even mantissa is rounded upper only if the fraction part is not a tie." "Algorihm details: The floating point hardware can perform the rounding correctly with several excess bits as long as there is a single inexact operation. This can be obtained by splitting the mantissa plus excess bits in two part with less bits than Float precision. Note 1: the inexact flag in floating point hardware must not be trusted because in some cases the operations would be exact but would not take into account some bits that were truncated before the Floating point operations. Note 2: the floating point hardware is presumed configured in default rounding mode." | mantissa shift excess result n | "Check how many bits excess the maximum precision of a Float mantissa." excess := self highBitOfMagnitude - Float precision. excess > 7 ifTrue: ["Remove the excess bits but seven." mantissa := self bitShiftMagnitude: 7 - excess. shift := excess - 7. "An even mantissa with a single excess bit immediately following would be truncated. But this would not be correct if above shift has truncated some extra bits. Check this case, and round excess bits upper manually." ((mantissa digitAt: 1) = 2r01000000 and: [self anyBitOfMagnitudeFrom: 1 to: shift]) ifTrue: [mantissa := mantissa + 1]] ifFalse: [mantissa := self. shift := 0]. "There will be a single inexact round off at last iteration" result := (mantissa digitAt: (n := mantissa digitLength)) asFloat. [(n := n - 1) > 0] whileTrue: [ result := 256.0 * result + (mantissa digitAt: n) asFloat]. ^result timesTwoPower: shift.! ! !LargePositiveInteger methodsFor: 'private' stamp: 'nice 12/30/2012 14:25'! primitiveQuo: anInteger "Primitive. Divide the receiver by the argument and return the result. Round the result down towards zero to make it a whole integer. Fail if the argument is 0. Fail if either the argument or the result is not a SmallInteger or a LargePositiveInteger less than 2-to-the-30th (1073741824). Optional. See Object documentation whatIsAPrimitive." ^nil! ! !LargePositiveInteger methodsFor: 'arithmetic' stamp: 'nice 12/30/2012 14:34'! rem: aNumber "Remainder defined in terms of quo:. See super rem:. This is defined only to speed up case of very large integers." (self primitiveQuo: aNumber) ifNotNil: [:quo | ^self - (quo * aNumber)]. aNumber isInteger ifTrue: [| ng rem | ng := self negative == aNumber negative == false. 
rem := (self digitDiv: (aNumber class == SmallInteger ifTrue: [aNumber abs] ifFalse: [aNumber]) neg: ng) at: 2. ^ rem normalize]. ^super rem: aNumber! ! !LargeNegativeInteger methodsFor: 'converting' stamp: 'nice 1/1/2013 15:42' prior: 37616204! asFloat ^super asFloat negated! ! !UndefinedObject methodsFor: 'class hierarchy' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:32'! bindingOf: varName environment: anEnvironment ^superclass bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'testing method dictionary' stamp: 'cwp 6/20/2012 17:30'! classBindingOf: varName environment: anEnvironment ^self bindingOf: varName environment: anEnvironment! ! !Behavior methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:37'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor "Postprocesses a literal scanned by Scanner scanToken (esp. xLitQuote). If scannedLiteral is not an association, answer it. Else, if it is of the form: nil->#NameOfMetaclass answer nil->theMetaclass, if any has that name, else report an error. Else, if it is of the form: #NameOfGlobalVariable->anythiEng answer the global, class, or pool association with that nameE, if any, else add it to Undeclared a answer the new Association." | key value | (scannedLiteral isVariableBinding) ifFalse: [^ scannedLiteral]. key := scannedLiteral key. value := scannedLiteral value. key ifNil: "###" [(self bindingOf: value environment: anEnvironment) ifNotNil: [:assoc| (assoc value isKindOf: Behavior) ifTrue: [^ nil->assoc value class]]. requestor notify: 'No such metaclass'. ^false]. (key isSymbol) ifTrue: "##" [(self bindingOf: key environment: anEnvironment) ifNotNil: [:assoc | ^assoc]. ^ anEnvironment undeclared: key]. requestor notify: '## must be followed by a non-local variable name'. ^false " Form literalScannedAs: 14 notifying: nil 14 Form literalScannedAs: #OneBitForm notiEfying: nil OneBitForm Form literalScannedAs: ##OneBitForm notifying: nil OneBitForm->a Form Form literalScannedAs: ##Form notifying: nil Form->Form Form literalScannedAs: ###Form notifying: nil nilE->Form class "! ! !Fraction methodsFor: 'converting' stamp: 'nice 11/21/2011 22:34' prior: 37619655! asFloat "Answer a Float that closely approximates the value of the receiver. This implementation will answer the closest floating point number to the receiver. In case of a tie, it will use the IEEE 754 round to nearest even mode. In case of overflow, it will answer +/- Float infinity." | a b mantissa exponent hasTruncatedBits lostBit n ha hb hm | a := numerator abs. b := denominator. "denominator is always positive" ha := a highBitOfMagnitude. hb := b highBitOfMagnitude. "Number of bits to keep in mantissa plus one to handle rounding." n := 1 + Float precision. "If both numerator and denominator are represented exactly in floating point number, then fastest thing to do is to use hardwired float division." (ha < n and: [hb < n]) ifTrue: [^numerator asFloat / denominator asFloat]. "Shift the fraction by a power of two exponent so as to obtain a mantissa with n bits. First guess is rough, the mantissa might have n+1 bits." exponent := ha - hb - n. exponent >= 0 ifTrue: [b := b bitShift: exponent] ifFalse: [a := a bitShift: exponent negated]. mantissa := a quo: b. hasTruncatedBits := a > (mantissa * b). hm := mantissa highBit. 
"Check for gradual underflow, in which case the mantissa will loose bits. Keep at least one bit to let underflow preserve the sign of zero." lostBit := Float emin - (exponent + hm - 1). lostBit > 0 ifTrue: [n := n - lostBit max: 1]. "Remove excess bits in the mantissa." hm > n ifTrue: [exponent := exponent + hm - n. hasTruncatedBits := hasTruncatedBits or: [mantissa anyBitOfMagnitudeFrom: 1 to: hm - n]. mantissa := mantissa bitShift: n - hm]. "Check if mantissa must be rounded upward. The case of tie (mantissa odd & hasTruncatedBits not) will be handled by Integer>>asFloat." (hasTruncatedBits and: [mantissa odd]) ifTrue: [mantissa := mantissa + 1]. ^ (self positive ifTrue: [mantissa asFloat] ifFalse: [mantissa asFloat negated]) timesTwoPower: exponent! ! !Float methodsFor: 'arithmetic' stamp: 'nice 12/20/2012 23:16' prior: 20878776! negated "Answer a Number that is the negation of the receiver. Implementation note: this version cares of negativeZero." ^-1.0 * self! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 6/20/2012 17:21'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor ^ self compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: self acceptsLoggingOfCompilation! ! !ClassDescription methodsFor: 'compiling' stamp: 'cwp 12/27/2012 13:17'! compile: text environment: anEnvironment classified: category withStamp: changeStamp notifying: requestor logSource: logSource | methodAndNode context methodNode | context := CompilationCue source: text class: self environment: anEnvironment category: category requestor: requestor. methodNode := self newCompiler compile: context ifFail: [^ nil]. methodAndNode := CompiledMethodWithNode generateMethodFromNode: methodNode trailer: self defaultMethodTrailer. logSource ifTrue: [ self logMethodSource: text forMethodWithNode: methodAndNode inCategory: category withStamp: changeStamp notifying: requestor. ]. self addAndClassifySelector: methodAndNode selector withMethod: methodAndNode method inProtocol: category notifying: requestor. self instanceSide noteCompilationOf: methodAndNode selector meta: self isClassSide. ^ methodAndNode selector! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:47'! bindingOf: varName environment: anEnvironment "Answer the binding of some variable resolved in the scope of the receiver" | aSymbol binding | aSymbol := varName asSymbol. "First look in classVar dictionary." binding := self classPool bindingOf: aSymbol. binding ifNotNil:[^binding]. "Next look in shared pools." self sharedPools do:[:pool | binding := pool bindingOf: aSymbol. binding ifNotNil:[^binding]. ]. "Next look in declared environment." binding := anEnvironment bindingOf: aSymbol. binding ifNotNil:[^binding]. "Finally look higher up the superclass chain and fail at the end." superclass == nil ifTrue: [^ nil] ifFalse: [^ superclass bindingOf: aSymbol]. ! ! "Kernel"! ParseNode subclass: #Encoder instanceVariableNames: 'scopeTable nTemps supered requestor class selector literalStream selectorSet litIndSet litSet sourceRanges globalSourceRanges addedSelectorAndMethodClassLiterals optimizedSelectors cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Encoder commentStamp: 'cwp 12/26/2012 23:29' prior: 36323851! I encode names and literals into tree nodes with byte codes for the compiler. 
Byte codes for literals are not assigned until the tree-sizing pass of the compiler, because only then is it known which literals are actually needed. I also keep track of sourceCode ranges during parsing and code generation so I can provide an inverse map for the debugger.! Scanner subclass: #Parser instanceVariableNames: 'here hereType hereMark hereEnd prevMark prevEnd encoder requestor parseNode failBlock requestorOffset tempsMark doitFlag properties category queriedUnusedTemporaries cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Parser commentStamp: 'cwp 12/26/2012 23:34' prior: 38557958! I parse Smalltalk syntax and create a MethodNode that is the root of the parse tree. I look one token ahead.! Object subclass: #CompilationCue instanceVariableNames: 'source context receiver class environment category requestor' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! Object subclass: #Compiler instanceVariableNames: 'sourceStream requestor class category context parser cue' classVariableNames: '' poolDictionaries: '' category: 'Compiler-Kernel'! !Compiler commentStamp: 'cwp 12/26/2012 23:17' prior: 59257505! The compiler accepts Smalltalk source code and compiles it with respect to a given class. The user of the compiler supplies a context so that temporary variables are accessible during compilation. If there is an error, a requestor (usually a kind of StringHolderController) is sent the message notify:at:in: so that the error message can be displayed. If there is no error, then the result of compilation is a MethodNode, which is the root of a parse tree whose nodes are kinds of ParseNodes. The parse tree can be sent messages to (1) generate code for a CompiledMethod (this is done for compiling methods or evaluating expressions); (2) pretty-print the code (for formatting); or (3) produce a map from object code back to source code (used by debugger program-counter selection). See also Parser, Encoder, ParseNode.! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:34'! init: aCue notifying: anObject "The use of the variable requestor is a bit confusing here. This is *not* the original requestor, which is available through the cue. It's the Parser instance that is using the encoder." self setCue: aCue. requestor := anObject. nTemps := 0. supered := false. self initScopeAndLiteralTables. cue getClass variablesAndOffsetsDo: [:variable "" :offset "" | offset isNil ifTrue: [scopeTable at: variable name put: (FieldNode new fieldDefinition: variable)] ifFalse: [scopeTable at: variable put: (offset >= 0 ifTrue: [InstanceVariableNode new name: variable index: offset] ifFalse: [MaybeContextInstanceVariableNode new name: variable index: offset negated])]]. cue context ~~ nil ifTrue: [| homeNode | homeNode := self bindTemp: self doItInContextName. "0th temp = aContext passed as arg" cue context tempNames withIndexDo: [:variable :index| scopeTable at: variable put: (MessageAsTempNode new receiver: homeNode selector: #namedTempAt: arguments: (Array with: (self encodeLiteral: index)) precedence: 3 from: self)]]. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32 ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/26/2012 23:30'! setCue: aCue cue := aCue. "Also set legacy instance variables for methods that don't use cue yet" class := cue getClass.! ! !Dictionary methodsFor: '*Compiler' stamp: 'cwp 6/22/2012 09:17'! bindingOf: varName ifAbsent: aBlock ^self associationAt: varName ifAbsent: aBlock! ! 
!Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:37'! init: sourceStream cue: aCue failBlock: aBlock self setCue: aCue. failBlock := aBlock. requestorOffset := 0. super scan: sourceStream. prevMark := hereMark := mark. self advance ! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/26/2012 23:41'! parse: sourceStream cue: aCue noPattern: noPattern ifFail: aBlock "Answer a MethodNode for the argument, sourceStream, that is the root of a parse tree. Parsing is done with respect to the CompilationCue to resolve variables. Errors in parsing are reported to the cue's requestor; otherwise aBlock is evaluated. The argument noPattern is a Boolean that is true if the the sourceStream does not contain a method header (i.e., for DoIts)." | methNode repeatNeeded myStream s p subSelection | myStream := sourceStream. [repeatNeeded := false. p := myStream position. s := myStream upToEnd. myStream position: p. subSelection := aCue requestor notNil and: [aCue requestor selectionInterval = (p + 1 to: p + s size)]. self encoder init: aCue notifying: self. self init: myStream cue: aCue failBlock: [^ aBlock value]. doitFlag := noPattern. failBlock:= aBlock. [methNode := self method: noPattern context: cue context] on: ReparseAfterSourceEditing do: [ :ex | repeatNeeded := true. myStream := subSelection ifTrue: [ReadStream on: cue requestor text string from: cue requestor selectionInterval first to: cue requestor selectionInterval last] ifFalse: [ReadStream on: cue requestor text string]]. repeatNeeded] whileTrue: [encoder := self encoder class new]. methNode sourceText: s. ^methNode ! ! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:35'! setCue: aCue cue := aCue. "Also set legacy variables for methods that don't use cue yet." requestor := cue requestor. category := cue category.! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! class: aClass ^ self context: nil class: aClass requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! context: aContext class: aClass requestor: anObject ^ self source: nil context: aContext receiver: nil class: aClass environment: (aClass ifNotNil: [aClass environment]) category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aTextOrStream class: aClass environment: anEnvironment category: aString requestor: anObject ^ self source: aTextOrStream context: nil receiver: nil class: aClass environment: anEnvironment category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:53'! source: aTextOrStream context: aContext class: aClass category: aString requestor: anObject ^ self source: aTextOrStream context: aContext receiver: (aContext ifNotNil: [aContext receiver]) class: aClass environment: (aClass ifNotNil: [aClass environment]) category: aString requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream context: aContext class: aClass requestor: anObject ^ self source: aTextOrStream context: aContext class: aClass category: nil requestor: anObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:55'! 
source: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject ^ self basicNew initializeWithSource: aTextOrStream context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:16'! source: aString environment: anEnvironment ^ self source: aString context: nil receiver: nil class: UndefinedObject environment: anEnvironment category: nil requestor: nil! ! !CompilationCue class methodsFor: 'instance creation' stamp: 'cwp 12/26/2012 23:54'! source: aTextOrStream requestor: anObject ^ self source: aTextOrStream context: nil class: nil requestor: anObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/20/2012 09:39'! bindingOf: aSymbol ^ class bindingOf: aSymbol environment: environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! category ^ category! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 12/26/2012 23:19'! context ^ context! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! environment ^ environment! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! getClass ^ class! ! !CompilationCue methodsFor: 'initialization' stamp: 'cwp 12/26/2012 23:16'! initializeWithSource: aTextOrString context: aContext receiver: recObject class: aClass environment: anEnvironment category: aString requestor: reqObject self initialize. source := aTextOrString isStream ifTrue: [aTextOrString contents] ifFalse: [aTextOrString]. context := aContext. receiver := recObject. class := aClass. environment := anEnvironment. category := aString. requestor := reqObject! ! !CompilationCue methodsFor: 'binding' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: anObject notifying: anEncoder ^ class literalScannedAs: anObject environment: environment notifying: anEncoder! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! receiver ^ receiver! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:16'! requestor ^ requestor! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:15'! source ^ source! ! !CompilationCue methodsFor: 'accessing' stamp: 'cwp 6/19/2012 11:44'! sourceStream ^ source readStream! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 6/20/2012 17:25'! evaluate: aString environment: anEnvironment ^ self evaluate: aString environment: anEnvironment logged: false! ! !Compiler class methodsFor: 'evaluating' stamp: 'cwp 12/27/2012 12:36'! evaluate: aString environment: anEnvironment logged: aBoolean | cue | cue := CompilationCue source: aString environment: anEnvironment. ^ self new evaluate: aString cue: cue ifFail: [^ nil] logged: aBoolean! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 13:18'! compile: aCue ifFail: failBlock "Answer a MethodNode. If the MethodNode can not be created, notify the requestor in the contxt. If the requestor is nil, evaluate failBlock instead. The MethodNode is the root of a parse tree. It can be told to generate a CompiledMethod to be installed in the method dictionary of the class specified by the context." self setCue: aCue. self source: cue source. ^self translate: sourceStream noPattern: false ifFail: failBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:06'! 
evaluate: textOrStream cue: aCue ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | methodNode method value toLog itsSelection itsSelectionString | self setCue: aCue. self source: textOrStream. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [methodNode generateWithTempNames] ifFalse: [methodNode generate]. value := cue receiver withArgs: (cue context ifNil: [#()] ifNotNil: [{cue context}]) executeMethod: method. logFlag ifTrue: [toLog := ((cue requestor respondsTo: #selection) and:[(itsSelection := cue requestor selection) notNil and:[(itsSelectionString := itsSelection asString) isEmptyOrNil not]]) ifTrue:[itsSelectionString] ifFalse:[sourceStream contents]. SystemChangeNotifier uniqueInstance evaluated: toLog context: cue context]. ^ value ! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:20'! setCue: aCue cue := aCue. "Set legacy instance variables for methods that don't use cue yet." requestor := cue requestor. class := cue getClass. category := cue category. context := cue context.! ! !Compiler methodsFor: 'private' stamp: 'cwp 6/19/2012 21:58'! source: textOrStream sourceStream := (textOrStream isKindOf: PositionableStream) ifTrue: [ textOrStream ] ifFalse: [ ReadStream on: textOrStream asString ]! ! "Compiler"! !SmartRefStream class methodsFor: 'i/o' stamp: 'cwp 6/20/2012 17:42'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !SmartRefStream methodsFor: 'read write' stamp: 'cwp 6/20/2012 17:41'! scanFrom: aByteStream environment: anEnvironment ^ self scanFrom: aByteStream! ! !ImageSegment methodsFor: 'fileIn/Out' stamp: 'cwp 6/20/2012 17:23'! scanFrom: aStream environment: anEnvironment ^ self scanFrom: aStream! ! !PseudoClass methodsFor: 'printing' stamp: 'cwp 6/22/2012 15:39'! literalScannedAs: scannedLiteral environment: anEnvironment notifying: requestor ^ scannedLiteral! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | aString newTranslations assoc currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:26'! scanFrom: aStream environment: anEnvironment "Read a definition of dictionary. Make sure current locale corresponds my locale id" | newTranslations currentPlatform | newTranslations := Dictionary new. currentPlatform := Locale currentPlatform. [| aString assoc | Locale currentPlatform: (Locale localeID: id). [aString := aStream nextChunk withSqueakLineEndings. aString size > 0] whileTrue: [assoc := Compiler evaluate: aString environment: anEnvironment. 
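"An empty value means the phrase has no translation yet, so only the phrase itself is registered; otherwise the translation is recorded."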
assoc value = '' ifTrue: [self class registerPhrase: assoc key] ifFalse: [newTranslations add: assoc]]] ensure: [Locale currentPlatform: currentPlatform]. self mergeTranslations: newTranslations! ! !ObjectScanner methodsFor: 'scanning' stamp: 'cwp 6/20/2012 17:39'! scanFrom: aByteStream environment: anEnvironment "This should probably be reimplemented using an environment for compilation. For now, don't change anything" ^ self scanFrom: aByteStream! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 09:16'! bindingOf: varName ifAbsent: aBlock "SystemDictionary includes Symbols only" ^super bindingOf: varName asSymbol ifAbsent: aBlock! ! !SystemDictionary methodsFor: 'accessing' stamp: 'cwp 6/22/2012 15:48'! undeclared ^ self at: #Undeclared! ! "System"! !ExceptionTests methodsFor: 'testing-outer' stamp: 'fbs 1/1/2013 22:14' prior: 40840955! expectedFailures ^ #().! ! "Tests"! ReleaseBuilder subclass: #ReleaseBuilderFor4dot5 instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'ReleaseBuilder'! !ReleaseBuilderFor4dot5 commentStamp: 'fbs 1/1/2013 20:25' prior: 0! The release builder for Squeak 4.5! !ReleaseBuilder class methodsFor: 'scripts' stamp: 'fbs 12/31/2012 20:43'! transferCurrentPackagesAsUser: username password: password "Copy the packages currently loaded in the image from the trunk repository to my releaseRepository." | trunkRep releaseRep | trunkRep := self trunkRepository. releaseRep := self releaseRepository user: username; password: password; yourself. MCWorkingCopy allManagers do: [ : eachWorkingCopy | eachWorkingCopy ancestors do: [ : eachVersionInfo | (releaseRep includesVersionNamed: eachVersionInfo versionName) ifFalse: [ (trunkRep versionWithInfo: eachVersionInfo) ifNil: [ Warning signal: eachVersionInfo name , ' not found in ', trunkRep ] ifNotNilDo: [ : ver | releaseRep storeVersion: ver ] ] ] ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! openWelcomeWorkspaces TheWorldMainDockingBar instance showWelcomeText: #squeakUserInterface label: 'Squeak User Interface' in: (40 @ 40 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #workingWithSqueak label: 'Working With Squeak' in: (80 @ 80 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #licenseInformation label: 'License Information' in: (120 @ 120 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeFutureDirections label: 'Future Directions' in: (160 @ 160 extent: 500 @ 300). TheWorldMainDockingBar instance showWelcomeText: #welcomeToSqueak label: 'Welcome to Squeak 4.5' in: (200 @ 200 extent: 500 @ 300)! ! !ReleaseBuilderFor4dot5 class methodsFor: 'scripts' stamp: 'fbs 1/1/2013 20:22'! prepareNewBuild super prepareNewBuild. MCMockPackageInfo initialize.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:24'! releaseRepository "At release time, change 'trunk' to 'squeak45'." ^ MCHttpRepository location: 'http://source.squeak.org/trunk' user: 'squeak' password: 'squeak'! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:22'! setDisplayExtent: extent "Uncomment next line when the primitives become available in the Squeak VM." " DisplayScreen hostWindowSize: extent." Display extent = extent ifFalse: [ Warning signal: 'Display extent not set to ', extent ]! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! 
setPreferences Preferences installBrightWindowColors ; setPreference: #scrollBarsWithoutMenuButton toValue: true ; setPreference: #swapMouseButtons toValue: true ; setPreference: #annotationPanes toValue: true ; setPreference: #showSplitterHandles toValue: false ; setPreference: #showBoundsInHalo toValue: true ; setPreference: #alternateHandlesLook toValue: false ; setPreference: #roundedMenuCorners toValue: false ; setPreference: #roundedWindowCorners toValue: false. PluggableButtonMorph roundedButtonCorners: false. FillInTheBlankMorph roundedDialogCorners: false. Workspace shouldStyle: false. NetNameResolver enableIPv6: true.! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! switchToNewRepository | old44Repository | MCMcmUpdater defaultUpdateURL: self releaseRepository description. old44Repository := MCRepositoryGroup default repositories detect: [:each | each description includesSubString: 'squeak44'] ifNone: [nil]. old44Repository ifNotNil: [MCRepositoryGroup default removeRepository: old44Repository]. MCRepositoryGroup default addRepository: self releaseRepository! ! !ReleaseBuilderFor4dot5 class methodsFor: 'private' stamp: 'fbs 1/1/2013 20:23'! versionString ^ 'Squeak4.5'.! ! ReleaseBuilder class removeSelector: #transferCurrentPackages! "ReleaseBuilder"! !Environment class methodsFor: 'as yet unclassified' stamp: 'cwp 1/1/2013 18:52' prior: 40834114! initialize self install! ! "Environments"! !Parser methodsFor: 'private' stamp: 'cwp 12/26/2012 23:59' prior: 52081878! initPattern: aString notifying: req return: aBlock | result | self init: (ReadStream on: aString asString) cue: (CompilationCue source: aString requestor: req) failBlock: [^nil]. encoder := self. result := aBlock value: (self pattern: false inContext: nil). encoder := failBlock := nil. "break cycles" ^result! ! !Parser methodsFor: 'public access' stamp: 'cwp 12/27/2012 00:01' prior: 34175471! parse: sourceStream class: class category: aCategory noPattern: noPattern context: aContext notifying: req ifFail: aBlock | c | c := CompilationCue source: sourceStream context: aContext class: class category: aCategory requestor: req. ^ self parse: sourceStream cue: c noPattern: noPattern ifFail: aBlock! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:11' prior: 34183963! evaluate: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method. If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted. Finally, the compiled method is invoked from here via withArgs:executeMethod:, hence the system no longer creates Doit method litter on errors." | theClass | theClass := ((aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class). self setCue: (CompilationCue source: textOrStream context: aContext receiver: receiver class: theClass environment: theClass environment category: nil requestor: aRequestor). ^ self evaluate: textOrStream cue: cue ifFail: failBlock logged: logFlag! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 09:17' prior: 34185488! from: textOrStream class: aClass classified: aCategory context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass category: aCategory requestor: req)! ! 
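For contrast with the cue-based core, a sketch of the retained public entry points (evaluate:in:to:notifying:ifFail:logged: above, parse:in:notifying: further down), which now simply build a CompilationCue internally; the receiver, class name and source strings are invented:

    "Debugger/Inspector-style evaluation against a receiver"
    Compiler new
        evaluate: 'self x + self y'
        in: nil
        to: 3 @ 4
        notifying: nil
        ifFail: [nil]
        logged: false.

    "Parse a complete method source for a class, then generate a CompiledMethod"
    | node method |
    node := Compiler new parse: 'double ^ self + self' in: Integer notifying: nil.
    method := node generate.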
!Compiler methodsFor: 'private' stamp: 'cwp 12/26/2012 23:55' prior: 50781309! from: textOrStream class: aClass context: aContext notifying: req self source: textOrStream. self setCue: (CompilationCue source: textOrStream context: aContext class: aClass requestor: req) ! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/27/2012 09:41' prior: 50996506! init: aClass context: aContext notifying: anObject | c | c := CompilationCue context: aContext class: aClass requestor: nil. self init: c notifying: anObject! ! !Encoder methodsFor: 'initialize-release' stamp: 'cwp 12/26/2012 23:58' prior: 39061698! temps: tempVars literals: lits class: cl "Initialize this encoder for decompilation." self setCue: (CompilationCue class: cl). supered := false. nTemps := tempVars size. tempVars do: [:node | scopeTable at: node name put: node]. literalStream := WriteStream on: (Array new: lits size). literalStream nextPutAll: lits. sourceRanges := Dictionary new: 32. globalSourceRanges := OrderedCollection new: 32.! ! "Compiler"! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:48' prior: 36026010! addClassVarName: aString "Add the argument, aString, as a class variable of the receiver. Signal an error if the first character of aString is not capitalized, or if it is already a variable named in the class." | symbol oldState | oldState := self copy. aString first canBeGlobalVarInitial ifFalse: [^self error: aString, ' class variable name should be capitalized; proceed to include anyway.']. symbol := aString asSymbol. self withAllSubclasses do: [:subclass | (self canFindWithoutEnvironment: symbol) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" variable: aString; signal: aString, ' is already defined']]. classPool == nil ifTrue: [classPool := Dictionary new]. (classPool includesKey: symbol) ifFalse: ["Pick up any refs in Undeclared" classPool declare: symbol from: environment undeclared. SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: oldState to: self]! ! !Class methodsFor: 'compiling' stamp: 'cwp 6/20/2012 09:48' prior: 54782024! bindingOf: varName ^ self bindingOf: varName environment: self environment! ! !Class methodsFor: 'organization' stamp: 'cwp 6/25/2012 18:25' prior: 54785804! category "Answer the system organization category for the receiver. First check whether the category name stored in the ivar is still correct and only if this fails look it up (latter is much more expensive)" category ifNotNil: [ :symbol | ((self environment organization listAtCategoryNamed: symbol) includes: self name) ifTrue: [ ^symbol ] ]. category := self environment organization categoryOfElement: self name. ^category! ! !Class methodsFor: 'initialize-release' stamp: 'cwp 6/22/2012 15:49' prior: 36027730! declare: varString "Declare class variables common to all instances. Answer whether recompilation is advisable." | newVars conflicts | newVars := (Scanner new scanFieldNames: varString) collect: [:x | x asSymbol]. newVars do: [:var | var first canBeGlobalVarInitial ifFalse: [self error: var, ' class variable name should be capitalized; proceed to include anyway.']]. conflicts := false. classPool == nil ifFalse: [(classPool keys reject: [:x | newVars includes: x]) do: [:var | self removeClassVarName: var]]. (newVars reject: [:var | self classPool includesKey: var]) do: [:var | "adding" "check if new vars defined elsewhere" (self canFindWithoutEnvironment: var) ifTrue: [ (DuplicateVariableError new) superclass: superclass; "fake!!!!!!" 
variable: var; signal: var, ' is already defined'. conflicts := true]]. newVars size > 0 ifTrue: [classPool := self classPool. "in case it was nil" newVars do: [:var | classPool declare: var from: environment undeclared]]. ^conflicts! ! !Class methodsFor: 'class variables' stamp: 'cwp 6/22/2012 15:49' prior: 54802475! removeClassVarName: aString "Remove the class variable whose name is the argument, aString, from the names defined in the receiver, a class. Create an error notification if aString is not a class variable or if it is still being used in the code of the class." | aSymbol | aSymbol := aString asSymbol. (classPool includesKey: aSymbol) ifFalse: [^self error: aString, ' is not a class variable']. self withAllSubclasses do:[:subclass | (Array with: subclass with: subclass class) do:[:classOrMeta | (classOrMeta whichSelectorsReferTo: (classPool associationAt: aSymbol)) isEmpty ifFalse: [ InMidstOfFileinNotification signal ifTrue: [ Transcript cr; show: self name, ' (' , aString , ' is Undeclared) '. ^ environment undeclared declare: aSymbol from: classPool]. (self confirm: (aString,' is still used in code of class ', classOrMeta name, '.\Is it okay to move it to Undeclared?') withCRs) ifTrue:[^Undeclared declare: aSymbol from: classPool] ifFalse:[^self]]]]. classPool removeKey: aSymbol. classPool isEmpty ifTrue: [classPool := nil]. ! ! !Class methodsFor: 'class name' stamp: 'cwp 6/22/2012 15:49' prior: 54796206! rename: aString "The new name of the receiver is the argument, aString." | oldName newName | (newName := aString asSymbol) = (oldName := self name) ifTrue: [^ self]. (self environment includesKey: newName) ifTrue: [^ self error: newName , ' already exists']. (environment undeclared includesKey: newName) ifTrue: [self inform: 'There are references to, ' , aString printString , ' from Undeclared. Check them after this change.']. name := newName. self environment renameClass: self from: oldName! ! !ClassBuilder methodsFor: 'class definition' stamp: 'cwp 6/22/2012 01:05' prior: 39054430! name: className inEnvironment: env subclassOf: newSuper type: type instanceVariableNames: instVarString classVariableNames: classVarString poolDictionaries: poolString category: category unsafe: unsafe "Define a new class in the given environment. If unsafe is true do not run any validation checks. This facility is provided to implement important system changes." | oldClass instVars classVars copyOfOldClass newClass | environ := env. instVars := Scanner new scanFieldNames: instVarString. classVars := (Scanner new scanFieldNames: classVarString) collect: [:x | x asSymbol]. "Validate the proposed name" unsafe ifFalse:[(self validateClassName: className) ifFalse:[^nil]]. oldClass := env at: className ifAbsent:[nil]. oldClass isBehavior ifFalse: [oldClass := nil] "Already checked in #validateClassName:" ifTrue: [ copyOfOldClass := oldClass copy. copyOfOldClass superclass addSubclass: copyOfOldClass]. [ | newCategory needNew force organization oldCategory | unsafe ifFalse:[ "Run validation checks so we know that we have a good chance for recompilation" (self validateSuperclass: newSuper forSubclass: oldClass) ifFalse:[^nil]. (self validateInstvars: instVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateClassvars: classVars from: oldClass forSuper: newSuper) ifFalse:[^nil]. (self validateSubclassFormat: type from: oldClass forSuper: newSuper extra: instVars size) ifFalse:[^nil]]. 
"See if we need a new subclass" needNew := self needsSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. needNew == nil ifTrue:[^nil]. "some error" (needNew and:[unsafe not]) ifTrue:[ "Make sure we don't redefine any dangerous classes" (self tooDangerousClasses includes: oldClass name) ifTrue:[ self error: oldClass name, ' cannot be changed'. ]. "Check if the receiver should not be redefined" (oldClass ~~ nil and:[oldClass shouldNotBeRedefined]) ifTrue:[ self notify: oldClass name asText allBold, ' should not be redefined. \Proceed to store over it.' withCRs]]. needNew ifTrue:[ "Create the new class" newClass := self newSubclassOf: newSuper type: type instanceVariables: instVars from: oldClass. newClass == nil ifTrue:[^nil]. "Some error" newClass setName: className. newClass environment: environ. ] ifFalse:[ "Reuse the old class" newClass := oldClass. ]. "Install the class variables and pool dictionaries... " force := (newClass declare: classVarString) | (newClass sharing: poolString). "... classify ..." newCategory := category asSymbol. organization := environ ifNotNil:[environ organization]. oldClass isNil ifFalse: [oldCategory := (organization categoryOfElement: oldClass name) asSymbol]. organization classify: newClass name under: newCategory suppressIfDefault: true. "... recompile ..." newClass := self recompile: force from: oldClass to: newClass mutate: false. "... export if not yet done ..." (environ at: newClass name ifAbsent:[nil]) == newClass ifFalse:[ [environ at: newClass name put: newClass] on: AttemptToWriteReadOnlyGlobal do:[:ex| ex resume: true]. environ flushClassNameCache. ]. newClass doneCompiling. "... notify interested clients ..." oldClass isNil ifTrue: [ SystemChangeNotifier uniqueInstance classAdded: newClass inCategory: newCategory. ^ newClass]. newCategory ~= oldCategory ifTrue: [SystemChangeNotifier uniqueInstance class: newClass recategorizedFrom: oldCategory to: category] ifFalse: [SystemChangeNotifier uniqueInstance classDefinitionChangedFrom: copyOfOldClass to: newClass.]. ] ensure: [copyOfOldClass ifNotNil: [copyOfOldClass superclass removeSubclass: copyOfOldClass]. Behavior flushObsoleteSubclasses. ]. ^newClass! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 22:57' prior: 18572019! superclass: newSuper subclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class." | env | env := EnvironmentRequest signal ifNil: [newSuper environment]. ^self name: t inEnvironment: env subclassOf: newSuper type: newSuper typeOfClass instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:01' prior: 50629912! superclass: aClass variableByteSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable byte-sized nonpointer variables." | oldClassOrNil actualType env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a byte subclass of a class with named fields']. (aClass isVariable and: [aClass isWords]) ifTrue: [^self error: 'cannot make a byte subclass of a class with word fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a byte subclass of a class with pointer fields']. 
oldClassOrNil := aClass environment at: t ifAbsent:[nil]. actualType := (oldClassOrNil notNil and: [oldClassOrNil typeOfClass == #compiledMethod]) ifTrue: [#compiledMethod] ifFalse: [#bytes]. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: actualType instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:03' prior: 18573442! superclass: aClass variableSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #variable instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18574098! superclass: aClass variableWordSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class in which the subclass is to have indexable word-sized nonpointer variables." | env | (aClass instSize > 0) ifTrue: [^self error: 'cannot make a word subclass of a class with named fields']. (aClass isVariable and: [aClass isBytes]) ifTrue: [^self error: 'cannot make a word subclass of a class with byte fields']. (aClass isVariable and: [aClass isPointers]) ifTrue: [^self error: 'cannot make a word subclass of a class with pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #words instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! !ClassBuilder methodsFor: 'public' stamp: 'cwp 6/19/2012 23:04' prior: 18575028! superclass: aClass weakSubclass: t instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat "This is the standard initialization message for creating a new class as a subclass of an existing class (the receiver) in which the subclass is to have weak indexable pointer variables." | env | aClass isBits ifTrue: [^self error: 'cannot make a pointer subclass of a class with non-pointer fields']. env := EnvironmentRequest signal ifNil: [aClass environment]. ^self name: t inEnvironment: env subclassOf: aClass type: #weak instanceVariableNames: f classVariableNames: d poolDictionaries: s category: cat! ! "Kernel"! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 59135029! ambiguousSelector: aString inRange: anInterval | correctedSelector userSelection offset intervalWithOffset | self interactive ifFalse: [ "In non interactive mode, compile with backward comapatibility: $- is part of literal argument" Transcript cr; store: encoder classEncoding; nextPutAll:#'>>';store: encoder selector; show: ' would send ' , token , '-'. ^super ambiguousSelector: aString inRange: anInterval]. "handle the text selection" userSelection := cue requestor selectionInterval. intervalWithOffset := anInterval first + requestorOffset to: anInterval last + requestorOffset. cue requestor selectFrom: intervalWithOffset first to: intervalWithOffset last. 
cue requestor select. "Build the menu with alternatives" correctedSelector := AmbiguousSelector signalName: aString inRange: intervalWithOffset. correctedSelector ifNil: [^self fail]. "Execute the selected action" offset := self substituteWord: correctedSelector wordInterval: intervalWithOffset offset: 0. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + offset. token := (correctedSelector readStream upTo: Character space) asSymbol! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:21' prior: 38558136! collectTemporaryDeclarationsFrom: methodNode | tempsMarks str | tempsMarks := OrderedCollection new. str := cue requestor text asString. methodNode accept: (ParseNodeEnumerator ofBlock: [ :aNode | | mark | (aNode class canUnderstand: #tempsMark) ifTrue: [mark := aNode tempsMark. (mark notNil and: [ mark between: 1 and: str size ] and: [ (str at: mark) = $| ]) ifTrue: [ tempsMarks addLast: aNode ]]]). (tempsMark notNil and: [ tempsMark between: 1 and: str size ] and: [ (str at: tempsMark) = $| ]) ifTrue: [ tempsMarks addLast: self ]. ^ tempsMarks sorted: [ :a :b | a tempsMark > b tempsMark ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 52096606! correctSelector: proposedKeyword wordIntervals: spots exprInterval: expInt ifAbort: abortAction "Correct the proposedKeyword to some selector symbol, correcting the original text if such action is indicated. abortAction is invoked if the proposedKeyword couldn't be converted into a valid selector. Spots is an ordered collection of intervals within the test stream of the for each of the keyword parts." | correctSelector userSelection | "If we can't ask the user, assume that the keyword will be defined later" self interactive ifFalse: [^proposedKeyword asSymbol]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spots first first to: spots last last. cue requestor select. correctSelector := UnknownSelector name: proposedKeyword. correctSelector ifNil: [^abortAction value]. cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. self substituteSelector: correctSelector keywords wordIntervals: spots. ^(proposedKeyword last ~~ $: and: [correctSelector last == $:]) ifTrue: [abortAction value] ifFalse: [correctSelector]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:20' prior: 33907242! correctVariable: proposedVariable interval: spot "Correct the proposedVariable to a known variable, or declare it as a new variable if such action is requested. We support declaring lowercase variables as temps or inst-vars, and uppercase variables as Globals or ClassVars, depending on whether the context is nil (class=UndefinedObject). Spot is the interval within the test stream of the variable. rr 3/4/2004 10:26 : adds the option to define a new class. " "Check if this is an i-var, that has been corrected already (ugly)" "Display the pop-up menu" | binding userSelection action | (encoder classEncoding instVarNames includes: proposedVariable) ifTrue: [^InstanceVariableNode new name: proposedVariable index: (encoder classEncoding allInstVarNames indexOf: proposedVariable)]. "If we can't ask the user for correction, make it undeclared" self interactive ifFalse: [^encoder undeclared: proposedVariable]. 
"First check to see if the requestor knows anything about the variable" (binding := cue requestor bindingOf: proposedVariable) ifNotNil: [^encoder global: binding name: proposedVariable]. userSelection := cue requestor selectionInterval. cue requestor selectFrom: spot first to: spot last. cue requestor select. "Build the menu with alternatives" action := UndeclaredVariable signalFor: self name: proposedVariable inRange: spot. action ifNil: [^self fail]. "Execute the selected action" cue requestor deselect. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last. ^action value! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:19' prior: 34172921! declareUndeclaredTemps: methodNode "Declare any undeclared temps, declaring them at the smallest enclosing scope." | undeclared userSelection blocksToVars | (undeclared := encoder undeclaredTemps) isEmpty ifTrue: [^self]. userSelection := cue requestor selectionInterval. blocksToVars := IdentityDictionary new. undeclared do: [:var| (blocksToVars at: (var tag == #method ifTrue: [methodNode block] ifFalse: [methodNode accept: (VariableScopeFinder new ofVariable: var)]) ifAbsentPut: [SortedCollection new]) add: var name]. (blocksToVars removeKey: methodNode block ifAbsent: []) ifNotNil: [:rootVars| rootVars do: [:varName| self pasteTempAtMethodLevel: varName]]. (blocksToVars keys sorted: [:a :b| a tempsMark < b tempsMark]) do: [:block| | decl | decl := (blocksToVars at: block) reduce: [:a :b| a, ' ', b]. block temporaries isEmpty ifTrue: [self substituteWord: ' | ', decl, ' |' wordInterval: (block tempsMark + 1 to: block tempsMark) offset: requestorOffset] ifFalse: [self substituteWord: decl, ' ' wordInterval: (block tempsMark to: block tempsMark - 1) offset: requestorOffset]]. cue requestor selectInvisiblyFrom: userSelection first to: userSelection last + requestorOffset. ReparseAfterSourceEditing signal! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 11:45' prior: 37183770! defineClass: className "prompts the user to define a new class, asks for it's category, and lets the users edit further the definition" | sym cat def d2 | sym := className asSymbol. cat := UIManager default request: 'Enter class category : ' initialAnswer: self encoder classEncoding theNonMetaClass category. cat ifEmpty: [cat := 'Unknown']. def := 'Object subclass: #' , sym , ' instanceVariableNames: '''' classVariableNames: '''' poolDictionaries: '''' category: ''' , cat , ''''. d2 := UIManager default request: 'Edit class definition : ' initialAnswer: def. d2 ifEmpty: [d2 := def]. Compiler evaluate: d2. ^ encoder global: (cue environment bindingOf: sym) name: sym! ! !Parser methodsFor: 'primitives' stamp: 'cwp 12/27/2012 11:46' prior: 37184567! externalFunctionDeclaration "Parse the function declaration for a call to an external library." | descriptorClass callType modifier retType externalName args argType module fn | descriptorClass := cue environment valueOf: #ExternalFunction ifAbsent: [^ false]. callType := descriptorClass callingConventionFor: here. callType == nil ifTrue:[^false]. [modifier := descriptorClass callingConventionModifierFor: token. modifier notNil] whileTrue: [self advance. callType := callType bitOr: modifier]. "Parse return type" self advance. retType := self externalType: descriptorClass. retType == nil ifTrue:[^self expected:'return type']. "Parse function name or index" externalName := here. 
(self match: #string) ifTrue:[externalName := externalName asSymbol] ifFalse:[(self match:#number) ifFalse:[^self expected:'function name or index']]. (self matchToken: #'(') ifFalse:[^self expected:'argument list']. args := WriteStream on: Array new. [here == #')'] whileFalse:[ argType := self externalType: descriptorClass. argType == nil ifTrue:[^self expected:'argument']. argType isVoid & argType isPointerType not ifFalse:[args nextPut: argType]. ]. (self matchToken: #')') ifFalse:[^self expected:')']. (self matchToken: 'module:') ifTrue:[ module := here. (self match: #string) ifFalse:[^self expected: 'String']. module := module asSymbol]. Smalltalk at: #ExternalLibraryFunction ifPresent:[:xfn| fn := xfn name: externalName module: module callType: callType returnType: retType argumentTypes: args contents. self allocateLiteral: fn. ]. (self matchToken: 'error:') ifTrue: [| errorCodeVariable | errorCodeVariable := here. (hereType == #string or: [hereType == #word]) ifFalse:[^self expected: 'error code (a variable or string)']. self advance. self addPragma: (Pragma keyword: #primitive:error: arguments: (Array with: 120 with: errorCodeVariable)). fn ifNotNil: [fn setErrorCodeName: errorCodeVariable]] ifFalse: [self addPragma: (Pragma keyword: #primitive: arguments: #(120))]. ^true ! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:19' prior: 58306169! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Parser methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:22' prior: 58137223! notify: string at: location cue requestor isNil ifTrue: [(encoder == self or: [encoder isNil]) ifTrue: [^ self fail "failure setting up syntax error"]. SyntaxErrorNotification inClass: encoder classEncoding category: cue category withCode: (source contents asText copyReplaceFrom: location to: location - 1 with: ((string , ' ->') asText allBold addAttribute: TextColor red; yourself)) doitFlag: doitFlag errorMessage: string location: location] ifFalse: [cue requestor notify: string , ' ->' at: location in: source]. ^self fail! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:17' prior: 34177108! pasteTempAtMethodLevel: name | insertion delta theTextString characterBeforeMark | theTextString := cue requestor text string. characterBeforeMark := theTextString at: tempsMark-1 ifAbsent: [$ ]. (theTextString at: tempsMark) = $| ifTrue: [ "Paste it before the second vertical bar" insertion := name, ' '. characterBeforeMark isSeparator ifFalse: [ insertion := ' ', insertion]. delta := 0. ] ifFalse: [ "No bars - insert some with CR, tab" insertion := '| ' , name , ' |',String cr. delta := 2. "the bar and CR" characterBeforeMark = Character tab ifTrue: [ insertion := insertion , String tab. delta := delta + 1. "the tab" ]. ]. tempsMark := tempsMark + (self substituteWord: insertion wordInterval: (tempsMark to: tempsMark-1) offset: 0) - delta! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:16' prior: 52095305! queryUndefined | varStart varName | varName := parseNode key. varStart := self endOfLastToken + requestorOffset - varName size + 1. cue requestor selectFrom: varStart to: varStart + varName size - 1; select. (UndefinedVariable name: varName) ifFalse: [^ self fail]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38599341! removeEmptyTempDeclarationsFrom: methodNode | sourceCode madeChanges tempsMarkHolder | sourceCode := cue requestor text asString. 
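"Gather every scope (each block and the method itself) whose tempsMark still points at a '|' in the requestor's current text."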
tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. madeChanges := false. tempsMarkHolder do: [ :currentBlock | | tempsMarkChar0 tempsMarkChar1 tempsMarkChar2 end start | tempsMarkChar0 := (sourceCode at: currentBlock tempsMark). tempsMarkChar1 := (sourceCode at: currentBlock tempsMark - 1). tempsMarkChar2 := (sourceCode at: currentBlock tempsMark - 2). tempsMarkChar0 = $| & tempsMarkChar1 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 1]. tempsMarkChar0 = $| & tempsMarkChar1 = $ & tempsMarkChar2 = $| ifTrue: [ end := currentBlock tempsMark. start := end - 2]. start notNil & end notNil ifTrue: [ | lineStart lineEnd | lineStart := 1 + (sourceCode lastIndexOf: Character cr startingAt: start - 1 ifAbsent: [ 0 ]). lineEnd := sourceCode indexOf: Character cr startingAt: end + 1 ifAbsent: [ sourceCode size ]. ((sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: lineStart) >= start and: [ (sourceCode indexOfAnyOf: CharacterSet nonSeparators startingAt: end + 1) > lineEnd ]) ifTrue: [ start := lineStart. end := lineEnd ]. cue requestor correctFrom: start to: end with: ''. madeChanges := true. currentBlock tempsMark: nil ] ]. madeChanges ifTrue: [ReparseAfterSourceEditing signal]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:15' prior: 38561281! removeUnusedTemporaryNamed: temp from: str lookingAt: currentBlock movingTempMarksOf: someBlocks | start end | end := currentBlock tempsMark - 1. ["Beginning at right temp marker..." start := end - temp size + 1. end < temp size or: [ (str at: start) = $| ] or: [ temp = (str copyFrom: start to: end) and: [ ((str at: start - 1) = $| | (str at: start - 1) isSeparator) & ((str at: end + 1) = $| | (str at: end + 1) isSeparator) ] ]] whileFalse: [ "Search left for the unused temp" end := cue requestor nextTokenFrom: end direction: -1 ]. (end < temp size or: [ (str at: start) = $| ]) ifFalse: [(str at: start - 1) = $ ifTrue: [ start := start - 1 ]. cue requestor correctFrom: start to: end with: ''. someBlocks do: [ :aBlock | aBlock tempsMark: aBlock tempsMark - (end - start + 1)]. ^true ]. ^false! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 38562194! removeUnusedTemps: methodNode "Scan for unused temp names, and prompt the user about the prospect of removing each one found" | madeChanges tempsMarkHolder unusedTempNames tempMarkHoldersToChange | madeChanges := false. tempMarkHoldersToChange := OrderedCollection new. tempsMarkHolder := self collectTemporaryDeclarationsFrom: methodNode. unusedTempNames := encoder unusedTempNames select: [ :temp | (encoder lookupVariable: temp ifAbsent: [ ]) isUndefTemp and: [ self queriedUnusedTemporaries at: temp ifAbsentPut: [UnusedVariable name: temp] ]]. tempsMarkHolder do: [ :currentBlock | tempMarkHoldersToChange add: currentBlock. unusedTempNames do: [ :temp | (self removeUnusedTemporaryNamed: temp from: cue requestor text asString lookingAt: currentBlock movingTempMarksOf: tempMarkHoldersToChange) ifTrue: [ madeChanges := true ]]]. madeChanges ifTrue: [ self removeEmptyTempDeclarationsFrom: methodNode. ReparseAfterSourceEditing signal ]! ! !Parser methodsFor: 'error correction' stamp: 'cwp 12/27/2012 10:14' prior: 34179326! substituteWord: correctWord wordInterval: spot offset: o "Substitute the correctSelector into the (presumed interactive) receiver. Update requestorOffset based on the delta size and answer the updated offset." cue requestor correctFrom: spot first + o to: spot last + o with: correctWord. 
requestorOffset := requestorOffset + correctWord size - spot size. ^o + correctWord size - spot size! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34179807! temporaries " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar' ! ! !Parser methodsFor: 'expression types' stamp: 'cwp 12/27/2012 10:14' prior: 34180638! temporariesIn: methodSelector " [ '|' (variable)* '|' ]" | vars theActualText | (self match: #verticalBar) ifFalse: ["no temps" doitFlag ifTrue: [tempsMark := self interactive ifTrue: [cue requestor selectionInterval first] ifFalse: [1]. ^ #()]. tempsMark := hereMark "formerly --> prevMark + prevToken". tempsMark > 0 ifTrue: [theActualText := source contents. [tempsMark < theActualText size and: [(theActualText at: tempsMark) isSeparator]] whileTrue: [tempsMark := tempsMark + 1]]. ^ #()]. vars := OrderedCollection new. [hereType == #word] whileTrue: [vars addLast: (encoder bindTemp: self advance in: methodSelector)]. (self match: #verticalBar) ifTrue: [tempsMark := prevMark. ^ vars]. ^ self expected: 'Vertical bar'! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:11' prior: 53971863! compiledMethodFor: textOrStream in: aContext to: receiver notifying: aRequestor ifFail: failBlock logged: logFlag "Compiles the sourceStream into a parse tree, then generates code into a method, and answers it. If receiver is not nil, then the text can refer to instance variables of that receiver (the Inspector uses this). If aContext is not nil, the text can refer to temporaries in that context (the Debugger uses this). If aRequestor is not nil, then it will receive a notify:at: message before the attempt to evaluate is aborted." | methodNode method theClass | theClass := (aContext == nil ifTrue: [receiver] ifFalse: [aContext receiver]) class. self from: textOrStream class: theClass context: aContext notifying: aRequestor. methodNode := self translate: sourceStream noPattern: true ifFail: [^failBlock value]. method := self interactive ifTrue: [ methodNode generateWithTempNames ] ifFalse: [methodNode generate]. logFlag ifTrue: [SystemChangeNotifier uniqueInstance evaluated: sourceStream contents context: aContext]. ^method! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:33' prior: 34363593! format: aStream noPattern: noPattern ifFail: failBlock ^(self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]) preen! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 10:08' prior: 58306325! interactive "Answer true if compilation is interactive" ^ cue requestor notNil! ! !Compiler methodsFor: 'error handling' stamp: 'cwp 12/27/2012 10:10' prior: 50779387! notify: aString at: location "Refer to the comment in Object|notify:." 
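"Without a requestor, raise a SyntaxErrorNotification built from the cue's class and category; otherwise forward the notification to the requestor."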
^ cue requestor == nil ifTrue: [SyntaxErrorNotification inClass: cue getClass category: cue category withCode: (sourceStream contents copyReplaceFrom: location to: location - 1 with: aString) doitFlag: false errorMessage: aString location: location] ifFalse: [cue requestor notify: aString at: location in: sourceStream]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:34' prior: 50777201! parse: textOrStream in: aClass notifying: req "Compile the argument, textOrStream, with respect to the class, aClass, and answer the MethodNode that is the root of the resulting parse tree. Notify the argument, req, if an error occurs. The failBlock is defaulted to an empty block." self from: textOrStream class: aClass context: nil notifying: req. ^self parser parse: sourceStream cue: cue noPattern: false ifFail: []! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 10:09' prior: 36332471! parser parser ifNil: [parser := (cue getClass ifNil: [self class]) newParser]. ^parser! ! !Compiler methodsFor: 'private' stamp: 'cwp 12/27/2012 11:37' prior: 50780779! translate: aStream noPattern: noPattern ifFail: failBlock ^self parser parse: aStream cue: cue noPattern: noPattern ifFail: [^failBlock value]! ! !Compiler methodsFor: 'public access' stamp: 'cwp 12/27/2012 11:37' prior: 19124095! translate: aStream noPattern: noPattern ifFail: failBlock parser: parser | tree | tree := parser parse: aStream cue: cue noPattern: noPattern ifFail: [^ failBlock value]. ^ tree! ! !Encoder methodsFor: 'results' stamp: 'cwp 12/27/2012 10:26' prior: 50999892! associationForClass | assoc | assoc := self environment associationAt: cue getClass name ifAbsent: [nil]. ^assoc value == cue getClass ifTrue: [assoc] ifFalse: [Association new value: cue getClass]! ! !Encoder methodsFor: 'temps' stamp: 'cwp 12/27/2012 10:25' prior: 20148386! bindTemp: name in: methodSelector "Declare a temporary; error not if a field or class variable." scopeTable at: name ifPresent:[:node| "When non-interactive raise the error only if its a duplicate" (node isTemp or:[requestor interactive]) ifTrue:[^self notify:'Name is already defined'] ifFalse:[Transcript show: '(', name, ' is shadowed in "' , cue getClass printString , '>>' , methodSelector printString , '")']]. ^self reallyBind: name! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:25' prior: 20149084! classEncoding "This is a hack so that the parser may findout what class it was parsing for when it wants to create a syntax error view." ^ cue getClass! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:39' prior: 20138819! encodeLiteral: object ^self name: object key: (cue literalScannedAs: object notifying: self) class: LiteralNode type: LdLitType set: litSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 20139010! encodeSelector: aSelector ^self name: aSelector key: aSelector class: SelectorNode type: SendType set: selectorSet! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:40' prior: 58545123! environment "Answer the environment of the current compilation context, be it in a class or global (e.g. a workspace)" ^cue environment! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 11:41' prior: 50994497! lookupInPools: varName ifFound: assocBlock ^Symbol hasInterned: varName ifTrue: [:sym| (cue bindingOf: sym) ifNil: [^false] ifNotNil: [:assoc| assocBlock value: assoc]]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 51004306! 
possibleNamesFor: proposedName | results | results := cue getClass possibleVariablesFor: proposedName continuedFrom: nil. ^ proposedName correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:24' prior: 50995012! possibleVariablesFor: proposedVariable | results | results := proposedVariable correctAgainstDictionary: scopeTable continuedFrom: nil. proposedVariable first canBeGlobalVarInitial ifTrue: [ results := cue getClass possibleVariablesFor: proposedVariable continuedFrom: results ]. ^ proposedVariable correctAgainst: nil continuedFrom: results. ! ! !Encoder methodsFor: 'encoding' stamp: 'cwp 12/27/2012 11:42' prior: 51002830! undeclared: name | sym | requestor interactive ifTrue: [requestor requestor == #error: ifTrue: [requestor error: 'Undeclared']. ^self notify: 'Undeclared']. "Allow knowlegeable clients to squash the undeclared warning if they want (e.g. Diffing pretty printers that are simply formatting text). As this breaks compilation it should only be used by clients that want to discard the result of the compilation. To squash the warning use e.g. [Compiler format: code in: class notifying: nil decorated: false] on: UndeclaredVariableWarning do: [:ex| ex resume: false]" sym := name asSymbol. ^(UndeclaredVariableWarning new name: name selector: selector class: cue getClass) signal ifTrue: [| undeclared | undeclared := cue environment undeclared. undeclared at: sym put: nil. self global: (undeclared associationAt: sym) name: sym] ifFalse: [self global: (Association key: sym) name: sym]! ! !Encoder methodsFor: 'private' stamp: 'cwp 12/27/2012 10:23' prior: 51006007! warnAboutShadowed: name requestor addWarning: name,' is shadowed'. selector ifNotNil: [Transcript cr; show: cue getClass name,'>>', selector, '(', name,' is shadowed)']! ! "Compiler"! !SmalltalkImage methodsFor: 'housekeeping' stamp: 'cwp 6/22/2012 15:56' prior: 58497062! cleanOutUndeclared globals undeclared removeUnreferencedKeys! ! !SmalltalkImage methodsFor: 'special objects' stamp: 'cwp 6/22/2012 09:01' prior: 40515090! recreateSpecialObjectsArray "Smalltalk recreateSpecialObjectsArray" "To external package developers: **** DO NOT OVERRIDE THIS METHOD. ***** If you are writing a plugin and need additional special object(s) for your own use, use addGCRoot() function and use own, separate special objects registry " "The Special Objects Array is an array of objects used by the Squeak virtual machine. Its contents are critical and accesses to it by the VM are unchecked, so don't even think of playing here unless you know what you are doing." | newArray | newArray := Array new: 56. "Nil false and true get used throughout the interpreter" newArray at: 1 put: nil. newArray at: 2 put: false. newArray at: 3 put: true. "This association holds the active process (a ProcessScheduler)" newArray at: 4 put: (self bindingOf: #Processor). "Numerous classes below used for type checking and instantiation" newArray at: 5 put: Bitmap. newArray at: 6 put: SmallInteger. newArray at: 7 put: ByteString. newArray at: 8 put: Array. newArray at: 9 put: Smalltalk. newArray at: 10 put: Float. newArray at: 11 put: MethodContext. newArray at: 12 put: BlockContext. newArray at: 13 put: Point. newArray at: 14 put: LargePositiveInteger. newArray at: 15 put: Display. newArray at: 16 put: Message. newArray at: 17 put: CompiledMethod. newArray at: 18 put: (self specialObjectsArray at: 18). "(low space Semaphore)" newArray at: 19 put: Semaphore. newArray at: 20 put: Character. 
newArray at: 21 put: #doesNotUnderstand:. newArray at: 22 put: #cannotReturn:. newArray at: 23 put: nil. "This is the process signalling low space." "An array of the 32 selectors that are compiled as special bytecodes, paired alternately with the number of arguments each takes." newArray at: 24 put: #( #+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0 ). "An array of the 255 Characters in ascii order. Cog inlines table into machine code at: prim so do not regenerate it." newArray at: 25 put: (self specialObjectsArray at: 25). newArray at: 26 put: #mustBeBoolean. newArray at: 27 put: ByteArray. newArray at: 28 put: Process. "An array of up to 31 classes whose instances will have compact headers" newArray at: 29 put: self compactClassesArray. newArray at: 30 put: (self specialObjectsArray at: 30). "(delay Semaphore)" newArray at: 31 put: (self specialObjectsArray at: 31). "(user interrupt Semaphore)" "Entries 32 - 34 unreferenced. Previously these contained prototype instances to be copied for fast initialization" newArray at: 32 put: nil. "was (Float new: 2)" newArray at: 33 put: nil. "was (LargePositiveInteger new: 4)" newArray at: 34 put: nil. "was Point new" newArray at: 35 put: #cannotInterpret:. "Note: This must be fixed once we start using context prototypes (yeah, right)" "(MethodContext new: CompiledMethod fullFrameSize)." newArray at: 36 put: (self specialObjectsArray at: 36). "Is the prototype MethodContext (unused by the VM)" newArray at: 37 put: BlockClosure. "(BlockContext new: CompiledMethod fullFrameSize)." newArray at: 38 put: (self specialObjectsArray at: 38). "Is the prototype BlockContext (unused by the VM)" "array of objects referred to by external code" newArray at: 39 put: (self specialObjectsArray at: 39). "preserve external semaphores" newArray at: 40 put: nil. "Reserved for Mutex in Cog VMs" newArray at: 41 put: nil. "Reserved for a LinkedList instance for overlapped calls in CogMT" "finalization Semaphore" newArray at: 42 put: ((self specialObjectsArray at: 42) ifNil: [Semaphore new]). newArray at: 43 put: LargeNegativeInteger. "External objects for callout. Note: Written so that one can actually completely remove the FFI." newArray at: 44 put: (self at: #ExternalAddress ifAbsent: []). newArray at: 45 put: (self at: #ExternalStructure ifAbsent: []). newArray at: 46 put: (self at: #ExternalData ifAbsent: []). newArray at: 47 put: (self at: #ExternalFunction ifAbsent: []). newArray at: 48 put: (self at: #ExternalLibrary ifAbsent: []). newArray at: 49 put: #aboutToReturn:through:. newArray at: 50 put: #run:with:in:. "51 reserved for immutability message" "newArray at: 51 put: #attemptToAssign:withIndex:." newArray at: 52 put: #(nil "nil => generic error" #'bad receiver' #'bad argument' #'bad index' #'bad number of arguments' #'inappropriate operation' #'unsupported operation' #'no modification' #'insufficient object memory' #'insufficient C memory' #'not found' #'bad method' #'internal error in named primitive machinery' #'object may move'). "53 to 55 are for Alien" newArray at: 53 put: (self at: #Alien ifAbsent: []). newArray at: 54 put: #invokeCallback:stack:registers:jmpbuf:. newArray at: 55 put: (self at: #UnsafeAlien ifAbsent: []). "Weak reference finalization" newArray at: 56 put: (self at: #WeakFinalizationList ifAbsent: []). 
"Now replace the interpreter's reference in one atomic operation" self specialObjectsArray becomeForward: newArray ! ! !SmalltalkImage methodsFor: 'shrinking' stamp: 'cwp 6/22/2012 15:57' prior: 37288071! unloadAllKnownPackages "Unload all packages we know how to unload and reload" "Prepare unloading" Smalltalk zapMVCprojects. Flaps disableGlobalFlaps: false. StandardScriptingSystem removeUnreferencedPlayers. Project removeAllButCurrent. #('Morphic-UserObjects' 'EToy-UserObjects' 'Morphic-Imported' ) do: [:each | SystemOrganization removeSystemCategory: each]. Smalltalk at: #ServiceRegistry ifPresent:[:aClass| SystemChangeNotifier uniqueInstance noMoreNotificationsFor: aClass. ]. World removeAllMorphs. "Go unloading" #( 'ReleaseBuilder' 'ScriptLoader' '311Deprecated' '39Deprecated' 'Universes' 'SMLoader' 'SMBase' 'Installer-Core' 'VersionNumberTests' 'VersionNumber' 'Services-Base' 'PreferenceBrowser' 'Nebraska' 'ToolBuilder-MVC' 'ST80' 'CollectionsTests' 'GraphicsTests' 'KernelTests' 'MorphicTests' 'MultilingualTests' 'NetworkTests' 'ToolsTests' 'TraitsTests' 'SystemChangeNotification-Tests' 'FlexibleVocabularies' 'EToys' 'Protocols' 'XML-Parser' 'Tests' 'SUnitGUI' 'Help-Squeak' 'HelpSystem' 'SystemReporter' ) do: [:pkgName| (MCPackage named: pkgName) unload. MCMcmUpdater disableUpdatesOfPackage: pkgName. ]. "Traits use custom unload" Smalltalk at: #Trait ifPresent:[:aClass| aClass unloadTraits]. "Post-unload cleanup" MCWorkingCopy flushObsoletePackageInfos. SystemOrganization removeSystemCategory: 'UserObjects'. Presenter defaultPresenterClass: nil. World dumpPresenter. ScheduledControllers := nil. Preferences removePreference: #allowEtoyUserCustomEvents. SystemOrganization removeEmptyCategories. ChangeSet removeChangeSetsNamedSuchThat:[:cs | (cs == ChangeSet current) not]. globals undeclared removeUnreferencedKeys. StandardScriptingSystem initialize. MCFileBasedRepository flushAllCaches. MCDefinition clearInstances. Behavior flushObsoleteSubclasses. ChangeSet current clear. ChangeSet current name: 'Unnamed1'. Smalltalk flushClassNameCache. Smalltalk at: #Browser ifPresent:[:br| br initialize]. DebuggerMethodMap voidMapCache. DataStream initialize. AppRegistry removeObsolete. FileServices removeObsolete. Preferences removeObsolete. TheWorldMenu removeObsolete. Smalltalk garbageCollect. Symbol compactSymbolTable. TheWorldMainDockingBar updateInstances. MorphicProject defaultFill: (Color gray: 0.9). World color: (Color gray: 0.9). ! ! !InternalTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:34' prior: 40472775! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !NaturalLanguageTranslator methodsFor: 'fileIn/fileOut' stamp: 'cwp 6/20/2012 17:27' prior: 40496770! scanFrom: aStream ^ self scanFrom: aStream environment: Environment default! ! !SystemDictionary methodsFor: 'dictionary access' stamp: 'cwp 6/22/2012 15:58' prior: 30574136! at: aKey put: anObject "Override from Dictionary to check Undeclared and fix up references to undeclared variables." | index element | (self includesKey: aKey) ifFalse: [self declare: aKey from: (self at: #Undeclared). self flushClassNameCache]. super at: aKey put: anObject. ^ anObject! ! "System"! 
CodeHolder subclass: #Browser instanceVariableNames: 'environment systemOrganizer classOrganizer metaClassOrganizer editSelection metaClassIndicated selectedSystemCategory selectedClassName selectedMessageName selectedMessageCategoryName' classVariableNames: 'ListClassesHierarchically RecentClasses' poolDictionaries: '' category: 'Tools-Browser'! !Browser commentStamp: 'cwp 12/27/2012 11:09' prior: 36419432! I represent a query path into the class descriptions, the software of the system.! !Browser methodsFor: 'accessing' stamp: 'cwp 6/24/2012 23:20'! selectEnvironment: anEnvironment environment := anEnvironment. systemOrganizer := environment organization! ! !Browser methodsFor: 'system category list' stamp: 'cwp 6/24/2012 23:06' prior: 36467357! From noreply at buildbot.pypy.org Thu Jan 16 15:04:49 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:49 +0100 (CET) Subject: [pypy-commit] lang-smalltalk 64bit: support 64bit target with 32bit images Message-ID: <20140116140449.994131C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: 64bit Changeset: r572:1ebcd9bf1944 Date: 2014-01-10 21:19 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/1ebcd9bf1944/ Log: support 64bit target with 32bit images diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -1,4 +1,4 @@ -from rpython.rlib.rarithmetic import r_uint +from rpython.rlib.rarithmetic import r_uint32 as r_uint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.runicode import unicode_encode_utf_8 from rpython.rlib import jit @@ -71,7 +71,7 @@ self.set_squeak_colormap(self.screen) def get_pixelbuffer(self): - return rffi.cast(rffi.ULONGP, self.screen.c_pixels) + return rffi.cast(rffi.UINTP, self.screen.c_pixels) def defer_updates(self, flag): self._defer_updates = flag diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -18,7 +18,7 @@ from spyvm import constants, error from rpython.rlib import rrandom, objectmodel, jit, signature -from rpython.rlib.rarithmetic import intmask, r_uint +from rpython.rlib.rarithmetic import intmask, r_uint32, r_uint from rpython.tool.pairtype import extendabletype from rpython.rlib.objectmodel import instantiate, compute_hash from rpython.rtyper.lltypesystem import lltype, rffi @@ -187,7 +187,6 @@ return space.wrap_int(self.value >> shift) def unwrap_uint(self, space): - from rpython.rlib.rarithmetic import r_uint val = self.value if val < 0: raise error.UnwrappingError("got negative integer") @@ -295,7 +294,6 @@ return space.wrap_int((self.value >> shift) & mask) def unwrap_uint(self, space): - from rpython.rlib.rarithmetic import r_uint return r_uint(self.value) def clone(self, space): @@ -397,11 +395,11 @@ from rpython.rlib.rstruct.ieee import float_pack r = float_pack(self.value, 8) # C double if n0 == 0: - return space.wrap_uint(r_uint(intmask(r >> 32))) + return space.wrap_uint(r_uint32(intmask(r >> 32))) else: # bounds-check for primitive access is done in the primitive assert n0 == 1 - return space.wrap_uint(r_uint(intmask(r))) + return space.wrap_uint(r_uint32(intmask(r))) def store(self, space, n0, w_obj): from rpython.rlib.rstruct.ieee import float_unpack, float_pack @@ -940,7 +938,7 @@ if self.words is None: return self.c_words else: - from spyvm.interpreter_proxy import sqIntArrayPtr + from spyvm.iproxy import sqIntArrayPtr size = self.size() old_words = self.words c_words = self.c_words = lltype.malloc(sqIntArrayPtr.TO, size, flavor='raw') @@ -991,7 
+989,8 @@ def __init__(self, space, w_class, size, depth, display): W_AbstractObjectWithClassReference.__init__(self, space, w_class) - self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') + # self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') + self._real_depth_buffer = [r_uint(0)] * size self.pixelbuffer = display.get_pixelbuffer() self._realsize = size self.display = display @@ -1003,7 +1002,7 @@ def atput0(self, space, index0, w_value): word = space.unwrap_uint(w_value) - self.setword(index0, word) + self.setword(index0, r_uint(word)) def flush_to_screen(self): self.display.flip() @@ -1028,7 +1027,7 @@ def setword(self, n, word): self._real_depth_buffer[n] = word - self.pixelbuffer[n] = word + self.pixelbuffer[n] = r_uint32(word) def is_array_object(self): return True @@ -1062,17 +1061,18 @@ ((msb & mask) << 11) ) - self.pixelbuffer[n] = r_uint(lsb | (msb << 16)) + self.pixelbuffer[n] = r_uint32(lsb | (msb << 16)) class W_8BitDisplayBitmap(W_DisplayBitmap): def setword(self, n, word): self._real_depth_buffer[n] = word - self.pixelbuffer[n] = r_uint( - (word >> 24) | - ((word >> 8) & 0x0000ff00) | - ((word << 8) & 0x00ff0000) | - (word << 24) + nWord = r_uint(word) + self.pixelbuffer[n] = r_uint32( + (nWord >> 24) | + ((nWord >> 8) & 0x0000ff00) | + ((nWord << 8) & 0x00ff0000) | + (nWord << 24) ) @@ -1081,7 +1081,7 @@ @jit.unroll_safe def setword(self, n, word): self._real_depth_buffer[n] = word - word = r_uint(word) + nWord = r_uint(word) pos = self.compute_pos(n) assert self._depth <= 4 rshift = 32 - self._depth @@ -1092,8 +1092,8 @@ for i in xrange(4): pixel = r_uint(word) >> rshift mapword |= (r_uint(pixel) << (i * 8)) - word <<= self._depth - self.pixelbuffer[pos] = mapword + nWord <<= self._depth + self.pixelbuffer[pos] = r_uint32(mapword) pos += 1 def compute_pos(self, n): diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,10 +1,11 @@ import os -from spyvm import constants, model, shadow, wrapper +from spyvm import constants, model, shadow, wrapper, system from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError from rpython.rlib import jit, rpath from rpython.rlib.objectmodel import instantiate, specialize -from rpython.rlib.rarithmetic import intmask, r_uint, int_between +from rpython.rlib.rarithmetic import intmask, int_between, r_uint + class ObjSpace(object): def __init__(self): @@ -15,7 +16,7 @@ self.make_bootstrap_objects() def find_executable(self, executable): - if os.sep in executable or (os.name == "nt" and ":" in executable): + if os.sep in executable or (system.IS_WINDOWS and ":" in executable): return executable path = os.environ.get("PATH") if path: @@ -226,6 +227,7 @@ else: return self._wrap_uint_loop(val, bytes_len) + @jit.unroll_safe def _wrap_uint_loop(self, val, bytes_len): w_result = model.W_BytesObject(self, self.classtable['w_LargePositiveInteger'], bytes_len) diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -4,7 +4,7 @@ from spyvm.plugins.plugin import Plugin from rpython.rlib import jit, objectmodel -from rpython.rlib.rarithmetic import r_uint, intmask +from rpython.rlib.rarithmetic import intmask, r_uint32, r_uint BitBltPlugin = Plugin() @@ -17,7 +17,7 @@ raise PrimitiveFailedError("BitBlt primitive not called in BitBlt object!") # only allow combinationRules 0-41 - combinationRule = 
interp.space.unwrap_positive_32bit_int(w_rcvr.fetch(interp.space, 3)) + combinationRule = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 3)) if combinationRule > 41: raise PrimitiveFailedError("Missing combinationRule %d" % combinationRule) @@ -264,19 +264,19 @@ destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(halftoneWord, destWord) destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) + self.dest.w_bits.setword(self.destIndex, r_uint32(destWord)) self.destIndex += 1 destMask = BitBltShadow.AllOnes # the central horizontal loop requires no store masking if self.combinationRule == 3: # store rule requires no dest merging for word in range(2, self.nWords): - self.dest.w_bits.setword(self.destIndex, halftoneWord) + self.dest.w_bits.setword(self.destIndex, r_uint32(halftoneWord)) self.destIndex += 1 else: for word in range(2, self.nWords): destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(halftoneWord, destWord) - self.dest.w_bits.setword(self.destIndex, mergeWord) + self.dest.w_bits.setword(self.destIndex, r_uint32(mergeWord)) self.destIndex += 1 # last word in row is masked if self.nWords > 1: @@ -284,7 +284,7 @@ destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(halftoneWord, destWord) destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) + self.dest.w_bits.setword(self.destIndex, r_uint32(destWord)) self.destIndex += 1 self.destIndex += self.destDelta @@ -346,13 +346,13 @@ if self.destMask == BitBltShadow.AllOnes: # avoid read-modify-write self.dest.w_bits.setword( self.destIndex, - self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) + r_uint32(self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex))) ) else: # General version using dest masking destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(skewWord & halftoneWord, destWord & self.destMask) destWord = (self.destMask & mergeWord) | (destWord & (~self.destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) + self.dest.w_bits.setword(self.destIndex, r_uint32(destWord)) self.destIndex += 1 if (self.nWords == 2): # is the next word the last word? 
@@ -458,7 +458,7 @@ destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) + self.dest.w_bits.setword(self.destIndex, r_uint32(destWord)) # The central horizontal loop requires no store masking self.destIndex += hInc destMask = BitBltShadow.AllOnes @@ -468,12 +468,12 @@ if (self.hDir == -1): for word in range(2, self.nWords): thisWord = self.source.w_bits.getword(self.sourceIndex) - self.dest.w_bits.setword(self.destIndex, thisWord) + self.dest.w_bits.setword(self.destIndex, r_uint32(thisWord)) self.sourceIndex += hInc self.destIndex += hInc else: for word in range(2, self.nWords): - self.dest.w_bits.setword(self.destIndex, prevWord) + self.dest.w_bits.setword(self.destIndex, r_uint32(prevWord)) prevWord = self.source.w_bits.getword(self.sourceIndex) self.destIndex += hInc self.sourceIndex += hInc @@ -484,7 +484,7 @@ self.sourceIndex += hInc skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) prevWord = thisWord - self.dest.w_bits.setword(self.destIndex, skewWord & halftoneWord) + self.dest.w_bits.setword(self.destIndex, r_uint32(skewWord & halftoneWord)) self.destIndex += hInc else: # Dest merging here... for word in range(2, self.nWords): @@ -493,7 +493,7 @@ skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) prevWord = thisWord mergeWord = self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) - self.dest.w_bits.setword(self.destIndex, mergeWord) + self.dest.w_bits.setword(self.destIndex, r_uint32(mergeWord)) self.destIndex += hInc # last word with masking and all if (self.nWords > 1): @@ -507,7 +507,7 @@ destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, destWord) + self.dest.w_bits.setword(self.destIndex, r_uint32(destWord)) self.destIndex += hInc self.sourceIndex += self.sourceDelta self.destIndex += self.destDelta @@ -521,7 +521,7 @@ @objectmodel.specialize.arg_or_var(3) def merge(self, source_word, dest_word, combinationRule): - assert isinstance(source_word, r_uint) and isinstance(dest_word, r_uint) + # assert isinstance(source_word, r_uint) and isinstance(dest_word, r_uint) if combinationRule == 0: return 0 elif combinationRule == 1: diff --git a/spyvm/plugins/fileplugin.py b/spyvm/plugins/fileplugin.py --- a/spyvm/plugins/fileplugin.py +++ b/spyvm/plugins/fileplugin.py @@ -154,6 +154,6 @@ def smalltalk_timestamp(space, sec_since_epoch): import time from spyvm.primitives import secs_between_1901_and_1970 - from rpython.rlib.rarithmetic import r_uint - sec_since_1901 = r_uint(sec_since_epoch + secs_between_1901_and_1970) + from rpython.rlib.rarithmetic import r_uint32 as r_uint + sec_since_1901 = r_uint(int(sec_since_epoch + secs_between_1901_and_1970)) return space.wrap_uint(sec_since_1901) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -9,6 +9,7 @@ from spyvm import wrapper from rpython.rlib import rarithmetic, rfloat, unroll, jit +from rpython.rlib.rarithmetic import r_uint32, r_uint def assert_bounds(n0, minimum, maximum): if not minimum <= n0 < maximum: @@ -246,7 +247,9 @@ # #bitShift: -- return the shifted value @expose_primitive(BIT_SHIFT, unwrap_spec=[object, int]) def func(interp, s_frame, receiver, 
argument): - from rpython.rlib.rarithmetic import LONG_BIT + # from rpython.rlib.rarithmetic import LONG_BIT + # XXX: 32-bit images only! + LONG_BIT = 32 if -LONG_BIT < argument < LONG_BIT: # overflow-checking done in lshift implementations if argument > 0: @@ -293,10 +296,19 @@ return w_res make_func(op) +# XXX: 32-bit only +def ovfcheck_float_to_int(x): + from rpython.rlib.rfloat import isnan + if isnan(x): + raise OverflowError + if -2147483649.0 < x < 2147483648.0: + return int(x) + raise OverflowError + @expose_primitive(FLOAT_TRUNCATED, unwrap_spec=[float]) def func(interp, s_frame, f): try: - return interp.space.wrap_int(rarithmetic.ovfcheck_float_to_int(f)) + return interp.space.wrap_int(ovfcheck_float_to_int(f)) except OverflowError: raise PrimitiveFailedError @@ -883,7 +895,7 @@ from spyvm.plugins.vmdebugging import DebuggingPlugin return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, s_method) else: - from spyvm.interpreter_proxy import IProxy + from spyvm.iproxy import IProxy return IProxy.call(signature, interp, s_frame, argcount, s_method) raise PrimitiveFailedError @@ -1023,15 +1035,15 @@ -secs_between_1901_and_1970 = rarithmetic.r_uint((69 * 365 + 17) * 24 * 3600) +secs_between_1901_and_1970 = r_uint((69 * 365 + 17) * 24 * 3600) @expose_primitive(SECONDS_CLOCK, unwrap_spec=[object]) def func(interp, s_frame, w_arg): import time - sec_since_epoch = rarithmetic.r_uint(time.time()) + sec_since_epoch = r_uint(int(time.time())) # XXX: overflow check necessary? sec_since_1901 = sec_since_epoch + secs_between_1901_and_1970 - return interp.space.wrap_uint(sec_since_1901) + return interp.space.wrap_uint(r_uint32(sec_since_1901)) #____________________________________________________________________________ @@ -1073,9 +1085,12 @@ raise PrimitiveFailedError for i in xrange(w_arg.size()): w_arg.setchar(i, chr(new_value)) - elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, model.W_DisplayBitmap): + elif isinstance(w_arg, model.W_WordsObject): for i in xrange(w_arg.size()): w_arg.setword(i, new_value) + elif isinstance(w_arg, model.W_DisplayBitmap): + for i in xrange(w_arg.size()): + w_arg.setword(i, r_uint32(new_value)) else: raise PrimitiveFailedError return w_arg diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -2,6 +2,8 @@ from spyvm import model, constants, error, wrapper from rpython.tool.pairtype import extendabletype from rpython.rlib import rarithmetic, jit +from rpython.rlib.rarithmetic import r_uint + def make_elidable_after_versioning(func): @jit.elidable @@ -648,7 +650,7 @@ self._temps_and_stack = [None] * (stacksize + tempsize) for i in range(tempsize): self._temps_and_stack[i] = self.space.w_nil - self._stack_ptr = rarithmetic.r_uint(tempsize) # we point after the last element + self._stack_ptr = r_uint(tempsize) # we point after the last element # ______________________________________________________________________ # Stack Manipulation @@ -681,11 +683,11 @@ return self.peek(0) def set_top(self, value, position=0): - rpos = rarithmetic.r_uint(position) + rpos = r_uint(position) self._temps_and_stack[self._stack_ptr + ~rpos] = value def peek(self, idx): - rpos = rarithmetic.r_uint(idx) + rpos = r_uint(idx) return self._temps_and_stack[jit.promote(self._stack_ptr) + ~rpos] @jit.unroll_safe diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -380,11 +380,11 @@ def run_spy_hacks(self, space): pass - # w_display = space.objtable["w_display"] 
- # if w_display is not None and w_display is not space.w_nil: - # if space.unwrap_int(w_display.fetch(space, 3)) < 8: - # # non-native indexed color depth not well supported - # w_display.store(space, 3, space.wrap_int(8)) + w_display = space.objtable["w_display"] + if w_display is not None and w_display is not space.w_nil: + if space.unwrap_int(w_display.fetch(space, 3)) < 8: + # non-native indexed color depth not well supported + w_display.store(space, 3, space.wrap_int(32)) def find_symbol(self, space, reader, symbol): w_dnu = self.special(constants.SO_DOES_NOT_UNDERSTAND) @@ -558,7 +558,7 @@ return bytes[:stop] # omit odd bytes def get_ruints(self, required_len=-1): - from rpython.rlib.rarithmetic import r_uint + from rpython.rlib.rarithmetic import r_uint32 as r_uint words = [r_uint(x) for x in self.chunk.data] if required_len != -1 and len(words) != required_len: raise CorruptImageError("Expected %d words, got %d" % (required_len, len(words))) diff --git a/spyvm/test/test_bitblt.py b/spyvm/test/test_bitblt.py --- a/spyvm/test/test_bitblt.py +++ b/spyvm/test/test_bitblt.py @@ -1,4 +1,5 @@ from spyvm import model, shadow, constants, interpreter, objspace +from spyvm.plugins import bitblt space = objspace.ObjSpace() @@ -37,7 +38,7 @@ def test_bitBlt_values(): w_bb = model.W_PointersObject(space, space.w_Array, 15) - w_bb.store(space, 0, make_form([], 1230, 20, 1)) + w_bb.store(space, 0, make_form([0] * 1230 * 20, 1230, 20, 1)) w_bb.store(space, 1, w_bb.fetch(space, 0)) w_bb.store(space, 2, space.w_nil) @@ -54,25 +55,26 @@ w_bb.store(space, 13, w(15)) # clip height w_bb.store(space, 14, model.W_PointersObject(space, space.w_Array, 5)) # color map - s_bb = w_bb.as_bitblt_get_shadow(space) - s_bb.clip_range() - assert not (s_bb.w <= 0 or s_bb.h <= 0) - s_bb.compute_masks() - s_bb.check_overlap() - s_bb.calculate_offsets() + s_bb = w_bb.as_special_get_shadow(space, bitblt.BitBltShadow) + s_bb.loadBitBlt() + s_bb.clipRange() + assert not (s_bb.width <= 0 or s_bb.height <= 0) + s_bb.destMaskAndPointerInit() + s_bb.checkSourceOverlap() + s_bb.sourceSkewAndPointerInit() - assert s_bb.dest_x == 1 - assert s_bb.dest_y == 0 + assert s_bb.destX == 1 + assert s_bb.destY == 0 assert s_bb.sx == 1218 assert s_bb.sy == 0 assert s_bb.dx == 1219 assert s_bb.dy == 0 - assert s_bb.w == 1219 - assert s_bb.h == 15 - assert s_bb.h_dir == -1 - assert s_bb.v_dir == 1 - assert s_bb.source_delta == 79 - assert s_bb.dest_delta == 78 + assert s_bb.width == 1220 + assert s_bb.height == 15 + assert s_bb.hDir == -1 + assert s_bb.vDir == 1 + assert s_bb.sourceDelta == 79 + assert s_bb.destDelta == 78 assert s_bb.skew == 31 - assert s_bb.source_index == 38 - assert s_bb.dest_index == 38 \ No newline at end of file + assert s_bb.sourceIndex == 38 + assert s_bb.destIndex == 38 diff --git a/spyvm/test/test_bootstrappedimage.py b/spyvm/test/test_bootstrappedimage.py --- a/spyvm/test/test_bootstrappedimage.py +++ b/spyvm/test/test_bootstrappedimage.py @@ -13,7 +13,7 @@ s_methoddict.sync_cache() methoddict_w = s_methoddict.methoddict for each in methoddict_w.keys(): - if each.as_string() == string: + if each == string: return each def initialize_class(w_class): diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -6,7 +6,8 @@ from spyvm.test.test_miniimage import perform, w from spyvm.test.test_primitives import MockFrame -from rpython.rlib.rarithmetic import intmask, r_uint +from rpython.rlib.rarithmetic import intmask, 
r_uint32 as r_uint + space, interp = tools.setup_module(tools, filename='bootstrapped.image') @@ -16,11 +17,11 @@ s_methoddict.sync_cache() methoddict_w = s_methoddict.methoddict for each in methoddict_w.keys(): - if each.as_string() == string: + if each == string: return each def initialize_class(w_class): - initialize_symbol = find_symbol_in_methoddict_of("initialize", + initialize_symbol = find_symbol_in_methoddict_of("initialize", w_class.shadow_of_my_class(tools.space)) perform(w_class, initialize_symbol) @@ -78,7 +79,7 @@ def test_bitXor(): do_primitive("bitXor:", operator.xor) - do_primitive("bitXor:", operator.xor, i=[0xFFFFFFFF, 0x0F0F0F0F, 0xFFFFFF], + do_primitive("bitXor:", operator.xor, i=[0xFFFFFFFF, 0x0F0F0F0F, 0xFFFFFF], j=[0xF0F0F0F0, 0xFFFEFCF8, 4294967295]) def test_bitShift(): @@ -97,4 +98,3 @@ # do_primitive("-", operator.sub, j=[0xFF, 0xFFFF, 0xF0E0D0C0], i=[-1, -1, -1]) # do_primitive("-", operator.sub) # do_primitive("-", operator.sub, i=[0xFF], j=0x3FFFFFFF) - diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -4,7 +4,8 @@ from spyvm import model, shadow from spyvm.shadow import MethodNotFound from spyvm import objspace, error, display -from rpython.rlib.rarithmetic import intmask, r_uint +from rpython.rlib.rarithmetic import intmask, r_uint32 as r_uint + mockclass = objspace.bootstrap_class @@ -376,20 +377,6 @@ for i in xrange(24, 32): assert target.pixelbuffer[i] == 0xffffffff -def test_display_offset_computation(): - - def get_pixelbuffer(self): - from rpython.rtyper.lltypesystem import lltype, rffi - return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') - display.SDLDisplay.get_pixelbuffer = get_pixelbuffer - d = display.SDLDisplay("test") - d.set_video_mode(18, 5, 1) - - dbitmap = model.W_DisplayBitmap.create(space, space.w_Array, 5, 1, d) - - assert dbitmap.compute_pos_and_line_end(0, 1) == (0, 18) - assert dbitmap.compute_pos_and_line_end(1, 1) == (18, 36) - assert dbitmap.size() == 5 @py.test.mark.skipif("socket.gethostname() == 'precise32'") def test_weak_pointers(): diff --git a/spyvm/test/test_objectspace.py b/spyvm/test/test_objectspace.py --- a/spyvm/test/test_objectspace.py +++ b/spyvm/test/test_objectspace.py @@ -40,7 +40,7 @@ => 27670116110564327424 """ - from rpython.rlib.rarithmetic import r_uint + from rpython.rlib.rarithmetic import r_uint32 as r_uint for num in [0, 1, 41, 100, 2**31, sys.maxint + 1, -1]: num = r_uint(num) assert space.unwrap_uint(space.wrap_uint(num)) == num diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -5,9 +5,8 @@ from rpython.rlib import jit, rpath from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ - error, shadow + error, shadow, system from spyvm.tool.analyseimage import create_image -from spyvm.interpreter_proxy import VirtualMachine def _run_benchmark(interp, number, benchmark, arg): @@ -224,6 +223,9 @@ if hasattr(rgc, "stm_is_enabled"): driver.config.translation.stm = True driver.config.translation.thread = True + driver.exe_name = "rsqueakvm" + if system.IS_64BIT: + driver.exe_name += "-64" return entry_point, None From noreply at buildbot.pypy.org Thu Jan 16 15:04:50 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:50 +0100 (CET) Subject: [pypy-commit] lang-smalltalk 64bit: simplify some r_uint/r_uint32 distinctions. 
this 64bit vm still works ; ) Message-ID: <20140116140450.BA8D41C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: 64bit Changeset: r573:04dd02446fb4 Date: 2014-01-10 22:26 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/04dd02446fb4/ Log: simplify some r_uint/r_uint32 distinctions. this 64bit vm still works ;) diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -1,4 +1,3 @@ -from rpython.rlib.rarithmetic import r_uint32 as r_uint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.runicode import unicode_encode_utf_8 from rpython.rlib import jit diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -395,11 +395,11 @@ from rpython.rlib.rstruct.ieee import float_pack r = float_pack(self.value, 8) # C double if n0 == 0: - return space.wrap_uint(r_uint32(intmask(r >> 32))) + return space.wrap_positive_32bit_int(intmask(r >> 32)) else: # bounds-check for primitive access is done in the primitive assert n0 == 1 - return space.wrap_uint(r_uint32(intmask(r))) + return space.wrap_positive_32bit_int(intmask(r)) def store(self, space, n0, w_obj): from rpython.rlib.rstruct.ieee import float_unpack, float_pack @@ -974,7 +974,7 @@ class W_DisplayBitmap(W_AbstractObjectWithClassReference): _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display', '_depth'] - _immutable_fields_ = ['_realsize', 'display', '_depth'] + _immutable_fields_ = ['_realsize', 'display', '_depth', '_real_depth_buffer'] @staticmethod def create(space, w_class, size, depth, display): @@ -989,7 +989,6 @@ def __init__(self, space, w_class, size, depth, display): W_AbstractObjectWithClassReference.__init__(self, space, w_class) - # self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') self._real_depth_buffer = [r_uint(0)] * size self.pixelbuffer = display.get_pixelbuffer() self._realsize = size diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -4,7 +4,7 @@ from spyvm.plugins.plugin import Plugin from rpython.rlib import jit, objectmodel -from rpython.rlib.rarithmetic import intmask, r_uint32, r_uint +from rpython.rlib.rarithmetic import intmask, r_uint BitBltPlugin = Plugin() @@ -264,19 +264,19 @@ destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(halftoneWord, destWord) destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, r_uint32(destWord)) + self.dest.w_bits.setword(self.destIndex, destWord) self.destIndex += 1 destMask = BitBltShadow.AllOnes # the central horizontal loop requires no store masking if self.combinationRule == 3: # store rule requires no dest merging for word in range(2, self.nWords): - self.dest.w_bits.setword(self.destIndex, r_uint32(halftoneWord)) + self.dest.w_bits.setword(self.destIndex, halftoneWord) self.destIndex += 1 else: for word in range(2, self.nWords): destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(halftoneWord, destWord) - self.dest.w_bits.setword(self.destIndex, r_uint32(mergeWord)) + self.dest.w_bits.setword(self.destIndex, mergeWord) self.destIndex += 1 # last word in row is masked if self.nWords > 1: @@ -284,7 +284,7 @@ destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(halftoneWord, destWord) destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, r_uint32(destWord)) + 
self.dest.w_bits.setword(self.destIndex, destWord) self.destIndex += 1 self.destIndex += self.destDelta @@ -346,13 +346,13 @@ if self.destMask == BitBltShadow.AllOnes: # avoid read-modify-write self.dest.w_bits.setword( self.destIndex, - r_uint32(self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex))) + self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) ) else: # General version using dest masking destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(skewWord & halftoneWord, destWord & self.destMask) destWord = (self.destMask & mergeWord) | (destWord & (~self.destMask)) - self.dest.w_bits.setword(self.destIndex, r_uint32(destWord)) + self.dest.w_bits.setword(self.destIndex, destWord) self.destIndex += 1 if (self.nWords == 2): # is the next word the last word? @@ -458,7 +458,7 @@ destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, r_uint32(destWord)) + self.dest.w_bits.setword(self.destIndex, destWord) # The central horizontal loop requires no store masking self.destIndex += hInc destMask = BitBltShadow.AllOnes @@ -468,12 +468,12 @@ if (self.hDir == -1): for word in range(2, self.nWords): thisWord = self.source.w_bits.getword(self.sourceIndex) - self.dest.w_bits.setword(self.destIndex, r_uint32(thisWord)) + self.dest.w_bits.setword(self.destIndex, thisWord) self.sourceIndex += hInc self.destIndex += hInc else: for word in range(2, self.nWords): - self.dest.w_bits.setword(self.destIndex, r_uint32(prevWord)) + self.dest.w_bits.setword(self.destIndex, prevWord) prevWord = self.source.w_bits.getword(self.sourceIndex) self.destIndex += hInc self.sourceIndex += hInc @@ -484,7 +484,7 @@ self.sourceIndex += hInc skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) prevWord = thisWord - self.dest.w_bits.setword(self.destIndex, r_uint32(skewWord & halftoneWord)) + self.dest.w_bits.setword(self.destIndex, skewWord & halftoneWord) self.destIndex += hInc else: # Dest merging here... 
for word in range(2, self.nWords): @@ -493,7 +493,7 @@ skewWord = self.rotate32bit(thisWord, prevWord, skewMask, notSkewMask, unskew) prevWord = thisWord mergeWord = self.mergeFn(skewWord & halftoneWord, self.dest.w_bits.getword(self.destIndex)) - self.dest.w_bits.setword(self.destIndex, r_uint32(mergeWord)) + self.dest.w_bits.setword(self.destIndex, mergeWord) self.destIndex += hInc # last word with masking and all if (self.nWords > 1): @@ -507,7 +507,7 @@ destWord = self.dest.w_bits.getword(self.destIndex) mergeWord = self.mergeFn(skewWord & halftoneWord, destWord) destWord = (destMask & mergeWord) | (destWord & (~destMask)) - self.dest.w_bits.setword(self.destIndex, r_uint32(destWord)) + self.dest.w_bits.setword(self.destIndex, destWord) self.destIndex += hInc self.sourceIndex += self.sourceDelta self.destIndex += self.destDelta @@ -521,7 +521,6 @@ @objectmodel.specialize.arg_or_var(3) def merge(self, source_word, dest_word, combinationRule): - # assert isinstance(source_word, r_uint) and isinstance(dest_word, r_uint) if combinationRule == 0: return 0 elif combinationRule == 1: diff --git a/spyvm/plugins/fileplugin.py b/spyvm/plugins/fileplugin.py --- a/spyvm/plugins/fileplugin.py +++ b/spyvm/plugins/fileplugin.py @@ -154,6 +154,6 @@ def smalltalk_timestamp(space, sec_since_epoch): import time from spyvm.primitives import secs_between_1901_and_1970 - from rpython.rlib.rarithmetic import r_uint32 as r_uint - sec_since_1901 = r_uint(int(sec_since_epoch + secs_between_1901_and_1970)) + from rpython.rlib.rarithmetic import r_uint + sec_since_1901 = r_uint(sec_since_epoch + secs_between_1901_and_1970) return space.wrap_uint(sec_since_1901) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -5,11 +5,11 @@ from spyvm import model, shadow from spyvm import constants, display from spyvm.error import PrimitiveFailedError, \ - PrimitiveNotYetWrittenError + PrimitiveNotYetWrittenError, WrappingError from spyvm import wrapper from rpython.rlib import rarithmetic, rfloat, unroll, jit -from rpython.rlib.rarithmetic import r_uint32, r_uint +from rpython.rlib.rarithmetic import r_uint def assert_bounds(n0, minimum, maximum): if not minimum <= n0 < maximum: @@ -247,9 +247,9 @@ # #bitShift: -- return the shifted value @expose_primitive(BIT_SHIFT, unwrap_spec=[object, int]) def func(interp, s_frame, receiver, argument): - # from rpython.rlib.rarithmetic import LONG_BIT + from rpython.rlib.rarithmetic import LONG_BIT # XXX: 32-bit images only! - LONG_BIT = 32 + # LONG_BIT = 32 if -LONG_BIT < argument < LONG_BIT: # overflow-checking done in lshift implementations if argument > 0: @@ -296,21 +296,16 @@ return w_res make_func(op) -# XXX: 32-bit only -def ovfcheck_float_to_int(x): - from rpython.rlib.rfloat import isnan - if isnan(x): - raise OverflowError - if -2147483649.0 < x < 2147483648.0: - return int(x) - raise OverflowError - @expose_primitive(FLOAT_TRUNCATED, unwrap_spec=[float]) def func(interp, s_frame, f): try: - return interp.space.wrap_int(ovfcheck_float_to_int(f)) + integer = rarithmetic.ovfcheck_float_to_int(f) except OverflowError: raise PrimitiveFailedError + try: + return interp.space.wrap_int(integer) # in 64bit VMs, this may fail + except WrappingError: + raise PrimitiveFailedError @expose_primitive(FLOAT_TIMES_TWO_POWER, unwrap_spec=[float, int]) def func(interp, s_frame, rcvr, arg): @@ -1043,7 +1038,7 @@ sec_since_epoch = r_uint(int(time.time())) # XXX: overflow check necessary? 
sec_since_1901 = sec_since_epoch + secs_between_1901_and_1970 - return interp.space.wrap_uint(r_uint32(sec_since_1901)) + return interp.space.wrap_uint(r_uint(sec_since_1901)) #____________________________________________________________________________ @@ -1085,12 +1080,9 @@ raise PrimitiveFailedError for i in xrange(w_arg.size()): w_arg.setchar(i, chr(new_value)) - elif isinstance(w_arg, model.W_WordsObject): + elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, model.W_DisplayBitmap): for i in xrange(w_arg.size()): - w_arg.setword(i, new_value) - elif isinstance(w_arg, model.W_DisplayBitmap): - for i in xrange(w_arg.size()): - w_arg.setword(i, r_uint32(new_value)) + w_arg.setword(i, r_uint(new_value)) else: raise PrimitiveFailedError return w_arg diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -2,7 +2,6 @@ from spyvm import model, constants, error, wrapper from rpython.tool.pairtype import extendabletype from rpython.rlib import rarithmetic, jit -from rpython.rlib.rarithmetic import r_uint def make_elidable_after_versioning(func): @@ -650,7 +649,7 @@ self._temps_and_stack = [None] * (stacksize + tempsize) for i in range(tempsize): self._temps_and_stack[i] = self.space.w_nil - self._stack_ptr = r_uint(tempsize) # we point after the last element + self._stack_ptr = rarithmetic.r_uint(tempsize) # we point after the last element # ______________________________________________________________________ # Stack Manipulation @@ -683,11 +682,11 @@ return self.peek(0) def set_top(self, value, position=0): - rpos = r_uint(position) + rpos = rarithmetic.r_uint(position) self._temps_and_stack[self._stack_ptr + ~rpos] = value def peek(self, idx): - rpos = r_uint(idx) + rpos = rarithmetic.r_uint(idx) return self._temps_and_stack[jit.promote(self._stack_ptr) + ~rpos] @jit.unroll_safe diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -558,7 +558,7 @@ return bytes[:stop] # omit odd bytes def get_ruints(self, required_len=-1): - from rpython.rlib.rarithmetic import r_uint32 as r_uint + from rpython.rlib.rarithmetic import r_uint words = [r_uint(x) for x in self.chunk.data] if required_len != -1 and len(words) != required_len: raise CorruptImageError("Expected %d words, got %d" % (required_len, len(words))) From noreply at buildbot.pypy.org Thu Jan 16 15:04:51 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:51 +0100 (CET) Subject: [pypy-commit] lang-smalltalk 64bit: make more code word-size agnostic Message-ID: <20140116140451.D24821C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: 64bit Changeset: r574:4661bff0c7d3 Date: 2014-01-10 23:08 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/4661bff0c7d3/ Log: make more code word-size agnostic diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -142,7 +142,7 @@ "timerSemaphore" : SO_TIMER_SEMAPHORE, } -LONG_BIT = 32 +from rpython.rlib.rarithmetic import LONG_BIT TAGGED_MAXINT = 2 ** (LONG_BIT - 2) - 1 TAGGED_MININT = -2 ** (LONG_BIT - 2) diff --git a/spyvm/interpreter_proxy.py b/spyvm/interpreter_proxy.py --- a/spyvm/interpreter_proxy.py +++ b/spyvm/interpreter_proxy.py @@ -414,12 +414,12 @@ @expose_on_virtual_machine_proxy([int], oop) def positive32BitIntegerFor(n): - return IProxy.space.wrap_positive_32bit_int(n) + return IProxy.space.wrap_positive_1word_int(n) @expose_on_virtual_machine_proxy([oop], int) def 
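
A minimal illustrative sketch (not part of any changeset above; plain Python 2 to match the spyvm sources, and the helper name tagged_bounds is invented here). It shows how the SmallInteger bounds in spyvm/constants.py scale once r574 takes LONG_BIT from rpython.rlib.rarithmetic instead of hard-coding 32, and why literals like -4611686018427387904 (= -2**62, the 64-bit TAGGED_MININT) show up in the 64-bit jit traces quoted in test_basic.py:

import sys

def tagged_bounds(long_bit):
    # constants.py loses one bit to the SmallInteger tag and one to the sign:
    #   TAGGED_MAXINT = 2 ** (LONG_BIT - 2) - 1
    #   TAGGED_MININT = -2 ** (LONG_BIT - 2)
    return -2 ** (long_bit - 2), 2 ** (long_bit - 2) - 1

print tagged_bounds(32)   # (-1073741824, 1073741823): what a 32-bit image assumes
print tagged_bounds(64)   # (-4611686018427387904, 4611686018427387903): 64-bit host
print -sys.maxint - 1, sys.maxint   # the untagged MININT/MAXINT that r578 adds to constants.py

Since every SmallInteger from a 32-bit image fits comfortably in the untagged 64-bit range, the r578 changeset can drop the tagging check in objspace.wrap_int entirely and keep W_LargePositiveInteger1Word only as the fallback in wrap_positive_1word_int for values above MAXINT.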
positive32BitValueOf(n): from rpython.rlib.rarithmetic import intmask - return intmask(IProxy.space.unwrap_positive_32bit_int(n)) + return intmask(IProxy.space.unwrap_positive_1word_int(n)) # /* InterpreterProxy methodsFor: 'special objects' */ @@ -1165,4 +1165,4 @@ # class __extend__(model.W_WordsObject): # def as_c_array(self, proxy): -# return map(lambda x: proxy.object_to_oop(proxy.space.wrap_positive_32bit_int(x), self.words) +# return map(lambda x: proxy.object_to_oop(proxy.space.wrap_positive_1word_int(x), self.words) diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -174,7 +174,7 @@ upperbound = intmask(r_uint(-1) >> shift) if 0 <= self.value <= upperbound: shifted = intmask(self.value << shift) - return space.wrap_positive_32bit_int(shifted) + return space.wrap_positive_1word_int(shifted) else: try: shifted = ovfcheck(self.value << shift) @@ -280,7 +280,7 @@ upperbound = intmask(r_uint(-1) >> shift) if 0 <= self.value <= upperbound: shifted = intmask(self.value << shift) - return space.wrap_positive_32bit_int(shifted) + return space.wrap_positive_1word_int(shifted) else: raise error.PrimitiveFailedError() @@ -395,11 +395,11 @@ from rpython.rlib.rstruct.ieee import float_pack r = float_pack(self.value, 8) # C double if n0 == 0: - return space.wrap_positive_32bit_int(intmask(r >> 32)) + return space.wrap_positive_1word_int(intmask(r >> 32)) else: # bounds-check for primitive access is done in the primitive assert n0 == 1 - return space.wrap_positive_32bit_int(intmask(r)) + return space.wrap_positive_1word_int(intmask(r)) def store(self, space, n0, w_obj): from rpython.rlib.rstruct.ieee import float_unpack, float_pack diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -216,14 +216,14 @@ raise WrappingError("negative integer") if val >= 0: try: - return self.wrap_positive_32bit_int(intmask(val)) + return self.wrap_positive_1word_int(intmask(val)) except WrappingError: pass # XXX this code sucks import math bytes_len = int(math.log(val) / math.log(0xff)) + 1 if bytes_len <= 4: - return self.wrap_positive_32bit_int(intmask(val)) + return self.wrap_positive_1word_int(intmask(val)) else: return self._wrap_uint_loop(val, bytes_len) @@ -235,9 +235,9 @@ w_result.setchar(i, chr(intmask((val >> i*8) & 255))) return w_result - def wrap_positive_32bit_int(self, val): + def wrap_positive_1word_int(self, val): # This will always return a positive value. - # XXX: For now, we assume that val is at most 32bit, i.e. overflows are + # XXX: For now, we assume that val is at most 1word, i.e. overflows are # checked for before wrapping. 
if int_between(0, val, constants.TAGGED_MAXINT + 1): return model.W_SmallInteger(val) @@ -280,13 +280,13 @@ if w_value.value >= 0: return intmask(w_value.value) else: - raise UnwrappingError("The value is negative when interpreted as 32bit value.") + raise UnwrappingError("The value is negative when interpreted as 1 word value.") raise UnwrappingError("expected a W_SmallInteger or W_LargePositiveInteger1Word, got %s" % (w_value,)) def unwrap_uint(self, w_value): return w_value.unwrap_uint(self) - def unwrap_positive_32bit_int(self, w_value): + def unwrap_positive_1word_int(self, w_value): if isinstance(w_value, model.W_SmallInteger): if w_value.value >= 0: return r_uint(w_value.value) diff --git a/spyvm/plugins/fileplugin.py b/spyvm/plugins/fileplugin.py --- a/spyvm/plugins/fileplugin.py +++ b/spyvm/plugins/fileplugin.py @@ -112,7 +112,7 @@ except OSError: raise PrimitiveFailedError else: - return interp.space.wrap_positive_32bit_int(rarithmetic.intmask(pos)) + return interp.space.wrap_positive_1word_int(rarithmetic.intmask(pos)) @FilePlugin.expose_primitive(unwrap_spec=[object, int, int]) def primitiveFileSetPosition(interp, s_frame, w_rcvr, fd, position): @@ -128,7 +128,7 @@ file_info = os.fstat(fd) except OSError: raise PrimitiveFailedError - return interp.space.wrap_positive_32bit_int(rarithmetic.intmask(file_info.st_size)) + return interp.space.wrap_positive_1word_int(rarithmetic.intmask(file_info.st_size)) @FilePlugin.expose_primitive(unwrap_spec=[object]) def primitiveFileStdioHandles(interp, s_frame, w_rcvr): @@ -148,7 +148,7 @@ except OSError: raise PrimitiveFailedError else: - return space.wrap_positive_32bit_int(rarithmetic.intmask(written)) + return space.wrap_positive_1word_int(rarithmetic.intmask(written)) @jit.elidable def smalltalk_timestamp(space, sec_since_epoch): diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -49,7 +49,7 @@ # converted to an index0 index1_0 = object() char = object() -pos_32bit_int = object() +pos_1word_int = object() def expose_primitive(code, unwrap_spec=None, no_result=False, result_is_new_frame=False, may_context_switch=True, @@ -121,8 +121,8 @@ w_arg = s_frame.peek(index) if spec is int: args += (interp.space.unwrap_int(w_arg), ) - elif spec is pos_32bit_int: - args += (interp.space.unwrap_positive_32bit_int(w_arg),) + elif spec is pos_1word_int: + args += (interp.space.unwrap_positive_1word_int(w_arg),) elif spec is index1_0: args += (interp.space.unwrap_int(w_arg)-1, ) elif spec is float: @@ -204,10 +204,10 @@ } for (code,op) in bitwise_binary_ops.items(): def make_func(op): - @expose_primitive(code, unwrap_spec=[pos_32bit_int, pos_32bit_int]) + @expose_primitive(code, unwrap_spec=[pos_1word_int, pos_1word_int]) def func(interp, s_frame, receiver, argument): res = op(receiver, argument) - return interp.space.wrap_positive_32bit_int(rarithmetic.intmask(res)) + return interp.space.wrap_int(rarithmetic.intmask(res)) make_func(op) # #/ -- return the result of a division, only succeed if the division is exact @@ -248,8 +248,6 @@ @expose_primitive(BIT_SHIFT, unwrap_spec=[object, int]) def func(interp, s_frame, receiver, argument): from rpython.rlib.rarithmetic import LONG_BIT - # XXX: 32-bit images only! 
- # LONG_BIT = 32 if -LONG_BIT < argument < LONG_BIT: # overflow-checking done in lshift implementations if argument > 0: @@ -1072,7 +1070,7 @@ raise PrimitiveFailedError return w_receiver.short_atput0(interp.space, n0, w_value) - at expose_primitive(FILL, unwrap_spec=[object, pos_32bit_int]) + at expose_primitive(FILL, unwrap_spec=[object, pos_1word_int]) def func(interp, s_frame, w_arg, new_value): space = interp.space if isinstance(w_arg, model.W_BytesObject): From noreply at buildbot.pypy.org Thu Jan 16 15:04:52 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:52 +0100 (CET) Subject: [pypy-commit] lang-smalltalk 64bit: start adding support for running the hosted metainterpreter for the jittests Message-ID: <20140116140452.E99C11C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: 64bit Changeset: r575:58f9d9fb67d3 Date: 2014-01-12 02:55 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/58f9d9fb67d3/ Log: start adding support for running the hosted metainterpreter for the jittests diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py --- a/spyvm/test/jittest/base.py +++ b/spyvm/test/jittest/base.py @@ -1,13 +1,43 @@ import subprocess import os +import sys # TODO: from pypy.tool.jitlogparser.parser import SimpleParser, Op from pypy.tool.jitlogparser.storage import LoopStorage +from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.resoperation import opname from rpython.jit.tool import oparser from rpython.tool import logparser +from rpython import conftest + +class o: + view = False + viewloops = True +conftest.option = o + + +sys.setrecursionlimit(5000) +# expose the bytecode's values as global constants. +# Bytecodes that have a whole range are exposed as global functions: +# call them with an argument 'n' to get the bytecode number 'base + n'. 
+# XXX hackish +def setup(): + from spyvm import interpreter + def make_getter(entry): + def get_opcode_chr(n): + opcode = entry[0] + n + assert entry[0] <= opcode <= entry[1] + return chr(opcode) + return get_opcode_chr + for entry in interpreter.BYTECODE_RANGES: + name = entry[-1] + if len(entry) == 2: # no range + globals()[name] = chr(entry[0]) + else: + globals()[name] = make_getter(entry) +setup() BasePath = os.path.abspath( @@ -19,10 +49,17 @@ ) BenchmarkImage = os.path.join(os.path.dirname(__file__), "benchmark.image") -class BaseJITTest(object): +class BaseJITTest(LLJitMixin): def run(self, spy, tmpdir, code): + code = code.replace("\n", "\r\n") + if spy: + return self.run_binary(spy, tmpdir, code) + else: + return self.run_simulated(tmpdir, code) + + def run_binary(self, spy, tmpdir, code): proc = subprocess.Popen( - [str(spy), "-r", code.replace("\n", "\r\n"), BenchmarkImage], + [str(spy), "-r", code, BenchmarkImage], cwd=str(tmpdir), env={"PYPYLOG": "jit-log-opt:%s" % tmpdir.join("x.pypylog")} ) @@ -41,6 +78,68 @@ traces_w[len(traces_w) - 1].addbridge(trace) return traces_w + def run_simulated(self, tmpdir, code): + import targetimageloadingsmalltalk + + info = {"interpreter": None, "selector": None} + + old_run_code = targetimageloadingsmalltalk._run_code + def new_run_code(interp, code, as_benchmark=False): + info["interpreter"] = interp + return old_run_code(interp, code, as_benchmark=as_benchmark, raise_selector=True) + targetimageloadingsmalltalk._run_code = new_run_code + + try: + targetimageloadingsmalltalk.entry_point( + [str(tmpdir), "-r", code, BenchmarkImage] + ) + except targetimageloadingsmalltalk.SelectorNotification as e: + info["selector"] = e.selector + + interp = info["interpreter"] + selector = info["selector"] + def interpret(): + return interp.perform(interp.space.wrap_int(0), selector) + + # XXX custom fishing, depends on the exact env var and format + logfile = tmpdir.join("x.pypylog") + os.environ['PYPYLOG'] = "jit-log-opt:%s" % logfile + self.meta_interp(interpret, [], listcomp=True, listops=True, backendopt=True, inline=True) + + from rpython.jit.metainterp.warmspot import get_stats + import re + loops = get_stats().get_all_loops() + logstr = "[bed8a96917a] {jit-log-opt-loop\n" + logstr += "# Loop 0 (exp: eval) : entry bridge with %d ops\n" % len(loops[len(loops) -1].operations) + logstr += "[p0, p1]\n" + counter = 1 + for op in loops[len(loops) -1].operations: + counter += 1 + op = str(op) + match = re.match("[a-zA-Z0-9]+\.[a-zA-Z0-9]+:\d+", op) + if match: + op = op[0:match.span()[1]].strip() + if op.startswith("i"): + op = "+%d: %s" % (counter, op) + logstr += op + logstr += "\n" + logfile.write(logstr + "[bed8a999a87] jit-log-opt-loop}\n") + + import pdb; pdb.set_trace() + data = logparser.parse_log_file(str(tmpdir.join("x.pypylog")), verbose=False) + data = logparser.extract_category(data, "jit-log-opt-") + + storage = LoopStorage() + traces = [SimpleParser.parse_from_input(t) for t in data] + main_loops = storage.reconnect_loops(traces) + traces_w = [] + for trace in traces: + if trace in main_loops: + traces_w.append(Trace(trace)) + else: + traces_w[len(traces_w) - 1].addbridge(trace) + return traces_w + def assert_matches(self, trace, expected): expected_lines = [ line.strip() diff --git a/spyvm/test/jittest/conftest.py b/spyvm/test/jittest/conftest.py --- a/spyvm/test/jittest/conftest.py +++ b/spyvm/test/jittest/conftest.py @@ -12,4 +12,7 @@ def pytest_funcarg__spy(request): - return str(py.path.local(request.config.getvalueorskip("spy"))) + 
if request.config.getvalue("spy"): + return str(py.path.local(request.config.getvalueorskip("spy"))) + else: + return None diff --git a/spyvm/test/jittest/test_basic.py b/spyvm/test/jittest/test_basic.py --- a/spyvm/test/jittest/test_basic.py +++ b/spyvm/test/jittest/test_basic.py @@ -4,9 +4,15 @@ class TestBasic(BaseJITTest): + def test_empty(self, spy, tmpdir): + traces = self.run(spy, tmpdir, """ + ^ self + """) + assert True + def test_while_loop(self, spy, tmpdir): traces = self.run(spy, tmpdir, """ - 0 to: 1000000000 do: [:t|nil]. + 0 to: 100 do: [:t|nil]. """) self.assert_matches(traces[0].loop, """ guard_not_invalidated(descr=) @@ -130,3 +136,35 @@ guard_false(i78, descr=), jump(p0, p3, i74, p8, p10, p12, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, i77, descr=TargetToken(157713840)) """) + + def test_bitInvert32(self, spy, tmpdir): + traces = self.run(spy, tmpdir, """ + | srcWord dstWord | + srcWord := 16rCAFFEE. + dstWord := 16r987654. + 1 to: 1000000 do: [:t| + srcWord := srcWord bitInvert32. + ]. + """) + self.assert_matches(traces[0].loop, """ + guard_not_invalidated(descr=), + i90 = int_le(i79, 1000000), + guard_true(i90, descr=), + setfield_gc(ConstPtr(ptr60), i67, descr=), + i91 = int_ge(i73, 0), + guard_true(i91, descr=), + i92 = int_xor(i73, i72), + i93 = int_sub(i92, -4611686018427387904), + i94 = uint_lt(i93, -9223372036854775808), + guard_true(i94, descr=), + i95 = int_add(i79, 1), + i96 = int_sub(i95, -4611686018427387904), + setfield_gc(ConstPtr(ptr60), i63, descr=), + i97 = uint_lt(i96, -9223372036854775808), + guard_true(i97, descr=), + i98 = int_sub(i87, 3), + setfield_gc(ConstPtr(ptr60), i98, descr=), + i99 = int_le(i98, 0), + guard_false(i99, descr=), + jump(p0, p3, i92, p8, i95, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, i67, i52, i72, i63, i98, descr=TargetToken(44203440)) + """) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py old mode 100644 new mode 100755 --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python import sys, time import os @@ -60,7 +61,12 @@ except error.Exit, e: print e.msg -def _run_code(interp, code, as_benchmark=False): + +class SelectorNotification(Exception): + def __init__(self, sel): + self.selector = sel + +def _run_code(interp, code, as_benchmark=False, raise_selector=False): import time selector = "codeTest%d" % int(time.time()) try: @@ -78,6 +84,9 @@ print e.msg return 1 + if raise_selector: + raise SelectorNotification(selector) + if not as_benchmark: try: w_result = interp.perform(space.wrap_int(0), selector) From noreply at buildbot.pypy.org Thu Jan 16 15:04:53 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:53 +0100 (CET) Subject: [pypy-commit] lang-smalltalk 64bit: show loops, only start metainterp at test method, not with "asSymbol" of the selector Message-ID: <20140116140453.EE4601C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: 64bit Changeset: r576:3187da37d491 Date: 2014-01-13 10:35 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/3187da37d491/ Log: show loops, only start metainterp at test method, not with "asSymbol" of the selector diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py --- a/spyvm/test/jittest/base.py +++ b/spyvm/test/jittest/base.py @@ -2,20 +2,18 @@ import os import sys -# TODO: +from rpython import conftest +class o: + view = False + viewloops = True +conftest.option = o + from 
pypy.tool.jitlogparser.parser import SimpleParser, Op from pypy.tool.jitlogparser.storage import LoopStorage - from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.resoperation import opname from rpython.jit.tool import oparser from rpython.tool import logparser -from rpython import conftest - -class o: - view = False - viewloops = True -conftest.option = o sys.setrecursionlimit(5000) @@ -98,47 +96,15 @@ interp = info["interpreter"] selector = info["selector"] + w_selector = interp.perform(interp.space.wrap_string(selector), + "asSymbol") def interpret(): - return interp.perform(interp.space.wrap_int(0), selector) + return interp.perform(interp.space.wrap_int(0), w_selector) - # XXX custom fishing, depends on the exact env var and format - logfile = tmpdir.join("x.pypylog") - os.environ['PYPYLOG'] = "jit-log-opt:%s" % logfile self.meta_interp(interpret, [], listcomp=True, listops=True, backendopt=True, inline=True) - from rpython.jit.metainterp.warmspot import get_stats - import re loops = get_stats().get_all_loops() - logstr = "[bed8a96917a] {jit-log-opt-loop\n" - logstr += "# Loop 0 (exp: eval) : entry bridge with %d ops\n" % len(loops[len(loops) -1].operations) - logstr += "[p0, p1]\n" - counter = 1 - for op in loops[len(loops) -1].operations: - counter += 1 - op = str(op) - match = re.match("[a-zA-Z0-9]+\.[a-zA-Z0-9]+:\d+", op) - if match: - op = op[0:match.span()[1]].strip() - if op.startswith("i"): - op = "+%d: %s" % (counter, op) - logstr += op - logstr += "\n" - logfile.write(logstr + "[bed8a999a87] jit-log-opt-loop}\n") - - import pdb; pdb.set_trace() - data = logparser.parse_log_file(str(tmpdir.join("x.pypylog")), verbose=False) - data = logparser.extract_category(data, "jit-log-opt-") - - storage = LoopStorage() - traces = [SimpleParser.parse_from_input(t) for t in data] - main_loops = storage.reconnect_loops(traces) - traces_w = [] - for trace in traces: - if trace in main_loops: - traces_w.append(Trace(trace)) - else: - traces_w[len(traces_w) - 1].addbridge(trace) - return traces_w + return [] def assert_matches(self, trace, expected): expected_lines = [ From noreply at buildbot.pypy.org Thu Jan 16 15:04:54 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:54 +0100 (CET) Subject: [pypy-commit] lang-smalltalk 64bit: fix running jittests Message-ID: <20140116140454.F24D71C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: 64bit Changeset: r577:9b3497891e83 Date: 2014-01-13 11:07 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/9b3497891e83/ Log: fix running jittests diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py --- a/spyvm/test/jittest/base.py +++ b/spyvm/test/jittest/base.py @@ -35,7 +35,6 @@ globals()[name] = chr(entry[0]) else: globals()[name] = make_getter(entry) -setup() BasePath = os.path.abspath( @@ -77,6 +76,7 @@ return traces_w def run_simulated(self, tmpdir, code): + setup() import targetimageloadingsmalltalk info = {"interpreter": None, "selector": None} diff --git a/spyvm/test/jittest/test_basic.py b/spyvm/test/jittest/test_basic.py --- a/spyvm/test/jittest/test_basic.py +++ b/spyvm/test/jittest/test_basic.py @@ -4,15 +4,9 @@ class TestBasic(BaseJITTest): - def test_empty(self, spy, tmpdir): - traces = self.run(spy, tmpdir, """ - ^ self - """) - assert True - def test_while_loop(self, spy, tmpdir): traces = self.run(spy, tmpdir, """ - 0 to: 100 do: [:t|nil]. + 0 to: 10000000 do: [:t|nil]. 
""") self.assert_matches(traces[0].loop, """ guard_not_invalidated(descr=) @@ -141,7 +135,6 @@ traces = self.run(spy, tmpdir, """ | srcWord dstWord | srcWord := 16rCAFFEE. - dstWord := 16r987654. 1 to: 1000000 do: [:t| srcWord := srcWord bitInvert32. ]. @@ -168,3 +161,31 @@ guard_false(i99, descr=), jump(p0, p3, i92, p8, i95, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, i67, i52, i72, i63, i98, descr=TargetToken(44203440)) """) + + def test_bitXor(self, spy, tmpdir): + traces = self.run(spy, tmpdir, """ + | srcWord dstWord | + srcWord := 16rCAFFEE. + dstWord := 16r987654. + 1 to: 1000000 do: [:t| + srcWord := srcWord bitXor: dstWord. + ]. + """) + self.assert_matches(traces[0].loop, """ + guard_not_invalidated(descr=), + i82 = int_le(i72, 1000000), + guard_true(i82, descr=), + i83 = int_xor(i66, i63), + i84 = int_sub(i83, -4611686018427387904), + i85 = uint_lt(i84, -9223372036854775808), + guard_true(i85, descr=), + i86 = int_add(i72, 1), + i87 = int_sub(i86, -4611686018427387904), + i88 = uint_lt(i87, -9223372036854775808), + guard_true(i88, descr=), + i90 = int_sub(i79, 1), + setfield_gc(ConstPtr(ptr57), i90, descr=), + i91 = int_le(i90, 0), + guard_false(i91, descr=), + jump(p0, p3, i83, p8, i86, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, i63, i90, descr=TargetToken(41172136)) + """) From noreply at buildbot.pypy.org Thu Jan 16 15:04:56 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:56 +0100 (CET) Subject: [pypy-commit] lang-smalltalk 64bit: use untagged integers wherever possible, fix most tests Message-ID: <20140116140456.13CE11C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: 64bit Changeset: r578:1246e0869fc3 Date: 2014-01-13 17:12 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/1246e0869fc3/ Log: use untagged integers wherever possible, fix most tests diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -1,3 +1,4 @@ +import sys from rpython.rlib.jit import elidable from spyvm.tool.bitmanipulation import splitter @@ -145,9 +146,9 @@ from rpython.rlib.rarithmetic import LONG_BIT TAGGED_MAXINT = 2 ** (LONG_BIT - 2) - 1 TAGGED_MININT = -2 ** (LONG_BIT - 2) - TAGGED_MASK = int(2 ** (LONG_BIT - 1) - 1) - +MAXINT = sys.maxint +MININT = -sys.maxint-1 # Entries into SO_SPECIAL_SELECTORS_ARRAY: #(#+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -43,7 +43,6 @@ self.space = space self.image = image self.image_name = image_name - self.startup_time = time.time() self.max_stack_depth = max_stack_depth self.remaining_stack_depth = max_stack_depth self._loop = False @@ -204,7 +203,7 @@ def time_now(self): import time from rpython.rlib.rarithmetic import intmask - return intmask(int((time.time() - self.startup_time) * 1000) & constants.TAGGED_MASK) + return intmask(int(time.time() * 1000) - self.image.startup_time) def padding(self, symbol=' '): return symbol * (self.max_stack_depth - self.remaining_stack_depth) diff --git a/spyvm/iproxy.py b/spyvm/iproxy.py new file mode 100644 --- /dev/null +++ b/spyvm/iproxy.py @@ -0,0 +1,11 @@ +from spyvm.system import IS_64BIT + +if not IS_64BIT: + from spyvm import interpreter_proxy + IProxy = 
interpreter_proxy._InterpreterProxy() +else: + from spyvm.error import PrimitiveFailedError + class _InterpreterProxy(): + def call(self, signature, interp, s_frame, argcount, s_method): + raise PrimitiveFailedError + IProxy = _InterpreterProxy() diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -188,8 +188,9 @@ def unwrap_uint(self, space): val = self.value - if val < 0: - raise error.UnwrappingError("got negative integer") + # if val < 0: + # raise error.UnwrappingError("got negative integer") + # XXX: Assume that caller knows what he does return r_uint(val) @@ -399,7 +400,7 @@ else: # bounds-check for primitive access is done in the primitive assert n0 == 1 - return space.wrap_positive_1word_int(intmask(r)) + return space.wrap_positive_1word_int(intmask(r_uint32(r))) def store(self, space, n0, w_obj): from rpython.rlib.rstruct.ieee import float_unpack, float_pack @@ -756,14 +757,21 @@ byte0 = ord(self.getchar(byte_index0)) byte1 = ord(self.getchar(byte_index0 + 1)) << 8 if byte1 & 0x8000 != 0: - byte1 = intmask(-65536 | byte1) # -65536 = 0xffff0000 + byte1 = intmask(intmask(r_uint32(0xffff0000)) | intmask(r_uint32(byte1))) return space.wrap_int(byte1 | byte0) def short_atput0(self, space, index0, w_value): from rpython.rlib.rarithmetic import int_between i_value = space.unwrap_int(w_value) - if not int_between(-32768, i_value, 0x8000): - raise error.PrimitiveFailedError + if constants.LONG_BIT == 64: + if (not int_between(0, i_value, 0x8000) and + not int_between(0, i_value ^ (0xffffffff), 0x8000)): + raise error.PrimitiveFailedError + elif constants.LONG_BIT == 32: + if not int_between(-0x8000, i_value, 0x8000): + raise error.PrimitiveFailedError + else: + raise NotImplementedError byte_index0 = index0 * 2 byte0 = i_value & 0xff byte1 = (i_value & 0xff00) >> 8 @@ -894,20 +902,27 @@ else: short = (word >> 16) & 0xffff if short & 0x8000 != 0: - short = -65536 | short # -65536 = 0xffff0000 - return space.wrap_int(intmask(short)) + short = intmask(r_uint32(0xffff0000)) | short + return space.wrap_int(intmask(r_uint32(short))) def short_atput0(self, space, index0, w_value): - from rpython.rlib.rarithmetic import int_between + from rpython.rlib.rarithmetic import int_between, widen i_value = space.unwrap_int(w_value) - if not int_between(-32768, i_value, 0x8000): - raise error.PrimitiveFailedError + if constants.LONG_BIT == 64: + if (not int_between(0, i_value, 0x8000) and + not int_between(0, i_value ^ (0xffffffff), 0x8000)): + raise error.PrimitiveFailedError + elif constants.LONG_BIT == 32: + if not int_between(-0x8000, i_value, 0x8000): + raise error.PrimitiveFailedError + else: + raise NotImplementedError word_index0 = index0 / 2 - word = intmask(self.getword(word_index0)) + word = intmask(r_uint32(self.getword(word_index0))) if index0 % 2 == 0: - word = (word & -65536) | (i_value & 0xffff) # -65536 = 0xffff0000 + word = intmask(r_uint32((word & widen(r_uint32(0xffff0000))) | (i_value & 0xffff))) else: - word = (i_value << 16) | (word & 0xffff) + word = intmask(r_uint32((i_value << 16) | (word & 0xffff))) value = r_uint(word) self.setword(word_index0, value) @@ -938,10 +953,10 @@ if self.words is None: return self.c_words else: - from spyvm.iproxy import sqIntArrayPtr + from spyvm.iproxy import IProxy size = self.size() old_words = self.words - c_words = self.c_words = lltype.malloc(sqIntArrayPtr.TO, size, flavor='raw') + c_words = self.c_words = lltype.malloc(IProxy.sqIntArrayPtr.TO, size, flavor='raw') for i in range(size): c_words[i] = 
intmask(old_words[i]) self.words = None diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -201,45 +201,23 @@ def wrap_int(self, val): from spyvm import constants assert isinstance(val, int) - if int_between(constants.TAGGED_MININT, val, - constants.TAGGED_MAXINT + 1): - return model.W_SmallInteger(val) - # We can't build large integers here, because we don't know what to do - # with negativ vals: raise an error or interpret them as 4-byte positive? - raise WrappingError("integer too large to fit into a tagged pointer") + # We don't use tagging + return model.W_SmallInteger(val) def wrap_uint(self, val): from rpython.rlib.objectmodel import we_are_translated if not we_are_translated(): - assert val <= 0xFFFFFFFF + assert val <= (2**constants.LONG_BIT) - 1 if val < 0: raise WrappingError("negative integer") - if val >= 0: - try: - return self.wrap_positive_1word_int(intmask(val)) - except WrappingError: - pass - # XXX this code sucks - import math - bytes_len = int(math.log(val) / math.log(0xff)) + 1 - if bytes_len <= 4: + else: return self.wrap_positive_1word_int(intmask(val)) - else: - return self._wrap_uint_loop(val, bytes_len) - - @jit.unroll_safe - def _wrap_uint_loop(self, val, bytes_len): - w_result = model.W_BytesObject(self, - self.classtable['w_LargePositiveInteger'], bytes_len) - for i in range(bytes_len): - w_result.setchar(i, chr(intmask((val >> i*8) & 255))) - return w_result def wrap_positive_1word_int(self, val): # This will always return a positive value. # XXX: For now, we assume that val is at most 1word, i.e. overflows are - # checked for before wrapping. - if int_between(0, val, constants.TAGGED_MAXINT + 1): + # checked for before wrapping. Also, we ignore tagging. + if int_between(0, val, constants.MAXINT): return model.W_SmallInteger(val) else: return model.W_LargePositiveInteger1Word(val) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -471,7 +471,9 @@ s_class = w_cls.as_class_get_shadow(interp.space) if s_class.isvariable(): raise PrimitiveFailedError() - return s_class.new() + w_inst = s_class.new() + s_frame.store_instance(w_cls, w_inst) + return w_inst @expose_primitive(NEW_WITH_ARG, unwrap_spec=[object, int]) def func(interp, s_frame, w_cls, size): @@ -480,9 +482,11 @@ if not s_class.isvariable() and size != 0: raise PrimitiveFailedError() try: - return s_class.new(size) + w_inst = s_class.new(size) except MemoryError: raise PrimitiveFailedError + s_frame.store_instance(w_cls, w_inst) + return w_inst @expose_primitive(ARRAY_BECOME_ONE_WAY, unwrap_spec=[object, object]) def func(interp, s_frame, w_obj1, w_obj2): @@ -538,7 +542,7 @@ match_w = [] from rpython.rlib import rgc - if USES_STM: + if not USES_STM: roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] pending = roots[:] while pending: @@ -887,9 +891,9 @@ elif signature[0] == "VMDebugging": from spyvm.plugins.vmdebugging import DebuggingPlugin return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, s_method) - else: - from spyvm.iproxy import IProxy - return IProxy.call(signature, interp, s_frame, argcount, s_method) + # else: + # from spyvm.iproxy import IProxy + # return IProxy.call(signature, interp, s_frame, argcount, s_method) raise PrimitiveFailedError @expose_primitive(COMPILED_METHOD_FLUSH_CACHE, unwrap_spec=[object]) diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -346,7 +346,7 @@ assert not isinstance(w_selector, str) 
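
The objspace.py hunk above is the heart of the 64bit work: wrap_int used to refuse anything outside the tagged-pointer range and now accepts any machine-word integer. A before/after sketch in plain Python, using the constants.py formula quoted earlier in this diff (the tuple stands in for the real W_SmallInteger model class):

    import sys

    LONG_BIT = 64 if sys.maxsize > 2 ** 32 else 32
    TAGGED_MAXINT = 2 ** (LONG_BIT - 2) - 1   # as in spyvm/constants.py
    TAGGED_MININT = -2 ** (LONG_BIT - 2)

    def wrap_int_tagged(val):
        # old behaviour: only values that fit a tagged pointer are wrapped
        if TAGGED_MININT <= val <= TAGGED_MAXINT:
            return ("W_SmallInteger", val)
        raise ValueError("integer too large to fit into a tagged pointer")

    def wrap_int_untagged(val):
        # new behaviour: every machine-word int becomes a W_SmallInteger
        return ("W_SmallInteger", val)

    wrap_int_untagged(TAGGED_MAXINT + 1)   # fine now, raised WrappingError before
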
self.initialize_methoddict() s_method = w_method.as_compiledmethod_get_shadow(self.space) - self.s_methoddict().methoddict[w_selector] = s_method + self.s_methoddict().methoddict[w_selector.as_string()] = s_method if isinstance(w_method, model.W_CompiledMethod): s_method.w_compiledin = self.w_self() @@ -460,18 +460,19 @@ __metaclass__ = extendabletype _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', - '_stack_ptr', 'instances_w'] - + '_stack_ptr', 'instances_w', '_stores_instances'] _virtualizable_ = [ "_s_sender", "_pc", "_temps_and_stack[*]", "_stack_ptr", - "_w_self", "_w_self_size" + "_w_self", "_w_self_size", + "_stores_instances" ] def __init__(self, space, w_self): self._s_sender = None AbstractRedirectingShadow.__init__(self, space, w_self) self.instances_w = {} + self._stores_instances = False @staticmethod def is_block_context(w_pointers, space): @@ -728,13 +729,22 @@ self._w_self_size = w_self.size() return w_self + def store_instance(self, w_class, w_obj): + if not jit.promote(self._stores_instances): + return + else: + instances_w = self.instances_w.get(w_class, None) + if instances_w is not None: + instances_w.append(w_obj) + def store_instances_array(self, w_class, match_w): # used for primitives 77 & 78 + self._stores_instances = True self.instances_w[w_class] = match_w - @jit.elidable def instances_array(self, w_class): - return self.instances_w.get(w_class, None) + if jit.promote(self._stores_instances): + return self.instances_w.get(w_class, None) # ______________________________________________________________________ # Debugging printout diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -361,8 +361,12 @@ # ____________________________________________________________ class SqueakImage(object): + _immutable_fields_ = ["special_objects", "w_asSymbol", + "w_simulateCopyBits", "version", "is_modern", + "startup_time"] def from_reader(self, space, reader): + import time from spyvm import constants self.special_objects = [g_object.w_object for g_object in reader.chunks[reader.specialobjectspointer] @@ -376,6 +380,7 @@ self.lastWindowSize = reader.lastWindowSize self.version = reader.version self.is_modern = reader.version.magic > 6502 + self.startup_time = int(time.time() * 1000) self.run_spy_hacks(space) def run_spy_hacks(self, space): diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py --- a/spyvm/test/jittest/base.py +++ b/spyvm/test/jittest/base.py @@ -55,10 +55,12 @@ return self.run_simulated(tmpdir, code) def run_binary(self, spy, tmpdir, code): + print code proc = subprocess.Popen( [str(spy), "-r", code, BenchmarkImage], cwd=str(tmpdir), - env={"PYPYLOG": "jit-log-opt:%s" % tmpdir.join("x.pypylog")} + env={"PYPYLOG": "jit-log-opt:%s" % tmpdir.join("x.pypylog"), + "SDL_VIDEODRIVER": "dummy"} ) proc.wait() data = logparser.parse_log_file(str(tmpdir.join("x.pypylog")), verbose=False) diff --git a/spyvm/test/jittest/test_basic.py b/spyvm/test/jittest/test_basic.py --- a/spyvm/test/jittest/test_basic.py +++ b/spyvm/test/jittest/test_basic.py @@ -9,43 +9,36 @@ 0 to: 10000000 do: [:t|nil]. 
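
The instances_w bookkeeping above backs the someInstance-style primitives (77 and 78 in the comments): a frame remembers which instances of a class it has already handed out, and the expensive fallback walks the whole heap. The RPython version does that walk with rgc.get_rpy_roots()/get_rpy_referents() plus the extra GC flag so no object is visited twice; a plain-Python sketch of the same idea, using the CPython gc module purely for illustration:

    import gc

    def some_instances(cls):
        # roughly what the someInstance primitive needs: every live
        # instance of cls currently reachable on the heap
        return [obj for obj in gc.get_objects() if isinstance(obj, cls)]

    class Foo(object):
        pass

    f = Foo()
    assert f in some_instances(Foo)
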
""") self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=) - i60 = int_le(i49, 10000) - guard_true(i60, descr=) - i61 = int_add(i49, 1) - i62 = int_sub(i61, -1073741824) - i63 = uint_lt(i62, -2147483648) - guard_true(i63, descr=) - i64 = int_sub(i57, 1) - setfield_gc(ConstPtr(ptr54), i64, descr=) - i65 = int_le(i64, 0) - guard_false(i65, descr=) - jump(p0, p3, i61, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i64, descr=TargetToken(169145008)) + guard_not_invalidated(descr=), + i57 = int_le(i50, 10000000), + guard_true(i57, descr=), + i58 = int_add(i50, 1), + i59 = int_sub(i54, 1), + setfield_gc(ConstPtr(ptr51), i59, descr=), + i60 = int_le(i59, 0), + guard_false(i60, descr=), + jump(p0, i3, p4, i58, p13, p15, p17, p19, p21, p23, p25, p27, p29, p31, p33, p35, p37, p39, i59, descr=TargetToken(27868504)) """) self.assert_matches(traces[0].bridges[0], """ - f18 = call(ConstClass(ll_time.ll_time_time), descr=) - setfield_gc(ConstPtr(ptr19), 10000, descr=) - guard_no_exception(descr=) - f22 = float_sub(f18, 1387380038.806162) - f24 = float_mul(f22, 1000.000000) - i25 = cast_float_to_int(f24) - i27 = int_and(i25, 2147483647) - i28 = getfield_gc(ConstPtr(ptr19), descr=) - i29 = int_is_zero(i28) - guard_true(i29, descr=) - label(p0, p1, i16, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, descr=TargetToken(158475216)) - guard_class(p0, ConstClass(MethodContextShadow), descr=) - p31 = getfield_gc(p0, descr=) - guard_value(p31, ConstPtr(ptr32), descr=) - guard_not_invalidated(descr=) - i34 = int_le(i16, 1000000000) - guard_true(i34, descr=) - i36 = int_add(i16, 1) - i38 = int_sub(i36, -1073741824) - i40 = uint_lt(i38, -2147483648) - guard_true(i40, descr=) - setfield_gc(ConstPtr(ptr19), 9999, descr=) - jump(p0, p1, i36, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, 9999, descr=TargetToken(158474976)) + f19 = call(ConstClass(ll_time.ll_time_time), descr=), + setfield_gc(ConstPtr(ptr20), 10000, descr=), + guard_no_exception(descr=), + f23 = float_mul(f19, 1000.000000), + i24 = cast_float_to_int(f23), + i26 = int_sub(i24, 1389627640615), + i27 = getfield_gc(ConstPtr(ptr20), descr=), + i28 = int_is_zero(i27), + guard_true(i28, descr=), + label(p0, i1, p2, i17, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, descr=TargetToken(55082592)), + guard_class(p0, ConstClass(MethodContextShadow), descr=), + p30 = getfield_gc(p0, descr=), + guard_value(p30, ConstPtr(ptr31), descr=), + guard_not_invalidated(descr=), + i33 = int_le(i17, 10000000), + guard_true(i33, descr=), + i35 = int_add(i17, 1), + setfield_gc(ConstPtr(ptr20), 9999, descr=), + jump(p0, i1, p2, i35, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, 9999, descr=TargetToken(55082328)) """) def test_constant_string(self, spy, tmpdir): @@ -56,18 +49,15 @@ ^ i """) self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=) - i68 = int_le(i58, 10000) - guard_true(i68, descr=) - i69 = int_add(i58, 1) - i70 = int_sub(i69, -1073741824) - i71 = uint_lt(i70, -2147483648) - guard_true(i71, descr=) - i72 = int_sub(i65, 1) - setfield_gc(ConstPtr(ptr55), i72, descr=) - i73 = int_le(i72, 0) - guard_false(i73, descr=) - jump(p0, p3, i69, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i72, descr=TargetToken(153187472)) + guard_not_invalidated(descr=), + i65 = int_le(i59, 10000), + guard_true(i65, descr=), + i66 = int_add(i59, 1), + i67 = int_sub(i62, 1), + setfield_gc(ConstPtr(ptr56), i67, descr=), + i68 = int_le(i67, 0), + 
guard_false(i68, descr=), + jump(p0, i3, p4, i66, p13, p15, p17, p19, p21, p23, p25, p27, p29, p31, p33, p35, p37, p39, i67, descr=TargetToken(48782592)) """) def test_constant_string_equal2(self, spy, tmpdir): @@ -83,21 +73,18 @@ ^ i """) self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=), - i76 = int_le(i65, 100000), - guard_true(i76, descr=), - i77 = int_add(i65, 1), - i78 = int_sub(i77, -1073741824), - i79 = uint_lt(i78, -2147483648), - guard_true(i79, descr=), - i80 = int_sub(i73, 2), - setfield_gc(ConstPtr(ptr70), i80, descr=), - i81 = int_le(i80, 0), - guard_false(i81, descr=), - i83 = arraylen_gc(p49, descr=), - i84 = arraylen_gc(p53, descr=), - i85 = arraylen_gc(p57, descr=), - jump(p0, p3, i77, p12, p14, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, i80, p49, p53, p57, descr=TargetToken(163738864)) + guard_not_invalidated(descr=), + i73 = int_le(i66, 100000), + guard_true(i73, descr=), + i74 = int_add(i66, 1), + i75 = int_sub(i70, 2), + setfield_gc(ConstPtr(ptr67), i75, descr=), + i76 = int_le(i75, 0), + guard_false(i76, descr=), + i78 = arraylen_gc(p50, descr=), + i79 = arraylen_gc(p54, descr=), + i80 = arraylen_gc(p58, descr=), + jump(p0, i3, p4, i74, p13, p15, p17, p19, p21, p23, p25, p27, p29, p31, p33, p35, p37, p39, i75, p50, p54, p58, descr=TargetToken(40565840)) """) def test_constant_string_var_equal(self, spy, tmpdir): @@ -117,18 +104,15 @@ ^ i """) self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=), - i73 = int_le(i62, 100000), - guard_true(i73, descr=), - i74 = int_add(i62, 1), - i75 = int_sub(i74, -1073741824), - i76 = uint_lt(i75, -2147483648), - guard_true(i76, descr=), - i77 = int_sub(i70, 1), - setfield_gc(ConstPtr(ptr67), i77, descr=), - i78 = int_le(i77, 0), - guard_false(i78, descr=), - jump(p0, p3, i74, p8, p10, p12, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, i77, descr=TargetToken(157713840)) + guard_not_invalidated(descr=), + i70 = int_le(i63, 100000), + guard_true(i70, descr=), + i71 = int_add(i63, 1), + i72 = int_sub(i67, 1), + setfield_gc(ConstPtr(ptr64), i72, descr=), + i73 = int_le(i72, 0), + guard_false(i73, descr=), + jump(p0, i3, p4, i71, p9, p11, p13, p15, p21, p23, p25, p27, p29, p31, p33, p35, p37, p39, p41, p43, p45, p47, i72, descr=TargetToken(52952232)) """) def test_bitInvert32(self, spy, tmpdir): @@ -140,26 +124,20 @@ ]. 
""") self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=), - i90 = int_le(i79, 1000000), - guard_true(i90, descr=), - setfield_gc(ConstPtr(ptr60), i67, descr=), - i91 = int_ge(i73, 0), - guard_true(i91, descr=), - i92 = int_xor(i73, i72), - i93 = int_sub(i92, -4611686018427387904), - i94 = uint_lt(i93, -9223372036854775808), - guard_true(i94, descr=), - i95 = int_add(i79, 1), - i96 = int_sub(i95, -4611686018427387904), - setfield_gc(ConstPtr(ptr60), i63, descr=), - i97 = uint_lt(i96, -9223372036854775808), - guard_true(i97, descr=), - i98 = int_sub(i87, 3), - setfield_gc(ConstPtr(ptr60), i98, descr=), - i99 = int_le(i98, 0), - guard_false(i99, descr=), - jump(p0, p3, i92, p8, i95, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, i67, i52, i72, i63, i98, descr=TargetToken(44203440)) + guard_not_invalidated(descr=), + i83 = int_le(i76, 1000000), + guard_true(i83, descr=), + setfield_gc(ConstPtr(ptr61), i68, descr=), + i84 = int_ge(i74, 0), + guard_true(i84, descr=), + i85 = int_xor(i74, i73), + i86 = int_add(i76, 1), + i87 = int_sub(i79, 3), + setfield_gc(ConstPtr(ptr61), i64, descr=), + setfield_gc(ConstPtr(ptr61), i87, descr=), + i88 = int_le(i87, 0), + guard_false(i88, descr=), + jump(p0, i3, p4, i85, p9, i86, p17, p19, p21, p23, p25, p27, p29, p31, p33, p35, p37, p39, p41, p43, i68, i53, i73, i87, i64, descr=TargetToken(43310512)) """) def test_bitXor(self, spy, tmpdir): @@ -172,20 +150,41 @@ ]. """) self.assert_matches(traces[0].loop, """ - guard_not_invalidated(descr=), - i82 = int_le(i72, 1000000), - guard_true(i82, descr=), - i83 = int_xor(i66, i63), - i84 = int_sub(i83, -4611686018427387904), - i85 = uint_lt(i84, -9223372036854775808), - guard_true(i85, descr=), - i86 = int_add(i72, 1), - i87 = int_sub(i86, -4611686018427387904), - i88 = uint_lt(i87, -9223372036854775808), - guard_true(i88, descr=), - i90 = int_sub(i79, 1), - setfield_gc(ConstPtr(ptr57), i90, descr=), - i91 = int_le(i90, 0), - guard_false(i91, descr=), - jump(p0, p3, i83, p8, i86, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, i63, i90, descr=TargetToken(41172136)) + guard_not_invalidated(descr=), + i75 = int_le(i69, 1000000), + guard_true(i75, descr=), + i76 = int_xor(i67, i64), + i77 = int_add(i69, 1), + i78 = int_sub(i72, 1), + setfield_gc(ConstPtr(ptr58), i78, descr=), + i79 = int_le(i78, 0), + guard_false(i79, descr=), + jump(p0, i3, p4, i76, p9, i77, p17, p19, p21, p23, p25, p27, p29, p31, p33, p35, p37, p39, p41, p43, i64, i78, descr=TargetToken(46857208)) """) + + def test_DisplayFlash(self, spy, tmpdir): + traces = self.run(spy, tmpdir, """ + | path | + Display + setExtent: 200 at 200; + beDisplay; + fillWhite. + path := OrderedCollection new: 16. + #(40 115 190 265) do: [:y | + #(60 160 260 360) do: [:x | + path add: x at y]]. + 1 to: 16 do: [:index | + BitBlt exampleAt: (path at: index) rule: index - 1 fillColor: nil]. 
+ """) + self.assert_matches(traces[0].loop, """ + guard_not_invalidated(descr=), + i74 = int_le(i68, 1000000), + guard_true(i74, descr=), + i75 = int_xor(i66, i63), + i76 = int_add(i68, 1), + i77 = int_sub(i71, 1), + setfield_gc(ConstPtr(ptr57), i77, descr=), + i78 = int_le(i77, 0), + guard_false(i78, descr=), + jump(p0, p3, i75, p8, i76, p16, p18, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, i63, i77, descr=TargetToken(24673448)) + """) diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -431,7 +431,7 @@ assert s_active_context.w_sender() == w_frame assert s_active_context.stack() == [] assert w_active_context.as_methodcontext_get_shadow(space).w_receiver().is_same_object(w_object) - assert w_active_context.as_methodcontext_get_shadow(space).w_method().is_same_object(shadow.s_methoddict().methoddict[w_foo].w_self()) + assert w_active_context.as_methodcontext_get_shadow(space).w_method().is_same_object(shadow.s_methoddict().methoddict[w_foo.as_string()].w_self()) assert s_frame.stack() == [] step_in_interp(s_active_context) w_active_context = step_in_interp(s_active_context) @@ -602,7 +602,7 @@ s_frame.push(space.w_one) w_active_context = step_in_interp(s_frame) s_active_context = w_active_context.as_context_get_shadow(space) - assert w_active_context.as_methodcontext_get_shadow(space).s_method() == shadow.s_methoddict().methoddict[w_symbol] + assert w_active_context.as_methodcontext_get_shadow(space).s_method() == shadow.s_methoddict().methoddict[w_symbol.as_string()] assert s_active_context.w_receiver() is w_object assert w_active_context.as_methodcontext_get_shadow(space).gettemp(0).is_same_object(space.w_one) assert s_active_context.stack() == [] @@ -662,7 +662,7 @@ assert s_active_context.w_sender() == w_caller_context assert s_active_context.stack() == [] assert w_active_context.as_methodcontext_get_shadow(space).w_receiver() == w_object - meth = w_specificclass.as_class_get_shadow(space).s_methoddict().methoddict[foo] + meth = w_specificclass.as_class_get_shadow(space).s_methoddict().methoddict[foo.as_string()] assert s_active_context.w_method() == meth.w_self() assert s_caller_context.stack() == [] diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -343,21 +343,28 @@ def test_large_positive_integer_operations(): w_result = perform(interp.space.w_SmallInteger, "maxVal") - w_result = perform(w_result, "+", space.wrap_int(42)) + w_result = perform(w_result, "+", space.wrap_int(2 * interp.space.unwrap_int(w_result))) assert w_result is not None - assert isinstance(w_result, model.W_LargePositiveInteger1Word) + if constants.LONG_BIT == 32: + assert isinstance(w_result, model.W_LargePositiveInteger1Word) + else: + assert isinstance(w_result, model.W_SmallInteger) w_result = perform(interp.space.w_SmallInteger, "maxVal") w_result = perform(w_result, "*", w_result) assert w_result is not None - assert isinstance(w_result, model.W_BytesObject) + if constants.LONG_BIT == 32: + assert isinstance(w_result, model.W_BytesObject) + else: + assert isinstance(w_result, model.W_SmallInteger) def test_compiling_large_positive_integer(): - sourcecode = """aLargeInteger + if constants.LONG_BIT == 32: + sourcecode = """aLargeInteger ^ 16rFFFFFFFF""" - perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) - w_result = perform(w(10), "aLargeInteger") - 
assert isinstance(w_result, model.W_LargePositiveInteger1Word) + perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) + w_result = perform(w(10), "aLargeInteger") + assert isinstance(w_result, model.W_LargePositiveInteger1Word) def test_doesNotUnderstand(): w_dnu = interp.space.objtable["w_doesNotUnderstand"] @@ -410,7 +417,7 @@ selectors_w = w_methoddict._shadow.methoddict.keys() w_sel = None for sel in selectors_w: - if sel.as_string() == 'size': + if sel == 'size': w_sel = sel size = prim(primitives.PERFORM_WITH_ARGS, [w_o, w_sel, []]) assert size.value == 3 diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -3,8 +3,8 @@ import socket from spyvm import model, shadow from spyvm.shadow import MethodNotFound -from spyvm import objspace, error, display -from rpython.rlib.rarithmetic import intmask, r_uint32 as r_uint +from spyvm import objspace, error, display, constants +from rpython.rlib.rarithmetic import intmask, r_uint32 mockclass = objspace.bootstrap_class @@ -50,6 +50,7 @@ assert w_bytes.getchar(0) == "\x00" py.test.raises(IndexError, lambda: w_bytes.getchar(20)) + at py.test.mark.skipif("constants.LONG_BIT == 64") def test_c_bytes_object(): w_class = mockclass(space, 0, format=shadow.BYTES) w_bytes = w_class.as_class_get_shadow(space).new(20) @@ -75,6 +76,7 @@ assert w_bytes.getword(0) == 0 py.test.raises(AssertionError, lambda: w_bytes.getword(20)) + at py.test.mark.skipif("constants.LONG_BIT == 64") def test_c_word_object(): w_class = mockclass(space, 0, format=shadow.WORDS) w_bytes = w_class.as_class_get_shadow(space).new(20) @@ -266,18 +268,25 @@ b.setword(0, 3221225472) r = b.at0(space, 0) - assert isinstance(r, (model.W_BytesObject, model.W_LargePositiveInteger1Word)) - assert r.size() == 4 + if constants.LONG_BIT == 32: + assert isinstance(r, (model.W_BytesObject, model.W_LargePositiveInteger1Word)) + assert r.size() == 4 + else: + assert isinstance(r, (model.W_SmallInteger)) def test_float_at(): b = model.W_Float(64.0) r = b.fetch(space, 0) - assert isinstance(r, model.W_LargePositiveInteger1Word) - assert r.size() == 4 - assert space.unwrap_int(r.at0(space, 0)) == 0 - assert space.unwrap_int(r.at0(space, 1)) == 0 - assert space.unwrap_int(r.at0(space, 2)) == 80 - assert space.unwrap_int(r.at0(space, 3)) == 64 + assert isinstance(r, model.W_LargePositiveInteger1Word) or (isinstance(r, model.W_SmallInteger) and r.value > 0) + if not isinstance(r, model.W_SmallInteger): + assert False + assert r.size() == 4 + assert space.unwrap_int(r.at0(space, 0)) == 0 + assert space.unwrap_int(r.at0(space, 1)) == 0 + assert space.unwrap_int(r.at0(space, 2)) == 80 + assert space.unwrap_int(r.at0(space, 3)) == 64 + else: + assert r.value == ((64 << 24) + (80 << 16)) r = b.fetch(space, 1) assert isinstance(r, model.W_SmallInteger) assert r.value == 0 @@ -313,7 +322,7 @@ for i in range(4): target.atput0(space, i, source.at0(space, i)) assert target.at0(space, i) == source.at0(space, i) - assert hex(r_uint(target.value)) == hex(r_uint(source.value)) + assert hex(r_uint32(target.value)) == hex(r_uint32(source.value)) def test_BytesObject_short_at(): target = model.W_BytesObject(space, None, 4) @@ -335,8 +344,8 @@ def test_WordsObject_short_at(): target = model.W_WordsObject(space, None, 2) - target.setword(0, r_uint(0x00018000)) - target.setword(1, r_uint(0x80010111)) + target.setword(0, r_uint32(0x00018000)) + target.setword(1, r_uint32(0x80010111)) assert 
target.short_at0(space, 0).value == intmask(0xffff8000) assert target.short_at0(space, 1).value == intmask(0x0001) assert target.short_at0(space, 2).value == intmask(0x0111) @@ -345,7 +354,7 @@ def test_WordsObject_short_atput(): target = model.W_WordsObject(space, None, 2) target.short_atput0(space, 0, space.wrap_int(0x0100)) - target.short_atput0(space, 1, space.wrap_int(-1)) + target.short_atput0(space, 1, space.wrap_int(intmask(0xffffffff))) target.short_atput0(space, 2, space.wrap_int(intmask(0xffff8000))) target.short_atput0(space, 3, space.wrap_int(0x7fff)) assert target.getword(0) == 0xffff0100 @@ -356,26 +365,21 @@ # double-free bug def get_pixelbuffer(self): from rpython.rtyper.lltypesystem import lltype, rffi - return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') + return lltype.malloc(rffi.UINTP.TO, self.width * self.height * 32, flavor='raw') display.SDLDisplay.get_pixelbuffer = get_pixelbuffer d = display.SDLDisplay("test") d.set_video_mode(32, 10, 1) target = model.W_DisplayBitmap.create(space, space.w_Array, 10, 1, d) - target.setword(0, r_uint(0xFF00)) + target.setword(0, r_uint32(0xFF00)) assert bin(target.getword(0)) == bin(0xFF00) - target.setword(0, r_uint(0x00FF00FF)) + target.setword(0, r_uint32(0x00FF00FF)) assert bin(target.getword(0)) == bin(0x00FF00FF) - target.setword(0, r_uint(0xFF00FF00)) + target.setword(0, r_uint32(0xFF00FF00)) assert bin(target.getword(0)) == bin(0xFF00FF00) + # Mapping 1-bit to 8-bit depth for i in xrange(8): - assert target.pixelbuffer[i] == 0xff000000 - for i in xrange(8, 16): - assert target.pixelbuffer[i] == 0xffffffff - for i in xrange(16, 24): - assert target.pixelbuffer[i] == 0xff000000 - for i in xrange(24, 32): - assert target.pixelbuffer[i] == 0xffffffff + assert target.pixelbuffer[i] == 0x01010101 @py.test.mark.skipif("socket.gethostname() == 'precise32'") diff --git a/spyvm/test/test_objectspace.py b/spyvm/test/test_objectspace.py --- a/spyvm/test/test_objectspace.py +++ b/spyvm/test/test_objectspace.py @@ -40,16 +40,17 @@ => 27670116110564327424 """ - from rpython.rlib.rarithmetic import r_uint32 as r_uint + from rpython.rlib.rarithmetic import r_uint for num in [0, 1, 41, 100, 2**31, sys.maxint + 1, -1]: num = r_uint(num) assert space.unwrap_uint(space.wrap_uint(num)) == num for num in [-1, -100, -sys.maxint]: with py.test.raises(objspace.WrappingError): space.wrap_uint(num) - for obj in [space.wrap_char('a'), space.wrap_int(-1)]: + for obj in [space.wrap_char('a')]: with py.test.raises(objspace.UnwrappingError): space.unwrap_uint(obj) + assert space.unwrap_uint(space.wrap_int(-1)) == r_uint(-1) # byteobj = space.wrap_uint(0x100000000) # assert isinstance(byteobj, model.W_BytesObject) # byteobj.bytes.append('\x01') @@ -65,7 +66,8 @@ with py.test.raises(AssertionError): space.wrap_int(num) - from rpython.rlib.rarithmetic import intmask - for num in [0x7fffffff, intmask(0x80000000)]: - with py.test.raises(objspace.WrappingError): - space.wrap_int(num) + # XXX: We ignore tagging now -- not sure if this is ok + # from rpython.rlib.rarithmetic import intmask + # for num in [0x7fffffff, intmask(0x80000000)]: + # with py.test.raises(objspace.WrappingError): + # space.wrap_int(num) diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -68,7 +68,7 @@ assert prim(primitives.ADD, [3,4]).value == 7 def test_small_int_add_fail(): - w_result = prim_fails(primitives.ADD, [constants.TAGGED_MAXINT, 2]) + w_result = 
prim_fails(primitives.ADD, [constants.MAXINT, 2]) # assert isinstance(w_result, model.W_LargePositiveInteger1Word) # assert w_result.value == constants.TAGGED_MAXINT + 2 # prim_fails(primitives.ADD, [constants.TAGGED_MAXINT, constants.TAGGED_MAXINT * 2]) @@ -77,21 +77,21 @@ assert prim(primitives.SUBTRACT, [5,9]).value == -4 def test_small_int_minus_fail(): - prim_fails(primitives.SUBTRACT, [constants.TAGGED_MININT,1]) + prim_fails(primitives.SUBTRACT, [constants.MININT,1]) prim_fails(primitives.SUBTRACT, - [constants.TAGGED_MININT, constants.TAGGED_MAXINT]) + [constants.MININT, constants.MAXINT]) def test_small_int_multiply(): assert prim(primitives.MULTIPLY, [6,3]).value == 18 def test_small_int_multiply_overflow(): - w_result = prim_fails(primitives.MULTIPLY, [constants.TAGGED_MAXINT, 2]) + w_result = prim_fails(primitives.MULTIPLY, [constants.MAXINT, 2]) #assert isinstance(w_result, model.W_LargePositiveInteger1Word) #assert w_result.value == constants.TAGGED_MAXINT * 2 - prim_fails(primitives.MULTIPLY, [constants.TAGGED_MAXINT, constants.TAGGED_MAXINT]) - prim_fails(primitives.MULTIPLY, [constants.TAGGED_MAXINT, -4]) - prim_fails(primitives.MULTIPLY, [constants.TAGGED_MININT, constants.TAGGED_MAXINT]) - prim_fails(primitives.MULTIPLY, [constants.TAGGED_MININT, 2]) + prim_fails(primitives.MULTIPLY, [constants.MAXINT, constants.MAXINT]) + prim_fails(primitives.MULTIPLY, [constants.MAXINT, -4]) + prim_fails(primitives.MULTIPLY, [constants.MININT, constants.MAXINT]) + prim_fails(primitives.MULTIPLY, [constants.MININT, 2]) def test_small_int_divide(): assert prim(primitives.DIVIDE, [6,3]).value == 2 @@ -176,14 +176,22 @@ def test_small_int_bit_shift_fail(): from rpython.rlib.rarithmetic import intmask - prim_fails(primitives.BIT_SHIFT, [4, 32]) - prim_fails(primitives.BIT_SHIFT, [4, 31]) - prim_fails(primitives.BIT_SHIFT, [4, 30]) + if constants.LONG_BIT == 32: + prim_fails(primitives.BIT_SHIFT, [4, 32]) + prim_fails(primitives.BIT_SHIFT, [4, 31]) + prim_fails(primitives.BIT_SHIFT, [4, 30]) + else: + prim_fails(primitives.BIT_SHIFT, [4, 64]) + prim_fails(primitives.BIT_SHIFT, [4, 63]) + prim_fails(primitives.BIT_SHIFT, [4, 62]) w_result = prim(primitives.BIT_SHIFT, [4, 29]) - assert isinstance(w_result, model.W_LargePositiveInteger1Word) + if constants.LONG_BIT == 32: + assert isinstance(w_result, model.W_LargePositiveInteger1Word) + else: + assert isinstance(w_result, model.W_SmallInteger) assert w_result.value == intmask(4 << 29) w_result = prim(primitives.BIT_SHIFT, [4, 28]) - assert isinstance(w_result, model.W_LargePositiveInteger1Word) + assert isinstance(w_result, model.W_SmallInteger) assert w_result.value == 4 << 28 def test_smallint_as_float(): @@ -430,16 +438,51 @@ def test_primitive_milliseconds_clock(): import time - start = prim(primitives.MILLISECOND_CLOCK, [0]).value + class Image(object): + def __init__(self): + self.startup_time = int(time.time() * 1000) + image = Image() + + interp, w_frame, argument_count = mock([0], None) + interp.image = image + prim_table[primitives.MILLISECOND_CLOCK](interp, w_frame.as_context_get_shadow(space), argument_count-1) + start = w_frame.as_context_get_shadow(space).pop().value + s_frame = w_frame.as_context_get_shadow(space) + assert not s_frame.stackdepth() - s_frame.tempsize() # check args are consumed + time.sleep(0.3) - stop = prim(primitives.MILLISECOND_CLOCK, [0]).value + interp, w_frame, argument_count = mock([0], None) + interp.image = image + prim_table[primitives.MILLISECOND_CLOCK](interp, w_frame.as_context_get_shadow(space), 
argument_count-1) + stop = w_frame.as_context_get_shadow(space).pop().value + s_frame = w_frame.as_context_get_shadow(space) + assert not s_frame.stackdepth() - s_frame.tempsize() # check args are consumed + assert start + 250 <= stop def test_signal_at_milliseconds(): import time - future = prim(primitives.MILLISECOND_CLOCK, [0]).value + 400 + class Image(object): + def __init__(self): + self.startup_time = int(time.time() * 1000) + image = Image() + + interp, w_frame, argument_count = mock([0], None) + interp.image = image + prim_table[primitives.MILLISECOND_CLOCK](interp, w_frame.as_context_get_shadow(space), argument_count-1) + future = w_frame.as_context_get_shadow(space).pop().value + 400 + s_frame = w_frame.as_context_get_shadow(space) + assert not s_frame.stackdepth() - s_frame.tempsize() # check args are consumed + sema = space.w_Semaphore.as_class_get_shadow(space).new() - prim(primitives.SIGNAL_AT_MILLISECONDS, [space.w_nil, sema, future]) + + interp, w_frame, argument_count = mock([space.w_nil, sema, future], None) + interp.image = image + prim_table[primitives.SIGNAL_AT_MILLISECONDS](interp, w_frame.as_context_get_shadow(space), argument_count-1) + res = w_frame.as_context_get_shadow(space).pop() + s_frame = w_frame.as_context_get_shadow(space) + assert not s_frame.stackdepth() - s_frame.tempsize() # check args are consumed + assert space.objtable["w_timerSemaphore"] is sema def test_inc_gc(): @@ -722,7 +765,7 @@ # double-free bug def get_pixelbuffer(self): from rpython.rtyper.lltypesystem import lltype, rffi - return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') + return lltype.malloc(rffi.UINTP.TO, self.width * self.height * 32, flavor='raw') display.SDLDisplay.get_pixelbuffer = get_pixelbuffer assert space.objtable["w_display"] is None @@ -762,7 +805,7 @@ # double-free bug def get_pixelbuffer(self): from rpython.rtyper.lltypesystem import lltype, rffi - return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') + return lltype.malloc(rffi.UINTP.TO, self.width * self.height * 32, flavor='raw') display.SDLDisplay.get_pixelbuffer = get_pixelbuffer mock_display = model.W_PointersObject(space, space.w_Point, 4) @@ -776,7 +819,7 @@ class DisplayFlush(Exception): pass - def flush_to_screen_mock(self): + def flush_to_screen_mock(self, force=False): raise DisplayFlush try: @@ -787,6 +830,8 @@ monkeypatch.undo() def test_bitblt_copy_bits(monkeypatch): + from spyvm.plugins import bitblt + class CallCopyBitsSimulation(Exception): pass class Image(): @@ -810,7 +855,7 @@ try: monkeypatch.setattr(w_frame._shadow, "_sendSelfSelector", perform_mock) - monkeypatch.setattr(shadow.BitBltShadow, "sync_cache", sync_cache_mock) + monkeypatch.setattr(bitblt.BitBltShadow, "sync_cache", sync_cache_mock) with py.test.raises(CallCopyBitsSimulation): prim_table[primitives.BITBLT_COPY_BITS](interp, w_frame.as_context_get_shadow(space), argument_count-1) finally: diff --git a/spyvm/test/test_shadow.py b/spyvm/test/test_shadow.py --- a/spyvm/test/test_shadow.py +++ b/spyvm/test/test_shadow.py @@ -72,8 +72,8 @@ classshadow = w_class.as_class_get_shadow(space) methoddict = classshadow.s_methoddict().methoddict assert len(methods) == len(methoddict) - for w_key, value in methoddict.items(): - assert methods[w_key.as_string()].as_compiledmethod_get_shadow(space) is value + for key, value in methoddict.items(): + assert methods[key].as_compiledmethod_get_shadow(space) is value def method(tempsize=3,argsize=2, bytes="abcde"): w_m = model.W_CompiledMethod() @@ 
-264,15 +264,15 @@ version = s_class.version w_method = model.W_CompiledMethod(0) - key = space.wrap_string('foo') + w_key = space.wrap_string('foo') s_md = w_parent.as_class_get_shadow(space).s_methoddict() s_md.sync_cache() w_ary = s_md._w_self._fetch(constants.METHODDICT_VALUES_INDEX) - s_md._w_self.atput0(space, 0, key) + s_md._w_self.atput0(space, 0, w_key) w_ary.atput0(space, 0, w_method) - assert s_class.lookup(key) is w_method.as_compiledmethod_get_shadow(space) + assert s_class.lookup(w_key) is w_method.as_compiledmethod_get_shadow(space) assert s_class.version is not version assert s_class.version is w_parent.as_class_get_shadow(space).version From noreply at buildbot.pypy.org Thu Jan 16 15:04:57 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:57 +0100 (CET) Subject: [pypy-commit] lang-smalltalk 64bit: WIP/HACK: fix more tests Message-ID: <20140116140457.1B5221C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: 64bit Changeset: r579:b7958e74bdbe Date: 2014-01-14 17:12 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/b7958e74bdbe/ Log: WIP/HACK: fix more tests diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -15,7 +15,7 @@ that create W_PointersObjects of correct size with attached shadows. """ import sys, weakref -from spyvm import constants, error +from spyvm import constants, error, system from rpython.rlib import rrandom, objectmodel, jit, signature from rpython.rlib.rarithmetic import intmask, r_uint32, r_uint @@ -24,6 +24,13 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rsdl import RSDL, RSDL_helper +if system.IS_64BIT: + from rpython.rlib.rarithmetic import widen +else: + def widen(x): + return x + + class W_Object(object): """Root of Squeak model, abstract.""" _attrs_ = [] # no RPython-level instance variables allowed in W_Object @@ -881,8 +888,8 @@ self.setword(index0, word) def getword(self, n): - # if n < 0: - # import pdb; pdb.set_trace() + if self.size() <= n: + return r_uint(0) assert self.size() > n >= 0 if self.words is not None: return self.words[n] @@ -890,6 +897,8 @@ return r_uint(self.c_words[n]) def setword(self, n, word): + if self.size() <= n: + return if self.words is not None: self.words[n] = r_uint(word) else: @@ -906,7 +915,7 @@ return space.wrap_int(intmask(r_uint32(short))) def short_atput0(self, space, index0, w_value): - from rpython.rlib.rarithmetic import int_between, widen + from rpython.rlib.rarithmetic import int_between i_value = space.unwrap_int(w_value) if constants.LONG_BIT == 64: if (not int_between(0, i_value, 0x8000) and @@ -1036,10 +1045,14 @@ return w_result def getword(self, n): + if self.size() <= n: + return r_uint(0) assert self.size() > n >= 0 return self._real_depth_buffer[n] def setword(self, n, word): + if self.size() <= n: + return self._real_depth_buffer[n] = word self.pixelbuffer[n] = r_uint32(word) diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -329,7 +329,7 @@ dstShiftInc = -dstShiftInc dstShiftLeft = 32 - self.dest.depth - for i in range(self.bbH): + for i in range(self.bbH + 1): if self.halftone: halftoneWord = r_uint(self.halftone[(self.dy + i) % len(self.halftone)]) else: @@ -355,7 +355,7 @@ self.dest.w_bits.setword(self.destIndex, destWord) self.destIndex += 1 - if (self.nWords == 2): # is the next word the last word? + if (word + 2 == self.nWords): # is the next word the last word? 
self.destMask = self.mask2 nPix = endBits else: # use fullword mask for inner loop @@ -438,7 +438,7 @@ # now loop over all lines y = self.dy - for i in range(1, self.bbH + 1): + for i in range(self.bbH + 1): if (halftoneHeight > 1): halftoneWord = r_uint(self.halftone[y % halftoneHeight]) y += self.vDir diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -565,6 +565,9 @@ @expose_primitive(SOME_INSTANCE, unwrap_spec=[object]) def func(interp, s_frame, w_class): + # XXX: finding Symbols via someInstance is broken + if w_class.is_same_object(interp.image.w_asSymbol.getclass(interp.space)): + raise PrimitiveFailedError() match_w = get_instances_array(interp.space, s_frame, w_class) try: return match_w[0] diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -40,10 +40,7 @@ return s_frame.pop() def w_l(largeInteger): - if largeInteger >= 0 and largeInteger <= constants.TAGGED_MAXINT: - return space.wrap_int(intmask(largeInteger)) - else: - return model.W_LargePositiveInteger1Word(intmask(largeInteger)) + return model.W_LargePositiveInteger1Word(intmask(largeInteger)) # test that using W_LargePositiveInteger1Word yields the correct results. # we use this way of testing to have multiple different test which may fail @@ -53,7 +50,7 @@ try: w_selector = space.get_special_selector(selector) except Exception: - w_selector = find_symbol_in_methoddict_of(selector, w(intmask(candidates[0])).getclass(space)._shadow) + w_selector = find_symbol_in_methoddict_of(selector, w_l(intmask(candidates[0])).getclass(space)._shadow) interp.trace=trace for i, v in enumerate(candidates): diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -16,8 +16,8 @@ s_methoddict.sync_cache() methoddict_w = s_methoddict.methoddict for each in methoddict_w.keys(): - if each.as_string() == string: - return each + if each == string: + return w(each) def test_all_pointers_are_valid(): tools.test_all_pointers_are_valid() From noreply at buildbot.pypy.org Thu Jan 16 15:04:58 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:58 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: store objects in methoddicts again Message-ID: <20140116140458.1DDAB1C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r580:ac28aa16f50f Date: 2014-01-16 13:31 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/ac28aa16f50f/ Log: store objects in methoddicts again diff --git a/spyvm/shadow.py b/spyvm/shadow.py --- a/spyvm/shadow.py +++ b/spyvm/shadow.py @@ -363,7 +363,7 @@ def find_selector(self, w_selector): if self.invalid: return None # we may be invalid if Smalltalk code did not call flushCache - return self.methoddict.get(self._as_md_entry(w_selector), None) + return self.methoddict.get(w_selector, None) def update(self): return self.sync_cache() @@ -408,7 +408,7 @@ "If the value observed is nil, our " "invalidating mechanism may be broken.") selector = self._as_md_entry(w_selector) - self.methoddict[selector] = w_compiledmethod.as_compiledmethod_get_shadow(self.space) + self.methoddict[w_selector] = w_compiledmethod.as_compiledmethod_get_shadow(self.space) w_compiledmethod._likely_methodname = selector if self.s_class: self.s_class.changed() From noreply at buildbot.pypy.org Thu Jan 16 15:04:59 
2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:04:59 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: fix instances getting for STM/no-STM Message-ID: <20140116140459.1FE641C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r581:676ac9995e51 Date: 2014-01-16 13:38 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/676ac9995e51/ Log: fix instances getting for STM/no-STM diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -522,18 +522,19 @@ """NOT RPYTHON""" from rpython.rlib import rgc return hasattr(rgc, "stm_is_enabled") and rgc.stm_is_enabled() -USES_STM = stm_enabled() +if stm_enabled(): + def get_instances_array(space, s_frame, w_class): + return [] +else: + def get_instances_array(space, s_frame, w_class): + # This primitive returns some instance of the class on the stack. + # Not sure quite how to do this; maintain a weak list of all + # existing instances or something? + match_w = s_frame.instances_array(w_class) + if match_w is None: + match_w = [] + from rpython.rlib import rgc -def get_instances_array(space, s_frame, w_class): - # This primitive returns some instance of the class on the stack. - # Not sure quite how to do this; maintain a weak list of all - # existing instances or something? - match_w = s_frame.instances_array(w_class) - if match_w is None: - match_w = [] - from rpython.rlib import rgc - - if USES_STM: roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] pending = roots[:] while pending: @@ -552,7 +553,7 @@ rgc.toggle_gcflag_extra(gcref) roots.extend(rgc.get_rpy_referents(gcref)) s_frame.store_instances_array(w_class, match_w) - return match_w + return match_w @expose_primitive(SOME_INSTANCE, unwrap_spec=[object]) def func(interp, s_frame, w_class): From noreply at buildbot.pypy.org Thu Jan 16 15:05:00 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:05:00 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: make all tests green again Message-ID: <20140116140500.252171C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r582:2d567611e3c6 Date: 2014-01-16 14:11 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/2d567611e3c6/ Log: make all tests green again diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -1,3 +1,4 @@ +import time from rpython.rlib.jit import elidable from spyvm.tool.bitmanipulation import splitter @@ -186,3 +187,4 @@ MAX_LOOP_DEPTH = 100 INTERRUPT_COUNTER_SIZE = 10000 +CompileTime = int(time.time() * 1000) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -43,7 +43,10 @@ self.space = space self.image = image self.image_name = image_name - self.startup_time = time.time() + if image: + self.startup_time = image.startup_time + else: + self.startup_time = constants.CompileTime self.max_stack_depth = max_stack_depth self.remaining_stack_depth = max_stack_depth self._loop = False @@ -204,7 +207,7 @@ def time_now(self): import time from rpython.rlib.rarithmetic import intmask - return intmask(int((time.time() - self.startup_time) * 1000) & constants.TAGGED_MASK) + return intmask((int(time.time() * 1000) - self.startup_time) & constants.TAGGED_MASK) def padding(self, symbol=' '): return symbol * (self.max_stack_depth - self.remaining_stack_depth) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ 
b/spyvm/primitives.py @@ -645,8 +645,18 @@ @expose_primitive(BITBLT_COPY_BITS, clean_stack=False, no_result=True, compiled_method=True) def func(interp, s_frame, argcount, s_method): - from spyvm.plugins.bitblt import BitBltPlugin - return BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) + from spyvm.interpreter import Return + try: + s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) + except Return: + w_dest_form = w_rcvr.fetch(space, 0) + if w_dest_form.is_same_object(space.objtable['w_display']): + w_bitmap = w_dest_form.fetch(space, 0) + assert isinstance(w_bitmap, model.W_DisplayBitmap) + w_bitmap.flush_to_screen() + except shadow.MethodNotFound: + from spyvm.plugins.bitblt import BitBltPlugin + BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) @expose_primitive(BE_CURSOR) def func(interp, s_frame, argcount): diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -1,6 +1,7 @@ import py import os import sys +import time from spyvm import constants from spyvm import model from spyvm.tool.bitmanipulation import splitter @@ -377,6 +378,7 @@ self.version = reader.version self.is_modern = reader.version.magic > 6502 self.run_spy_hacks(space) + self.startup_time = int(time.time() * 1000) def run_spy_hacks(self, space): pass diff --git a/spyvm/test/test_bitblt.py b/spyvm/test/test_bitblt.py --- a/spyvm/test/test_bitblt.py +++ b/spyvm/test/test_bitblt.py @@ -1,4 +1,5 @@ from spyvm import model, shadow, constants, interpreter, objspace +from spyvm.plugins import bitblt space = objspace.ObjSpace() @@ -37,7 +38,7 @@ def test_bitBlt_values(): w_bb = model.W_PointersObject(space, space.w_Array, 15) - w_bb.store(space, 0, make_form([], 1230, 20, 1)) + w_bb.store(space, 0, make_form([0] * 1230 * 20, 1230, 20, 1)) w_bb.store(space, 1, w_bb.fetch(space, 0)) w_bb.store(space, 2, space.w_nil) @@ -54,25 +55,26 @@ w_bb.store(space, 13, w(15)) # clip height w_bb.store(space, 14, model.W_PointersObject(space, space.w_Array, 5)) # color map - s_bb = w_bb.as_bitblt_get_shadow(space) - s_bb.clip_range() - assert not (s_bb.w <= 0 or s_bb.h <= 0) - s_bb.compute_masks() - s_bb.check_overlap() - s_bb.calculate_offsets() + s_bb = w_bb.as_special_get_shadow(space, bitblt.BitBltShadow) + s_bb.loadBitBlt() + s_bb.clipRange() + assert not (s_bb.width <= 0 or s_bb.height <= 0) + s_bb.destMaskAndPointerInit() + s_bb.checkSourceOverlap() + s_bb.sourceSkewAndPointerInit() - assert s_bb.dest_x == 1 - assert s_bb.dest_y == 0 - assert s_bb.sx == 1218 - assert s_bb.sy == 0 - assert s_bb.dx == 1219 - assert s_bb.dy == 0 - assert s_bb.w == 1219 - assert s_bb.h == 15 - assert s_bb.h_dir == -1 - assert s_bb.v_dir == 1 - assert s_bb.source_delta == 79 - assert s_bb.dest_delta == 78 + assert s_bb.destX == 1 + assert s_bb.destY == 0 + assert s_bb.sourceX == 0 + assert s_bb.sourceY == 0 + assert s_bb.destX == 1 + assert s_bb.destY == 0 + assert s_bb.width == 1220 + assert s_bb.height == 15 + assert s_bb.hDir == -1 + assert s_bb.vDir == 1 + assert s_bb.sourceDelta == 79 + assert s_bb.destDelta == 78 assert s_bb.skew == 31 - assert s_bb.source_index == 38 - assert s_bb.dest_index == 38 \ No newline at end of file + assert s_bb.sourceIndex == 38 + assert s_bb.destIndex == 38 diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -367,14 +367,14 @@ assert bin(target.getword(0)) == bin(0x00FF00FF) target.setword(0, r_uint(0xFF00FF00)) 
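
The startup_time recorded when the image is loaded (squeakimage.py above) is what time_now subtracts from the wall clock, so the millisecond clock counts from image startup rather than from the epoch and stays small enough for a SmallInteger (the earlier code masked with TAGGED_MASK instead). A minimal sketch of the arithmetic, mirroring the mocked Image used in the test changes:

    import time

    class FakeImage(object):
        def __init__(self):
            # recorded once when the image is loaded
            self.startup_time = int(time.time() * 1000)

    def time_now(image):
        # milliseconds since image startup, cf. Interpreter.time_now above
        return int(time.time() * 1000) - image.startup_time

    image = FakeImage()
    assert 0 <= time_now(image) < 1000   # almost no time has passed yet
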
assert bin(target.getword(0)) == bin(0xFF00FF00) - for i in xrange(8): - assert target.pixelbuffer[i] == 0xff000000 - for i in xrange(8, 16): - assert target.pixelbuffer[i] == 0xffffffff - for i in xrange(16, 24): - assert target.pixelbuffer[i] == 0xff000000 - for i in xrange(24, 32): - assert target.pixelbuffer[i] == 0xffffffff + for i in xrange(2): + assert target.pixelbuffer[i] == 0x01010101 + for i in xrange(2, 4): + assert target.pixelbuffer[i] == 0x0 + for i in xrange(4, 6): + assert target.pixelbuffer[i] == 0x01010101 + for i in xrange(6, 8): + assert target.pixelbuffer[i] == 0x0 def test_display_offset_computation(): @@ -387,9 +387,9 @@ dbitmap = model.W_DisplayBitmap.create(space, space.w_Array, 5, 1, d) - assert dbitmap.compute_pos_and_line_end(0, 1) == (0, 18) - assert dbitmap.compute_pos_and_line_end(1, 1) == (18, 36) - assert dbitmap.size() == 5 + assert dbitmap.compute_pos(0) == 0 + assert dbitmap.compute_pos(1) == 8 + assert dbitmap.size() == 5 * 8 @py.test.mark.skipif("socket.gethostname() == 'precise32'") def test_weak_pointers(): diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -4,6 +4,7 @@ from spyvm.primitives import prim_table, PrimitiveFailedError from spyvm import model, shadow, interpreter from spyvm import constants, primitives, objspace, wrapper, display +from spyvm.plugins import bitblt from rpython.rlib.rfloat import INFINITY, NAN, isinf, isnan @@ -776,7 +777,7 @@ class DisplayFlush(Exception): pass - def flush_to_screen_mock(self): + def flush_to_screen_mock(self, force=False): raise DisplayFlush try: @@ -810,7 +811,7 @@ try: monkeypatch.setattr(w_frame._shadow, "_sendSelfSelector", perform_mock) - monkeypatch.setattr(shadow.BitBltShadow, "sync_cache", sync_cache_mock) + monkeypatch.setattr(bitblt.BitBltShadow, "sync_cache", sync_cache_mock) with py.test.raises(CallCopyBitsSimulation): prim_table[primitives.BITBLT_COPY_BITS](interp, w_frame.as_context_get_shadow(space), argument_count-1) finally: From noreply at buildbot.pypy.org Thu Jan 16 15:05:01 2014 From: noreply at buildbot.pypy.org (timfel) Date: Thu, 16 Jan 2014 15:05:01 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: untagged ints everywhere Message-ID: <20140116140501.293011C35EC@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r583:6d5ffc8d1ed3 Date: 2014-01-16 14:45 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/6d5ffc8d1ed3/ Log: untagged ints everywhere diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -1,3 +1,4 @@ +import sys import time from rpython.rlib.jit import elidable @@ -149,6 +150,8 @@ TAGGED_MASK = int(2 ** (LONG_BIT - 1) - 1) +MAXINT = sys.maxint +MININT = -sys.maxint-1 # Entries into SO_SPECIAL_SELECTORS_ARRAY: #(#+ 1 #- 1 #< 1 #> 1 #<= 1 #>= 1 #= 1 #~= 1 #* 1 #/ 1 #\\ 1 #@ 1 #bitShift: 1 #// 1 #bitAnd: 1 #bitOr: 1 #at: 1 #at:put: 2 #size 0 #next 0 #nextPut: 1 #atEnd 0 #== 1 #class 0 #blockCopy: 1 #value 0 #value: 1 #do: 1 #new 0 #new: 1 #x 0 #y 0) diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -207,7 +207,7 @@ def time_now(self): import time from rpython.rlib.rarithmetic import intmask - return intmask((int(time.time() * 1000) - self.startup_time) & constants.TAGGED_MASK) + return intmask((int(time.time() * 1000) - self.startup_time)) def padding(self, symbol=' '): return symbol * (self.max_stack_depth - 
self.remaining_stack_depth) diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -189,8 +189,7 @@ def unwrap_uint(self, space): from rpython.rlib.rarithmetic import r_uint val = self.value - if val < 0: - raise error.UnwrappingError("got negative integer") + # Assume the caller knows what he does, even if int is negative return r_uint(val) @@ -758,7 +757,7 @@ byte0 = ord(self.getchar(byte_index0)) byte1 = ord(self.getchar(byte_index0 + 1)) << 8 if byte1 & 0x8000 != 0: - byte1 = intmask(-65536 | byte1) # -65536 = 0xffff0000 + byte1 = intmask(intmask(0xffff0000) | byte1) return space.wrap_int(byte1 | byte0) def short_atput0(self, space, index0, w_value): @@ -896,7 +895,7 @@ else: short = (word >> 16) & 0xffff if short & 0x8000 != 0: - short = -65536 | short # -65536 = 0xffff0000 + short = intmask(0xffff0000) | short return space.wrap_int(intmask(short)) def short_atput0(self, space, index0, w_value): @@ -907,7 +906,7 @@ word_index0 = index0 / 2 word = intmask(self.getword(word_index0)) if index0 % 2 == 0: - word = (word & -65536) | (i_value & 0xffff) # -65536 = 0xffff0000 + word = (word & intmask(0xffff0000)) | (i_value & 0xffff) else: word = (i_value << 16) | (word & 0xffff) value = r_uint(word) diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -200,44 +200,21 @@ def wrap_int(self, val): from spyvm import constants assert isinstance(val, int) - if int_between(constants.TAGGED_MININT, val, - constants.TAGGED_MAXINT + 1): - return model.W_SmallInteger(val) - # We can't build large integers here, because we don't know what to do - # with negativ vals: raise an error or interpret them as 4-byte positive? - raise WrappingError("integer too large to fit into a tagged pointer") + # we don't do tagging + return model.W_SmallInteger(val) def wrap_uint(self, val): from rpython.rlib.objectmodel import we_are_translated - if not we_are_translated(): - assert val <= 0xFFFFFFFF if val < 0: raise WrappingError("negative integer") - if val >= 0: - try: - return self.wrap_positive_32bit_int(intmask(val)) - except WrappingError: - pass - # XXX this code sucks - import math - bytes_len = int(math.log(val) / math.log(0xff)) + 1 - if bytes_len <= 4: + else: return self.wrap_positive_32bit_int(intmask(val)) - else: - return self._wrap_uint_loop(val, bytes_len) - - def _wrap_uint_loop(self, val, bytes_len): - w_result = model.W_BytesObject(self, - self.classtable['w_LargePositiveInteger'], bytes_len) - for i in range(bytes_len): - w_result.setchar(i, chr(intmask((val >> i*8) & 255))) - return w_result def wrap_positive_32bit_int(self, val): # This will always return a positive value. # XXX: For now, we assume that val is at most 32bit, i.e. overflows are - # checked for before wrapping. - if int_between(0, val, constants.TAGGED_MAXINT + 1): + # checked for before wrapping. Also, we ignore tagging. 
+ if int_between(0, val, constants.MAXINT): return model.W_SmallInteger(val) else: return model.W_LargePositiveInteger1Word(val) diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -343,7 +343,7 @@ def test_large_positive_integer_operations(): w_result = perform(interp.space.w_SmallInteger, "maxVal") - w_result = perform(w_result, "+", space.wrap_int(42)) + w_result = perform(w_result, "+", interp.space.wrap_int(2 * interp.space.unwrap_int(w_result))) assert w_result is not None assert isinstance(w_result, model.W_LargePositiveInteger1Word) diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -271,12 +271,15 @@ def test_float_at(): b = model.W_Float(64.0) r = b.fetch(space, 0) - assert isinstance(r, model.W_LargePositiveInteger1Word) - assert r.size() == 4 - assert space.unwrap_int(r.at0(space, 0)) == 0 - assert space.unwrap_int(r.at0(space, 1)) == 0 - assert space.unwrap_int(r.at0(space, 2)) == 80 - assert space.unwrap_int(r.at0(space, 3)) == 64 + if isinstance(r, model.W_LargePositiveInteger1Word): + assert r.size() == 4 + assert space.unwrap_int(r.at0(space, 0)) == 0 + assert space.unwrap_int(r.at0(space, 1)) == 0 + assert space.unwrap_int(r.at0(space, 2)) == 80 + assert space.unwrap_int(r.at0(space, 3)) == 64 + else: + assert isinstance(r, model.W_SmallInteger) + assert space.unwrap_int(r) == (80 << 16) + (64 << 24) r = b.fetch(space, 1) assert isinstance(r, model.W_SmallInteger) assert r.value == 0 diff --git a/spyvm/test/test_objectspace.py b/spyvm/test/test_objectspace.py --- a/spyvm/test/test_objectspace.py +++ b/spyvm/test/test_objectspace.py @@ -47,9 +47,6 @@ for num in [-1, -100, -sys.maxint]: with py.test.raises(objspace.WrappingError): space.wrap_uint(num) - for obj in [space.wrap_char('a'), space.wrap_int(-1)]: - with py.test.raises(objspace.UnwrappingError): - space.unwrap_uint(obj) # byteobj = space.wrap_uint(0x100000000) # assert isinstance(byteobj, model.W_BytesObject) # byteobj.bytes.append('\x01') @@ -64,8 +61,3 @@ for num in [2L, -5L]: with py.test.raises(AssertionError): space.wrap_int(num) - - from rpython.rlib.rarithmetic import intmask - for num in [0x7fffffff, intmask(0x80000000)]: - with py.test.raises(objspace.WrappingError): - space.wrap_int(num) diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -69,7 +69,7 @@ assert prim(primitives.ADD, [3,4]).value == 7 def test_small_int_add_fail(): - w_result = prim_fails(primitives.ADD, [constants.TAGGED_MAXINT, 2]) + w_result = prim_fails(primitives.ADD, [constants.MAXINT, 2]) # assert isinstance(w_result, model.W_LargePositiveInteger1Word) # assert w_result.value == constants.TAGGED_MAXINT + 2 # prim_fails(primitives.ADD, [constants.TAGGED_MAXINT, constants.TAGGED_MAXINT * 2]) @@ -78,21 +78,21 @@ assert prim(primitives.SUBTRACT, [5,9]).value == -4 def test_small_int_minus_fail(): - prim_fails(primitives.SUBTRACT, [constants.TAGGED_MININT,1]) + prim_fails(primitives.SUBTRACT, [constants.MININT,1]) prim_fails(primitives.SUBTRACT, - [constants.TAGGED_MININT, constants.TAGGED_MAXINT]) + [constants.MININT, constants.MAXINT]) def test_small_int_multiply(): assert prim(primitives.MULTIPLY, [6,3]).value == 18 def test_small_int_multiply_overflow(): - w_result = prim_fails(primitives.MULTIPLY, [constants.TAGGED_MAXINT, 2]) + w_result = 
prim_fails(primitives.MULTIPLY, [constants.MAXINT, 2]) #assert isinstance(w_result, model.W_LargePositiveInteger1Word) #assert w_result.value == constants.TAGGED_MAXINT * 2 - prim_fails(primitives.MULTIPLY, [constants.TAGGED_MAXINT, constants.TAGGED_MAXINT]) - prim_fails(primitives.MULTIPLY, [constants.TAGGED_MAXINT, -4]) - prim_fails(primitives.MULTIPLY, [constants.TAGGED_MININT, constants.TAGGED_MAXINT]) - prim_fails(primitives.MULTIPLY, [constants.TAGGED_MININT, 2]) + prim_fails(primitives.MULTIPLY, [constants.MAXINT, constants.MAXINT]) + prim_fails(primitives.MULTIPLY, [constants.MAXINT, -4]) + prim_fails(primitives.MULTIPLY, [constants.MININT, constants.MAXINT]) + prim_fails(primitives.MULTIPLY, [constants.MININT, 2]) def test_small_int_divide(): assert prim(primitives.DIVIDE, [6,3]).value == 2 @@ -179,13 +179,9 @@ from rpython.rlib.rarithmetic import intmask prim_fails(primitives.BIT_SHIFT, [4, 32]) prim_fails(primitives.BIT_SHIFT, [4, 31]) - prim_fails(primitives.BIT_SHIFT, [4, 30]) w_result = prim(primitives.BIT_SHIFT, [4, 29]) assert isinstance(w_result, model.W_LargePositiveInteger1Word) assert w_result.value == intmask(4 << 29) - w_result = prim(primitives.BIT_SHIFT, [4, 28]) - assert isinstance(w_result, model.W_LargePositiveInteger1Word) - assert w_result.value == 4 << 28 def test_smallint_as_float(): assert prim(primitives.SMALLINT_AS_FLOAT, [12]).value == 12.0 From noreply at buildbot.pypy.org Thu Jan 16 15:16:03 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 16 Jan 2014 15:16:03 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) support for the very basic resume_new Message-ID: <20140116141603.81C491C0459@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68699:5ce191fc13ab Date: 2014-01-16 15:15 +0100 http://bitbucket.org/pypy/pypy/changeset/5ce191fc13ab/ Log: (fijal, rguillebert) support for the very basic resume_new diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -5,6 +5,7 @@ ImmutableIntUnbounded, \ IntLowerBound, MININT, MAXINT from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method +from rpython.jit.metainterp.optimizeopt.resumeopt import OptResumeBuilder from rpython.jit.metainterp.resoperation import rop, ResOperation, AbstractResOp from rpython.jit.metainterp.typesystem import llhelper from rpython.tool.pairtype import extendabletype @@ -363,7 +364,7 @@ self.call_pure_results = loop.call_pure_results self.set_optimizations(optimizations) - self.resume_stack = [] + self.resumebuilder = OptResumeBuilder(self) self.setup() def set_optimizations(self, optimizations): @@ -387,7 +388,7 @@ o.force_at_end_of_preamble() def flush(self): - self.resume_flush() + self.resumebuilder.resume_flush() for o in self.optimizations: o.flush() @@ -547,11 +548,13 @@ self.metainterp_sd.profiler.count(jitprof.Counters.OPT_OPS) if op.is_guard(): self.metainterp_sd.profiler.count(jitprof.Counters.OPT_GUARDS) + pendingfields = self.pendingfields self.pendingfields = None if self.replaces_guard and op in self.replaces_guard: self.replace_op(self.replaces_guard[op], op) del self.replaces_guard[op] return + self.resumebuilder.guard_seen(op, pendingfields) elif op.can_raise(): self.exception_might_have_happened = True if op.result: @@ -676,23 +679,13 @@ # pending refactor def optimize_ENTER_FRAME(self, op): - 
self.resume_stack.append(op) + self.resumebuilder.enter_frame(op.getarg(0).getint(), op.getdescr()) + self.optimize_default(op) def optimize_LEAVE_FRAME(self, op): - if self.resume_stack: - self.resume_stack.pop() - else: - self.emit_operation(op) - - def optimize_RESUME_PUT(self, op): - self.resume_flush() + self.resumebuilder.leave_frame(op) self.optimize_default(op) - def resume_flush(self): - for op in self.resume_stack: - self.emit_operation(op) - self.resume_stack = [] - dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_', default=Optimizer.optimize_default) diff --git a/rpython/jit/metainterp/optimizeopt/resumeopt.py b/rpython/jit/metainterp/optimizeopt/resumeopt.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/resumeopt.py @@ -0,0 +1,67 @@ + +from rpython.jit.metainterp.history import ConstInt, BoxPtr +from rpython.jit.metainterp.resoperation import rop, ResOperation +from rpython.jit.codewriter.jitcode import JitCode + +class ResumeFrame(object): + def __init__(self, pc, jitcode): + self.pc = pc + assert isinstance(jitcode, JitCode) + self.jitcode = jitcode + self.boxes = [None] * jitcode.num_regs() + +class OptResumeBuilder(object): + def __init__(self, opt): + self.framestack = [] + self.last_flushed_pos = 0 + self.opt = opt + self.virtuals = {} + + def enter_frame(self, pc, jitcode): + self.framestack.append(ResumeFrame(pc, jitcode)) + + def leave_frame(self, op): + #if self.last_flushed_pos < len(self.framestack) - 1: + # self.emit_missing_enter_frame() + #else: + # self.opt.emit_operation(op) + # self.last_flushed_pos -= 1 + self.framestack.pop() + + def resume_flush(self): + return + for i in range(self.last_flushed_pos, len(self.framestack)): + frame = self.framestack[i] + resop = ResOperation(rop.ENTER_FRAME, [ConstInt(frame.pc)], + None, descr=frame.jitcode) + self.opt.emit_operation(resop) + self.last_flushed_pos = len(self.framestack) + + def resume_put(self, op): + self.resume_flush() + box = op.getarg(0) + value = self.opt.getvalue(box) + if value.is_virtual(): + op = ResOperation(rop.RESUME_PUT, [value.resume_box, + op.getarg(1), + op.getarg(2)], None) + self.opt._newoperations.append(op) + else: + self.opt.emit_operation(op) + #no = op.getarg(2).getint() + #box = self.opt.getvalue(op.getarg(0)).box + #self.framestack[op.getarg(1).getint()].boxes[no] = box + + def new_virtual(self, box): + xxx + self.optimizer.emit_operation(rop.RESUME_NEW) + + def new_virtual_struct(self, box, vstruct, structdescr): + newbox = BoxPtr() + vstruct.resume_box = newbox + op = ResOperation(rop.RESUME_NEW, [], newbox, descr=structdescr) + self.opt._newoperations.append(op) + + def guard_seen(self, op, pendingfields): + #xxx + pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -454,6 +454,27 @@ """ self.optimize_loop(ops, expected) + def test_virtual_resume_info(self): + ops = """ + [i0] + enter_frame(-1, descr=jitcode) + p0 = new(descr=ssize) + resume_put(p0, 0, 0) + guard_true(i0) + leave_frame() + finish() + """ + expected = """ + [i0] + enter_frame(-1, descr=jitcode) + p0 = resume_new(descr=ssize) + resume_put(p0, 0, 0) + guard_true(i0) + leave_frame() + finish() + """ + self.optimize_loop(ops, expected) + def test_ooisnull_oononnull_via_virtual(self): ops = """ [p0] diff --git 
a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -14,7 +14,7 @@ class AbstractVirtualValue(optimizer.OptValue): - _attrs_ = ('keybox', 'source_op', '_cached_vinfo') + _attrs_ = ('keybox', 'source_op', '_cached_vinfo', 'resume_box') box = None level = optimizer.LEVEL_NONNULL is_about_raw = False @@ -491,6 +491,7 @@ def make_virtual(self, known_class, box, source_op=None): vvalue = VirtualValue(self.optimizer.cpu, known_class, box, source_op) + self.optimizer.resumebuilder.new_virtual(box) self.make_equal_to(box, vvalue) return vvalue @@ -505,6 +506,8 @@ def make_vstruct(self, structdescr, box, source_op=None): vvalue = VStructValue(self.optimizer.cpu, structdescr, box, source_op) + self.optimizer.resumebuilder.new_virtual_struct(box, vvalue, + structdescr) self.make_equal_to(box, vvalue) return vvalue @@ -825,6 +828,9 @@ value.ensure_nonnull() self.emit_operation(op) + def optimize_RESUME_PUT(self, op): + self.optimizer.resumebuilder.resume_put(op) + dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_', default=OptVirtualize.emit_operation) From noreply at buildbot.pypy.org Thu Jan 16 16:34:03 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 16 Jan 2014 16:34:03 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: (antocuni, mjacob, arigo): declare that CPython is a mess and mark these tests as implementation dependent Message-ID: <20140116153403.24EB61C0500@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: refactor-str-types Changeset: r68700:b398d5362713 Date: 2014-01-16 16:33 +0100 http://bitbucket.org/pypy/pypy/changeset/b398d5362713/ Log: (antocuni, mjacob, arigo): declare that CPython is a mess and mark these tests as implementation dependent diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. 
See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -68,10 +68,14 @@ return W_MemoryView(buf) def descr_buffer(self, space): - """Note that memoryview() objects in PyPy support buffer(), whereas - not in CPython; but CPython supports passing memoryview() to most - built-in functions that accept buffers, with the notable exception - of the buffer() built-in.""" + """ + Note that memoryview() is very inconsistent in CPython: it does not + support the buffer interface but does support the new buffer + interface: as a result, it is possible to pass memoryview to + e.g. socket.send() but not to file.write(). For simplicity and + consistency, in PyPy memoryview DOES support buffer(), which means + that it is accepted in more places than CPython. + """ return space.wrap(self.buf) def descr_tobytes(self, space): From noreply at buildbot.pypy.org Thu Jan 16 17:20:53 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 16 Jan 2014 17:20:53 +0100 (CET) Subject: [pypy-commit] stmgc c7: start doing minor collections Message-ID: <20140116162053.B1AB11C3399@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r613:0c6d8dd59f10 Date: 2014-01-16 17:20 +0100 http://bitbucket.org/pypy/stmgc/changeset/0c6d8dd59f10/ Log: start doing minor collections diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -20,11 +20,13 @@ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define LARGE_OBJECT_WORDS 36 #define NB_NURSERY_PAGES 1024 +#define LENGTH_SHADOW_STACK 163840 #define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_THREADS) #define READMARKER_END ((NB_PAGES * 4096UL) >> 4) #define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) +#define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE #define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) #define FIRST_AFTER_NURSERY_PAGE (FIRST_OBJECT_PAGE + NB_NURSERY_PAGES) @@ -56,10 +58,29 @@ struct stm_list_s *new_object_ranges; struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; localchar_t *nursery_current; + + struct stm_list_s *old_objects_to_trace; + /* pages newly allocated in the current transaction only containing + uncommitted objects */ + struct stm_list_s *uncommitted_pages; }; #define _STM_TL2 ((_thread_local2_t *)_STM_TL1) -enum { SHARED_PAGE=0, REMAPPING_PAGE, PRIVATE_PAGE }; /* flag_page_private */ +enum { + /* unprivatized page seen by all threads */ + SHARED_PAGE=0, + + /* page being in the process of privatization */ + REMAPPING_PAGE, + + /* page private for each thread */ + PRIVATE_PAGE, + + /* set for SHARED pages that only contain objects belonging + to the current transaction, so the whole page is not + visible yet for other threads */ + UNCOMMITTED_SHARED_PAGE, +}; /* flag_page_private */ static char *object_pages; @@ -72,6 +93,8 @@ /************************************************************/ uintptr_t _stm_reserve_page(void); void 
stm_abort_transaction(void); +localchar_t *_stm_alloc_next_page(size_t i); +void mark_page_as_uncommitted(uintptr_t pagenum); static void spin_loop(void) { @@ -124,8 +147,7 @@ lock, so don't conflict with each other; when we need to do a global GC, we take a writer lock to "stop the world". Note the initializer here, which should give the correct priority for stm_possible_safe_point(). */ -static pthread_rwlock_t rwlock_shared = - PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP; +static pthread_rwlock_t rwlock_shared; struct tx_descriptor *in_single_thread = NULL; @@ -245,9 +267,9 @@ #define REAL_ADDRESS(object_pages, src) ((object_pages) + (uintptr_t)(src)) -static char *real_address(uintptr_t src) +static struct object_s *real_address(object_t *src) { - return REAL_ADDRESS(_STM_TL2->thread_base, src); + return (struct object_s*)REAL_ADDRESS(_STM_TL2->thread_base, src); } @@ -256,11 +278,16 @@ return object_pages + thread_num * (NB_PAGES * 4096UL); } +bool _is_young(object_t *o) +{ + assert((uintptr_t)o >= FIRST_NURSERY_PAGE * 4096); + return (uintptr_t)o < FIRST_AFTER_NURSERY_PAGE * 4096; +} + bool _stm_is_in_nursery(char *ptr) { object_t * o = _stm_tl_address(ptr); - assert(o); - return (uintptr_t)o < FIRST_AFTER_NURSERY_PAGE * 4096; + return _is_young(o); } char *_stm_real_address(object_t *o) @@ -269,7 +296,7 @@ return NULL; assert(FIRST_OBJECT_PAGE * 4096 <= (uintptr_t)o && (uintptr_t)o < NB_PAGES * 4096); - return real_address((uintptr_t)o); + return (char*)real_address(o); } object_t *_stm_tl_address(char *ptr) @@ -284,6 +311,7 @@ } + enum detect_conflicts_e { CANNOT_CONFLICT, CAN_CONFLICT, CHECK_CONFLICT }; static void update_to_current_version(enum detect_conflicts_e check_conflict) @@ -309,12 +337,9 @@ char *dst = REAL_ADDRESS(local_base, item); char *src = REAL_ADDRESS(remote_base, item); - char *src_rebased = src - (uintptr_t)local_base; - size_t size = stm_object_size_rounded_up((object_t *)src_rebased); + size_t size = stmcb_size((struct object_s*)src); - memcpy(dst + sizeof(char *), - src + sizeof(char *), - size - sizeof(char *)); + memcpy(dst, src, size); })); write_fence(); @@ -345,7 +370,17 @@ void _stm_write_slowpath(object_t *obj) { - _stm_privatize(((uintptr_t)obj) / 4096); + uintptr_t pagenum = ((uintptr_t)obj) / 4096; + + /* old objects from the same transaction */ + if (flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE + || obj->stm_flags & GCFLAG_NOT_COMMITTED) { + _STM_TL2->old_objects_to_trace = stm_list_append + (_STM_TL2->old_objects_to_trace, obj); + + return; + } + _stm_privatize(pagenum); uintptr_t t0_offset = (uintptr_t)obj; char* t0_addr = get_thread_base(0) + t0_offset; @@ -372,6 +407,7 @@ /* Return the index'th object page, which is so far never used. */ uintptr_t index = __sync_fetch_and_add(&index_page_never_used, 1); + assert(flag_page_private[index] == SHARED_PAGE); if (index >= NB_PAGES) { fprintf(stderr, "Out of mmap'ed memory!\n"); abort(); @@ -386,6 +422,22 @@ ((start) = (uint16_t)(uintptr_t)(range), \ (stop) = ((uintptr_t)(range)) >> 16) +localchar_t *_stm_alloc_old(size_t size) +{ + size_t size_class = size / 8; + alloc_for_size_t *alloc = &_STM_TL2->alloc[size_class]; + localchar_t *result; + + if ((uint16_t)((uintptr_t)alloc->next) == alloc->stop) + result = _stm_alloc_next_page(size_class); + else { + result = alloc->next; + alloc->next += size; + } + + return result; +} + localchar_t *_stm_alloc_next_page(size_t i) { /* 'alloc->next' points to where the next allocation should go. 
The @@ -404,21 +456,24 @@ alloc_for_size_t *alloc = &_STM_TL2->alloc[i]; size_t size = i * 8; - if (alloc->flag_partial_page) { - /* record this range in 'new_object_ranges' */ - localchar_t *ptr1 = alloc->next - size - 1; - object_t *range; - TO_RANGE(range, alloc->start, alloc->stop); - page = ((uintptr_t)ptr1) / 4096; - _STM_TL2->new_object_ranges = stm_list_append( - _STM_TL2->new_object_ranges, (object_t *)page); - _STM_TL2->new_object_ranges = stm_list_append( - _STM_TL2->new_object_ranges, range); - } + /* if (alloc->flag_partial_page) { */ + /* /\* record this range in 'new_object_ranges' *\/ */ + /* localchar_t *ptr1 = alloc->next - size - 1; */ + /* object_t *range; */ + /* TO_RANGE(range, alloc->start, alloc->stop); */ + /* page = ((uintptr_t)ptr1) / 4096; */ + /* _STM_TL2->new_object_ranges = stm_list_append( */ + /* _STM_TL2->new_object_ranges, (object_t *)page); */ + /* _STM_TL2->new_object_ranges = stm_list_append( */ + /* _STM_TL2->new_object_ranges, range); */ + /* } */ /* reserve a fresh new page */ page = _stm_reserve_page(); + /* mark as UNCOMMITTED_... */ + mark_page_as_uncommitted(page); + result = (localchar_t *)(page * 4096UL); alloc->start = (uintptr_t)result; alloc->stop = alloc->start + (4096 / size) * size; @@ -427,18 +482,94 @@ return result; } + +void mark_page_as_uncommitted(uintptr_t pagenum) +{ + flag_page_private[pagenum] = UNCOMMITTED_SHARED_PAGE; + _STM_TL2->uncommitted_pages = stm_list_append + (_STM_TL2->uncommitted_pages, (object_t*)pagenum); +} + +void trace_if_young(object_t **pobj) +{ + if (*pobj == NULL) + return; + if (!_is_young(*pobj)) + return; + + /* the location the object moved to is at an 8b offset */ + object_t **pforwared = (object_t**)(((char*)(*pobj)) + 8); + if ((*pobj)->stm_flags & GCFLAG_MOVED) { + *pobj = *pforwared; + return; + } + + /* move obj to somewhere else */ + size_t size = stmcb_size(real_address(*pobj)); + object_t *moved = (object_t*)_stm_alloc_old(size); + + memcpy((void*)real_address(moved), + (void*)real_address(*pobj), + size); + + (*pobj)->stm_flags |= GCFLAG_MOVED; + *pforwared = moved; + *pobj = moved; + + _STM_TL2->old_objects_to_trace = stm_list_append + (_STM_TL2->old_objects_to_trace, moved); +} + +void minor_collect() +{ + /* visit shadowstack & add to old_obj_to_trace */ + object_t **current = _STM_TL1->shadow_stack; + object_t **base = _STM_TL1->shadow_stack_base; + while (current-- != base) { + trace_if_young(current); + } + + /* visit old_obj_to_trace until empty */ + struct stm_list_s *old_objs = _STM_TL2->old_objects_to_trace; + while (!stm_list_is_empty(old_objs)) { + object_t *item = stm_list_pop_item(old_objs); + stmcb_trace(real_address(item), + trace_if_young); + } + + /* XXX fix modified_objects? */ + + // also move objects to PRIVATE_PAGE pages, but then + // also add the GCFLAG_NOT_COMMITTED to these objects. 
+ + /* clear nursery */ + localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); + memset((void*)real_address((object_t*)nursery_base), 0x0, + _STM_TL2->nursery_current - nursery_base); + _STM_TL2->nursery_current = nursery_base; +} + +localchar_t *collect_and_reserve(size_t size) +{ + minor_collect(); + + localchar_t *current = _STM_TL2->nursery_current; + _STM_TL2->nursery_current = current + size; + return current; +} + object_t *stm_allocate(size_t size) { assert(size % 8 == 0); size_t i = size / 8; assert(2 <= i && i < LARGE_OBJECT_WORDS);//XXX + assert(2 <= i && i < NB_NURSERY_PAGES * 4096);//XXX localchar_t *current = _STM_TL2->nursery_current; localchar_t *new_current = current + size; _STM_TL2->nursery_current = new_current; if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { - /* XXX: do minor collection */ - abort(); + current = collect_and_reserve(size); } object_t *result = (object_t *)current; @@ -450,6 +581,13 @@ void stm_setup(void) { + pthread_rwlockattr_t attr; + pthread_rwlockattr_init(&attr); + pthread_rwlockattr_setkind_np(&attr, + PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); + pthread_rwlock_init(&rwlock_shared, &attr); + pthread_rwlockattr_destroy(&attr); + /* Check that some values are acceptable */ assert(4096 <= ((uintptr_t)_STM_TL1)); assert(((uintptr_t)_STM_TL1) == ((uintptr_t)_STM_TL2)); @@ -526,24 +664,48 @@ _stm_restore_local_state(thread_num); - _STM_TL2->nursery_current = (localchar_t*)(FIRST_OBJECT_PAGE * 4096); + _STM_TL2->nursery_current = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); + _STM_TL1->shadow_stack = (object_t**)malloc(LENGTH_SHADOW_STACK * sizeof(void*)); + _STM_TL1->shadow_stack_base = _STM_TL1->shadow_stack; + + _STM_TL2->old_objects_to_trace = stm_list_create(); + _STM_TL2->uncommitted_pages = stm_list_create(); _STM_TL2->modified_objects = stm_list_create(); assert(!_STM_TL2->running_transaction); } +bool _stm_is_in_transaction(void) +{ + return _STM_TL2->running_transaction; +} + void _stm_teardown_thread(void) { + assert(!pthread_rwlock_trywrlock(&rwlock_shared)); + assert(!pthread_rwlock_unlock(&rwlock_shared)); + wait_until_updated(); stm_list_free(_STM_TL2->modified_objects); _STM_TL2->modified_objects = NULL; + assert(_STM_TL1->shadow_stack == _STM_TL1->shadow_stack_base); + free(_STM_TL1->shadow_stack); + + assert(_STM_TL2->old_objects_to_trace->count == 0); + stm_list_free(_STM_TL2->old_objects_to_trace); + + assert(_STM_TL2->uncommitted_pages->count == 0); + stm_list_free(_STM_TL2->uncommitted_pages); + set_gs_register(INVALID_GS_VALUE); } void _stm_teardown(void) { munmap(object_pages, TOTAL_MEMORY); + memset(flag_page_private, 0, sizeof(flag_page_private)); + pthread_rwlock_destroy(&rwlock_shared); object_pages = NULL; } @@ -571,7 +733,8 @@ them non-reserved; apparently the kernel just skips them very quickly.) 
*/ - int res = madvise(real_address(FIRST_READMARKER_PAGE * 4096UL), + int res = madvise((void*)real_address + ((object_t*) (FIRST_READMARKER_PAGE * 4096UL)), (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) * 4096UL, MADV_DONTNEED); if (res < 0) { @@ -610,6 +773,8 @@ wait_until_updated(); stm_list_clear(_STM_TL2->modified_objects); + assert(stm_list_is_empty(_STM_TL2->old_objects_to_trace)); + stm_list_clear(_STM_TL2->uncommitted_pages); /* check that there is no stm_abort() in the following maybe_update() */ _STM_TL1->jmpbufptr = NULL; @@ -646,6 +811,8 @@ start_exclusivelock(); _STM_TL1->jmpbufptr = NULL; /* cannot abort any more */ + + minor_collect(); /* copy modified object versions to other threads */ pending_updates = _STM_TL2->modified_objects; @@ -654,7 +821,32 @@ _stm_restore_local_state(other_thread_num); update_to_current_version(CHECK_CONFLICT); /* sets need_abort */ _stm_restore_local_state(my_thread_num); + + /* uncommitted_pages */ + long j; + for (j = 2; j < LARGE_OBJECT_WORDS; j++) { + alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; + uint16_t start = alloc->start; + uint16_t cur = (uintptr_t)alloc->next; + if (start == cur) + continue; + uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; + if (flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE) { + /* mark it as empty so it doesn't get used in the next + transaction */ + /* XXX: flag_partial_page!! */ + alloc->start = 0; + alloc->next = 0; + alloc->stop = 0; + } + } + STM_LIST_FOREACH(_STM_TL2->uncommitted_pages, ({ + uintptr_t pagenum = (uintptr_t)item; + flag_page_private[pagenum] = SHARED_PAGE; + })); + stm_list_clear(_STM_TL2->uncommitted_pages); + /* /\* walk the new_object_ranges and manually copy the new objects */ /* to the other thread's pages in the (hopefully rare) case that */ @@ -737,6 +929,7 @@ } /* stm_list_clear(_STM_TL2->new_object_ranges); */ stm_list_clear(_STM_TL2->modified_objects); + stm_list_clear(_STM_TL2->old_objects_to_trace); assert(_STM_TL1->jmpbufptr != NULL); assert(_STM_TL1->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ _STM_TL2->running_transaction = 0; diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -30,17 +30,23 @@ */ enum { - GCFLAG_WRITE_BARRIER = (1 << 0), /* set if the write-barrier slowpath needs to trigger. set on all old objects if there was no write-barrier on it in the same transaction and no collection inbetween. */ + GCFLAG_WRITE_BARRIER = (1 << 0), + /* set on objects which are in pages visible to others (SHARED + or PRIVATE), but not committed yet. So only visible from + this transaction. 
*/ + GCFLAG_NOT_COMMITTED = (1 << 1), + + GCFLAG_MOVED = (1 << 2), }; struct object_s { uint8_t stm_flags; /* reserved for the STM library */ uint8_t stm_write_lock; /* 1 if writeable by some thread */ - uint32_t header; /* for the user program -- only write in - newly allocated objects */ + /* make sure it doesn't get bigger than 4 bytes for performance + reasons */ }; struct read_marker_s { @@ -53,6 +59,8 @@ jmpbufptr_t *jmpbufptr; uint8_t transaction_read_version; uint16_t transaction_write_version; + object_t **shadow_stack; + object_t **shadow_stack_base; }; #define _STM_TL1 ((_thread_local1_t *)4352) @@ -77,13 +85,24 @@ _stm_write_slowpath(obj); } +static inline void stm_push_root(object_t *obj) +{ + *(_STM_TL1->shadow_stack++) = obj; +} + +static inline object_t *stm_pop_root(void) +{ + return *(--_STM_TL1->shadow_stack); +} /* must be provided by the user of this library */ -extern size_t stm_object_size_rounded_up(object_t *); +extern size_t stmcb_size(struct object_s *); +extern void stmcb_trace(struct object_s *, void (object_t **)); void _stm_restore_local_state(int thread_num); void _stm_teardown(void); void _stm_teardown_thread(void); +bool _stm_is_in_transaction(void); bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); diff --git a/c7/list.h b/c7/list.h --- a/c7/list.h +++ b/c7/list.h @@ -48,6 +48,11 @@ return lst->count; } +static inline object_t *stm_list_pop_item(struct stm_list_s *lst) +{ + return lst->items[--lst->count]; +} + static inline object_t *stm_list_item(struct stm_list_s *lst, uintptr_t index) { return lst->items[index]; diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -54,6 +54,14 @@ void _stm_stop_safe_point(void); bool _stm_check_stop_safe_point(void); +void _set_type_id(object_t *obj, uint32_t h); +uint32_t _get_type_id(object_t *obj); +bool _stm_is_in_transaction(void); + +void stm_push_root(object_t *obj); +object_t *stm_pop_root(void); + + void *memset(void *s, int c, size_t n); """) @@ -63,6 +71,13 @@ #include "core.h" +struct myobj_s { + struct object_s hdr; + uint32_t type_id; +}; +typedef TLPREFIX struct myobj_s myobj_t; + + size_t stm_object_size_rounded_up(object_t * obj) { return 16; } @@ -94,6 +109,45 @@ } +void _set_type_id(object_t *obj, uint32_t h) { + ((myobj_t*)obj)->type_id = h; +} + +uint32_t _get_type_id(object_t *obj) { + return ((myobj_t*)obj)->type_id; +} + +size_t stmcb_size(struct object_s *obj) +{ + struct myobj_s *myobj = (struct myobj_s*)obj; + if (myobj->type_id < 42142) { + /* basic case: tid equals 42 plus the size of the object */ + assert(myobj->type_id >= 42 + sizeof(struct myobj_s)); + return myobj->type_id - 42; + } + else { + int nrefs = myobj->type_id - 42142; + assert(nrefs < 100); + if (nrefs == 0) /* weakrefs */ + nrefs = 1; + return sizeof(struct myobj_s) + nrefs * sizeof(void*); + } +} + +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ + int i; + struct myobj_s *myobj = (struct myobj_s*)obj; + if (myobj->type_id < 42142) { + /* basic case: no references */ + return; + } + for (i=0; i < myobj->type_id - 42142; i++) { + object_t **ref = ((object_t **)(myobj + 1)) + i; + visit(ref); + } +} + ''', sources=source_files, define_macros=[('STM_TESTS', '1')], undef_macros=['NDEBUG'], @@ -102,15 +156,22 @@ force_generic_engine=True) +MAGIC_HEADER = ffi.cast('uint32_t', 42142) + + def is_in_nursery(ptr): return lib._stm_is_in_nursery(ptr) def stm_allocate_old(size): o = lib._stm_allocate_old(size) + tid = 42 + size + 
lib._set_type_id(o, tid) return o, lib._stm_real_address(o) def stm_allocate(size): o = lib.stm_allocate(size) + tid = 42 + size + lib._set_type_id(o, tid) return o, lib._stm_real_address(o) def stm_get_real_address(obj): @@ -131,6 +192,12 @@ def stm_was_written(o): return lib._stm_was_written(o) +def stm_push_root(o): + return lib.stm_push_root(o) + +def stm_pop_root(): + return lib.stm_pop_root() + def stm_start_transaction(): lib.stm_start_transaction(ffi.cast("jmpbufptr_t*", -1)) @@ -163,13 +230,23 @@ def teardown_method(self, meth): lib._stm_restore_local_state(1) + if lib._stm_is_in_transaction(): + stm_stop_transaction() lib._stm_teardown_thread() lib._stm_restore_local_state(0) + if lib._stm_is_in_transaction(): + stm_stop_transaction() lib._stm_teardown_thread() lib._stm_teardown() - def switch(self, thread_num): + def switch(self, thread_num, expect_conflict=False): assert thread_num != self.current_thread + if lib._stm_is_in_transaction(): + stm_start_safe_point() lib._stm_restore_local_state(thread_num) + if lib._stm_is_in_transaction(): + stm_stop_safe_point(expect_conflict) + elif expect_conflict: + assert False self.current_thread = thread_num diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -25,12 +25,12 @@ def test_transaction_start_stop(self): stm_start_transaction() - stm_start_safe_point() + self.switch(1) stm_start_transaction() stm_stop_transaction() self.switch(0) - stm_stop_safe_point() + stm_stop_transaction() def test_simple_read(self): @@ -38,6 +38,7 @@ lp1, _ = stm_allocate(16) stm_read(lp1) assert stm_was_read(lp1) + stm_stop_transaction() def test_simple_write(self): stm_start_transaction() @@ -45,6 +46,7 @@ assert stm_was_written(lp1) stm_write(lp1) assert stm_was_written(lp1) + stm_stop_transaction() def test_allocate_old(self): lp1, _ = stm_allocate_old(16) @@ -58,12 +60,17 @@ stm_write(lp1) assert stm_was_written(lp1) p1[15] = 'a' + self.switch(1) stm_start_transaction() stm_read(lp1) assert stm_was_read(lp1) tp1 = stm_get_real_address(lp1) assert tp1[15] == '\0' + stm_stop_transaction() + self.switch(0) + + stm_stop_transaction() def test_read_write_1(self): lp1, p1 = stm_allocate_old(16) @@ -77,35 +84,7 @@ p1 = stm_get_real_address(lp1) assert p1[8] == 'a' p1[8] = 'b' - stm_start_safe_point() - # - self.switch(0) - stm_start_transaction() - stm_read(lp1) - p1 = stm_get_real_address(lp1) - assert p1[8] == 'a' - stm_start_safe_point() - # - self.switch(1) - stm_stop_safe_point() - stm_stop_transaction(False) - # - self.switch(0) - stm_stop_safe_point(True) # detects rw conflict - - def test_read_write_2(self): - stm_start_transaction() - lp1, p1 = stm_allocate(16) - p1[8] = 'a' - stm_stop_transaction(False) - # - self.switch(1) - stm_start_transaction() - stm_write(lp1) - p1 = stm_get_real_address(lp1) - assert p1[8] == 'a' - p1[8] = 'b' # self.switch(0) stm_start_transaction() @@ -116,212 +95,256 @@ self.switch(1) stm_stop_transaction(False) # - self.switch(0) - p1 = stm_get_real_address(lp1) - assert p1[8] == 'a' + self.switch(0, expect_conflict=True) # detects rw conflict + + def test_commit_fresh_objects(self): + stm_start_transaction() + lp, p = stm_allocate(16) + p[8] = 'u' + stm_push_root(lp) + stm_stop_transaction() + lp = stm_pop_root() + + self.switch(1) + + stm_start_transaction() + stm_write(lp) # privatize page + p_ = stm_get_real_address(lp) + assert p != p_ + assert p_[8] == 'u' + stm_stop_transaction() + + - def test_start_transaction_updates(self): - 
stm_start_transaction() - p1 = stm_allocate(16) - p1[8] = 'a' - stm_stop_transaction(False) - # - self.switch(1) - stm_start_transaction() - stm_write(p1) - assert p1[8] == 'a' - p1[8] = 'b' - stm_stop_transaction(False) - # - self.switch(0) - assert p1[8] == 'a' - stm_start_transaction() - assert p1[8] == 'b' + # def test_read_write_2(self): + # stm_start_transaction() + # lp1, p1 = stm_allocate(16) + # p1[8] = 'a' + # stm_stop_transaction(False) + # # + # self.switch(1) + # stm_start_transaction() + # stm_write(lp1) + # p1 = stm_get_real_address(lp1) + # assert p1[8] == 'a' + # p1[8] = 'b' + # # + # self.switch(0) + # stm_start_transaction() + # stm_read(lp1) + # p1 = stm_get_real_address(lp1) + # assert p1[8] == 'a' + # # + # self.switch(1) + # stm_stop_transaction(False) + # # + # self.switch(0) + # p1 = stm_get_real_address(lp1) + # assert p1[8] == 'a' - def test_resolve_no_conflict_empty(self): - stm_start_transaction() - # - self.switch(1) - stm_start_transaction() - stm_stop_transaction(False) - # - self.switch(0) - stm_stop_transaction(False) + + # def test_start_transaction_updates(self): + # stm_start_transaction() + # p1 = stm_allocate(16) + # p1[8] = 'a' + # stm_stop_transaction(False) + # # + # self.switch(1) + # stm_start_transaction() + # stm_write(p1) + # assert p1[8] == 'a' + # p1[8] = 'b' + # stm_stop_transaction(False) + # # + # self.switch(0) + # assert p1[8] == 'a' + # stm_start_transaction() + # assert p1[8] == 'b' - def test_resolve_no_conflict_write_only_in_already_committed(self): - stm_start_transaction() - p1 = stm_allocate(16) - p1[8] = 'a' - stm_stop_transaction(False) - stm_start_transaction() - # - self.switch(1) - stm_start_transaction() - stm_write(p1) - p1[8] = 'b' - stm_stop_transaction(False) - # - self.switch(0) - assert p1[8] == 'a' - stm_stop_transaction(False) - assert p1[8] == 'b' + # def test_resolve_no_conflict_empty(self): + # stm_start_transaction() + # # + # self.switch(1) + # stm_start_transaction() + # stm_stop_transaction(False) + # # + # self.switch(0) + # stm_stop_transaction(False) - def test_resolve_write_read_conflict(self): - stm_start_transaction() - p1 = stm_allocate(16) - p1[8] = 'a' - stm_stop_transaction(False) - stm_start_transaction() - # - self.switch(1) - stm_start_transaction() - stm_write(p1) - p1[8] = 'b' - stm_stop_transaction(False) - # - self.switch(0) - stm_read(p1) - assert p1[8] == 'a' - stm_stop_transaction(expected_conflict=True) - assert p1[8] in ('a', 'b') - stm_start_transaction() - assert p1[8] == 'b' + # def test_resolve_no_conflict_write_only_in_already_committed(self): + # stm_start_transaction() + # p1 = stm_allocate(16) + # p1[8] = 'a' + # stm_stop_transaction(False) + # stm_start_transaction() + # # + # self.switch(1) + # stm_start_transaction() + # stm_write(p1) + # p1[8] = 'b' + # stm_stop_transaction(False) + # # + # self.switch(0) + # assert p1[8] == 'a' + # stm_stop_transaction(False) + # assert p1[8] == 'b' - def test_resolve_write_write_conflict(self): - stm_start_transaction() - p1 = stm_allocate(16) - p1[8] = 'a' - stm_stop_transaction(False) - stm_start_transaction() - # - self.switch(1) - stm_start_transaction() - stm_write(p1) - p1[8] = 'b' - stm_stop_transaction(False) - # - self.switch(0) - assert p1[8] == 'a' - stm_write(p1) - p1[8] = 'c' - stm_stop_transaction(expected_conflict=True) - assert p1[8] in ('a', 'b') - stm_start_transaction() - assert p1[8] == 'b' + # def test_resolve_write_read_conflict(self): + # stm_start_transaction() + # p1 = stm_allocate(16) + # p1[8] = 'a' + # 
stm_stop_transaction(False) + # stm_start_transaction() + # # + # self.switch(1) + # stm_start_transaction() + # stm_write(p1) + # p1[8] = 'b' + # stm_stop_transaction(False) + # # + # self.switch(0) + # stm_read(p1) + # assert p1[8] == 'a' + # stm_stop_transaction(expected_conflict=True) + # assert p1[8] in ('a', 'b') + # stm_start_transaction() + # assert p1[8] == 'b' - def test_resolve_write_write_no_conflict(self): - stm_start_transaction() - p1 = stm_allocate(16) - p2 = stm_allocate(16) - p1[8] = 'a' - p2[8] = 'A' - stm_stop_transaction(False) - stm_start_transaction() - # - self.switch(1) - stm_start_transaction() - stm_write(p1) - p1[8] = 'b' - stm_stop_transaction(False) - # - self.switch(0) - stm_write(p2) - p2[8] = 'C' - stm_stop_transaction(False) - assert p1[8] == 'b' - assert p2[8] == 'C' + # def test_resolve_write_write_conflict(self): + # stm_start_transaction() + # p1 = stm_allocate(16) + # p1[8] = 'a' + # stm_stop_transaction(False) + # stm_start_transaction() + # # + # self.switch(1) + # stm_start_transaction() + # stm_write(p1) + # p1[8] = 'b' + # stm_stop_transaction(False) + # # + # self.switch(0) + # assert p1[8] == 'a' + # stm_write(p1) + # p1[8] = 'c' + # stm_stop_transaction(expected_conflict=True) + # assert p1[8] in ('a', 'b') + # stm_start_transaction() + # assert p1[8] == 'b' - def test_page_extra_malloc_unchanged_page(self): - stm_start_transaction() - p1 = stm_allocate(16) - p2 = stm_allocate(16) - p1[8] = 'A' - p2[8] = 'a' - stm_stop_transaction(False) - stm_start_transaction() - # - self.switch(1) - stm_start_transaction() - stm_write(p1) - assert p1[8] == 'A' - p1[8] = 'B' - stm_stop_transaction(False) - # - self.switch(0) - stm_read(p2) - assert p2[8] == 'a' - p3 = stm_allocate(16) # goes into the same page, which is - p3[8] = ':' # not otherwise modified - stm_stop_transaction(False) - # - assert p1[8] == 'B' - assert p2[8] == 'a' - assert p3[8] == ':' + # def test_resolve_write_write_no_conflict(self): + # stm_start_transaction() + # p1 = stm_allocate(16) + # p2 = stm_allocate(16) + # p1[8] = 'a' + # p2[8] = 'A' + # stm_stop_transaction(False) + # stm_start_transaction() + # # + # self.switch(1) + # stm_start_transaction() + # stm_write(p1) + # p1[8] = 'b' + # stm_stop_transaction(False) + # # + # self.switch(0) + # stm_write(p2) + # p2[8] = 'C' + # stm_stop_transaction(False) + # assert p1[8] == 'b' + # assert p2[8] == 'C' - def test_page_extra_malloc_changed_page_before(self): - stm_start_transaction() - p1 = stm_allocate(16) - p2 = stm_allocate(16) - p1[8] = 'A' - p2[8] = 'a' - stm_stop_transaction(False) - stm_start_transaction() - # - self.switch(1) - stm_start_transaction() - stm_write(p1) - assert p1[8] == 'A' - p1[8] = 'B' - stm_stop_transaction(False) - # - self.switch(0) - stm_write(p2) - assert p2[8] == 'a' - p2[8] = 'b' - p3 = stm_allocate(16) # goes into the same page, which I already - p3[8] = ':' # modified just above - stm_stop_transaction(False) - # - assert p1[8] == 'B' - assert p2[8] == 'b' - assert p3[8] == ':' + # def test_page_extra_malloc_unchanged_page(self): + # stm_start_transaction() + # p1 = stm_allocate(16) + # p2 = stm_allocate(16) + # p1[8] = 'A' + # p2[8] = 'a' + # stm_stop_transaction(False) + # stm_start_transaction() + # # + # self.switch(1) + # stm_start_transaction() + # stm_write(p1) + # assert p1[8] == 'A' + # p1[8] = 'B' + # stm_stop_transaction(False) + # # + # self.switch(0) + # stm_read(p2) + # assert p2[8] == 'a' + # p3 = stm_allocate(16) # goes into the same page, which is + # p3[8] = ':' # not otherwise 
modified + # stm_stop_transaction(False) + # # + # assert p1[8] == 'B' + # assert p2[8] == 'a' + # assert p3[8] == ':' - def test_page_extra_malloc_changed_page_after(self): - stm_start_transaction() - p1 = stm_allocate(16) - p2 = stm_allocate(16) - p1[8] = 'A' - p2[8] = 'a' - stm_stop_transaction(False) - stm_start_transaction() - # - self.switch(1) - stm_start_transaction() - stm_write(p1) - assert p1[8] == 'A' - p1[8] = 'B' - stm_stop_transaction(False) - # - self.switch(0) - p3 = stm_allocate(16) # goes into the same page, which I will - p3[8] = ':' # modify just below - stm_write(p2) - assert p2[8] == 'a' - p2[8] = 'b' - stm_stop_transaction(False) - # - assert p1[8] == 'B' - assert p2[8] == 'b' - assert p3[8] == ':' + # def test_page_extra_malloc_changed_page_before(self): + # stm_start_transaction() + # p1 = stm_allocate(16) + # p2 = stm_allocate(16) + # p1[8] = 'A' + # p2[8] = 'a' + # stm_stop_transaction(False) + # stm_start_transaction() + # # + # self.switch(1) + # stm_start_transaction() + # stm_write(p1) + # assert p1[8] == 'A' + # p1[8] = 'B' + # stm_stop_transaction(False) + # # + # self.switch(0) + # stm_write(p2) + # assert p2[8] == 'a' + # p2[8] = 'b' + # p3 = stm_allocate(16) # goes into the same page, which I already + # p3[8] = ':' # modified just above + # stm_stop_transaction(False) + # # + # assert p1[8] == 'B' + # assert p2[8] == 'b' + # assert p3[8] == ':' - def test_overflow_write_history(self): - stm_start_transaction() - plist = [stm_allocate(n) for n in range(16, 256, 8)] - stm_stop_transaction(False) - # - for i in range(20): - stm_start_transaction() - for p in plist: - stm_write(p) - stm_stop_transaction(False) + # def test_page_extra_malloc_changed_page_after(self): + # stm_start_transaction() + # p1 = stm_allocate(16) + # p2 = stm_allocate(16) + # p1[8] = 'A' + # p2[8] = 'a' + # stm_stop_transaction(False) + # stm_start_transaction() + # # + # self.switch(1) + # stm_start_transaction() + # stm_write(p1) + # assert p1[8] == 'A' + # p1[8] = 'B' + # stm_stop_transaction(False) + # # + # self.switch(0) + # p3 = stm_allocate(16) # goes into the same page, which I will + # p3[8] = ':' # modify just below + # stm_write(p2) + # assert p2[8] == 'a' + # p2[8] = 'b' + # stm_stop_transaction(False) + # # + # assert p1[8] == 'B' + # assert p2[8] == 'b' + # assert p3[8] == ':' + + # def test_overflow_write_history(self): + # stm_start_transaction() + # plist = [stm_allocate(n) for n in range(16, 256, 8)] + # stm_stop_transaction(False) + # # + # for i in range(20): + # stm_start_transaction() + # for p in plist: + # stm_write(p) + # stm_stop_transaction(False) From noreply at buildbot.pypy.org Thu Jan 16 18:08:58 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 16 Jan 2014 18:08:58 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: give up with the idea of sharing idlower and isupper between bytes and unicode, they are slightly different Message-ID: <20140116170859.017601C0459@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: refactor-str-types Changeset: r68701:6b507443d1e4 Date: 2014-01-16 18:08 +0100 http://bitbucket.org/pypy/pypy/changeset/6b507443d1e4/ Log: give up with the idea of sharing idlower and isupper between bytes and unicode, they are slightly different diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -246,6 +246,7 @@ def descr_isdigit(self, space): return self._is_generic(space, '_isdigit') + # this is 
only for bytes and bytesarray: unicodeobject overrides it def descr_islower(self, space): v = self._val(space) if len(v) == 1: @@ -283,6 +284,7 @@ return space.newbool(cased) + # this is only for bytes and bytesarray: unicodeobject overrides it def descr_isupper(self, space): v = self._val(space) if len(v) == 1: diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -218,6 +218,12 @@ assert u'\u1FFc'.istitle() assert u'Greek \u1FFcitlecases ...'.istitle() + def test_islower_isupper_with_titlecase(self): + # \u01c5 is a char which is neither lowercase nor uppercase, but + # titlecase + assert not u'\u01c5abc'.islower() + assert not u'\u01c5ABC'.isupper() + def test_capitalize(self): assert u"brown fox".capitalize() == u"Brown fox" assert u' hello '.capitalize() == u' hello ' diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -327,6 +327,26 @@ def descr_isnumeric(self, space): return self._is_generic(space, '_isnumeric') + def descr_islower(self, space): + cased = False + for uchar in self._value: + if (unicodedb.isupper(ord(uchar)) or + unicodedb.istitle(ord(uchar))): + return space.w_False + if not cased and unicodedb.islower(ord(uchar)): + cased = True + return space.newbool(cased) + + def descr_isupper(self, space): + cased = False + for uchar in self._value: + if (unicodedb.islower(ord(uchar)) or + unicodedb.istitle(ord(uchar))): + return space.w_False + if not cased and unicodedb.isupper(ord(uchar)): + cased = True + return space.newbool(cased) + def wrapunicode(space, uni): return W_UnicodeObject(uni) From noreply at buildbot.pypy.org Thu Jan 16 18:12:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 16 Jan 2014 18:12:11 +0100 (CET) Subject: [pypy-commit] stmgc c7: Add some asserts Message-ID: <20140116171211.34FF31C0459@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r614:5153d78a57ad Date: 2014-01-16 17:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/5153d78a57ad/ Log: Add some asserts diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -237,8 +237,14 @@ SHARED_PAGE, REMAPPING_PAGE); #endif if (!was_shared) { - while (flag_page_private[pagenum] == REMAPPING_PAGE) + while (1) { + uint8_t state = ((uint8_t volatile *)flag_page_private)[pagenum]; + if (state != REMAPPING_PAGE) { + assert(state == PRIVATE_PAGE); + break; + } spin_loop(); + } return; } @@ -371,6 +377,7 @@ void _stm_write_slowpath(object_t *obj) { uintptr_t pagenum = ((uintptr_t)obj) / 4096; + assert(pagenum < NB_PAGES); /* old objects from the same transaction */ if (flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE From noreply at buildbot.pypy.org Thu Jan 16 18:12:42 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 16 Jan 2014 18:12:42 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix test Message-ID: <20140116171242.44A081C0459@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r615:0be7ec66d3be Date: 2014-01-16 18:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/0be7ec66d3be/ Log: fix test diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -505,9 +505,10 @@ return; /* the location the object moved to is at an 8b offset */ - object_t **pforwared = (object_t**)(((char*)(*pobj)) + 8); + localchar_t *temp = ((localchar_t *)(*pobj)) + 8; + 
object_t * TLPREFIX *pforwarded = (object_t* TLPREFIX *)temp; if ((*pobj)->stm_flags & GCFLAG_MOVED) { - *pobj = *pforwared; + *pobj = *pforwarded; return; } @@ -520,7 +521,7 @@ size); (*pobj)->stm_flags |= GCFLAG_MOVED; - *pforwared = moved; + *pforwarded = moved; *pobj = moved; _STM_TL2->old_objects_to_trace = stm_list_append From noreply at buildbot.pypy.org Thu Jan 16 22:42:16 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 16 Jan 2014 22:42:16 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: (antocuni, mjacob): fix a corner case Message-ID: <20140116214216.7344F1C0500@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: refactor-str-types Changeset: r68702:f3faf55d996e Date: 2014-01-16 22:41 +0100 http://bitbucket.org/pypy/pypy/changeset/f3faf55d996e/ Log: (antocuni, mjacob): fix a corner case diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -65,7 +65,7 @@ """x.__getitem__(y) <==> x[y]""" def descr_getnewargs(self, space): - """""" + "" def descr_getslice(self, space, w_start, w_stop): """x.__getslice__(i, j) <==> x[i:j] diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -45,7 +45,13 @@ return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) def descr_add(self, space, w_other): - return self._new(self._val(space) + self._op_val(space, w_other)) + try: + other = self._op_val(space, w_other) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + return self._new(self._val(space) + other) def descr_mul(self, space, w_times): try: diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -776,6 +776,13 @@ iterable = "hello" raises(TypeError, len, iter(iterable)) + def test___radd__(self): + class Foo(object): + def __radd__(self, other): + return 42 + x = Foo() + assert "hello" + x == 42 + class AppTestPrebuilt(AppTestBytesObject): spaceconfig = {"objspace.std.withprebuiltchar": True} From noreply at buildbot.pypy.org Fri Jan 17 00:29:42 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 17 Jan 2014 00:29:42 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: (antocuni, mjacob): add a fastpath for strings, trying to fix the slowdown that we observe in things like unicode_string+byte_string Message-ID: <20140116232942.318CE1C0291@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: refactor-str-types Changeset: r68703:ec53889e6f05 Date: 2014-01-17 00:29 +0100 http://bitbucket.org/pypy/pypy/changeset/ec53889e6f05/ Log: (antocuni, mjacob): add a fastpath for strings, trying to fix the slowdown that we observe in things like unicode_string+byte_string diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -88,6 +88,8 @@ def _op_val(self, space, w_other): if isinstance(w_other, W_UnicodeObject): return w_other._value + if space.isinstance_w(w_other, space.w_str): + return unicode_from_string(space, w_other)._value return unicode_from_encoded_object(space, w_other, None, "strict")._value def _chr(self, char): From noreply at buildbot.pypy.org Fri Jan 17 04:16:01 2014 From: noreply at 
buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 04:16:01 +0100 (CET) Subject: [pypy-commit] pypy improve-docs: hg merge default Message-ID: <20140117031601.BE1481C30C5@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: improve-docs Changeset: r68704:3311496dd484 Date: 2014-01-17 02:50 +0000 http://bitbucket.org/pypy/pypy/changeset/3311496dd484/ Log: hg merge default diff too long, truncating to 2000 out of 58726 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -27,12 +27,14 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -1780,7 +1780,19 @@ # error if this argument is not allowed with other previously # seen arguments, assuming that actions that use the default # value don't really count as "present" - if argument_values is not action.default: + + # XXX PyPy bug-to-bug compatibility: "is" on primitive types + # is not consistent in CPython. 
We'll assume it is close + # enough for ints (which is true only for "small ints"), but + # for floats and longs and complexes we'll go for the option + # of forcing "is" to say False, like it usually does on + # CPython. A fix is pending on CPython trunk + # (http://bugs.python.org/issue18943) but that might change + # the details of the semantics and so not be applied to 2.7. + # See the line AA below. + + if (argument_values is not action.default or + type(argument_values) in (float, long, complex)): # AA seen_non_default_actions.add(action) for conflict_action in action_conflicts.get(action, []): if conflict_action in seen_non_default_actions: diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') @@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. 
+ buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/test/keycert.pem b/lib-python/2.7/test/keycert.pem --- a/lib-python/2.7/test/keycert.pem +++ b/lib-python/2.7/test/keycert.pem @@ -1,32 +1,31 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXwIBAAKBgQC8ddrhm+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9L -opdJhTvbGfEj0DQs1IE8M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVH -fhi/VwovESJlaBOp+WMnfhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQAB -AoGBAK0FZpaKj6WnJZN0RqhhK+ggtBWwBnc0U/ozgKz2j1s3fsShYeiGtW6CK5nU -D1dZ5wzhbGThI7LiOXDvRucc9n7vUgi0alqPQ/PFodPxAN/eEYkmXQ7W2k7zwsDA -IUK0KUhktQbLu8qF/m8qM86ba9y9/9YkXuQbZ3COl5ahTZrhAkEA301P08RKv3KM -oXnGU2UHTuJ1MAD2hOrPxjD4/wxA/39EWG9bZczbJyggB4RHu0I3NOSFjAm3HQm0 -ANOu5QK9owJBANgOeLfNNcF4pp+UikRFqxk5hULqRAWzVxVrWe85FlPm0VVmHbb/ -loif7mqjU8o1jTd/LM7RD9f2usZyE2psaw8CQQCNLhkpX3KO5kKJmS9N7JMZSc4j -oog58yeYO8BBqKKzpug0LXuQultYv2K4veaIO04iL9VLe5z9S/Q1jaCHBBuXAkEA -z8gjGoi1AOp6PBBLZNsncCvcV/0aC+1se4HxTNo2+duKSDnbq+ljqOM+E7odU+Nq -ewvIWOG//e8fssd0mq3HywJBAJ8l/c8GVmrpFTx8r/nZ2Pyyjt3dH1widooDXYSV -q6Gbf41Llo5sYAtmxdndTLASuHKecacTgZVhy0FryZpLKrU= ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm +LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 +ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP +USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt +CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq +SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK +UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y +BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ +ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 +oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik +eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F +0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS +x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ +SPIXQuT8RMPDVNQ= +-----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIICpzCCAhCgAwIBAgIJAP+qStv1cIGNMA0GCSqGSIb3DQEBBQUAMIGJMQswCQYD -VQQGEwJVUzERMA8GA1UECBMIRGVsYXdhcmUxEzARBgNVBAcTCldpbG1pbmd0b24x -IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMQwwCgYDVQQLEwNT -U0wxHzAdBgNVBAMTFnNvbWVtYWNoaW5lLnB5dGhvbi5vcmcwHhcNMDcwODI3MTY1 -NDUwWhcNMTMwMjE2MTY1NDUwWjCBiTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCERl -bGF3YXJlMRMwEQYDVQQHEwpXaWxtaW5ndG9uMSMwIQYDVQQKExpQeXRob24gU29m -dHdhcmUgRm91bmRhdGlvbjEMMAoGA1UECxMDU1NMMR8wHQYDVQQDExZzb21lbWFj -aGluZS5weXRob24ub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC8ddrh -m+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9LopdJhTvbGfEj0DQs1IE8 -M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVHfhi/VwovESJlaBOp+WMn -fhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQABoxUwEzARBglghkgBhvhC -AQEEBAMCBkAwDQYJKoZIhvcNAQEFBQADgYEAF4Q5BVqmCOLv1n8je/Jw9K669VXb -08hyGzQhkemEBYQd6fzQ9A/1ZzHkJKb1P6yreOLSEh4KcxYPyrLRC1ll8nr5OlCx -CMhKkTnR6qBsdNV0XtdU2+N25hqW+Ma4ZeqsN/iiJVCGNOZGnvQuvCAGWF8+J/f/ -iHkC6gGdBJhogs4= +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw 
+gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/sha256.pem b/lib-python/2.7/test/sha256.pem --- a/lib-python/2.7/test/sha256.pem +++ b/lib-python/2.7/test/sha256.pem @@ -1,129 +1,128 @@ # Certificate chain for https://sha256.tbs-internet.com - 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com - i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC + 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=Certificats TBS X509/CN=ecom.tbs-x509.com + i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business -----BEGIN CERTIFICATE----- -MIIGXTCCBUWgAwIBAgIRAMmag+ygSAdxZsbyzYjhuW0wDQYJKoZIhvcNAQELBQAw -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl +MIIGTjCCBTagAwIBAgIQOh3d9dNDPq1cSdJmEiMpqDANBgkqhkiG9w0BAQUFADCB +yTELMAkGA1UEBhMCRlIxETAPBgNVBAgTCENhbHZhZG9zMQ0wCwYDVQQHEwRDYWVu +MRUwEwYDVQQKEwxUQlMgSU5URVJORVQxSDBGBgNVBAsTP1Rlcm1zIGFuZCBDb25k +aXRpb25zOiBodHRwOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0EvcmVwb3NpdG9y +eTEYMBYGA1UECxMPVEJTIElOVEVSTkVUIENBMR0wGwYDVQQDExRUQlMgWDUwOSBD +QSBidXNpbmVzczAeFw0xMTAxMjUwMDAwMDBaFw0xMzAyMDUyMzU5NTlaMIHHMQsw +CQYDVQQGEwJGUjEOMAwGA1UEERMFMTQwMDAxETAPBgNVBAgTCENhbHZhZG9zMQ0w +CwYDVQQHEwRDQUVOMRswGQYDVQQJExIyMiBydWUgZGUgQnJldGFnbmUxFTATBgNV +BAoTDFRCUyBJTlRFUk5FVDEXMBUGA1UECxMOMDAwMiA0NDA0NDM4MTAxHTAbBgNV +BAsTFENlcnRpZmljYXRzIFRCUyBYNTA5MRowGAYDVQQDExFlY29tLnRicy14NTA5 +LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKRrlHUnJ++1lpcg +jtYco7cdmRe+EEfTmwPfCdfV3G1QfsTSvY6FfMpm/83pqHfT+4ANwr18wD9ZrAEN +G16mf9VdCGK12+TP7DmqeZyGIqlFFoahQnmb8EarvE43/1UeQ2CV9XmzwZvpqeli +LfXsFonawrY3H6ZnMwS64St61Z+9gdyuZ/RbsoZBbT5KUjDEG844QRU4OT1IGeEI +eY5NM5RNIh6ZNhVtqeeCxMS7afONkHQrOco73RdSTRck/Hj96Ofl3MHNHryr+AMK +DGFk1kLCZGpPdXtkxXvaDeQoiYDlil26CWc+YK6xyDPMdsWvoG14ZLyCpzMXA7/7 +4YAQRH0CAwEAAaOCAjAwggIsMB8GA1UdIwQYMBaAFBoJBMz5CY+7HqDO1KQUf0vV +I1jNMB0GA1UdDgQWBBQgOU8HsWzbmD4WZP5Wtdw7jca2WDAOBgNVHQ8BAf8EBAMC +BaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw +TAYDVR0gBEUwQzBBBgsrBgEEAYDlNwIBATAyMDAGCCsGAQUFBwIBFiRodHRwczov +L3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL0NQUzEwdwYDVR0fBHAwbjA3oDWgM4Yx +aHR0cDovL2NybC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNy +bDAzoDGgL4YtaHR0cDovL2NybC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5l +c3MuY3JsMIGwBggrBgEFBQcBAQSBozCBoDA9BggrBgEFBQcwAoYxaHR0cDovL2Ny +dC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNydDA5BggrBgEF +BQcwAoYtaHR0cDovL2NydC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5lc3Mu +Y3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wMwYDVR0R +BCwwKoIRZWNvbS50YnMteDUwOS5jb22CFXd3dy5lY29tLnRicy14NTA5LmNvbTAN +BgkqhkiG9w0BAQUFAAOCAQEArT4NHfbY87bGAw8lPV4DmHlmuDuVp/y7ltO3Ynse +3Rz8RxW2AzuO0Oy2F0Cu4yWKtMyEyMXyHqWtae7ElRbdTu5w5GwVBLJHClCzC8S9 +SpgMMQTx3Rgn8vjkHuU9VZQlulZyiPK7yunjc7c310S9FRZ7XxOwf8Nnx4WnB+No 
+WrfApzhhQl31w+RyrNxZe58hCfDDHmevRvwLjQ785ZoQXJDj2j3qAD4aI2yB8lB5 +oaE1jlCJzC7Kmz/Y9jzfmv/zAs1LQTm9ktevv4BTUFaGjv9jxnQ1xnS862ZiouLW +zZYIlYPf4F6JjXGiIQgQRglILUfq3ftJd9/ok9W9ZF8h8w== +-----END CERTIFICATE----- + 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business + i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root +-----BEGIN CERTIFICATE----- +MIIFPzCCBCegAwIBAgIQDlBz/++iRSmLDeVRHT/hADANBgkqhkiG9w0BAQUFADBv +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk +ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF +eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDcwOTE4MTkyMlow +gckxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv -cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMB4XDTEwMDIxODAwMDAwMFoXDTEyMDIxOTIzNTk1OVowgcsxCzAJBgNV -BAYTAkZSMQ4wDAYDVQQREwUxNDAwMDERMA8GA1UECBMIQ2FsdmFkb3MxDTALBgNV -BAcTBENBRU4xGzAZBgNVBAkTEjIyIHJ1ZSBkZSBCcmV0YWduZTEVMBMGA1UEChMM -VEJTIElOVEVSTkVUMRcwFQYDVQQLEw4wMDAyIDQ0MDQ0MzgxMDEbMBkGA1UECxMS -c2hhLTI1NiBwcm9kdWN0aW9uMSAwHgYDVQQDExdzaGEyNTYudGJzLWludGVybmV0 -LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbuM8VT7f0nntwu -N3F7v9KIBlhKNAxqCrziOXU5iqUt8HrQB3DtHbdmII+CpVUlwlmepsx6G+srEZ9a -MIGAy0nxi5aLb7watkyIdPjJTMvTUBQ/+RPWzt5JtYbbY9BlJ+yci0dctP74f4NU -ISLtlrEjUbf2gTohLrcE01TfmOF6PDEbB5PKDi38cB3NzKfizWfrOaJW6Q1C1qOJ -y4/4jkUREX1UFUIxzx7v62VfjXSGlcjGpBX1fvtABQOSLeE0a6gciDZs1REqroFf -5eXtqYphpTa14Z83ITXMfgg5Nze1VtMnzI9Qx4blYBw4dgQVEuIsYr7FDBOITDzc -VEVXZx0CAwEAAaOCAj8wggI7MB8GA1UdIwQYMBaAFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMB0GA1UdDgQWBBSJKI/AYVI9RQNY0QPIqc8ej2QivTAOBgNVHQ8BAf8EBAMC -BaAwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAQYIKwYBBQUHAwIG -CisGAQQBgjcKAwMGCWCGSAGG+EIEATBMBgNVHSAERTBDMEEGCysGAQQBgOU3AgQB -MDIwMAYIKwYBBQUHAgEWJGh0dHBzOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0Ev -Q1BTNDBtBgNVHR8EZjBkMDKgMKAuhixodHRwOi8vY3JsLnRicy1pbnRlcm5ldC5j -b20vVEJTWDUwOUNBU0dDLmNybDAuoCygKoYoaHR0cDovL2NybC50YnMteDUwOS5j -b20vVEJTWDUwOUNBU0dDLmNybDCBpgYIKwYBBQUHAQEEgZkwgZYwOAYIKwYBBQUH -MAKGLGh0dHA6Ly9jcnQudGJzLWludGVybmV0LmNvbS9UQlNYNTA5Q0FTR0MuY3J0 -MDQGCCsGAQUFBzAChihodHRwOi8vY3J0LnRicy14NTA5LmNvbS9UQlNYNTA5Q0FT -R0MuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wPwYD -VR0RBDgwNoIXc2hhMjU2LnRicy1pbnRlcm5ldC5jb22CG3d3dy5zaGEyNTYudGJz -LWludGVybmV0LmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAA5NL0D4QSqhErhlkdPmz -XtiMvdGL+ZehM4coTRIpasM/Agt36Rc0NzCvnQwKE+wkngg1Gy2qe7Q0E/ziqBtB -fZYzdVgu1zdiL4kTaf+wFKYAFGsFbyeEmXysy+CMwaNoF2vpSjCU1UD56bEnTX/W -fxVZYxtBQUpnu2wOsm8cDZuZRv9XrYgAhGj9Tt6F0aVHSDGn59uwShG1+BVF/uju -SCyPTTjL1oc7YElJUzR/x4mQJYvtQI8gDIDAGEOs7v3R/gKa5EMfbUQUI4C84UbI -Yz09Jdnws/MkC/Hm1BZEqk89u7Hvfv+oHqEb0XaUo0TDfsxE0M1sMdnLb91QNQBm -UQ== ------END CERTIFICATE----- - 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC - i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root ------BEGIN CERTIFICATE----- -MIIFVjCCBD6gAwIBAgIQXpDZ0ETJMV02WTx3GTnhhTANBgkqhkiG9w0BAQUFADBv -MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk -ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF -eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDYyNDE5MDYzMFow -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl 
-bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u -ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv -cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgOkO3f7wzN6 -rOjg45tR5vjBfzK7qmV9IBxb/QW9EEXxG+E7FNhZqQLtwGBKoSsHTnQqV75wWMk0 -9tinWvftBkSpj5sTi/8cbzJfUvTSVYh3Qxv6AVVjMMH/ruLjE6y+4PoaPs8WoYAQ -ts5R4Z1g8c/WnTepLst2x0/Wv7GmuoQi+gXvHU6YrBiu7XkeYhzc95QdviWSJRDk -owhb5K43qhcvjRmBfO/paGlCliDGZp8mHwrI21mwobWpVjTxZRwYO3bd4+TGcI4G -Ie5wmHwE8F7SK1tgSqbBacKjDa93j7txKkfz/Yd2n7TGqOXiHPsJpG655vrKtnXk -9vs1zoDeJQIDAQABo4IBljCCAZIwHQYDVR0OBBYEFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMCAGA1UdJQQZ -MBcGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAYBgNVHSAEETAPMA0GCysGAQQBgOU3 -AgQBMHsGA1UdHwR0MHIwOKA2oDSGMmh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL0Fk -ZFRydXN0RXh0ZXJuYWxDQVJvb3QuY3JsMDagNKAyhjBodHRwOi8vY3JsLmNvbW9k -by5uZXQvQWRkVHJ1c3RFeHRlcm5hbENBUm9vdC5jcmwwgYAGCCsGAQUFBwEBBHQw -cjA4BggrBgEFBQcwAoYsaHR0cDovL2NydC5jb21vZG9jYS5jb20vQWRkVHJ1c3RV -VE5TR0NDQS5jcnQwNgYIKwYBBQUHMAKGKmh0dHA6Ly9jcnQuY29tb2RvLm5ldC9B -ZGRUcnVzdFVUTlNHQ0NBLmNydDARBglghkgBhvhCAQEEBAMCAgQwDQYJKoZIhvcN -AQEFBQADggEBAK2zEzs+jcIrVK9oDkdDZNvhuBYTdCfpxfFs+OAujW0bIfJAy232 -euVsnJm6u/+OrqKudD2tad2BbejLLXhMZViaCmK7D9nrXHx4te5EP8rL19SUVqLY -1pTnv5dhNgEgvA7n5lIzDSYs7yRLsr7HJsYPr6SeYSuZizyX1SNz7ooJ32/F3X98 -RB0Mlc/E0OyOrkQ9/y5IrnpnaSora8CnUrV5XNOg+kyCz9edCyx4D5wXYcwZPVWz -8aDqquESrezPyjtfi4WRO4s/VD3HLZvOxzMrWAVYCDG9FxaOhF0QGuuG1F7F3GKV -v6prNyCl016kRl2j1UT+a7gLd8fA25A4C9E= +cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEdMBsGA1UEAxMUVEJTIFg1MDkg +Q0EgYnVzaW5lc3MwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB1PAU +qudCcz3tmyGcf+u6EkZqonKKHrV4gZYbvVkIRojmmlhfi/jwvpHvo8bqSt/9Rj5S +jhCDW0pcbI+IPPtD1Jy+CHNSfnMqVDy6CKQ3p5maTzCMG6ZT+XjnvcND5v+FtaiB +xk1iCX6uvt0jeUtdZvYbyytsSDE6c3Y5//wRxOF8tM1JxibwO3pyER26jbbN2gQz +m/EkdGjLdJ4svPk23WDAvQ6G0/z2LcAaJB+XLfqRwfQpHQvfKa1uTi8PivC8qtip +rmNQMMPMjxSK2azX8cKjjTDJiUKaCb4VHlJDWKEsCFRpgJAoAuX8f7Yfs1M4esGo +sWb3PGspK3O22uIlAgMBAAGjggF6MIIBdjAdBgNVHQ4EFgQUGgkEzPkJj7seoM7U +pBR/S9UjWM0wDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwGAYD +VR0gBBEwDzANBgsrBgEEAYDlNwIBATB7BgNVHR8EdDByMDigNqA0hjJodHRwOi8v +Y3JsLmNvbW9kb2NhLmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LmNybDA2oDSg +MoYwaHR0cDovL2NybC5jb21vZG8ubmV0L0FkZFRydXN0RXh0ZXJuYWxDQVJvb3Qu +Y3JsMIGGBggrBgEFBQcBAQR6MHgwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29t +b2RvY2EuY29tL0FkZFRydXN0VVROU2VydmVyQ0EuY3J0MDkGCCsGAQUFBzAChi1o +dHRwOi8vY3J0LmNvbW9kby5uZXQvQWRkVHJ1c3RVVE5TZXJ2ZXJDQS5jcnQwEQYJ +YIZIAYb4QgEBBAQDAgIEMA0GCSqGSIb3DQEBBQUAA4IBAQA7mqrMgk/MrE6QnbNA +h4nRCn2ti4bg4w2C3lB6bSvRPnYwuNw9Jb8vuKkNFzRDxNJXqVDZdfFW5CVQJuyd +nfAx83+wk+spzvFaE1KhFYfN9G9pQfXUfvDRoIcJgPEKUXL1wRiOG+IjU3VVI8pg +IgqHkr7ylln5i5zCiFAPuIJmYUSFg/gxH5xkCNcjJqqrHrHatJr6Qrrke93joupw +oU1njfAcZtYp6fbiK6u2b1pJqwkVBE8RsfLnPhRj+SFbpvjv8Od7o/ieJhFIYQNU +k2jX2u8qZnAiNw93LZW9lpYjtuvMXq8QQppENNja5b53q7UwI+lU7ZGjZ7quuESp +J6/5 -----END CERTIFICATE----- 2 s:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEZjCCA06gAwIBAgIQUSYKkxzif5zDpV954HKugjANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIETzCCAzegAwIBAgIQHM5EYpUZep1jUvnyI6m2mDANBgkqhkiG9w0BAQUFADCB 
+lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw0wNTA2MDcwODA5MTBaFw0xOTA2MjQxOTA2MzBaMG8xCzAJBgNVBAYT -AlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0 -ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB -IFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39xoz5vIABC05 -4E5b7R+8bA/Ntfojts7emxEzl6QpTH2Tn71KvJPtAxrjj8/lbVBa1pcplFqAsEl6 -2y6V/bjKvzc4LR4+kUGtcFbH8E8/6DKedMrIkFTpxl8PeJ2aQDwOrGGqXhSPnoeh -alDc15pOrwWzpnGUnHGzUGAKxxOdOAeGAqjpqGkmGJCrTLBPI6s6T4TY386f4Wlv -u9dC12tE5Met7m1BX3JacQg3s3llpFmglDf3AC8NwpJy2tA4ctsUqEXEXSp9t7TW -xO6szRNEt8kr3UMAJfphuWlqWCMRt6czj1Z1WfXNKddGtworZbbTQm8Vsrh7++/p -XVPVNFonAgMBAAGjgdgwgdUwHwYDVR0jBBgwFoAUUzLRs89/+uDxoF2FTpLSnkUd -tE8wHQYDVR0OBBYEFK29mHo0tCb3+sQmVO8DveAky1QaMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MBEGCWCGSAGG+EIBAQQEAwIBAjAgBgNVHSUEGTAX -BgorBgEEAYI3CgMDBglghkgBhvhCBAEwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDov -L2NybC51c2VydHJ1c3QuY29tL1VUTi1EQVRBQ29ycFNHQy5jcmwwDQYJKoZIhvcN -AQEFBQADggEBAMbuUxdoFLJRIh6QWA2U/b3xcOWGLcM2MY9USEbnLQg3vGwKYOEO -rVE04BKT6b64q7gmtOmWPSiPrmQH/uAB7MXjkesYoPF1ftsK5p+R26+udd8jkWjd -FwBaS/9kbHDrARrQkNnHptZt9hPk/7XJ0h4qy7ElQyZ42TCbTg0evmnv3+r+LbPM -+bDdtRTKkdSytaX7ARmjR3mfnYyVhzT4HziS2jamEfpr62vp3EV4FTkG101B5CHI -3C+H0be/SGB1pWLLJN47YaApIKa+xWycxOkKaSLvkTr6Jq/RW0GnOuL4OAdCq8Fb -+M5tug8EPzI0rNwEKNdwMBQmBsTkm5jVz3g= +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNMDUwNjA3MDgwOTEwWhcNMTkwNzA5MTgxOTIyWjBvMQswCQYD +VQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0 +IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5h +bCBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt/caM+by +AAQtOeBOW+0fvGwPzbX6I7bO3psRM5ekKUx9k5+9SryT7QMa44/P5W1QWtaXKZRa +gLBJetsulf24yr83OC0ePpFBrXBWx/BPP+gynnTKyJBU6cZfD3idmkA8Dqxhql4U +j56HoWpQ3NeaTq8Fs6ZxlJxxs1BgCscTnTgHhgKo6ahpJhiQq0ywTyOrOk+E2N/O +n+Fpb7vXQtdrROTHre5tQV9yWnEIN7N5ZaRZoJQ39wAvDcKSctrQOHLbFKhFxF0q +fbe01sTurM0TRLfJK91DACX6YblpalgjEbenM49WdVn1zSnXRrcKK2W200JvFbK4 +e/vv6V1T1TRaJwIDAQABo4G9MIG6MB8GA1UdIwQYMBaAFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMB0GA1UdDgQWBBStvZh6NLQm9/rEJlTvA73gJMtUGjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zARBglghkgBhvhCAQEEBAMCAQIwRAYDVR0f +BD0wOzA5oDegNYYzaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly +c3QtSGFyZHdhcmUuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQByQhANOs4kClrwF8BW +onvUOGCSjRK52zYZgDXYNjDtmr5rJ6NyPFDNn+JxkLpjYetIFMTbSRe679Bt8m7a +gIAoQYFQtxMuyLnJegB2aEbQiIxh/tC21UcFF7ktdnDoTlA6w3pLuvunaI84Of3o +2YBrhzkTbCfaYk5JRlTpudW9DkUkHBsyx3nknPKnplkIGaK0jgn8E0n+SFabYaHk +I9LroYT/+JtLefh9lgBdAgVv0UPbzoGfuDsrk/Zh+UrgbLFpHoVnElhzbkh64Z0X +OGaJunQc68cCZu5HTn/aK7fBGMcVflRCXLVEQpU9PIAdGA8Ynvg684t8GMaKsRl1 +jIGZ -----END CERTIFICATE----- - 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB 
+lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG -EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy +oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy +bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py --- a/lib-python/2.7/test/test_mailbox.py +++ b/lib-python/2.7/test/test_mailbox.py @@ -38,14 +38,9 @@ def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): - for path, dirs, files in os.walk(target, topdown=False): - for name in files: - os.remove(os.path.join(path, name)) - for name in dirs: - os.rmdir(os.path.join(path, name)) - os.rmdir(target) + test_support.rmtree(target) elif os.path.exists(target): - os.remove(target) + test_support.unlink(target) class TestMailbox(TestBase): @@ -137,6 +132,7 @@ msg = 
self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.fp.read(), '1') + msg.fp.close() def test_getitem(self): # Retrieve message using __getitem__() @@ -169,10 +165,14 @@ # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) - self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'), + msg0 = self._box.get_file(key0) + self.assertEqual(msg0.read().replace(os.linesep, '\n'), self._template % 0) - self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'), + msg1 = self._box.get_file(key1) + self.assertEqual(msg1.read().replace(os.linesep, '\n'), _sample_message) + msg0.close() + msg1.close() def test_get_file_can_be_closed_twice(self): # Issue 11700 @@ -407,6 +407,7 @@ self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) + oldbox = self._box method() if should_call_close: self._box.close() @@ -415,6 +416,7 @@ self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) + oldbox.close() def test_dump_message(self): # Write message representations to disk @@ -1835,6 +1837,10 @@ def setUp(self): # create a new maildir mailbox to work with: self._dir = test_support.TESTFN + if os.path.isdir(self._dir): + test_support.rmtree(self._dir) + if os.path.isfile(self._dir): + test_support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) @@ -1844,10 +1850,10 @@ def tearDown(self): map(os.unlink, self._msgfiles) - os.rmdir(os.path.join(self._dir, "cur")) - os.rmdir(os.path.join(self._dir, "tmp")) - os.rmdir(os.path.join(self._dir, "new")) - os.rmdir(self._dir) + test_support.rmdir(os.path.join(self._dir, "cur")) + test_support.rmdir(os.path.join(self._dir, "tmp")) + test_support.rmdir(os.path.join(self._dir, "new")) + test_support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) @@ -1883,7 +1889,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1891,7 +1899,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1900,8 +1910,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 2) - self.assertIsNot(self.mbox.next(), None) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1910,11 +1924,13 @@ import email.parser fname = self.createMessage("cur", True) n = 0 - for msg in mailbox.PortableUnixMailbox(open(fname), + fid = open(fname) + for msg in mailbox.PortableUnixMailbox(fid, email.parser.Parser().parse): n += 1 self.assertEqual(msg["subject"], "Simple Test") self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE)) + fid.close() self.assertEqual(n, 1) ## End: classes from the original module (for 
backward compatibility). diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -111,13 +111,12 @@ if test_support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") self.assertEqual(p['subject'], - ((('countryName', u'US'),), - (('stateOrProvinceName', u'Delaware'),), - (('localityName', u'Wilmington'),), - (('organizationName', u'Python Software Foundation'),), - (('organizationalUnitName', u'SSL'),), - (('commonName', u'somemachine.python.org'),)), + ((('countryName', 'XY'),), + (('localityName', 'Castle Anthrax'),), + (('organizationName', 'Python Software Foundation'),), + (('commonName', 'localhost'),)) ) + self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),)) # Issue #13034: the subjectAltName in some certificates # (notably projects.developer.nokia.com:443) wasn't parsed p = ssl._ssl._test_decode_cert(NOKIACERT) @@ -994,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, 
ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ b/lib-python/2.7/test/test_support.py @@ -179,15 +179,79 @@ except KeyError: pass +if sys.platform.startswith("win"): + def _waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7 at 4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existance of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. + L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + def unlink(filename): try: - os.unlink(filename) + _unlink(filename) except OSError: pass +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + def rmtree(path): try: - shutil.rmtree(path) + _rmtree(path) except OSError, e: # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -300,26 +300,21 @@ def test_extract_hardlink(self): # Test hardlink extraction (e.g. bug #857297). 
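The _waitfor() helper added to test_support above retries a deletion until the entry disappears from the directory listing, doubling a small delay and giving up after roughly a second. A standalone sketch of the same retry idea (wait_gone and the one-second cap are illustrative names and values, not part of the patch):

    import os, time

    def wait_gone(path, timeout=1.0):
        # Poll until `path` no longer shows up in its directory listing,
        # doubling the sleep interval; stop once it reaches `timeout`.
        dirname, name = os.path.split(path)
        delay = 0.001
        while delay < timeout:
            if name not in os.listdir(dirname or '.'):
                return True
            time.sleep(delay)
            delay *= 2
        return False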
- tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") + with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: + tar.extract("ustar/regtype", TEMPDIR) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) - tar.extract("ustar/regtype", TEMPDIR) - try: tar.extract("ustar/lnktype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("hardlink not extracted properly") + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) - data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) - - try: tar.extract("ustar/symtype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("symlink not extracted properly") - - data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) def test_extractall(self): # Test if extractall() correctly restores directory permissions @@ -340,7 +335,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -351,7 +346,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) class StreamReadTest(CommonReadTest): @@ -1327,7 +1322,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. 
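The optional _encoding argument threaded through print_exception() and format_exception_only() above ends up in _some_str(), where it only affects the fallback path: when str(value) fails, the unicode form is encoded with the given codec (or ASCII) using backslashreplace. A quick illustration with an arbitrary non-ASCII value:

    u = u'caf\xe9'
    print u.encode('ascii', 'backslashreplace')   # prints caf\xe9 (escaped)
    print u.encode('utf-8', 'backslashreplace')   # prints the raw UTF-8 bytes of the accented char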
lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -44,6 +44,8 @@ UUID('00010203-0405-0607-0809-0a0b0c0d0e0f') """ +import struct + __author__ = 'Ka-Ping Yee ' RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [ @@ -125,25 +127,39 @@ overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. """ - if [hex, bytes, bytes_le, fields, int].count(None) != 4: - raise TypeError('need one of hex, bytes, bytes_le, fields, or int') if hex is not None: + if (bytes is not None or bytes_le is not None or + fields is not None or int is not None): + raise TypeError('if the hex argument is given, bytes,' + ' bytes_le, fields, and int need to be None') hex = hex.replace('urn:', '').replace('uuid:', '') hex = hex.strip('{}').replace('-', '') if len(hex) != 32: raise ValueError('badly formed hexadecimal UUID string') int = long(hex, 16) - if bytes_le is not None: + elif bytes_le is not None: + if bytes is not None or fields is not None or int is not None: + raise TypeError('if the bytes_le argument is given, bytes,' + ' fields, and int need to be None') if len(bytes_le) != 16: raise ValueError('bytes_le is not a 16-char string') bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] + bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] + bytes_le[8:]) - if bytes is not None: + int = (struct.unpack('>Q', bytes[:8])[0] << 64 | + struct.unpack('>Q', bytes[8:])[0]) + elif bytes is not None: + if fields is not None or int is not None: + raise TypeError('if the bytes argument is given, fields ' + 'and int need to be None') if len(bytes) != 16: raise ValueError('bytes is not a 16-char string') - int = long(('%02x'*16) % tuple(map(ord, bytes)), 16) - if fields is not None: + int = (struct.unpack('>Q', bytes[:8])[0] << 64 | + struct.unpack('>Q', bytes[8:])[0]) + elif fields is not None: + if int is not None: + raise TypeError('if the fields argument is given, int needs' + ' to be None') if len(fields) != 6: raise ValueError('fields is not a 6-tuple') (time_low, time_mid, time_hi_version, @@ -163,9 +179,12 @@ clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low int = ((time_low << 96L) | (time_mid << 80L) | (time_hi_version << 64L) | (clock_seq << 48L) | node) - if int is not None: + elif int is not None: if not 0 <= int < 1<<128L: raise ValueError('int is out of range (need a 128-bit value)') + else: + raise TypeError('one of hex, bytes, bytes_le, fields,' + ' or int need to be not None') if version is not None: if not 1 <= version <= 5: raise ValueError('illegal version number') @@ -175,7 +194,7 @@ # Set the version number. 
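The struct-based conversion above produces the same 128-bit integer as the old hex-string round trip; a small equivalence check with an arbitrary 16-byte string:

    import struct
    raw = ''.join(chr(i) for i in range(16))
    via_hex = long(('%02x' * 16) % tuple(map(ord, raw)), 16)
    via_struct = (struct.unpack('>Q', raw[:8])[0] << 64 |
                  struct.unpack('>Q', raw[8:])[0])
    assert via_hex == via_struct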
int &= ~(0xf000 << 64L) int |= version << 76L - self.__dict__['int'] = int + object.__setattr__(self, 'int', int) def __cmp__(self, other): if isinstance(other, UUID): diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. + if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = 
self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) 
newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -1,6 +1,9 @@ """Reimplementation of the standard extension module '_curses' using cffi.""" import sys +if sys.platform == 'win32': + #This module does not exist in windows + raise ImportError('No module named _curses') from functools import wraps from cffi import FFI diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. 
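For a quick cross-check of the corrected constants, the SHA-1 object in hashlib reports the same values (digest_size of 20 bytes, block_size of 64 bytes, i.e. 512 // 8):

    import hashlib
    h = hashlib.sha1()
    assert h.digest_size == 20   # matches _sha.sha.digest_size
    assert h.block_size == 64    # 512 // 8, the value fixed above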
@@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', @@ -322,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -363,9 +379,11 @@ pass -def connect(database, **kwargs): - factory = kwargs.get("factory", Connection) - return factory(database, **kwargs) +def connect(database, timeout=5.0, detect_types=0, isolation_level="", + check_same_thread=True, factory=None, cached_statements=100): + factory = Connection if not factory else factory + return factory(database, timeout, detect_types, isolation_level, + check_same_thread, factory, cached_statements) def _unicode_text_factory(x): @@ -982,13 +1000,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1011,12 +1034,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1166,11 +1194,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + 
self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1183,7 +1219,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1229,7 +1265,10 @@ if cvt is not None: param = cvt(param) - param = adapt(param) + try: + param = adapt(param) + except: + pass # And use previous value if param is None: rc = _lib.sqlite3_bind_null(self._statement, idx) @@ -1299,7 +1338,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -22,6 +22,7 @@ READABLE = tklib.TCL_READABLE WRITABLE = tklib.TCL_WRITABLE EXCEPTION = tklib.TCL_EXCEPTION +DONT_WAIT = tklib.TCL_DONT_WAIT def create(screenName=None, baseName=None, className=None, interactive=False, wantobjects=False, wantTk=True, diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -4,7 +4,23 @@ from . import TclError from .tclobj import TclObject, FromObj, AsObj, TypeCache +import contextlib import sys +import threading +import time + + +class _DummyLock(object): + "A lock-like object that does not do anything" + def acquire(self): + pass + def release(self): + pass + def __enter__(self): + pass + def __exit__(self, *exc): + pass + def varname_converter(input): if isinstance(input, TclObject): @@ -37,17 +53,18 @@ def PythonCmd(clientData, interp, argc, argv): self = tkffi.from_handle(clientData) assert self.app.interp == interp - try: - args = [tkffi.string(arg) for arg in argv[1:argc]] - result = self.func(*args) - obj = AsObj(result) - tklib.Tcl_SetObjResult(interp, obj) - except: - self.app.errorInCmd = True - self.app.exc_info = sys.exc_info() - return tklib.TCL_ERROR - else: - return tklib.TCL_OK + with self.app._tcl_lock_released(): + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK @tkffi.callback("Tcl_CmdDeleteProc") def PythonCmdDelete(clientData): @@ -58,6 +75,8 @@ class TkApp(object): + _busywaitinterval = 0.02 # 20ms. + def __new__(cls, screenName, baseName, className, interactive, wantobjects, wantTk, sync, use): if not wantobjects: @@ -73,6 +92,12 @@ self.quitMainLoop = False self.errorInCmd = False + if not self.threaded: + # TCL is not thread-safe, calls needs to be serialized. 
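The point of _DummyLock above is to keep every call site uniform: a threaded Tcl build gets the no-op object, a non-threaded build gets a real threading.Lock, and both work in a with-statement. A minimal sketch of the selection (TCL_THREADED stands in for the real check on self.threaded):

    import threading

    class _DummyLock(object):
        "A lock-like object that does not do anything"
        def acquire(self): pass
        def release(self): pass
        def __enter__(self): pass
        def __exit__(self, *exc): pass

    TCL_THREADED = False   # hypothetical flag; the module tests self.threaded
    tcl_lock = _DummyLock() if TCL_THREADED else threading.Lock()
    with tcl_lock:
        pass   # serialized only when the real lock is in use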
+ self._tcl_lock = threading.Lock() + else: + self._tcl_lock = _DummyLock() + self._typeCache = TypeCache() self._commands = {} @@ -133,6 +158,13 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise RuntimeError("Calling Tcl from different appartment") + @contextlib.contextmanager + def _tcl_lock_released(self): + "Context manager to temporarily release the tcl lock." + self._tcl_lock.release() + yield + self._tcl_lock.acquire() + def loadtk(self): # We want to guard against calling Tk_Init() multiple times err = tklib.Tcl_Eval(self.interp, "info exists tk_version") @@ -159,22 +191,25 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) - if not res: - self.raiseTclError() - assert self._wantobjects - return FromObj(self, res) + with self._tcl_lock: + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) def _setvar(self, name1, value, global_only=False): name1 = varname_converter(name1) + # XXX Acquire tcl lock??? newval = AsObj(value) flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, - newval, flags) - if not res: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() def _unsetvar(self, name1, name2=None, global_only=False): name1 = varname_converter(name1) @@ -183,9 +218,10 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() def getvar(self, name1, name2=None): return self._var_invoke(self._getvar, name1, name2) @@ -219,9 +255,10 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_CreateCommand( - self.interp, cmdName, _CommandData.PythonCmd, - clientData, _CommandData.PythonCmdDelete) + with self._tcl_lock: + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + clientData, _CommandData.PythonCmdDelete) if not res: raise TclError("can't create Tcl command") @@ -229,7 +266,8 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + with self._tcl_lock: + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) if res == -1: raise TclError("can't delete Tcl command") @@ -256,11 +294,12 @@ tklib.Tcl_IncrRefCount(obj) objects[i] = obj - res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() - else: - result = self._callResult() + with self._tcl_lock: + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() finally: for obj in objects: if obj: @@ -280,17 +319,19 @@ def eval(self, script): self._check_tcl_appartment() - res = tklib.Tcl_Eval(self.interp, script) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_Eval(self.interp, script) 
+ if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def evalfile(self, filename): self._check_tcl_appartment() - res = tklib.Tcl_EvalFile(self.interp, filename) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def split(self, arg): if isinstance(arg, tuple): @@ -375,7 +416,10 @@ if self.threaded: result = tklib.Tcl_DoOneEvent(0) else: - raise NotImplementedError("TCL configured without threads") + with self._tcl_lock: + result = tklib.Tcl_DoOneEvent(tklib.TCL_DONT_WAIT) + if result == 0: + time.sleep(self._busywaitinterval) if result < 0: break diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -28,9 +28,11 @@ return result elif value.typePtr == typeCache.BooleanType: - return result + return bool(value.internalRep.longValue) elif value.typePtr == typeCache.ByteArrayType: - return result + size = tkffi.new('int*') + data = tklib.Tcl_GetByteArrayFromObj(value, size) + return tkffi.buffer(data, size[0])[:] elif value.typePtr == typeCache.DoubleType: return value.internalRep.doubleValue elif value.typePtr == typeCache.IntType: @@ -50,7 +52,7 @@ result.append(FromObj(app, tcl_elem[0])) return tuple(result) elif value.typePtr == typeCache.ProcBodyType: - return result + pass # fall through and return tcl object. elif value.typePtr == typeCache.StringType: buf = tklib.Tcl_GetUnicode(value) length = tklib.Tcl_GetCharLength(value) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -1,6 +1,7 @@ # C bindings with libtcl and libtk. from cffi import FFI +import sys tkffi = FFI() @@ -18,6 +19,8 @@ #define TCL_EVAL_DIRECT ... #define TCL_EVAL_GLOBAL ... +#define TCL_DONT_WAIT ... + typedef unsigned short Tcl_UniChar; typedef ... Tcl_Interp; typedef ...* Tcl_ThreadId; @@ -69,6 +72,7 @@ int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); char *Tcl_GetString(Tcl_Obj* objPtr); char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); +unsigned char *Tcl_GetByteArrayFromObj(Tcl_Obj* objPtr, int* lengthPtr); Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); int Tcl_GetCharLength(Tcl_Obj* objPtr); @@ -102,6 +106,25 @@ int Tk_GetNumMainWindows(); """) +# XXX find a better way to detect paths +# XXX pick up CPPFLAGS and LDFLAGS and add to these paths? 
+if sys.platform.startswith("openbsd"):
+    incdirs = ['/usr/local/include/tcl8.5', '/usr/local/include/tk8.5', '/usr/X11R6/include']
+    linklibs = ['tk85', 'tcl85']
+    libdirs = ['/usr/local/lib', '/usr/X11R6/lib']
+elif sys.platform.startswith("freebsd"):
+    incdirs = ['/usr/local/include/tcl8.6', '/usr/local/include/tk8.6', '/usr/local/include/X11', '/usr/local/include']
+    linklibs = ['tk86', 'tcl86']
+    libdirs = ['/usr/local/lib']
+elif sys.platform == 'win32':
+    incdirs = []
+    linklibs = ['tcl85', 'tk85']
+    libdirs = []
+else:
+    incdirs=['/usr/include/tcl']
+    linklibs=['tcl', 'tk']
+    libdirs = []
+
 tklib = tkffi.verify("""
 #include <tcl.h>
 #include <tk.h>
@@ -109,6 +132,7 @@
 char *get_tk_version() { return TK_VERSION; }
 char *get_tcl_version() { return TCL_VERSION; }
 """,
-include_dirs=['/usr/include/tcl'],
-libraries=['tcl', 'tk'],
+include_dirs=incdirs,
+libraries=linklibs,
+library_dirs = libdirs
 )
diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/audioop.py
@@ -0,0 +1,29 @@
+
+import struct
+
+
+class error(Exception):
+    pass
+
+
+def _check_size(size):
+    if size != 1 and size != 2 and size != 4:
+        raise error("Size should be 1, 2 or 4")
+
+
+def _check_params(length, size):
+    _check_size(size)
+    if length % size != 0:
+        raise error("not a whole number of frames")
+
+
+def getsample(cp, size, i):
+    _check_params(len(cp), size)
+    if not (0 <= i < len(cp) / size):
+        raise error("Index out of range")
+    if size == 1:
+        return struct.unpack_from("B", buffer(cp)[i:])[0]
+    elif size == 2:
+        return struct.unpack_from("H", buffer(cp)[i * 2:])[0]
+    elif size == 4:
+        return struct.unpack_from("I", buffer(cp)[i * 4:])[0]
diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info
--- a/lib_pypy/cffi.egg-info
+++ b/lib_pypy/cffi.egg-info
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: cffi
-Version: 0.7
+Version: 0.8
 Summary: Foreign Function Interface for Python calling C code.
 Home-page: http://cffi.readthedocs.org
 Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,5 +4,5 @@
 from .api import FFI, CDefError, FFIError
 from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "0.7"
-__version_info__ = (0, 7)
+__version__ = "0.8.1"
+__version_info__ = (0, 8, 1)
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -1,4 +1,5 @@
 import types
+from .lock import allocate_lock
 try:
     callable
@@ -54,12 +55,14 @@
 # _cffi_backend.so compiled.
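An aside on the pattern running through the _tkinter patches above: every call into the Tcl C library is made while holding self._tcl_lock (a real threading.Lock only when Tcl itself is thread-enabled, otherwise a no-op _DummyLock), and _tcl_lock_released() lets code that already holds the lock drop it temporarily. The following is a minimal standalone sketch of that idiom, not code from the PyPy tree; the names TclApp, call_into_tcl and mainloop_step are illustrative only, and the C calls are stubbed out in comments:

    import contextlib
    import threading
    import time

    class _DummyLock(object):
        # no-op stand-in used when the C library was built without thread support
        def acquire(self):
            pass
        def release(self):
            pass
        def __enter__(self):
            return self
        def __exit__(self, *exc):
            pass

    class TclApp(object):
        def __init__(self, threaded):
            # a single lock serializes every call into the non-reentrant C library
            self._lock = threading.Lock() if threaded else _DummyLock()

        @contextlib.contextmanager
        def _lock_released(self):
            # caller already holds the lock; drop it for a while, then re-take it
            self._lock.release()
            yield
            self._lock.acquire()

        def call_into_tcl(self):
            with self._lock:
                pass  # a tklib.Tcl_Eval(...)-style C call would go here

        def mainloop_step(self, busywaitinterval=0.02):
            # poll one event without blocking, and sleep outside the lock
            with self._lock:
                handled = 0  # would be tklib.Tcl_DoOneEvent(tklib.TCL_DONT_WAIT)
            if handled == 0:
                time.sleep(busywaitinterval)

Using a dummy lock object instead of "if threaded:" checks at every call site keeps the "with self._tcl_lock:" blocks uniform, while making the non-threaded configuration essentially free.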
From noreply at buildbot.pypy.org Fri Jan 17 10:28:00 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 17 Jan 2014 10:28:00 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) implement setfield and forced virtuals on resume Message-ID: <20140117092800.9B5901C0291@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68705:f3e813611a53 Date: 2014-01-16 15:39 +0100 http://bitbucket.org/pypy/pypy/changeset/f3e813611a53/ Log: (fijal, rguillebert) implement setfield and forced virtuals on resume diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -59,6 +59,9 @@ self.make_constant(box) # invariant: box is a Const if and only if level == LEVEL_CONSTANT + def get_resume_box(self): + return self.box + def make_len_gt(self, mode, descr, val): if self.lenbound: assert self.lenbound.mode == mode diff --git a/rpython/jit/metainterp/optimizeopt/resumeopt.py b/rpython/jit/metainterp/optimizeopt/resumeopt.py --- a/rpython/jit/metainterp/optimizeopt/resumeopt.py +++ b/rpython/jit/metainterp/optimizeopt/resumeopt.py @@ -8,7 +8,7 @@ self.pc = pc assert isinstance(jitcode, JitCode) self.jitcode = jitcode - self.boxes = [None] * jitcode.num_regs() + self.values = [None] * jitcode.num_regs() class OptResumeBuilder(object): def __init__(self, opt): @@ -46,11 +46,10 @@ op.getarg(1), op.getarg(2)], None) self.opt._newoperations.append(op) + no = op.getarg(2).getint() + self.framestack[op.getarg(1).getint()].values[no] = value else: self.opt.emit_operation(op) - #no = op.getarg(2).getint() - #box = self.opt.getvalue(op.getarg(0)).box - #self.framestack[op.getarg(1).getint()].boxes[no] = box def new_virtual(self, box): xxx @@ -62,6 +61,18 @@ op = ResOperation(rop.RESUME_NEW, [], newbox, descr=structdescr) self.opt._newoperations.append(op) + def setfield(self, box, fieldbox, descr): + op = ResOperation(rop.RESUME_SETFIELD_GC, [box, fieldbox], None, + descr=descr) + self.opt._newoperations.append(op) + def guard_seen(self, op, pendingfields): - #xxx - pass + for frame_pos, frame in enumerate(self.framestack): + for pos_in_frame, value in enumerate(frame.values): + if value is not None and value.is_forced_virtual(): + op = ResOperation(rop.RESUME_PUT, [value.get_resume_box(), + ConstInt(frame_pos), + ConstInt(pos_in_frame)], + None) + self.opt._newoperations.append(op) + frame.values[pos_in_frame] = None diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -454,27 +454,6 @@ """ self.optimize_loop(ops, expected) - def test_virtual_resume_info(self): - ops = """ - [i0] - enter_frame(-1, descr=jitcode) - p0 = new(descr=ssize) - resume_put(p0, 0, 0) - guard_true(i0) - leave_frame() - finish() - """ - expected = """ - [i0] - enter_frame(-1, descr=jitcode) - p0 = resume_new(descr=ssize) - resume_put(p0, 0, 0) - guard_true(i0) - leave_frame() - finish() - """ - self.optimize_loop(ops, expected) - def test_ooisnull_oononnull_via_virtual(self): ops = """ [p0] @@ -5139,3 +5118,78 @@ class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass + +class TestOptimizeResume(BaseTestBasic, LLtypeMixin): + def test_virtual_resume_info(self): + ops = """ + [i0] + 
enter_frame(-1, descr=jitcode) + p0 = new(descr=ssize) + resume_put(p0, 0, 0) + guard_true(i0) + leave_frame() + finish() + """ + expected = """ + [i0] + enter_frame(-1, descr=jitcode) + p0 = resume_new(descr=ssize) + resume_put(p0, 0, 0) + guard_true(i0) + leave_frame() + finish() + """ + self.optimize_loop(ops, expected) + + def test_virtual_resume_info_field(self): + ops = """ + [i0] + enter_frame(-1, descr=jitcode) + p0 = new(descr=ssize) + setfield_gc(p0, 3, descr=valuedescr) + resume_put(p0, 0, 0) + guard_true(i0) + leave_frame() + finish() + """ + expected = """ + [i0] + enter_frame(-1, descr=jitcode) + p0 = resume_new(descr=ssize) + resume_setfield_gc(p0, 3, descr=valuedescr) + resume_put(p0, 0, 0) + guard_true(i0) + leave_frame() + finish() + """ + self.optimize_loop(ops, expected) + + def test_forced_virtual(self): + ops = """ + [i0, i1] + enter_frame(-1, descr=jitcode) + p0 = new(descr=ssize) + setfield_gc(p0, 3, descr=valuedescr) + resume_put(p0, 0, 0) + guard_true(i0) + escape(p0) + guard_true(i1) + leave_frame() + finish() + """ + expected = """ + [i0, i1] + enter_frame(-1, descr=jitcode) + p0 = resume_new(descr=ssize) + resume_setfield_gc(p0, 3, descr=valuedescr) + resume_put(p0, 0, 0) + guard_true(i0) + p1 = new(descr=ssize) + setfield_gc(p1, 3, descr=valuedescr) + escape(p1) + resume_put(p1, 0, 0) + guard_true(i1) + leave_frame() + finish() + """ + self.optimize_loop(ops, expected) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -19,6 +19,7 @@ level = optimizer.LEVEL_NONNULL is_about_raw = False _cached_vinfo = None + resume_box = None def __init__(self, keybox, source_op=None): self.keybox = keybox # only used as a key in dictionaries @@ -28,6 +29,11 @@ def is_forced_virtual(self): return self.box is not None + def get_resume_box(self): + if self.is_forced_virtual(): + return self.box + return self.resume_box + def get_key_box(self): if self.box is None: return self.keybox @@ -655,6 +661,9 @@ if value.is_virtual(): fieldvalue = self.getvalue(op.getarg(1)) + self.optimizer.resumebuilder.setfield(value.resume_box, + fieldvalue.get_resume_box(), + op.getdescr()) value.setfield(op.getdescr(), fieldvalue) else: value.ensure_nonnull() diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -156,7 +156,6 @@ assert f.registers_i[1].getint() == 2 + 3 def test_new(self): - py.test.skip("finish") jitcode1 = JitCode("jitcode") jitcode1.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) base = parse(""" @@ -167,6 +166,7 @@ backend_put(12, leave_frame() """, namespace={'jitcode':jitcode1}) + XXX def test_reconstructing_resume_reader(self): jitcode1 = JitCode("jitcode") From noreply at buildbot.pypy.org Fri Jan 17 10:28:01 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 17 Jan 2014 10:28:01 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: Some rewrite in progress in order to support new a bit everywhere. More rewrite Message-ID: <20140117092801.E32361C0291@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68706:af3f1d0b6333 Date: 2014-01-16 17:35 +0100 http://bitbucket.org/pypy/pypy/changeset/af3f1d0b6333/ Log: Some rewrite in progress in order to support new a bit everywhere. 
More rewrite in the pipeline ;-) diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -1,3 +1,9 @@ * kill resumedescr.guard_opnum and replace by classes +* in resume2.py, stuff stored on self.virtuals is inefficient + +* compress the resumedata in the backend + +* do escape analysis in the resumeopt.py + diff --git a/rpython/jit/backend/resumebuilder.py b/rpython/jit/backend/resumebuilder.py --- a/rpython/jit/backend/resumebuilder.py +++ b/rpython/jit/backend/resumebuilder.py @@ -26,9 +26,6 @@ def resume_clear(self, framepos, frontend_pos): self.framestack[framepos][frontend_pos] = None - def resume_put_const(self, box, framepos, frontend_pos): - xxx - def resume_new(self, result, descr): self.deps[result] = {} diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -473,7 +473,6 @@ 'RESUME_PUT/3', # arguments are as follows - box or position in the backend, # the frame index (counting from top) and position in the # frontend - 'RESUME_PUT_CONST/3', # the same but for a constant 'RESUME_NEW/0d', 'RESUME_NEW_WITH_VTABLE/1', 'RESUME_NEW_ARRAY/1d', diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/metainterp/resume2.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/metainterp/resume2.py @@ -1,6 +1,8 @@ +import sys from rpython.jit.metainterp.resoperation import rop -from rpython.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, ConstInt +from rpython.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, ConstInt,\ + Box, INT, REF, FLOAT from rpython.jit.metainterp import history from rpython.jit.codewriter.jitcode import JitCode from rpython.rlib import rstack @@ -17,7 +19,21 @@ self.registers = [-1] * jitcode.num_regs() self.jitcode = jitcode self.pc = -1 - + +TAGCONST = 0x0 +TAGVIRTUAL = 0x2 +TAGBOX = 0x3 +TAGSMALLINT = 0x1 + +TAGOFFSET = 2 + +class Virtual(object): + def __init__(self, pos, descr): + self.pos = pos + self.fields = {} + self.descr = descr + + class AbstractResumeReader(object): """ A resume reader that can follow resume until given point. Consult the concrete classes for details @@ -26,6 +42,8 @@ def __init__(self): self.framestack = [] self.consts = [] # XXX cache? 
+ self.virtuals = {} + self.virtual_list = [] def rebuild(self, faildescr): self._rebuild_until(faildescr.rd_resume_bytecode, @@ -41,16 +59,47 @@ self.framestack[-1].pc = pc self.framestack.append(ResumeFrame(jitcode)) - def resume_put(self, jitframe_pos_const, frame_no, frontend_position): - jitframe_pos = jitframe_pos_const.getint() + def encode_box(self, pos): + return TAGBOX | (pos << TAGOFFSET) + + def encode_virtual(self, box): + return TAGVIRTUAL | (self.virtuals[box].pos << TAGOFFSET) + + def encode_const(self, const): + if isinstance(const, ConstInt) and const.getint() < (sys.maxint >> 3): + return TAGSMALLINT | (const.getint() << TAGOFFSET) + self.consts.append(const) + return TAGCONST | ((len(self.consts) - 1) << TAGOFFSET) + + def decode(self, pos): + return pos & 0x3, pos >> TAGOFFSET + + def resume_put(self, jitframe_pos_box, frame_no, frontend_position): + if isinstance(jitframe_pos_box, Box): + jitframe_pos = self.encode_virtual(jitframe_pos_box) + else: + jitframe_pos = self.encode_box(jitframe_pos_box.getint()) self.framestack[frame_no].registers[frontend_position] = jitframe_pos + def encode(self, box): + xxx + + def resume_new(self, box, descr): + # XXX make it a list + v = Virtual(len(self.virtual_list), descr) + self.virtuals[box] = v + self.virtual_list.append(v) + + def resume_setfield_gc(self, box, fieldbox, descr): + # XXX optimize fields + self.virtuals[box].fields[descr] = self.encode(fieldbox) + def resume_clear(self, frame_no, frontend_position): self.framestack[frame_no].registers[frontend_position] = -1 def resume_put_const(self, const, frame_no, frontend_position): - self.framestack[frame_no].registers[frontend_position] = - 2 - len(self.consts) - self.consts.append(const) + pos = self.encode_const(const) + self.framestack[frame_no].registers[frontend_position] = pos def resume_set_pc(self, pc): self.framestack[-1].pc = pc @@ -160,21 +209,54 @@ self.deadframe = deadframe AbstractResumeReader.__init__(self) + def get_box_value(self, encoded_pos, TP): + if encoded_pos == -1: + return None + if encoded_pos in self.cache: + return self.cache[encoded_pos] + tag, pos = self.decode(encoded_pos) + if tag == TAGBOX: + if TP == INT: + val = self.metainterp.cpu.get_int_value(self.deadframe, pos) + res = BoxInt(val) + else: + xxx + self.cache[encoded_pos] = res + return res + elif tag == TAGSMALLINT: + return ConstInt(pos) + elif tag == TAGCONST: + return self.consts[pos] + else: + assert tag == TAGVIRTUAL + virtual = self.virtual_list[pos] + virtual_box = self.allocate_struct(virtual) + for fielddescr, encoded_field_pos in virtual.fields.iteritems(): + self.setfield(virtual, fielddescr, encoded_field_pos) + self.cache[encoded_pos] = virtual_box + return virtual_box + + def allocate_struct(self, virtual): + return self.metainterp.execute_and_record(rop.NEW, virtual.descr) + + def setfield(self, virtual, fielddescr, encoded_field_pos): + xxx + def store_int_box(self, res, pos, miframe, i, jitframe_pos): - if jitframe_pos in self.cache: - box = self.cache[jitframe_pos] - elif jitframe_pos == -1: + box = self.get_box_value(jitframe_pos, INT) + if box is None: return - elif jitframe_pos >= 0: - box = BoxInt(self.metainterp.cpu.get_int_value(self.deadframe, - jitframe_pos)) - elif jitframe_pos <= -2: - box = self.consts[-jitframe_pos - 2] miframe.registers_i[i] = box - self.cache[jitframe_pos] = box res[-1][pos] = box def store_ref_box(self, res, pos, miframe, i, jitframe_pos): + box = self.get_box_value(jitframe_pos, REF) + if box is None: + return + miframe.registers_r[i] 
= box + res[-1][pos] = box + return + xxx if jitframe_pos in self.cache: box = self.cache[jitframe_pos] elif jitframe_pos == -1: @@ -189,6 +271,10 @@ res[-1][pos] = box def store_float_box(self, res, pos, miframe, i, jitframe_pos): + box = self.get_box_value(jitframe_pos) + if box is None: + return + xxx if jitframe_pos in self.cache: box = self.cache[jitframe_pos] elif jitframe_pos == -1: diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/metainterp/test/test_resume2.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/metainterp/test/test_resume2.py @@ -5,6 +5,7 @@ from rpython.jit.metainterp.history import AbstractDescr, Const, INT, Stats from rpython.jit.metainterp.resume2 import rebuild_from_resumedata,\ ResumeBytecode, AbstractResumeReader +from rpython.jit.metainterp.resoperation import rop from rpython.jit.codewriter.format import unformat_assembler from rpython.jit.codewriter.codewriter import CodeWriter from rpython.jit.backend.llgraph.runner import LLGraphCPU @@ -37,10 +38,19 @@ lst += [backend_values[x] for x in self.registers_r] lst += [backend_values[x] for x in self.registers_f] +class AnyBox(object): + def __eq__(self, other): + return True + class MockMetaInterp(object): def __init__(self): self.cpu = MockCPU() self.framestack = [] + self.history = [] + + def execute_and_record(self, *args): + self.history.append(args) + return AnyBox() def newframe(self, jitcode, record_resume=False): f = Frame(jitcode) @@ -157,16 +167,23 @@ def test_new(self): jitcode1 = JitCode("jitcode") - jitcode1.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) + jitcode1.setup(num_regs_i=0, num_regs_r=1, num_regs_f=0) base = parse(""" [] enter_frame(-1, descr=jitcode) - i0 = new(descr=structdescr) - resume_setfield(i0, 13, descr=fielddescr) - backend_put(12, + p0 = resume_new() + resume_setfield_gc(p0, 13) + resume_put(p0, 0, 0) leave_frame() """, namespace={'jitcode':jitcode1}) - XXX + descr = Descr() + descr.rd_resume_bytecode = ResumeBytecode(base.operations) + descr.rd_bytecode_position = 4 + metainterp = MockMetaInterp() + metainterp.cpu = MockCPU() + rebuild_from_resumedata(metainterp, "myframe", descr) + assert metainterp.history == [(rop.NEW, None), + (rop.SETFIELD_GC, None, AnyBox())] def test_reconstructing_resume_reader(self): jitcode1 = JitCode("jitcode") From noreply at buildbot.pypy.org Fri Jan 17 10:28:03 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 17 Jan 2014 10:28:03 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: Move stuff around so we have all resume related things in one directory Message-ID: <20140117092803.410B01C0291@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68707:5b57b97f8470 Date: 2014-01-17 10:27 +0100 http://bitbucket.org/pypy/pypy/changeset/5b57b97f8470/ Log: Move stuff around so we have all resume related things in one directory diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1,7 +1,7 @@ import py, weakref from rpython.jit.backend import model from rpython.jit.backend.llgraph import support -from rpython.jit.backend.resumebuilder import ResumeBuilder,\ +from rpython.jit.resume.backend import ResumeBuilder,\ LivenessAnalyzer, compute_vars_longevity, flatten from rpython.jit.metainterp.history import AbstractDescr from rpython.jit.metainterp.history import Const, getkind diff --git a/rpython/jit/backend/x86/assembler.py 
b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -5,7 +5,7 @@ from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, debug_bridge) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper -from rpython.jit.backend.resumebuilder import flatten +from rpython.jit.resume.backend import flatten from rpython.jit.metainterp.history import Const, Box, VOID from rpython.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -7,7 +7,7 @@ from rpython.jit.backend.llsupport.descr import (ArrayDescr, CallDescr, unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) from rpython.jit.backend.llsupport.gcmap import allocate_gcmap -from rpython.jit.backend.resumebuilder import ResumeBuilder,\ +from rpython.jit.resume.backend import ResumeBuilder,\ compute_vars_longevity, flatten from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempBox, is_comparison_or_ovf_op) diff --git a/rpython/jit/backend/x86/test/test_resumebuilder.py b/rpython/jit/backend/x86/test/test_resumebuilder.py --- a/rpython/jit/backend/x86/test/test_resumebuilder.py +++ b/rpython/jit/backend/x86/test/test_resumebuilder.py @@ -1,6 +1,6 @@ from rpython.jit.backend.x86.test.test_basic import Jit386Mixin -from rpython.jit.backend.llsupport.test.test_resumebuilder import ResumeTest +from rpython.jit.resume.test.test_backend import ResumeTest class TestResumeX86(Jit386Mixin, ResumeTest): # for the individual tests see diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -2,7 +2,8 @@ from rpython.jit.codewriter.jitcode import JitCode, SwitchDictDescr from rpython.jit.metainterp.compile import ResumeAtPositionDescr from rpython.jit.metainterp.jitexc import get_llexception, reraise -from rpython.jit.metainterp import jitexc, resume2 +from rpython.jit.metainterp import jitexc +from rpython.jit.resume import frontend as resume2 from rpython.rlib import longlong2float from rpython.rlib.debug import ll_assert, make_sure_not_resized from rpython.rlib.objectmodel import we_are_translated diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -14,9 +14,8 @@ from rpython.jit.metainterp import history, jitexc from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.inliner import Inliner -from rpython.jit.metainterp.resume import NUMBERING, PENDINGFIELDSP, ResumeDataDirectReader from rpython.jit.codewriter import heaptracker, longlong -from rpython.jit.backend.resumebuilder import flatten +from rpython.jit.resume.backend import flatten def giveup(): from rpython.jit.metainterp.pyjitpl import SwitchToBlackhole diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -5,7 +5,7 @@ ImmutableIntUnbounded, \ IntLowerBound, MININT, MAXINT from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method -from 
rpython.jit.metainterp.optimizeopt.resumeopt import OptResumeBuilder +from rpython.jit.resume.optimizer import OptResumeBuilder from rpython.jit.metainterp.resoperation import rop, ResOperation, AbstractResOp from rpython.jit.metainterp.typesystem import llhelper from rpython.tool.pairtype import extendabletype diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -5,7 +5,7 @@ from rpython.jit.codewriter import heaptracker from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.codewriter.jitcode import JitCode, SwitchDictDescr -from rpython.jit.metainterp import history, compile, resume2, executor, jitexc +from rpython.jit.metainterp import history, compile, executor, jitexc from rpython.jit.metainterp.heapcache import HeapCache from rpython.jit.metainterp.history import (Const, ConstInt, ConstPtr, ConstFloat, Box, TargetToken) @@ -13,7 +13,7 @@ from rpython.jit.metainterp.logger import Logger from rpython.jit.metainterp.optimizeopt.util import args_dict_box from rpython.jit.metainterp.resoperation import rop -from rpython.jit.metainterp.resume2 import ResumeRecorder +from rpython.jit.resume.frontend import ResumeRecorder from rpython.rlib import nonconst, rstack from rpython.rlib.debug import debug_start, debug_stop, debug_print, make_sure_not_resized from rpython.rlib.jit import Counters diff --git a/rpython/jit/resume/__init__.py b/rpython/jit/resume/__init__.py new file mode 100644 diff --git a/rpython/jit/backend/resumebuilder.py b/rpython/jit/resume/backend.py rename from rpython/jit/backend/resumebuilder.py rename to rpython/jit/resume/backend.py --- a/rpython/jit/backend/resumebuilder.py +++ b/rpython/jit/resume/backend.py @@ -1,7 +1,7 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.metainterp.history import ConstInt, Box, Const -from rpython.jit.metainterp.resume2 import ResumeBytecode, AbstractResumeReader +from rpython.jit.resume.frontend import ResumeBytecode, AbstractResumeReader class LivenessAnalyzer(AbstractResumeReader): def __init__(self, inputframes=None): @@ -87,6 +87,7 @@ box = op.getarg(0) args = op.getarglist() if isinstance(box, Const): + XXX newop = op.copy_and_change(rop.RESUME_PUT_CONST) elif box in self.virtuals: newop = op diff --git a/rpython/jit/metainterp/resume2.py b/rpython/jit/resume/frontend.py rename from rpython/jit/metainterp/resume2.py rename to rpython/jit/resume/frontend.py --- a/rpython/jit/metainterp/resume2.py +++ b/rpython/jit/resume/frontend.py @@ -124,9 +124,6 @@ elif op.getopnum() == rop.RESUME_PUT: self.resume_put(op.getarg(0), op.getarg(1).getint(), op.getarg(2).getint()) - elif op.getopnum() == rop.RESUME_PUT_CONST: - self.resume_put_const(op.getarg(0), op.getarg(1).getint(), - op.getarg(2).getint()) elif op.getopnum() == rop.RESUME_NEW: self.resume_new(op.result, op.getdescr()) elif op.getopnum() == rop.RESUME_SETFIELD_GC: diff --git a/rpython/jit/metainterp/optimizeopt/resumeopt.py b/rpython/jit/resume/optimizer.py rename from rpython/jit/metainterp/optimizeopt/resumeopt.py rename to rpython/jit/resume/optimizer.py diff --git a/rpython/jit/resume/test/__init__.py b/rpython/jit/resume/test/__init__.py new file mode 100644 diff --git a/rpython/jit/backend/llsupport/test/test_resumebuilder.py b/rpython/jit/resume/test/test_backend.py rename from rpython/jit/backend/llsupport/test/test_resumebuilder.py rename to rpython/jit/resume/test/test_backend.py --- 
a/rpython/jit/backend/llsupport/test/test_resumebuilder.py +++ b/rpython/jit/resume/test/test_backend.py @@ -4,7 +4,7 @@ from rpython.jit.codewriter.jitcode import JitCode from rpython.jit.tool.oparser import parse from rpython.jit.metainterp.optimizeopt.util import equaloplists -from rpython.jit.metainterp.test.test_resume2 import rebuild_locs_from_resumedata +from rpython.jit.resume.test.test_frontend import rebuild_locs_from_resumedata from rpython.rtyper.lltypesystem import lltype class MockJitCode(JitCode): diff --git a/rpython/jit/metainterp/test/test_resume2.py b/rpython/jit/resume/test/test_frontend.py rename from rpython/jit/metainterp/test/test_resume2.py rename to rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/metainterp/test/test_resume2.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -1,9 +1,8 @@ -import py from rpython.jit.tool.oparser import parse from rpython.jit.codewriter.jitcode import JitCode from rpython.jit.metainterp.history import AbstractDescr, Const, INT, Stats -from rpython.jit.metainterp.resume2 import rebuild_from_resumedata,\ +from rpython.jit.resume.frontend import rebuild_from_resumedata,\ ResumeBytecode, AbstractResumeReader from rpython.jit.metainterp.resoperation import rop from rpython.jit.codewriter.format import unformat_assembler diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -300,6 +300,7 @@ loop.original_jitcell_token = self.original_jitcell_token loop.operations = ops loop.inputframes = [inpargs] + loop.inputargs = inpargs # for the tests loop.last_offset = last_offset return loop From noreply at buildbot.pypy.org Fri Jan 17 10:36:15 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 10:36:15 +0100 (CET) Subject: [pypy-commit] stmgc c7: extend test Message-ID: <20140117093615.822D91C3093@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r616:e911a68889cf Date: 2014-01-16 18:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/e911a68889cf/ Log: extend test diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -104,6 +104,8 @@ stm_push_root(lp) stm_stop_transaction() lp = stm_pop_root() + p1 = stm_get_real_address(lp) + assert p != p1 self.switch(1) @@ -111,6 +113,7 @@ stm_write(lp) # privatize page p_ = stm_get_real_address(lp) assert p != p_ + assert p1 != p_ assert p_[8] == 'u' stm_stop_transaction() From noreply at buildbot.pypy.org Fri Jan 17 10:36:16 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 10:36:16 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix and add a failing test Message-ID: <20140117093616.AE9271C3093@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r617:e9166bf77a19 Date: 2014-01-17 10:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/e9166bf77a19/ Log: fix and add a failing test diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -379,16 +379,21 @@ uintptr_t pagenum = ((uintptr_t)obj) / 4096; assert(pagenum < NB_PAGES); - /* old objects from the same transaction */ - if (flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE - || obj->stm_flags & GCFLAG_NOT_COMMITTED) { - _STM_TL2->old_objects_to_trace = stm_list_append - (_STM_TL2->old_objects_to_trace, obj); + _STM_TL2->old_objects_to_trace = stm_list_append + (_STM_TL2->old_objects_to_trace, obj); + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + /* for old objects from the same transaction we don't need + 
to privatize the page */ + if ((flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE) + || (obj->stm_flags & GCFLAG_NOT_COMMITTED)) { return; } + + /* privatize if SHARED_PAGE */ _stm_privatize(pagenum); + /* lock the object for writing in thread 0's page */ uintptr_t t0_offset = (uintptr_t)obj; char* t0_addr = get_thread_base(0) + t0_offset; struct object_s *t0_obj = (struct object_s *)t0_addr; @@ -397,12 +402,10 @@ if (previous) abort(); /* XXX */ - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - stm_read(obj); - _STM_TL2->modified_objects = stm_list_append( - _STM_TL2->modified_objects, obj); + _STM_TL2->modified_objects = stm_list_append + (_STM_TL2->modified_objects, obj); } @@ -541,11 +544,16 @@ struct stm_list_s *old_objs = _STM_TL2->old_objects_to_trace; while (!stm_list_is_empty(old_objs)) { object_t *item = stm_list_pop_item(old_objs); - stmcb_trace(real_address(item), - trace_if_young); + + assert(!_is_young(item)); + assert(!(item->stm_flags & GCFLAG_WRITE_BARRIER)); + + /* re-add write-barrier */ + item->stm_flags |= GCFLAG_WRITE_BARRIER; + + stmcb_trace(real_address(item), trace_if_young); } - /* XXX fix modified_objects? */ // also move objects to PRIVATE_PAGE pages, but then // also add the GCFLAG_NOT_COMMITTED to these objects. diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -117,6 +117,48 @@ assert p_[8] == 'u' stm_stop_transaction() + + def test_commit_fresh_objects2(self): + self.switch(1) + stm_start_transaction() + lp, p = stm_allocate(16) + p[8] = 'u' + lp2, p2 = stm_allocate(16) + p2[8] = 'v' + assert p2 - p == 16 + stm_write(lp) # test not crash + stm_write(lp2) # test not crash + stm_read(lp) # test not crash + stm_read(lp2) # test not crash + stm_push_root(lp) + stm_push_root(lp2) + stm_stop_transaction() + lp2 = stm_pop_root() + lp = stm_pop_root() + + self.switch(0) + + stm_start_transaction() + stm_write(lp) # privatize page + p_ = stm_get_real_address(lp) + assert p_[8] == 'u' + p_[8] = 'x' + stm_write(lp2) + p2_ = stm_get_real_address(lp2) + assert p2_[8] == 'v' + p2_[8] = 'y' + stm_stop_transaction() + + self.switch(1) + + stm_start_transaction() + stm_write(lp) + p_ = stm_get_real_address(lp) + assert p_[8] == 'x' + stm_read(lp2) + p2_ = stm_get_real_address(lp2) + assert p2_[8] == 'y' + stm_stop_transaction() From noreply at buildbot.pypy.org Fri Jan 17 10:50:49 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 17 Jan 2014 10:50:49 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: Add fast path for app-level str.lower() and str.upper(). Message-ID: <20140117095049.B966A1C02DA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r68708:705202fc153f Date: 2014-01-17 10:50 +0100 http://bitbucket.org/pypy/pypy/changeset/705202fc153f/ Log: Add fast path for app-level str.lower() and str.upper(). 
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -684,6 +684,12 @@ return self_as_uni._new(res) return self._StringMethods_descr_replace(space, w_old, w_new, count) + def descr_lower(self, space): + return W_BytesObject(self._value.lower()) + + def descr_upper(self, space): + return W_BytesObject(self._value.upper()) + def _join_return_one(self, space, w_obj): return (space.is_w(space.type(w_obj), space.w_str) or space.is_w(space.type(w_obj), space.w_unicode)) From noreply at buildbot.pypy.org Fri Jan 17 11:01:38 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 11:01:38 +0100 (CET) Subject: [pypy-commit] stmgc c7: push modified objs to other threads Message-ID: <20140117100138.BBA651C314B@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r618:e4ab154d61af Date: 2014-01-17 11:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/e4ab154d61af/ Log: push modified objs to other threads diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -191,6 +191,14 @@ stm_abort_transaction(); } +bool _stm_was_read_remote(char *base, object_t *obj) +{ + struct read_marker_s *marker = (struct read_marker_s *) + (base + (((uintptr_t)obj) >> 4)); + struct _thread_local1_s *other_TL1 = (struct _thread_local1_s*) + (base + (uintptr_t)_STM_TL1); + return (marker->rm == other_TL1->transaction_read_version); +} bool _stm_was_read(object_t *obj) @@ -318,54 +326,37 @@ -enum detect_conflicts_e { CANNOT_CONFLICT, CAN_CONFLICT, CHECK_CONFLICT }; - -static void update_to_current_version(enum detect_conflicts_e check_conflict) +static void push_modified_to_other_threads() { - /* Loop over objects in 'pending_updates': if they have been - read by the current transaction, the current transaction must - abort; then copy them out of the other thread's object space, - which is not modified so far (the other thread just committed - and will wait until we are done here before it starts the - next transaction). 
- */ - bool conflict_found_or_dont_check = (check_conflict == CANNOT_CONFLICT); + struct stm_list_s *modified = _STM_TL2->modified_objects; char *local_base = _STM_TL2->thread_base; char *remote_base = get_thread_base(1 - _STM_TL2->thread_num); - struct stm_list_s *pu = pending_updates; + bool conflicted = 0; + char *t0_base = get_thread_base(0); + + STM_LIST_FOREACH(modified, ({ + if (!conflicted) + conflicted = _stm_was_read_remote(remote_base, item); - assert(pu != _STM_TL2->modified_objects); - - STM_LIST_FOREACH(pu, ({ - - if (!conflict_found_or_dont_check) - conflict_found_or_dont_check = _stm_was_read(item); - - char *dst = REAL_ADDRESS(local_base, item); - char *src = REAL_ADDRESS(remote_base, item); - size_t size = stmcb_size((struct object_s*)src); - - memcpy(dst, src, size); - })); - - write_fence(); - pending_updates = NULL; - - if (conflict_found_or_dont_check) { - if (check_conflict == CAN_CONFLICT) { - stm_abort_transaction(); - } else { /* CHECK_CONFLICT */ - _STM_TL2->need_abort = 1; - } + /* clear the write-lock */ + struct object_s *t0_obj = (struct object_s*) + REAL_ADDRESS(t0_base, item); + assert(t0_obj->stm_write_lock); + t0_obj->stm_write_lock = 0; + + char *src = REAL_ADDRESS(local_base, item); + char *dst = REAL_ADDRESS(remote_base, item); + size_t size = stmcb_size((struct object_s*)src); + memcpy(dst, src, size); + })); + + if (conflicted) { + struct _thread_local2_s *remote_TL2 = (struct _thread_local2_s *) + REAL_ADDRESS(remote_base, _STM_TL2); + remote_TL2->need_abort = 1; } } -static void maybe_update(enum detect_conflicts_e check_conflict) -{ - if (pending_updates != NULL) { - update_to_current_version(check_conflict); - } -} static void wait_until_updated(void) { @@ -788,15 +779,10 @@ } wait_until_updated(); - stm_list_clear(_STM_TL2->modified_objects); + assert(stm_list_is_empty(_STM_TL2->modified_objects)); assert(stm_list_is_empty(_STM_TL2->old_objects_to_trace)); stm_list_clear(_STM_TL2->uncommitted_pages); - /* check that there is no stm_abort() in the following maybe_update() */ - _STM_TL1->jmpbufptr = NULL; - - maybe_update(CANNOT_CONFLICT); /* no read object: cannot conflict */ - _STM_TL1->jmpbufptr = jmpbufptr; _STM_TL2->running_transaction = 1; _STM_TL2->need_abort = 0; @@ -831,12 +817,8 @@ minor_collect(); /* copy modified object versions to other threads */ - pending_updates = _STM_TL2->modified_objects; - int my_thread_num = _STM_TL2->thread_num; - int other_thread_num = 1 - my_thread_num; - _stm_restore_local_state(other_thread_num); - update_to_current_version(CHECK_CONFLICT); /* sets need_abort */ - _stm_restore_local_state(my_thread_num); + push_modified_to_other_threads(); + stm_list_clear(_STM_TL2->modified_objects); /* uncommitted_pages */ long j; From noreply at buildbot.pypy.org Fri Jan 17 11:34:34 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 11:34:34 +0100 (CET) Subject: [pypy-commit] stmgc c7: test for tracing objects Message-ID: <20140117103434.3B3CF1D230D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r619:70c54b1a942d Date: 2014-01-17 11:34 +0100 http://bitbucket.org/pypy/stmgc/changeset/70c54b1a942d/ Log: test for tracing objects diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -37,7 +37,6 @@ # define HAVE_FULL_EXCHANGE_INSN #endif -typedef TLPREFIX char localchar_t; typedef TLPREFIX struct alloc_for_size_s alloc_for_size_t; typedef TLPREFIX struct _thread_local2_s _thread_local2_t; @@ -358,6 +357,7 @@ } + static void wait_until_updated(void) { while 
(pending_updates == _STM_TL2->modified_objects) diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -10,7 +10,7 @@ typedef TLPREFIX struct _thread_local1_s _thread_local1_t; typedef TLPREFIX struct object_s object_t; typedef TLPREFIX struct read_marker_s read_marker_t; - +typedef TLPREFIX char localchar_t; /* Structure of objects -------------------- diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -28,6 +28,7 @@ ffi.cdef(""" typedef ... object_t; typedef ... jmpbufptr_t; +#define SIZEOF_MYOBJ ... void stm_setup(void); void stm_setup_thread(void); @@ -61,6 +62,8 @@ void stm_push_root(object_t *obj); object_t *stm_pop_root(void); +void _set_ptr(object_t *obj, int n, object_t *v); +object_t * _get_ptr(object_t *obj, int n); void *memset(void *s, int c, size_t n); """) @@ -76,7 +79,7 @@ uint32_t type_id; }; typedef TLPREFIX struct myobj_s myobj_t; - +#define SIZEOF_MYOBJ sizeof(struct myobj_s) size_t stm_object_size_rounded_up(object_t * obj) { return 16; @@ -109,7 +112,8 @@ } -void _set_type_id(object_t *obj, uint32_t h) { +void _set_type_id(object_t *obj, uint32_t h) +{ ((myobj_t*)obj)->type_id = h; } @@ -117,6 +121,26 @@ return ((myobj_t*)obj)->type_id; } + +void _set_ptr(object_t *obj, int n, object_t *v) +{ + localchar_t *field_addr = ((localchar_t*)obj); + field_addr += SIZEOF_MYOBJ; /* header */ + field_addr += n * sizeof(void*); /* field */ + object_t * TLPREFIX * field = (object_t * TLPREFIX *)field_addr; + *field = v; +} + +object_t * _get_ptr(object_t *obj, int n) +{ + localchar_t *field_addr = ((localchar_t*)obj); + field_addr += SIZEOF_MYOBJ; /* header */ + field_addr += n * sizeof(void*); /* field */ + object_t * TLPREFIX * field = (object_t * TLPREFIX *)field_addr; + return *field; +} + + size_t stmcb_size(struct object_s *obj) { struct myobj_s *myobj = (struct myobj_s*)obj; @@ -156,8 +180,13 @@ force_generic_engine=True) -MAGIC_HEADER = ffi.cast('uint32_t', 42142) +import sys +if sys.maxint > 2**32: + WORD = 8 +else: + WORD = 4 +HDR = lib.SIZEOF_MYOBJ def is_in_nursery(ptr): return lib._stm_is_in_nursery(ptr) @@ -174,6 +203,20 @@ lib._set_type_id(o, tid) return o, lib._stm_real_address(o) +def stm_allocate_refs(n): + o = lib.stm_allocate(HDR + n * WORD) + tid = 42142 + n + lib._set_type_id(o, tid) + return o, lib._stm_real_address(o) + +def stm_set_ref(obj, idx, ref): + stm_write(obj) + lib._set_ptr(obj, idx, ref) + +def stm_get_ref(obj, idx): + stm_read(obj) + return lib._get_ptr(obj, idx) + def stm_get_real_address(obj): return lib._stm_real_address(ffi.cast('object_t*', obj)) diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -160,6 +160,29 @@ assert p2_[8] == 'y' stm_stop_transaction() + def test_simple_refs(self): + stm_start_transaction() + lp, p = stm_allocate_refs(3) + lq, q = stm_allocate(16) + lr, r = stm_allocate(16) + q[8] = 'x' + r[8] = 'y' + stm_set_ref(lp, 0, lq) + stm_set_ref(lp, 1, lr) + stm_push_root(lp) + stm_stop_transaction() + lp = stm_pop_root() + self.switch(1) + stm_start_transaction() + stm_write(lp) + lq = stm_get_ref(lp, 0) + lr = stm_get_ref(lp, 1) + stm_read(lq) + stm_read(lr) + assert stm_get_real_address(lq)[8] == 'x' + assert stm_get_real_address(lr)[8] == 'y' + stm_stop_transaction() + # def test_read_write_2(self): From noreply at buildbot.pypy.org Fri Jan 17 11:50:41 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 17 Jan 2014 11:50:41 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: 
(fijal, rguillebert) Start implementing bytecode representation for Message-ID: <20140117105041.3D7981C02DA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68709:77a2bb9785d6 Date: 2014-01-17 11:39 +0100 http://bitbucket.org/pypy/pypy/changeset/77a2bb9785d6/ Log: (fijal, rguillebert) Start implementing bytecode representation for the backend resume code diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1502,6 +1502,10 @@ self.setup_list_of_addr2name(asm.list_of_addr2name) # self.jitdrivers_sd = codewriter.callcontrol.jitdrivers_sd + self.alljitcodes = [] + for jitcode in codewriter.callcontrol.jitcodes.itervalues(): + jitcode.global_index = len(self.alljitcodes) + self.alljitcodes.append(jitcode) self.virtualref_info = codewriter.callcontrol.virtualref_info self.callinfocollection = codewriter.callcontrol.callinfocollection self.has_libffi_call = codewriter.callcontrol.has_libffi_call diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -1,9 +1,35 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.metainterp.history import ConstInt, Box, Const -from rpython.jit.resume.frontend import ResumeBytecode, AbstractResumeReader +from rpython.jit.resume.rescode import ResumeBytecodeBuilder, TAGBOX,\ + ResumeBytecode -class LivenessAnalyzer(AbstractResumeReader): + # if op.getopnum() == rop.ENTER_FRAME: + # descr = op.getdescr() + # assert isinstance(descr, JitCode) + # self.enter_frame(op.getarg(0).getint(), descr) + # elif op.getopnum() == rop.LEAVE_FRAME: + # self.leave_frame() + # elif op.getopnum() == rop.RESUME_PUT: + # self.resume_put(op.getarg(0), op.getarg(1).getint(), + # op.getarg(2).getint()) + # elif op.getopnum() == rop.RESUME_NEW: + # self.resume_new(op.result, op.getdescr()) + # elif op.getopnum() == rop.RESUME_SETFIELD_GC: + # self.resume_setfield_gc(op.getarg(0), op.getarg(1), + # op.getdescr()) + # elif op.getopnum() == rop.RESUME_SET_PC: + # self.resume_set_pc(op.getarg(0).getint()) + # elif op.getopnum() == rop.RESUME_CLEAR: + # self.resume_clear(op.getarg(0).getint(), + # op.getarg(1).getint()) + # elif not op.is_resume(): + # pos += 1 + # continue + # else: + # xxx + +class LivenessAnalyzer(object): def __init__(self, inputframes=None): self.liveness = {} self.frame_starts = [0] @@ -35,6 +61,9 @@ def resume_set_pc(self, pc): pass + def interpret_until(self, *args): + pass + def _track(self, allboxes, box): if box in self.deps: for dep in self.deps[box].values(): @@ -60,12 +89,12 @@ class ResumeBuilder(object): def __init__(self, regalloc, frontend_liveness, descr, inputframes=None, inputlocs=None): - self.newops = [] self.regalloc = regalloc self.current_attachment = {} self.frontend_liveness = frontend_liveness self.frontend_pos = {} self.virtuals = {} + self.builder = ResumeBytecodeBuilder() if inputlocs is not None: i = 0 all = {} @@ -83,6 +112,30 @@ self.current_attachment[box] = loc_pos def process(self, op): + if op.getopnum() == rop.ENTER_FRAME: + self.builder.enter_frame(op.getarg(0).getint(), op.getdescr()) + elif op.getopnum() == rop.RESUME_PUT: + frame_pos = op.getarg(1).getint() + pos_in_frame = op.getarg(2).getint() + box = op.getarg(0) + if isinstance(box, Const): + pos = self.builder.encode_const(box) + self.builder.resume_put(pos, frame_pos, pos_in_frame) + return + try: + 
loc = self.regalloc.loc(box, must_exist=True).get_jitframe_position() + pos = self.builder.encode(TAGBOX, loc) + self.builder.resume_put(pos, frame_pos, pos_in_frame) + except KeyError: + xxx + self.current_attachment[box] = pos + self.frontend_pos[box] = (frame_pos, pos_in_frame) + elif op.getopnum() == rop.LEAVE_FRAME: + self.builder.leave_frame() + else: + xxx + return + xxxx if op.getopnum() == rop.RESUME_PUT: box = op.getarg(0) args = op.getarglist() @@ -119,8 +172,10 @@ return if v not in self.current_attachment: return + pos = self.builder.encode(TAGBOX, pos) if self.current_attachment[v] != pos: frame_index, frame_pos = self.frontend_pos[v] + xxx self.newops.append(ResOperation(rop.RESUME_PUT, [ ConstInt(pos), frame_index, frame_pos], None)) @@ -137,10 +192,11 @@ for v, loc in self.regalloc.xrm.reg_bindings.iteritems(): if v not in visited: self._mark_visited(v, loc) - return len(self.newops) + return self.builder.getpos() def finish(self, parent, parent_position, clt): - return ResumeBytecode(self.newops, parent, parent_position, clt) + return ResumeBytecode(self.builder.build(), parent, parent_position, + clt) def flatten(inputframes): diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -7,31 +7,6 @@ from rpython.jit.codewriter.jitcode import JitCode from rpython.rlib import rstack -class ResumeBytecode(object): - def __init__(self, opcodes, parent=None, parent_position=-1, loop=None): - self.opcodes = opcodes - self.parent = parent - self.parent_position = parent_position - self.loop = loop - -class ResumeFrame(object): - def __init__(self, jitcode): - self.registers = [-1] * jitcode.num_regs() - self.jitcode = jitcode - self.pc = -1 - -TAGCONST = 0x0 -TAGVIRTUAL = 0x2 -TAGBOX = 0x3 -TAGSMALLINT = 0x1 - -TAGOFFSET = 2 - -class Virtual(object): - def __init__(self, pos, descr): - self.pos = pos - self.fields = {} - self.descr = descr class AbstractResumeReader(object): @@ -144,6 +119,10 @@ def read_int(self, jitframe_pos): return self.metainterp.cpu.get_int_value(self.deadframe, jitframe_pos) +class Dumper(AbstractResumeReader): + def __init__(self): + xxx + class DirectResumeReader(AbstractResumeReader): """ Directly read values from the jitframe and put them in the blackhole interpreter diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py new file mode 100644 --- /dev/null +++ b/rpython/jit/resume/reader.py @@ -0,0 +1,145 @@ + +import sys +from rpython.jit.metainterp.history import ConstInt +from rpython.jit.resume import rescode + +class ResumeFrame(object): + def __init__(self, jitcode): + self.registers = [-1] * jitcode.num_regs() + self.jitcode = jitcode + self.pc = -1 + + +class Virtual(object): + def __init__(self, pos, descr): + self.pos = pos + self.fields = {} + self.descr = descr + + +class AbstractResumeReader(object): + """ A resume reader that can follow resume until given point. Consult + the concrete classes for details + """ + + def __init__(self, staticdata): + self.framestack = [] + self.staticdata = staticdata + self.consts = [] # XXX cache? 
+ self.virtuals = {} + self.virtual_list = [] + + def rebuild(self, faildescr): + self._rebuild_until(faildescr.rd_resume_bytecode, + faildescr.rd_bytecode_position) + return self.finish() + + def finish(self): + pass + + def enter_frame(self, pc, jitcode): + if self.framestack: + assert pc != -1 + self.framestack[-1].pc = pc + self.framestack.append(ResumeFrame(jitcode)) + + def encode_box(self, pos): + return rescode.TAGBOX | (pos << rescode.TAGOFFSET) + + def encode_virtual(self, box): + return rescode.TAGVIRTUAL | (self.virtuals[box].pos << rescode.TAGOFFSET) + + def encode_const(self, const): + XXX + if isinstance(const, ConstInt) and const.getint() < (sys.maxint >> 3): + return rescode.TAGSMALLINT | (const.getint() << rescode.TAGOFFSET) + self.consts.append(const) + return rescode.TAGCONST | ((len(self.consts) - 1) << TAGOFFSET) + + def decode(self, pos): + return pos & 0x3, pos >> rescode.TAGOFFSET + + def resume_put(self, jitframe_pos_box, frame_no, frontend_position): + XXX + if isinstance(jitframe_pos_box, Box): + jitframe_pos = self.encode_virtual(jitframe_pos_box) + else: + jitframe_pos = self.encode_box(jitframe_pos_box.getint()) + self.framestack[frame_no].registers[frontend_position] = jitframe_pos + + def encode(self, box): + xxx + + def resume_new(self, box, descr): + # XXX make it a list + v = Virtual(len(self.virtual_list), descr) + self.virtuals[box] = v + self.virtual_list.append(v) + + def resume_setfield_gc(self, box, fieldbox, descr): + # XXX optimize fields + self.virtuals[box].fields[descr] = self.encode(fieldbox) + + def resume_clear(self, frame_no, frontend_position): + self.framestack[frame_no].registers[frontend_position] = -1 + + def resume_put_const(self, const, frame_no, frontend_position): + pos = self.encode_const(const) + self.framestack[frame_no].registers[frontend_position] = pos + + def resume_set_pc(self, pc): + self.framestack[-1].pc = pc + + def leave_frame(self): + self.framestack.pop() + + def _rebuild_until(self, rb, position): + if rb.parent is not None: + self._rebuild_until(rb.parent, rb.parent_position) + self.interpret_until(rb.opcodes, position) + + def read(self, pos): + return ord(self.bytecode.opcodes[pos]) + + def read_short(self, pos): + return self.read(pos) + (self.read(pos + 1) << 16) + + def interpret_until(self, bytecode, until, pos=0): + self.bytecode = bytecode + while pos < until: + op = ord(bytecode.opcodes[pos]) + if op == rescode.UNUSED: + raise Exception("malformed bytecode") + elif op == rescode.ENTER_FRAME: + pc = self.read_short(pos + 1) - 1 + jitcode = self.staticdata.alljitcodes[self.read_short(pos + 3)] + self.enter_frame(pc, jitcode) + pos += 5 + elif op == rescode.RESUME_PUT: + encoded = self.read_short(pos + 1) + frame_pos = self.read(pos + 3) + pos_in_frame = self.read(pos + 4) + self.resume_put(encoded, frame_pos, pos_in_frame) + pos += 5 + else: + xxx + self.bytecode = None + + def read_int(self, jitframe_pos): + return self.metainterp.cpu.get_int_value(self.deadframe, jitframe_pos) + +class Dumper(AbstractResumeReader): + def __init__(self, staticdata): + AbstractResumeReader.__init__(self, staticdata) + self.l = [] + + def enter_frame(self, pc, jitcode): + self.l.append("enter_frame %d %s" % (pc, jitcode.name)) + + def resume_put(self, encoded, frame_pos, pos_in_frame): + tag, index = self.decode(encoded) + self.l.append("resume_put (%d, %d) %d %d" % (tag, index, frame_pos, + pos_in_frame)) + + def finish(self): + return "\n".join(self.l) diff --git a/rpython/jit/resume/rescode.py 
b/rpython/jit/resume/rescode.py new file mode 100644 --- /dev/null +++ b/rpython/jit/resume/rescode.py @@ -0,0 +1,66 @@ + +from rpython.jit.metainterp.history import ConstInt + +UNUSED, ENTER_FRAME, LEAVE_FRAME, RESUME_PUT = range(4) + +TAGCONST = 0x0 +TAGVIRTUAL = 0x2 +TAGBOX = 0x3 +TAGSMALLINT = 0x1 + +TAGOFFSET = 2 + +class ResumeBytecode(object): + def __init__(self, opcodes, parent=None, parent_position=-1, loop=None): + self.opcodes = opcodes + self.parent = parent + self.parent_position = parent_position + self.loop = loop + + def dump(self, staticdata, resume_pos): + from rpython.jit.resume.reader import Dumper + + d = Dumper(staticdata) + d.interpret_until(self, resume_pos) + return d.finish() + +class ResumeBytecodeBuilder(object): + def __init__(self): + self.l = [] + + def getpos(self): + return len(self.l) + + def build(self): + return "".join(self.l) + + def write(self, i): + assert 0 <= i < 256 + self.l.append(chr(i)) + + def write_short(self, i): + assert 0 <= i < 0x1000 + self.write(i & 0xff) + self.write(i >> 8) + + def enter_frame(self, pc, jitcode): + self.write(ENTER_FRAME) + self.write_short(pc + 1) # can be -1 ! + self.write_short(jitcode.global_index) + + def leave_frame(self): + self.write(LEAVE_FRAME) + + def encode(self, tag, loc): + return tag | (loc << 2) + + def encode_const(self, const): + if isinstance(const, ConstInt) and 0 <= const.getint() < 0x4000: + return TAGSMALLINT | (const.getint() << 2) + xxx + + def resume_put(self, pos, frame_pos, pos_in_frame): + self.write(RESUME_PUT) + self.write_short(pos) + self.write(frame_pos) + self.write(pos_in_frame) diff --git a/rpython/jit/resume/test/test_backend.py b/rpython/jit/resume/test/test_backend.py --- a/rpython/jit/resume/test/test_backend.py +++ b/rpython/jit/resume/test/test_backend.py @@ -10,7 +10,8 @@ class MockJitCode(JitCode): def __init__(self, no): self.no = no - self.name = 'frame %d' % no + self.global_index = no + self.name = 'frame-%d' % no def num_regs(self): return self.no @@ -18,13 +19,20 @@ def __repr__(self): return 'MockJitCode(%d)' % self.no +class MockStaticData(object): + def __init__(self, *jitcodes): + self.alljitcodes = list(jitcodes) + +def preparse(inp): + return "\n".join([s.strip() for s in inp.split("\n") if s.strip()]) + class ResumeTest(object): def setup_method(self, meth): self.cpu = self.CPUClass(None, None) self.cpu.setup_once() def test_simple(self): - jitcode = MockJitCode(3) + jitcode = MockJitCode(1) loop = parse(""" [i0] enter_frame(-1, descr=jitcode) @@ -37,16 +45,16 @@ self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) descr = loop.operations[3].getdescr() - assert descr.rd_bytecode_position == 3 - expected_resume = parse(""" - [] - enter_frame(-1, descr=jitcode) - resume_put(28, 0, 2) - resume_put_const(1, 0, 1) - leave_frame() - """, namespace={'jitcode': jitcode}) - equaloplists(descr.rd_resume_bytecode.opcodes, - expected_resume.operations) + assert descr.rd_bytecode_position == 15 + staticdata = MockStaticData(None, jitcode) + res = descr.rd_resume_bytecode.dump(staticdata, + descr.rd_bytecode_position) + expected_resume = preparse(""" + enter_frame -1 frame-1 + resume_put (3, 28) 0 2 + resume_put (1, 1) 0 1 + """) + assert res == expected_resume def test_resume_new(self): jitcode = JitCode("name") diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -2,8 +2,9 @@ from rpython.jit.tool.oparser import 
parse from rpython.jit.codewriter.jitcode import JitCode from rpython.jit.metainterp.history import AbstractDescr, Const, INT, Stats -from rpython.jit.resume.frontend import rebuild_from_resumedata,\ - ResumeBytecode, AbstractResumeReader +from rpython.jit.resume.frontend import rebuild_from_resumedata +from rpython.jit.resume.rescode import ResumeBytecode +from rpython.jit.resume.reader import AbstractResumeReader from rpython.jit.metainterp.resoperation import rop from rpython.jit.codewriter.format import unformat_assembler from rpython.jit.codewriter.codewriter import CodeWriter From noreply at buildbot.pypy.org Fri Jan 17 11:50:42 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 17 Jan 2014 11:50:42 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: a little bit of progress Message-ID: <20140117105042.7465A1C02DA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68710:47558757c155 Date: 2014-01-17 11:49 +0100 http://bitbucket.org/pypy/pypy/changeset/47558757c155/ Log: a little bit of progress diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -118,6 +118,8 @@ frame_pos = op.getarg(1).getint() pos_in_frame = op.getarg(2).getint() box = op.getarg(0) + if box in self.virtuals: + xxx if isinstance(box, Const): pos = self.builder.encode_const(box) self.builder.resume_put(pos, frame_pos, pos_in_frame) @@ -132,6 +134,11 @@ self.frontend_pos[box] = (frame_pos, pos_in_frame) elif op.getopnum() == rop.LEAVE_FRAME: self.builder.leave_frame() + elif op.getopnum() == rop.RESUME_NEW: + v_pos = len(self.virtuals) + self.virtuals[op.result] = v_pos + XXX + self.builder.resume_new(v_pos, op.getdescr()) else: xxx return diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -102,7 +102,7 @@ return ord(self.bytecode.opcodes[pos]) def read_short(self, pos): - return self.read(pos) + (self.read(pos + 1) << 16) + return self.read(pos) + (self.read(pos + 1) << 8) def interpret_until(self, bytecode, until, pos=0): self.bytecode = bytecode diff --git a/rpython/jit/resume/test/test_backend.py b/rpython/jit/resume/test/test_backend.py --- a/rpython/jit/resume/test/test_backend.py +++ b/rpython/jit/resume/test/test_backend.py @@ -58,6 +58,7 @@ def test_resume_new(self): jitcode = JitCode("name") + jitcode.global_index = 1 jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) S = lltype.GcStruct('S', ('field', lltype.Signed)) structdescr = self.cpu.sizeof(S) @@ -78,9 +79,9 @@ looptoken = JitCellToken() self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) - expected_resume = parse(""" - [i0] - enter_frame(-1, descr=jitcode) + xxx + expected_resume = preparse(""" + enter_frame -1 frame-1 p0 = resume_new(descr=structdescr) resume_setfield_gc(p0, i0, descr=fielddescr) resume_put(p0, 0, 0) From noreply at buildbot.pypy.org Fri Jan 17 12:03:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 17 Jan 2014 12:03:16 +0100 (CET) Subject: [pypy-commit] pypy default: Work around the "text file is busy" error Message-ID: <20140117110316.AAD311C0446@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68711:7812ad72a634 Date: 2014-01-17 12:02 +0100 http://bitbucket.org/pypy/pypy/changeset/7812ad72a634/ Log: Work around the "text file is busy" error diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- 
a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -591,3 +591,12 @@ if sys.platform == 'win32': name = name.new(ext='exe') return name + +if os.name == 'posix': + def shutil_copy(src, dst): + # this version handles the case where 'dst' is an executable + # currently being executed + shutil.copy(src, dst + '~') + os.rename(dst + '~', dst) +else: + shutil_copy = shutil.copy diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py --- a/rpython/translator/test/test_driver.py +++ b/rpython/translator/test/test_driver.py @@ -1,6 +1,6 @@ import py import os -from rpython.translator.driver import TranslationDriver +from rpython.translator.driver import TranslationDriver, shutil_copy from rpython.tool.udir import udir def test_ctr(): @@ -74,4 +74,9 @@ assert dst_name.new(ext='dll').read() == 'dll' assert dst_name.new(purebasename='python27',ext='lib').read() == 'lib' - +def test_shutil_copy(): + a = udir.join('file_a') + b = udir.join('file_a') + a.write('hello') + shutil_copy(str(a), str(b)) + assert b.read() == 'hello' From noreply at buildbot.pypy.org Fri Jan 17 12:12:49 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 12:12:49 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix test Message-ID: <20140117111249.67C191C0291@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r620:064e07d95b60 Date: 2014-01-17 11:40 +0100 http://bitbucket.org/pypy/stmgc/changeset/064e07d95b60/ Log: fix test diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -275,10 +275,14 @@ lib._stm_restore_local_state(1) if lib._stm_is_in_transaction(): stm_stop_transaction() - lib._stm_teardown_thread() + lib._stm_restore_local_state(0) if lib._stm_is_in_transaction(): stm_stop_transaction() + + lib._stm_restore_local_state(1) + lib._stm_teardown_thread() + lib._stm_restore_local_state(0) lib._stm_teardown_thread() lib._stm_teardown() diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -185,50 +185,27 @@ - # def test_read_write_2(self): - # stm_start_transaction() - # lp1, p1 = stm_allocate(16) - # p1[8] = 'a' - # stm_stop_transaction(False) - # # - # self.switch(1) - # stm_start_transaction() - # stm_write(lp1) - # p1 = stm_get_real_address(lp1) - # assert p1[8] == 'a' - # p1[8] = 'b' - # # - # self.switch(0) - # stm_start_transaction() - # stm_read(lp1) - # p1 = stm_get_real_address(lp1) - # assert p1[8] == 'a' - # # - # self.switch(1) - # stm_stop_transaction(False) - # # - # self.switch(0) - # p1 = stm_get_real_address(lp1) - # assert p1[8] == 'a' - + def test_start_transaction_updates(self): + stm_start_transaction() + lp1, p1 = stm_allocate(16) + p1[8] = 'a' + stm_push_root(lp1) + stm_stop_transaction() + lp1 = stm_pop_root() + # + self.switch(1) + stm_start_transaction() + stm_write(lp1) + p1 = stm_get_real_address(lp1) + assert p1[8] == 'a' + p1[8] = 'b' + stm_stop_transaction() + # + self.switch(0) + stm_start_transaction() + p1 = stm_get_real_address(lp1) + assert p1[8] == 'b' - # def test_start_transaction_updates(self): - # stm_start_transaction() - # p1 = stm_allocate(16) - # p1[8] = 'a' - # stm_stop_transaction(False) - # # - # self.switch(1) - # stm_start_transaction() - # stm_write(p1) - # assert p1[8] == 'a' - # p1[8] = 'b' - # stm_stop_transaction(False) - # # - # self.switch(0) - # assert p1[8] == 'a' - # stm_start_transaction() - # assert p1[8] == 'b' # def test_resolve_no_conflict_empty(self): # 
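
(Illustrative aside on the driver.py hunk above: on POSIX you cannot open a running executable for writing -- that is the ETXTBSY "text file busy" error -- but you may rename another file over it, because the old inode stays alive for any process still executing it. The shutil_copy() helper therefore writes the new bytes under a sibling temporary name and then rename()s it into place. A standalone sketch of the same copy-then-rename pattern; the function and path names here are made up for the example:)

    import os
    import shutil

    def install_over_running_binary(new_file, target):
        tmp = target + '~'
        shutil.copy(new_file, tmp)   # write the new bytes next to the target
        os.rename(tmp, target)       # swap in place; 'target' is never opened for writing

    # e.g. install_over_running_binary('pypy-c.new', 'pypy-c')
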
stm_start_transaction() From noreply at buildbot.pypy.org Fri Jan 17 12:12:50 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 12:12:50 +0100 (CET) Subject: [pypy-commit] stmgc c7: adapt testing framework Message-ID: <20140117111250.994F11C0291@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r621:bf2603b4cfbb Date: 2014-01-17 12:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/bf2603b4cfbb/ Log: adapt testing framework diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -291,17 +291,12 @@ return object_pages + thread_num * (NB_PAGES * 4096UL); } -bool _is_young(object_t *o) +bool _stm_is_young(object_t *o) { assert((uintptr_t)o >= FIRST_NURSERY_PAGE * 4096); return (uintptr_t)o < FIRST_AFTER_NURSERY_PAGE * 4096; } -bool _stm_is_in_nursery(char *ptr) -{ - object_t * o = _stm_tl_address(ptr); - return _is_young(o); -} char *_stm_real_address(object_t *o) { @@ -495,7 +490,7 @@ { if (*pobj == NULL) return; - if (!_is_young(*pobj)) + if (!_stm_is_young(*pobj)) return; /* the location the object moved to is at an 8b offset */ @@ -536,7 +531,7 @@ while (!stm_list_is_empty(old_objs)) { object_t *item = stm_list_pop_item(old_objs); - assert(!_is_young(item)); + assert(!_stm_is_young(item)); assert(!(item->stm_flags & GCFLAG_WRITE_BARRIER)); /* re-add write-barrier */ @@ -567,6 +562,7 @@ object_t *stm_allocate(size_t size) { + assert(_STM_TL2->running_transaction); assert(size % 8 == 0); size_t i = size / 8; assert(2 <= i && i < LARGE_OBJECT_WORDS);//XXX diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -115,7 +115,7 @@ char *_stm_real_address(object_t *o); object_t *_stm_tl_address(char *ptr); -bool _stm_is_in_nursery(char *ptr); +bool _stm_is_young(object_t *o); object_t *_stm_allocate_old(size_t size); void _stm_start_safe_point(void); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -48,7 +48,7 @@ char *_stm_real_address(object_t *o); object_t *_stm_tl_address(char *ptr); -bool _stm_is_in_nursery(char *ptr); +bool _stm_is_young(object_t *o); object_t *_stm_allocate_old(size_t size); void _stm_start_safe_point(void); @@ -188,26 +188,26 @@ HDR = lib.SIZEOF_MYOBJ -def is_in_nursery(ptr): - return lib._stm_is_in_nursery(ptr) +def is_in_nursery(o): + return lib._stm_is_young(o) def stm_allocate_old(size): o = lib._stm_allocate_old(size) tid = 42 + size lib._set_type_id(o, tid) - return o, lib._stm_real_address(o) + return o def stm_allocate(size): o = lib.stm_allocate(size) tid = 42 + size lib._set_type_id(o, tid) - return o, lib._stm_real_address(o) + return o def stm_allocate_refs(n): o = lib.stm_allocate(HDR + n * WORD) tid = 42142 + n lib._set_type_id(o, tid) - return o, lib._stm_real_address(o) + return o def stm_set_ref(obj, idx, ref): stm_write(obj) @@ -217,6 +217,14 @@ stm_read(obj) return lib._get_ptr(obj, idx) +def stm_set_char(obj, c): + stm_write(obj) + stm_get_real_address(obj)[HDR] = c + +def stm_get_char(obj): + stm_read(obj) + return stm_get_real_address(obj)[HDR] + def stm_get_real_address(obj): return lib._stm_real_address(ffi.cast('object_t*', obj)) @@ -272,11 +280,12 @@ self.current_thread = 0 def teardown_method(self, meth): - lib._stm_restore_local_state(1) + if self.current_thread != 1: + self.switch(1) if lib._stm_is_in_transaction(): stm_stop_transaction() - lib._stm_restore_local_state(0) + self.switch(0) if lib._stm_is_in_transaction(): stm_stop_transaction() diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- 
a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -7,21 +7,25 @@ pass def test_thread_local_allocations(self): - lp1, p1 = stm_allocate(16) - lp2, p2 = stm_allocate(16) - assert is_in_nursery(p1) - assert is_in_nursery(p2) - assert p2 - p1 == 16 - lp3, p3 = stm_allocate(16) - assert p3 - p2 == 16 + stm_start_transaction() + lp1 = stm_allocate(16) + lp2 = stm_allocate(16) + assert is_in_nursery(lp1) + assert is_in_nursery(lp2) + assert stm_get_real_address(lp2) - stm_get_real_address(lp1) == 16 + lp3 = stm_allocate(16) + p3 = stm_get_real_address(lp3) + assert p3 - stm_get_real_address(lp2) == 16 # self.switch(1) - lp1s, p1s = stm_allocate(16) - assert abs(p1s - p3) >= 4000 + stm_start_transaction() + lp1s = stm_allocate(16) + assert is_in_nursery(lp1s) + assert abs(stm_get_real_address(lp1s) - p3) >= 4000 # self.switch(0) - lp4, p4 = stm_allocate(16) - assert p4 - p3 == 16 + lp4 = stm_allocate(16) + assert stm_get_real_address(lp4) - p3 == 16 def test_transaction_start_stop(self): stm_start_transaction() @@ -35,62 +39,57 @@ def test_simple_read(self): stm_start_transaction() - lp1, _ = stm_allocate(16) + lp1 = stm_allocate(16) stm_read(lp1) assert stm_was_read(lp1) stm_stop_transaction() def test_simple_write(self): stm_start_transaction() - lp1, _ = stm_allocate(16) + lp1 = stm_allocate(16) assert stm_was_written(lp1) stm_write(lp1) assert stm_was_written(lp1) stm_stop_transaction() def test_allocate_old(self): - lp1, _ = stm_allocate_old(16) + lp1 = stm_allocate_old(16) self.switch(1) - lp2, _ = stm_allocate_old(16) + lp2 = stm_allocate_old(16) assert lp1 != lp2 def test_write_on_old(self): - lp1, p1 = stm_allocate_old(16) + lp1 = stm_allocate_old(16) stm_start_transaction() stm_write(lp1) assert stm_was_written(lp1) - p1[15] = 'a' + stm_set_char(lp1, 'a') self.switch(1) stm_start_transaction() stm_read(lp1) assert stm_was_read(lp1) - tp1 = stm_get_real_address(lp1) - assert tp1[15] == '\0' - stm_stop_transaction() - self.switch(0) - + assert stm_get_char(lp1) == '\0' stm_stop_transaction() + def test_read_write_1(self): - lp1, p1 = stm_allocate_old(16) - p1[8] = 'a' + lp1 = stm_allocate_old(16) + stm_get_real_address(lp1)[HDR] = 'a' #setchar stm_start_transaction() stm_stop_transaction() # self.switch(1) stm_start_transaction() stm_write(lp1) - p1 = stm_get_real_address(lp1) - assert p1[8] == 'a' - p1[8] = 'b' + assert stm_get_char(lp1) == 'a' + stm_set_char(lp1, 'b') # self.switch(0) stm_start_transaction() stm_read(lp1) - p1 = stm_get_real_address(lp1) - assert p1[8] == 'a' + assert stm_get_char(lp1) == 'a' # self.switch(1) stm_stop_transaction(False) @@ -99,8 +98,9 @@ def test_commit_fresh_objects(self): stm_start_transaction() - lp, p = stm_allocate(16) - p[8] = 'u' + lp = stm_allocate(16) + stm_set_char(lp, 'u') + p = stm_get_real_address(lp) stm_push_root(lp) stm_stop_transaction() lp = stm_pop_root() @@ -114,18 +114,18 @@ p_ = stm_get_real_address(lp) assert p != p_ assert p1 != p_ - assert p_[8] == 'u' + assert stm_get_char(lp) == 'u' stm_stop_transaction() def test_commit_fresh_objects2(self): self.switch(1) stm_start_transaction() - lp, p = stm_allocate(16) - p[8] = 'u' - lp2, p2 = stm_allocate(16) - p2[8] = 'v' - assert p2 - p == 16 + lp = stm_allocate(16) + stm_set_char(lp, 'u') + lp2 = stm_allocate(16) + stm_set_char(lp2, 'v') + assert stm_get_real_address(lp2) - stm_get_real_address(lp) == 16 stm_write(lp) # test not crash stm_write(lp2) # test not crash stm_read(lp) # test not crash @@ -140,55 +140,52 @@ stm_start_transaction() stm_write(lp) # privatize page - p_ = 
stm_get_real_address(lp) - assert p_[8] == 'u' - p_[8] = 'x' + assert stm_get_char(lp) == 'u' + stm_set_char(lp, 'x') stm_write(lp2) - p2_ = stm_get_real_address(lp2) - assert p2_[8] == 'v' - p2_[8] = 'y' + assert stm_get_char(lp2) == 'v' + stm_set_char(lp2, 'y') stm_stop_transaction() self.switch(1) stm_start_transaction() stm_write(lp) - p_ = stm_get_real_address(lp) - assert p_[8] == 'x' - stm_read(lp2) - p2_ = stm_get_real_address(lp2) - assert p2_[8] == 'y' + assert stm_get_char(lp) == 'x' + assert stm_get_char(lp2) == 'y' stm_stop_transaction() def test_simple_refs(self): stm_start_transaction() - lp, p = stm_allocate_refs(3) - lq, q = stm_allocate(16) - lr, r = stm_allocate(16) - q[8] = 'x' - r[8] = 'y' + lp = stm_allocate_refs(3) + lq = stm_allocate(16) + lr = stm_allocate(16) + stm_set_char(lq, 'x') + stm_set_char(lr, 'y') stm_set_ref(lp, 0, lq) stm_set_ref(lp, 1, lr) stm_push_root(lp) stm_stop_transaction() lp = stm_pop_root() + self.switch(1) + stm_start_transaction() stm_write(lp) lq = stm_get_ref(lp, 0) lr = stm_get_ref(lp, 1) stm_read(lq) stm_read(lr) - assert stm_get_real_address(lq)[8] == 'x' - assert stm_get_real_address(lr)[8] == 'y' + assert stm_get_char(lq) == 'x' + assert stm_get_char(lr) == 'y' stm_stop_transaction() def test_start_transaction_updates(self): stm_start_transaction() - lp1, p1 = stm_allocate(16) - p1[8] = 'a' + lp1 = stm_allocate(16) + stm_set_char(lp1, 'a') stm_push_root(lp1) stm_stop_transaction() lp1 = stm_pop_root() @@ -196,44 +193,54 @@ self.switch(1) stm_start_transaction() stm_write(lp1) - p1 = stm_get_real_address(lp1) - assert p1[8] == 'a' - p1[8] = 'b' + assert stm_get_char(lp1) == 'a' + stm_set_char(lp1, 'b') stm_stop_transaction() # self.switch(0) stm_start_transaction() - p1 = stm_get_real_address(lp1) - assert p1[8] == 'b' + assert stm_get_char(lp1) == 'b' - # def test_resolve_no_conflict_empty(self): - # stm_start_transaction() - # # - # self.switch(1) - # stm_start_transaction() - # stm_stop_transaction(False) - # # - # self.switch(0) - # stm_stop_transaction(False) + def test_resolve_no_conflict_empty(self): + stm_start_transaction() + # + self.switch(1) + stm_start_transaction() + stm_stop_transaction(False) + # + self.switch(0) + stm_stop_transaction(False) - # def test_resolve_no_conflict_write_only_in_already_committed(self): - # stm_start_transaction() - # p1 = stm_allocate(16) - # p1[8] = 'a' - # stm_stop_transaction(False) - # stm_start_transaction() - # # - # self.switch(1) - # stm_start_transaction() - # stm_write(p1) - # p1[8] = 'b' - # stm_stop_transaction(False) - # # - # self.switch(0) - # assert p1[8] == 'a' - # stm_stop_transaction(False) - # assert p1[8] == 'b' + def test_resolve_no_conflict_write_only_in_already_committed(self): + stm_start_transaction() + lp1 = stm_allocate(16) + p1 = stm_get_real_address(lp1) + p1[HDR] = 'a' + stm_push_root(lp1) + stm_stop_transaction(False) + lp1 = stm_pop_root() + # 'a' in SHARED_PAGE + + stm_start_transaction() + + self.switch(1) + + stm_start_transaction() + stm_write(lp1) # privatize page + p1 = stm_get_real_address(lp1) + assert p1[HDR] == 'a' + p1[HDR] = 'b' + stm_stop_transaction(False) + # 'b' both private pages + # + self.switch(0) + # + assert p1[HDR] == 'b' + p1 = stm_get_real_address(lp1) + assert p1[HDR] == 'b' + stm_stop_transaction(False) + assert p1[HDR] == 'b' # def test_resolve_write_read_conflict(self): # stm_start_transaction() From noreply at buildbot.pypy.org Fri Jan 17 12:34:01 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 12:34:01 
+0100 (CET) Subject: [pypy-commit] stmgc c7: fix and add tests Message-ID: <20140117113401.F21681C0291@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r622:c6c73c285527 Date: 2014-01-17 12:33 +0100 http://bitbucket.org/pypy/stmgc/changeset/c6c73c285527/ Log: fix and add tests diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -150,42 +150,38 @@ struct tx_descriptor *in_single_thread = NULL; -void stm_start_sharedlock(void) +void stm_start_shared_lock(void) { int err = pthread_rwlock_rdlock(&rwlock_shared); if (err != 0) abort(); } -void stm_stop_sharedlock(void) +void stm_stop_lock(void) { int err = pthread_rwlock_unlock(&rwlock_shared); if (err != 0) abort(); } -static void start_exclusivelock(void) +static void stm_start_exclusive_lock(void) { int err = pthread_rwlock_wrlock(&rwlock_shared); if (err != 0) abort(); -} - -static void stop_exclusivelock(void) -{ - int err = pthread_rwlock_unlock(&rwlock_shared); - if (err != 0) - abort(); + if (_STM_TL2->need_abort) { + stm_abort_transaction(); + } } void _stm_start_safe_point(void) { - stm_stop_sharedlock(); + stm_stop_lock(); } void _stm_stop_safe_point(void) { - stm_start_sharedlock(); + stm_start_shared_lock(); if (_STM_TL2->need_abort) stm_abort_transaction(); } @@ -757,7 +753,7 @@ { assert(!_STM_TL2->running_transaction); - stm_start_sharedlock(); + stm_start_shared_lock(); uint8_t old_rv = _STM_TL1->transaction_read_version; _STM_TL1->transaction_read_version = old_rv + 1; @@ -805,8 +801,8 @@ void stm_stop_transaction(void) { assert(_STM_TL2->running_transaction); - stm_stop_sharedlock(); - start_exclusivelock(); + stm_stop_lock(); + stm_start_exclusive_lock(); _STM_TL1->jmpbufptr = NULL; /* cannot abort any more */ @@ -908,7 +904,7 @@ /* } */ _STM_TL2->running_transaction = 0; - stop_exclusivelock(); + stm_stop_lock(); } void stm_abort_transaction(void) @@ -927,6 +923,6 @@ assert(_STM_TL1->jmpbufptr != NULL); assert(_STM_TL1->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ _STM_TL2->running_transaction = 0; - stm_stop_sharedlock(); + stm_stop_lock(); __builtin_longjmp(*_STM_TL1->jmpbufptr, 1); } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -188,6 +188,9 @@ HDR = lib.SIZEOF_MYOBJ +class Conflict(Exception): + pass + def is_in_nursery(o): return lib._stm_is_young(o) @@ -252,22 +255,19 @@ def stm_start_transaction(): lib.stm_start_transaction(ffi.cast("jmpbufptr_t*", -1)) -def stm_stop_transaction(expected_conflict=False): +def stm_stop_transaction(): res = lib._stm_stop_transaction() - if expected_conflict: - assert res == 1 - else: - assert res == 0 + if res: + raise Conflict() + def stm_start_safe_point(): lib._stm_start_safe_point() -def stm_stop_safe_point(expected_conflict=False): +def stm_stop_safe_point(): res = lib._stm_check_stop_safe_point() - if expected_conflict: - assert res == 1 - else: - assert res == 0 + if res: + raise Conflict() class BaseTest(object): @@ -295,14 +295,12 @@ lib._stm_teardown_thread() lib._stm_teardown() - def switch(self, thread_num, expect_conflict=False): + def switch(self, thread_num): assert thread_num != self.current_thread if lib._stm_is_in_transaction(): stm_start_safe_point() lib._stm_restore_local_state(thread_num) if lib._stm_is_in_transaction(): - stm_stop_safe_point(expect_conflict) - elif expect_conflict: - assert False + stm_stop_safe_point() self.current_thread = thread_num diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ 
b/c7/test/test_basic.py @@ -1,5 +1,5 @@ from support import * - +import py class TestBasic(BaseTest): @@ -92,9 +92,9 @@ assert stm_get_char(lp1) == 'a' # self.switch(1) - stm_stop_transaction(False) + stm_stop_transaction() # - self.switch(0, expect_conflict=True) # detects rw conflict + py.test.raises(Conflict, self.switch, 0) # detects rw conflict def test_commit_fresh_objects(self): stm_start_transaction() @@ -207,10 +207,10 @@ # self.switch(1) stm_start_transaction() - stm_stop_transaction(False) + stm_stop_transaction() # self.switch(0) - stm_stop_transaction(False) + stm_stop_transaction() def test_resolve_no_conflict_write_only_in_already_committed(self): stm_start_transaction() @@ -218,7 +218,7 @@ p1 = stm_get_real_address(lp1) p1[HDR] = 'a' stm_push_root(lp1) - stm_stop_transaction(False) + stm_stop_transaction() lp1 = stm_pop_root() # 'a' in SHARED_PAGE @@ -231,7 +231,7 @@ p1 = stm_get_real_address(lp1) assert p1[HDR] == 'a' p1[HDR] = 'b' - stm_stop_transaction(False) + stm_stop_transaction() # 'b' both private pages # self.switch(0) @@ -239,29 +239,50 @@ assert p1[HDR] == 'b' p1 = stm_get_real_address(lp1) assert p1[HDR] == 'b' - stm_stop_transaction(False) + stm_stop_transaction() assert p1[HDR] == 'b' - # def test_resolve_write_read_conflict(self): - # stm_start_transaction() - # p1 = stm_allocate(16) - # p1[8] = 'a' - # stm_stop_transaction(False) - # stm_start_transaction() - # # - # self.switch(1) - # stm_start_transaction() - # stm_write(p1) - # p1[8] = 'b' - # stm_stop_transaction(False) - # # - # self.switch(0) - # stm_read(p1) - # assert p1[8] == 'a' - # stm_stop_transaction(expected_conflict=True) - # assert p1[8] in ('a', 'b') - # stm_start_transaction() - # assert p1[8] == 'b' + def test_not_resolve_write_read_conflict(self): + stm_start_transaction() + lp1 = stm_allocate(16) + stm_set_char(lp1, 'a') + stm_push_root(lp1) + stm_stop_transaction() + lp1 = stm_pop_root() + + stm_start_transaction() + stm_read(lp1) + # + self.switch(1) + stm_start_transaction() + stm_write(lp1) + stm_set_char(lp1, 'b') + stm_stop_transaction() + # + py.test.raises(Conflict, self.switch, 0) + stm_start_transaction() + assert stm_get_char(lp1) == 'b' + + def test_resolve_write_read_conflict(self): + stm_start_transaction() + lp1 = stm_allocate(16) + stm_set_char(lp1, 'a') + stm_push_root(lp1) + stm_stop_transaction() + lp1 = stm_pop_root() + + stm_start_transaction() + # + self.switch(1) + stm_start_transaction() + stm_write(lp1) + stm_set_char(lp1, 'b') + stm_stop_transaction() + # + self.switch(0) + assert stm_get_char(lp1) == 'b' + + # def test_resolve_write_write_conflict(self): # stm_start_transaction() From noreply at buildbot.pypy.org Fri Jan 17 12:57:00 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 12:57:00 +0100 (CET) Subject: [pypy-commit] stmgc c7: changes in testing framework and fix Message-ID: <20140117115700.AB6751D2404@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r623:feff1f2a98a1 Date: 2014-01-17 12:56 +0100 http://bitbucket.org/pypy/stmgc/changeset/feff1f2a98a1/ Log: changes in testing framework and fix diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -363,18 +363,20 @@ _STM_TL2->old_objects_to_trace = stm_list_append (_STM_TL2->old_objects_to_trace, obj); - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - + /* for old objects from the same transaction we don't need to privatize the page */ if ((flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE) || (obj->stm_flags & GCFLAG_NOT_COMMITTED)) { + 
obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; return; } /* privatize if SHARED_PAGE */ _stm_privatize(pagenum); + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + /* lock the object for writing in thread 0's page */ uintptr_t t0_offset = (uintptr_t)obj; char* t0_addr = get_thread_base(0) + t0_offset; @@ -382,7 +384,7 @@ int previous = __sync_lock_test_and_set(&t0_obj->stm_write_lock, 1); if (previous) - abort(); /* XXX */ + stm_abort_transaction(); stm_read(obj); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -39,6 +39,7 @@ void stm_read(object_t *object); void stm_write(object_t *object); +bool _checked_stm_write(object_t *object); _Bool _stm_was_read(object_t *object); _Bool _stm_was_written(object_t *object); @@ -85,6 +86,20 @@ return 16; } + +bool _checked_stm_write(object_t *object) { + jmpbufptr_t here; + if (__builtin_setjmp(here) == 0) { // returned directly + assert(_STM_TL1->jmpbufptr == (jmpbufptr_t*)-1); + _STM_TL1->jmpbufptr = &here; + stm_write(object); + _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + return 0; + } + _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + return 1; +} + bool _stm_stop_transaction(void) { jmpbufptr_t here; if (__builtin_setjmp(here) == 0) { // returned directly @@ -238,7 +253,8 @@ lib.stm_read(o) def stm_write(o): - lib.stm_write(o) + if lib._checked_stm_write(o): + raise Conflict() def stm_was_read(o): return lib._stm_was_read(o) @@ -256,8 +272,7 @@ lib.stm_start_transaction(ffi.cast("jmpbufptr_t*", -1)) def stm_stop_transaction(): - res = lib._stm_stop_transaction() - if res: + if lib._stm_stop_transaction(): raise Conflict() @@ -265,8 +280,7 @@ lib._stm_start_safe_point() def stm_stop_safe_point(): - res = lib._stm_check_stop_safe_point() - if res: + if lib._stm_check_stop_safe_point(): raise Conflict() diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -282,29 +282,21 @@ self.switch(0) assert stm_get_char(lp1) == 'b' - + def test_resolve_write_write_conflict(self): + stm_start_transaction() + lp1 = stm_allocate(16) + stm_set_char(lp1, 'a') + stm_push_root(lp1) + stm_stop_transaction() + lp1 = stm_pop_root() + stm_start_transaction() + stm_write(lp1) + # + self.switch(1) + stm_start_transaction() + py.test.raises(Conflict, stm_write, lp1) # write-write conflict - # def test_resolve_write_write_conflict(self): - # stm_start_transaction() - # p1 = stm_allocate(16) - # p1[8] = 'a' - # stm_stop_transaction(False) - # stm_start_transaction() - # # - # self.switch(1) - # stm_start_transaction() - # stm_write(p1) - # p1[8] = 'b' - # stm_stop_transaction(False) - # # - # self.switch(0) - # assert p1[8] == 'a' - # stm_write(p1) - # p1[8] = 'c' - # stm_stop_transaction(expected_conflict=True) - # assert p1[8] in ('a', 'b') - # stm_start_transaction() - # assert p1[8] == 'b' + # def test_resolve_write_write_no_conflict(self): # stm_start_transaction() From noreply at buildbot.pypy.org Fri Jan 17 13:26:56 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 17 Jan 2014 13:26:56 +0100 (CET) Subject: [pypy-commit] pypy default: make Jeremy happy Message-ID: <20140117122656.B31AA1C087E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r68712:85605d599503 Date: 2013-11-07 14:48 -0500 http://bitbucket.org/pypy/pypy/changeset/85605d599503/ Log: make Jeremy happy diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1336,10 
+1336,9 @@ l[index] = self.unwrap(w_item) except IndexError: raise - return - - w_list.switch_to_object_strategy() - w_list.setitem(index, w_item) + else: + w_list.switch_to_object_strategy() + w_list.setitem(index, w_item) def setslice(self, w_list, start, step, slicelength, w_other): assert slicelength >= 0 From noreply at buildbot.pypy.org Fri Jan 17 13:26:57 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 17 Jan 2014 13:26:57 +0100 (CET) Subject: [pypy-commit] pypy default: move comment to where it belongs Message-ID: <20140117122657.E31AD1C087E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r68713:ea542eb3aad3 Date: 2013-12-20 21:30 +0100 http://bitbucket.org/pypy/pypy/changeset/ea542eb3aad3/ Log: move comment to where it belongs diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2102,11 +2102,11 @@ if not box1.same_constant(box2): break else: - # Found! Compile it as a loop. - # raises in case it works -- which is the common case if self.partial_trace: if start != self.retracing_from: raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP) # For now + # Found! Compile it as a loop. + # raises in case it works -- which is the common case self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! self.cancel_count += 1 From noreply at buildbot.pypy.org Fri Jan 17 13:26:59 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 17 Jan 2014 13:26:59 +0100 (CET) Subject: [pypy-commit] pypy default: issue892 testing Message-ID: <20140117122659.25CF31C087E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r68714:84ab6e698cac Date: 2014-01-17 13:23 +0100 http://bitbucket.org/pypy/pypy/changeset/84ab6e698cac/ Log: issue892 testing integrate Mario Pernici's patch to speed up the naive multiplication algorithm Thanks! diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -1290,26 +1290,58 @@ # Even if it's not power of two it can still be useful. 
return _muladd1(b, digit) + # a is not b + # use the following identity to reduce the number of operations + # a * b = a_0*b_0 + sum_{i=1}^n(a_0*b_i + a_1*b_{i-1}) + a_1*b_n z = rbigint([NULLDIGIT] * (size_a + size_b), 1) - # gradeschool long mult i = UDIGIT_TYPE(0) - while i < size_a: - carry = 0 - f = a.widedigit(i) + size_a1 = UDIGIT_TYPE(size_a - 1) + size_b1 = UDIGIT_TYPE(size_b - 1) + while i < size_a1: + f0 = a.widedigit(i) + f1 = a.widedigit(i + 1) pz = i + carry = z.widedigit(pz) + b.widedigit(0) * f0 + z.setdigit(pz, carry) + pz += 1 + carry >>= SHIFT + j = UDIGIT_TYPE(0) + while j < size_b1: + # this operation does not overflow using + # SHIFT = (LONG_BIT // 2) - 1 = B - 1; in fact before it + # carry and z.widedigit(pz) are less than 2**(B - 1); + # b.widedigit(j + 1) * f0 < (2**(B-1) - 1)**2; so + # carry + z.widedigit(pz) + b.widedigit(j + 1) * f0 + + # b.widedigit(j) * f1 < 2**(2*B - 1) - 2**B < 2**LONG)BIT - 1 + carry += z.widedigit(pz) + b.widedigit(j + 1) * f0 + \ + b.widedigit(j) * f1 + z.setdigit(pz, carry) + pz += 1 + carry >>= SHIFT + j += 1 + # carry < 2**(B + 1) - 2 + carry += z.widedigit(pz) + b.widedigit(size_b1) * f1 + z.setdigit(pz, carry) + pz += 1 + carry >>= SHIFT + # carry < 4 + if carry: + z.setdigit(pz, carry) + assert (carry >> SHIFT) == 0 + i += 2 + if size_a & 1: + pz = size_a1 + f = a.widedigit(pz) pb = 0 + carry = _widen_digit(0) while pb < size_b: carry += z.widedigit(pz) + b.widedigit(pb) * f pb += 1 z.setdigit(pz, carry) pz += 1 carry >>= SHIFT - assert carry <= MASK if carry: - assert pz >= 0 z.setdigit(pz, z.widedigit(pz) + carry) - assert (carry >> SHIFT) == 0 - i += 1 z._normalize() return z From noreply at buildbot.pypy.org Fri Jan 17 13:27:00 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 17 Jan 2014 13:27:00 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20140117122700.62FDA1C087E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r68715:9dd14335d58e Date: 2014-01-17 13:26 +0100 http://bitbucket.org/pypy/pypy/changeset/9dd14335d58e/ Log: merge diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1337,10 +1337,9 @@ l[index] = self.unwrap(w_item) except IndexError: raise - return - - w_list.switch_to_object_strategy() - w_list.setitem(index, w_item) + else: + w_list.switch_to_object_strategy() + w_list.setitem(index, w_item) def setslice(self, w_list, start, step, slicelength, w_other): assert slicelength >= 0 diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2102,11 +2102,11 @@ if not box1.same_constant(box2): break else: - # Found! Compile it as a loop. - # raises in case it works -- which is the common case if self.partial_trace: if start != self.retracing_from: raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP) # For now + # Found! Compile it as a loop. + # raises in case it works -- which is the common case self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! 
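
(Illustrative aside on the rbigint change above: the new loop consumes two digits of a per outer pass, using the identity quoted in its comment, so roughly half as many passes over b are needed; the carry-bound comments justify that the combined sum still fits in a double-width digit when SHIFT = LONG_BIT // 2 - 1. A toy base-10 version of the same two-digits-at-a-time schoolbook multiplication, on little-endian digit lists and with naive carry flushing, just to make the index arithmetic concrete -- names and the tiny base are for illustration only:)

    BASE = 10   # tiny base so the bookkeeping is easy to check by hand

    def digits(n):
        out = []
        while True:
            out.append(n % BASE)
            n //= BASE
            if not n:
                return out

    def value(ds):
        return sum(d * BASE ** i for i, d in enumerate(ds))

    def mul(a, b):
        z = [0] * (len(a) + len(b))
        i = 0
        while i + 1 < len(a):
            f0, f1 = a[i], a[i + 1]          # two digits of a per pass
            carry = z[i] + b[0] * f0
            z[i] = carry % BASE
            carry //= BASE
            pz = i + 1
            for j in range(len(b) - 1):      # the a0*b[j+1] + a1*b[j] terms
                carry += z[pz] + b[j + 1] * f0 + b[j] * f1
                z[pz] = carry % BASE
                carry //= BASE
                pz += 1
            carry += z[pz] + b[-1] * f1      # trailing a1*b[last] term
            z[pz] = carry % BASE
            carry //= BASE
            pz += 1
            while carry:                     # flush any leftover carry
                carry += z[pz]
                z[pz] = carry % BASE
                carry //= BASE
                pz += 1
            i += 2
        if len(a) & 1:                       # odd length: one classic pass for the last digit
            carry, pz, f = 0, len(a) - 1, a[-1]
            for j in range(len(b)):
                carry += z[pz] + b[j] * f
                z[pz] = carry % BASE
                carry //= BASE
                pz += 1
            while carry:
                carry += z[pz]
                z[pz] = carry % BASE
                carry //= BASE
                pz += 1
        return z

    assert value(mul(digits(4567), digits(89))) == 4567 * 89
    assert value(mul(digits(123), digits(456))) == 123 * 456
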
self.cancel_count += 1 From noreply at buildbot.pypy.org Fri Jan 17 14:38:28 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 14:38:28 +0100 (CET) Subject: [pypy-commit] stmgc c7: failing abort cleanup test Message-ID: <20140117133828.5025F1C3183@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r624:6d1c731e3872 Date: 2014-01-17 14:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/6d1c731e3872/ Log: failing abort cleanup test diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -164,18 +164,18 @@ abort(); } -static void stm_start_exclusive_lock(void) +void stm_start_exclusive_lock(void) { int err = pthread_rwlock_wrlock(&rwlock_shared); if (err != 0) abort(); - if (_STM_TL2->need_abort) { + if (_STM_TL2->need_abort) stm_abort_transaction(); - } } void _stm_start_safe_point(void) { + assert(!_STM_TL2->need_abort); stm_stop_lock(); } @@ -375,17 +375,19 @@ /* privatize if SHARED_PAGE */ _stm_privatize(pagenum); - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - /* lock the object for writing in thread 0's page */ uintptr_t t0_offset = (uintptr_t)obj; char* t0_addr = get_thread_base(0) + t0_offset; struct object_s *t0_obj = (struct object_s *)t0_addr; - int previous = __sync_lock_test_and_set(&t0_obj->stm_write_lock, 1); - if (previous) + int previous; + while ((previous = __sync_lock_test_and_set(&t0_obj->stm_write_lock, 1))) { stm_abort_transaction(); + /* XXX: only abort if we are younger */ + spin_loop(); + } + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; stm_read(obj); _STM_TL2->modified_objects = stm_list_append @@ -581,7 +583,7 @@ void stm_setup(void) -{ +{ pthread_rwlockattr_t attr; pthread_rwlockattr_init(&attr); pthread_rwlockattr_setkind_np(&attr, @@ -912,16 +914,28 @@ void stm_abort_transaction(void) { assert(_STM_TL2->running_transaction); - // XXX copy back the modified objects!! - long j; - for (j = 2; j < LARGE_OBJECT_WORDS; j++) { - alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; - uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; - alloc->next -= num_allocated; - } + + // XXX reset all the modified objects!! 
+ stm_list_clear(_STM_TL2->modified_objects); + + /* re-add GCFLAG_WRITE_BARRIER */ + stm_list_clear(_STM_TL2->old_objects_to_trace); + + /* clear the nursery */ + + /* unreserve uncommitted_pages */ + + /* XXX: forget about GCFLAG_UNCOMMITTED objects */ + + /* long j; */ + /* for (j = 2; j < LARGE_OBJECT_WORDS; j++) { */ + /* alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; */ + /* uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; */ + /* alloc->next -= num_allocated; */ + /* } */ /* stm_list_clear(_STM_TL2->new_object_ranges); */ - stm_list_clear(_STM_TL2->modified_objects); - stm_list_clear(_STM_TL2->old_objects_to_trace); + + assert(_STM_TL1->jmpbufptr != NULL); assert(_STM_TL1->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ _STM_TL2->running_transaction = 0; diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -120,6 +120,8 @@ void _stm_start_safe_point(void); void _stm_stop_safe_point(void); + +void stm_abort_transaction(void); #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -66,6 +66,9 @@ void _set_ptr(object_t *obj, int n, object_t *v); object_t * _get_ptr(object_t *obj, int n); + +bool _stm_check_abort_transaction(void); + void *memset(void *s, int c, size_t n); """) @@ -126,6 +129,19 @@ return 1; } +bool _stm_check_abort_transaction(void) { + jmpbufptr_t here; + if (__builtin_setjmp(here) == 0) { // returned directly + assert(_STM_TL1->jmpbufptr == (jmpbufptr_t*)-1); + _STM_TL1->jmpbufptr = &here; + stm_abort_transaction(); + _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + return 0; + } + _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + return 1; +} + void _set_type_id(object_t *obj, uint32_t h) { @@ -275,6 +291,9 @@ if lib._stm_stop_transaction(): raise Conflict() +def stm_abort_transaction(): + return lib._stm_check_abort_transaction() + def stm_start_safe_point(): lib._stm_start_safe_point() diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -84,7 +84,6 @@ stm_write(lp1) assert stm_get_char(lp1) == 'a' stm_set_char(lp1, 'b') - # self.switch(0) stm_start_transaction() @@ -289,13 +288,28 @@ stm_push_root(lp1) stm_stop_transaction() lp1 = stm_pop_root() + stm_start_transaction() - stm_write(lp1) + stm_write(lp1) # acquire lock # self.switch(1) stm_start_transaction() py.test.raises(Conflict, stm_write, lp1) # write-write conflict + def test_abort_cleanup(self): + stm_start_transaction() + lp1 = stm_allocate(16) + stm_set_char(lp1, 'a') + stm_push_root(lp1) + stm_stop_transaction() + lp1 = stm_pop_root() + + stm_start_transaction() + stm_set_char(lp1, 'x') + assert stm_abort_transaction() + + stm_start_transaction() + assert stm_get_char(lp1) == 'a' # def test_resolve_write_write_no_conflict(self): From noreply at buildbot.pypy.org Fri Jan 17 14:45:35 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 17 Jan 2014 14:45:35 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: fix translation Message-ID: <20140117134535.5E9D71C0459@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r584:42d00b2f7502 Date: 2014-01-17 09:21 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/42d00b2f7502/ Log: fix translation diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -18,7 +18,7 @@ from spyvm import constants, error from rpython.rlib import rrandom, objectmodel, jit, signature -from rpython.rlib.rarithmetic import intmask, r_uint +from rpython.rlib.rarithmetic 
import intmask, r_uint, r_int from rpython.tool.pairtype import extendabletype from rpython.rlib.objectmodel import instantiate, compute_hash from rpython.rtyper.lltypesystem import lltype, rffi @@ -757,13 +757,13 @@ byte0 = ord(self.getchar(byte_index0)) byte1 = ord(self.getchar(byte_index0 + 1)) << 8 if byte1 & 0x8000 != 0: - byte1 = intmask(intmask(0xffff0000) | byte1) + byte1 = intmask(r_uint(0xffff0000) | r_uint(byte1)) return space.wrap_int(byte1 | byte0) def short_atput0(self, space, index0, w_value): from rpython.rlib.rarithmetic import int_between i_value = space.unwrap_int(w_value) - if not int_between(-32768, i_value, 0x8000): + if not int_between(-0x8000, i_value, 0x8000): raise error.PrimitiveFailedError byte_index0 = index0 * 2 byte0 = i_value & 0xff @@ -895,18 +895,18 @@ else: short = (word >> 16) & 0xffff if short & 0x8000 != 0: - short = intmask(0xffff0000) | short + short = r_uint(0xffff0000) | r_uint(short) return space.wrap_int(intmask(short)) def short_atput0(self, space, index0, w_value): from rpython.rlib.rarithmetic import int_between i_value = space.unwrap_int(w_value) - if not int_between(-32768, i_value, 0x8000): + if not int_between(-0x8000, i_value, 0x8000): raise error.PrimitiveFailedError word_index0 = index0 / 2 word = intmask(self.getword(word_index0)) if index0 % 2 == 0: - word = (word & intmask(0xffff0000)) | (i_value & 0xffff) + word = intmask(r_uint(word) & r_uint(0xffff0000)) | (i_value & 0xffff) else: word = (i_value << 16) | (word & 0xffff) value = r_uint(word) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -649,9 +649,10 @@ try: s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) except Return: - w_dest_form = w_rcvr.fetch(space, 0) - if w_dest_form.is_same_object(space.objtable['w_display']): - w_bitmap = w_dest_form.fetch(space, 0) + w_rcvr = s_frame.peek(0) + w_dest_form = w_rcvr.fetch(interp.space, 0) + if w_dest_form.is_same_object(interp.space.objtable['w_display']): + w_bitmap = w_dest_form.fetch(interp.space, 0) assert isinstance(w_bitmap, model.W_DisplayBitmap) w_bitmap.flush_to_screen() except shadow.MethodNotFound: From noreply at buildbot.pypy.org Fri Jan 17 14:45:36 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 17 Jan 2014 14:45:36 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: fix clock issue Message-ID: <20140117134536.792F21C0459@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r585:702bb4c819e0 Date: 2014-01-17 11:43 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/702bb4c819e0/ Log: fix clock issue diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -190,4 +190,4 @@ MAX_LOOP_DEPTH = 100 INTERRUPT_COUNTER_SIZE = 10000 -CompileTime = int(time.time() * 1000) +CompileTime = time.time() diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -207,7 +207,7 @@ def time_now(self): import time from rpython.rlib.rarithmetic import intmask - return intmask((int(time.time() * 1000) - self.startup_time)) + return intmask(int((time.time() - self.startup_time) * 1000) & constants.TAGGED_MASK) def padding(self, symbol=' '): return symbol * (self.max_stack_depth - self.remaining_stack_depth) diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -362,6 +362,8 @@ # ____________________________________________________________ class 
SqueakImage(object): + _immutable_fields_ = ["w_asSymbol", "w_simulateCopyBits", "version", + "is_modern", "startup_time"] def from_reader(self, space, reader): from spyvm import constants @@ -378,7 +380,7 @@ self.version = reader.version self.is_modern = reader.version.magic > 6502 self.run_spy_hacks(space) - self.startup_time = int(time.time() * 1000) + self.startup_time = time.time() def run_spy_hacks(self, space): pass From noreply at buildbot.pypy.org Fri Jan 17 14:45:37 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 17 Jan 2014 14:45:37 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: this assertion is not true Message-ID: <20140117134537.A544F1C0459@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r586:7a217be69118 Date: 2014-01-17 13:01 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/7a217be69118/ Log: this assertion is not true diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -356,7 +356,6 @@ def _sendSelector(self, w_selector, argcount, interp, receiver, receiverclassshadow): - assert isinstance(w_selector, model.W_BytesObject) if interp.should_trace(): print "%sSending selector %r to %r with: %r" % ( interp._last_indent, w_selector.as_repr_string(), receiver, From noreply at buildbot.pypy.org Fri Jan 17 14:45:38 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 17 Jan 2014 14:45:38 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: re-enable simulateCopyBits usage properly Message-ID: <20140117134538.AFFD61C0459@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r587:c1149fc4f6a4 Date: 2014-01-17 13:02 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/c1149fc4f6a4/ Log: re-enable simulateCopyBits usage properly diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -643,21 +643,23 @@ interp.image.lastWindowSize = ((ary[4] & 0xffff) << 16) | (ary[5] & 0xffff) return w_rcvr - at expose_primitive(BITBLT_COPY_BITS, clean_stack=False, no_result=True, compiled_method=True) + at expose_primitive(BITBLT_COPY_BITS, clean_stack=False, no_result=False, compiled_method=True) def func(interp, s_frame, argcount, s_method): from spyvm.interpreter import Return + w_rcvr = s_frame.peek(0) try: s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) except Return: - w_rcvr = s_frame.peek(0) w_dest_form = w_rcvr.fetch(interp.space, 0) - if w_dest_form.is_same_object(interp.space.objtable['w_display']): - w_bitmap = w_dest_form.fetch(interp.space, 0) + w_display = interp.space.objtable['w_display'] + if w_dest_form.is_same_object(w_display): + w_bitmap = w_display.fetch(interp.space, 0) assert isinstance(w_bitmap, model.W_DisplayBitmap) w_bitmap.flush_to_screen() + return w_rcvr except shadow.MethodNotFound: from spyvm.plugins.bitblt import BitBltPlugin - BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) + return BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) @expose_primitive(BE_CURSOR) def func(interp, s_frame, argcount): From noreply at buildbot.pypy.org Fri Jan 17 14:45:39 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 17 Jan 2014 14:45:39 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: sync all invalid MethodDictShadows when SYMBOL_FLUSH_CACHE is called - slow, but might work Message-ID: <20140117134539.BCD051C0459@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: 
r588:ca89f7a89419 Date: 2014-01-17 13:03 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/ca89f7a89419/ Log: sync all invalid MethodDictShadows when SYMBOL_FLUSH_CACHE is called - slow, but might work diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -912,9 +912,37 @@ w_class.as_class_get_shadow(interp.space).flush_caches() return w_rcvr - at expose_primitive(SYMBOL_FLUSH_CACHE, unwrap_spec=[object]) -def func(interp, s_frame, w_rcvr): - raise PrimitiveFailedError() + +if not stm_enabled(): + # XXX: We don't have a global symbol cache. Instead, we get all + # method dictionary shadows (those exists for all methodDicts that + # have been modified) and flush them + @expose_primitive(SYMBOL_FLUSH_CACHE, unwrap_spec=[object]) + def func(interp, s_frame, w_rcvr): + dicts_s = [] + from rpython.rlib import rgc + + roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] + pending = roots[:] + while pending: + gcref = pending.pop() + if not rgc.get_gcflag_extra(gcref): + rgc.toggle_gcflag_extra(gcref) + w_obj = rgc.try_cast_gcref_to_instance(shadow.MethodDictionaryShadow, gcref) + if w_obj is not None: + dicts_s.append(w_obj) + pending.extend(rgc.get_rpy_referents(gcref)) + + while roots: + gcref = roots.pop() + if rgc.get_gcflag_extra(gcref): + rgc.toggle_gcflag_extra(gcref) + roots.extend(rgc.get_rpy_referents(gcref)) + + for s_dict in dicts_s: + if s_dict.invalid: + s_dict.sync_cache() + return w_rcvr # ___________________________________________________________________________ # Miscellaneous Primitives (120-127) From noreply at buildbot.pypy.org Fri Jan 17 14:45:40 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 17 Jan 2014 14:45:40 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: fix jittests for untagged ints, and add test for simulated bitblt Message-ID: <20140117134540.C1C951C0459@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r589:68fc7d747dd8 Date: 2014-01-17 14:27 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/68fc7d747dd8/ Log: fix jittests for untagged ints, and add test for simulated bitblt diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py --- a/spyvm/test/jittest/base.py +++ b/spyvm/test/jittest/base.py @@ -24,7 +24,8 @@ proc = subprocess.Popen( [str(spy), "-r", code.replace("\n", "\r\n"), BenchmarkImage], cwd=str(tmpdir), - env={"PYPYLOG": "jit-log-opt:%s" % tmpdir.join("x.pypylog")} + env={"PYPYLOG": "jit-log-opt:%s" % tmpdir.join("x.pypylog"), + "SDL_VIDEODRIVER": "dummy"} ) proc.wait() data = logparser.parse_log_file(str(tmpdir.join("x.pypylog")), verbose=False) diff --git a/spyvm/test/jittest/test_basic.py b/spyvm/test/jittest/test_basic.py --- a/spyvm/test/jittest/test_basic.py +++ b/spyvm/test/jittest/test_basic.py @@ -13,9 +13,6 @@ i60 = int_le(i49, 10000) guard_true(i60, descr=) i61 = int_add(i49, 1) - i62 = int_sub(i61, -1073741824) - i63 = uint_lt(i62, -2147483648) - guard_true(i63, descr=) i64 = int_sub(i57, 1) setfield_gc(ConstPtr(ptr54), i64, descr=) i65 = int_le(i64, 0) @@ -41,9 +38,6 @@ i34 = int_le(i16, 1000000000) guard_true(i34, descr=) i36 = int_add(i16, 1) - i38 = int_sub(i36, -1073741824) - i40 = uint_lt(i38, -2147483648) - guard_true(i40, descr=) setfield_gc(ConstPtr(ptr19), 9999, descr=) jump(p0, p1, i36, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, 9999, descr=TargetToken(158474976)) """) @@ -60,9 +54,6 @@ i68 = int_le(i58, 10000) guard_true(i68, descr=) i69 = int_add(i58, 1) - i70 = int_sub(i69, 
-1073741824) - i71 = uint_lt(i70, -2147483648) - guard_true(i71, descr=) i72 = int_sub(i65, 1) setfield_gc(ConstPtr(ptr55), i72, descr=) i73 = int_le(i72, 0) @@ -87,9 +78,6 @@ i76 = int_le(i65, 100000), guard_true(i76, descr=), i77 = int_add(i65, 1), - i78 = int_sub(i77, -1073741824), - i79 = uint_lt(i78, -2147483648), - guard_true(i79, descr=), i80 = int_sub(i73, 2), setfield_gc(ConstPtr(ptr70), i80, descr=), i81 = int_le(i80, 0), @@ -121,12 +109,257 @@ i73 = int_le(i62, 100000), guard_true(i73, descr=), i74 = int_add(i62, 1), - i75 = int_sub(i74, -1073741824), - i76 = uint_lt(i75, -2147483648), - guard_true(i76, descr=), i77 = int_sub(i70, 1), setfield_gc(ConstPtr(ptr67), i77, descr=), i78 = int_le(i77, 0), guard_false(i78, descr=), jump(p0, p3, i74, p8, p10, p12, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, i77, descr=TargetToken(157713840)) """) + + def test_bitblt(self, spy, tmpdir): + # This used to have a call to array comparison in it + traces = self.run(spy, tmpdir, """ + Display beDisplay. + 1 to: 10000 do: [:i | Display fillWhite]. + """) + self.assert_matches(traces[0].loop, """ + guard_not_invalidated(descr=), + i540 = int_le(2, i151), + guard_false(i540, descr=), + i541 = getfield_gc_pure(p529, descr=), + i542 = int_add_ovf(i541, i158), + guard_no_overflow(descr=), + i543 = getfield_gc_pure(p532, descr=), + i544 = int_add_ovf(i543, i165), + guard_no_overflow(descr=), + i545 = int_add_ovf(i170, 1), + guard_no_overflow(descr=), + i546 = int_sub(i525, 3), + setfield_gc(ConstPtr(ptr171), i546, descr=), + i547 = int_le(i546, 0), + guard_false(i547, descr=), + i548 = int_le(i545, i179), + guard_true(i548, descr=), + i549 = getfield_gc_pure(p535, descr=), + i550 = int_mod(i549, i197), + i551 = int_rshift(i550, 31), + i552 = int_and(i197, i551), + i553 = int_add(i550, i552), + i554 = int_add_ovf(1, i553), + guard_no_overflow(descr=), + i555 = int_ge(i553, 0), + guard_true(i555, descr=), + i556 = int_lt(i553, i197), + guard_true(i556, descr=), + i557 = getarrayitem_gc(p213, i553, descr=), + i558 = uint_lt(i557, 0), + guard_false(i558, descr=), + i559 = uint_lt(i557, 2147483647), + guard_true(i559, descr=), + i560 = int_add_ovf(i549, i221), + guard_no_overflow(descr=), + i561 = int_ge(i557, 0), + guard_true(i561, descr=), + i562 = int_and(i557, i557), + i563 = uint_lt(i562, 2147483647), + guard_true(i563, descr=), + i564 = int_add_ovf(i544, 1), + guard_no_overflow(descr=), + i565 = int_ge(i544, 0), + guard_true(i565, descr=), + i566 = int_lt(i544, i250), + guard_true(i566, descr=), + i567 = getarrayitem_raw(i252, i544, descr=), + i568 = uint_lt(i567, 0), + guard_false(i568, descr=), + i569 = uint_lt(i567, 2147483647), + guard_true(i569, descr=), + i570 = int_ge(i562, 0), + guard_true(i570, descr=), + i571 = int_and(i282, i562), + i572 = uint_lt(i571, 2147483647), + guard_true(i572, descr=), + i573 = getarrayitem_raw(i252, i544, descr=), + i574 = uint_lt(i573, 0), + guard_false(i574, descr=), + i575 = uint_lt(i573, 2147483647), + guard_true(i575, descr=), + i576 = int_ge(i573, 0), + guard_true(i576, descr=), + i577 = int_and(i293, i573), + i578 = uint_lt(i577, 2147483647), + guard_true(i578, descr=), + i579 = int_ge(i571, 0), + guard_true(i579, descr=), + i580 = int_ge(i577, 0), + guard_true(i580, descr=), + i581 = int_or(i571, i577), + i582 = uint_lt(i581, 2147483647), + guard_true(i582, descr=), + setarrayitem_raw(i252, i544, i581, descr=), + i584 = int_lshift(i544, 3), + i585 = int_ge(i584, i250), + guard_false(i585, descr=), + i586 = uint_rshift(i581, 
i328), + i587 = int_lshift(i581, i315), + i588 = uint_rshift(i587, i328), + i589 = int_lshift(i588, 8), + i590 = int_or(i586, i589), + i591 = int_lshift(i587, i315), + i592 = uint_rshift(i591, i328), + i593 = int_lshift(i592, 16), + i594 = int_or(i590, i593), + i595 = int_lshift(i591, i315), + i596 = uint_rshift(i595, i328), + i597 = int_lshift(i596, 24), + i598 = int_or(i594, i597), + i599 = int_lshift(i595, i315), + setarrayitem_raw(i349, i584, i598, descr=), + i600 = int_add(i584, 1), + i601 = int_ge(i600, i250), + guard_false(i601, descr=), + i602 = uint_rshift(i599, i328), + i603 = int_lshift(i599, i315), + i604 = uint_rshift(i603, i328), + i605 = int_lshift(i604, 8), + i606 = int_or(i602, i605), + i607 = int_lshift(i603, i315), + i608 = uint_rshift(i607, i328), + i609 = int_lshift(i608, 16), + i610 = int_or(i606, i609), + i611 = int_lshift(i607, i315), + i612 = uint_rshift(i611, i328), + i613 = int_lshift(i612, 24), + i614 = int_or(i610, i613), + i615 = int_lshift(i611, i315), + setarrayitem_raw(i349, i600, i614, descr=), + i616 = int_add(i600, 1), + i617 = int_ge(i616, i250), + guard_false(i617, descr=), + i618 = uint_rshift(i615, i328), + i619 = int_lshift(i615, i315), + i620 = uint_rshift(i619, i328), + i621 = int_lshift(i620, 8), + i622 = int_or(i618, i621), + i623 = int_lshift(i619, i315), + i624 = uint_rshift(i623, i328), + i625 = int_lshift(i624, 16), + i626 = int_or(i622, i625), + i627 = int_lshift(i623, i315), + i628 = uint_rshift(i627, i328), + i629 = int_lshift(i628, 24), + i630 = int_or(i626, i629), + i631 = int_lshift(i627, i315), + setarrayitem_raw(i349, i616, i630, descr=), + i632 = int_add(i616, 1), + i633 = int_ge(i632, i250), + guard_false(i633, descr=), + i634 = uint_rshift(i631, i328), + i635 = int_lshift(i631, i315), + i636 = uint_rshift(i635, i328), + i637 = int_lshift(i636, 8), + i638 = int_or(i634, i637), + i639 = int_lshift(i635, i315), + i640 = uint_rshift(i639, i328), + i641 = int_lshift(i640, 16), + i642 = int_or(i638, i641), + i643 = int_lshift(i639, i315), + i644 = uint_rshift(i643, i328), + i645 = int_lshift(i644, 24), + i646 = int_or(i642, i645), + i647 = int_lshift(i643, i315), + setarrayitem_raw(i349, i632, i646, descr=), + i648 = int_add(i632, 1), + i649 = int_ge(i648, i250), + guard_false(i649, descr=), + i650 = uint_rshift(i647, i328), + i651 = int_lshift(i647, i315), + i652 = uint_rshift(i651, i328), + i653 = int_lshift(i652, 8), + i654 = int_or(i650, i653), + i655 = int_lshift(i651, i315), + i656 = uint_rshift(i655, i328), + i657 = int_lshift(i656, 16), + i658 = int_or(i654, i657), + i659 = int_lshift(i655, i315), + i660 = uint_rshift(i659, i328), + i661 = int_lshift(i660, 24), + i662 = int_or(i658, i661), + i663 = int_lshift(i659, i315), + setarrayitem_raw(i349, i648, i662, descr=), + i664 = int_add(i648, 1), + i665 = int_ge(i664, i250), + guard_false(i665, descr=), + i666 = uint_rshift(i663, i328), + i667 = int_lshift(i663, i315), + i668 = uint_rshift(i667, i328), + i669 = int_lshift(i668, 8), + i670 = int_or(i666, i669), + i671 = int_lshift(i667, i315), + i672 = uint_rshift(i671, i328), + i673 = int_lshift(i672, 16), + i674 = int_or(i670, i673), + i675 = int_lshift(i671, i315), + i676 = uint_rshift(i675, i328), + i677 = int_lshift(i676, 24), + i678 = int_or(i674, i677), + i679 = int_lshift(i675, i315), + setarrayitem_raw(i349, i664, i678, descr=), + i680 = int_add(i664, 1), + i681 = int_ge(i680, i250), + guard_false(i681, descr=), + i682 = uint_rshift(i679, i328), + i683 = int_lshift(i679, i315), + i684 = uint_rshift(i683, i328), + i685 = 
int_lshift(i684, 8), + i686 = int_or(i682, i685), + i687 = int_lshift(i683, i315), + i688 = uint_rshift(i687, i328), + i689 = int_lshift(i688, 16), + i690 = int_or(i686, i689), + i691 = int_lshift(i687, i315), + i692 = uint_rshift(i691, i328), + i693 = int_lshift(i692, 24), + i694 = int_or(i690, i693), + i695 = int_lshift(i691, i315), + setarrayitem_raw(i349, i680, i694, descr=), + i696 = int_add(i680, 1), + i697 = int_ge(i696, i250), + guard_false(i697, descr=), + i698 = uint_rshift(i695, i328), + i699 = int_lshift(i695, i315), + i700 = uint_rshift(i699, i328), + i701 = int_lshift(i700, 8), + i702 = int_or(i698, i701), + i703 = int_lshift(i699, i315), + i704 = uint_rshift(i703, i328), + i705 = int_lshift(i704, 16), + i706 = int_or(i702, i705), + i707 = int_lshift(i703, i315), + i708 = uint_rshift(i707, i328), + i709 = int_lshift(i708, 24), + i710 = int_or(i706, i709), + i711 = int_lshift(i707, i315), + setarrayitem_raw(i349, i696, i710, descr=), + i712 = int_add(i696, 1), + i713 = int_add_ovf(i542, i510), + guard_no_overflow(descr=), + i714 = int_add_ovf(i544, i510), + guard_no_overflow(descr=), + i715 = int_sub(i546, 26), + setfield_gc(ConstPtr(ptr171), i715, descr=), + i716 = int_le(i715, 0), + guard_false(i716, descr=), + p717 = new_with_vtable(ConstClass(W_SmallInteger)), + setfield_gc(p717, i713, descr=), + setarrayitem_gc(p146, 34, p717, descr=), + p718 = new_with_vtable(ConstClass(W_SmallInteger)), + setfield_gc(p718, i714, descr=), + setarrayitem_gc(p146, 35, p718, descr=), + p719 = new_with_vtable(ConstClass(W_SmallInteger)), + setfield_gc(p719, i560, descr=), + setarrayitem_gc(p146, 20, p719, descr=), + i720 = arraylen_gc(p146, descr=), + i721 = arraylen_gc(p521, descr=), + jump(p0, p3, p8, i557, p538, i562, p18, i545, p38, p40, p42, p44, p46, p48, p50, p52, p54, p56, p58, p60, p62, p64, p66, p68, p70, p72, p74, p76, p78, p80, p82, p84, p86, p88, p90, p92, p94, p96, p98, p100, p102, p104, p106, p108, p110, p112, p114, p116, p118, p120, p122, p124, p126, p128, p130, p132, p134, 1, p148, p717, i158, p156, p718, i165, p163, p146, i715, i179, p178, p719, i197, p188, p213, i221, p220, p228, p140, p242, i250, i252, i282, i293, i328, i315, i349, i510, p509, p538, p521, descr=TargetToken(169555520))] + """) From noreply at buildbot.pypy.org Fri Jan 17 14:45:41 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 17 Jan 2014 14:45:41 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: bitblt tests are flaky Message-ID: <20140117134541.CC4401C0459@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r590:740e1ac00fe0 Date: 2014-01-17 14:44 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/740e1ac00fe0/ Log: bitblt tests are flaky diff --git a/spyvm/test/jittest/test_basic.py b/spyvm/test/jittest/test_basic.py --- a/spyvm/test/jittest/test_basic.py +++ b/spyvm/test/jittest/test_basic.py @@ -116,12 +116,16 @@ jump(p0, p3, i74, p8, p10, p12, p14, p20, p22, p24, p26, p28, p30, p32, p34, p36, p38, p40, p42, p44, p46, i77, descr=TargetToken(157713840)) """) - def test_bitblt(self, spy, tmpdir): + def test_bitblt_fillWhite(self, spy, tmpdir): # This used to have a call to array comparison in it - traces = self.run(spy, tmpdir, """ - Display beDisplay. - 1 to: 10000 do: [:i | Display fillWhite]. - """) + traces = [] + retries = 10 + while len(traces) == 0 and retries > 0: + retries -= 1 + traces = self.run(spy, tmpdir, """ + Display beDisplay. + 1 to: 10000 do: [:i | Display fillWhite]. 
+ """) self.assert_matches(traces[0].loop, """ guard_not_invalidated(descr=), i540 = int_le(2, i151), @@ -363,3 +367,13 @@ i721 = arraylen_gc(p521, descr=), jump(p0, p3, p8, i557, p538, i562, p18, i545, p38, p40, p42, p44, p46, p48, p50, p52, p54, p56, p58, p60, p62, p64, p66, p68, p70, p72, p74, p76, p78, p80, p82, p84, p86, p88, p90, p92, p94, p96, p98, p100, p102, p104, p106, p108, p110, p112, p114, p116, p118, p120, p122, p124, p126, p128, p130, p132, p134, 1, p148, p717, i158, p156, p718, i165, p163, p146, i715, i179, p178, p719, i197, p188, p213, i221, p220, p228, p140, p242, i250, i252, i282, i293, i328, i315, i349, i510, p509, p538, p521, descr=TargetToken(169555520))] """) + + @py.test.mark.skipif("'just dozens of long traces'") + def test_bitblt_draw_windows(self, spy, tmpdir): + # This used to have a call to array comparison in it + traces = self.run(spy, tmpdir, """ + Display beDisplay. + 1 to: 100 do: [:i | ControlManager startUp]. + """) + self.assert_matches(traces[0].loop, """ + """) From noreply at buildbot.pypy.org Fri Jan 17 14:47:06 2014 From: noreply at buildbot.pypy.org (jerith) Date: Fri, 17 Jan 2014 14:47:06 +0100 (CET) Subject: [pypy-commit] pypy remove-del-from-generatoriterator: add a CO_YIELD_INSIDE_TRY code flag Message-ID: <20140117134706.B14DF1C0459@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: remove-del-from-generatoriterator Changeset: r68716:b3bc55c98005 Date: 2014-01-17 11:17 +0200 http://bitbucket.org/pypy/pypy/changeset/b3bc55c98005/ Log: add a CO_YIELD_INSIDE_TRY code flag diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1234,6 +1234,8 @@ flags |= consts.CO_NESTED if scope.is_generator: flags |= consts.CO_GENERATOR + if scope.has_yield_inside_try: + flags |= consts.CO_YIELD_INSIDE_TRY if scope.has_variable_arg: flags |= consts.CO_VARARGS if scope.has_keywords_arg: diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -17,6 +17,7 @@ CO_FUTURE_UNICODE_LITERALS = 0x20000 #pypy specific: CO_KILL_DOCSTRING = 0x100000 +CO_YIELD_INSIDE_TRY = 0x200000 PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -43,6 +43,7 @@ self.child_has_free = False self.nested = False self.doc_removable = False + self._visiting_try_body = False def lookup(self, name): """Find the scope of identifier 'name'.""" @@ -75,6 +76,14 @@ self.varnames.append(mangled) return mangled + def note_try_start(self, try_node): + """Called when a try is found, before visiting the body.""" + self._visiting_try_body = True + + def note_try_end(self, try_node): + """Called after visiting a try body.""" + self._visiting_try_body = False + def note_yield(self, yield_node): """Called when a yield is found.""" raise SyntaxError("'yield' outside function", yield_node.lineno, @@ -210,6 +219,7 @@ self.has_variable_arg = False self.has_keywords_arg = False self.is_generator = False + self.has_yield_inside_try = False self.optimized = True self.return_with_value = False self.import_star = None @@ -220,6 +230,8 @@ raise SyntaxError("'return' with argument inside generator", self.ret.lineno, self.ret.col_offset) 
self.is_generator = True + if self._visiting_try_body: + self.has_yield_inside_try = True def note_return(self, ret): if ret.value: @@ -505,3 +517,16 @@ else: role = SYM_ASSIGNED self.note_symbol(name.id, role) + + def visit_TryExcept(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.handlers) + self.visit_sequence(node.orelse) + + def visit_TryFinally(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.finalbody) diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -346,6 +346,14 @@ assert exc.msg == "'return' with argument inside generator" scp = self.func_scope("def f():\n return\n yield x") + def test_yield_inside_try(self): + scp = self.func_scope("def f(): yield x") + assert not scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n except: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n finally: pass") + assert scp.has_yield_inside_try + def test_return(self): for input in ("class x: return", "return"): exc = py.test.raises(SyntaxError, self.func_scope, input).value From noreply at buildbot.pypy.org Fri Jan 17 14:47:08 2014 From: noreply at buildbot.pypy.org (jerith) Date: Fri, 17 Jan 2014 14:47:08 +0100 (CET) Subject: [pypy-commit] pypy remove-del-from-generatoriterator: move GeneratorIterator.__del__ to GeneratorIteratorWithDel subclass, used for code with the CO_YIELD_INSIDE_TRY flag Message-ID: <20140117134708.08CC21C0459@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: remove-del-from-generatoriterator Changeset: r68717:050108051e15 Date: 2014-01-17 15:45 +0200 http://bitbucket.org/pypy/pypy/changeset/050108051e15/ Log: move GeneratorIterator.__del__ to GeneratorIteratorWithDel subclass, used for code with the CO_YIELD_INSIDE_TRY flag diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -155,20 +155,6 @@ code_name = self.pycode.co_name return space.wrap(code_name) - def __del__(self): - # Only bother enqueuing self to raise an exception if the frame is - # still not finished and finally or except blocks are present. - self.clear_all_weakrefs() - if self.frame is not None: - block = self.frame.lastblock - while block is not None: - if not isinstance(block, LoopBlock): - self.enqueue_for_destruction(self.space, - GeneratorIterator.descr_close, - "interrupting generator of ") - break - block = block.previous - # Results can be either an RPython list of W_Root, or it can be an # app-level W_ListObject, which also has an append() method, that's why we # generate 2 versions of the function and 2 jit drivers. @@ -211,3 +197,20 @@ return unpack_into unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() + + +class GeneratorIteratorWithDel(GeneratorIterator): + + def __del__(self): + # Only bother enqueuing self to raise an exception if the frame is + # still not finished and finally or except blocks are present. 
+        self.clear_all_weakrefs()
+        if self.frame is not None:
+            block = self.frame.lastblock
+            while block is not None:
+                if not isinstance(block, LoopBlock):
+                    self.enqueue_for_destruction(self.space,
+                                                 GeneratorIterator.descr_close,
+                                                 "interrupting generator of ")
+                    break
+                block = block.previous
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -12,7 +12,7 @@
 from pypy.interpreter.gateway import unwrap_spec
 from pypy.interpreter.astcompiler.consts import (
     CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED,
-    CO_GENERATOR, CO_KILL_DOCSTRING)
+    CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY)
 from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT
 from rpython.rlib.rarithmetic import intmask
 from rpython.rlib.objectmodel import compute_hash
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -167,8 +167,12 @@
     def run(self):
         """Start this frame's execution."""
         if self.getcode().co_flags & pycode.CO_GENERATOR:
-            from pypy.interpreter.generator import GeneratorIterator
-            return self.space.wrap(GeneratorIterator(self))
+            if pycode.CO_YIELD_INSIDE_TRY:
+                from pypy.interpreter.generator import GeneratorIteratorWithDel
+                return self.space.wrap(GeneratorIteratorWithDel(self))
+            else:
+                from pypy.interpreter.generator import GeneratorIterator
+                return self.space.wrap(GeneratorIterator(self))
         else:
             return self.execute_frame()
 
diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py
--- a/pypy/module/_pickle_support/maker.py
+++ b/pypy/module/_pickle_support/maker.py
@@ -5,7 +5,7 @@
 from pypy.interpreter.module import Module
 from pypy.interpreter.pyframe import PyFrame
 from pypy.interpreter.pytraceback import PyTraceback
-from pypy.interpreter.generator import GeneratorIterator
+from pypy.interpreter.generator import GeneratorIteratorWithDel
 from rpython.rlib.objectmodel import instantiate
 from pypy.interpreter.gateway import unwrap_spec
 from pypy.objspace.std.iterobject import W_SeqIterObject, W_ReverseSeqIterObject
@@ -60,7 +60,7 @@
     return space.wrap(tb)
 
 def generator_new(space):
-    new_generator = instantiate(GeneratorIterator)
+    new_generator = instantiate(GeneratorIteratorWithDel)
     return space.wrap(new_generator)
 
 @unwrap_spec(current=int, remaining=int, step=int)

From noreply at buildbot.pypy.org  Fri Jan 17 14:51:00 2014
From: noreply at buildbot.pypy.org (rguillebert)
Date: Fri, 17 Jan 2014 14:51:00 +0100 (CET)
Subject: [pypy-commit] pypy resume-refactor: Add the RESUME_PUT_CONST resume
 resop
Message-ID: <20140117135100.824B11C087E@cobra.cs.uni-duesseldorf.de>

Author: Romain Guillebert
Branch: resume-refactor
Changeset: r68718:354d632cf82c
Date: 2014-01-17 14:50 +0100
http://bitbucket.org/pypy/pypy/changeset/354d632cf82c/

Log:	Add the RESUME_PUT_CONST resume resop

diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py
--- a/rpython/jit/metainterp/resoperation.py
+++ b/rpython/jit/metainterp/resoperation.py
@@ -473,6 +473,7 @@
     'RESUME_PUT/3',    # arguments are as follows - box or position in the backend,
                        # the frame index (counting from top) and position in the
                        # frontend
+    'RESUME_PUT_CONST/3',
     'RESUME_NEW/0d',
     'RESUME_NEW_WITH_VTABLE/1',
     'RESUME_NEW_ARRAY/1d',

From noreply at buildbot.pypy.org  Fri Jan 17 14:53:22 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Fri, 17 Jan 2014 14:53:22 +0100 (CET)
Subject: [pypy-commit]
 stmgc c7: partially implement aborting
Message-ID: <20140117135322.A79351C087E@cobra.cs.uni-duesseldorf.de>

Author: Remi Meier
Branch: c7
Changeset: r625:90bb5600924e
Date: 2014-01-17 14:53 +0100
http://bitbucket.org/pypy/stmgc/changeset/90bb5600924e/

Log:	partially implement aborting

diff --git a/c7/core.c b/c7/core.c
--- a/c7/core.c
+++ b/c7/core.c
@@ -911,19 +911,69 @@
     stm_stop_lock();
 }
 
+
+static void reset_modified_from_other_threads()
+{
+    /* pull the right versions from other threads in order
+       to reset our pages as part of an abort */
+
+    struct stm_list_s *modified = _STM_TL2->modified_objects;
+    char *local_base = _STM_TL2->thread_base;
+    char *remote_base = get_thread_base(1 - _STM_TL2->thread_num);
+    char *t0_base = get_thread_base(0);
+
+    STM_LIST_FOREACH(modified, ({
+        /* note: same as push_modified_to... but src/dst swapped
+           XXX: unify both... */
+        char *dst = REAL_ADDRESS(local_base, item);
+        char *src = REAL_ADDRESS(remote_base, item);
+        size_t size = stmcb_size((struct object_s*)src);
+        memcpy(dst, src, size);
+
+        /* copying from the other thread re-added the
+           WRITE_BARRIER flag */
+        assert(item->stm_flags & GCFLAG_WRITE_BARRIER);
+
+        struct object_s *t0_obj = (struct object_s*)
+            REAL_ADDRESS(t0_base, item);
+        if (t0_base != local_base) {
+            /* clear the write-lock (WE have modified the obj) */
+            assert(t0_obj->stm_write_lock);
+            t0_obj->stm_write_lock = 0;
+        } else {
+            /* done by the memcpy */
+            assert(!t0_obj->stm_write_lock);
+        }
+    }));
+}
+
+
 void stm_abort_transaction(void)
 {
+    /* here we hold the shared lock as a reader or writer */
     assert(_STM_TL2->running_transaction);
 
-    // XXX reset all the modified objects!!
+    /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */
+    reset_modified_from_other_threads();
     stm_list_clear(_STM_TL2->modified_objects);
 
-    /* re-add GCFLAG_WRITE_BARRIER */
+    /* clear old_objects_to_trace (they will have the WRITE_BARRIER flag
+       set because the ones we care about are also in modified_objects) */
     stm_list_clear(_STM_TL2->old_objects_to_trace);
 
     /* clear the nursery */
+    localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096);
+    memset((void*)real_address((object_t*)nursery_base), 0x0,
+           _STM_TL2->nursery_current - nursery_base);
+    _STM_TL2->nursery_current = nursery_base;
 
-    /* unreserve uncommitted_pages */
+    /* unreserve uncommitted_pages and mark them as SHARED again */
+    /* STM_LIST_FOREACH(_STM_TL2->uncommitted_pages, ({ */
+    /*     uintptr_t pagenum = (uintptr_t)item; */
+    /*     flag_page_private[pagenum] = SHARED_PAGE; */
+    /* })); */
     stm_list_clear(_STM_TL2->uncommitted_pages);
 
+    /* XXX: forget about GCFLAG_UNCOMMITTED objects */

From noreply at buildbot.pypy.org  Fri Jan 17 14:55:42 2014
From: noreply at buildbot.pypy.org (rguillebert)
Date: Fri, 17 Jan 2014 14:55:42 +0100 (CET)
Subject: [pypy-commit] pypy resume-refactor: Backed out changeset
 354d632cf82c
Message-ID: <20140117135542.D27261C087E@cobra.cs.uni-duesseldorf.de>

Author: Romain Guillebert
Branch: resume-refactor
Changeset: r68719:c93364c949d0
Date: 2014-01-17 14:54 +0100
http://bitbucket.org/pypy/pypy/changeset/c93364c949d0/

Log:	Backed out changeset 354d632cf82c

diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py
--- a/rpython/jit/metainterp/resoperation.py
+++ b/rpython/jit/metainterp/resoperation.py
@@ -473,7 +473,6 @@
     'RESUME_PUT/3',    # arguments are as follows - box or position in the backend,
                        # the frame index (counting from top) and position in the
                        # frontend
-    'RESUME_PUT_CONST/3',
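    # Rough illustration, inferred from the comment on RESUME_PUT above: such
    # an operation would be built roughly as
    #   ResOperation(rop.RESUME_PUT, [box, ConstInt(frame_index),
    #                                 ConstInt(position_in_frame)], None)
    # and RESUME_PUT_CONST was presumably intended as the variant whose first
    # argument is a Const rather than a live box.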
     'RESUME_NEW/0d',
     'RESUME_NEW_WITH_VTABLE/1',
     'RESUME_NEW_ARRAY/1d',

From noreply at buildbot.pypy.org  Fri Jan 17 15:09:12 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Fri, 17 Jan 2014 15:09:12 +0100 (CET)
Subject: [pypy-commit]
*/ + char *dst = REAL_ADDRESS(local_base, item); + char *src = REAL_ADDRESS(remote_base, item); + size_t size = stmcb_size((struct object_s*)src); + memcpy(dst, src, size); + + /* copying from the other thread re-added the + WRITE_BARRIER flag */ + assert(item->stm_flags & GCFLAG_WRITE_BARRIER); + + struct object_s *t0_obj = (struct object_s*) + REAL_ADDRESS(t0_base, item); + if (t0_base != local_base) { + /* clear the write-lock (WE have modified the obj) */ + assert(t0_obj->stm_write_lock); + t0_obj->stm_write_lock = 0; + } else { + /* done by the memcpy */ + assert(!t0_obj->stm_write_lock); + } + })); } From noreply at buildbot.pypy.org Fri Jan 17 15:11:36 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 15:11:36 +0100 (CET) Subject: [pypy-commit] stmgc c7: small fix Message-ID: <20140117141136.E28FD1C087E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r627:92e6ecac7c9d Date: 2014-01-17 15:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/92e6ecac7c9d/ Log: small fix diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -330,10 +330,11 @@ def switch(self, thread_num): assert thread_num != self.current_thread + self.current_thread = thread_num if lib._stm_is_in_transaction(): stm_start_safe_point() lib._stm_restore_local_state(thread_num) if lib._stm_is_in_transaction(): stm_stop_safe_point() - self.current_thread = thread_num + From noreply at buildbot.pypy.org Fri Jan 17 15:29:26 2014 From: noreply at buildbot.pypy.org (jerith) Date: Fri, 17 Jan 2014 15:29:26 +0100 (CET) Subject: [pypy-commit] pypy remove-del-from-generatoriterator: handle yield inside with and also nested blocks Message-ID: <20140117142926.533601C0291@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: remove-del-from-generatoriterator Changeset: r68720:e25d33c7d99a Date: 2014-01-17 16:28 +0200 http://bitbucket.org/pypy/pypy/changeset/e25d33c7d99a/ Log: handle yield inside with and also nested blocks diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -43,7 +43,7 @@ self.child_has_free = False self.nested = False self.doc_removable = False - self._visiting_try_body = False + self._in_try_body_depth = 0 def lookup(self, name): """Find the scope of identifier 'name'.""" @@ -78,11 +78,11 @@ def note_try_start(self, try_node): """Called when a try is found, before visiting the body.""" - self._visiting_try_body = True + self._in_try_body_depth += 1 def note_try_end(self, try_node): """Called after visiting a try body.""" - self._visiting_try_body = False + self._in_try_body_depth -= 1 def note_yield(self, yield_node): """Called when a yield is found.""" @@ -230,7 +230,7 @@ raise SyntaxError("'return' with argument inside generator", self.ret.lineno, self.ret.col_offset) self.is_generator = True - if self._visiting_try_body: + if self._in_try_body_depth > 0: self.has_yield_inside_try = True def note_return(self, ret): @@ -475,7 +475,12 @@ self.scope.new_temporary_name() if wih.optional_vars: self.scope.new_temporary_name() - ast.GenericASTVisitor.visit_With(self, wih) + wih.context_expr.walkabout(self) + if wih.optional_vars: + wih.optional_vars.walkabout(self) + self.scope.note_try_start(wih) + self.visit_sequence(wih.body) + self.scope.note_try_end(wih) def visit_arguments(self, arguments): scope = self.scope diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py 
b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -353,6 +353,15 @@ assert scp.has_yield_inside_try scp = self.func_scope("def f():\n try:\n yield x\n finally: pass") assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n with x: yield y") + assert scp.has_yield_inside_try + + def test_yield_outside_try(self): + for input in ("try: pass\n except: pass", + "try: pass\n finally: pass", + "with x: pass"): + input = "def f():\n yield y\n %s\n yield y" % (input,) + assert not self.func_scope(input).has_yield_inside_try def test_return(self): for input in ("class x: return", "return"): From noreply at buildbot.pypy.org Fri Jan 17 15:50:00 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 15:50:00 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix race condition in abort Message-ID: <20140117145000.3C8801C3085@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r628:1030cba5f19d Date: 2014-01-17 15:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/1030cba5f19d/ Log: fix race condition in abort diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -87,7 +87,7 @@ static uintptr_t index_page_never_used; static struct stm_list_s *volatile pending_updates; static uint8_t flag_page_private[NB_PAGES]; /* xxx_PAGE constants above */ - +static uint8_t write_locks[READMARKER_END - READMARKER_START]; /************************************************************/ uintptr_t _stm_reserve_page(void); @@ -318,23 +318,23 @@ static void push_modified_to_other_threads() { + /* WE HAVE THE EXCLUSIVE LOCK HERE */ + struct stm_list_s *modified = _STM_TL2->modified_objects; char *local_base = _STM_TL2->thread_base; char *remote_base = get_thread_base(1 - _STM_TL2->thread_num); bool conflicted = 0; - char *t0_base = get_thread_base(0); STM_LIST_FOREACH( modified, ({ if (!conflicted) conflicted = _stm_was_read_remote(remote_base, item); - + /* clear the write-lock */ - struct object_s *t0_obj = (struct object_s*) - REAL_ADDRESS(t0_base, item); - assert(t0_obj->stm_write_lock); - t0_obj->stm_write_lock = 0; + uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; + assert(write_locks[lock_idx]); + write_locks[lock_idx] = 0; char *src = REAL_ADDRESS(local_base, item); char *dst = REAL_ADDRESS(remote_base, item); @@ -377,13 +377,10 @@ /* privatize if SHARED_PAGE */ _stm_privatize(pagenum); - /* lock the object for writing in thread 0's page */ - uintptr_t t0_offset = (uintptr_t)obj; - char* t0_addr = get_thread_base(0) + t0_offset; - struct object_s *t0_obj = (struct object_s *)t0_addr; - - int previous; - while ((previous = __sync_lock_test_and_set(&t0_obj->stm_write_lock, 1))) { + /* claim the write-lock for this object */ + uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; + uint8_t previous; + while ((previous = __sync_lock_test_and_set(&write_locks[lock_idx], 1))) { stm_abort_transaction(); /* XXX: only abort if we are younger */ spin_loop(); @@ -710,6 +707,7 @@ { munmap(object_pages, TOTAL_MEMORY); memset(flag_page_private, 0, sizeof(flag_page_private)); + memset(write_locks, 0, sizeof(write_locks)); pthread_rwlock_destroy(&rwlock_shared); object_pages = NULL; } @@ -924,32 +922,30 @@ struct stm_list_s *modified = _STM_TL2->modified_objects; char *local_base = _STM_TL2->thread_base; char *remote_base = get_thread_base(1 - _STM_TL2->thread_num); - char *t0_base = get_thread_base(0); STM_LIST_FOREACH( modified, 
({ /* note: same as push_modified_to... but src/dst swapped XXX: unify both... */ + char *dst = REAL_ADDRESS(local_base, item); char *src = REAL_ADDRESS(remote_base, item); size_t size = stmcb_size((struct object_s*)src); memcpy(dst, src, size); - + /* copying from the other thread re-added the WRITE_BARRIER flag */ assert(item->stm_flags & GCFLAG_WRITE_BARRIER); + + /* write all changes to the object before we release the + write lock below */ + write_fence(); - struct object_s *t0_obj = (struct object_s*) - REAL_ADDRESS(t0_base, item); - if (t0_base != local_base) { - /* clear the write-lock (WE have modified the obj) */ - assert(t0_obj->stm_write_lock); - t0_obj->stm_write_lock = 0; - } else { - /* done by the memcpy */ - assert(!t0_obj->stm_write_lock); - } + /* clear the write-lock */ + uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; + assert(write_locks[lock_idx]); + write_locks[lock_idx] = 0; })); } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -335,6 +335,6 @@ stm_start_safe_point() lib._stm_restore_local_state(thread_num) if lib._stm_is_in_transaction(): - stm_stop_safe_point() + stm_stop_safe_point() # can raise Conflict From noreply at buildbot.pypy.org Fri Jan 17 15:51:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 17 Jan 2014 15:51:59 +0100 (CET) Subject: [pypy-commit] stmgc c7: Bug fixed by previous checkin Message-ID: <20140117145159.EDD871C3085@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r629:0e7d26cbc91a Date: 2014-01-17 15:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/0e7d26cbc91a/ Log: Bug fixed by previous checkin diff --git a/c7/test/test_bug.py b/c7/test/test_bug.py --- a/c7/test/test_bug.py +++ b/c7/test/test_bug.py @@ -1,4 +1,5 @@ from support import * +import py class TestBug(BaseTest): @@ -427,3 +428,222 @@ # self.switch(0) stm_stop_transaction(False) #11 + + def test_write_marker_no_conflict(self): + # initialization + stm_start_transaction() + lp0 = stm_allocate(16) + stm_set_char(lp0, '\x00') + stm_push_root(lp0) + lp1 = stm_allocate(16) + stm_set_char(lp1, '\x01') + stm_push_root(lp1) + lp2 = stm_allocate(16) + stm_set_char(lp2, '\x02') + stm_push_root(lp2) + lp3 = stm_allocate(16) + stm_set_char(lp3, '\x03') + stm_push_root(lp3) + lp4 = stm_allocate(16) + stm_set_char(lp4, '\x04') + stm_push_root(lp4) + stm_stop_transaction() + lp4 = stm_pop_root() + lp3 = stm_pop_root() + lp2 = stm_pop_root() + lp1 = stm_pop_root() + lp0 = stm_pop_root() + # + self.switch(1) + stm_start_transaction() + assert stm_get_char(lp1) == '\x01' + stm_set_char(lp1, '\x15') + # + self.switch(0) + stm_start_transaction() + assert stm_get_char(lp2) == '\x02' + # + self.switch(1) + assert stm_get_char(lp4) == '\x04' + assert stm_get_char(lp4) == '\x04' + assert stm_get_char(lp2) == '\x02' + assert stm_get_char(lp0) == '\x00' + assert stm_get_char(lp1) == '\x15' + assert stm_get_char(lp2) == '\x02' + stm_stop_transaction() #1 lp1='\x15' + stm_start_transaction() + stm_stop_transaction() #2 + stm_start_transaction() + assert stm_get_char(lp2) == '\x02' + assert stm_get_char(lp4) == '\x04' + assert stm_get_char(lp2) == '\x02' + assert stm_get_char(lp0) == '\x00' + assert stm_get_char(lp0) == '\x00' + assert stm_get_char(lp1) == '\x15' + assert stm_get_char(lp0) == '\x00' + assert stm_get_char(lp2) == '\x02' + assert stm_get_char(lp0) == '\x00' + assert stm_get_char(lp4) == '\x04' + stm_set_char(lp4, '\xdf') + # + self.switch(0) + assert stm_get_char(lp3) == '\x03' + 
stm_stop_transaction() #3 + # + self.switch(1) + assert stm_get_char(lp4) == '\xdf' + stm_set_char(lp4, '\x0c') + assert stm_get_char(lp2) == '\x02' + assert stm_get_char(lp3) == '\x03' + # + self.switch(0) + stm_start_transaction() + assert stm_get_char(lp3) == '\x03' + stm_stop_transaction() #4 + # + self.switch(1) + assert stm_get_char(lp0) == '\x00' + stm_set_char(lp0, 's') + # + self.switch(0) + stm_start_transaction() + assert stm_get_char(lp1) == '\x15' + # + self.switch(1) + assert stm_get_char(lp4) == '\x0c' + stm_set_char(lp4, 'Q') + assert stm_get_char(lp2) == '\x02' + # + self.switch(0) + assert stm_get_char(lp3) == '\x03' + # + self.switch(1) + assert stm_get_char(lp0) == 's' + # + self.switch(0) + assert stm_get_char(lp4) == '\x04' + assert stm_get_char(lp1) == '\x15' + stm_set_char(lp1, '\xd1') + stm_stop_transaction() #5 lp1='\xd1' + stm_start_transaction() + assert stm_get_char(lp2) == '\x02' + stm_set_char(lp2, 'j') + # + py.test.raises(Conflict, self.switch, 1) + stm_start_transaction() + assert stm_get_char(lp3) == '\x03' + # + self.switch(0) + assert stm_get_char(lp4) == '\x04' + # + self.switch(1) + assert stm_get_char(lp0) == '\x00' + assert stm_get_char(lp3) == '\x03' + # + self.switch(0) + assert stm_get_char(lp4) == '\x04' + assert stm_get_char(lp4) == '\x04' + # + self.switch(1) + assert stm_get_char(lp0) == '\x00' + # + self.switch(0) + assert stm_get_char(lp0) == '\x00' + assert stm_get_char(lp1) == '\xd1' + # + self.switch(1) + assert stm_get_char(lp3) == '\x03' + # + self.switch(0) + assert stm_get_char(lp1) == '\xd1' + assert stm_get_char(lp0) == '\x00' + # + self.switch(1) + assert stm_get_char(lp2) == '\x02' + assert stm_get_char(lp2) == '\x02' + assert stm_get_char(lp0) == '\x00' + stm_set_char(lp0, '\xdf') + # + self.switch(0) + assert stm_get_char(lp2) == 'j' + stm_set_char(lp2, '\xed') + assert stm_get_char(lp1) == '\xd1' + # + self.switch(1) + assert stm_get_char(lp1) == '\xd1' + assert stm_get_char(lp3) == '\x03' + # + self.switch(0) + assert stm_get_char(lp2) == '\xed' + stm_set_char(lp2, '\x02') + assert stm_get_char(lp2) == '\x02' + stm_set_char(lp2, 'Q') + # + self.switch(1) + assert stm_get_char(lp0) == '\xdf' + stm_set_char(lp0, '#') + # + self.switch(0) + assert stm_get_char(lp1) == '\xd1' + stm_stop_transaction() #6 lp2='Q' + # + py.test.raises(Conflict, self.switch, 1) + stm_start_transaction() + assert stm_get_char(lp0) == '\x00' + assert stm_get_char(lp3) == '\x03' + stm_set_char(lp3, '\xf9') + # + self.switch(0) + stm_start_transaction() + assert stm_get_char(lp0) == '\x00' + assert stm_get_char(lp1) == '\xd1' + # + self.switch(1) + stm_stop_transaction() #7 lp3='\xf9' + # + self.switch(0) + stm_stop_transaction() #8 + stm_start_transaction() + assert stm_get_char(lp4) == '\x04' + assert stm_get_char(lp3) == '\xf9' + # + self.switch(1) + stm_start_transaction() + assert stm_get_char(lp0) == '\x00' + stm_set_char(lp0, 'N') + # + self.switch(0) + assert stm_get_char(lp4) == '\x04' + stm_set_char(lp4, 'K') + # + self.switch(1) + assert stm_get_char(lp4) == '\x04' + assert stm_get_char(lp3) == '\xf9' + # + self.switch(0) + assert stm_get_char(lp3) == '\xf9' + assert stm_get_char(lp4) == 'K' + stm_set_char(lp4, '\xce') + # + self.switch(1) + stm_stop_transaction() #9 lp0='N' + stm_start_transaction() + assert stm_get_char(lp2) == 'Q' + assert stm_get_char(lp4) == '\x04' + assert stm_get_char(lp1) == '\xd1' + stm_set_char(lp1, '\xdb') + stm_stop_transaction() #10 lp1='\xdb' + # + self.switch(0) + stm_stop_transaction() #11 lp4='\xce' + 
stm_start_transaction() + assert stm_get_char(lp2) == 'Q' + assert stm_get_char(lp0) == 'N' + # + self.switch(1) + stm_start_transaction() + assert stm_get_char(lp0) == 'N' + stm_set_char(lp0, '\x80') + # + stm_stop_transaction() + py.test.raises(Conflict, self.switch, 0) From noreply at buildbot.pypy.org Fri Jan 17 16:01:50 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Fri, 17 Jan 2014 16:01:50 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: Add missing imports Message-ID: <20140117150150.2EBF81C3183@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: resume-refactor Changeset: r68721:6c647ae0fbf8 Date: 2014-01-17 16:00 +0100 http://bitbucket.org/pypy/pypy/changeset/6c647ae0fbf8/ Log: Add missing imports diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -11,6 +11,7 @@ from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin from rpython.jit.metainterp import executor from rpython.jit.codewriter import heaptracker, longlong +from rpython.jit.resume.rescode import TAGBOX, TAGCONST, TAGINT, TAGVIRTUAL class Storage: rd_frame_info_list = None @@ -24,14 +25,14 @@ class FakeOptimizer(object): def __init__(self, values): self.values = values - + def getvalue(self, box): try: value = self.values[box] except KeyError: value = self.values[box] = OptValue(box) return value - + def test_tag(): assert tag(3, 1) == rffi.r_short(3<<2|1) @@ -81,7 +82,7 @@ def newframe(self, jitcode): frame = FakeFrame(jitcode, -1) self.framestack.append(frame) - return frame + return frame def execute_and_record(self, opnum, descr, *argboxes): resbox = executor.execute(self.cpu, None, opnum, descr, *argboxes) @@ -255,7 +256,7 @@ class FakeResumeDataReader(AbstractResumeDataReader): VirtualCache = get_VirtualCache_class('Fake') - + def allocate_with_vtable(self, known_class): return FakeBuiltObject(vtable=known_class) def allocate_struct(self, typedescr): @@ -398,7 +399,7 @@ def setup_resume_at_op(self, pc, exception_target, env): self.__init__(self.jitcode, pc, exception_target, *env) - + def __eq__(self, other): return self.__dict__ == other.__dict__ def __ne__(self, other): @@ -516,7 +517,7 @@ def test_rebuild_from_resumedata(): py.test.skip("XXX rewrite") b1, b2, b3 = [BoxInt(), BoxPtr(), BoxInt()] - c1, c2, c3 = [ConstInt(1), ConstInt(2), ConstInt(3)] + c1, c2, c3 = [ConstInt(1), ConstInt(2), ConstInt(3)] storage = Storage() fs = [FakeFrame("code0", 0, b1, c1, b2), FakeFrame("code1", 3, b3, c2, b1), @@ -540,7 +541,7 @@ def test_rebuild_from_resumedata_with_virtualizable(): py.test.skip("XXX rewrite") b1, b2, b3, b4 = [BoxInt(), BoxPtr(), BoxInt(), BoxPtr()] - c1, c2, c3 = [ConstInt(1), ConstInt(2), ConstInt(3)] + c1, c2, c3 = [ConstInt(1), ConstInt(2), ConstInt(3)] storage = Storage() fs = [FakeFrame("code0", 0, b1, c1, b2), FakeFrame("code1", 3, b3, c2, b1), @@ -565,7 +566,7 @@ def test_rebuild_from_resumedata_two_guards(): py.test.skip("XXX rewrite") b1, b2, b3, b4 = [BoxInt(), BoxPtr(), BoxInt(), BoxInt()] - c1, c2, c3 = [ConstInt(1), ConstInt(2), ConstInt(3)] + c1, c2, c3 = [ConstInt(1), ConstInt(2), ConstInt(3)] storage = Storage() fs = [FakeFrame("code0", 0, b1, c1, b2), FakeFrame("code1", 3, b3, c2, b1), @@ -574,7 +575,7 @@ storage2 = Storage() fs = fs[:-1] + [FakeFrame("code2", 10, c3, b2, b4)] capture_resumedata(fs, None, [], storage2) - + memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = 
ResumeDataVirtualAdder(storage, memo) liveboxes = modifier.finish(FakeOptimizer({})) @@ -625,7 +626,7 @@ def test_rebuild_from_resumedata_two_guards_w_virtuals(): py.test.skip("XXX rewrite") - + b1, b2, b3, b4, b5 = [BoxInt(), BoxPtr(), BoxInt(), BoxInt(), BoxInt()] c1, c2, c3, c4 = [ConstInt(1), ConstInt(2), ConstInt(3), LLtypeMixin.nodebox.constbox()] @@ -637,7 +638,7 @@ storage2 = Storage() fs = fs[:-1] + [FakeFrame("code2", 10, c3, b2, b4)] capture_resumedata(fs, None, [], storage2) - + memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) values = {b2: virtual_value(b2, b5, c4)} modifier = ResumeDataVirtualAdder(storage, memo) @@ -648,12 +649,12 @@ b6 = BoxPtr() v6 = virtual_value(b6, c2, None) - v6.setfield(LLtypeMixin.nextdescr, v6) + v6.setfield(LLtypeMixin.nextdescr, v6) values = {b2: virtual_value(b2, b4, v6), b6: v6} memo.clear_box_virtual_numbers() modifier = ResumeDataVirtualAdder(storage2, memo) liveboxes2 = modifier.finish(FakeOptimizer(values)) - assert len(storage2.rd_virtuals) == 2 + assert len(storage2.rd_virtuals) == 2 assert storage2.rd_virtuals[0].fieldnums == [tag(len(liveboxes2)-1, TAGBOX), tag(-1, TAGVIRTUAL)] assert storage2.rd_virtuals[1].fieldnums == [tag(2, TAGINT), @@ -684,7 +685,7 @@ fs2 = [FakeFrame("code0", 0, b1t, c1, b2t), FakeFrame("code1", 3, b3t, c2, b1t), FakeFrame("code2", 10, c3, b2t, b4t)] - assert metainterp.framestack == fs2 + assert metainterp.framestack == fs2 def test_rebuild_from_resumedata_two_guards_w_shared_virtuals(): py.test.skip("XXX rewrite") @@ -694,7 +695,7 @@ storage = Storage() fs = [FakeFrame("code0", 0, c1, b2, b3)] capture_resumedata(fs, None, [], storage) - + memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) values = {b2: virtual_value(b2, b5, c4)} modifier = ResumeDataVirtualAdder(storage, memo) @@ -712,7 +713,7 @@ assert len(storage2.rd_virtuals) == 2 assert storage2.rd_virtuals[1].fieldnums == storage.rd_virtuals[0].fieldnums assert storage2.rd_virtuals[1] is storage.rd_virtuals[0] - + def test_resumedata_top_recursive_virtuals(): py.test.skip("XXX rewrite") @@ -720,7 +721,7 @@ storage = Storage() fs = [FakeFrame("code0", 0, b1, b2)] capture_resumedata(fs, None, [], storage) - + memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) v1 = virtual_value(b1, b3, None) v2 = virtual_value(b2, b3, v1) @@ -733,7 +734,7 @@ assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX), tag(1, TAGVIRTUAL)] assert storage.rd_virtuals[1].fieldnums == [tag(-1, TAGBOX), - tag(0, TAGVIRTUAL)] + tag(0, TAGVIRTUAL)] # ____________________________________________________________ @@ -759,7 +760,7 @@ demo55o = lltype.cast_opaque_ptr(llmemory.GCREF, demo55) demo66 = lltype.malloc(LLtypeMixin.NODE) demo66o = lltype.cast_opaque_ptr(llmemory.GCREF, demo66) - + def test_ResumeDataLoopMemo_refs(): cpu = LLtypeMixin.cpu memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) @@ -767,7 +768,7 @@ tagged = memo.getconst(const) index, tagbits = untag(tagged) assert tagbits == TAGCONST - assert memo.consts[index] is const + assert memo.consts[index] is const tagged = memo.getconst(cpu.ts.ConstRef(demo55o)) index2, tagbits = untag(tagged) assert tagbits == TAGCONST @@ -775,7 +776,7 @@ tagged = memo.getconst(cpu.ts.ConstRef(demo66o)) index3, tagbits = untag(tagged) assert tagbits == TAGCONST - assert index3 != index + assert index3 != index tagged = memo.getconst(cpu.ts.CONST_NULL) assert tagged == NULLREF @@ -789,7 +790,7 @@ def test_ResumeDataLoopMemo_number(): b1, b2, b3, b4, b5 = [BoxInt(), BoxInt(), BoxInt(), BoxPtr(), BoxPtr()] - c1, c2, c3, c4 = 
[ConstInt(1), ConstInt(2), ConstInt(3), ConstInt(4)] + c1, c2, c3, c4 = [ConstInt(1), ConstInt(2), ConstInt(3), ConstInt(4)] env = [b1, c1, b2, b1, c2] snap = Snapshot(None, env) @@ -814,7 +815,7 @@ numb2, liveboxes2, v = memo.number(FakeOptimizer({}), snap2) assert v == 0 - + assert liveboxes2 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), b3: tag(2, TAGBOX)} assert liveboxes2 is not liveboxes @@ -840,7 +841,7 @@ numb3, liveboxes3, v = memo.number(FakeOptimizer({b3: FakeValue(False, c4)}), snap3) assert v == 0 - + assert liveboxes3 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX)} assert list(numb3.nums) == [tag(3, TAGINT), tag(4, TAGINT), tag(0, TAGBOX), tag(3, TAGINT)] @@ -848,12 +849,12 @@ # virtual env4 = [c3, b4, b1, c3] - snap4 = Snapshot(snap, env4) + snap4 = Snapshot(snap, env4) numb4, liveboxes4, v = memo.number(FakeOptimizer({b4: FakeValue(True, b4)}), snap4) assert v == 1 - + assert liveboxes4 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), b4: tag(0, TAGVIRTUAL)} assert list(numb4.nums) == [tag(3, TAGINT), tag(0, TAGVIRTUAL), @@ -861,13 +862,13 @@ assert numb4.prev == numb.prev env5 = [b1, b4, b5] - snap5 = Snapshot(snap4, env5) + snap5 = Snapshot(snap4, env5) numb5, liveboxes5, v = memo.number(FakeOptimizer({b4: FakeValue(True, b4), b5: FakeValue(True, b5)}), snap5) assert v == 2 - + assert liveboxes5 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), b4: tag(0, TAGVIRTUAL), b5: tag(1, TAGVIRTUAL)} assert list(numb5.nums) == [tag(0, TAGBOX), tag(0, TAGVIRTUAL), @@ -957,7 +958,7 @@ storage = Storage() snapshot = Snapshot(None, [b1, ConstInt(1), b1, b2]) snapshot = Snapshot(snapshot, [ConstInt(2), ConstInt(3)]) - snapshot = Snapshot(snapshot, [b1, b2, b3]) + snapshot = Snapshot(snapshot, [b1, b2, b3]) storage.rd_snapshot = snapshot storage.rd_frame_info_list = None return storage @@ -965,7 +966,7 @@ def test_virtual_adder_int_constants(): b1s, b2s, b3s = [ConstInt(sys.maxint), ConstInt(2**16), ConstInt(-65)] storage = make_storage(b1s, b2s, b3s) - memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) + memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) liveboxes = modifier.finish(FakeOptimizer({})) assert storage.rd_snapshot is None @@ -989,7 +990,7 @@ storage2 = make_storage(b1s, b2s, b3s) modifier2 = ResumeDataVirtualAdder(storage2, memo) modifier2.finish(FakeOptimizer({})) - assert len(memo.consts) == 3 + assert len(memo.consts) == 3 assert storage2.rd_consts is memo.consts @@ -1053,7 +1054,7 @@ return b1_2 val = FakeValue() - values = {b1s: val, b2s: val} + values = {b1s: val, b2s: val} liveboxes = modifier.finish(FakeOptimizer(values)) assert storage.rd_snapshot is None b1t, b3t = [BoxInt(11), BoxInt(33)] @@ -1066,14 +1067,14 @@ assert_same(lst, [ConstInt(2), ConstInt(3)]) lst = reader.consume_boxes() assert_same(lst, [b1t, ConstInt(1), b1t, b1t]) - assert metainterp.trace == [] + assert metainterp.trace == [] def test_virtual_adder_make_constant(): b1s, b2s, b3s = [BoxInt(1), BoxPtr(), BoxInt(3)] b1s = ConstInt(111) storage = make_storage(b1s, b2s, b3s) - memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) + memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) modifier = ResumeDataVirtualAdder(storage, memo) liveboxes = modifier.finish(FakeOptimizer({})) b2t, b3t = [BoxPtr(demo55o), BoxInt(33)] @@ -1091,7 +1092,7 @@ def test_virtual_adder_make_virtual(): - b2s, b3s, b4s, b5s = [BoxPtr(), BoxInt(3), BoxPtr(), BoxPtr()] + b2s, b3s, b4s, b5s = [BoxPtr(), BoxInt(3), BoxPtr(), BoxPtr()] c1s = ConstInt(111) storage = Storage() memo = 
ResumeDataLoopMemo(FakeMetaInterpStaticData()) @@ -1407,7 +1408,7 @@ def test_invalidation_needed(): class options: failargs_limit = 10 - + metainterp_sd = FakeMetaInterpStaticData() metainterp_sd.options = options memo = ResumeDataLoopMemo(metainterp_sd) @@ -1421,5 +1422,5 @@ assert not modifier._invalidation_needed(10, 2) assert not modifier._invalidation_needed(10, 3) - assert modifier._invalidation_needed(10, 4) - + assert modifier._invalidation_needed(10, 4) + diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -6,6 +6,9 @@ from rpython.jit.metainterp import history from rpython.jit.codewriter.jitcode import JitCode from rpython.rlib import rstack +from rpython.jit.resume.reader import ResumeFrame, Virtual +from rpython.jit.resume.rescode import TAGBOX, TAGCONST, TAGVIRTUAL, TAGOFFSET + @@ -13,7 +16,7 @@ """ A resume reader that can follow resume until given point. Consult the concrete classes for details """ - + def __init__(self): self.framestack = [] self.consts = [] # XXX cache? @@ -127,7 +130,7 @@ """ Directly read values from the jitframe and put them in the blackhole interpreter """ - + def __init__(self, binterpbuilder, cpu, deadframe): self.bhinterpbuilder = binterpbuilder self.cpu = cpu @@ -179,7 +182,7 @@ """ Create boxes corresponding to the resume and store them in the metainterp """ - + def __init__(self, metainterp, deadframe): self.metainterp = metainterp self.deadframe = deadframe @@ -284,7 +287,7 @@ pos += 1 self.cache = None return res, [f.registers for f in self.framestack] - + def rebuild_from_resumedata(metainterp, deadframe, faildescr): """ Reconstruct metainterp frames from the resumedata """ @@ -303,7 +306,7 @@ cpu = metainterp_sd.cpu last_bhinterp = DirectResumeReader(interpbuilder, cpu, deadframe).rebuild(faildescr) - + return last_bhinterp class ResumeRecorder(object): From noreply at buildbot.pypy.org Fri Jan 17 16:06:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 17 Jan 2014 16:06:43 +0100 (CET) Subject: [pypy-commit] stmgc c7: The basic test_random passes, yuhuu Message-ID: <20140117150643.050031C3183@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r630:c236694327c2 Date: 2014-01-17 16:06 +0100 http://bitbucket.org/pypy/stmgc/changeset/c236694327c2/ Log: The basic test_random passes, yuhuu diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -1,85 +1,120 @@ from support import * import sys, random +import py +from cStringIO import StringIO + + +class Exec(object): + def __init__(self, test): + self.content = {'self': test} + + def do(self, cmd): + print >> sys.stderr, cmd + exec cmd in globals(), self.content class TestRandom(BaseTest): - def test_fixed_16_bytes_objects(self): - rnd = random.Random(1010) + def test_fixed_16_bytes_objects(self, seed=1010): + rnd = random.Random(seed) - N_OBJECTS = 10 - N_THREADS = 3 - print >> sys.stderr, 'stm_start_transaction()' - stm_start_transaction() - plist = [stm_allocate(16) for i in range(N_OBJECTS)] - read_sets = [{} for i in range(N_THREADS)] + N_OBJECTS = 5 + N_THREADS = 2 + ex = Exec(self) + ex.do('# initialization') + ex.do('stm_start_transaction()') + head_state = [] + for i in range(N_OBJECTS): + ex.do('lp%d = stm_allocate(16)' % i) + ex.do('stm_set_char(lp%d, %r)' % (i, chr(i))) + head_state.append(chr(i)) + ex.do('stm_push_root(lp%d)' % i) + read_sets = [set() for i in range(N_THREADS)] write_sets = 
[{} for i in range(N_THREADS)] - active_transactions = {} + active_transactions = set() + need_abort = set() - for i in range(N_OBJECTS): - print >> sys.stderr, 'p%d = stm_allocate(16)' % i - for i in range(N_OBJECTS): - print >> sys.stderr, 'p%d[8] = %r' % (i, chr(i)) - plist[i][8] = chr(i) - head_state = [[chr(i) for i in range(N_OBJECTS)]] - commit_log = [] - print >> sys.stderr, 'stm_stop_transaction(False)' - stm_stop_transaction(False) + ex.do('stm_stop_transaction()') + for i in range(N_OBJECTS-1, -1, -1): + ex.do('lp%d = stm_pop_root()' % i) - for i in range(N_THREADS): - print >> sys.stderr, 'self.switch(%d)' % i - self.switch(i) stop_count = 1 + current_thread = 0 - for i in range(10000): + def aborted(): + active_transactions.remove(n_thread) + write_sets[n_thread].clear() + read_sets[n_thread].clear() + need_abort.discard(n_thread) + + remaining_steps = 200 + while remaining_steps > 0 or active_transactions: + remaining_steps -= 1 n_thread = rnd.randrange(0, N_THREADS) - print >> sys.stderr, '#\nself.switch(%d)' % n_thread - self.switch(n_thread) + if n_thread != current_thread: + ex.do('#') + current_thread = n_thread + if n_thread in need_abort: + ex.do('py.test.raises(Conflict, self.switch, %d)' % n_thread) + aborted() + continue + ex.do('self.switch(%d)' % n_thread) if n_thread not in active_transactions: - print >> sys.stderr, 'stm_start_transaction()' - stm_start_transaction() - active_transactions[n_thread] = len(commit_log) + if remaining_steps <= 0: + continue + ex.do('stm_start_transaction()') + active_transactions.add(n_thread) action = rnd.randrange(0, 7) - if action < 6: + if action < 6 and remaining_steps > 0: is_write = action >= 4 i = rnd.randrange(0, N_OBJECTS) - print >> sys.stderr, "stm_read(p%d)" % i - stm_read(plist[i]) - got = plist[i][8] - print >> sys.stderr, "assert p%d[8] ==" % i, - my_head_state = head_state[active_transactions[n_thread]] - prev = read_sets[n_thread].setdefault(i, my_head_state[i]) - print >> sys.stderr, "%r" % (prev,) - assert got == prev + if i in write_sets[n_thread]: + expected = write_sets[n_thread][i] + else: + expected = head_state[i] + ex.do("assert stm_get_char(lp%d) == %r" % (i, expected)) + read_sets[n_thread].add(i) # if is_write: - print >> sys.stderr, 'stm_write(p%d)' % i - stm_write(plist[i]) newval = chr(rnd.randrange(0, 256)) - print >> sys.stderr, 'p%d[8] = %r' % (i, newval) - plist[i][8] = newval - read_sets[n_thread][i] = write_sets[n_thread][i] = newval + write_write_conflict = False + for t in range(N_THREADS): + if t != n_thread: + write_write_conflict |= i in write_sets[t] + if write_write_conflict: + ex.do('py.test.raises(Conflict, stm_set_char, lp%d, %r)' + % (i, newval)) + aborted() + continue + else: + ex.do('stm_set_char(lp%d, %r)' % (i, newval)) + write_sets[n_thread][i] = newval else: - src_index = active_transactions.pop(n_thread) - conflict = False - for i in range(src_index, len(commit_log)): - for j in commit_log[i]: - if j in read_sets[n_thread]: - conflict = True - print >> sys.stderr, "stm_stop_transaction(%r) #%d" % ( - conflict, stop_count) - stop_count += 1 - stm_stop_transaction(conflict) - # - if not conflict: - hs = head_state[-1][:] - for i, newval in write_sets[n_thread].items(): - hs[i] = newval - assert plist[i][8] == newval - head_state.append(hs) - commit_log.append(write_sets[n_thread].keys()) - print >> sys.stderr, '#', head_state[-1] - print >> sys.stderr, '# log:', commit_log[-1] + active_transactions.remove(n_thread) + changes = [] + modified = sorted(write_sets[n_thread]) + for i 
in modified: + nval = write_sets[n_thread][i] + changes.append('lp%d=%r' % (i, nval)) + head_state[i] = nval write_sets[n_thread].clear() read_sets[n_thread].clear() + ex.do('stm_stop_transaction() #%d %s' % (stop_count, ' '.join(changes))) + stop_count += 1 + + for t in range(N_THREADS): + if t != n_thread: + for i in modified: + if i in read_sets[t]: + need_abort.add(t) + + def _make_fun(seed): + def test_fun(self): + self.test_fixed_16_bytes_objects(seed) + test_fun.__name__ = 'test_fixed_16_bytes_objects_%d' % seed + return test_fun + + for _seed in range(5000, 5100): + _fn = _make_fun(_seed) + locals()[_fn.__name__] = _fn From noreply at buildbot.pypy.org Fri Jan 17 16:35:12 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 17 Jan 2014 16:35:12 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) make resume_new tests pass Message-ID: <20140117153512.133671C3085@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68722:fa61ebe921c8 Date: 2014-01-17 16:34 +0100 http://bitbucket.org/pypy/pypy/changeset/fa61ebe921c8/ Log: (fijal, rguillebert) make resume_new tests pass diff --git a/rpython/jit/codewriter/assembler.py b/rpython/jit/codewriter/assembler.py --- a/rpython/jit/codewriter/assembler.py +++ b/rpython/jit/codewriter/assembler.py @@ -170,6 +170,7 @@ elif isinstance(x, AbstractDescr): if x not in self._descr_dict: self._descr_dict[x] = len(self.descrs) + x.global_descr_index = len(self.descrs) self.descrs.append(x) if isinstance(x, SwitchDictDescr): self.switchdictdescrs.append(x) diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -2,7 +2,7 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.metainterp.history import ConstInt, Box, Const from rpython.jit.resume.rescode import ResumeBytecodeBuilder, TAGBOX,\ - ResumeBytecode + ResumeBytecode, TAGVIRTUAL # if op.getopnum() == rop.ENTER_FRAME: # descr = op.getdescr() @@ -111,34 +111,40 @@ if box not in self.current_attachment: self.current_attachment[box] = loc_pos + def get_box_pos(self, box): + if box in self.virtuals: + return TAGVIRTUAL | (self.virtuals[box] << 2) + if isinstance(box, Const): + return self.builder.encode_const(box) + try: + loc = self.regalloc.loc(box, must_exist=True).get_jitframe_position() + pos = self.builder.encode(TAGBOX, loc) + self.current_attachment[box] = pos + return pos + except KeyError: + raise + def process(self, op): if op.getopnum() == rop.ENTER_FRAME: self.builder.enter_frame(op.getarg(0).getint(), op.getdescr()) elif op.getopnum() == rop.RESUME_PUT: frame_pos = op.getarg(1).getint() pos_in_frame = op.getarg(2).getint() - box = op.getarg(0) - if box in self.virtuals: - xxx - if isinstance(box, Const): - pos = self.builder.encode_const(box) - self.builder.resume_put(pos, frame_pos, pos_in_frame) - return - try: - loc = self.regalloc.loc(box, must_exist=True).get_jitframe_position() - pos = self.builder.encode(TAGBOX, loc) - self.builder.resume_put(pos, frame_pos, pos_in_frame) - except KeyError: - xxx - self.current_attachment[box] = pos - self.frontend_pos[box] = (frame_pos, pos_in_frame) + pos = self.get_box_pos(op.getarg(0)) + self.builder.resume_put(pos, frame_pos, pos_in_frame) + if pos & TAGBOX: + self.frontend_pos[op.getarg(0)] = (frame_pos, pos_in_frame) elif op.getopnum() == rop.LEAVE_FRAME: self.builder.leave_frame() elif op.getopnum() == rop.RESUME_NEW: v_pos = len(self.virtuals) 
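    # Sketch of the flow, as far as this branch shows it: each RESUME_NEW
    # claims the next free virtual index here, and the builder records that
    # index together with descr.global_descr_index in the resume bytecode
    # (see rescode.py below); the reader side presumably rebuilds the virtual
    # object from that descr when a guard actually fails.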
self.virtuals[op.result] = v_pos - XXX self.builder.resume_new(v_pos, op.getdescr()) + elif op.getopnum() == rop.RESUME_SETFIELD_GC: + structpos = self.get_box_pos(op.getarg(0)) + fieldpos = self.get_box_pos(op.getarg(1)) + descr = op.getdescr() + self.builder.resume_setfield_gc(structpos, fieldpos, descr) else: xxx return diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -71,6 +71,7 @@ xxx def resume_new(self, box, descr): + xxx # XXX make it a list v = Virtual(len(self.virtual_list), descr) self.virtuals[box] = v @@ -78,6 +79,7 @@ def resume_setfield_gc(self, box, fieldbox, descr): # XXX optimize fields + xxx self.virtuals[box].fields[descr] = self.encode(fieldbox) def resume_clear(self, frame_no, frontend_position): @@ -121,6 +123,18 @@ pos_in_frame = self.read(pos + 4) self.resume_put(encoded, frame_pos, pos_in_frame) pos += 5 + elif op == rescode.RESUME_NEW: + tag, v_pos = self.decode(self.read_short(pos + 1)) + assert tag == rescode.TAGVIRTUAL + descr = self.staticdata.opcode_descrs[self.read_short(pos + 3)] + self.resume_new(v_pos, descr) + pos += 5 + elif op == rescode.RESUME_SETFIELD_GC: + structpos = self.read_short(pos + 1) + fieldpos = self.read_short(pos + 3) + descr = self.staticdata.opcode_descrs[self.read_short(pos + 5)] + self.resume_setfield_gc(structpos, fieldpos, descr) + pos += 7 else: xxx self.bytecode = None @@ -141,5 +155,14 @@ self.l.append("resume_put (%d, %d) %d %d" % (tag, index, frame_pos, pos_in_frame)) + def resume_new(self, v_pos, descr): + self.l.append("%d = resume_new %d" % (v_pos, descr.global_descr_index)) + + def resume_setfield_gc(self, structpos, fieldpos, descr): + stag, sindex = self.decode(structpos) + ftag, findex = self.decode(fieldpos) + self.l.append("resume_setfield_gc (%d, %d) (%d, %d) %d" % ( + stag, sindex, ftag, findex, descr.global_descr_index)) + def finish(self): return "\n".join(self.l) diff --git a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -1,7 +1,8 @@ from rpython.jit.metainterp.history import ConstInt -UNUSED, ENTER_FRAME, LEAVE_FRAME, RESUME_PUT = range(4) +(UNUSED, ENTER_FRAME, LEAVE_FRAME, RESUME_PUT, + RESUME_NEW, RESUME_SETFIELD_GC) = range(6) TAGCONST = 0x0 TAGVIRTUAL = 0x2 @@ -64,3 +65,14 @@ self.write_short(pos) self.write(frame_pos) self.write(pos_in_frame) + + def resume_new(self, v_pos, descr): + self.write(RESUME_NEW) + self.write_short(self.encode(TAGVIRTUAL, v_pos)) + self.write_short(descr.global_descr_index) + + def resume_setfield_gc(self, structpos, fieldpos, descr): + self.write(RESUME_SETFIELD_GC) + self.write_short(structpos) + self.write_short(fieldpos) + self.write_short(descr.global_descr_index) diff --git a/rpython/jit/resume/test/test_backend.py b/rpython/jit/resume/test/test_backend.py --- a/rpython/jit/resume/test/test_backend.py +++ b/rpython/jit/resume/test/test_backend.py @@ -20,8 +20,9 @@ return 'MockJitCode(%d)' % self.no class MockStaticData(object): - def __init__(self, *jitcodes): - self.alljitcodes = list(jitcodes) + def __init__(self, jitcodes, descrs): + self.alljitcodes = jitcodes + self.opcode_descrs = descrs def preparse(inp): return "\n".join([s.strip() for s in inp.split("\n") if s.strip()]) @@ -46,7 +47,7 @@ looptoken) descr = loop.operations[3].getdescr() assert descr.rd_bytecode_position == 15 - staticdata = MockStaticData(None, jitcode) + staticdata = MockStaticData([None, jitcode], []) res = 
descr.rd_resume_bytecode.dump(staticdata, descr.rd_bytecode_position) expected_resume = preparse(""" @@ -62,7 +63,9 @@ jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) S = lltype.GcStruct('S', ('field', lltype.Signed)) structdescr = self.cpu.sizeof(S) + structdescr.global_descr_index = 0 fielddescr = self.cpu.fielddescrof(S, 'field') + fielddescr.global_descr_index = 1 namespace = {'jitcode':jitcode, 'structdescr':structdescr, 'fielddescr':fielddescr} loop = parse(""" @@ -79,18 +82,17 @@ looptoken = JitCellToken() self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) - xxx + staticdata = MockStaticData([None, jitcode], [structdescr, fielddescr]) + descr = loop.operations[5].getdescr() + res = descr.rd_resume_bytecode.dump(staticdata, + descr.rd_bytecode_position) expected_resume = preparse(""" - enter_frame -1 frame-1 - p0 = resume_new(descr=structdescr) - resume_setfield_gc(p0, i0, descr=fielddescr) - resume_put(p0, 0, 0) - leave_frame() - """, namespace=namespace) - descr = loop.operations[-3].getdescr() - assert descr.rd_bytecode_position == 4 - equaloplists(descr.rd_resume_bytecode.opcodes, - expected_resume.operations) + enter_frame -1 name + 0 = resume_new 0 + resume_setfield_gc (2, 0) (3, 28) 1 + resume_put (2, 0) 0 0 + """) + assert res == expected_resume def test_spill(self): jitcode = JitCode("name") From noreply at buildbot.pypy.org Fri Jan 17 16:49:48 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 17 Jan 2014 16:49:48 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) fix test_spill, reintroduce liveness analyser Message-ID: <20140117154948.6DB591C0291@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68723:81bacd53e1b5 Date: 2014-01-17 16:49 +0100 http://bitbucket.org/pypy/pypy/changeset/81bacd53e1b5/ Log: (fijal, rguillebert) fix test_spill, reintroduce liveness analyser diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -3,31 +3,8 @@ from rpython.jit.metainterp.history import ConstInt, Box, Const from rpython.jit.resume.rescode import ResumeBytecodeBuilder, TAGBOX,\ ResumeBytecode, TAGVIRTUAL +from rpython.jit.codewriter.jitcode import JitCode - # if op.getopnum() == rop.ENTER_FRAME: - # descr = op.getdescr() - # assert isinstance(descr, JitCode) - # self.enter_frame(op.getarg(0).getint(), descr) - # elif op.getopnum() == rop.LEAVE_FRAME: - # self.leave_frame() - # elif op.getopnum() == rop.RESUME_PUT: - # self.resume_put(op.getarg(0), op.getarg(1).getint(), - # op.getarg(2).getint()) - # elif op.getopnum() == rop.RESUME_NEW: - # self.resume_new(op.result, op.getdescr()) - # elif op.getopnum() == rop.RESUME_SETFIELD_GC: - # self.resume_setfield_gc(op.getarg(0), op.getarg(1), - # op.getdescr()) - # elif op.getopnum() == rop.RESUME_SET_PC: - # self.resume_set_pc(op.getarg(0).getint()) - # elif op.getopnum() == rop.RESUME_CLEAR: - # self.resume_clear(op.getarg(0).getint(), - # op.getarg(1).getint()) - # elif not op.is_resume(): - # pos += 1 - # continue - # else: - # xxx class LivenessAnalyzer(object): def __init__(self, inputframes=None): @@ -61,8 +38,37 @@ def resume_set_pc(self, pc): pass - def interpret_until(self, *args): - pass + def interpret_until(self, ops, until, pos=0): + while pos < until: + op = ops[pos] + if not op.is_resume(): + pos += 1 + continue + if op.getopnum() == rop.ENTER_FRAME: + descr = op.getdescr() + assert isinstance(descr, JitCode) + 
self.enter_frame(op.getarg(0).getint(), descr) + elif op.getopnum() == rop.LEAVE_FRAME: + self.leave_frame() + elif op.getopnum() == rop.RESUME_PUT: + self.resume_put(op.getarg(0), op.getarg(1).getint(), + op.getarg(2).getint()) + elif op.getopnum() == rop.RESUME_NEW: + self.resume_new(op.result, op.getdescr()) + elif op.getopnum() == rop.RESUME_SETFIELD_GC: + self.resume_setfield_gc(op.getarg(0), op.getarg(1), + op.getdescr()) + elif op.getopnum() == rop.RESUME_SET_PC: + self.resume_set_pc(op.getarg(0).getint()) + elif op.getopnum() == rop.RESUME_CLEAR: + self.resume_clear(op.getarg(0).getint(), + op.getarg(1).getint()) + elif not op.is_resume(): + pos += 1 + continue + else: + xxx + pos += 1 def _track(self, allboxes, box): if box in self.deps: @@ -187,11 +193,8 @@ return pos = self.builder.encode(TAGBOX, pos) if self.current_attachment[v] != pos: - frame_index, frame_pos = self.frontend_pos[v] - xxx - self.newops.append(ResOperation(rop.RESUME_PUT, [ - ConstInt(pos), frame_index, frame_pos], - None)) + frame_index, pos_in_frame = self.frontend_pos[v] + self.builder.resume_put(pos, frame_index, pos_in_frame) self.current_attachment[v] = pos def mark_resumable_position(self): diff --git a/rpython/jit/resume/test/test_backend.py b/rpython/jit/resume/test/test_backend.py --- a/rpython/jit/resume/test/test_backend.py +++ b/rpython/jit/resume/test/test_backend.py @@ -8,10 +8,10 @@ from rpython.rtyper.lltypesystem import lltype class MockJitCode(JitCode): - def __init__(self, no): + def __init__(self, no, index): self.no = no - self.global_index = no - self.name = 'frame-%d' % no + self.global_index = index + self.name = 'frame-%d' % index def num_regs(self): return self.no @@ -33,7 +33,7 @@ self.cpu.setup_once() def test_simple(self): - jitcode = MockJitCode(1) + jitcode = MockJitCode(3, 1) loop = parse(""" [i0] enter_frame(-1, descr=jitcode) @@ -97,6 +97,7 @@ def test_spill(self): jitcode = JitCode("name") jitcode.setup(num_regs_i=2, num_regs_r=0, num_regs_f=0) + jitcode.global_index = 0 faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) loop = parse(""" @@ -114,19 +115,19 @@ self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) - expected_resume = parse(""" - [i2] - enter_frame(-1, descr=jitcode) - resume_put(1, 0, 1) - resume_put(29, 0, 1) - leave_frame() - """, namespace={'jitcode':jitcode}) + staticdata = MockStaticData([jitcode], []) + expected_resume = preparse(""" + enter_frame -1 name + resume_put (3, 1) 0 1 + resume_put (3, 29) 0 1 + """) descr1 = loop.operations[3].getdescr() descr2 = loop.operations[5].getdescr() - assert descr1.rd_bytecode_position == 2 - assert descr2.rd_bytecode_position == 3 - equaloplists(descr1.rd_resume_bytecode.opcodes, - expected_resume.operations) + assert descr1.rd_bytecode_position == 10 + assert descr2.rd_bytecode_position == 15 + res = descr2.rd_resume_bytecode.dump(staticdata, + descr2.rd_bytecode_position) + assert res == expected_resume def test_bridge(self): jitcode = JitCode("name") From noreply at buildbot.pypy.org Fri Jan 17 17:32:23 2014 From: noreply at buildbot.pypy.org (jerith) Date: Fri, 17 Jan 2014 17:32:23 +0100 (CET) Subject: [pypy-commit] pypy remove-del-from-generatoriterator: close branch for merging Message-ID: <20140117163223.259641C0459@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: remove-del-from-generatoriterator Changeset: r68724:157cf1b78b9c Date: 2014-01-17 18:29 +0200 http://bitbucket.org/pypy/pypy/changeset/157cf1b78b9c/ Log: close branch for merging From noreply at 
buildbot.pypy.org Fri Jan 17 17:32:24 2014 From: noreply at buildbot.pypy.org (jerith) Date: Fri, 17 Jan 2014 17:32:24 +0100 (CET) Subject: [pypy-commit] pypy default: merge remove-del-from-generatoriterator branch Message-ID: <20140117163224.6E5261C0459@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: Changeset: r68725:3f7024813427 Date: 2014-01-17 18:31 +0200 http://bitbucket.org/pypy/pypy/changeset/3f7024813427/ Log: merge remove-del-from-generatoriterator branch diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1234,6 +1234,8 @@ flags |= consts.CO_NESTED if scope.is_generator: flags |= consts.CO_GENERATOR + if scope.has_yield_inside_try: + flags |= consts.CO_YIELD_INSIDE_TRY if scope.has_variable_arg: flags |= consts.CO_VARARGS if scope.has_keywords_arg: diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -17,6 +17,7 @@ CO_FUTURE_UNICODE_LITERALS = 0x20000 #pypy specific: CO_KILL_DOCSTRING = 0x100000 +CO_YIELD_INSIDE_TRY = 0x200000 PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -43,6 +43,7 @@ self.child_has_free = False self.nested = False self.doc_removable = False + self._in_try_body_depth = 0 def lookup(self, name): """Find the scope of identifier 'name'.""" @@ -75,6 +76,14 @@ self.varnames.append(mangled) return mangled + def note_try_start(self, try_node): + """Called when a try is found, before visiting the body.""" + self._in_try_body_depth += 1 + + def note_try_end(self, try_node): + """Called after visiting a try body.""" + self._in_try_body_depth -= 1 + def note_yield(self, yield_node): """Called when a yield is found.""" raise SyntaxError("'yield' outside function", yield_node.lineno, @@ -210,6 +219,7 @@ self.has_variable_arg = False self.has_keywords_arg = False self.is_generator = False + self.has_yield_inside_try = False self.optimized = True self.return_with_value = False self.import_star = None @@ -220,6 +230,8 @@ raise SyntaxError("'return' with argument inside generator", self.ret.lineno, self.ret.col_offset) self.is_generator = True + if self._in_try_body_depth > 0: + self.has_yield_inside_try = True def note_return(self, ret): if ret.value: @@ -463,7 +475,12 @@ self.scope.new_temporary_name() if wih.optional_vars: self.scope.new_temporary_name() - ast.GenericASTVisitor.visit_With(self, wih) + wih.context_expr.walkabout(self) + if wih.optional_vars: + wih.optional_vars.walkabout(self) + self.scope.note_try_start(wih) + self.visit_sequence(wih.body) + self.scope.note_try_end(wih) def visit_arguments(self, arguments): scope = self.scope @@ -505,3 +522,16 @@ else: role = SYM_ASSIGNED self.note_symbol(name.id, role) + + def visit_TryExcept(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.handlers) + self.visit_sequence(node.orelse) + + def visit_TryFinally(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.finalbody) diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py 
b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -346,6 +346,23 @@ assert exc.msg == "'return' with argument inside generator" scp = self.func_scope("def f():\n return\n yield x") + def test_yield_inside_try(self): + scp = self.func_scope("def f(): yield x") + assert not scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n except: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n finally: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n with x: yield y") + assert scp.has_yield_inside_try + + def test_yield_outside_try(self): + for input in ("try: pass\n except: pass", + "try: pass\n finally: pass", + "with x: pass"): + input = "def f():\n yield y\n %s\n yield y" % (input,) + assert not self.func_scope(input).has_yield_inside_try + def test_return(self): for input in ("class x: return", "return"): exc = py.test.raises(SyntaxError, self.func_scope, input).value diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -155,20 +155,6 @@ code_name = self.pycode.co_name return space.wrap(code_name) - def __del__(self): - # Only bother enqueuing self to raise an exception if the frame is - # still not finished and finally or except blocks are present. - self.clear_all_weakrefs() - if self.frame is not None: - block = self.frame.lastblock - while block is not None: - if not isinstance(block, LoopBlock): - self.enqueue_for_destruction(self.space, - GeneratorIterator.descr_close, - "interrupting generator of ") - break - block = block.previous - # Results can be either an RPython list of W_Root, or it can be an # app-level W_ListObject, which also has an append() method, that's why we # generate 2 versions of the function and 2 jit drivers. @@ -211,3 +197,20 @@ return unpack_into unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() + + +class GeneratorIteratorWithDel(GeneratorIterator): + + def __del__(self): + # Only bother enqueuing self to raise an exception if the frame is + # still not finished and finally or except blocks are present. 
+ self.clear_all_weakrefs() + if self.frame is not None: + block = self.frame.lastblock + while block is not None: + if not isinstance(block, LoopBlock): + self.enqueue_for_destruction(self.space, + GeneratorIterator.descr_close, + "interrupting generator of ") + break + block = block.previous diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR, CO_KILL_DOCSTRING) + CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -167,8 +167,12 @@ def run(self): """Start this frame's execution.""" if self.getcode().co_flags & pycode.CO_GENERATOR: - from pypy.interpreter.generator import GeneratorIterator - return self.space.wrap(GeneratorIterator(self)) + if pycode.CO_YIELD_INSIDE_TRY: + from pypy.interpreter.generator import GeneratorIteratorWithDel + return self.space.wrap(GeneratorIteratorWithDel(self)) + else: + from pypy.interpreter.generator import GeneratorIterator + return self.space.wrap(GeneratorIterator(self)) else: return self.execute_frame() diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -5,7 +5,7 @@ from pypy.interpreter.module import Module from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pytraceback import PyTraceback -from pypy.interpreter.generator import GeneratorIterator +from pypy.interpreter.generator import GeneratorIteratorWithDel from rpython.rlib.objectmodel import instantiate from pypy.interpreter.gateway import unwrap_spec from pypy.objspace.std.iterobject import W_SeqIterObject, W_ReverseSeqIterObject @@ -60,7 +60,7 @@ return space.wrap(tb) def generator_new(space): - new_generator = instantiate(GeneratorIterator) + new_generator = instantiate(GeneratorIteratorWithDel) return space.wrap(new_generator) @unwrap_spec(current=int, remaining=int, step=int) From noreply at buildbot.pypy.org Fri Jan 17 17:33:32 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 17 Jan 2014 17:33:32 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) finish the test_backend Message-ID: <20140117163332.BD9071C0459@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68726:e1f1d329aa41 Date: 2014-01-17 17:32 +0100 http://bitbucket.org/pypy/pypy/changeset/e1f1d329aa41/ Log: (fijal, rguillebert) finish the test_backend diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -111,8 +111,7 @@ else: loc_pos = inputlocs[i].get_jitframe_position() i += 1 - self.frontend_pos[box] = (ConstInt(frame_pos), - ConstInt(pos_in_frame)) + self.frontend_pos[box] = (frame_pos, pos_in_frame) all[box] = None if box not in self.current_attachment: self.current_attachment[box] = loc_pos diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -59,13 
+59,8 @@ def decode(self, pos): return pos & 0x3, pos >> rescode.TAGOFFSET - def resume_put(self, jitframe_pos_box, frame_no, frontend_position): - XXX - if isinstance(jitframe_pos_box, Box): - jitframe_pos = self.encode_virtual(jitframe_pos_box) - else: - jitframe_pos = self.encode_box(jitframe_pos_box.getint()) - self.framestack[frame_no].registers[frontend_position] = jitframe_pos + def resume_put(self, encoded_pos, frame_no, frontend_position): + self.framestack[frame_no].registers[frontend_position] = encoded_pos def encode(self, box): xxx @@ -83,12 +78,9 @@ self.virtuals[box].fields[descr] = self.encode(fieldbox) def resume_clear(self, frame_no, frontend_position): + xxx self.framestack[frame_no].registers[frontend_position] = -1 - def resume_put_const(self, const, frame_no, frontend_position): - pos = self.encode_const(const) - self.framestack[frame_no].registers[frontend_position] = pos - def resume_set_pc(self, pc): self.framestack[-1].pc = pc @@ -98,7 +90,7 @@ def _rebuild_until(self, rb, position): if rb.parent is not None: self._rebuild_until(rb.parent, rb.parent_position) - self.interpret_until(rb.opcodes, position) + self.interpret_until(rb, position) def read(self, pos): return ord(self.bytecode.opcodes[pos]) diff --git a/rpython/jit/resume/test/test_backend.py b/rpython/jit/resume/test/test_backend.py --- a/rpython/jit/resume/test/test_backend.py +++ b/rpython/jit/resume/test/test_backend.py @@ -131,6 +131,7 @@ def test_bridge(self): jitcode = JitCode("name") + jitcode.global_index = 0 jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) loop = parse(""" [i0] @@ -152,14 +153,12 @@ force_spill(i0) guard_false(i0) """) - locs = rebuild_locs_from_resumedata(descr) + staticdata = MockStaticData([jitcode], []) + locs = rebuild_locs_from_resumedata(descr, staticdata) self.cpu.compile_bridge(None, descr, [bridge.inputargs], locs, bridge.operations, looptoken) descr = bridge.operations[-1].getdescr() - expected_resume = parse(""" - [] - resume_put(28, 0, 0) - """) - equaloplists(descr.rd_resume_bytecode.opcodes, - expected_resume.operations) + res = descr.rd_resume_bytecode.dump(staticdata, + descr.rd_bytecode_position) + assert res == "resume_put (3, 28) 0 0" diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -3,7 +3,7 @@ from rpython.jit.codewriter.jitcode import JitCode from rpython.jit.metainterp.history import AbstractDescr, Const, INT, Stats from rpython.jit.resume.frontend import rebuild_from_resumedata -from rpython.jit.resume.rescode import ResumeBytecode +from rpython.jit.resume.rescode import ResumeBytecode, TAGBOX from rpython.jit.resume.reader import AbstractResumeReader from rpython.jit.metainterp.resoperation import rop from rpython.jit.codewriter.format import unformat_assembler @@ -66,11 +66,16 @@ return index + 3 class RebuildingResumeReader(AbstractResumeReader): + def unpack(self, r): + tag, index = self.decode(r) + assert tag == TAGBOX + return index + def finish(self): - return [f.registers for f in self.framestack] + return [[self.unpack(r) for r in f.registers] for f in self.framestack] -def rebuild_locs_from_resumedata(faildescr): - return RebuildingResumeReader().rebuild(faildescr) +def rebuild_locs_from_resumedata(faildescr, staticdata): + return RebuildingResumeReader(staticdata).rebuild(faildescr) class TestResumeDirect(object): def test_box_resume_reader(self): From noreply at buildbot.pypy.org Fri Jan 17 
17:40:43 2014 From: noreply at buildbot.pypy.org (jerith) Date: Fri, 17 Jan 2014 17:40:43 +0100 (CET) Subject: [pypy-commit] pypy default: add two more tests for yield-inside-try edge cases Message-ID: <20140117164043.AEA7D1C0291@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: Changeset: r68727:6cbefcec4ceb Date: 2014-01-17 18:39 +0200 http://bitbucket.org/pypy/pypy/changeset/6cbefcec4ceb/ Log: add two more tests for yield-inside-try edge cases diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -358,7 +358,9 @@ def test_yield_outside_try(self): for input in ("try: pass\n except: pass", + "try: pass\n except: yield y", "try: pass\n finally: pass", + "try: pass\n finally: yield y", "with x: pass"): input = "def f():\n yield y\n %s\n yield y" % (input,) assert not self.func_scope(input).has_yield_inside_try From noreply at buildbot.pypy.org Fri Jan 17 17:41:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 17 Jan 2014 17:41:08 +0100 (CET) Subject: [pypy-commit] stmgc c7: Kill old entries in test_bug Message-ID: <20140117164108.DD0371C0291@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r631:f77ad69e2916 Date: 2014-01-17 17:40 +0100 http://bitbucket.org/pypy/stmgc/changeset/f77ad69e2916/ Log: Kill old entries in test_bug diff --git a/c7/test/test_bug.py b/c7/test/test_bug.py --- a/c7/test/test_bug.py +++ b/c7/test/test_bug.py @@ -4,431 +4,6 @@ class TestBug(BaseTest): - def test_bug1(self): - stm_start_transaction() - p8 = stm_allocate(16) - p8[8] = '\x08' - stm_stop_transaction(False) - # - self.switch("sub1") - self.switch("main") - stm_start_transaction() - stm_write(p8) - p8[8] = '\x97' - # - self.switch("sub1") - stm_start_transaction() - stm_read(p8) - assert p8[8] == '\x08' - - def test_bug2(self): - stm_start_transaction() - p0 = stm_allocate(16) - p1 = stm_allocate(16) - p2 = stm_allocate(16) - p3 = stm_allocate(16) - p4 = stm_allocate(16) - p5 = stm_allocate(16) - p6 = stm_allocate(16) - p7 = stm_allocate(16) - p8 = stm_allocate(16) - p9 = stm_allocate(16) - p0[8] = '\x00' - p1[8] = '\x01' - p2[8] = '\x02' - p3[8] = '\x03' - p4[8] = '\x04' - p5[8] = '\x05' - p6[8] = '\x06' - p7[8] = '\x07' - p8[8] = '\x08' - p9[8] = '\t' - stm_stop_transaction(False) - self.switch(0) - self.switch(1) - self.switch(2) - # - self.switch(1) - stm_start_transaction() - stm_read(p7) - assert p7[8] == '\x07' - # - self.switch(1) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_read(p4) - assert p4[8] == '\x04' - # - self.switch(0) - stm_start_transaction() - stm_read(p3) - assert p3[8] == '\x03' - # - self.switch(2) - stm_start_transaction() - stm_read(p8) - assert p8[8] == '\x08' - stm_write(p8) - p8[8] = '\x08' - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_read(p2) - assert p2[8] == '\x02' - # - self.switch(2) - stm_read(p2) - assert p2[8] == '\x02' - # - self.switch(2) - stm_read(p2) - assert p2[8] == '\x02' - stm_write(p2) - p2[8] = 'm' - # - self.switch(0) - stm_read(p4) - assert p4[8] == '\x04' - stm_write(p4) - p4[8] = '\xc5' - # - self.switch(2) - stm_read(p1) - assert p1[8] == '\x01' - # - self.switch(2) - stm_stop_transaction(False) #1 - # ['\x00', '\x01', 'm', '\x03', '\x04', '\x05', '\x06', '\x07', '\x08', '\t'] - # log: [8, 2] - # - self.switch(0) - 
stm_stop_transaction(False) #2 - # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] - # log: [4] - # - self.switch(0) - stm_start_transaction() - stm_read(p6) - assert p6[8] == '\x06' - # - self.switch(0) - stm_read(p4) - assert p4[8] == '\xc5' - # - self.switch(0) - stm_read(p4) - assert p4[8] == '\xc5' - # - self.switch(1) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_stop_transaction(True) #3 - # conflict: 0xdf0a8028 - # - self.switch(2) - stm_start_transaction() - stm_read(p6) - assert p6[8] == '\x06' - # - self.switch(1) - stm_start_transaction() - stm_read(p1) - assert p1[8] == '\x01' - # - self.switch(0) - stm_read(p4) - assert p4[8] == '\xc5' - stm_write(p4) - p4[8] = '\x0c' - # - self.switch(2) - stm_read(p2) - assert p2[8] == 'm' - stm_write(p2) - p2[8] = '\x81' - # - self.switch(2) - stm_read(p7) - assert p7[8] == '\x07' - # - self.switch(0) - stm_read(p5) - assert p5[8] == '\x05' - stm_write(p5) - p5[8] = 'Z' - # - self.switch(1) - stm_stop_transaction(False) #4 - # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] - # log: [] - # - self.switch(2) - stm_read(p8) - assert p8[8] == '\x08' - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_start_transaction() - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(2) - stm_read(p9) - assert p9[8] == '\t' - stm_write(p9) - p9[8] = '\x81' - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_read(p2) - assert p2[8] == 'm' - # - self.switch(2) - stm_read(p9) - assert p9[8] == '\x81' - stm_write(p9) - p9[8] = 'g' - # - self.switch(1) - stm_read(p3) - assert p3[8] == '\x03' - # - self.switch(2) - stm_read(p7) - assert p7[8] == '\x07' - # - self.switch(1) - stm_read(p1) - assert p1[8] == '\x01' - # - self.switch(0) - stm_read(p2) - assert p2[8] == 'm' - stm_write(p2) - p2[8] = 'T' - # - self.switch(2) - stm_read(p4) - assert p4[8] == '\xc5' - # - self.switch(2) - stm_read(p9) - assert p9[8] == 'g' - # - self.switch(2) - stm_read(p1) - assert p1[8] == '\x01' - stm_write(p1) - p1[8] = 'L' - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(2) - stm_read(p0) - assert p0[8] == '\x00' - stm_write(p0) - p0[8] = '\xf3' - # - self.switch(1) - stm_stop_transaction(False) #5 - # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] - # log: [] - # - self.switch(0) - stm_read(p1) - assert p1[8] == '\x01' - stm_write(p1) - p1[8] = '*' - # - self.switch(1) - stm_start_transaction() - stm_read(p3) - assert p3[8] == '\x03' - stm_write(p3) - p3[8] = '\xd2' - # - self.switch(0) - stm_stop_transaction(False) #6 - # ['\x00', '*', 'T', '\x03', '\x0c', 'Z', '\x06', '\x07', '\x08', '\t'] - # log: [1, 2, 4, 5] - # - self.switch(1) - stm_read(p7) - assert p7[8] == '\x07' - stm_write(p7) - p7[8] = '.' 
- # - self.switch(0) - stm_start_transaction() - stm_read(p7) - assert p7[8] == '\x07' - # - self.switch(1) - stm_read(p2) - assert p2[8] == 'm' - stm_write(p2) - p2[8] = '\xe9' - # - self.switch(1) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(0) - stm_read(p1) - assert p1[8] == '*' - # - self.switch(0) - stm_read(p8) - assert p8[8] == '\x08' - stm_write(p8) - p8[8] = 'X' - # - self.switch(2) - stm_stop_transaction(True) #7 - # conflict: 0xdf0a8018 - # - self.switch(1) - stm_read(p9) - assert p9[8] == '\t' - # - self.switch(0) - stm_read(p8) - assert p8[8] == 'X' - # - self.switch(1) - stm_read(p4) - assert p4[8] == '\xc5' - stm_write(p4) - p4[8] = '\xb2' - # - self.switch(0) - stm_read(p9) - assert p9[8] == '\t' - # - self.switch(2) - stm_start_transaction() - stm_read(p5) - assert p5[8] == 'Z' - stm_write(p5) - p5[8] = '\xfa' - # - self.switch(2) - stm_read(p3) - assert p3[8] == '\x03' - # - self.switch(1) - stm_read(p9) - assert p9[8] == '\t' - # - self.switch(1) - stm_read(p8) - assert p8[8] == '\x08' - stm_write(p8) - p8[8] = 'g' - # - self.switch(1) - stm_read(p8) - assert p8[8] == 'g' - # - self.switch(2) - stm_read(p5) - assert p5[8] == '\xfa' - stm_write(p5) - p5[8] = '\x86' - # - self.switch(2) - stm_read(p6) - assert p6[8] == '\x06' - # - self.switch(1) - stm_read(p4) - assert p4[8] == '\xb2' - stm_write(p4) - p4[8] = '\xce' - # - self.switch(2) - stm_read(p2) - assert p2[8] == 'T' - stm_write(p2) - p2[8] = 'Q' - # - self.switch(1) - stm_stop_transaction(True) #8 - # conflict: 0xdf0a8028 - # - self.switch(2) - stm_stop_transaction(False) #9 - # ['\x00', '*', 'Q', '\x03', '\x0c', '\x86', '\x06', '\x07', '\x08', '\t'] - # log: [2, 5] - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_start_transaction() - stm_read(p3) - assert p3[8] == '\x03' - # - self.switch(1) - stm_read(p5) - assert p5[8] == '\x86' - # - self.switch(2) - stm_start_transaction() - stm_read(p4) - assert p4[8] == '\x0c' - stm_write(p4) - p4[8] = '{' - # - self.switch(1) - stm_read(p2) - assert p2[8] == 'Q' - # - self.switch(2) - stm_read(p3) - assert p3[8] == '\x03' - stm_write(p3) - p3[8] = 'V' - # - self.switch(1) - stm_stop_transaction(False) #10 - # ['\x00', '*', 'Q', '\x03', '\x0c', '\x86', '\x06', '\x07', '\x08', '\t'] - # log: [] - # - self.switch(1) - stm_start_transaction() - stm_read(p7) - assert p7[8] == '\x07' - # - self.switch(2) - stm_read(p0) - assert p0[8] == '\x00' - stm_write(p0) - p0[8] = 'P' - # - self.switch(0) - stm_stop_transaction(False) #11 - def test_write_marker_no_conflict(self): # initialization stm_start_transaction() From noreply at buildbot.pypy.org Fri Jan 17 17:55:21 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 17 Jan 2014 17:55:21 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) make backend tests pass again Message-ID: <20140117165521.DD5721C0291@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68728:d31cbab84210 Date: 2014-01-17 17:54 +0100 http://bitbucket.org/pypy/pypy/changeset/d31cbab84210/ Log: (fijal, rguillebert) make backend tests pass again diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -138,7 +138,7 @@ newdescr) if op.is_guard(): newop.failargs = resumebuilder.get_numbering(mapping, op) - newop.getdescr().rd_bytecode_position = len(resumebuilder.newops) + newop.getdescr().rd_bytecode_position = 
resumebuilder.builder.getpos() self.operations.append(newop) if descr is None: diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -9,7 +9,8 @@ BoxFloat, ConstFloat) from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.metainterp.typesystem import deref -from rpython.jit.metainterp.test.test_resume2 import rebuild_locs_from_resumedata +from rpython.jit.resume.test.test_frontend import rebuild_locs_from_resumedata +from rpython.jit.resume.test.test_backend import MockStaticData from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.codewriter.jitcode import JitCode from rpython.jit.tool.oparser import parse @@ -40,6 +41,14 @@ lltype.malloc(STUFF, immortal=True)) +def get_jitcode(num_regs_i=0, num_regs_r=0, num_regs_f=0): + jitcode = JitCode('name') + jitcode.setup(num_regs_i=num_regs_i, num_regs_r=num_regs_r, + num_regs_f=num_regs_f) + jitcode.global_index = 0 + staticdata = MockStaticData([jitcode], []) + return jitcode, staticdata + class Runner(object): add_loop_instructions = ['overload for a specific cpu'] @@ -163,6 +172,7 @@ looptoken = JitCellToken() targettoken = TargetToken() jitcode = JitCode("name") + jitcode.global_index = 0 jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) operations = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), @@ -221,6 +231,7 @@ looptoken = JitCellToken() targettoken = TargetToken() jitcode = JitCode("name") + jitcode.global_index = 0 jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) operations = [ ResOperation(rop.LABEL, [i0], None, descr=targettoken), @@ -245,13 +256,14 @@ ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] - locs = rebuild_locs_from_resumedata(faildescr1) + staticdata = MockStaticData([jitcode], []) + locs = rebuild_locs_from_resumedata(faildescr1, staticdata) self.cpu.compile_bridge(None, faildescr1, [[i1b]], locs, bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) res = self.cpu.get_int_value(deadframe, 0) assert res == 20 @@ -268,8 +280,7 @@ faildescr2 = BasicFailDescr(2) looptoken = JitCellToken() targettoken = TargetToken() - jitcode = JitCode('name') - jitcode.setup(num_regs_i=3, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(3) operations = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), ResOperation(rop.INT_SUB, [i3, ConstInt(42)], i0), @@ -286,6 +297,7 @@ i1b = BoxInt() i3 = BoxInt() + staticdata = MockStaticData([jitcode], []) bridge = [ ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.RESUME_PUT, [i3, ConstInt(0), ConstInt(2)], @@ -294,20 +306,19 @@ ResOperation(rop.JUMP, [i1b], None, descr=targettoken), ] - locs = rebuild_locs_from_resumedata(faildescr1) + locs = rebuild_locs_from_resumedata(faildescr1, staticdata) self.cpu.compile_bridge(None, faildescr1, [[None, i1b, None]], locs, bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) res = self.cpu.get_int_value(deadframe, locs[0][1]) assert res == 20 def test_compile_big_bridge_out_of_small_loop(self): - jitcode = 
JitCode("name") - jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(1) i0 = BoxInt() faildescr1 = BasicFailDescr(1) looptoken = JitCellToken() @@ -321,7 +332,9 @@ inputargs = [i0] self.cpu.compile_loop(None, inputargs, operations, looptoken) jitcode1 = JitCode("name1") + jitcode1.global_index = 1 jitcode1.setup(num_regs_i=150, num_regs_r=0, num_regs_f=0) + staticdata.alljitcodes.append(jitcode1) i1list = [BoxInt() for i in range(150)] bridge = [ ResOperation(rop.ENTER_FRAME, [ConstInt(13)], None, descr=jitcode1) @@ -337,12 +350,12 @@ bridge.append(ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(4))) - faillocs = rebuild_locs_from_resumedata(faildescr1) + faillocs = rebuild_locs_from_resumedata(faildescr1, staticdata) self.cpu.compile_bridge(None, faildescr1, [[i0]], faillocs, bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) fail = self.cpu.get_latest_descr(deadframe) - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) assert fail.identifier == 3 for i in range(len(i1list)): res = self.cpu.get_int_value(deadframe, locs[1][i]) @@ -422,8 +435,7 @@ z = BoxInt(579) t = BoxInt(455) u = BoxInt(0) # False - jitcode = JitCode('jitcode') - jitcode.setup(num_regs_i=2, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(2) looptoken = JitCellToken() targettoken = TargetToken() operations = [ @@ -442,7 +454,7 @@ cpu.compile_loop(None, [x, y], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 0, 10) fail = self.cpu.get_latest_descr(deadframe) - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) assert self.cpu.get_int_value(deadframe, locs[0][0]) == 0 assert self.cpu.get_int_value(deadframe, locs[0][1]) == 55 @@ -480,8 +492,7 @@ v2 = BoxInt(testcases[0][1]) v_res = BoxInt() # - jitcode = JitCode('jitcode') - jitcode.setup(num_regs_i=1, num_regs_f=0, num_regs_r=0) + jitcode, staticdata = get_jitcode(1) if not reversed: ops = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, @@ -514,7 +525,7 @@ deadframe = self.cpu.execute_token(looptoken, x, y) fail = self.cpu.get_latest_descr(deadframe) if (z == boom) ^ reversed: - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) pos = locs[0][0] assert fail.identifier == 1 else: @@ -1257,9 +1268,7 @@ retvalues.insert(kk, y) # zero = BoxInt() - jitcode = JitCode("name") - jitcode.setup(num_regs_i=intboxes, num_regs_r=0, - num_regs_f=floatboxes) + jitcode, staticdata = get_jitcode(intboxes, 0, floatboxes) operations.extend([ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), @@ -1290,7 +1299,7 @@ # deadframe = self.cpu.execute_token(looptoken, *values) fail = self.cpu.get_latest_descr(deadframe) - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) assert fail.identifier == 42 # for k in range(intboxes): @@ -1352,9 +1361,8 @@ looptoken = JitCellToken() targettoken = TargetToken() faildescr = BasicFailDescr(15) - jitcode = JitCode("jitcode") - jitcode.setup(num_regs_i=len(intargs), num_regs_r=len(refargs), - num_regs_f=len(floatargs)) + jitcode, staticdata = get_jitcode(len(intargs), len(refargs), + len(floatargs)) operations = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), @@ -1413,7 +1421,7 @@ # assert dstvalues[index_counter] == 11 dstvalues[index_counter] = 0 - locs = rebuild_locs_from_resumedata(fail) + locs = 
rebuild_locs_from_resumedata(fail, staticdata) intvals = [] refvals = [] floatvals = [] @@ -1444,8 +1452,7 @@ faildescr1 = BasicFailDescr(1) faildescr2 = BasicFailDescr(2) faildescr3 = BasicFinalDescr(3) - jitcode = JitCode("jitcode") - jitcode.setup(num_regs_i=0, num_regs_r=0, num_regs_f=12) + jitcode, staticdata = get_jitcode(0, 0, 12) operations = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), ResOperation(rop.LABEL, fboxes, None, descr=targettoken), @@ -1472,7 +1479,7 @@ ] self.cpu.compile_bridge(None, faildescr1, [fboxes2], - rebuild_locs_from_resumedata(faildescr1), + rebuild_locs_from_resumedata(faildescr1, staticdata), bridge, looptoken) args = [] @@ -1482,7 +1489,7 @@ deadframe = self.cpu.execute_token(looptoken, *args) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) res = self.cpu.get_float_value(deadframe, locs[0][0]) assert longlong.getrealfloat(res) == 8.5 for i in range(1, len(fboxes)): @@ -1495,8 +1502,7 @@ py.test.skip("requires floats") fboxes = [BoxFloat() for i in range(3)] faildescr1 = BasicFailDescr(100) - jitcode = JitCode("jitcode") - jitcode.setup(num_regs_i=0, num_regs_r=0, num_regs_f=3) + jitcode, staticdata = get_jitcode(0, 0, 3) loopops = """ [i0, f1, f2] enter_frame(-1, descr=jitcode) @@ -1519,7 +1525,7 @@ deadframe = self.cpu.execute_token(looptoken, *args) #xxx check fail = self.cpu.get_latest_descr(deadframe) assert loop.operations[-3].getdescr() is fail is faildescr1 - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) f1 = self.cpu.get_float_value(deadframe, locs[0][0]) f2 = self.cpu.get_float_value(deadframe, locs[0][1]) f3 = self.cpu.get_float_value(deadframe, locs[0][2]) @@ -1544,7 +1550,7 @@ deadframe = self.cpu.execute_token(looptoken, *args) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 103 - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) f1 = self.cpu.get_float_value(deadframe, locs[0][0]) f2 = self.cpu.get_float_value(deadframe, locs[0][1]) f3 = self.cpu.get_float_value(deadframe, locs[0][2]) @@ -2146,8 +2152,7 @@ if i: raise LLException(exc_tp, exc_ptr) - jitcode = JitCode("name") - jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(1, 0, 0) ops = ''' [i0] enter_frame(-1, descr=jitcode) @@ -2181,7 +2186,7 @@ excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue deadframe = self.cpu.execute_token(looptoken, 0) - locs = rebuild_locs_from_resumedata(faildescr) + locs = rebuild_locs_from_resumedata(faildescr, staticdata) assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue @@ -2221,7 +2226,7 @@ looptoken = JitCellToken() self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) - locs = rebuild_locs_from_resumedata(faildescr) + locs = rebuild_locs_from_resumedata(faildescr, staticdata) assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 excvalue = self.cpu.grab_exc_value(deadframe) assert excvalue == xptr @@ -2391,8 +2396,7 @@ EffectInfo.MOST_GENERAL) # [i1, i2, i3, i4, i5, i6, f0, f1] - jitcode = JitCode('jitcode') - jitcode.setup(num_regs_i=6, num_regs_r=0, num_regs_f=2) + jitcode, staticdata = get_jitcode(6, 0, 2) faildescr = BasicFailDescr() ops = ''' [i0, i1, i2, i3, i4, i5, i6, f0, f1] @@ 
-2421,7 +2425,7 @@ f2 = longlong.getfloatstorage(3.4) frame = self.cpu.execute_token(looptoken, 1, 0, 1, 2, 3, 4, 5, f1, f2) assert not called - locs = rebuild_locs_from_resumedata(faildescr) + locs = rebuild_locs_from_resumedata(faildescr, staticdata) for j in range(5): assert self.cpu.get_int_value(frame, locs[0][j]) == j assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[0][6])) == 1.2 @@ -2439,7 +2443,7 @@ if flag: deadframe = self.cpu.force(token) fail = self.cpu.get_latest_descr(deadframe) - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) values.append(fail) values.append(self.cpu.get_int_value(deadframe, locs[0][0])) values.append(self.cpu.get_int_value(deadframe, locs[0][1])) @@ -2455,8 +2459,7 @@ i1 = BoxInt() tok = BoxPtr() faildescr = BasicFailDescr(1) - jitcode = JitCode('name') - jitcode.setup(num_regs_i=2, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(2, 0, 0) ops = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), ResOperation(rop.FORCE_TOKEN, [], tok), @@ -2478,7 +2481,7 @@ deadframe = self.cpu.execute_token(looptoken, 10, 1) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 assert self.cpu.get_int_value(deadframe, locs[0][1]) == 10 assert values == [faildescr, 1, 10] @@ -2491,7 +2494,7 @@ if flag: deadframe = self.cpu.force(token) fail = self.cpu.get_latest_descr(deadframe) - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) values.append(self.cpu.get_int_value(deadframe, locs[0][0])) values.append(self.cpu.get_int_value(deadframe, locs[0][2])) self.cpu.set_savedata_ref(deadframe, random_gcref) @@ -2508,8 +2511,7 @@ i2 = BoxInt() tok = BoxPtr() faildescr = BasicFailDescr(1) - jitcode = JitCode('name') - jitcode.setup(num_regs_i=3, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(3, 0, 0) ops = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), ResOperation(rop.FORCE_TOKEN, [], tok), @@ -2532,7 +2534,7 @@ deadframe = self.cpu.execute_token(looptoken, 10, 1) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 assert self.cpu.get_int_value(deadframe, locs[0][1]) == 42 assert self.cpu.get_int_value(deadframe, locs[0][2]) == 10 @@ -2547,7 +2549,7 @@ if flag: deadframe = self.cpu.force(token) fail = self.cpu.get_latest_descr(deadframe) - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) values.append(self.cpu.get_int_value(deadframe, locs[0][0])) values.append(self.cpu.get_int_value(deadframe, locs[0][1])) self.cpu.set_savedata_ref(deadframe, random_gcref) @@ -2564,8 +2566,7 @@ f2 = BoxFloat() tok = BoxPtr() faildescr = BasicFailDescr(1) - jitcode = JitCode('name') - jitcode.setup(num_regs_i=2, num_regs_r=0, num_regs_f=1) + jitcode, staticdata = get_jitcode(2, 0, 1) ops = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), ResOperation(rop.FORCE_TOKEN, [], tok), @@ -2589,7 +2590,7 @@ deadframe = self.cpu.execute_token(looptoken, 10, 1) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 - locs = rebuild_locs_from_resumedata(fail) + locs = 
rebuild_locs_from_resumedata(fail, staticdata) assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 x = self.cpu.get_float_value(deadframe, locs[0][2]) assert longlong.getrealfloat(x) == 42.5 @@ -2961,8 +2962,7 @@ i0 = BoxInt() i1 = BoxInt() faildescr = BasicFailDescr(1) - jitcode = JitCode('name') - jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(1) ops = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), ResOperation(rop.RESUME_PUT, [i1, ConstInt(0), ConstInt(0)], None), @@ -2985,7 +2985,7 @@ deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) assert fail is faildescr assert self.cpu.get_int_value(deadframe, locs[0][0]) == 9 print 'step 2 ok' @@ -3888,8 +3888,7 @@ targettoken1 = TargetToken() targettoken2 = TargetToken() faildescr = BasicFailDescr(2) - jitcode = JitCode('name') - jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(1) operations = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), ResOperation(rop.LABEL, [i0], None, descr=targettoken1), @@ -3907,7 +3906,7 @@ self.cpu.compile_loop(None, inputargs, operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) assert fail.identifier == 2 res = self.cpu.get_int_value(deadframe, locs[0][0]) assert res == 10 @@ -3921,7 +3920,7 @@ deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) assert fail.identifier == 3 res = self.cpu.get_int_value(deadframe, locs[0][0]) assert res == -10 @@ -3946,8 +3945,7 @@ py.test.skip("pointless test on non-asm") from rpython.jit.backend.tool.viewcode import machine_code_dump import ctypes - jitcode = JitCode('name') - jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(1) targettoken = TargetToken() ops = """ [i2] @@ -3969,7 +3967,7 @@ looptoken = JitCellToken() self.cpu.assembler.set_debug(False) info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) - locs = rebuild_locs_from_resumedata(faildescr) + locs = rebuild_locs_from_resumedata(faildescr, staticdata) bridge_info = self.cpu.compile_bridge(None, faildescr, [bridge.inputargs], locs, bridge.operations, looptoken) @@ -4008,8 +4006,7 @@ targettoken1 = TargetToken() faildescr1 = BasicFailDescr(2) inputargs = [i0] - jitcode = JitCode('name') - jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(1) operations = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), ResOperation(rop.INT_LE, [i0, ConstInt(1)], i1), @@ -4071,7 +4068,7 @@ ResOperation(rop.GUARD_TRUE, [i20], None, descr=BasicFailDescr(42)), ResOperation(rop.JUMP, [i19], None, descr=targettoken1), ] - locs = rebuild_locs_from_resumedata(faildescr1) + locs = rebuild_locs_from_resumedata(faildescr1, staticdata) self.cpu.compile_bridge(None, faildescr1, [inputargs], locs, operations2, looptoken1) looptoken2 = JitCellToken() @@ -4299,7 +4296,7 @@ def maybe_force(token, flag): deadframe = self.cpu.force(token) fail = self.cpu.get_latest_descr(deadframe) - locs = rebuild_locs_from_resumedata(fail) + locs = 
rebuild_locs_from_resumedata(fail, staticdata) values.append(self.cpu.get_int_value(deadframe, locs[0][0])) return 42 @@ -4313,8 +4310,7 @@ i2 = BoxInt() tok = BoxPtr() faildescr = BasicFailDescr(23) - jitcode = JitCode('name') - jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(1) ops = [ ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), ResOperation(rop.FORCE_TOKEN, [], tok), @@ -4329,7 +4325,7 @@ deadframe = self.cpu.execute_token(looptoken, 20, 0) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 23 - locs = rebuild_locs_from_resumedata(fail) + locs = rebuild_locs_from_resumedata(fail, staticdata) assert self.cpu.get_int_value(deadframe, locs[0][0]) == 42 # make sure that force reads the registers from a zeroed piece of # memory @@ -4340,6 +4336,8 @@ def func(): jitcode2 = JitCode('name2') jitcode2.setup(num_regs_i=7, num_regs_r=0, num_regs_f=0) + jitcode2.global_index = 1 + staticdata.alljitcodes.append(jitcode2) bridge = parse(""" [i1, i2, px] @@ -4375,7 +4373,7 @@ """, namespace={'finaldescr': finaldescr, 'calldescr2': calldescr2, 'guarddescr': guarddescr, 'func2_ptr': func2_ptr, 'jitcode2': jitcode2}) - locs = rebuild_locs_from_resumedata(faildescr) + locs = rebuild_locs_from_resumedata(faildescr, staticdata) self.cpu.compile_bridge(None, faildescr, [bridge.inputargs], locs, bridge.operations, looptoken) @@ -4402,8 +4400,7 @@ faildescr = BasicFailDescr(0) looptoken = JitCellToken() - jitcode = JitCode('name') - jitcode.setup(num_regs_i=2, num_regs_r=1, num_regs_f=0) + jitcode, staticdata = get_jitcode(2, 1) loop = parse(""" [i0, i1, i2] enter_frame(-1, descr=jitcode) @@ -4426,7 +4423,7 @@ frame = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, frame) assert len(frame.jf_frame) == frame.jf_frame_info.jfi_frame_depth - locs = rebuild_locs_from_resumedata(guarddescr) + locs = rebuild_locs_from_resumedata(guarddescr, staticdata) ref = self.cpu.get_ref_value(frame, locs[0][2]) token = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, ref) assert token != frame @@ -4475,7 +4472,7 @@ 'faildescr2': BasicFailDescr(1), 'xtp': xtp, 'jitcode2': jitcode2, }) - locs = rebuild_locs_from_resumedata(faildescr) + locs = rebuild_locs_from_resumedata(faildescr, staticdata) self.cpu.compile_bridge(None, faildescr, [bridge.inputargs], locs, bridge.operations, looptoken) raise LLException(xtp, xptr) @@ -4487,10 +4484,11 @@ EffectInfo.MOST_GENERAL) looptoken = JitCellToken() - jitcode = JitCode('name') - jitcode.setup(num_regs_i=2, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(2) jitcode2 = JitCode('name2') jitcode2.setup(num_regs_i=7, num_regs_r=0, num_regs_f=0) + jitcode2.global_index = 1 + staticdata.alljitcodes.append(jitcode2) loop = parse(""" [i0, i1, i2] enter_frame(-1, descr=jitcode) diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -12,8 +12,8 @@ from rpython.jit.codewriter.jitcode import JitCode from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.executor import execute -from rpython.jit.backend.test.runner_test import LLtypeBackendTest -from rpython.jit.metainterp.test.test_resume2 import rebuild_locs_from_resumedata +from rpython.jit.backend.test.runner_test import LLtypeBackendTest, get_jitcode +from rpython.jit.resume.test.test_frontend import rebuild_locs_from_resumedata from rpython.jit.tool.oparser import parse import 
ctypes @@ -268,8 +268,7 @@ p = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(lltype.GcStruct('x'))) nullptr = lltype.nullptr(llmemory.GCREF.TO) - jitcode = JitCode('name') - jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(1) f = BoxInt() for op in allops: for guard in guards: @@ -301,7 +300,7 @@ if isinstance(descr, BasicFinalDescr): pos = 0 else: - locs = rebuild_locs_from_resumedata(descr) + locs = rebuild_locs_from_resumedata(descr, staticdata) pos = locs[0][0] result = self.cpu.get_int_value(deadframe, pos) if guard == rop.GUARD_FALSE: @@ -331,8 +330,7 @@ guards = [rop.GUARD_FALSE, rop.GUARD_TRUE] all = [rop.INT_EQ, rop.INT_NE, rop.INT_LE, rop.INT_LT, rop.INT_GT, rop.INT_GE, rop.UINT_GT, rop.UINT_LT, rop.UINT_LE, rop.UINT_GE] - jitcode = JitCode('name') - jitcode.setup(num_regs_i=1, num_regs_r=0, num_regs_f=0) + jitcode, staticdata = get_jitcode(1) for a, b in boxes: for guard in guards: for op in all: @@ -360,7 +358,7 @@ if isinstance(descr, BasicFinalDescr): pos = 0 else: - locs = rebuild_locs_from_resumedata(descr) + locs = rebuild_locs_from_resumedata(descr, staticdata) pos = locs[0][0] result = self.cpu.get_int_value(deadframe, pos) expected = execute(self.cpu, None, op, None, a, b).value diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -135,8 +135,13 @@ elif op.getopnum() == rop.RESUME_PUT: frame_pos = op.getarg(1).getint() pos_in_frame = op.getarg(2).getint() - pos = self.get_box_pos(op.getarg(0)) - self.builder.resume_put(pos, frame_pos, pos_in_frame) + try: + pos = self.get_box_pos(op.getarg(0)) + except KeyError: + pos = TAGBOX + self.current_attachment[op.getarg(0)] = -1 + else: + self.builder.resume_put(pos, frame_pos, pos_in_frame) if pos & TAGBOX: self.frontend_pos[op.getarg(0)] = (frame_pos, pos_in_frame) elif op.getopnum() == rop.LEAVE_FRAME: From noreply at buildbot.pypy.org Fri Jan 17 18:13:15 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 17 Jan 2014 18:13:15 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: (fijal, rguillebert) make the first test_frontend pass Message-ID: <20140117171315.C99421C0291@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68729:7cee71ea433a Date: 2014-01-17 18:12 +0100 http://bitbucket.org/pypy/pypy/changeset/7cee71ea433a/ Log: (fijal, rguillebert) make the first test_frontend pass diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -1,6 +1,6 @@ -from rpython.jit.metainterp.resoperation import rop, ResOperation -from rpython.jit.metainterp.history import ConstInt, Box, Const +from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.history import Box, Const from rpython.jit.resume.rescode import ResumeBytecodeBuilder, TAGBOX,\ ResumeBytecode, TAGVIRTUAL from rpython.jit.codewriter.jitcode import JitCode @@ -67,7 +67,7 @@ pos += 1 continue else: - xxx + raise Exception("strange operation") pos += 1 def _track(self, allboxes, box): @@ -156,37 +156,7 @@ descr = op.getdescr() self.builder.resume_setfield_gc(structpos, fieldpos, descr) else: - xxx - return - xxxx - if op.getopnum() == rop.RESUME_PUT: - box = op.getarg(0) - args = op.getarglist() - if isinstance(box, Const): - XXX - newop = op.copy_and_change(rop.RESUME_PUT_CONST) - elif box in self.virtuals: - newop = op - else: - try: - loc = 
self.regalloc.loc(box, must_exist=True) - pos = loc.get_jitframe_position() - except KeyError: - # the thing is not *yet* anywhere, which means we'll record - # we know about it, but not store the resume_put just yet - self.current_attachment[box] = -1 - self.frontend_pos[box] = (args[1], args[2]) - return - self.current_attachment[box] = pos - self.frontend_pos[box] = (args[1], args[2]) - args[0] = ConstInt(pos) - newop = op.copy_and_change(rop.RESUME_PUT, args=args) - elif op.getopnum() == rop.RESUME_NEW: - self.virtuals[op.result] = None - newop = op - else: - newop = op - self.newops.append(newop) + raise Exception("strange operation") def _mark_visited(self, v, loc): pos = loc.get_jitframe_position() @@ -215,7 +185,8 @@ return self.builder.getpos() def finish(self, parent, parent_position, clt): - return ResumeBytecode(self.builder.build(), parent, parent_position, + return ResumeBytecode(self.builder.build(), self.builder.consts, + parent, parent_position, clt) diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -1,141 +1,134 @@ -import sys from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, ConstInt,\ - Box, INT, REF, FLOAT + INT, REF from rpython.jit.metainterp import history -from rpython.jit.codewriter.jitcode import JitCode -from rpython.rlib import rstack -from rpython.jit.resume.reader import ResumeFrame, Virtual -from rpython.jit.resume.rescode import TAGBOX, TAGCONST, TAGVIRTUAL, TAGOFFSET +from rpython.jit.resume.reader import AbstractResumeReader +from rpython.jit.resume.rescode import TAGBOX, TAGCONST, TAGSMALLINT, TAGVIRTUAL +# class AbstractResumeReader(object): +# """ A resume reader that can follow resume until given point. Consult +# the concrete classes for details +# """ +# def __init__(self): +# self.framestack = [] +# self.consts = [] # XXX cache? +# self.virtuals = {} +# self.virtual_list = [] -class AbstractResumeReader(object): - """ A resume reader that can follow resume until given point. Consult - the concrete classes for details - """ +# def rebuild(self, faildescr): +# self._rebuild_until(faildescr.rd_resume_bytecode, +# faildescr.rd_bytecode_position) +# return self.finish() - def __init__(self): - self.framestack = [] - self.consts = [] # XXX cache? 
- self.virtuals = {} - self.virtual_list = [] +# def finish(self): +# pass - def rebuild(self, faildescr): - self._rebuild_until(faildescr.rd_resume_bytecode, - faildescr.rd_bytecode_position) - return self.finish() +# def enter_frame(self, pc, jitcode): +# if self.framestack: +# assert pc != -1 +# self.framestack[-1].pc = pc +# self.framestack.append(ResumeFrame(jitcode)) - def finish(self): - pass +# def encode_box(self, pos): +# return TAGBOX | (pos << TAGOFFSET) - def enter_frame(self, pc, jitcode): - if self.framestack: - assert pc != -1 - self.framestack[-1].pc = pc - self.framestack.append(ResumeFrame(jitcode)) +# def encode_virtual(self, box): +# return TAGVIRTUAL | (self.virtuals[box].pos << TAGOFFSET) - def encode_box(self, pos): - return TAGBOX | (pos << TAGOFFSET) +# def encode_const(self, const): +# if isinstance(const, ConstInt) and const.getint() < (sys.maxint >> 3): +# return TAGSMALLINT | (const.getint() << TAGOFFSET) +# self.consts.append(const) +# return TAGCONST | ((len(self.consts) - 1) << TAGOFFSET) - def encode_virtual(self, box): - return TAGVIRTUAL | (self.virtuals[box].pos << TAGOFFSET) +# def decode(self, pos): +# return pos & 0x3, pos >> TAGOFFSET - def encode_const(self, const): - if isinstance(const, ConstInt) and const.getint() < (sys.maxint >> 3): - return TAGSMALLINT | (const.getint() << TAGOFFSET) - self.consts.append(const) - return TAGCONST | ((len(self.consts) - 1) << TAGOFFSET) +# def resume_put(self, jitframe_pos_box, frame_no, frontend_position): +# if isinstance(jitframe_pos_box, Box): +# jitframe_pos = self.encode_virtual(jitframe_pos_box) +# else: +# jitframe_pos = self.encode_box(jitframe_pos_box.getint()) +# self.framestack[frame_no].registers[frontend_position] = jitframe_pos - def decode(self, pos): - return pos & 0x3, pos >> TAGOFFSET +# def encode(self, box): +# xxx - def resume_put(self, jitframe_pos_box, frame_no, frontend_position): - if isinstance(jitframe_pos_box, Box): - jitframe_pos = self.encode_virtual(jitframe_pos_box) - else: - jitframe_pos = self.encode_box(jitframe_pos_box.getint()) - self.framestack[frame_no].registers[frontend_position] = jitframe_pos +# def resume_new(self, box, descr): +# # XXX make it a list +# v = Virtual(len(self.virtual_list), descr) +# self.virtuals[box] = v +# self.virtual_list.append(v) - def encode(self, box): - xxx +# def resume_setfield_gc(self, box, fieldbox, descr): +# # XXX optimize fields +# self.virtuals[box].fields[descr] = self.encode(fieldbox) - def resume_new(self, box, descr): - # XXX make it a list - v = Virtual(len(self.virtual_list), descr) - self.virtuals[box] = v - self.virtual_list.append(v) +# def resume_clear(self, frame_no, frontend_position): +# self.framestack[frame_no].registers[frontend_position] = -1 - def resume_setfield_gc(self, box, fieldbox, descr): - # XXX optimize fields - self.virtuals[box].fields[descr] = self.encode(fieldbox) +# def resume_put_const(self, const, frame_no, frontend_position): +# pos = self.encode_const(const) +# self.framestack[frame_no].registers[frontend_position] = pos - def resume_clear(self, frame_no, frontend_position): - self.framestack[frame_no].registers[frontend_position] = -1 +# def resume_set_pc(self, pc): +# self.framestack[-1].pc = pc - def resume_put_const(self, const, frame_no, frontend_position): - pos = self.encode_const(const) - self.framestack[frame_no].registers[frontend_position] = pos +# def leave_frame(self): +# self.framestack.pop() - def resume_set_pc(self, pc): - self.framestack[-1].pc = pc +# def _rebuild_until(self, rb, 
position): +# if rb.parent is not None: +# self._rebuild_until(rb.parent, rb.parent_position) +# self.interpret_until(rb.opcodes, position) - def leave_frame(self): - self.framestack.pop() +# def interpret_until(self, bytecode, until, pos=0): +# while pos < until: +# op = bytecode[pos] +# if op == rescode.ENTER_FRAME: +# xxx +# descr = op.getdescr() +# assert isinstance(descr, JitCode) +# self.enter_frame(op.getarg(0).getint(), descr) +# elif op.getopnum() == rop.LEAVE_FRAME: +# self.leave_frame() +# elif op.getopnum() == rop.RESUME_PUT: +# self.resume_put(op.getarg(0), op.getarg(1).getint(), +# op.getarg(2).getint()) +# elif op.getopnum() == rop.RESUME_NEW: +# self.resume_new(op.result, op.getdescr()) +# elif op.getopnum() == rop.RESUME_SETFIELD_GC: +# self.resume_setfield_gc(op.getarg(0), op.getarg(1), +# op.getdescr()) +# elif op.getopnum() == rop.RESUME_SET_PC: +# self.resume_set_pc(op.getarg(0).getint()) +# elif op.getopnum() == rop.RESUME_CLEAR: +# self.resume_clear(op.getarg(0).getint(), +# op.getarg(1).getint()) +# elif not op.is_resume(): +# pos += 1 +# continue +# else: +# xxx +# pos += 1 - def _rebuild_until(self, rb, position): - if rb.parent is not None: - self._rebuild_until(rb.parent, rb.parent_position) - self.interpret_until(rb.opcodes, position) +# def read_int(self, jitframe_pos): +# return self.metainterp.cpu.get_int_value(self.deadframe, jitframe_pos) - def interpret_until(self, bytecode, until, pos=0): - while pos < until: - op = bytecode[pos] - if op.getopnum() == rop.ENTER_FRAME: - descr = op.getdescr() - assert isinstance(descr, JitCode) - self.enter_frame(op.getarg(0).getint(), descr) - elif op.getopnum() == rop.LEAVE_FRAME: - self.leave_frame() - elif op.getopnum() == rop.RESUME_PUT: - self.resume_put(op.getarg(0), op.getarg(1).getint(), - op.getarg(2).getint()) - elif op.getopnum() == rop.RESUME_NEW: - self.resume_new(op.result, op.getdescr()) - elif op.getopnum() == rop.RESUME_SETFIELD_GC: - self.resume_setfield_gc(op.getarg(0), op.getarg(1), - op.getdescr()) - elif op.getopnum() == rop.RESUME_SET_PC: - self.resume_set_pc(op.getarg(0).getint()) - elif op.getopnum() == rop.RESUME_CLEAR: - self.resume_clear(op.getarg(0).getint(), - op.getarg(1).getint()) - elif not op.is_resume(): - pos += 1 - continue - else: - xxx - pos += 1 - - def read_int(self, jitframe_pos): - return self.metainterp.cpu.get_int_value(self.deadframe, jitframe_pos) - -class Dumper(AbstractResumeReader): - def __init__(self): - xxx class DirectResumeReader(AbstractResumeReader): """ Directly read values from the jitframe and put them in the blackhole interpreter """ - def __init__(self, binterpbuilder, cpu, deadframe): + def __init__(self, metainterp_sd, binterpbuilder, cpu, deadframe): self.bhinterpbuilder = binterpbuilder self.cpu = cpu self.deadframe = deadframe - AbstractResumeReader.__init__(self) + AbstractResumeReader.__init__(self, metainterp_sd) def finish(self): nextbh = None @@ -186,7 +179,7 @@ def __init__(self, metainterp, deadframe): self.metainterp = metainterp self.deadframe = deadframe - AbstractResumeReader.__init__(self) + AbstractResumeReader.__init__(self, metainterp.staticdata) def get_box_value(self, encoded_pos, TP): if encoded_pos == -1: diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -88,6 +88,7 @@ self.framestack.pop() def _rebuild_until(self, rb, position): + self.consts = rb.consts if rb.parent is not None: self._rebuild_until(rb.parent, rb.parent_position) 
self.interpret_until(rb, position) diff --git a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -12,9 +12,11 @@ TAGOFFSET = 2 class ResumeBytecode(object): - def __init__(self, opcodes, parent=None, parent_position=-1, loop=None): + def __init__(self, opcodes, consts, parent=None, parent_position=-1, + loop=None): self.opcodes = opcodes self.parent = parent + self.consts = consts self.parent_position = parent_position self.loop = loop @@ -28,6 +30,7 @@ class ResumeBytecodeBuilder(object): def __init__(self): self.l = [] + self.consts = [] def getpos(self): return len(self.l) diff --git a/rpython/jit/resume/test/test_backend.py b/rpython/jit/resume/test/test_backend.py --- a/rpython/jit/resume/test/test_backend.py +++ b/rpython/jit/resume/test/test_backend.py @@ -5,6 +5,7 @@ from rpython.jit.tool.oparser import parse from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.resume.test.test_frontend import rebuild_locs_from_resumedata +from rpython.jit.resume.test.support import MockStaticData from rpython.rtyper.lltypesystem import lltype class MockJitCode(JitCode): @@ -19,11 +20,6 @@ def __repr__(self): return 'MockJitCode(%d)' % self.no -class MockStaticData(object): - def __init__(self, jitcodes, descrs): - self.alljitcodes = jitcodes - self.opcode_descrs = descrs - def preparse(inp): return "\n".join([s.strip() for s in inp.split("\n") if s.strip()]) diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -1,10 +1,13 @@ from rpython.jit.tool.oparser import parse from rpython.jit.codewriter.jitcode import JitCode -from rpython.jit.metainterp.history import AbstractDescr, Const, INT, Stats +from rpython.jit.metainterp.history import AbstractDescr, Const, INT, Stats,\ + ConstInt from rpython.jit.resume.frontend import rebuild_from_resumedata -from rpython.jit.resume.rescode import ResumeBytecode, TAGBOX +from rpython.jit.resume.rescode import ResumeBytecode, TAGBOX,\ + ResumeBytecodeBuilder, TAGCONST, TAGSMALLINT from rpython.jit.resume.reader import AbstractResumeReader +from rpython.jit.resume.test.support import MockStaticData from rpython.jit.metainterp.resoperation import rop from rpython.jit.codewriter.format import unformat_assembler from rpython.jit.codewriter.codewriter import CodeWriter @@ -80,25 +83,28 @@ class TestResumeDirect(object): def test_box_resume_reader(self): jitcode = JitCode("jitcode") + jitcode.global_index = 0 jitcode.setup(num_regs_i=13, num_regs_r=0, num_regs_f=0) - resume_loop = parse(""" - [] - enter_frame(-1, descr=jitcode1) - resume_put(10, 0, 1) - resume_put_const(1, 0, 2) - leave_frame() - """, namespace= {'jitcode1': jitcode}) + builder = ResumeBytecodeBuilder() + builder.enter_frame(-1, jitcode) + builder.resume_put(TAGBOX | (100 << 2), 0, 1) + builder.resume_put(TAGCONST | (0 << 2), 0, 2) + builder.resume_put(TAGSMALLINT | (13 << 2), 0, 3) + builder.consts.append(ConstInt(15)) descr = Descr() - descr.rd_resume_bytecode = ResumeBytecode(resume_loop.operations) - descr.rd_bytecode_position = 3 + descr.rd_resume_bytecode = ResumeBytecode(builder.build(), + builder.consts) + descr.rd_bytecode_position = len(descr.rd_resume_bytecode.opcodes) metainterp = MockMetaInterp() + metainterp.staticdata = MockStaticData([jitcode], []) metainterp.cpu = MockCPU() rebuild_from_resumedata(metainterp, "myframe", descr) 
assert len(metainterp.framestack) == 1 f = metainterp.framestack[-1] - assert f.registers_i[1].getint() == 13 + assert f.registers_i[1].getint() == 103 assert isinstance(f.registers_i[2], Const) - assert f.registers_i[2].getint() == 1 + assert f.registers_i[2].getint() == 15 + assert f.registers_i[3].getint() == 13 def test_nested_call(self): jitcode1 = JitCode("jitcode") From noreply at buildbot.pypy.org Fri Jan 17 18:36:35 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 18:36:35 +0100 (CET) Subject: [pypy-commit] pypy annotator: kill missing_operation() Message-ID: <20140117173635.865291C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68730:9a3e720c790c Date: 2013-12-14 13:18 +0100 http://bitbucket.org/pypy/pypy/changeset/9a3e720c790c/ Log: kill missing_operation() diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -12,7 +12,7 @@ SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, - missing_operation, read_can_only_throw, add_knowntypedata, + read_can_only_throw, add_knowntypedata, merge_knowntypedata,) from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant @@ -26,8 +26,7 @@ BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 2]) -for opname in BINARY_OPERATIONS: - missing_operation(pairtype(SomeObject, SomeObject), opname) + class __extend__(pairtype(SomeObject, SomeObject)): diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -796,12 +796,6 @@ assert 0, "couldn't get to commonbase of %r and %r" % (cls1, cls2) -def missing_operation(cls, name): - def default_op(*args): - return s_ImpossibleValue - setattr(cls, name, default_op) - - class HarmlesslyBlocked(Exception): """Raised by the unaryop/binaryop to signal a harmless kind of BlockedInference: the current block is blocked, but not in a way diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -10,7 +10,7 @@ SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, SomePBC, SomeTypedAddressAccess, SomeAddress, SomeType, s_ImpossibleValue, - s_Bool, s_None, unionof, missing_operation, add_knowntypedata, + s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin @@ -23,8 +23,6 @@ UNARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() if oper.dispatch == 1]) -for opname in UNARY_OPERATIONS: - missing_operation(SomeObject, opname) class __extend__(SomeObject): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -14,6 +14,7 @@ from rpython.flowspace.model import (Constant, WrapException, const, Variable, SpaceOperation) from rpython.flowspace.specialcase import register_flow_sc +from rpython.annotator.model import s_ImpossibleValue NOT_REALLY_CONST = { Constant(sys): { @@ -136,13 +137,19 @@ class SingleDispatchMixin(object): 
dispatch = 1 def consider(self, annotator, arg, *other_args): - impl = getattr(arg, self.opname) + try: + impl = getattr(arg, self.opname) + except AttributeError: + return s_ImpossibleValue return impl(*other_args) class DoubleDispatchMixin(object): dispatch = 2 def consider(self, annotator, arg1, arg2, *other_args): - impl = getattr(pair(arg1, arg2), self.opname) + try: + impl = getattr(pair(arg1, arg2), self.opname) + except AttributeError: + return s_ImpossibleValue return impl(*other_args) From noreply at buildbot.pypy.org Fri Jan 17 18:36:36 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 18:36:36 +0100 (CET) Subject: [pypy-commit] pypy annotator: Implement the fallbacks explicitly in unaryop.py and binaryop.py Message-ID: <20140117173636.BAD3D1C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68731:0ae89c5687bb Date: 2013-12-14 20:22 +0100 http://bitbucket.org/pypy/pypy/changeset/0ae89c5687bb/ Log: Implement the fallbacks explicitly in unaryop.py and binaryop.py diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -131,6 +131,9 @@ raise AnnotatorError("Cannot find attribute %r on %r" % (attr, self)) getattr.can_only_throw = [] + def setattr(self, *args): + return s_ImpossibleValue + def bind_callables_under(self, classdef, name): return self # default unbound __get__ implementation @@ -150,6 +153,20 @@ def hint(self, *args_s): return self + def getslice(self, *args): + return s_ImpossibleValue + + def setslice(self, *args): + return s_ImpossibleValue + + def delslice(self, *args): + return s_ImpossibleValue + + def pos(self): + return s_ImpossibleValue + neg = abs = ord = invert = long = iter = next = pos + + class __extend__(SomeFloat): def pos(self): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -14,7 +14,7 @@ from rpython.flowspace.model import (Constant, WrapException, const, Variable, SpaceOperation) from rpython.flowspace.specialcase import register_flow_sc -from rpython.annotator.model import s_ImpossibleValue + NOT_REALLY_CONST = { Constant(sys): { @@ -137,19 +137,13 @@ class SingleDispatchMixin(object): dispatch = 1 def consider(self, annotator, arg, *other_args): - try: - impl = getattr(arg, self.opname) - except AttributeError: - return s_ImpossibleValue + impl = getattr(arg, self.opname) return impl(*other_args) class DoubleDispatchMixin(object): dispatch = 2 def consider(self, annotator, arg1, arg2, *other_args): - try: - impl = getattr(pair(arg1, arg2), self.opname) - except AttributeError: - return s_ImpossibleValue + impl = getattr(pair(arg1, arg2), self.opname) return impl(*other_args) From noreply at buildbot.pypy.org Fri Jan 17 18:36:37 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 18:36:37 +0100 (CET) Subject: [pypy-commit] pypy annotator: Kill all consider_op_*() methods Message-ID: <20140117173637.E06721C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68732:f70cd5a088d6 Date: 2014-01-17 01:18 +0000 http://bitbucket.org/pypy/pypy/changeset/f70cd5a088d6/ Log: Kill all consider_op_*() methods diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -611,19 +611,6 @@ def noreturnvalue(self, op): return annmodel.s_ImpossibleValue # no return value (hook 
method) - # XXX "contains" clash with SomeObject method - def consider_op_contains(self, seq, elem): - return seq.op_contains(elem) - - def consider_op_newtuple(self, *args): - return annmodel.SomeTuple(items = args) - - def consider_op_newlist(self, *args): - return self.bookkeeper.newlist(*args) - - def consider_op_newdict(self): - return self.bookkeeper.newdict() - class BlockedInference(Exception): """This exception signals the type inference engine that the situation diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -14,6 +14,7 @@ from rpython.flowspace.model import (Constant, WrapException, const, Variable, SpaceOperation) from rpython.flowspace.specialcase import register_flow_sc +from rpython.annotator.model import SomeTuple NOT_REALLY_CONST = { @@ -365,6 +366,37 @@ add_operator('newslice', 3) add_operator('hint', None, dispatch=1) +class Contains(PureOperation): + opname = 'contains' + arity = 2 + pyfunc = staticmethod(operator.contains) + + # XXX "contains" clash with SomeObject method + def consider(self, annotator, seq, elem): + return seq.op_contains(elem) + + +class NewDict(HLOperation): + opname = 'newdict' + canraise = [] + def consider(self, annotator, *args): + return annotator.bookkeeper.newdict() + + +class NewTuple(PureOperation): + opname = 'newtuple' + pyfunc = staticmethod(lambda *args: args) + canraise = [] + def consider(self, annotator, *args): + return SomeTuple(items=args) + + +class NewList(HLOperation): + opname = 'newlist' + canraise = [] + def consider(self, annotator, *args): + return annotator.bookkeeper.newlist(*args) + class Pow(PureOperation): opname = 'pow' diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1258,6 +1258,19 @@ assert ops[1].opname == 'simple_call' assert ops[1].args[0].value is os.unlink + def test_constfold_in(self): + def f(): + if 'x' in "xyz": + return 5 + else: + return 6 + graph = self.codetest(f) + assert graph.startblock.operations == [] + [link] = graph.startblock.exits + assert link.target is graph.returnblock + assert isinstance(link.args[0], Constant) + assert link.args[0].value == 5 + DATA = {'x': 5, 'y': 6} From noreply at buildbot.pypy.org Fri Jan 17 18:36:39 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 18:36:39 +0100 (CET) Subject: [pypy-commit] pypy annotator: kill unused file Message-ID: <20140117173639.131C31C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68733:ad4ea3128205 Date: 2013-12-28 00:47 +0100 http://bitbucket.org/pypy/pypy/changeset/ad4ea3128205/ Log: kill unused file diff --git a/rpython/rtyper/lltypesystem/rvirtualizable.py b/rpython/rtyper/lltypesystem/rvirtualizable.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rvirtualizable.py +++ /dev/null @@ -1,12 +0,0 @@ -from rpython.rtyper.lltypesystem import llmemory -from rpython.rtyper.lltypesystem.rclass import InstanceRepr -from rpython.rtyper.rvirtualizable import AbstractVirtualizableInstanceRepr - - -class VirtualizableInstanceRepr(AbstractVirtualizableInstanceRepr, - InstanceRepr): - def _setup_repr_llfields(self): - llfields = [] - if self.top_of_virtualizable_hierarchy: - llfields.append(('vable_token', llmemory.GCREF)) - return llfields From noreply at buildbot.pypy.org Fri Jan 17 18:36:40 2014 From: noreply at buildbot.pypy.org 
(rlamy) Date: Fri, 17 Jan 2014 18:36:40 +0100 (CET) Subject: [pypy-commit] pypy annotator: kill some FlowObjSpace methods, use operations directly instead Message-ID: <20140117173640.3D0DC1C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68734:8c85266bb4a5 Date: 2014-01-03 03:18 +0100 http://bitbucket.org/pypy/pypy/changeset/8c85266bb4a5/ Log: kill some FlowObjSpace methods, use operations directly instead diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -14,6 +14,7 @@ recursively_flatten) from rpython.flowspace.specialcase import (rpython_print_item, rpython_print_newline) +from rpython.flowspace.operation import op class FlowingError(Exception): @@ -213,60 +214,55 @@ # ____________________________________________________________ _unary_ops = [ - ('UNARY_POSITIVE', "pos"), - ('UNARY_NEGATIVE', "neg"), - ('UNARY_NOT', "not_"), - ('UNARY_CONVERT', "repr"), - ('UNARY_INVERT', "invert"), + ('UNARY_POSITIVE', op.pos), + ('UNARY_NEGATIVE', op.neg), + ('UNARY_CONVERT', op.repr), + ('UNARY_INVERT', op.invert), ] -def unaryoperation(OPCODE, op): +def unaryoperation(OPCODE, operation): def UNARY_OP(self, *ignored): - operation = getattr(self.space, op) w_1 = self.popvalue() - w_result = operation(w_1) + w_result = operation(w_1).eval(self) self.pushvalue(w_result) - UNARY_OP.unaryop = op UNARY_OP.func_name = OPCODE return UNARY_OP _binary_ops = [ - ('BINARY_MULTIPLY', "mul"), - ('BINARY_TRUE_DIVIDE', "truediv"), - ('BINARY_FLOOR_DIVIDE', "floordiv"), - ('BINARY_DIVIDE', "div"), - ('BINARY_MODULO', "mod"), - ('BINARY_ADD', "add"), - ('BINARY_SUBTRACT', "sub"), - ('BINARY_SUBSCR', "getitem"), - ('BINARY_LSHIFT', "lshift"), - ('BINARY_RSHIFT', "rshift"), - ('BINARY_AND', "and_"), - ('BINARY_XOR', "xor"), - ('BINARY_OR', "or_"), - ('INPLACE_MULTIPLY', "inplace_mul"), - ('INPLACE_TRUE_DIVIDE', "inplace_truediv"), - ('INPLACE_FLOOR_DIVIDE', "inplace_floordiv"), - ('INPLACE_DIVIDE', "inplace_div"), - ('INPLACE_MODULO', "inplace_mod"), - ('INPLACE_ADD', "inplace_add"), - ('INPLACE_SUBTRACT', "inplace_sub"), - ('INPLACE_LSHIFT', "inplace_lshift"), - ('INPLACE_RSHIFT', "inplace_rshift"), - ('INPLACE_AND', "inplace_and"), - ('INPLACE_XOR', "inplace_xor"), - ('INPLACE_OR', "inplace_or"), + ('BINARY_MULTIPLY', op.mul), + ('BINARY_TRUE_DIVIDE', op.truediv), + ('BINARY_FLOOR_DIVIDE', op.floordiv), + ('BINARY_DIVIDE', op.div), + ('BINARY_MODULO', op.mod), + ('BINARY_ADD', op.add), + ('BINARY_SUBTRACT', op.sub), + ('BINARY_SUBSCR', op.getitem), + ('BINARY_LSHIFT', op.lshift), + ('BINARY_RSHIFT', op.rshift), + ('BINARY_AND', op.and_), + ('BINARY_XOR', op.xor), + ('BINARY_OR', op.or_), + ('INPLACE_MULTIPLY', op.inplace_mul), + ('INPLACE_TRUE_DIVIDE', op.inplace_truediv), + ('INPLACE_FLOOR_DIVIDE', op.inplace_floordiv), + ('INPLACE_DIVIDE', op.inplace_div), + ('INPLACE_MODULO', op.inplace_mod), + ('INPLACE_ADD', op.inplace_add), + ('INPLACE_SUBTRACT', op.inplace_sub), + ('INPLACE_LSHIFT', op.inplace_lshift), + ('INPLACE_RSHIFT', op.inplace_rshift), + ('INPLACE_AND', op.inplace_and), + ('INPLACE_XOR', op.inplace_xor), + ('INPLACE_OR', op.inplace_or), ] -def binaryoperation(OPCODE, op): +def binaryoperation(OPCODE, operation): """NOT_RPYTHON""" - def BINARY_OP(self, *ignored): - operation = getattr(self.space, op) + def BINARY_OP(self, _): w_2 = self.popvalue() w_1 = self.popvalue() - w_result = operation(w_1, w_2) + w_result = operation(w_1, w_2).eval(self) 
self.pushvalue(w_result) - BINARY_OP.binop = op BINARY_OP.func_name = OPCODE return BINARY_OP @@ -585,6 +581,14 @@ def CONTINUE_LOOP(self, startofloop): raise Continue(startofloop) + def not_(self, w_obj): + w_bool = op.bool(w_obj).eval(self) + return const(not self.guessbool(w_bool)) + + def UNARY_NOT(self, _): + w_obj = self.popvalue() + self.pushvalue(self.not_(w_obj)) + def cmp_lt(self, w_1, w_2): return self.space.lt(w_1, w_2) @@ -607,13 +611,13 @@ return self.space.contains(w_2, w_1) def cmp_not_in(self, w_1, w_2): - return self.space.not_(self.space.contains(w_2, w_1)) + return self.not_(self.space.contains(w_2, w_1)) def cmp_is(self, w_1, w_2): - return self.space.is_(w_1, w_2) + return op.is_(w_1, w_2).eval(self) def cmp_is_not(self, w_1, w_2): - return self.space.not_(self.space.is_(w_1, w_2)) + return self.not_(op.is_(w_1, w_2).eval(self)) def cmp_exc_match(self, w_1, w_2): return self.space.newbool(self.space.exception_match(w_1, w_2)) @@ -722,34 +726,35 @@ def JUMP_IF_FALSE(self, target): # Python <= 2.6 only w_cond = self.peekvalue() - if not self.guessbool(self.space.bool(w_cond)): + if not self.guessbool(op.bool(w_cond).eval(self)): return target def JUMP_IF_TRUE(self, target): # Python <= 2.6 only w_cond = self.peekvalue() - if self.guessbool(self.space.bool(w_cond)): + if self.guessbool(op.bool(w_cond).eval(self)): return target def POP_JUMP_IF_FALSE(self, target): w_value = self.popvalue() - if not self.guessbool(self.space.bool(w_value)): + if not self.guessbool(op.bool(w_value).eval(self)): return target def POP_JUMP_IF_TRUE(self, target): w_value = self.popvalue() - if self.guessbool(self.space.bool(w_value)): + if self.guessbool(op.bool(w_value).eval(self)): return target def JUMP_IF_FALSE_OR_POP(self, target): w_value = self.peekvalue() - if not self.guessbool(self.space.bool(w_value)): + if not self.guessbool(op.bool(w_value).eval(self)): return target self.popvalue() def JUMP_IF_TRUE_OR_POP(self, target): w_value = self.peekvalue() - if self.guessbool(self.space.bool(w_value)): + if self.guessbool(op.bool(w_value).eval(self)): + return target return target self.popvalue() @@ -841,7 +846,7 @@ "obj.attributename" w_obj = self.popvalue() w_attributename = self.getname_w(nameindex) - w_value = self.space.getattr(w_obj, w_attributename) + w_value = op.getattr(w_obj, w_attributename).eval(self) self.pushvalue(w_value) LOOKUP_METHOD = LOAD_ATTR @@ -965,7 +970,7 @@ w_attributename = self.getname_w(nameindex) w_obj = self.popvalue() w_newvalue = self.popvalue() - self.space.setattr(w_obj, w_attributename, w_newvalue) + op.setattr(w_obj, w_attributename, w_newvalue).eval(self) def UNPACK_SEQUENCE(self, itemcount): w_iterable = self.popvalue() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -11,7 +11,7 @@ from rpython.flowspace.model import (Constant, Variable, checkgraph, const, FSException) from rpython.flowspace.bytecode import HostCode -from rpython.flowspace.operation import op, NOT_REALLY_CONST +from rpython.flowspace.operation import op from rpython.flowspace.flowcontext import (FlowSpaceFrame, fixeggblocks, FlowingError, Raise) from rpython.flowspace.generator import (tweak_generator_graph, @@ -87,11 +87,11 @@ "Catching %s is not valid in RPython" % check_class.__name__) if not isinstance(check_class, tuple): # the simple case - return frame.guessbool(self.issubtype(w_exc_type, w_check_class)) + return frame.guessbool(op.issubtype(w_exc_type, 
w_check_class).eval(frame)) # special case for StackOverflow (see rlib/rstackovf.py) if check_class == rstackovf.StackOverflow: w_real_class = const(rstackovf._StackOverflow) - return frame.guessbool(self.issubtype(w_exc_type, w_real_class)) + return frame.guessbool(op.issubtype(w_exc_type, w_real_class).eval(frame)) # checking a tuple of classes for klass in w_check_class.value: if self.exception_match(w_exc_type, const(klass)): @@ -108,12 +108,12 @@ if frame.guessbool(self.call_function(const(isinstance), w_arg1, self.w_type)): # this is for all cases of the form (Class, something) - if frame.guessbool(self.is_(w_arg2, self.w_None)): + if frame.guessbool(op.is_(w_arg2, self.w_None).eval(frame)): # raise Type: we assume we have to instantiate Type w_value = self.call_function(w_arg1) else: - w_valuetype = self.type(w_arg2) - if frame.guessbool(self.issubtype(w_valuetype, w_arg1)): + w_valuetype = op.type(w_arg2).eval(frame) + if frame.guessbool(op.issubtype(w_valuetype, w_arg1).eval(frame)): # raise Type, Instance: let etype be the exact type of value w_value = w_arg2 else: @@ -121,7 +121,7 @@ w_value = self.call_function(w_arg1, w_arg2) else: # the only case left here is (inst, None), from a 'raise inst'. - if not frame.guessbool(self.is_(w_arg2, self.w_None)): + if not frame.guessbool(op.is_(w_arg2, self.w_None).eval(frame)): exc = TypeError("instance exception may not have a " "separate value") raise Raise(const(exc)) @@ -138,16 +138,13 @@ else: w_len = self.len(w_iterable) w_correct = self.eq(w_len, const(expected_length)) - if not self.frame.guessbool(self.bool(w_correct)): + if not self.frame.guessbool(op.bool(w_correct).eval(self.frame)): w_exc = self.exc_from_raise(self.w_ValueError, self.w_None) raise Raise(w_exc) return [self.getitem(w_iterable, const(i)) for i in range(expected_length)] # ____________________________________________________________ - def not_(self, w_obj): - return const(not self.frame.guessbool(self.bool(w_obj))) - def import_name(self, name, glob=None, loc=None, frm=None, level=-1): try: mod = __import__(name, glob, loc, frm, level) @@ -211,7 +208,11 @@ raise FlowingError("global name '%s' is not defined" % varname) return const(value) -for cls in op.__dict__.values(): + +for cls in [op.len, op.type, op.eq, op.ne, op.contains, op.getitem, op.getattr, + op.getslice, op.setslice, op.delslice, op.yield_, op.iter, op.next, + op.lt, op.gt, op.le, op.ge, op.str, + op.newlist, op.newtuple, op.newdict, op.setitem, op.delitem]: if getattr(FlowObjSpace, cls.opname, None) is None: setattr(FlowObjSpace, cls.opname, cls.make_sc()) diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -914,6 +914,13 @@ simplify_graph(graph) assert self.all_operations(graph) == {'getitem': 1} + def test_delitem(self): + def f(c, x): + del c[x] + graph = self.codetest(f) + simplify_graph(graph) + assert self.all_operations(graph) == {'delitem': 1} + def test_context_manager(self): def f(c, x): with x: From noreply at buildbot.pypy.org Fri Jan 17 18:36:41 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 18:36:41 +0100 (CET) Subject: [pypy-commit] pypy annotator: kill more FlowObjSpace methods Message-ID: <20140117173641.5EA5F1C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68735:e03a640bd053 Date: 2014-01-04 15:24 +0100 http://bitbucket.org/pypy/pypy/changeset/e03a640bd053/ Log: kill more FlowObjSpace 
methods diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -590,28 +590,28 @@ self.pushvalue(self.not_(w_obj)) def cmp_lt(self, w_1, w_2): - return self.space.lt(w_1, w_2) + return op.lt(w_1, w_2).eval(self) def cmp_le(self, w_1, w_2): - return self.space.le(w_1, w_2) + return op.le(w_1, w_2).eval(self) def cmp_eq(self, w_1, w_2): - return self.space.eq(w_1, w_2) + return op.eq(w_1, w_2).eval(self) def cmp_ne(self, w_1, w_2): - return self.space.ne(w_1, w_2) + return op.ne(w_1, w_2).eval(self) def cmp_gt(self, w_1, w_2): - return self.space.gt(w_1, w_2) + return op.gt(w_1, w_2).eval(self) def cmp_ge(self, w_1, w_2): - return self.space.ge(w_1, w_2) + return op.ge(w_1, w_2).eval(self) def cmp_in(self, w_1, w_2): - return self.space.contains(w_2, w_1) + return op.contains(w_2, w_1).eval(self) def cmp_not_in(self, w_1, w_2): - return self.not_(self.space.contains(w_2, w_1)) + return self.not_(self.cmp_in(w_1, w_2)) def cmp_is(self, w_1, w_2): return op.is_(w_1, w_2).eval(self) @@ -714,7 +714,7 @@ def PRINT_ITEM(self, oparg): w_item = self.popvalue() - w_s = self.space.str(w_item) + w_s = op.str(w_item).eval(self) self.space.appcall(rpython_print_item, w_s) def PRINT_NEWLINE(self, oparg): diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -126,7 +126,7 @@ "separate value") raise Raise(const(exc)) w_value = w_arg1 - w_type = self.type(w_value) + w_type = op.type(w_value).eval(frame) return FSException(w_type, w_value) def unpack_sequence(self, w_iterable, expected_length): @@ -136,8 +136,8 @@ raise ValueError return [const(x) for x in l] else: - w_len = self.len(w_iterable) - w_correct = self.eq(w_len, const(expected_length)) + w_len = op.len(w_iterable).eval(self.frame) + w_correct = op.eq(w_len, const(expected_length)).eval(self.frame) if not self.frame.guessbool(op.bool(w_correct).eval(self.frame)): w_exc = self.exc_from_raise(self.w_ValueError, self.w_None) raise Raise(w_exc) @@ -209,9 +209,8 @@ return const(value) -for cls in [op.len, op.type, op.eq, op.ne, op.contains, op.getitem, op.getattr, +for cls in [op.getitem, op.getattr, op.getslice, op.setslice, op.delslice, op.yield_, op.iter, op.next, - op.lt, op.gt, op.le, op.ge, op.str, op.newlist, op.newtuple, op.newdict, op.setitem, op.delitem]: if getattr(FlowObjSpace, cls.opname, None) is None: setattr(FlowObjSpace, cls.opname, cls.make_sc()) From noreply at buildbot.pypy.org Fri Jan 17 18:36:42 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 18:36:42 +0100 (CET) Subject: [pypy-commit] pypy annotator: Move exc_from_raise() and unpack_sequence() out of FlowObjSpace Message-ID: <20140117173642.80C7C1C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68736:3604d3829a8c Date: 2014-01-04 02:17 +0100 http://bitbucket.org/pypy/pypy/changeset/3604d3829a8c/ Log: Move exc_from_raise() and unpack_sequence() out of FlowObjSpace diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -628,6 +628,36 @@ w_result = getattr(self, compare_method[testnum])(w_1, w_2) self.pushvalue(w_result) + def exc_from_raise(self, w_arg1, w_arg2): + """ + Create a wrapped exception from the arguments of a raise statement. + + Returns an FSException object whose w_value is an instance of w_type. 
+ """ + if self.guessbool(self.space.call_function(const(isinstance), w_arg1, + const(type))): + # this is for all cases of the form (Class, something) + if self.guessbool(op.is_(w_arg2, self.space.w_None).eval(self)): + # raise Type: we assume we have to instantiate Type + w_value = self.space.call_function(w_arg1) + else: + w_valuetype = op.type(w_arg2).eval(self) + if self.guessbool(op.issubtype(w_valuetype, w_arg1).eval(self)): + # raise Type, Instance: let etype be the exact type of value + w_value = w_arg2 + else: + # raise Type, X: assume X is the constructor argument + w_value = self.space.call_function(w_arg1, w_arg2) + else: + # the only case left here is (inst, None), from a 'raise inst'. + if not self.guessbool(op.is_(w_arg2, const(None)).eval(self)): + exc = TypeError("instance exception may not have a " + "separate value") + raise Raise(const(exc)) + w_value = w_arg1 + w_type = op.type(w_value).eval(self) + return FSException(w_type, w_value) + def RAISE_VARARGS(self, nbargs): space = self.space if nbargs == 0: @@ -643,13 +673,10 @@ if nbargs >= 2: w_value = self.popvalue() w_type = self.popvalue() - operror = space.exc_from_raise(w_type, w_value) + operror = self.exc_from_raise(w_type, w_value) else: w_type = self.popvalue() - if isinstance(w_type, FSException): - operror = w_type - else: - operror = space.exc_from_raise(w_type, space.w_None) + operror = self.exc_from_raise(w_type, space.w_None) raise Raise(operror) def IMPORT_NAME(self, nameindex): @@ -972,9 +999,18 @@ w_newvalue = self.popvalue() op.setattr(w_obj, w_attributename, w_newvalue).eval(self) + def unpack_sequence(self, w_iterable, expected_length): + w_len = op.len(w_iterable).eval(self) + w_correct = op.eq(w_len, const(expected_length)).eval(self) + if not self.guessbool(op.bool(w_correct).eval(self)): + w_exc = self.exc_from_raise(const(ValueError), const(None)) + raise Raise(w_exc) + return [self.space.getitem(w_iterable, const(i)) + for i in range(expected_length)] + def UNPACK_SEQUENCE(self, itemcount): w_iterable = self.popvalue() - items = self.space.unpack_sequence(w_iterable, itemcount) + items = self.unpack_sequence(w_iterable, itemcount) for w_item in reversed(items): self.pushvalue(w_item) diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -98,52 +98,6 @@ return True return False - def exc_from_raise(self, w_arg1, w_arg2): - """ - Create a wrapped exception from the arguments of a raise statement. - - Returns an FSException object whose w_value is an instance of w_type. - """ - frame = self.frame - if frame.guessbool(self.call_function(const(isinstance), w_arg1, - self.w_type)): - # this is for all cases of the form (Class, something) - if frame.guessbool(op.is_(w_arg2, self.w_None).eval(frame)): - # raise Type: we assume we have to instantiate Type - w_value = self.call_function(w_arg1) - else: - w_valuetype = op.type(w_arg2).eval(frame) - if frame.guessbool(op.issubtype(w_valuetype, w_arg1).eval(frame)): - # raise Type, Instance: let etype be the exact type of value - w_value = w_arg2 - else: - # raise Type, X: assume X is the constructor argument - w_value = self.call_function(w_arg1, w_arg2) - else: - # the only case left here is (inst, None), from a 'raise inst'. 
- if not frame.guessbool(op.is_(w_arg2, self.w_None).eval(frame)): - exc = TypeError("instance exception may not have a " - "separate value") - raise Raise(const(exc)) - w_value = w_arg1 - w_type = op.type(w_value).eval(frame) - return FSException(w_type, w_value) - - def unpack_sequence(self, w_iterable, expected_length): - if isinstance(w_iterable, Constant): - l = list(w_iterable.value) - if len(l) != expected_length: - raise ValueError - return [const(x) for x in l] - else: - w_len = op.len(w_iterable).eval(self.frame) - w_correct = op.eq(w_len, const(expected_length)).eval(self.frame) - if not self.frame.guessbool(op.bool(w_correct).eval(self.frame)): - w_exc = self.exc_from_raise(self.w_ValueError, self.w_None) - raise Raise(w_exc) - return [self.getitem(w_iterable, const(i)) - for i in range(expected_length)] - # ____________________________________________________________ def import_name(self, name, glob=None, loc=None, frm=None, level=-1): try: From noreply at buildbot.pypy.org Fri Jan 17 18:36:43 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 18:36:43 +0100 (CET) Subject: [pypy-commit] pypy annotator: use operations instead of FlowObjSpace methods everywhere Message-ID: <20140117173643.9C3111C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68737:65cb99edd161 Date: 2014-01-05 03:30 +0100 http://bitbucket.org/pypy/pypy/changeset/65cb99edd161/ Log: use operations instead of FlowObjSpace methods everywhere diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -730,7 +730,7 @@ def YIELD_VALUE(self, _): assert self.pycode.is_generator w_result = self.popvalue() - self.space.yield_(w_result) + op.yield_(w_result).eval(self) # XXX yield expressions not supported. This will blow up if the value # isn't popped straightaway. self.pushvalue(None) @@ -790,13 +790,13 @@ def GET_ITER(self, oparg): w_iterable = self.popvalue() - w_iterator = self.space.iter(w_iterable) + w_iterator = op.iter(w_iterable).eval(self) self.pushvalue(w_iterator) def FOR_ITER(self, target): w_iterator = self.peekvalue() try: - w_nextitem = self.space.next(w_iterator) + w_nextitem = op.next(w_iterator).eval(self) except Raise as e: w_exc = e.w_exc if not self.space.exception_match(w_exc.w_type, @@ -825,7 +825,7 @@ # directly call manager.__enter__(), don't use special lookup functions # which don't make sense on the RPython type system. w_manager = self.peekvalue() - w_exit = self.space.getattr(w_manager, const("__exit__")) + w_exit = op.getattr(w_manager, const("__exit__")).eval(self) self.settopvalue(w_exit) w_result = self.space.call_method(w_manager, "__enter__") block = WithBlock(self, target) @@ -949,7 +949,7 @@ # This opcode was added with pypy-1.8. Here is a simpler # version, enough for annotation. 
last_val = self.popvalue() - self.pushvalue(self.space.newlist()) + self.pushvalue(op.newlist().eval(self)) self.pushvalue(last_val) def call_function(self, oparg, w_star=None, w_starstar=None): @@ -1005,7 +1005,7 @@ if not self.guessbool(op.bool(w_correct).eval(self)): w_exc = self.exc_from_raise(const(ValueError), const(None)) raise Raise(w_exc) - return [self.space.getitem(w_iterable, const(i)) + return [op.getitem(w_iterable, const(i)).eval(self) for i in range(expected_length)] def UNPACK_SEQUENCE(self, itemcount): @@ -1016,7 +1016,7 @@ def slice(self, w_start, w_end): w_obj = self.popvalue() - w_result = self.space.getslice(w_obj, w_start, w_end) + w_result = op.getslice(w_obj, w_start, w_end).eval(self) self.pushvalue(w_result) def SLICE_0(self, oparg): @@ -1038,7 +1038,7 @@ def storeslice(self, w_start, w_end): w_obj = self.popvalue() w_newvalue = self.popvalue() - self.space.setslice(w_obj, w_start, w_end, w_newvalue) + op.setslice(w_obj, w_start, w_end, w_newvalue).eval(self) def STORE_SLICE_0(self, oparg): self.storeslice(self.space.w_None, self.space.w_None) @@ -1058,7 +1058,7 @@ def deleteslice(self, w_start, w_end): w_obj = self.popvalue() - self.space.delslice(w_obj, w_start, w_end) + op.delslice(w_obj, w_start, w_end).eval(self) def DELETE_SLICE_0(self, oparg): self.deleteslice(self.space.w_None, self.space.w_None) @@ -1095,14 +1095,14 @@ w_key = self.popvalue() w_value = self.popvalue() w_dict = self.peekvalue() - self.space.setitem(w_dict, w_key, w_value) + op.setitem(w_dict, w_key, w_value).eval(self) def STORE_SUBSCR(self, oparg): "obj[subscr] = newvalue" w_subscr = self.popvalue() w_obj = self.popvalue() w_newvalue = self.popvalue() - self.space.setitem(w_obj, w_subscr, w_newvalue) + op.setitem(w_obj, w_subscr, w_newvalue).eval(self) def BUILD_SLICE(self, numargs): if numargs == 3: @@ -1113,27 +1113,27 @@ raise BytecodeCorruption w_end = self.popvalue() w_start = self.popvalue() - w_slice = self.space.newslice(w_start, w_end, w_step) + w_slice = op.newslice(w_start, w_end, w_step).eval(self) self.pushvalue(w_slice) def DELETE_SUBSCR(self, oparg): "del obj[subscr]" w_subscr = self.popvalue() w_obj = self.popvalue() - self.space.delitem(w_obj, w_subscr) + op.delitem(w_obj, w_subscr).eval(self) def BUILD_TUPLE(self, itemcount): items = self.popvalues(itemcount) - w_tuple = self.space.newtuple(*items) + w_tuple = op.newtuple(*items).eval(self) self.pushvalue(w_tuple) def BUILD_LIST(self, itemcount): items = self.popvalues(itemcount) - w_list = self.space.newlist(*items) + w_list = op.newlist(*items).eval(self) self.pushvalue(w_list) def BUILD_MAP(self, itemcount): - w_dict = self.space.newdict() + w_dict = op.newdict().eval(self) self.pushvalue(w_dict) def NOP(self, *args): diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -8,8 +8,7 @@ from inspect import CO_NEWLOCALS from rpython.flowspace.argument import CallSpec -from rpython.flowspace.model import (Constant, Variable, checkgraph, const, - FSException) +from rpython.flowspace.model import Constant, Variable, checkgraph, const from rpython.flowspace.bytecode import HostCode from rpython.flowspace.operation import op from rpython.flowspace.flowcontext import (FlowSpaceFrame, fixeggblocks, @@ -110,13 +109,13 @@ assert isinstance(w_module, Constant) assert isinstance(w_name, Constant) try: - return self.getattr(w_module, w_name) + return op.getattr(w_module, w_name).eval(self.frame) except FlowingError: exc = 
ImportError("cannot import name '%s'" % w_name.value) raise Raise(const(exc)) def call_method(self, w_obj, methname, *arg_w): - w_meth = self.getattr(w_obj, const(methname)) + w_meth = op.getattr(w_obj, const(methname)).eval(self.frame) return self.call_function(w_meth, *arg_w) def call_function(self, w_func, *args_w): @@ -163,13 +162,6 @@ return const(value) -for cls in [op.getitem, op.getattr, - op.getslice, op.setslice, op.delslice, op.yield_, op.iter, op.next, - op.newlist, op.newtuple, op.newdict, op.setitem, op.delitem]: - if getattr(FlowObjSpace, cls.opname, None) is None: - setattr(FlowObjSpace, cls.opname, cls.make_sc()) - - def build_flow(func, space=FlowObjSpace()): """ Create the flow graph for the function. diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -42,7 +42,8 @@ if w_default is not None: return space.appcall(getattr, w_obj, w_index, w_default) else: - return space.getattr(w_obj, w_index) + from rpython.flowspace.operation import op + return op.getattr(w_obj, w_index).eval(space.frame) @register_flow_sc(open) def sc_open(space, *args_w): From noreply at buildbot.pypy.org Fri Jan 17 18:36:44 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 18:36:44 +0100 (CET) Subject: [pypy-commit] pypy annotator: kill FlowObjSpace.newbool() Message-ID: <20140117173644.BB53A1C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68738:b23ad5ee76a0 Date: 2014-01-06 21:35 +0100 http://bitbucket.org/pypy/pypy/changeset/b23ad5ee76a0/ Log: kill FlowObjSpace.newbool() diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -620,7 +620,7 @@ return self.not_(op.is_(w_1, w_2).eval(self)) def cmp_exc_match(self, w_1, w_2): - return self.space.newbool(self.space.exception_match(w_1, w_2)) + return const(self.space.exception_match(w_1, w_2)) def COMPARE_OP(self, testnum): w_2 = self.popvalue() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -59,12 +59,6 @@ def build_flow(self, func): return build_flow(func, self) - def newbool(self, b): - if b: - return self.w_True - else: - return self.w_False - def newfunction(self, w_code, w_globals, defaults_w): if not all(isinstance(value, Constant) for value in defaults_w): raise FlowingError("Dynamically created function must" From noreply at buildbot.pypy.org Fri Jan 17 18:36:45 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 18:36:45 +0100 (CET) Subject: [pypy-commit] pypy annotator: kill FlowObjSpace class attributes Message-ID: <20140117173645.E985A1C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68739:d49b5967e764 Date: 2014-01-09 16:05 +0000 http://bitbucket.org/pypy/pypy/changeset/d49b5967e764/ Log: kill FlowObjSpace class attributes diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -16,6 +16,7 @@ rpython_print_newline) from rpython.flowspace.operation import op +w_None = const(None) class FlowingError(Exception): """ Signals invalid RPython in the function being analysed""" @@ -472,7 +473,7 @@ except Raise as e: w_exc = e.w_exc - if w_exc.w_type == self.space.w_ImportError: + if w_exc.w_type == const(ImportError): msg 
= 'import statement always raises %s' % e raise ImportError(msg) link = Link([w_exc.w_type, w_exc.w_value], self.graph.exceptblock) @@ -637,7 +638,7 @@ if self.guessbool(self.space.call_function(const(isinstance), w_arg1, const(type))): # this is for all cases of the form (Class, something) - if self.guessbool(op.is_(w_arg2, self.space.w_None).eval(self)): + if self.guessbool(op.is_(w_arg2, w_None).eval(self)): # raise Type: we assume we have to instantiate Type w_value = self.space.call_function(w_arg1) else: @@ -659,7 +660,6 @@ return FSException(w_type, w_value) def RAISE_VARARGS(self, nbargs): - space = self.space if nbargs == 0: if self.last_exception is not None: w_exc = self.last_exception @@ -676,7 +676,7 @@ operror = self.exc_from_raise(w_type, w_value) else: w_type = self.popvalue() - operror = self.exc_from_raise(w_type, space.w_None) + operror = self.exc_from_raise(w_type, w_None) raise Raise(operror) def IMPORT_NAME(self, nameindex): @@ -708,7 +708,7 @@ # item (unlike CPython which can have 1, 2 or 3 items): # [subclass of FlowSignal] w_top = self.popvalue() - if w_top == self.space.w_None: + if w_top == w_None: # finally: block with no unroller active return elif isinstance(w_top, FlowSignal): @@ -799,8 +799,7 @@ w_nextitem = op.next(w_iterator).eval(self) except Raise as e: w_exc = e.w_exc - if not self.space.exception_match(w_exc.w_type, - self.space.w_StopIteration): + if not self.space.exception_match(w_exc.w_type, const(StopIteration)): raise # iterator exhausted self.popvalue() @@ -844,7 +843,6 @@ w_exitfunc = self.popvalue() unroller = self.peekvalue(0) - w_None = self.space.w_None if isinstance(unroller, Raise): w_exc = unroller.w_exc # The annotator won't allow to merge exception types with None. @@ -1020,15 +1018,15 @@ self.pushvalue(w_result) def SLICE_0(self, oparg): - self.slice(self.space.w_None, self.space.w_None) + self.slice(w_None, w_None) def SLICE_1(self, oparg): w_start = self.popvalue() - self.slice(w_start, self.space.w_None) + self.slice(w_start, w_None) def SLICE_2(self, oparg): w_end = self.popvalue() - self.slice(self.space.w_None, w_end) + self.slice(w_None, w_end) def SLICE_3(self, oparg): w_end = self.popvalue() @@ -1041,15 +1039,15 @@ op.setslice(w_obj, w_start, w_end, w_newvalue).eval(self) def STORE_SLICE_0(self, oparg): - self.storeslice(self.space.w_None, self.space.w_None) + self.storeslice(w_None, w_None) def STORE_SLICE_1(self, oparg): w_start = self.popvalue() - self.storeslice(w_start, self.space.w_None) + self.storeslice(w_start, w_None) def STORE_SLICE_2(self, oparg): w_end = self.popvalue() - self.storeslice(self.space.w_None, w_end) + self.storeslice(w_None, w_end) def STORE_SLICE_3(self, oparg): w_end = self.popvalue() @@ -1061,15 +1059,15 @@ op.delslice(w_obj, w_start, w_end).eval(self) def DELETE_SLICE_0(self, oparg): - self.deleteslice(self.space.w_None, self.space.w_None) + self.deleteslice(w_None, w_None) def DELETE_SLICE_1(self, oparg): w_start = self.popvalue() - self.deleteslice(w_start, self.space.w_None) + self.deleteslice(w_start, w_None) def DELETE_SLICE_2(self, oparg): w_end = self.popvalue() - self.deleteslice(self.space.w_None, w_end) + self.deleteslice(w_None, w_end) def DELETE_SLICE_3(self, oparg): w_end = self.popvalue() @@ -1108,7 +1106,7 @@ if numargs == 3: w_step = self.popvalue() elif numargs == 2: - w_step = self.space.w_None + w_step = w_None else: raise BytecodeCorruption w_end = self.popvalue() diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py 
+++ b/rpython/flowspace/objspace.py
@@ -3,7 +3,6 @@
 """
 import __builtin__
-import sys
 import types
 from inspect import CO_NEWLOCALS
@@ -39,23 +38,6 @@
     the space operations that the interpreter generates when it interprets
     (the bytecode of) some function.
     """
-    w_None = Constant(None)
-    sys = Constant(sys)
-    w_False = Constant(False)
-    w_True = Constant(True)
-    w_type = Constant(type)
-    w_tuple = Constant(tuple)
-    for exc in [KeyError, ValueError, IndexError, StopIteration,
-                AssertionError, TypeError, AttributeError, ImportError]:
-        clsname = exc.__name__
-        locals()['w_' + clsname] = Constant(exc)
-
-    # the following exceptions should not show up
-    # during flow graph construction
-    w_NameError = 'NameError'
-    w_UnboundLocalError = 'UnboundLocalError'
-    specialcases = SPECIAL_CASES
-
     def build_flow(self, func):
         return build_flow(func, self)
@@ -128,7 +110,7 @@
             fn = fn._flowspace_rewrite_directly_as_
             w_callable = const(fn)
             try:
-                sc = self.specialcases[fn]   # TypeError if 'fn' not hashable
+                sc = SPECIAL_CASES[fn]   # TypeError if 'fn' not hashable
             except (KeyError, TypeError):
                 pass
             else:

From noreply at buildbot.pypy.org Fri Jan 17 18:36:47 2014
From: noreply at buildbot.pypy.org (rlamy)
Date: Fri, 17 Jan 2014 18:36:47 +0100 (CET)
Subject: [pypy-commit] pypy annotator: move newfunction() and exception_match() out of FlowObjSpace
Message-ID: <20140117173647.20A021C0291@cobra.cs.uni-duesseldorf.de>

Author: Ronan Lamy
Branch: annotator
Changeset: r68740:34b5f7f6bef6
Date: 2014-01-09 16:55 +0000
http://bitbucket.org/pypy/pypy/changeset/34b5f7f6bef6/

Log: move newfunction() and exception_match() out of FlowObjSpace

diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py
--- a/rpython/flowspace/flowcontext.py
+++ b/rpython/flowspace/flowcontext.py
@@ -4,9 +4,11 @@
 import sys
 import collections
+import types
 from rpython.tool.error import source_lines
 from rpython.tool.stdlib_opcode import host_bytecode_spec
+from rpython.rlib import rstackovf
 from rpython.flowspace.argument import CallSpec
 from rpython.flowspace.model import (Constant, Variable, Block, Link,
     c_last_exception, const, FSException)
@@ -620,8 +622,29 @@
     def cmp_is_not(self, w_1, w_2):
         return self.not_(op.is_(w_1, w_2).eval(self))

+    def exception_match(self, w_exc_type, w_check_class):
+        """Checks if the given exception type matches 'w_check_class'."""
+        if not isinstance(w_check_class, Constant):
+            raise FlowingError("Non-constant except guard.")
+        check_class = w_check_class.value
+        if check_class in (NotImplementedError, AssertionError):
+            raise FlowingError(
+                "Catching %s is not valid in RPython" % check_class.__name__)
+        if not isinstance(check_class, tuple):
+            # the simple case
+            return self.guessbool(op.issubtype(w_exc_type, w_check_class).eval(self))
+        # special case for StackOverflow (see rlib/rstackovf.py)
+        if check_class == rstackovf.StackOverflow:
+            w_real_class = const(rstackovf._StackOverflow)
+            return self.guessbool(op.issubtype(w_exc_type, w_real_class).eval(self))
+        # checking a tuple of classes
+        for klass in w_check_class.value:
+            if self.exception_match(w_exc_type, const(klass)):
+                return True
+        return False
+
     def cmp_exc_match(self, w_1, w_2):
-        return const(self.space.exception_match(w_1, w_2))
+        return const(self.exception_match(w_1, w_2))

     def COMPARE_OP(self, testnum):
         w_2 = self.popvalue()
@@ -797,15 +820,13 @@
         w_iterator = self.peekvalue()
         try:
             w_nextitem = op.next(w_iterator).eval(self)
+            self.pushvalue(w_nextitem)
         except Raise as e:
-            w_exc = e.w_exc
-            if not self.space.exception_match(w_exc.w_type, const(StopIteration)):
+            if self.exception_match(e.w_exc.w_type, const(StopIteration)):
+                self.popvalue()
+                return target
+            else:
                 raise
-            # iterator exhausted
-            self.popvalue()
-            return target
-        else:
-            self.pushvalue(w_nextitem)

     def SETUP_LOOP(self, target):
         block = LoopBlock(self, target)
@@ -984,10 +1005,20 @@
         w_varargs = self.popvalue()
         self.call_function(oparg, w_varargs, w_varkw)

+    def newfunction(self, w_code, defaults_w):
+        if not all(isinstance(value, Constant) for value in defaults_w):
+            raise FlowingError("Dynamically created function must"
+                " have constant default values.")
+        code = w_code.value
+        globals = self.w_globals.value
+        defaults = tuple([default.value for default in defaults_w])
+        fn = types.FunctionType(code, globals, code.co_name, defaults)
+        return Constant(fn)
+
     def MAKE_FUNCTION(self, numdefaults):
         w_codeobj = self.popvalue()
         defaults = self.popvalues(numdefaults)
-        fn = self.space.newfunction(w_codeobj, self.w_globals, defaults)
+        fn = self.newfunction(w_codeobj, defaults)
         self.pushvalue(fn)

     def STORE_ATTR(self, nameindex):
diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py
--- a/rpython/flowspace/objspace.py
+++ b/rpython/flowspace/objspace.py
@@ -16,7 +16,6 @@
     bootstrap_generator)
 from rpython.flowspace.pygraph import PyGraph
 from rpython.flowspace.specialcase import SPECIAL_CASES
-from rpython.rlib import rstackovf
@@ -41,38 +40,6 @@
     def build_flow(self, func):
         return build_flow(func, self)

-    def newfunction(self, w_code, w_globals, defaults_w):
-        if not all(isinstance(value, Constant) for value in defaults_w):
-            raise FlowingError("Dynamically created function must"
-                " have constant default values.")
-        code = w_code.value
-        globals = w_globals.value
-        defaults = tuple([default.value for default in defaults_w])
-        fn = types.FunctionType(code, globals, code.co_name, defaults)
-        return Constant(fn)
-
-    def exception_match(self, w_exc_type, w_check_class):
-        """Checks if the given exception type matches 'w_check_class'."""
-        frame = self.frame
-        if not isinstance(w_check_class, Constant):
-            raise FlowingError("Non-constant except guard.")
-        check_class = w_check_class.value
-        if check_class in (NotImplementedError, AssertionError):
-            raise FlowingError(
-                "Catching %s is not valid in RPython" % check_class.__name__)
-        if not isinstance(check_class, tuple):
-            # the simple case
-            return frame.guessbool(op.issubtype(w_exc_type, w_check_class).eval(frame))
-        # special case for StackOverflow (see rlib/rstackovf.py)
-        if check_class == rstackovf.StackOverflow:
-            w_real_class = const(rstackovf._StackOverflow)
-            return frame.guessbool(op.issubtype(w_exc_type, w_real_class).eval(frame))
-        # checking a tuple of classes
-        for klass in w_check_class.value:
-            if self.exception_match(w_exc_type, const(klass)):
-                return True
-        return False
-
     # ____________________________________________________________
     def import_name(self, name, glob=None, loc=None, frm=None, level=-1):
         try:

From noreply at buildbot.pypy.org Fri Jan 17 18:36:48 2014
From: noreply at buildbot.pypy.org (rlamy)
Date: Fri, 17 Jan 2014 18:36:48 +0100 (CET)
Subject: [pypy-commit] pypy annotator: move import_name() out of FlowObjSpace
Message-ID: <20140117173648.3E5AA1C0291@cobra.cs.uni-duesseldorf.de>

Author: Ronan Lamy
Branch: annotator
Changeset: r68741:bb99875af0a9
Date: 2014-01-09 17:54 +0000
http://bitbucket.org/pypy/pypy/changeset/bb99875af0a9/

Log: move import_name() out of FlowObjSpace

diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py
--- a/rpython/flowspace/flowcontext.py
+++ b/rpython/flowspace/flowcontext.py
@@ -702,13 +702,19 @@
         operror = self.exc_from_raise(w_type, w_None)
         raise Raise(operror)

+    def import_name(self, name, glob=None, loc=None, frm=None, level=-1):
+        try:
+            mod = __import__(name, glob, loc, frm, level)
+        except ImportError as e:
+            raise Raise(const(e))
+        return const(mod)
+
     def IMPORT_NAME(self, nameindex):
-        space = self.space
         modulename = self.getname_u(nameindex)
         glob = self.w_globals.value
         fromlist = self.popvalue().value
         level = self.popvalue().value
-        w_obj = space.import_name(modulename, glob, None, fromlist, level)
+        w_obj = self.import_name(modulename, glob, None, fromlist, level)
         self.pushvalue(w_obj)

     def IMPORT_FROM(self, nameindex):
diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py
--- a/rpython/flowspace/objspace.py
+++ b/rpython/flowspace/objspace.py
@@ -3,7 +3,6 @@
 """
 import __builtin__
-import types
 from inspect import CO_NEWLOCALS
 from rpython.flowspace.argument import CallSpec
@@ -18,7 +17,6 @@
 from rpython.flowspace.specialcase import SPECIAL_CASES
-
 def _assert_rpythonic(func):
     """Raise ValueError if ``func`` is obviously not RPython"""
     if func.func_doc and func.func_doc.lstrip().startswith('NOT_RPYTHON'):
@@ -40,14 +38,6 @@
     def build_flow(self, func):
         return build_flow(func, self)

-    # ____________________________________________________________
-    def import_name(self, name, glob=None, loc=None, frm=None, level=-1):
-        try:
-            mod = __import__(name, glob, loc, frm, level)
-        except ImportError as e:
-            raise Raise(const(e))
-        return const(mod)
-
     def import_from(self, w_module, w_name):
         assert isinstance(w_module, Constant)
         assert isinstance(w_name, Constant)
diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py
--- a/rpython/flowspace/specialcase.py
+++ b/rpython/flowspace/specialcase.py
@@ -19,7 +19,7 @@
     assert len(args_w) > 0 and len(args_w) <= 5, 'import needs 1 to 5 arguments'
     assert all(isinstance(arg, Constant) for arg in args_w)
     args = [arg.value for arg in args_w]
-    return space.import_name(*args)
+    return space.frame.import_name(*args)

 @register_flow_sc(locals)
 def sc_locals(_, *args):

From noreply at buildbot.pypy.org Fri Jan 17 18:36:49 2014
From: noreply at buildbot.pypy.org (rlamy)
Date: Fri, 17 Jan 2014 18:36:49 +0100 (CET)
Subject: [pypy-commit] pypy annotator: move import_from() out of FlowObjSpace
Message-ID: <20140117173649.5DB2A1C0291@cobra.cs.uni-duesseldorf.de>

Author: Ronan Lamy
Branch: annotator
Changeset: r68742:f797aeca8026
Date: 2014-01-17 01:22 +0000
http://bitbucket.org/pypy/pypy/changeset/f797aeca8026/

Log: move import_from() out of FlowObjSpace

diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py
--- a/rpython/flowspace/flowcontext.py
+++ b/rpython/flowspace/flowcontext.py
@@ -717,10 +717,19 @@
         w_obj = self.import_name(modulename, glob, None, fromlist, level)
         self.pushvalue(w_obj)

+    def import_from(self, w_module, w_name):
+        assert isinstance(w_module, Constant)
+        assert isinstance(w_name, Constant)
+        try:
+            return op.getattr(w_module, w_name).eval(self)
+        except FlowingError:
+            exc = ImportError("cannot import name '%s'" % w_name.value)
+            raise Raise(const(exc))
+
     def IMPORT_FROM(self, nameindex):
         w_name = self.getname_w(nameindex)
         w_module = self.peekvalue()
-        self.pushvalue(self.space.import_from(w_module, w_name))
+        self.pushvalue(self.import_from(w_module, w_name))

     def RETURN_VALUE(self, oparg):
         w_returnvalue = self.popvalue()
diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py
--- a/rpython/flowspace/objspace.py
+++ b/rpython/flowspace/objspace.py
@@ -38,15 +38,6 @@
     def build_flow(self, func):
         return build_flow(func, self)

-    def import_from(self, w_module, w_name):
-        assert isinstance(w_module, Constant)
-        assert isinstance(w_name, Constant)
-        try:
-            return op.getattr(w_module, w_name).eval(self.frame)
-        except FlowingError:
-            exc = ImportError("cannot import name '%s'" % w_name.value)
-            raise Raise(const(exc))
-
     def call_method(self, w_obj, methname, *arg_w):
         w_meth = op.getattr(w_obj, const(methname)).eval(self.frame)
         return self.call_function(w_meth, *arg_w)

From noreply at buildbot.pypy.org Fri Jan 17 18:36:50 2014
From: noreply at buildbot.pypy.org (rlamy)
Date: Fri, 17 Jan 2014 18:36:50 +0100 (CET)
Subject: [pypy-commit] pypy annotator: move find_global() out of FlowObjSpace
Message-ID: <20140117173650.7F9D41C0291@cobra.cs.uni-duesseldorf.de>

Author: Ronan Lamy
Branch: annotator
Changeset: r68743:58127250a1ca
Date: 2014-01-17 01:41 +0000
http://bitbucket.org/pypy/pypy/changeset/58127250a1ca/

Log: move find_global() out of FlowObjSpace

diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py
--- a/rpython/flowspace/flowcontext.py
+++ b/rpython/flowspace/flowcontext.py
@@ -5,6 +5,7 @@
 import sys
 import collections
 import types
+import __builtin__
 from rpython.tool.error import source_lines
 from rpython.tool.stdlib_opcode import host_bytecode_spec
@@ -898,8 +899,19 @@
         w_const = self.getconstant_w(constindex)
         self.pushvalue(w_const)

+    def find_global(self, w_globals, varname):
+        try:
+            value = w_globals.value[varname]
+        except KeyError:
+            # not in the globals, now look in the built-ins
+            try:
+                value = getattr(__builtin__, varname)
+            except AttributeError:
+                raise FlowingError("global name '%s' is not defined" % varname)
+        return const(value)
+
     def LOAD_GLOBAL(self, nameindex):
-        w_result = self.space.find_global(self.w_globals, self.getname_u(nameindex))
+        w_result = self.find_global(self.w_globals, self.getname_u(nameindex))
         self.pushvalue(w_result)
     LOAD_NAME = LOAD_GLOBAL
diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py
--- a/rpython/flowspace/objspace.py
+++ b/rpython/flowspace/objspace.py
@@ -2,7 +2,6 @@
 with rpython.flowspace.flowcontext.
 """
-import __builtin__
 from inspect import CO_NEWLOCALS
 from rpython.flowspace.argument import CallSpec
@@ -74,17 +73,6 @@
         hlop = op.simple_call(w_callable, *args.as_list())
         return self.frame.do_op(hlop)

-    def find_global(self, w_globals, varname):
-        try:
-            value = w_globals.value[varname]
-        except KeyError:
-            # not in the globals, now look in the built-ins
-            try:
-                value = getattr(__builtin__, varname)
-            except AttributeError:
-                raise FlowingError("global name '%s' is not defined" % varname)
-        return const(value)
-

 def build_flow(func, space=FlowObjSpace()):
     """

From noreply at buildbot.pypy.org Fri Jan 17 18:36:51 2014
From: noreply at buildbot.pypy.org (rlamy)
Date: Fri, 17 Jan 2014 18:36:51 +0100 (CET)
Subject: [pypy-commit] pypy annotator: move appcall() out of FlowObjSpace
Message-ID: <20140117173651.B0E261C0291@cobra.cs.uni-duesseldorf.de>

Author: Ronan Lamy
Branch: annotator
Changeset: r68744:71ea2dd65784
Date: 2014-01-17 02:14 +0000
http://bitbucket.org/pypy/pypy/changeset/71ea2dd65784/

Log: move appcall() out of FlowObjSpace

diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py
--- a/rpython/flowspace/flowcontext.py
+++ b/rpython/flowspace/flowcontext.py
@@ -576,6 +576,11 @@
     def getname_w(self, index):
         return Constant(self.pycode.names[index])

+    def appcall(self, func, *args_w):
+        """Call an app-level RPython function directly"""
+        w_func = const(func)
+        return op.simple_call(w_func, *args_w).eval(self)
+
     def BAD_OPCODE(self, _):
         raise FlowingError("This operation is not RPython")
@@ -781,10 +786,10 @@
     def PRINT_ITEM(self, oparg):
         w_item = self.popvalue()
         w_s = op.str(w_item).eval(self)
-        self.space.appcall(rpython_print_item, w_s)
+        self.appcall(rpython_print_item, w_s)

     def PRINT_NEWLINE(self, oparg):
-        self.space.appcall(rpython_print_newline)
+        self.appcall(rpython_print_newline)

     def JUMP_FORWARD(self, target):
         return target
diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py
--- a/rpython/flowspace/objspace.py
+++ b/rpython/flowspace/objspace.py
@@ -45,11 +45,6 @@
         args = CallSpec(list(args_w))
         return self.call(w_func, args)

-    def appcall(self, func, *args_w):
-        """Call an app-level RPython function directly"""
-        w_func = const(func)
-        return op.simple_call(w_func, *args_w).eval(self.frame)
-
     def call(self, w_callable, args):
         if isinstance(w_callable, Constant):
             fn = w_callable.value
diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py
--- a/rpython/flowspace/specialcase.py
+++ b/rpython/flowspace/specialcase.py
@@ -35,12 +35,12 @@
 def sc_isinstance(space, w_instance, w_type):
     if w_instance.foldable() and w_type.foldable():
         return const(isinstance(w_instance.value, w_type.value))
-    return space.appcall(isinstance, w_instance, w_type)
+    return space.frame.appcall(isinstance, w_instance, w_type)

 @register_flow_sc(getattr)
 def sc_getattr(space, w_obj, w_index, w_default=None):
     if w_default is not None:
-        return space.appcall(getattr, w_obj, w_index, w_default)
+        return space.frame.appcall(getattr, w_obj, w_index, w_default)
     else:
         from rpython.flowspace.operation import op
         return op.getattr(w_obj, w_index).eval(space.frame)
@@ -48,18 +48,18 @@
 @register_flow_sc(open)
 def sc_open(space, *args_w):
     from rpython.rlib.rfile import create_file
-    return space.appcall(create_file, *args_w)
+    return space.frame.appcall(create_file, *args_w)

 @register_flow_sc(os.tmpfile)
 def sc_os_tmpfile(space):
     from rpython.rlib.rfile import create_temp_rfile
-    return space.appcall(create_temp_rfile)
+    return space.frame.appcall(create_temp_rfile)

 @register_flow_sc(os.remove)
 def sc_os_remove(space, *args_w):
     # on top of PyPy only: 'os.remove != os.unlink'
     # (on CPython they are '==', but not identical either)
-    return space.appcall(os.unlink, *args_w)
+    return space.frame.appcall(os.unlink, *args_w)

 # _________________________________________________________________________
 # a simplified version of the basic printing routines, for RPython programs
diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py
--- a/rpython/rlib/rarithmetic.py
+++ b/rpython/rlib/rarithmetic.py
@@ -521,7 +521,7 @@
     # show up in the flow graphs at all)
     if isinstance(w_value, Constant):
         return Constant(r_uint(w_value.value))
-    return space.appcall(r_uint, w_value)
+    return space.frame.appcall(r_uint, w_value)

 r_longlong = build_int('r_longlong', True, 64)

From noreply at buildbot.pypy.org Fri Jan 17 18:36:52 2014
From: noreply at buildbot.pypy.org (rlamy)
Date: Fri, 17 Jan 2014 18:36:52 +0100 (CET)
Subject: [pypy-commit] pypy annotator: move handling of _flowspace_rewrite_directly_as_ to const()
Message-ID: <20140117173652.CC3A31C0291@cobra.cs.uni-duesseldorf.de>

Author: Ronan Lamy
Branch: annotator
Changeset: r68745:e3fc10934126
Date: 2014-01-17 16:52 +0000
http://bitbucket.org/pypy/pypy/changeset/e3fc10934126/

Log: move handling of _flowspace_rewrite_directly_as_ to const()

diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py
--- a/rpython/flowspace/model.py
+++ b/rpython/flowspace/model.py
@@ -400,6 +400,8 @@
 type_with_bad_introspection = type(complex.real.__get__)

 def const(obj):
+    if hasattr(obj, "_flowspace_rewrite_directly_as_"):
+        obj = obj._flowspace_rewrite_directly_as_
     if isinstance(obj, (Variable, Constant)):
         raise TypeError("already wrapped: " + repr(obj))
     # method-wrapper have ill-defined comparison and introspection
diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py
--- a/rpython/flowspace/objspace.py
+++ b/rpython/flowspace/objspace.py
@@ -48,9 +48,6 @@
     def call(self, w_callable, args):
         if isinstance(w_callable, Constant):
             fn = w_callable.value
-            if hasattr(fn, "_flowspace_rewrite_directly_as_"):
-                fn = fn._flowspace_rewrite_directly_as_
-                w_callable = const(fn)
             try:
                 sc = SPECIAL_CASES[fn]   # TypeError if 'fn' not hashable
             except (KeyError, TypeError):
                 pass
             else:

From noreply at buildbot.pypy.org Fri Jan 17 18:56:01 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Fri, 17 Jan 2014 18:56:01 +0100 (CET)
Subject: [pypy-commit] stmgc c7: add demo2 and remove stuff
Message-ID: <20140117175601.4C6C21C087E@cobra.cs.uni-duesseldorf.de>

Author: Remi Meier
Branch: c7
Changeset: r632:73b59ed9cf7c
Date: 2014-01-17 18:55 +0100
http://bitbucket.org/pypy/stmgc/changeset/73b59ed9cf7c/

Log: add demo2 and remove stuff

diff --git a/c7/Makefile b/c7/Makefile
new file mode 100644
--- /dev/null
+++ b/c7/Makefile
@@ -0,0 +1,27 @@
+#
+# Makefile for the demos.
+# + +DEBUG_EXE = debug-demo2 +BUILD_EXE = build-demo2 +RELEASE_EXE = release-demo2 + +debug: $(DEBUG_EXE) # with prints and asserts +build: $(BUILD_EXE) # without prints, but with asserts +release: $(RELEASE_EXE) # without prints nor asserts + +clean: + rm -f $(BUILD_EXE) $(DEBUG_EXE) $(RELEASE_EXE) + + +H_FILES = core.h list.h pagecopy.h + +C_FILES = core.c list.c pagecopy.c + +DEBUG = -g + + +# note that we don't say -DNDEBUG, so that asserts should still be compiled in +# also, all debug code with extra checks but not the debugprints +build-%: %.c ${H_FILES} ${C_FILES} + clang -pthread -g $< -o build-$* -Wall ${C_FILES} diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -552,7 +552,9 @@ localchar_t *collect_and_reserve(size_t size) { + _stm_start_safe_point(); minor_collect(); + _stm_stop_safe_point(); localchar_t *current = _STM_TL2->nursery_current; _STM_TL2->nursery_current = current + size; @@ -570,6 +572,7 @@ localchar_t *current = _STM_TL2->nursery_current; localchar_t *new_current = current + size; _STM_TL2->nursery_current = new_current; + assert((uintptr_t)new_current < (1L << 32)); if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { current = collect_and_reserve(size); } @@ -686,7 +689,7 @@ { assert(!pthread_rwlock_trywrlock(&rwlock_shared)); assert(!pthread_rwlock_unlock(&rwlock_shared)); - + wait_until_updated(); stm_list_free(_STM_TL2->modified_objects); _STM_TL2->modified_objects = NULL; @@ -764,15 +767,6 @@ if (UNLIKELY(old_rv == 0xff)) reset_transaction_read_version(); - int old_wv = _STM_TL1->transaction_write_version; - _STM_TL1->transaction_write_version = old_wv + 1; - if (UNLIKELY(old_wv == 0xffff)) { - /* We run out of 16-bit numbers before we do the next major - collection, which resets it. XXX This case seems unlikely - for now, but check if it could become a bottleneck at some - point. 
*/ - stm_major_collection(); - } wait_until_updated(); assert(stm_list_is_empty(_STM_TL2->modified_objects)); diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -58,7 +58,6 @@ struct _thread_local1_s { jmpbufptr_t *jmpbufptr; uint8_t transaction_read_version; - uint16_t transaction_write_version; object_t **shadow_stack; object_t **shadow_stack_base; }; diff --git a/c7/demo2.c b/c7/demo2.c new file mode 100644 --- /dev/null +++ b/c7/demo2.c @@ -0,0 +1,249 @@ +#include +#include +#include +#include +#include + +#include "core.h" + + +#define LIST_LENGTH 2000 + +typedef TLPREFIX struct node_s node_t; +typedef node_t* nodeptr_t; +typedef object_t* objptr_t; + +struct node_s { + struct object_s hdr; + long value; + nodeptr_t next; +}; + + +size_t stmcb_size(struct object_s *ob) +{ + return sizeof(struct node_s); +} + +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ + struct node_s *n; + n = (struct node_s*)obj; + visit((object_t **)&n->next); +} + + +nodeptr_t global_chained_list = NULL; + + +long check_sorted() +{ + nodeptr_t r_n; + long prev, sum; + jmpbufptr_t here; + + back: + if (__builtin_setjmp(here) == 0) { + stm_start_transaction(&here); + + stm_read((objptr_t)global_chained_list); + r_n = global_chained_list; + assert(r_n->value == -1); + + prev = -1; + sum = 0; + while (r_n->next) { + r_n = r_n->next; + stm_read((objptr_t)r_n); + sum += r_n->value; + + if (prev >= r_n->value) { + stm_stop_transaction(); + return -1; + } + + prev = r_n->value; + } + + stm_stop_transaction(); + return sum; + } + goto back; +} + +nodeptr_t swap_nodes(nodeptr_t prev) +{ + jmpbufptr_t here; + + assert(prev != NULL); + back: + if (__builtin_setjmp(here) == 0) { + stm_start_transaction(&here); + + stm_read((objptr_t)prev); + nodeptr_t current = prev->next; + if (current == NULL) { + stm_stop_transaction(); + return NULL; + } + stm_read((objptr_t)current); + nodeptr_t next = current->next; + if (next == NULL) { + stm_stop_transaction(); + return NULL; + } + stm_read((objptr_t)next); + + if (next->value < current->value) { + stm_write((objptr_t)prev); + stm_write((objptr_t)current); + stm_write((objptr_t)next); + + prev->next = next; + current->next = next->next; + next->next = current; + } + + stm_stop_transaction(); + return current; + } + goto back; +} + + + + +void bubble_run() +{ + nodeptr_t r_current; + + r_current = global_chained_list; + while (r_current) { + r_current = swap_nodes(r_current); + } +} + + +/* initialize list with values in decreasing order */ +void setup_list() +{ + int i; + nodeptr_t w_newnode, w_prev; + + stm_start_transaction(NULL); + + global_chained_list = (nodeptr_t)stm_allocate(sizeof(struct node_s)); + global_chained_list->value = -1; + global_chained_list->next = NULL; + + stm_push_root((objptr_t)global_chained_list); + + w_prev = global_chained_list; + for (i = 0; i < LIST_LENGTH; i++) { + stm_push_root((objptr_t)w_prev); + w_newnode = (nodeptr_t)stm_allocate(sizeof(struct node_s)); + + w_prev = (nodeptr_t)stm_pop_root(); + w_newnode->value = LIST_LENGTH - i; + w_newnode->next = NULL; + + stm_write((objptr_t)w_prev); + w_prev->next = w_newnode; + w_prev = w_newnode; + } + + stm_stop_transaction(); + + global_chained_list = (nodeptr_t)stm_pop_root(); + + printf("setup ok\n"); +} + + +static sem_t done; +static sem_t go; +static sem_t initialized; + + +void *demo2(void *arg) +{ + + int status; + if (arg != NULL) { + /* we still need to initialize */ + stm_setup_thread(); + sem_post(&initialized); + status = sem_wait(&go); + assert(status 
== 0); + + } + + while (check_sorted() == -1) { + bubble_run(); + } + + if (arg != NULL) { + status = sem_post(&done); + assert(status == 0); + } + return NULL; +} + +void final_check(void) +{ + long sum; + + printf("final check\n"); + + sum = check_sorted(); + + // little Gauss: + assert(sum == (1 + LIST_LENGTH) * (LIST_LENGTH / 2)); + + printf("check ok\n"); +} + + +void newthread(void*(*func)(void*), void *arg) +{ + pthread_t th; + int status = pthread_create(&th, NULL, func, arg); + if (status != 0) + abort(); + pthread_detach(th); + printf("started new thread\n"); +} + + + +int main(void) +{ + int status; + + status = sem_init(&initialized, 0, 0); + assert(status == 0); + status = sem_init(&go, 0, 0); + assert(status == 0); + + stm_setup(); + stm_setup_thread(); + + newthread(demo2, (void*)1); + + status = sem_wait(&initialized); + assert(status == 0); + + setup_list(); + + status = sem_post(&go); + assert(status == 0); + + demo2(NULL); + + status = sem_wait(&done); + assert(status == 0); + + final_check(); + + return 0; +} From noreply at buildbot.pypy.org Fri Jan 17 18:56:02 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 18:56:02 +0100 (CET) Subject: [pypy-commit] stmgc c7: merge Message-ID: <20140117175602.7443A1C087E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r633:dc556612584e Date: 2014-01-17 18:55 +0100 http://bitbucket.org/pypy/stmgc/changeset/dc556612584e/ Log: merge diff --git a/c7/test/test_bug.py b/c7/test/test_bug.py --- a/c7/test/test_bug.py +++ b/c7/test/test_bug.py @@ -4,431 +4,6 @@ class TestBug(BaseTest): - def test_bug1(self): - stm_start_transaction() - p8 = stm_allocate(16) - p8[8] = '\x08' - stm_stop_transaction(False) - # - self.switch("sub1") - self.switch("main") - stm_start_transaction() - stm_write(p8) - p8[8] = '\x97' - # - self.switch("sub1") - stm_start_transaction() - stm_read(p8) - assert p8[8] == '\x08' - - def test_bug2(self): - stm_start_transaction() - p0 = stm_allocate(16) - p1 = stm_allocate(16) - p2 = stm_allocate(16) - p3 = stm_allocate(16) - p4 = stm_allocate(16) - p5 = stm_allocate(16) - p6 = stm_allocate(16) - p7 = stm_allocate(16) - p8 = stm_allocate(16) - p9 = stm_allocate(16) - p0[8] = '\x00' - p1[8] = '\x01' - p2[8] = '\x02' - p3[8] = '\x03' - p4[8] = '\x04' - p5[8] = '\x05' - p6[8] = '\x06' - p7[8] = '\x07' - p8[8] = '\x08' - p9[8] = '\t' - stm_stop_transaction(False) - self.switch(0) - self.switch(1) - self.switch(2) - # - self.switch(1) - stm_start_transaction() - stm_read(p7) - assert p7[8] == '\x07' - # - self.switch(1) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_read(p4) - assert p4[8] == '\x04' - # - self.switch(0) - stm_start_transaction() - stm_read(p3) - assert p3[8] == '\x03' - # - self.switch(2) - stm_start_transaction() - stm_read(p8) - assert p8[8] == '\x08' - stm_write(p8) - p8[8] = '\x08' - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_read(p2) - assert p2[8] == '\x02' - # - self.switch(2) - stm_read(p2) - assert p2[8] == '\x02' - # - self.switch(2) - stm_read(p2) - assert p2[8] == '\x02' - stm_write(p2) - p2[8] = 'm' - # - self.switch(0) - stm_read(p4) - assert p4[8] == '\x04' - stm_write(p4) - p4[8] = '\xc5' - # - self.switch(2) - stm_read(p1) - assert p1[8] == '\x01' - # - self.switch(2) - stm_stop_transaction(False) #1 - # ['\x00', '\x01', 'm', '\x03', '\x04', '\x05', '\x06', '\x07', '\x08', '\t'] - # log: [8, 2] - # - self.switch(0) - 
stm_stop_transaction(False) #2 - # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] - # log: [4] - # - self.switch(0) - stm_start_transaction() - stm_read(p6) - assert p6[8] == '\x06' - # - self.switch(0) - stm_read(p4) - assert p4[8] == '\xc5' - # - self.switch(0) - stm_read(p4) - assert p4[8] == '\xc5' - # - self.switch(1) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_stop_transaction(True) #3 - # conflict: 0xdf0a8028 - # - self.switch(2) - stm_start_transaction() - stm_read(p6) - assert p6[8] == '\x06' - # - self.switch(1) - stm_start_transaction() - stm_read(p1) - assert p1[8] == '\x01' - # - self.switch(0) - stm_read(p4) - assert p4[8] == '\xc5' - stm_write(p4) - p4[8] = '\x0c' - # - self.switch(2) - stm_read(p2) - assert p2[8] == 'm' - stm_write(p2) - p2[8] = '\x81' - # - self.switch(2) - stm_read(p7) - assert p7[8] == '\x07' - # - self.switch(0) - stm_read(p5) - assert p5[8] == '\x05' - stm_write(p5) - p5[8] = 'Z' - # - self.switch(1) - stm_stop_transaction(False) #4 - # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] - # log: [] - # - self.switch(2) - stm_read(p8) - assert p8[8] == '\x08' - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_start_transaction() - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(2) - stm_read(p9) - assert p9[8] == '\t' - stm_write(p9) - p9[8] = '\x81' - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_read(p2) - assert p2[8] == 'm' - # - self.switch(2) - stm_read(p9) - assert p9[8] == '\x81' - stm_write(p9) - p9[8] = 'g' - # - self.switch(1) - stm_read(p3) - assert p3[8] == '\x03' - # - self.switch(2) - stm_read(p7) - assert p7[8] == '\x07' - # - self.switch(1) - stm_read(p1) - assert p1[8] == '\x01' - # - self.switch(0) - stm_read(p2) - assert p2[8] == 'm' - stm_write(p2) - p2[8] = 'T' - # - self.switch(2) - stm_read(p4) - assert p4[8] == '\xc5' - # - self.switch(2) - stm_read(p9) - assert p9[8] == 'g' - # - self.switch(2) - stm_read(p1) - assert p1[8] == '\x01' - stm_write(p1) - p1[8] = 'L' - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(2) - stm_read(p0) - assert p0[8] == '\x00' - stm_write(p0) - p0[8] = '\xf3' - # - self.switch(1) - stm_stop_transaction(False) #5 - # ['\x00', '\x01', 'm', '\x03', '\xc5', '\x05', '\x06', '\x07', '\x08', '\t'] - # log: [] - # - self.switch(0) - stm_read(p1) - assert p1[8] == '\x01' - stm_write(p1) - p1[8] = '*' - # - self.switch(1) - stm_start_transaction() - stm_read(p3) - assert p3[8] == '\x03' - stm_write(p3) - p3[8] = '\xd2' - # - self.switch(0) - stm_stop_transaction(False) #6 - # ['\x00', '*', 'T', '\x03', '\x0c', 'Z', '\x06', '\x07', '\x08', '\t'] - # log: [1, 2, 4, 5] - # - self.switch(1) - stm_read(p7) - assert p7[8] == '\x07' - stm_write(p7) - p7[8] = '.' 
- # - self.switch(0) - stm_start_transaction() - stm_read(p7) - assert p7[8] == '\x07' - # - self.switch(1) - stm_read(p2) - assert p2[8] == 'm' - stm_write(p2) - p2[8] = '\xe9' - # - self.switch(1) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(0) - stm_read(p1) - assert p1[8] == '*' - # - self.switch(0) - stm_read(p8) - assert p8[8] == '\x08' - stm_write(p8) - p8[8] = 'X' - # - self.switch(2) - stm_stop_transaction(True) #7 - # conflict: 0xdf0a8018 - # - self.switch(1) - stm_read(p9) - assert p9[8] == '\t' - # - self.switch(0) - stm_read(p8) - assert p8[8] == 'X' - # - self.switch(1) - stm_read(p4) - assert p4[8] == '\xc5' - stm_write(p4) - p4[8] = '\xb2' - # - self.switch(0) - stm_read(p9) - assert p9[8] == '\t' - # - self.switch(2) - stm_start_transaction() - stm_read(p5) - assert p5[8] == 'Z' - stm_write(p5) - p5[8] = '\xfa' - # - self.switch(2) - stm_read(p3) - assert p3[8] == '\x03' - # - self.switch(1) - stm_read(p9) - assert p9[8] == '\t' - # - self.switch(1) - stm_read(p8) - assert p8[8] == '\x08' - stm_write(p8) - p8[8] = 'g' - # - self.switch(1) - stm_read(p8) - assert p8[8] == 'g' - # - self.switch(2) - stm_read(p5) - assert p5[8] == '\xfa' - stm_write(p5) - p5[8] = '\x86' - # - self.switch(2) - stm_read(p6) - assert p6[8] == '\x06' - # - self.switch(1) - stm_read(p4) - assert p4[8] == '\xb2' - stm_write(p4) - p4[8] = '\xce' - # - self.switch(2) - stm_read(p2) - assert p2[8] == 'T' - stm_write(p2) - p2[8] = 'Q' - # - self.switch(1) - stm_stop_transaction(True) #8 - # conflict: 0xdf0a8028 - # - self.switch(2) - stm_stop_transaction(False) #9 - # ['\x00', '*', 'Q', '\x03', '\x0c', '\x86', '\x06', '\x07', '\x08', '\t'] - # log: [2, 5] - # - self.switch(0) - stm_read(p0) - assert p0[8] == '\x00' - # - self.switch(1) - stm_start_transaction() - stm_read(p3) - assert p3[8] == '\x03' - # - self.switch(1) - stm_read(p5) - assert p5[8] == '\x86' - # - self.switch(2) - stm_start_transaction() - stm_read(p4) - assert p4[8] == '\x0c' - stm_write(p4) - p4[8] = '{' - # - self.switch(1) - stm_read(p2) - assert p2[8] == 'Q' - # - self.switch(2) - stm_read(p3) - assert p3[8] == '\x03' - stm_write(p3) - p3[8] = 'V' - # - self.switch(1) - stm_stop_transaction(False) #10 - # ['\x00', '*', 'Q', '\x03', '\x0c', '\x86', '\x06', '\x07', '\x08', '\t'] - # log: [] - # - self.switch(1) - stm_start_transaction() - stm_read(p7) - assert p7[8] == '\x07' - # - self.switch(2) - stm_read(p0) - assert p0[8] == '\x00' - stm_write(p0) - p0[8] = 'P' - # - self.switch(0) - stm_stop_transaction(False) #11 - def test_write_marker_no_conflict(self): # initialization stm_start_transaction() From noreply at buildbot.pypy.org Fri Jan 17 19:13:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 17 Jan 2014 19:13:04 +0100 (CET) Subject: [pypy-commit] stmgc c7: Bunch the swap_nodes Message-ID: <20140117181304.123B91C0291@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r634:28dac27037ba Date: 2014-01-17 19:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/28dac27037ba/ Log: Bunch the swap_nodes diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -986,5 +986,6 @@ assert(_STM_TL1->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ _STM_TL2->running_transaction = 0; stm_stop_lock(); + fprintf(stderr, "aborted\n"); __builtin_longjmp(*_STM_TL1->jmpbufptr, 1); } diff --git a/c7/demo2.c b/c7/demo2.c --- a/c7/demo2.c +++ b/c7/demo2.c @@ -7,7 +7,8 @@ #include "core.h" -#define LIST_LENGTH 2000 +#define LIST_LENGTH 5000 +#define BUNCH 400 typedef TLPREFIX struct node_s 
node_t; typedef node_t* nodeptr_t; @@ -71,41 +72,49 @@ goto back; } -nodeptr_t swap_nodes(nodeptr_t prev) +nodeptr_t swap_nodes(nodeptr_t initial) { jmpbufptr_t here; - assert(prev != NULL); + assert(initial != NULL); back: if (__builtin_setjmp(here) == 0) { stm_start_transaction(&here); + nodeptr_t prev = initial; + stm_read((objptr_t)prev); - stm_read((objptr_t)prev); - nodeptr_t current = prev->next; - if (current == NULL) { - stm_stop_transaction(); - return NULL; - } - stm_read((objptr_t)current); - nodeptr_t next = current->next; - if (next == NULL) { - stm_stop_transaction(); - return NULL; - } - stm_read((objptr_t)next); - - if (next->value < current->value) { - stm_write((objptr_t)prev); - stm_write((objptr_t)current); - stm_write((objptr_t)next); + int i; + for (i=0; inext; + if (current == NULL) { + stm_stop_transaction(); + return NULL; + } + stm_read((objptr_t)current); + nodeptr_t next = current->next; + if (next == NULL) { + stm_stop_transaction(); + return NULL; + } + stm_read((objptr_t)next); - prev->next = next; - current->next = next->next; - next->next = current; + if (next->value < current->value) { + stm_write((objptr_t)prev); + stm_write((objptr_t)current); + stm_write((objptr_t)next); + + prev->next = next; + current->next = next->next; + next->next = current; + + _stm_start_safe_point(); + _stm_stop_safe_point(); + } + prev = current; } stm_stop_transaction(); - return current; + return prev; } goto back; } From noreply at buildbot.pypy.org Fri Jan 17 22:02:24 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 22:02:24 +0100 (CET) Subject: [pypy-commit] pypy annotator: add missing fallbacks for annotating binary operations Message-ID: <20140117210224.A0ECA1C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68748:67d83f7a3bc7 Date: 2014-01-17 21:00 +0000 http://bitbucket.org/pypy/pypy/changeset/67d83f7a3bc7/ Log: add missing fallbacks for annotating binary operations diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -143,6 +143,14 @@ def coerce((obj1, obj2)): return pair(obj1, obj2).union() # reasonable enough + def getitem((obj1, obj2)): + return s_ImpossibleValue + add = sub = mul = truediv = floordiv = div = mod = getitem + lshift = rshift = and_ = or_ = xor = delitem = getitem + + def setitem((obj1, obj2), _): + return s_ImpossibleValue + # approximation of an annotation intersection, the result should be the annotation obj or # the intersection of obj and improvement def improve((obj, improvement)): From noreply at buildbot.pypy.org Fri Jan 17 22:02:23 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 22:02:23 +0100 (CET) Subject: [pypy-commit] pypy annotator: Kill FlowObjSpace.call_function() and FlowObjSpace.call_method() Message-ID: <20140117210223.7769D1C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68747:0d9f33903c87 Date: 2014-01-17 20:21 +0000 http://bitbucket.org/pypy/pypy/changeset/0d9f33903c87/ Log: Kill FlowObjSpace.call_function() and FlowObjSpace.call_method() diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -14,7 +14,8 @@ from rpython.rlib.rarithmetic import r_uint, base_int, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import objectmodel -from 
rpython.flowspace.objspace import build_flow, FlowingError +from rpython.flowspace.objspace import build_flow +from rpython.flowspace.flowcontext import FlowingError from rpython.flowspace.operation import op from rpython.translator.test import snippet diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -664,12 +664,12 @@ Returns an FSException object whose w_value is an instance of w_type. """ - if self.guessbool(self.space.call_function(const(isinstance), w_arg1, - const(type))): + w_is_type = op.simple_call(const(isinstance), w_arg1, const(type)).eval(self) + if self.guessbool(w_is_type): # this is for all cases of the form (Class, something) if self.guessbool(op.is_(w_arg2, w_None).eval(self)): # raise Type: we assume we have to instantiate Type - w_value = self.space.call_function(w_arg1) + w_value = op.simple_call(w_arg1).eval(self) else: w_valuetype = op.type(w_arg2).eval(self) if self.guessbool(op.issubtype(w_valuetype, w_arg1).eval(self)): @@ -677,7 +677,7 @@ w_value = w_arg2 else: # raise Type, X: assume X is the constructor argument - w_value = self.space.call_function(w_arg1, w_arg2) + w_value = op.simple_call(w_arg1, w_arg2).eval(self) else: # the only case left here is (inst, None), from a 'raise inst'. if not self.guessbool(op.is_(w_arg2, const(None)).eval(self)): @@ -868,7 +868,8 @@ w_manager = self.peekvalue() w_exit = op.getattr(w_manager, const("__exit__")).eval(self) self.settopvalue(w_exit) - w_result = self.space.call_method(w_manager, "__enter__") + w_enter = op.getattr(w_manager, const('__enter__')).eval(self) + w_result = op.simple_call(w_enter).eval(self) block = WithBlock(self, target) self.blockstack.append(block) self.pushvalue(w_result) @@ -889,10 +890,10 @@ w_exc = unroller.w_exc # The annotator won't allow to merge exception types with None. # Replace it with the exception value... 
- self.space.call_function(w_exitfunc, - w_exc.w_value, w_exc.w_value, w_None) + op.simple_call(w_exitfunc, w_exc.w_value, w_exc.w_value, w_None + ).eval(self) else: - self.space.call_function(w_exitfunc, w_None, w_None, w_None) + op.simple_call(w_exitfunc, w_None, w_None, w_None).eval(self) def LOAD_FAST(self, varindex): w_value = self.locals_stack_w[varindex] @@ -1138,12 +1139,13 @@ self.deleteslice(w_start, w_end) def LIST_APPEND(self, oparg): - w = self.popvalue() + w_value = self.popvalue() if sys.version_info < (2, 7): - v = self.popvalue() + w_list = self.popvalue() else: - v = self.peekvalue(oparg - 1) - self.space.call_method(v, 'append', w) + w_list = self.peekvalue(oparg - 1) + w_append_meth = op.getattr(w_list, const('append')).eval(self) + op.simple_call(w_append_meth, w_value).eval(self) def DELETE_FAST(self, varindex): if self.locals_stack_w[varindex] is None: diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -4,16 +4,13 @@ from inspect import CO_NEWLOCALS -from rpython.flowspace.argument import CallSpec -from rpython.flowspace.model import Constant, Variable, checkgraph, const +from rpython.flowspace.model import Constant, Variable, checkgraph from rpython.flowspace.bytecode import HostCode from rpython.flowspace.operation import op -from rpython.flowspace.flowcontext import (FlowSpaceFrame, fixeggblocks, - FlowingError, Raise) +from rpython.flowspace.flowcontext import (FlowSpaceFrame, fixeggblocks) from rpython.flowspace.generator import (tweak_generator_graph, bootstrap_generator) from rpython.flowspace.pygraph import PyGraph -from rpython.flowspace.specialcase import SPECIAL_CASES def _assert_rpythonic(func): @@ -37,14 +34,6 @@ def build_flow(self, func): return build_flow(func, self) - def call_method(self, w_obj, methname, *arg_w): - w_meth = op.getattr(w_obj, const(methname)).eval(self.frame) - return self.call_function(w_meth, *arg_w) - - def call_function(self, w_func, *args_w): - args = CallSpec(list(args_w)) - return self.call(w_func, args) - def call(self, w_callable, args): if args.keywords or isinstance(args.w_stararg, Variable): shape, args_w = args.flatten() From noreply at buildbot.pypy.org Fri Jan 17 22:02:22 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 17 Jan 2014 22:02:22 +0100 (CET) Subject: [pypy-commit] pypy annotator: move handling of call special cases to SimpleCall.eval() Message-ID: <20140117210222.471BC1C0291@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68746:e18a718a0185 Date: 2014-01-17 18:41 +0000 http://bitbucket.org/pypy/pypy/changeset/e18a718a0185/ Log: move handling of call special cases to SimpleCall.eval() diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -579,7 +579,7 @@ def appcall(self, func, *args_w): """Call an app-level RPython function directly""" w_func = const(func) - return op.simple_call(w_func, *args_w).eval(self) + return self.do_op(op.simple_call(w_func, *args_w)) def BAD_OPCODE(self, _): raise FlowingError("This operation is not RPython") diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -46,24 +46,12 @@ return self.call(w_func, args) def call(self, w_callable, args): - if isinstance(w_callable, Constant): - fn = w_callable.value - try: - sc = SPECIAL_CASES[fn] # 
TypeError if 'fn' not hashable - except (KeyError, TypeError): - pass - else: - if args.keywords: - raise FlowingError( - "should not call %r with keyword arguments" % (fn,)) - return sc(self, *args.as_list()) - if args.keywords or isinstance(args.w_stararg, Variable): shape, args_w = args.flatten() hlop = op.call_args(w_callable, Constant(shape), *args_w) else: hlop = op.simple_call(w_callable, *args.as_list()) - return self.frame.do_op(hlop) + return hlop.eval(self.frame) def build_flow(func, space=FlowObjSpace()): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -15,6 +15,7 @@ SpaceOperation) from rpython.flowspace.specialcase import register_flow_sc from rpython.annotator.model import SomeTuple +from rpython.flowspace.specialcase import SPECIAL_CASES NOT_REALLY_CONST = { @@ -495,9 +496,36 @@ class SimpleCall(SingleDispatchMixin, CallOp): opname = 'simple_call' + def eval(self, frame): + w_callable, args_w = self.args[0], self.args[1:] + if isinstance(w_callable, Constant): + fn = w_callable.value + try: + sc = SPECIAL_CASES[fn] # TypeError if 'fn' not hashable + except (KeyError, TypeError): + pass + else: + return sc(frame.space, *args_w) + return frame.do_op(self) + + class CallArgs(SingleDispatchMixin, CallOp): opname = 'call_args' + def eval(self, frame): + w_callable = self.args[0] + if isinstance(w_callable, Constant): + fn = w_callable.value + try: + sc = SPECIAL_CASES[fn] # TypeError if 'fn' not hashable + except (KeyError, TypeError): + pass + else: + raise FlowingError( + "should not call %r with keyword arguments" % (fn,)) + return frame.do_op(self) + + # Other functions that get directly translated to SpaceOperators func2op[type] = op.type func2op[operator.truth] = op.bool From noreply at buildbot.pypy.org Fri Jan 17 22:54:45 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 22:54:45 +0100 (CET) Subject: [pypy-commit] stmgc c7: small optimization moving the resetting of objects on abort outside of shared-lock Message-ID: <20140117215445.1E3B91C087E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r635:78d154b18084 Date: 2014-01-17 22:54 +0100 http://bitbucket.org/pypy/stmgc/changeset/78d154b18084/ Log: small optimization moving the resetting of objects on abort outside of shared-lock diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -351,11 +351,6 @@ -static void wait_until_updated(void) -{ - while (pending_updates == _STM_TL2->modified_objects) - spin_loop(); -} void _stm_write_slowpath(object_t *obj) @@ -690,7 +685,6 @@ assert(!pthread_rwlock_trywrlock(&rwlock_shared)); assert(!pthread_rwlock_unlock(&rwlock_shared)); - wait_until_updated(); stm_list_free(_STM_TL2->modified_objects); _STM_TL2->modified_objects = NULL; @@ -767,8 +761,6 @@ if (UNLIKELY(old_rv == 0xff)) reset_transaction_read_version(); - - wait_until_updated(); assert(stm_list_is_empty(_STM_TL2->modified_objects)); assert(stm_list_is_empty(_STM_TL2->old_objects_to_trace)); stm_list_clear(_STM_TL2->uncommitted_pages); @@ -949,9 +941,6 @@ /* here we hold the shared lock as a reader or writer */ assert(_STM_TL2->running_transaction); - /* reset all the modified objects (incl. 
re-adding GCFLAG_WRITE_BARRIER) */ - reset_modified_from_other_threads(); - stm_list_clear(_STM_TL2->modified_objects); /* clear old_objects_to_trace (they will have the WRITE_BARRIER flag set because the ones we care about are also in modified_objects) */ @@ -986,6 +975,12 @@ assert(_STM_TL1->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ _STM_TL2->running_transaction = 0; stm_stop_lock(); - fprintf(stderr, "aborted\n"); + fprintf(stderr, "a"); + + /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ + reset_modified_from_other_threads(); + stm_list_clear(_STM_TL2->modified_objects); + + __builtin_longjmp(*_STM_TL1->jmpbufptr, 1); } From noreply at buildbot.pypy.org Fri Jan 17 22:56:35 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 17 Jan 2014 22:56:35 +0100 (CET) Subject: [pypy-commit] pypy remove-num-smm: Close this branch - it's superseded by remove-intlong-smm. Message-ID: <20140117215635.689F31C087E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-num-smm Changeset: r68749:28f44947550a Date: 2014-01-17 11:09 +0100 http://bitbucket.org/pypy/pypy/changeset/28f44947550a/ Log: Close this branch - it's superseded by remove-intlong-smm. From noreply at buildbot.pypy.org Fri Jan 17 22:56:37 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 17 Jan 2014 22:56:37 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: hg merge default Message-ID: <20140117215637.114791C087E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r68750:c8c7be26e72f Date: 2014-01-17 22:47 +0100 http://bitbucket.org/pypy/pypy/changeset/c8c7be26e72f/ Log: hg merge default diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1234,6 +1234,8 @@ flags |= consts.CO_NESTED if scope.is_generator: flags |= consts.CO_GENERATOR + if scope.has_yield_inside_try: + flags |= consts.CO_YIELD_INSIDE_TRY if scope.has_variable_arg: flags |= consts.CO_VARARGS if scope.has_keywords_arg: diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -17,6 +17,7 @@ CO_FUTURE_UNICODE_LITERALS = 0x20000 #pypy specific: CO_KILL_DOCSTRING = 0x100000 +CO_YIELD_INSIDE_TRY = 0x200000 PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -43,6 +43,7 @@ self.child_has_free = False self.nested = False self.doc_removable = False + self._in_try_body_depth = 0 def lookup(self, name): """Find the scope of identifier 'name'.""" @@ -75,6 +76,14 @@ self.varnames.append(mangled) return mangled + def note_try_start(self, try_node): + """Called when a try is found, before visiting the body.""" + self._in_try_body_depth += 1 + + def note_try_end(self, try_node): + """Called after visiting a try body.""" + self._in_try_body_depth -= 1 + def note_yield(self, yield_node): """Called when a yield is found.""" raise SyntaxError("'yield' outside function", yield_node.lineno, @@ -210,6 +219,7 @@ self.has_variable_arg = False self.has_keywords_arg = False self.is_generator = False + self.has_yield_inside_try = False self.optimized = True self.return_with_value = False self.import_star = None @@ 
-220,6 +230,8 @@ raise SyntaxError("'return' with argument inside generator", self.ret.lineno, self.ret.col_offset) self.is_generator = True + if self._in_try_body_depth > 0: + self.has_yield_inside_try = True def note_return(self, ret): if ret.value: @@ -463,7 +475,12 @@ self.scope.new_temporary_name() if wih.optional_vars: self.scope.new_temporary_name() - ast.GenericASTVisitor.visit_With(self, wih) + wih.context_expr.walkabout(self) + if wih.optional_vars: + wih.optional_vars.walkabout(self) + self.scope.note_try_start(wih) + self.visit_sequence(wih.body) + self.scope.note_try_end(wih) def visit_arguments(self, arguments): scope = self.scope @@ -505,3 +522,16 @@ else: role = SYM_ASSIGNED self.note_symbol(name.id, role) + + def visit_TryExcept(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.handlers) + self.visit_sequence(node.orelse) + + def visit_TryFinally(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.finalbody) diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -346,6 +346,25 @@ assert exc.msg == "'return' with argument inside generator" scp = self.func_scope("def f():\n return\n yield x") + def test_yield_inside_try(self): + scp = self.func_scope("def f(): yield x") + assert not scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n except: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n finally: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n with x: yield y") + assert scp.has_yield_inside_try + + def test_yield_outside_try(self): + for input in ("try: pass\n except: pass", + "try: pass\n except: yield y", + "try: pass\n finally: pass", + "try: pass\n finally: yield y", + "with x: pass"): + input = "def f():\n yield y\n %s\n yield y" % (input,) + assert not self.func_scope(input).has_yield_inside_try + def test_return(self): for input in ("class x: return", "return"): exc = py.test.raises(SyntaxError, self.func_scope, input).value diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -155,20 +155,6 @@ code_name = self.pycode.co_name return space.wrap(code_name) - def __del__(self): - # Only bother enqueuing self to raise an exception if the frame is - # still not finished and finally or except blocks are present. - self.clear_all_weakrefs() - if self.frame is not None: - block = self.frame.lastblock - while block is not None: - if not isinstance(block, LoopBlock): - self.enqueue_for_destruction(self.space, - GeneratorIterator.descr_close, - "interrupting generator of ") - break - block = block.previous - # Results can be either an RPython list of W_Root, or it can be an # app-level W_ListObject, which also has an append() method, that's why we # generate 2 versions of the function and 2 jit drivers. @@ -211,3 +197,20 @@ return unpack_into unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() + + +class GeneratorIteratorWithDel(GeneratorIterator): + + def __del__(self): + # Only bother enqueuing self to raise an exception if the frame is + # still not finished and finally or except blocks are present. 
+ self.clear_all_weakrefs() + if self.frame is not None: + block = self.frame.lastblock + while block is not None: + if not isinstance(block, LoopBlock): + self.enqueue_for_destruction(self.space, + GeneratorIterator.descr_close, + "interrupting generator of ") + break + block = block.previous diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR, CO_KILL_DOCSTRING) + CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -167,8 +167,12 @@ def run(self): """Start this frame's execution.""" if self.getcode().co_flags & pycode.CO_GENERATOR: - from pypy.interpreter.generator import GeneratorIterator - return self.space.wrap(GeneratorIterator(self)) + if pycode.CO_YIELD_INSIDE_TRY: + from pypy.interpreter.generator import GeneratorIteratorWithDel + return self.space.wrap(GeneratorIteratorWithDel(self)) + else: + from pypy.interpreter.generator import GeneratorIterator + return self.space.wrap(GeneratorIterator(self)) else: return self.execute_frame() diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -5,7 +5,7 @@ from pypy.interpreter.module import Module from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pytraceback import PyTraceback -from pypy.interpreter.generator import GeneratorIterator +from pypy.interpreter.generator import GeneratorIteratorWithDel from rpython.rlib.objectmodel import instantiate from pypy.interpreter.gateway import unwrap_spec from pypy.objspace.std.iterobject import W_SeqIterObject, W_ReverseSeqIterObject @@ -60,7 +60,7 @@ return space.wrap(tb) def generator_new(space): - new_generator = instantiate(GeneratorIterator) + new_generator = instantiate(GeneratorIteratorWithDel) return space.wrap(new_generator) @unwrap_spec(current=int, remaining=int, step=int) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1337,10 +1337,9 @@ l[index] = self.unwrap(w_item) except IndexError: raise - return - - w_list.switch_to_object_strategy() - w_list.setitem(index, w_item) + else: + w_list.switch_to_object_strategy() + w_list.setitem(index, w_item) def setslice(self, w_list, start, step, slicelength, w_other): assert slicelength >= 0 diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -27,6 +27,9 @@ jit.loop_unrolling_heuristic(other, other.length(), UNROLL_CUTOFF)) +contains_jmp = jit.JitDriver(greens = [], reds = 'auto', + name = 'tuple.contains') + class W_AbstractTupleObject(W_Root): __slots__ = () @@ -119,13 +122,26 @@ descr_gt = _make_tuple_comparison('gt') descr_ge = _make_tuple_comparison('ge') - @jit.look_inside_iff(lambda self, _1, _2: _unroll_condition(self)) def descr_contains(self, space, w_obj): + if _unroll_condition(self): + return 
self._descr_contains_unroll_safe(space, w_obj) + else: + return self._descr_contains_jmp(space, w_obj) + + @jit.unroll_safe + def _descr_contains_unroll_safe(self, space, w_obj): for w_item in self.tolist(): if space.eq_w(w_item, w_obj): return space.w_True return space.w_False + def _descr_contains_jmp(self, space, w_obj): + for w_item in self.tolist(): + contains_jmp.jit_merge_point() + if space.eq_w(w_item, w_obj): + return space.w_True + return space.w_False + def descr_add(self, space, w_other): if not isinstance(w_other, W_AbstractTupleObject): return space.w_NotImplemented diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2102,11 +2102,11 @@ if not box1.same_constant(box2): break else: - # Found! Compile it as a loop. - # raises in case it works -- which is the common case if self.partial_trace: if start != self.retracing_from: raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP) # For now + # Found! Compile it as a loop. + # raises in case it works -- which is the common case self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! self.cancel_count += 1 diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -1290,26 +1290,58 @@ # Even if it's not power of two it can still be useful. return _muladd1(b, digit) + # a is not b + # use the following identity to reduce the number of operations + # a * b = a_0*b_0 + sum_{i=1}^n(a_0*b_i + a_1*b_{i-1}) + a_1*b_n z = rbigint([NULLDIGIT] * (size_a + size_b), 1) - # gradeschool long mult i = UDIGIT_TYPE(0) - while i < size_a: - carry = 0 - f = a.widedigit(i) + size_a1 = UDIGIT_TYPE(size_a - 1) + size_b1 = UDIGIT_TYPE(size_b - 1) + while i < size_a1: + f0 = a.widedigit(i) + f1 = a.widedigit(i + 1) pz = i + carry = z.widedigit(pz) + b.widedigit(0) * f0 + z.setdigit(pz, carry) + pz += 1 + carry >>= SHIFT + j = UDIGIT_TYPE(0) + while j < size_b1: + # this operation does not overflow using + # SHIFT = (LONG_BIT // 2) - 1 = B - 1; in fact before it + # carry and z.widedigit(pz) are less than 2**(B - 1); + # b.widedigit(j + 1) * f0 < (2**(B-1) - 1)**2; so + # carry + z.widedigit(pz) + b.widedigit(j + 1) * f0 + + # b.widedigit(j) * f1 < 2**(2*B - 1) - 2**B < 2**LONG)BIT - 1 + carry += z.widedigit(pz) + b.widedigit(j + 1) * f0 + \ + b.widedigit(j) * f1 + z.setdigit(pz, carry) + pz += 1 + carry >>= SHIFT + j += 1 + # carry < 2**(B + 1) - 2 + carry += z.widedigit(pz) + b.widedigit(size_b1) * f1 + z.setdigit(pz, carry) + pz += 1 + carry >>= SHIFT + # carry < 4 + if carry: + z.setdigit(pz, carry) + assert (carry >> SHIFT) == 0 + i += 2 + if size_a & 1: + pz = size_a1 + f = a.widedigit(pz) pb = 0 + carry = _widen_digit(0) while pb < size_b: carry += z.widedigit(pz) + b.widedigit(pb) * f pb += 1 z.setdigit(pz, carry) pz += 1 carry >>= SHIFT - assert carry <= MASK if carry: - assert pz >= 0 z.setdigit(pz, z.widedigit(pz) + carry) - assert (carry >> SHIFT) == 0 - i += 1 z._normalize() return z diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -591,3 +591,12 @@ if sys.platform == 'win32': name = name.new(ext='exe') return name + +if os.name == 'posix': + def shutil_copy(src, dst): + # this version handles the case where 'dst' is an executable + # currently being executed + shutil.copy(src, dst + '~') + os.rename(dst + '~', dst) +else: 
+ shutil_copy = shutil.copy diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py --- a/rpython/translator/test/test_driver.py +++ b/rpython/translator/test/test_driver.py @@ -1,6 +1,6 @@ import py import os -from rpython.translator.driver import TranslationDriver +from rpython.translator.driver import TranslationDriver, shutil_copy from rpython.tool.udir import udir def test_ctr(): @@ -74,4 +74,9 @@ assert dst_name.new(ext='dll').read() == 'dll' assert dst_name.new(purebasename='python27',ext='lib').read() == 'lib' - +def test_shutil_copy(): + a = udir.join('file_a') + b = udir.join('file_a') + a.write('hello') + shutil_copy(str(a), str(b)) + assert b.read() == 'hello' From noreply at buildbot.pypy.org Fri Jan 17 22:56:38 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 17 Jan 2014 22:56:38 +0100 (CET) Subject: [pypy-commit] pypy refactor-str-types: (mjacob, antocuni) Close to-be-merged branch. Message-ID: <20140117215638.431051C087E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-str-types Changeset: r68751:730be73f3beb Date: 2014-01-17 22:47 +0100 http://bitbucket.org/pypy/pypy/changeset/730be73f3beb/ Log: (mjacob, antocuni) Close to-be-merged branch. From noreply at buildbot.pypy.org Fri Jan 17 22:56:40 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 17 Jan 2014 22:56:40 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge refactor-str-types: This branch removes multimethods on str/unicode/bytearray and makes these types share code. Message-ID: <20140117215640.8B08A1C087E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68752:7bdc8396c973 Date: 2014-01-17 22:55 +0100 http://bitbucket.org/pypy/pypy/changeset/7bdc8396c973/ Log: hg merge refactor-str-types: This branch removes multimethods on str/unicode/bytearray and makes these types share code. diff too long, truncating to 2000 out of 9425 lines diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. 
See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -231,6 +231,11 @@ msg = "__int__ returned non-int (type '%T')" raise operationerrfmt(space.w_TypeError, msg, w_result) + def ord(self, space): + typename = space.type(self).getname(space) + msg = "ord() expected string of length 1, but %s found" + raise operationerrfmt(space.w_TypeError, msg, typename) + def __spacebind__(self, space): return self @@ -1396,6 +1401,9 @@ # This is here mostly just for gateway.int_unwrapping_space_method(). return bool(self.int_w(w_obj)) + def ord(self, w_obj): + return w_obj.ord(self) + # This is all interface for gateway.py. def gateway_int_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -520,12 +520,13 @@ # When a BuiltinCode is stored in a Function object, # you get the functionality of CPython's built-in function type. - def __init__(self, func, unwrap_spec=None, self_type=None, descrmismatch=None): + def __init__(self, func, unwrap_spec=None, self_type=None, + descrmismatch=None, doc=None): "NOT_RPYTHON" # 'implfunc' is the interpreter-level function. # Note that this uses a lot of (construction-time) introspection. 
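        # note: the new optional 'doc' argument (used just below as 'doc or
        # func.__doc__') lets the caller supply the app-level docstring
        # explicitly; the bytearray typedef later in this changeset passes
        # doc=BytearrayDocstrings.<name>.__doc__ for its interp2app wrappers.
        # A minimal sketch, with invented names, for illustration only:
        #     interp2app(W_Foo.descr_meth, doc="meth() -> None\n\nDo meth.")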
Code.__init__(self, func.__name__) - self.docstring = func.__doc__ + self.docstring = doc or func.__doc__ self.identifier = "%s-%s-%s" % (func.__module__, func.__name__, getattr(self_type, '__name__', '*')) @@ -832,7 +833,7 @@ instancecache = {} def __new__(cls, f, app_name=None, unwrap_spec=None, descrmismatch=None, - as_classmethod=False): + as_classmethod=False, doc=None): "NOT_RPYTHON" # f must be a function whose name does NOT start with 'app_' @@ -861,7 +862,8 @@ cls.instancecache[key] = self self._code = BuiltinCode(f, unwrap_spec=unwrap_spec, self_type=self_type, - descrmismatch=descrmismatch) + descrmismatch=descrmismatch, + doc=doc) self.__name__ = f.func_name self.name = app_name self.as_classmethod = as_classmethod diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -708,6 +708,18 @@ never_called py.test.raises(AssertionError, space.wrap, gateway.interp2app_temp(g)) + def test_interp2app_doc(self): + space = self.space + def f(space, w_x): + """foo""" + w_f = space.wrap(gateway.interp2app_temp(f)) + assert space.unwrap(space.getattr(w_f, space.wrap('__doc__'))) == 'foo' + # + def g(space, w_x): + never_called + w_g = space.wrap(gateway.interp2app_temp(g, doc='bar')) + assert space.unwrap(space.getattr(w_g, space.wrap('__doc__'))) == 'bar' + class AppTestPyTestMark: @py.test.mark.unlikely_to_exist diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -30,7 +30,7 @@ # ____________________________________________________________ def encode(space, w_data, encoding=None, errors='strict'): - from pypy.objspace.std.unicodetype import encode_object + from pypy.objspace.std.unicodeobject import encode_object return encode_object(space, w_data, encoding, errors) # These functions take and return unwrapped rpython strings and unicodes diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -68,10 +68,14 @@ return W_MemoryView(buf) def descr_buffer(self, space): - """Note that memoryview() objects in PyPy support buffer(), whereas - not in CPython; but CPython supports passing memoryview() to most - built-in functions that accept buffers, with the notable exception - of the buffer() built-in.""" + """ + Note that memoryview() is very inconsistent in CPython: it does not + support the buffer interface but does support the new buffer + interface: as a result, it is possible to pass memoryview to + e.g. socket.send() but not to file.write(). For simplicity and + consistency, in PyPy memoryview DOES support buffer(), which means + that it is accepted in more places than CPython. 
+ """ return space.wrap(self.buf) def descr_tobytes(self, space): diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -709,7 +709,7 @@ @unwrap_spec(data=str, errors='str_or_None') def escape_encode(space, data, errors='strict'): - from pypy.objspace.std.stringobject import string_escape_encode + from pypy.objspace.std.bytesobject import string_escape_encode result = string_escape_encode(data, quote="'") start = 1 end = len(result) - 1 diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -12,7 +12,7 @@ from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.module._codecs.interp_codecs import CodecState -from pypy.objspace.std import unicodeobject, unicodetype +from pypy.objspace.std import unicodeobject from rpython.rlib import rstring, runicode from rpython.tool.sourcetools import func_renamer import sys @@ -262,7 +262,7 @@ def PyUnicode_GetDefaultEncoding(space): """Returns the currently active default encoding.""" if default_encoding[0] == '\x00': - encoding = unicodetype.getdefaultencoding(space) + encoding = unicodeobject.getdefaultencoding(space) i = 0 while i < len(encoding) and i < DEFAULT_ENCODING_SIZE: default_encoding[i] = encoding[i] @@ -295,7 +295,7 @@ encoding = rffi.charp2str(llencoding) if llerrors: errors = rffi.charp2str(llerrors) - return unicodetype.encode_object(space, w_unicode, encoding, errors) + return unicodeobject.encode_object(space, w_unicode, encoding, errors) @cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_AsEncodedString(space, w_unicode, llencoding, llerrors): @@ -318,7 +318,7 @@ if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) - return unicodetype.encode_object(space, w_unicode, 'unicode-escape', 'strict') + return unicodeobject.encode_object(space, w_unicode, 'unicode-escape', 'strict') @cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) def PyUnicode_FromUnicode(space, wchar_p, length): @@ -471,7 +471,7 @@ exception was raised by the codec.""" if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) - return unicodetype.encode_object(space, w_unicode, encoding, "strict") + return unicodeobject.encode_object(space, w_unicode, encoding, "strict") @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING], PyObject) @func_renamer('PyUnicode_Decode%s' % suffix) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -2,9 +2,9 @@ from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.floattype import float_typedef -from pypy.objspace.std.stringtype import str_typedef -from pypy.objspace.std.unicodetype import unicode_typedef, unicode_from_object +from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.inttype import int_typedef from pypy.objspace.std.complextype import complex_typedef from rpython.rlib.rarithmetic import LONG_BIT @@ -510,7 +510,7 @@ from pypy.module.micronumpy.interp_dtype import new_unicode_dtype - arg = 
space.unicode_w(unicode_from_object(space, w_arg)) + arg = space.unicode_w(space.unicode_from_object(w_arg)) # XXX size computations, we need tests anyway arr = VoidBoxStorage(len(arg), new_unicode_dtype(space, len(arg))) # XXX not this way, we need store @@ -773,13 +773,13 @@ __module__ = "numpy", ) -W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, str_typedef), +W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, W_BytesObject.typedef), __module__ = "numpy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), __len__ = interp2app(W_StringBox.descr_len), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, unicode_typedef), +W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, W_UnicodeObject.typedef), __module__ = "numpy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), __len__ = interp2app(W_UnicodeBox.descr_len), diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -290,6 +290,9 @@ ec._py_repr = None return ec + def unicode_from_object(self, w_obj): + return w_some_obj() + # ---------- def translates(self, func=None, argtypes=None, seeobj_w=[], **kwds): diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1,25 +1,23 @@ """The builtin bytearray implementation""" +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.signature import Signature -from pypy.objspace.std import stringobject -from pypy.objspace.std.bytearraytype import ( - getbytevalue, makebytearraydata_w, new_bytearray) -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.model import W_Object, registerimplementation -from pypy.objspace.std.multimethod import FailedToImplement -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice -from pypy.objspace.std.stringobject import W_StringObject -from pypy.objspace.std.unicodeobject import W_UnicodeObject +from pypy.objspace.std.sliceobject import W_SliceObject +from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.util import get_positive_index +from rpython.rlib.objectmodel import newlist_hint, resizelist_hint, import_from_mixin from rpython.rlib.rstring import StringBuilder -class W_BytearrayObject(W_Object): - from pypy.objspace.std.bytearraytype import bytearray_typedef as typedef +def _make_data(s): + return [s[i] for i in range(len(s))] + +class W_BytearrayObject(W_Root): + import_from_mixin(StringMethods) def __init__(w_self, data): w_self.data = data @@ -28,310 +26,984 @@ """ representation for debugging purposes """ return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) -registerimplementation(W_BytearrayObject) + def _new(self, value): + return W_BytearrayObject(_make_data(value)) + + def _new_from_list(self, value): + return W_BytearrayObject(value) + + def _empty(self): + return W_BytearrayObject([]) + + def _len(self): + return len(self.data) + + def _val(self, space): + return 
space.bufferstr_w(self) + + def _op_val(self, space, w_other): + return space.bufferstr_new_w(w_other) + + def _chr(self, char): + assert len(char) == 1 + return str(char)[0] + + _builder = StringBuilder + + def _newlist_unwrapped(self, space, res): + return space.newlist([W_BytearrayObject(_make_data(i)) for i in res]) + + def _isupper(self, ch): + return ch.isupper() + + def _islower(self, ch): + return ch.islower() + + def _istitle(self, ch): + return ch.isupper() + + def _isspace(self, ch): + return ch.isspace() + + def _isalpha(self, ch): + return ch.isalpha() + + def _isalnum(self, ch): + return ch.isalnum() + + def _isdigit(self, ch): + return ch.isdigit() + + _iscased = _isalpha + + def _islinebreak(self, ch): + return (ch == '\n') or (ch == '\r') + + def _upper(self, ch): + if ch.islower(): + o = ord(ch) - 32 + return chr(o) + else: + return ch + + def _lower(self, ch): + if ch.isupper(): + o = ord(ch) + 32 + return chr(o) + else: + return ch + + _title = _upper + + def _join_return_one(self, space, w_obj): + return False + + def _join_check_item(self, space, w_obj): + if (space.isinstance_w(w_obj, space.w_str) or + space.isinstance_w(w_obj, space.w_bytearray)): + return 0 + return 1 + + def ord(self, space): + if len(self.data) != 1: + msg = "ord() expected a character, but string of length %d found" + raise operationerrfmt(space.w_TypeError, msg, len(self.data)) + return space.wrap(ord(self.data[0])) + + @staticmethod + def descr_new(space, w_bytearraytype, __args__): + return new_bytearray(space, w_bytearraytype, []) + + def descr_reduce(self, space): + assert isinstance(self, W_BytearrayObject) + w_dict = self.getdict(space) + if w_dict is None: + w_dict = space.w_None + return space.newtuple([ + space.type(self), space.newtuple([ + space.wrap(''.join(self.data).decode('latin-1')), + space.wrap('latin-1')]), + w_dict]) + + @staticmethod + def descr_fromhex(space, w_bytearraytype, w_hexstring): + "bytearray.fromhex(string) -> bytearray\n" + "\n" + "Create a bytearray object from a string of hexadecimal numbers.\n" + "Spaces between two numbers are accepted.\n" + "Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\\xb9\\x01\\xef')." 
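+        # the loop below walks the lower-cased string two hex digits at a
+        # time, skipping blanks between byte pairs and rejecting anything
+        # that is not a hexadecimal digit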
+ hexstring = space.str_w(w_hexstring) + hexstring = hexstring.lower() + data = [] + length = len(hexstring) + i = -2 + while True: + i += 2 + while i < length and hexstring[i] == ' ': + i += 1 + if i >= length: + break + if i+1 == length: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % i)) + + top = _hex_digit_to_int(hexstring[i]) + if top == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % i)) + bot = _hex_digit_to_int(hexstring[i+1]) + if bot == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,))) + data.append(chr(top*16 + bot)) + + # in CPython bytearray.fromhex is a staticmethod, so + # we ignore w_type and always return a bytearray + return new_bytearray(space, space.w_bytearray, data) + + def descr_init(self, space, __args__): + # this is on the silly side + w_source, w_encoding, w_errors = __args__.parse_obj( + None, 'bytearray', init_signature, init_defaults) + + if w_source is None: + w_source = space.wrap('') + if w_encoding is None: + w_encoding = space.w_None + if w_errors is None: + w_errors = space.w_None + + # Unicode argument + if not space.is_w(w_encoding, space.w_None): + from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, encode_object + ) + encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + + # if w_source is an integer this correctly raises a TypeError + # the CPython error message is: "encoding or errors without a string argument" + # ours is: "expected unicode, got int object" + w_source = encode_object(space, w_source, encoding, errors) + + # Is it an int? + try: + count = space.int_w(w_source) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + if count < 0: + raise OperationError(space.w_ValueError, + space.wrap("bytearray negative count")) + self.data = ['\0'] * count + return + + data = makebytearraydata_w(space, w_source) + self.data = data + + def descr_repr(self, space): + s = self.data + + # Good default if there are no replacements. 
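+        # i.e. room for the surrounding "bytearray(b'')" text plus one output
+        # character per byte, which is exact when nothing needs escaping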
+ buf = StringBuilder(len("bytearray(b'')") + len(s)) + + buf.append("bytearray(b'") + + for i in range(len(s)): + c = s[i] + + if c == '\\' or c == "'": + buf.append('\\') + buf.append(c) + elif c == '\t': + buf.append('\\t') + elif c == '\r': + buf.append('\\r') + elif c == '\n': + buf.append('\\n') + elif not '\x20' <= c < '\x7f': + n = ord(c) + buf.append('\\x') + buf.append("0123456789abcdef"[n>>4]) + buf.append("0123456789abcdef"[n&0xF]) + else: + buf.append(c) + + buf.append("')") + + return space.wrap(buf.build()) + + def descr_str(self, space): + return space.wrap(''.join(self.data)) + + def descr_eq(self, space, w_other): + try: + return space.newbool(self._val(space) == self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_ne(self, space, w_other): + try: + return space.newbool(self._val(space) != self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_lt(self, space, w_other): + try: + return space.newbool(self._val(space) < self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_le(self, space, w_other): + try: + return space.newbool(self._val(space) <= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_gt(self, space, w_other): + try: + return space.newbool(self._val(space) > self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_ge(self, space, w_other): + try: + return space.newbool(self._val(space) >= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_buffer(self, space): + return BytearrayBuffer(self.data) + + def descr_inplace_add(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + self.data += w_other.data + else: + self.data += self._op_val(space, w_other) + return self + + def descr_inplace_mul(self, space, w_times): + try: + times = space.getindex_w(w_times, space.w_OverflowError) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + self.data *= times + return self + + def descr_setitem(self, space, w_index, w_other): + if isinstance(w_index, W_SliceObject): + oldsize = len(self.data) + start, stop, step, slicelength = w_index.indices4(space, oldsize) + sequence2 = makebytearraydata_w(space, w_other) + _setitem_slice_helper(space, self.data, start, step, + slicelength, sequence2, empty_elem='\x00') + else: + idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") + try: + self.data[idx] = getbytevalue(space, w_other) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("bytearray index out of range")) + + def descr_delitem(self, space, w_idx): + if isinstance(w_idx, W_SliceObject): + start, stop, step, slicelength = w_idx.indices4(space, + len(self.data)) + _delitem_slice_helper(space, self.data, start, step, slicelength) + else: + idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") + try: + del self.data[idx] + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("bytearray deletion index out of range")) + + def descr_append(self, space, 
w_item): + self.data.append(getbytevalue(space, w_item)) + + def descr_extend(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + self.data += w_other.data + else: + self.data += makebytearraydata_w(space, w_other) + return self + + def descr_insert(self, space, w_idx, w_other): + where = space.int_w(w_idx) + length = len(self.data) + index = get_positive_index(where, length) + val = getbytevalue(space, w_other) + self.data.insert(index, val) + return space.w_None + + @unwrap_spec(w_idx=WrappedDefault(-1)) + def descr_pop(self, space, w_idx): + index = space.int_w(w_idx) + try: + result = self.data.pop(index) + except IndexError: + if not self.data: + raise OperationError(space.w_IndexError, space.wrap( + "pop from empty bytearray")) + raise OperationError(space.w_IndexError, space.wrap( + "pop index out of range")) + return space.wrap(ord(result)) + + def descr_remove(self, space, w_char): + char = space.int_w(space.index(w_char)) + try: + self.data.remove(chr(char)) + except ValueError: + raise OperationError(space.w_ValueError, space.wrap( + "value not found in bytearray")) + + def descr_reverse(self, space): + self.data.reverse() + +def getbytevalue(space, w_value): + if space.isinstance_w(w_value, space.w_str): + string = space.str_w(w_value) + if len(string) != 1: + raise OperationError(space.w_ValueError, space.wrap( + "string must be of size 1")) + return string[0] + + value = space.getindex_w(w_value, None) + if not 0 <= value < 256: + # this includes the OverflowError in case the long is too large + raise OperationError(space.w_ValueError, space.wrap( + "byte must be in range(0, 256)")) + return chr(value) + +def new_bytearray(space, w_bytearraytype, data): + w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) + W_BytearrayObject.__init__(w_obj, data) + return w_obj + + +def makebytearraydata_w(space, w_source): + # String-like argument + try: + string = space.bufferstr_new_w(w_source) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + return [c for c in string] + + # sequence of bytes + w_iter = space.iter(w_source) + length_hint = space.length_hint(w_source, 0) + data = newlist_hint(length_hint) + extended = 0 + while True: + try: + w_item = space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + value = getbytevalue(space, w_item) + data.append(value) + extended += 1 + if extended < length_hint: + resizelist_hint(data, extended) + return data + +def _hex_digit_to_int(d): + val = ord(d) + if 47 < val < 58: + return val - 48 + if 96 < val < 103: + return val - 87 + return -1 + + +class BytearrayDocstrings: + """bytearray(iterable_of_ints) -> bytearray + bytearray(string, encoding[, errors]) -> bytearray + bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray + bytearray(memory_view) -> bytearray + + Construct an mutable bytearray object from: + - an iterable yielding integers in range(256) + - a text string encoded using the specified encoding + - a bytes or a bytearray object + - any object implementing the buffer API. + + bytearray(int) -> bytearray. + + Construct a zero-initialized bytearray of the given length. + + """ + + def __add__(): + """x.__add__(y) <==> x+y""" + + def __alloc__(): + """B.__alloc__() -> int + + Return the number of bytes actually allocated. 
+ """ + + def __contains__(): + """x.__contains__(y) <==> y in x""" + + def __delitem__(): + """x.__delitem__(y) <==> del x[y]""" + + def __eq__(): + """x.__eq__(y) <==> x==y""" + + def __ge__(): + """x.__ge__(y) <==> x>=y""" + + def __getattribute__(): + """x.__getattribute__('name') <==> x.name""" + + def __getitem__(): + """x.__getitem__(y) <==> x[y]""" + + def __gt__(): + """x.__gt__(y) <==> x>y""" + + def __iadd__(): + """x.__iadd__(y) <==> x+=y""" + + def __imul__(): + """x.__imul__(y) <==> x*=y""" + + def __init__(): + """x.__init__(...) initializes x; see help(type(x)) for signature""" + + def __iter__(): + """x.__iter__() <==> iter(x)""" + + def __le__(): + """x.__le__(y) <==> x<=y""" + + def __len__(): + """x.__len__() <==> len(x)""" + + def __lt__(): + """x.__lt__(y) <==> x x*n""" + + def __ne__(): + """x.__ne__(y) <==> x!=y""" + + def __reduce__(): + """Return state information for pickling.""" + + def __repr__(): + """x.__repr__() <==> repr(x)""" + + def __rmul__(): + """x.__rmul__(n) <==> n*x""" + + def __setitem__(): + """x.__setitem__(i, y) <==> x[i]=y""" + + def __sizeof__(): + """B.__sizeof__() -> int + + Returns the size of B in memory, in bytes + """ + + def __str__(): + """x.__str__() <==> str(x)""" + + def append(): + """B.append(int) -> None + + Append a single item to the end of B. + """ + + def capitalize(): + """B.capitalize() -> copy of B + + Return a copy of B with only its first character capitalized (ASCII) + and the rest lower-cased. + """ + + def center(): + """B.center(width[, fillchar]) -> copy of B + + Return B centered in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def count(): + """B.count(sub[, start[, end]]) -> int + + Return the number of non-overlapping occurrences of subsection sub in + bytes B[start:end]. Optional arguments start and end are interpreted + as in slice notation. + """ + + def decode(): + """B.decode(encoding=None, errors='strict') -> unicode + + Decode B using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' + as well as any other name registered with codecs.register_error that is + able to handle UnicodeDecodeErrors. + """ + + def endswith(): + """B.endswith(suffix[, start[, end]]) -> bool + + Return True if B ends with the specified suffix, False otherwise. + With optional start, test B beginning at that position. + With optional end, stop comparing B at that position. + suffix can also be a tuple of strings to try. + """ + + def expandtabs(): + """B.expandtabs([tabsize]) -> copy of B + + Return a copy of B where all tab characters are expanded using spaces. + If tabsize is not given, a tab size of 8 characters is assumed. + """ + + def extend(): + """B.extend(iterable_of_ints) -> None + + Append all the elements from the iterator or sequence to the + end of B. + """ + + def find(): + """B.find(sub[, start[, end]]) -> int + + Return the lowest index in B where subsection sub is found, + such that sub is contained within B[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def fromhex(): + """bytearray.fromhex(string) -> bytearray (static method) + + Create a bytearray object from a string of hexadecimal numbers. + Spaces between two numbers are accepted. 
+ Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\xb9\x01\xef'). + """ + + def index(): + """B.index(sub[, start[, end]]) -> int + + Like B.find() but raise ValueError when the subsection is not found. + """ + + def insert(): + """B.insert(index, int) -> None + + Insert a single item into the bytearray before the given index. + """ + + def isalnum(): + """B.isalnum() -> bool + + Return True if all characters in B are alphanumeric + and there is at least one character in B, False otherwise. + """ + + def isalpha(): + """B.isalpha() -> bool + + Return True if all characters in B are alphabetic + and there is at least one character in B, False otherwise. + """ + + def isdigit(): + """B.isdigit() -> bool + + Return True if all characters in B are digits + and there is at least one character in B, False otherwise. + """ + + def islower(): + """B.islower() -> bool + + Return True if all cased characters in B are lowercase and there is + at least one cased character in B, False otherwise. + """ + + def isspace(): + """B.isspace() -> bool + + Return True if all characters in B are whitespace + and there is at least one character in B, False otherwise. + """ + + def istitle(): + """B.istitle() -> bool + + Return True if B is a titlecased string and there is at least one + character in B, i.e. uppercase characters may only follow uncased + characters and lowercase characters only cased ones. Return False + otherwise. + """ + + def isupper(): + """B.isupper() -> bool + + Return True if all cased characters in B are uppercase and there is + at least one cased character in B, False otherwise. + """ + + def join(): + """B.join(iterable_of_bytes) -> bytearray + + Concatenate any number of str/bytearray objects, with B + in between each pair, and return the result as a new bytearray. + """ + + def ljust(): + """B.ljust(width[, fillchar]) -> copy of B + + Return B left justified in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def lower(): + """B.lower() -> copy of B + + Return a copy of B with all ASCII characters converted to lowercase. + """ + + def lstrip(): + """B.lstrip([bytes]) -> bytearray + + Strip leading bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip leading ASCII whitespace. + """ + + def partition(): + """B.partition(sep) -> (head, sep, tail) + + Search for the separator sep in B, and return the part before it, + the separator itself, and the part after it. If the separator is not + found, returns B and two empty bytearray objects. + """ + + def pop(): + """B.pop([index]) -> int + + Remove and return a single item from B. If no index + argument is given, will pop the last value. + """ + + def remove(): + """B.remove(int) -> None + + Remove the first occurrence of a value in B. + """ + + def replace(): + """B.replace(old, new[, count]) -> bytearray + + Return a copy of B with all occurrences of subsection + old replaced by new. If the optional argument count is + given, only the first count occurrences are replaced. + """ + + def reverse(): + """B.reverse() -> None + + Reverse the order of the values in B in place. + """ + + def rfind(): + """B.rfind(sub[, start[, end]]) -> int + + Return the highest index in B where subsection sub is found, + such that sub is contained within B[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. 
+ """ + + def rindex(): + """B.rindex(sub[, start[, end]]) -> int + + Like B.rfind() but raise ValueError when the subsection is not found. + """ + + def rjust(): + """B.rjust(width[, fillchar]) -> copy of B + + Return B right justified in a string of length width. Padding is + done using the specified fill character (default is a space) + """ + + def rpartition(): + """B.rpartition(sep) -> (head, sep, tail) + + Search for the separator sep in B, starting at the end of B, + and return the part before it, the separator itself, and the + part after it. If the separator is not found, returns two empty + bytearray objects and B. + """ + + def rsplit(): + """B.rsplit(sep=None, maxsplit=-1) -> list of bytearrays + + Return a list of the sections in B, using sep as the delimiter, + starting at the end of B and working to the front. + If sep is not given, B is split on ASCII whitespace characters + (space, tab, return, newline, formfeed, vertical tab). + If maxsplit is given, at most maxsplit splits are done. + """ + + def rstrip(): + """B.rstrip([bytes]) -> bytearray + + Strip trailing bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip trailing ASCII whitespace. + """ + + def split(): + """B.split(sep=None, maxsplit=-1) -> list of bytearrays + + Return a list of the sections in B, using sep as the delimiter. + If sep is not given, B is split on ASCII whitespace characters + (space, tab, return, newline, formfeed, vertical tab). + If maxsplit is given, at most maxsplit splits are done. + """ + + def splitlines(): + """B.splitlines(keepends=False) -> list of lines + + Return a list of the lines in B, breaking at line boundaries. + Line breaks are not included in the resulting list unless keepends + is given and true. + """ + + def startswith(): + """B.startswith(prefix[, start[, end]]) -> bool + + Return True if B starts with the specified prefix, False otherwise. + With optional start, test B beginning at that position. + With optional end, stop comparing B at that position. + prefix can also be a tuple of strings to try. + """ + + def strip(): + """B.strip([bytes]) -> bytearray + + Strip leading and trailing bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip ASCII whitespace. + """ + + def swapcase(): + """B.swapcase() -> copy of B + + Return a copy of B with uppercase ASCII characters converted + to lowercase ASCII and vice versa. + """ + + def title(): + """B.title() -> copy of B + + Return a titlecased version of B, i.e. ASCII words start with uppercase + characters, all remaining cased characters have lowercase. + """ + + def translate(): + """B.translate(table[, deletechars]) -> bytearray + + Return a copy of B, where all characters occurring in the + optional argument deletechars are removed, and the remaining + characters have been mapped through the given translation + table, which must be a bytes object of length 256. + """ + + def upper(): + """B.upper() -> copy of B + + Return a copy of B with all ASCII characters converted to uppercase. + """ + + def zfill(): + """B.zfill(width) -> copy of B + + Pad a numeric string B with zeros on the left, to fill a field + of the specified width. B is never truncated. 
+ """ + + +W_BytearrayObject.typedef = StdTypeDef( + "bytearray", + __doc__ = BytearrayDocstrings.__doc__, + __new__ = interp2app(W_BytearrayObject.descr_new), + __hash__ = None, + __reduce__ = interp2app(W_BytearrayObject.descr_reduce, + doc=BytearrayDocstrings.__reduce__.__doc__), + fromhex = interp2app(W_BytearrayObject.descr_fromhex, as_classmethod=True, + doc=BytearrayDocstrings.fromhex.__doc__), + + __repr__ = interp2app(W_BytearrayObject.descr_repr, + doc=BytearrayDocstrings.__repr__.__doc__), + __str__ = interp2app(W_BytearrayObject.descr_str, + doc=BytearrayDocstrings.__str__.__doc__), + + __eq__ = interp2app(W_BytearrayObject.descr_eq, + doc=BytearrayDocstrings.__eq__.__doc__), + __ne__ = interp2app(W_BytearrayObject.descr_ne, + doc=BytearrayDocstrings.__ne__.__doc__), + __lt__ = interp2app(W_BytearrayObject.descr_lt, + doc=BytearrayDocstrings.__lt__.__doc__), + __le__ = interp2app(W_BytearrayObject.descr_le, + doc=BytearrayDocstrings.__le__.__doc__), + __gt__ = interp2app(W_BytearrayObject.descr_gt, + doc=BytearrayDocstrings.__gt__.__doc__), + __ge__ = interp2app(W_BytearrayObject.descr_ge, + doc=BytearrayDocstrings.__ge__.__doc__), + + __len__ = interp2app(W_BytearrayObject.descr_len, + doc=BytearrayDocstrings.__len__.__doc__), + __contains__ = interp2app(W_BytearrayObject.descr_contains, + doc=BytearrayDocstrings.__contains__.__doc__), + + __add__ = interp2app(W_BytearrayObject.descr_add, + doc=BytearrayDocstrings.__add__.__doc__), + __mul__ = interp2app(W_BytearrayObject.descr_mul, + doc=BytearrayDocstrings.__mul__.__doc__), + __rmul__ = interp2app(W_BytearrayObject.descr_mul, + doc=BytearrayDocstrings.__rmul__.__doc__), + + __getitem__ = interp2app(W_BytearrayObject.descr_getitem, + doc=BytearrayDocstrings.__getitem__.__doc__), + + capitalize = interp2app(W_BytearrayObject.descr_capitalize, + doc=BytearrayDocstrings.capitalize.__doc__), + center = interp2app(W_BytearrayObject.descr_center, + doc=BytearrayDocstrings.center.__doc__), + count = interp2app(W_BytearrayObject.descr_count, + doc=BytearrayDocstrings.count.__doc__), + decode = interp2app(W_BytearrayObject.descr_decode, + doc=BytearrayDocstrings.decode.__doc__), + expandtabs = interp2app(W_BytearrayObject.descr_expandtabs, + doc=BytearrayDocstrings.expandtabs.__doc__), + find = interp2app(W_BytearrayObject.descr_find, + doc=BytearrayDocstrings.find.__doc__), + rfind = interp2app(W_BytearrayObject.descr_rfind, + doc=BytearrayDocstrings.rfind.__doc__), + index = interp2app(W_BytearrayObject.descr_index, + doc=BytearrayDocstrings.index.__doc__), + rindex = interp2app(W_BytearrayObject.descr_rindex, + doc=BytearrayDocstrings.rindex.__doc__), + isalnum = interp2app(W_BytearrayObject.descr_isalnum, + doc=BytearrayDocstrings.isalnum.__doc__), + isalpha = interp2app(W_BytearrayObject.descr_isalpha, + doc=BytearrayDocstrings.isalpha.__doc__), + isdigit = interp2app(W_BytearrayObject.descr_isdigit, + doc=BytearrayDocstrings.isdigit.__doc__), + islower = interp2app(W_BytearrayObject.descr_islower, + doc=BytearrayDocstrings.islower.__doc__), + isspace = interp2app(W_BytearrayObject.descr_isspace, + doc=BytearrayDocstrings.isspace.__doc__), + istitle = interp2app(W_BytearrayObject.descr_istitle, + doc=BytearrayDocstrings.istitle.__doc__), + isupper = interp2app(W_BytearrayObject.descr_isupper, + doc=BytearrayDocstrings.isupper.__doc__), + join = interp2app(W_BytearrayObject.descr_join, + doc=BytearrayDocstrings.join.__doc__), + ljust = interp2app(W_BytearrayObject.descr_ljust, + doc=BytearrayDocstrings.ljust.__doc__), + rjust = 
interp2app(W_BytearrayObject.descr_rjust, + doc=BytearrayDocstrings.rjust.__doc__), + lower = interp2app(W_BytearrayObject.descr_lower, + doc=BytearrayDocstrings.lower.__doc__), + partition = interp2app(W_BytearrayObject.descr_partition, + doc=BytearrayDocstrings.partition.__doc__), + rpartition = interp2app(W_BytearrayObject.descr_rpartition, + doc=BytearrayDocstrings.rpartition.__doc__), + replace = interp2app(W_BytearrayObject.descr_replace, + doc=BytearrayDocstrings.replace.__doc__), + split = interp2app(W_BytearrayObject.descr_split, + doc=BytearrayDocstrings.split.__doc__), + rsplit = interp2app(W_BytearrayObject.descr_rsplit, + doc=BytearrayDocstrings.rsplit.__doc__), + splitlines = interp2app(W_BytearrayObject.descr_splitlines, + doc=BytearrayDocstrings.splitlines.__doc__), + startswith = interp2app(W_BytearrayObject.descr_startswith, + doc=BytearrayDocstrings.startswith.__doc__), + endswith = interp2app(W_BytearrayObject.descr_endswith, + doc=BytearrayDocstrings.endswith.__doc__), + strip = interp2app(W_BytearrayObject.descr_strip, + doc=BytearrayDocstrings.strip.__doc__), + lstrip = interp2app(W_BytearrayObject.descr_lstrip, + doc=BytearrayDocstrings.lstrip.__doc__), + rstrip = interp2app(W_BytearrayObject.descr_rstrip, + doc=BytearrayDocstrings.rstrip.__doc__), + swapcase = interp2app(W_BytearrayObject.descr_swapcase, + doc=BytearrayDocstrings.swapcase.__doc__), + title = interp2app(W_BytearrayObject.descr_title, + doc=BytearrayDocstrings.title.__doc__), + translate = interp2app(W_BytearrayObject.descr_translate, + doc=BytearrayDocstrings.translate.__doc__), + upper = interp2app(W_BytearrayObject.descr_upper, + doc=BytearrayDocstrings.upper.__doc__), + zfill = interp2app(W_BytearrayObject.descr_zfill, + doc=BytearrayDocstrings.zfill.__doc__), + + __init__ = interp2app(W_BytearrayObject.descr_init, + doc=BytearrayDocstrings.__init__.__doc__), + __buffer__ = interp2app(W_BytearrayObject.descr_buffer), + + __iadd__ = interp2app(W_BytearrayObject.descr_inplace_add, + doc=BytearrayDocstrings.__iadd__.__doc__), + __imul__ = interp2app(W_BytearrayObject.descr_inplace_mul, + doc=BytearrayDocstrings.__imul__.__doc__), + __setitem__ = interp2app(W_BytearrayObject.descr_setitem, + doc=BytearrayDocstrings.__setitem__.__doc__), + __delitem__ = interp2app(W_BytearrayObject.descr_delitem, + doc=BytearrayDocstrings.__delitem__.__doc__), + + append = interp2app(W_BytearrayObject.descr_append, + doc=BytearrayDocstrings.append.__doc__), + extend = interp2app(W_BytearrayObject.descr_extend, + doc=BytearrayDocstrings.extend.__doc__), + insert = interp2app(W_BytearrayObject.descr_insert, + doc=BytearrayDocstrings.insert.__doc__), + pop = interp2app(W_BytearrayObject.descr_pop, + doc=BytearrayDocstrings.pop.__doc__), + remove = interp2app(W_BytearrayObject.descr_remove, + doc=BytearrayDocstrings.remove.__doc__), + reverse = interp2app(W_BytearrayObject.descr_reverse, + doc=BytearrayDocstrings.reverse.__doc__), +) init_signature = Signature(['source', 'encoding', 'errors'], None, None) init_defaults = [None, None, None] -def init__Bytearray(space, w_bytearray, __args__): - # this is on the silly side - w_source, w_encoding, w_errors = __args__.parse_obj( - None, 'bytearray', init_signature, init_defaults) - if w_source is None: - w_source = space.wrap('') - if w_encoding is None: - w_encoding = space.w_None - if w_errors is None: - w_errors = space.w_None - - # Unicode argument - if not space.is_w(w_encoding, space.w_None): - from pypy.objspace.std.unicodetype import ( - _get_encoding_and_errors, 
encode_object - ) - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) - - # if w_source is an integer this correctly raises a TypeError - # the CPython error message is: "encoding or errors without a string argument" - # ours is: "expected unicode, got int object" - w_source = encode_object(space, w_source, encoding, errors) - - # Is it an int? - try: - count = space.int_w(w_source) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - if count < 0: - raise OperationError(space.w_ValueError, - space.wrap("bytearray negative count")) - w_bytearray.data = ['\0'] * count - return - - data = makebytearraydata_w(space, w_source) - w_bytearray.data = data - -def len__Bytearray(space, w_bytearray): - result = len(w_bytearray.data) - return space.newint(result) - -def ord__Bytearray(space, w_bytearray): - if len(w_bytearray.data) != 1: - raise OperationError(space.w_TypeError, - space.wrap("expected a character, but string" - "of length %s found" % len(w_bytearray.data))) - return space.wrap(ord(w_bytearray.data[0])) - -def getitem__Bytearray_ANY(space, w_bytearray, w_index): - # getindex_w should get a second argument space.w_IndexError, - # but that doesn't exist the first time this is called. - try: - w_IndexError = space.w_IndexError - except AttributeError: - w_IndexError = None - index = space.getindex_w(w_index, w_IndexError, "bytearray index") - try: - return space.newint(ord(w_bytearray.data[index])) - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray index out of range")) - -def getitem__Bytearray_Slice(space, w_bytearray, w_slice): - data = w_bytearray.data - length = len(data) - start, stop, step, slicelength = w_slice.indices4(space, length) - assert slicelength >= 0 - if step == 1 and 0 <= start <= stop: - newdata = data[start:stop] - else: - newdata = _getitem_slice_multistep(data, start, step, slicelength) - return W_BytearrayObject(newdata) - -def _getitem_slice_multistep(data, start, step, slicelength): - return [data[start + i*step] for i in range(slicelength)] - -def contains__Bytearray_Int(space, w_bytearray, w_char): - char = space.int_w(w_char) - if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) - for c in w_bytearray.data: - if ord(c) == char: - return space.w_True - return space.w_False - -def contains__Bytearray_String(space, w_bytearray, w_str): - # XXX slow - copies, needs rewriting - w_str2 = str__Bytearray(space, w_bytearray) - return stringobject.contains__String_String(space, w_str2, w_str) - -def contains__Bytearray_ANY(space, w_bytearray, w_sub): - # XXX slow - copies, needs rewriting - w_str = space.wrap(space.bufferstr_new_w(w_sub)) - w_str2 = str__Bytearray(space, w_bytearray) - return stringobject.contains__String_String(space, w_str2, w_str) - -def add__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - return W_BytearrayObject(data1 + data2) - -def add__Bytearray_ANY(space, w_bytearray1, w_other): - data1 = w_bytearray1.data - data2 = [c for c in space.bufferstr_new_w(w_other)] - return W_BytearrayObject(data1 + data2) - -def add__String_Bytearray(space, w_str, w_bytearray): - data2 = w_bytearray.data - data1 = [c for c in space.str_w(w_str)] - return W_BytearrayObject(data1 + data2) - -def mul_bytearray_times(space, w_bytearray, w_times): - try: - times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: - if 
e.match(space, space.w_TypeError): - raise FailedToImplement - raise - data = w_bytearray.data - return W_BytearrayObject(data * times) - -def mul__Bytearray_ANY(space, w_bytearray, w_times): - return mul_bytearray_times(space, w_bytearray, w_times) - -def mul__ANY_Bytearray(space, w_times, w_bytearray): - return mul_bytearray_times(space, w_bytearray, w_times) - -def inplace_mul__Bytearray_ANY(space, w_bytearray, w_times): - try: - times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: - if e.match(space, space.w_TypeError): - raise FailedToImplement - raise - w_bytearray.data *= times - return w_bytearray - -def eq__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - if len(data1) != len(data2): - return space.w_False - for i in range(len(data1)): - if data1[i] != data2[i]: - return space.w_False - return space.w_True - -def String2Bytearray(space, w_str): - data = [c for c in space.str_w(w_str)] - return W_BytearrayObject(data) - -def eq__Bytearray_String(space, w_bytearray, w_other): - return space.eq(str__Bytearray(space, w_bytearray), w_other) - -def eq__Bytearray_Unicode(space, w_bytearray, w_other): - return space.w_False - -def eq__Unicode_Bytearray(space, w_other, w_bytearray): - return space.w_False - -def ne__Bytearray_String(space, w_bytearray, w_other): - return space.ne(str__Bytearray(space, w_bytearray), w_other) - -def ne__Bytearray_Unicode(space, w_bytearray, w_other): - return space.w_True - -def ne__Unicode_Bytearray(space, w_other, w_bytearray): - return space.w_True - -def _min(a, b): - if a < b: - return a - return b - -def lt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - ncmp = _min(len(data1), len(data2)) - # Search for the first index where items are different - for p in range(ncmp): - if data1[p] != data2[p]: - return space.newbool(data1[p] < data2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(data1) < len(data2)) - -def gt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - ncmp = _min(len(data1), len(data2)) - # Search for the first index where items are different - for p in range(ncmp): - if data1[p] != data2[p]: - return space.newbool(data1[p] > data2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(data1) > len(data2)) - -def str_translate__Bytearray_ANY_ANY(space, w_bytearray1, w_table, w_deletechars): - # XXX slow, copies *twice* needs proper implementation - w_str_copy = str__Bytearray(space, w_bytearray1) - w_res = stringobject.str_translate__String_ANY_ANY(space, w_str_copy, - w_table, w_deletechars) - return String2Bytearray(space, w_res) - -# Mostly copied from repr__String, but without the "smart quote" -# functionality. -def repr__Bytearray(space, w_bytearray): - s = w_bytearray.data - - # Good default if there are no replacements. 
- buf = StringBuilder(len("bytearray(b'')") + len(s)) - - buf.append("bytearray(b'") - - for i in range(len(s)): - c = s[i] - - if c == '\\' or c == "'": - buf.append('\\') - buf.append(c) - elif c == '\t': - buf.append('\\t') - elif c == '\r': - buf.append('\\r') - elif c == '\n': - buf.append('\\n') - elif not '\x20' <= c < '\x7f': - n = ord(c) - buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) - else: - buf.append(c) - - buf.append("')") - - return space.wrap(buf.build()) - -def str__Bytearray(space, w_bytearray): - return space.wrap(''.join(w_bytearray.data)) - -def str_count__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_count__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_index__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_index__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_rindex__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_rindex__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_find__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_find__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_rfind__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_rfind__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_startswith__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_prefix, w_start, w_stop): - if space.isinstance_w(w_prefix, space.w_tuple): - w_str = str__Bytearray(space, w_bytearray) - w_prefix = space.newtuple([space.wrap(space.bufferstr_new_w(w_entry)) for w_entry in - space.fixedview(w_prefix)]) - return stringobject.str_startswith__String_ANY_ANY_ANY(space, w_str, w_prefix, - w_start, w_stop) - - w_prefix = space.wrap(space.bufferstr_new_w(w_prefix)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_startswith__String_String_ANY_ANY(space, w_str, w_prefix, - w_start, w_stop) - -def str_endswith__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_suffix, w_start, w_stop): - if space.isinstance_w(w_suffix, space.w_tuple): - w_str = str__Bytearray(space, w_bytearray) - w_suffix = space.newtuple([space.wrap(space.bufferstr_new_w(w_entry)) for w_entry in - space.fixedview(w_suffix)]) - return stringobject.str_endswith__String_ANY_ANY_ANY(space, w_str, w_suffix, - w_start, w_stop) - w_suffix = space.wrap(space.bufferstr_new_w(w_suffix)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_endswith__String_String_ANY_ANY(space, w_str, w_suffix, - w_start, w_stop) - +# XXX consider moving to W_BytearrayObject or remove def str_join__Bytearray_ANY(space, w_self, w_list): list_w = space.listview(w_list) if not list_w: @@ -350,251 +1022,8 @@ newdata.extend([c for c in space.bufferstr_new_w(w_s)]) return W_BytearrayObject(newdata) -def str_decode__Bytearray_ANY_ANY(space, 
w_bytearray, w_encoding, w_errors): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_decode__String_ANY_ANY(space, w_str, w_encoding, w_errors) - -def str_islower__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_islower__String(space, w_str) - -def str_isupper__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_isupper__String(space, w_str) - -def str_isalpha__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_isalpha__String(space, w_str) - -def str_isalnum__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_isalnum__String(space, w_str) - -def str_isdigit__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_isdigit__String(space, w_str) - -def str_istitle__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_istitle__String(space, w_str) - -def str_isspace__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_isspace__String(space, w_str) - -def bytearray_insert__Bytearray_Int_ANY(space, w_bytearray, w_idx, w_other): - where = space.int_w(w_idx) - length = len(w_bytearray.data) - index = get_positive_index(where, length) - val = getbytevalue(space, w_other) - w_bytearray.data.insert(index, val) - return space.w_None - -def bytearray_pop__Bytearray_Int(space, w_bytearray, w_idx): - index = space.int_w(w_idx) - try: - result = w_bytearray.data.pop(index) - except IndexError: - if not w_bytearray.data: - raise OperationError(space.w_IndexError, space.wrap( - "pop from empty bytearray")) - raise OperationError(space.w_IndexError, space.wrap( - "pop index out of range")) - return space.wrap(ord(result)) - -def bytearray_remove__Bytearray_ANY(space, w_bytearray, w_char): - char = space.int_w(space.index(w_char)) - try: - result = w_bytearray.data.remove(chr(char)) - except ValueError: - raise OperationError(space.w_ValueError, space.wrap( - "value not found in bytearray")) - -def bytearray_reverse__Bytearray(space, w_bytearray): - w_bytearray.data.reverse() - return space.w_None - _space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) -def bytearray_strip__Bytearray_None(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, _space_chars, 1, 1) - -def bytearray_strip__Bytearray_ANY(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, space.bufferstr_new_w(w_chars), 1, 1) - -def bytearray_lstrip__Bytearray_None(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, _space_chars, 1, 0) - -def bytearray_lstrip__Bytearray_ANY(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, space.bufferstr_new_w(w_chars), 1, 0) - -def bytearray_rstrip__Bytearray_None(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, _space_chars, 0, 1) - -def bytearray_rstrip__Bytearray_ANY(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, space.bufferstr_new_w(w_chars), 0, 1) - -# These methods could just delegate to the string implementation, -# but they have to return a bytearray. 
-def str_replace__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_str1, w_str2, w_max): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_replace__String_ANY_ANY_ANY(space, w_str, w_str1, - w_str2, w_max) - return String2Bytearray(space, w_res) - -def str_upper__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_upper__String(space, w_str) - return String2Bytearray(space, w_res) - -def str_lower__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_lower__String(space, w_str) - return String2Bytearray(space, w_res) - -def str_title__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_title__String(space, w_str) - return String2Bytearray(space, w_res) - -def str_swapcase__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_swapcase__String(space, w_str) - return String2Bytearray(space, w_res) - -def str_capitalize__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_capitalize__String(space, w_str) - return String2Bytearray(space, w_res) - -def str_ljust__Bytearray_ANY_ANY(space, w_bytearray, w_width, w_fillchar): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_ljust__String_ANY_ANY(space, w_str, w_width, - w_fillchar) - return String2Bytearray(space, w_res) - -def str_rjust__Bytearray_ANY_ANY(space, w_bytearray, w_width, w_fillchar): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_rjust__String_ANY_ANY(space, w_str, w_width, - w_fillchar) - return String2Bytearray(space, w_res) - -def str_center__Bytearray_ANY_ANY(space, w_bytearray, w_width, w_fillchar): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_center__String_ANY_ANY(space, w_str, w_width, - w_fillchar) - return String2Bytearray(space, w_res) - -def str_zfill__Bytearray_ANY(space, w_bytearray, w_width): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_zfill__String_ANY(space, w_str, w_width) - return String2Bytearray(space, w_res) - -def str_expandtabs__Bytearray_ANY(space, w_bytearray, w_tabsize): - w_str = str__Bytearray(space, w_bytearray) - w_res = stringobject.str_expandtabs__String_ANY(space, w_str, w_tabsize) - return String2Bytearray(space, w_res) - -def str_splitlines__Bytearray_ANY(space, w_bytearray, w_keepends): - w_str = str__Bytearray(space, w_bytearray) - w_result = stringobject.str_splitlines__String_ANY(space, w_str, w_keepends) - return space.newlist([ - new_bytearray(space, space.w_bytearray, makebytearraydata_w(space, w_entry)) - for w_entry in space.unpackiterable(w_result) - ]) - -def str_split__Bytearray_ANY_ANY(space, w_bytearray, w_by, w_maxsplit=-1): - w_str = str__Bytearray(space, w_bytearray) - if not space.is_w(w_by, space.w_None): - w_by = space.wrap(space.bufferstr_new_w(w_by)) - w_list = space.call_method(w_str, "split", w_by, w_maxsplit) - length = space.int_w(space.len(w_list)) - for i in range(length): - w_i = space.wrap(i) - space.setitem(w_list, w_i, String2Bytearray(space, space.getitem(w_list, w_i))) - return w_list - -def str_rsplit__Bytearray_ANY_ANY(space, w_bytearray, w_by, w_maxsplit=-1): - w_str = str__Bytearray(space, w_bytearray) - if not space.is_w(w_by, space.w_None): - w_by = space.wrap(space.bufferstr_new_w(w_by)) - w_list = space.call_method(w_str, "rsplit", w_by, w_maxsplit) - length = space.int_w(space.len(w_list)) - 
for i in range(length): - w_i = space.wrap(i) - space.setitem(w_list, w_i, String2Bytearray(space, space.getitem(w_list, w_i))) - return w_list - -def str_partition__Bytearray_ANY(space, w_bytearray, w_sub): - w_str = str__Bytearray(space, w_bytearray) - w_sub = space.wrap(space.bufferstr_new_w(w_sub)) - w_tuple = stringobject.str_partition__String_String(space, w_str, w_sub) - w_a, w_b, w_c = space.fixedview(w_tuple, 3) - return space.newtuple([ - String2Bytearray(space, w_a), - String2Bytearray(space, w_b), - String2Bytearray(space, w_c)]) - -def str_rpartition__Bytearray_ANY(space, w_bytearray, w_sub): - w_str = str__Bytearray(space, w_bytearray) - w_sub = space.wrap(space.bufferstr_new_w(w_sub)) - w_tuple = stringobject.str_rpartition__String_String(space, w_str, w_sub) - w_a, w_b, w_c = space.fixedview(w_tuple, 3) - return space.newtuple([ - String2Bytearray(space, w_a), - String2Bytearray(space, w_b), - String2Bytearray(space, w_c)]) - -# __________________________________________________________ -# Mutability methods - -def bytearray_append__Bytearray_ANY(space, w_bytearray, w_item): - from pypy.objspace.std.bytearraytype import getbytevalue - w_bytearray.data.append(getbytevalue(space, w_item)) - -def bytearray_extend__Bytearray_Bytearray(space, w_bytearray, w_other): - w_bytearray.data += w_other.data - -def bytearray_extend__Bytearray_ANY(space, w_bytearray, w_other): - w_bytearray.data += makebytearraydata_w(space, w_other) - -def inplace_add__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - bytearray_extend__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2) - return w_bytearray1 - -def inplace_add__Bytearray_ANY(space, w_bytearray1, w_iterable2): - w_bytearray1.data += space.bufferstr_new_w(w_iterable2) - return w_bytearray1 - -def setitem__Bytearray_ANY_ANY(space, w_bytearray, w_index, w_item): - from pypy.objspace.std.bytearraytype import getbytevalue - idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") - try: - w_bytearray.data[idx] = getbytevalue(space, w_item) - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray index out of range")) - -def setitem__Bytearray_Slice_ANY(space, w_bytearray, w_slice, w_other): - oldsize = len(w_bytearray.data) - start, stop, step, slicelength = w_slice.indices4(space, oldsize) - sequence2 = makebytearraydata_w(space, w_other) - _setitem_slice_helper(space, w_bytearray.data, start, step, slicelength, sequence2, empty_elem='\x00') - -def delitem__Bytearray_ANY(space, w_bytearray, w_idx): - idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") - try: - del w_bytearray.data[idx] - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray deletion index out of range")) - return space.w_None - -def delitem__Bytearray_Slice(space, w_bytearray, w_slice): - start, stop, step, slicelength = w_slice.indices4(space, - len(w_bytearray.data)) - _delitem_slice_helper(space, w_bytearray.data, start, step, slicelength) - #XXX share the code again with the stuff in listobject.py def _delitem_slice_helper(space, items, start, step, slicelength): if slicelength==0: @@ -673,27 +1102,6 @@ items[start] = sequence2[i] start += step -def _strip(space, w_bytearray, u_chars, left, right): - # note: mostly copied from stringobject._strip - # should really be shared - u_self = w_bytearray.data - - lpos = 0 - rpos = len(u_self) - - if left: - while lpos < rpos and u_self[lpos] in u_chars: - lpos += 1 - - if right: - while rpos > lpos and u_self[rpos - 1] in 
u_chars: - rpos -= 1 - assert rpos >= 0 - - return new_bytearray(space, space.w_bytearray, u_self[lpos:rpos]) - -# __________________________________________________________ -# Buffer interface class BytearrayBuffer(RWBuffer): def __init__(self, data): @@ -707,10 +1115,3 @@ def setitem(self, index, char): self.data[index] = char - -def buffer__Bytearray(space, self): - b = BytearrayBuffer(self.data) - return space.wrap(b) - -from pypy.objspace.std import bytearraytype -register_all(vars(), bytearraytype) diff --git a/pypy/objspace/std/bytearraytype.py b/pypy/objspace/std/bytearraytype.py deleted file mode 100644 --- a/pypy/objspace/std/bytearraytype.py +++ /dev/null @@ -1,176 +0,0 @@ -from pypy.interpreter.baseobjspace import ObjSpace, W_Root -from pypy.interpreter.error import OperationError -from pypy.interpreter.gateway import interp2app -from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.stdtypedef import StdTypeDef, SMM - -from pypy.objspace.std.stringtype import ( - str_decode, - str_count, str_index, str_rindex, str_find, str_rfind, str_replace, - str_startswith, str_endswith, str_islower, str_isupper, str_isalpha, - str_isalnum, str_isdigit, str_isspace, str_istitle, - str_upper, str_lower, str_title, str_swapcase, str_capitalize, - str_expandtabs, str_ljust, str_rjust, str_center, str_zfill, - str_join, str_split, str_rsplit, str_partition, str_rpartition, - str_splitlines, str_translate) - -from rpython.rlib.objectmodel import newlist_hint, resizelist_hint - -bytearray_append = SMM('append', 2) -bytearray_extend = SMM('extend', 2) -bytearray_insert = SMM('insert', 3, - doc="B.insert(index, int) -> None\n\n" - "Insert a single item into the bytearray before " - "the given index.") - -bytearray_pop = SMM('pop', 2, defaults=(-1,), - doc="B.pop([index]) -> int\n\nRemove and return a " - "single item from B. 
If no index\nargument is given, " - "will pop the last value.") - -bytearray_remove = SMM('remove', 2, - doc="B.remove(int) -> None\n\n" - "Remove the first occurance of a value in B.") - -bytearray_reverse = SMM('reverse', 1, - doc="B.reverse() -> None\n\n" - "Reverse the order of the values in B in place.") - -bytearray_strip = SMM('strip', 2, defaults=(None,), - doc="B.strip([bytes]) -> bytearray\n\nStrip leading " - "and trailing bytes contained in the argument.\nIf " - "the argument is omitted, strip ASCII whitespace.") - -bytearray_lstrip = SMM('lstrip', 2, defaults=(None,), - doc="B.lstrip([bytes]) -> bytearray\n\nStrip leading " - "bytes contained in the argument.\nIf the argument is " - "omitted, strip leading ASCII whitespace.") - -bytearray_rstrip = SMM('rstrip', 2, defaults=(None,), - doc="'B.rstrip([bytes]) -> bytearray\n\nStrip trailing " - "bytes contained in the argument.\nIf the argument is " - "omitted, strip trailing ASCII whitespace.") - -def getbytevalue(space, w_value): - if space.isinstance_w(w_value, space.w_str): - string = space.str_w(w_value) - if len(string) != 1: - raise OperationError(space.w_ValueError, space.wrap( - "string must be of size 1")) - return string[0] - - value = space.getindex_w(w_value, None) - if not 0 <= value < 256: - # this includes the OverflowError in case the long is too large - raise OperationError(space.w_ValueError, space.wrap( - "byte must be in range(0, 256)")) - return chr(value) - -def new_bytearray(space, w_bytearraytype, data): - from pypy.objspace.std.bytearrayobject import W_BytearrayObject - w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) - W_BytearrayObject.__init__(w_obj, data) - return w_obj - - -def descr__new__(space, w_bytearraytype, __args__): - return new_bytearray(space,w_bytearraytype, []) - - -def makebytearraydata_w(space, w_source): - # String-like argument - try: - string = space.bufferstr_new_w(w_source) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - return [c for c in string] - - # sequence of bytes - w_iter = space.iter(w_source) - length_hint = space.length_hint(w_source, 0) - data = newlist_hint(length_hint) - extended = 0 - while True: - try: - w_item = space.next(w_iter) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break - value = getbytevalue(space, w_item) - data.append(value) - extended += 1 - if extended < length_hint: - resizelist_hint(data, extended) - return data - -def descr_bytearray__reduce__(space, w_self): - from pypy.objspace.std.bytearrayobject import W_BytearrayObject - assert isinstance(w_self, W_BytearrayObject) - w_dict = w_self.getdict(space) - if w_dict is None: - w_dict = space.w_None - return space.newtuple([ - space.type(w_self), space.newtuple([ - space.wrap(''.join(w_self.data).decode('latin-1')), - space.wrap('latin-1')]), - w_dict]) - -def _hex_digit_to_int(d): From noreply at buildbot.pypy.org Fri Jan 17 22:56:41 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Fri, 17 Jan 2014 22:56:41 +0100 (CET) Subject: [pypy-commit] pypy default: Document branch. Message-ID: <20140117215641.B6B4F1C087E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68753:2978bfdc5faa Date: 2014-01-17 22:56 +0100 http://bitbucket.org/pypy/pypy/changeset/2978bfdc5faa/ Log: Document branch. 
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -41,3 +41,6 @@ Fix 3 broken links on PyPy published papers in docs. .. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. From noreply at buildbot.pypy.org Fri Jan 17 23:03:23 2014 From: noreply at buildbot.pypy.org (jerith) Date: Fri, 17 Jan 2014 23:03:23 +0100 (CET) Subject: [pypy-commit] pypy default: actually check for CO_YIELD_INSIDE_TRY in co_flags Message-ID: <20140117220323.1DCC51C087E@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: Changeset: r68754:79512ccd52df Date: 2014-01-18 00:02 +0200 http://bitbucket.org/pypy/pypy/changeset/79512ccd52df/ Log: actually check for CO_YIELD_INSIDE_TRY in co_flags diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -167,7 +167,7 @@ def run(self): """Start this frame's execution.""" if self.getcode().co_flags & pycode.CO_GENERATOR: - if pycode.CO_YIELD_INSIDE_TRY: + if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: from pypy.interpreter.generator import GeneratorIteratorWithDel return self.space.wrap(GeneratorIteratorWithDel(self)) else: From noreply at buildbot.pypy.org Fri Jan 17 23:25:22 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 17 Jan 2014 23:25:22 +0100 (CET) Subject: [pypy-commit] stmgc c7: reducing number of aborts by sleeping Message-ID: <20140117222522.78F8C1C2447@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r636:94b61d8b586a Date: 2014-01-17 23:25 +0100 http://bitbucket.org/pypy/stmgc/changeset/94b61d8b586a/ Log: reducing number of aborts by sleeping diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -376,6 +376,9 @@ uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; uint8_t previous; while ((previous = __sync_lock_test_and_set(&write_locks[lock_idx], 1))) { + usleep(1); /* XXXXXX */ + if (!(previous = __sync_lock_test_and_set(&write_locks[lock_idx], 1))) + break; stm_abort_transaction(); /* XXX: only abort if we are younger */ spin_loop(); @@ -897,6 +900,7 @@ _STM_TL2->running_transaction = 0; stm_stop_lock(); + fprintf(stderr, "%c", 'C'+_STM_TL2->thread_num*32); } @@ -975,7 +979,7 @@ assert(_STM_TL1->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ _STM_TL2->running_transaction = 0; stm_stop_lock(); - fprintf(stderr, "a"); + fprintf(stderr, "%c", 'A'+_STM_TL2->thread_num*32); /* reset all the modified objects (incl. 
re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_threads(); From noreply at buildbot.pypy.org Sat Jan 18 02:55:05 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 18 Jan 2014 02:55:05 +0100 (CET) Subject: [pypy-commit] pypy annotator: Inline FlowObjSpace.call() in its only caller Message-ID: <20140118015505.F07FD1C087E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68755:082ef34cf051 Date: 2014-01-17 21:11 +0000 http://bitbucket.org/pypy/pypy/changeset/082ef34cf051/ Log: Inline FlowObjSpace.call() in its only caller diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -1018,8 +1018,12 @@ arguments = self.popvalues(n_arguments) args = CallSpec(arguments, keywords, w_star) w_function = self.popvalue() - w_result = self.space.call(w_function, args) - self.pushvalue(w_result) + if args.keywords or isinstance(args.w_stararg, Variable): + shape, args_w = args.flatten() + hlop = op.call_args(w_function, Constant(shape), *args_w) + else: + hlop = op.simple_call(w_function, *args.as_list()) + self.pushvalue(hlop.eval(self)) def CALL_FUNCTION(self, oparg): self.call_function(oparg) diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -34,14 +34,6 @@ def build_flow(self, func): return build_flow(func, self) - def call(self, w_callable, args): - if args.keywords or isinstance(args.w_stararg, Variable): - shape, args_w = args.flatten() - hlop = op.call_args(w_callable, Constant(shape), *args_w) - else: - hlop = op.simple_call(w_callable, *args.as_list()) - return hlop.eval(self.frame) - def build_flow(func, space=FlowObjSpace()): """ From noreply at buildbot.pypy.org Sat Jan 18 02:55:07 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 18 Jan 2014 02:55:07 +0100 (CET) Subject: [pypy-commit] pypy annotator: Kill FlowObjSpace. Finally! Message-ID: <20140118015507.463251C087E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68756:a3a0e4f03ee8 Date: 2014-01-18 01:54 +0000 http://bitbucket.org/pypy/pypy/changeset/a3a0e4f03ee8/ Log: Kill FlowObjSpace. Finally! diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -308,11 +308,10 @@ class FlowSpaceFrame(object): opcode_method_names = host_bytecode_spec.method_names - def __init__(self, space, graph, code): + def __init__(self, graph, code): self.graph = graph func = graph.func self.pycode = code - self.space = space self.w_globals = Constant(func.func_globals) self.blockstack = [] diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -4,9 +4,8 @@ from inspect import CO_NEWLOCALS -from rpython.flowspace.model import Constant, Variable, checkgraph +from rpython.flowspace.model import Variable, checkgraph from rpython.flowspace.bytecode import HostCode -from rpython.flowspace.operation import op from rpython.flowspace.flowcontext import (FlowSpaceFrame, fixeggblocks) from rpython.flowspace.generator import (tweak_generator_graph, bootstrap_generator) @@ -24,18 +23,7 @@ "the flag CO_NEWLOCALS set.") -# ______________________________________________________________________ -class FlowObjSpace(object): - """NOT_RPYTHON. 
- The flow objspace space is used to produce a flow graph by recording - the space operations that the interpreter generates when it interprets - (the bytecode of) some function. - """ - def build_flow(self, func): - return build_flow(func, self) - - -def build_flow(func, space=FlowObjSpace()): +def build_flow(func): """ Create the flow graph for the function. """ @@ -50,7 +38,7 @@ w_value.rename(name) return bootstrap_generator(graph) graph = PyGraph(func, code) - frame = space.frame = FlowSpaceFrame(space, graph, code) + frame = FlowSpaceFrame(graph, code) frame.build_flow() fixeggblocks(graph) checkgraph(graph) diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -76,8 +76,8 @@ @classmethod def make_sc(cls): - def sc_operator(space, *args_w): - return cls(*args_w).eval(space.frame) + def sc_operator(frame, *args_w): + return cls(*args_w).eval(frame) return sc_operator def eval(self, frame): @@ -505,7 +505,7 @@ except (KeyError, TypeError): pass else: - return sc(frame.space, *args_w) + return sc(frame, *args_w) return frame.do_op(self) @@ -521,6 +521,7 @@ except (KeyError, TypeError): pass else: + from rpython.flowspace.flowcontext import FlowingError raise FlowingError( "should not call %r with keyword arguments" % (fn,)) return frame.do_op(self) diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -7,7 +7,7 @@ """Decorator triggering special-case handling of ``func``. When the flow graph builder sees ``func``, it calls the decorated function - with ``decorated_func(space, *args_w)``, where ``args_w`` is a sequence of + with ``decorated_func(frame, *args_w)``, where ``args_w`` is a sequence of flow objects (Constants or Variables). 
""" def decorate(sc_func): @@ -15,11 +15,10 @@ return decorate @register_flow_sc(__import__) -def sc_import(space, *args_w): - assert len(args_w) > 0 and len(args_w) <= 5, 'import needs 1 to 5 arguments' +def sc_import(frame, *args_w): assert all(isinstance(arg, Constant) for arg in args_w) args = [arg.value for arg in args_w] - return space.frame.import_name(*args) + return frame.import_name(*args) @register_flow_sc(locals) def sc_locals(_, *args): @@ -32,34 +31,34 @@ "own project.") @register_flow_sc(isinstance) -def sc_isinstance(space, w_instance, w_type): +def sc_isinstance(frame, w_instance, w_type): if w_instance.foldable() and w_type.foldable(): return const(isinstance(w_instance.value, w_type.value)) - return space.frame.appcall(isinstance, w_instance, w_type) + return frame.appcall(isinstance, w_instance, w_type) @register_flow_sc(getattr) -def sc_getattr(space, w_obj, w_index, w_default=None): +def sc_getattr(frame, w_obj, w_index, w_default=None): if w_default is not None: - return space.frame.appcall(getattr, w_obj, w_index, w_default) + return frame.appcall(getattr, w_obj, w_index, w_default) else: from rpython.flowspace.operation import op - return op.getattr(w_obj, w_index).eval(space.frame) + return op.getattr(w_obj, w_index).eval(frame) @register_flow_sc(open) -def sc_open(space, *args_w): +def sc_open(frame, *args_w): from rpython.rlib.rfile import create_file - return space.frame.appcall(create_file, *args_w) + return frame.appcall(create_file, *args_w) @register_flow_sc(os.tmpfile) -def sc_os_tmpfile(space): +def sc_os_tmpfile(frame): from rpython.rlib.rfile import create_temp_rfile - return space.frame.appcall(create_temp_rfile) + return frame.appcall(create_temp_rfile) @register_flow_sc(os.remove) -def sc_os_remove(space, *args_w): +def sc_os_remove(frame, *args_w): # on top of PyPy only: 'os.remove != os.unlink' # (on CPython they are '==', but not identical either) - return space.frame.appcall(os.unlink, *args_w) + return frame.appcall(os.unlink, *args_w) # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs diff --git a/rpython/flowspace/test/test_framestate.py b/rpython/flowspace/test/test_framestate.py --- a/rpython/flowspace/test/test_framestate.py +++ b/rpython/flowspace/test/test_framestate.py @@ -1,6 +1,5 @@ from rpython.flowspace.model import * from rpython.rlib.unroll import SpecTag -from rpython.flowspace.objspace import FlowObjSpace from rpython.flowspace.flowcontext import FlowSpaceFrame from rpython.flowspace.bytecode import HostCode from rpython.flowspace.pygraph import PyGraph @@ -13,7 +12,7 @@ pass code = HostCode._from_code(func.func_code) graph = PyGraph(func, code) - frame = FlowSpaceFrame(FlowObjSpace(), graph, code) + frame = FlowSpaceFrame(graph, code) # hack the frame frame.setstate(graph.startblock.framestate) frame.locals_stack_w[frame.pycode.co_nlocals-1] = Constant(None) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -282,7 +282,7 @@ return False @register_flow_sc(we_are_translated) -def sc_we_are_translated(space): +def sc_we_are_translated(frame): return Constant(True) @@ -706,7 +706,7 @@ class r_ordereddict(r_dict): def _newdict(self): from collections import OrderedDict - + return OrderedDict() class _r_dictkey(object): diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ 
b/rpython/rlib/rarithmetic.py @@ -516,12 +516,12 @@ r_uint = build_int('r_uint', False, LONG_BIT) @register_flow_sc(r_uint) -def sc_r_uint(space, w_value): +def sc_r_uint(frame, w_value): # (normally, the 32-bit constant is a long, and is not allowed to # show up in the flow graphs at all) if isinstance(w_value, Constant): return Constant(r_uint(w_value.value)) - return space.frame.appcall(r_uint, w_value) + return frame.appcall(r_uint, w_value) r_longlong = build_int('r_longlong', True, 64) From noreply at buildbot.pypy.org Sat Jan 18 11:08:33 2014 From: noreply at buildbot.pypy.org (jerith) Date: Sat, 18 Jan 2014 11:08:33 +0100 (CET) Subject: [pypy-commit] pypy default: increment default_magic for the new co_flag (oops) Message-ID: <20140118100833.665491C11ED@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: Changeset: r68757:9f9f78e7e299 Date: 2014-01-18 12:07 +0200 http://bitbucket.org/pypy/pypy/changeset/9f9f78e7e299/ Log: increment default_magic for the new co_flag (oops) diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -31,7 +31,7 @@ # Magic numbers for the bytecode version in code objects. # See comments in pypy/module/imp/importing. cpython_magic, = struct.unpack(" Author: Antonio Cuni Branch: utf8-unicode Changeset: r68758:e5291f543f0f Date: 2014-01-17 14:35 +0100 http://bitbucket.org/pypy/pypy/changeset/e5291f543f0f/ Log: a branch where to implement unicode objects are utf8-encoded rpython strings From noreply at buildbot.pypy.org Sat Jan 18 11:14:25 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Sat, 18 Jan 2014 11:14:25 +0100 (CET) Subject: [pypy-commit] pypy utf8-unicode: break the world, and implement W_UnicodeObject as utf8 rpython strings Message-ID: <20140118101425.19B4E1C1500@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: utf8-unicode Changeset: r68759:eb1500901ddf Date: 2014-01-17 22:54 +0100 http://bitbucket.org/pypy/pypy/changeset/eb1500901ddf/ Log: break the world, and implement W_UnicodeObject as utf8 rpython strings diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -199,7 +199,7 @@ def str_w(self, space): self._typed_unwrap_error(space, "string") - def unicode_w(self, space): + def utf8_w(self, space): self._typed_unwrap_error(space, "unicode") def int_w(self, space): @@ -1376,11 +1376,11 @@ self.wrap('argument must be a string')) return self.str_w(w_obj) - def unicode_w(self, w_obj): - return w_obj.unicode_w(self) + def utf8_w(self, w_obj): + return w_obj.utf8_w(self) - def unicode0_w(self, w_obj): - "Like unicode_w, but rejects strings with NUL bytes." + def utf8_0_w(self, w_obj): + "Like utf8_w, but rejects strings with NUL bytes." from rpython.rlib import rstring result = w_obj.unicode_w(self) if u'\x00' in result: diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -61,3 +61,20 @@ uni, len(uni), "strict", errorhandler=encode_error_handler(space), allow_surrogates=True) + +def ensure_ascii(space, s, errors='strict'): + # ASCII is equivalent to the first 128 ordinals in Unicode. 
+ eh = decode_error_handler(space) + pos = 0 + size = len(s) + while pos < size: + c = s[pos] + if ord(c) >= 128: + r, pos = eh(errors, "ascii", "ordinal not in range(128)", + s, pos, pos + 1) + pos += 1 + return s + +def ensure_utf8(space, s, errors='strict'): + # XXXY implement me! + return s diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -658,8 +658,8 @@ if space.isinstance_w(w_sub, space.w_unicode): from pypy.objspace.std.unicodeobject import W_UnicodeObject assert isinstance(w_sub, W_UnicodeObject) - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return space.newbool(self_as_unicode._value.find(w_sub._value) >= 0) + self_as_utf8 = unicode_from_encoded_object(space, self, None, None) + return space.newbool(self_as_utf8._utf8val.find(w_sub._utf8val) >= 0) return self._StringMethods_descr_contains(space, w_sub) _StringMethods_descr_replace = descr_replace diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1633,10 +1633,10 @@ _applevel_repr = "unicode" def wrap(self, stringval): - return self.space.wrap(stringval) + return self.space.wrap_utf8(stringval) def unwrap(self, w_string): - return self.space.unicode_w(w_string) + return self.space.utf8_w(w_string) erase, unerase = rerased.new_erasing_pair("unicode") erase = staticmethod(erase) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -158,7 +158,8 @@ if isinstance(x, str): return wrapstr(self, x) if isinstance(x, unicode): - return wrapunicode(self, x) + # we might want to kill support for wrap(u'...') eventually + return wrapunicode(self, x.encode('utf-8')) if isinstance(x, float): return W_FloatObject(x) if isinstance(x, W_Root): @@ -181,6 +182,14 @@ return self._wrap_not_rpython(x) wrap._annspecialcase_ = "specialize:wrap" + def wrap_utf8(self, utf8val): + """ + Take an utf8-encoded RPython string an return an unicode applevel + object + """ + # the constructor of W_UnicodeObject checks that it's valid UTF8 + return wrapunicode(self, utf8val) + def _wrap_not_rpython(self, x): "NOT_RPYTHON" # _____ this code is here to support testing only _____ diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -11,7 +11,7 @@ from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from rpython.rlib.objectmodel import compute_hash, compute_unique_id, import_from_mixin -from rpython.rlib.rstring import UnicodeBuilder +from rpython.rlib.rstring import StringBuilder from rpython.rlib.runicode import (str_decode_utf_8, str_decode_ascii, unicode_encode_utf_8, unicode_encode_ascii, make_unicode_escape_function) @@ -22,24 +22,26 @@ class W_UnicodeObject(W_Root): import_from_mixin(StringMethods) - _immutable_fields_ = ['_value'] + _immutable_fields_ = ['_utf8val'] - def __init__(w_self, unistr): - assert isinstance(unistr, unicode) - w_self._value = unistr + def __init__(w_self, utf8val): + assert isinstance(utf8val, str) + w_self._utf8val = utf8val + # XXXY: we want a more efficient way to compute this + w_self._length = len(utf8val.decode('utf-8')) def __repr__(w_self): """ representation for debugging purposes """ - return "%s(%r)" % 
(w_self.__class__.__name__, w_self._value) + return "%s(%r)" % (w_self.__class__.__name__, w_self._utf8val.decode('utf8')) def unwrap(w_self, space): # for testing - return w_self._value + return w_self._utf8val.decode('utf-8') def create_if_subclassed(w_self): if type(w_self) is W_UnicodeObject: return w_self - return W_UnicodeObject(w_self._value) + return W_UnicodeObject(w_self._utf8val) def is_w(self, space, w_other): if not isinstance(w_other, W_UnicodeObject): @@ -48,55 +50,58 @@ return True if self.user_overridden_class or w_other.user_overridden_class: return False - return space.unicode_w(self) is space.unicode_w(w_other) + return space.utf8_w(self) is space.utf8_w(w_other) def immutable_unique_id(self, space): if self.user_overridden_class: return None - return space.wrap(compute_unique_id(space.unicode_w(self))) + return space.wrap(compute_unique_id(space.utf8_w(self))) def str_w(self, space): return space.str_w(space.str(self)) - def unicode_w(self, space): - return self._value + def utf8_w(self, space): + return self._utf8val def listview_unicode(w_self): - return _create_list_from_unicode(w_self._value) + return _create_list_from_unicode(w_self._utf8val) def ord(self, space): - if len(self._value) != 1: + if self._len() != 1: msg = "ord() expected a character, but string of length %d found" - raise operationerrfmt(space.w_TypeError, msg, len(self._value)) + raise operationerrfmt(space.w_TypeError, msg, self._len()) + XXX return space.wrap(ord(self._value[0])) - def _new(self, value): - return W_UnicodeObject(value) + def _new(self, utf8val): + assert isinstance(utf8val, str) + return W_UnicodeObject(utf8val) def _new_from_list(self, value): - return W_UnicodeObject(u''.join(value)) + # value is a RPython list of utf8-encoded strings + return W_UnicodeObject(''.join(value)) def _empty(self): return W_UnicodeObject.EMPTY def _len(self): - return len(self._value) + return self._length def _val(self, space): - return self._value + return self._utf8val def _op_val(self, space, w_other): if isinstance(w_other, W_UnicodeObject): - return w_other._value + return w_other._utf8val if space.isinstance_w(w_other, space.w_str): - return unicode_from_string(space, w_other)._value - return unicode_from_encoded_object(space, w_other, None, "strict")._value + return unicode_from_string(space, w_other)._utf8val + return unicode_from_encoded_object(space, w_other, None, "strict")._utf8val def _chr(self, char): assert len(char) == 1 return unicode(char)[0] - _builder = UnicodeBuilder + _builder = StringBuilder def _isupper(self, ch): return unicodedb.isupper(ord(ch)) @@ -189,7 +194,7 @@ return encode_object(space, self, None, None) def descr_hash(self, space): - x = compute_hash(self._value) + x = compute_hash(self._utf8val) return space.wrap(x) def descr_eq(self, space, w_other): @@ -350,8 +355,9 @@ return space.newbool(cased) -def wrapunicode(space, uni): - return W_UnicodeObject(uni) +def wrapunicode(space, utf8val): + # XXXY: we should check that it's valid UTF8 + return W_UnicodeObject(utf8val) def plain_str2unicode(space, s): try: @@ -426,17 +432,17 @@ encoding = getdefaultencoding(space) if errors is None or errors == 'strict': if encoding == 'ascii': - # XXX error handling s = space.bufferstr_w(w_obj) - eh = unicodehelper.decode_error_handler(space) - return space.wrap(str_decode_ascii( - s, len(s), None, final=True, errorhandler=eh)[0]) + s = unicodehelper.ensure_ascii(space, s) + return space.wrap_utf8(s) if encoding == 'utf-8': s = space.bufferstr_w(w_obj) - eh = 
unicodehelper.decode_error_handler(space) - return space.wrap(str_decode_utf_8( - s, len(s), None, final=True, errorhandler=eh, - allow_surrogates=True)[0]) + s = unicodehelper.ensure_utf8(space, s) + return space.wrap_utf8(s) + ## eh = unicodehelper.decode_error_handler(space) + ## return space.wrap(str_decode_utf_8( + ## s, len(s), None, final=True, errorhandler=eh, + ## allow_surrogates=True)[0]) w_codecs = space.getbuiltinmodule("_codecs") w_decode = space.getattr(w_codecs, space.wrap("decode")) if errors is None: @@ -489,11 +495,8 @@ if encoding != 'ascii': return unicode_from_encoded_object(space, w_str, encoding, "strict") s = space.str_w(w_str) - try: - return W_UnicodeObject(s.decode("ascii")) - except UnicodeDecodeError: - # raising UnicodeDecodeError is messy, "please crash for me" - return unicode_from_encoded_object(space, w_str, "ascii", "strict") + s = unicodehelper.ensure_ascii(space, s) + return W_UnicodeObject(s) class UnicodeDocstrings: @@ -1034,7 +1037,7 @@ return [s for s in value] -W_UnicodeObject.EMPTY = W_UnicodeObject(u'') +W_UnicodeObject.EMPTY = W_UnicodeObject('') # Helper for converting int/long def unicode_to_decimal_w(space, w_unistr): From noreply at buildbot.pypy.org Sat Jan 18 11:15:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 18 Jan 2014 11:15:24 +0100 (CET) Subject: [pypy-commit] pypy default: Try to clear up the comment Message-ID: <20140118101524.3CA8D1C1500@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68760:c8c63a87f605 Date: 2014-01-18 11:14 +0100 http://bitbucket.org/pypy/pypy/changeset/c8c63a87f605/ Log: Try to clear up the comment diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -839,9 +839,9 @@ # any number between CPython + 2 and CPython + 9. Right now, # default_magic = CPython + 7. # -# default_magic - 7 -- used by CPython without the -U option -# default_magic - 6 -- used by CPython with the -U option -# default_magic -- used by PyPy [because of CALL_METHOD] +# CPython + 0 -- used by CPython without the -U option +# CPython + 1 -- used by CPython with the -U option +# CPython + 7 = default_magic -- used by PyPy (incompatible!) 
# from pypy.interpreter.pycode import default_magic MARSHAL_VERSION_FOR_PYC = 2 From noreply at buildbot.pypy.org Sat Jan 18 11:39:54 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sat, 18 Jan 2014 11:39:54 +0100 (CET) Subject: [pypy-commit] stmgc c7: add spinlock implementation of reader-writer lock Message-ID: <20140118103954.F29D21C0178@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r637:cae45c13aee6 Date: 2014-01-18 11:39 +0100 http://bitbucket.org/pypy/stmgc/changeset/cae45c13aee6/ Log: add spinlock implementation of reader-writer lock diff --git a/c7/Makefile b/c7/Makefile --- a/c7/Makefile +++ b/c7/Makefile @@ -14,9 +14,9 @@ rm -f $(BUILD_EXE) $(DEBUG_EXE) $(RELEASE_EXE) -H_FILES = core.h list.h pagecopy.h +H_FILES = core.h list.h pagecopy.h reader_writer_lock.h -C_FILES = core.c list.c pagecopy.c +C_FILES = core.c list.c pagecopy.c reader_writer_lock.c DEBUG = -g diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -13,6 +13,7 @@ #include "core.h" #include "list.h" #include "pagecopy.h" +#include "reader_writer_lock.h" #define NB_PAGES (256*256) // 256MB @@ -142,41 +143,40 @@ /************************************************************/ +rwticket rw_shared_lock; + /* a multi-reader, single-writer lock: transactions normally take a reader lock, so don't conflict with each other; when we need to do a global GC, we take a writer lock to "stop the world". Note the initializer here, which should give the correct priority for stm_possible_safe_point(). */ -static pthread_rwlock_t rwlock_shared; + struct tx_descriptor *in_single_thread = NULL; void stm_start_shared_lock(void) { - int err = pthread_rwlock_rdlock(&rwlock_shared); - if (err != 0) - abort(); + rwticket_rdlock(&rw_shared_lock); } -void stm_stop_lock(void) +void stm_stop_shared_lock(void) { - int err = pthread_rwlock_unlock(&rwlock_shared); - if (err != 0) - abort(); + rwticket_rdunlock(&rw_shared_lock); +} + +void stm_stop_exclusive_lock(void) +{ + rwticket_wrunlock(&rw_shared_lock); } void stm_start_exclusive_lock(void) { - int err = pthread_rwlock_wrlock(&rwlock_shared); - if (err != 0) - abort(); - if (_STM_TL2->need_abort) - stm_abort_transaction(); + rwticket_wrlock(&rw_shared_lock); } void _stm_start_safe_point(void) { assert(!_STM_TL2->need_abort); - stm_stop_lock(); + stm_stop_shared_lock(); } void _stm_stop_safe_point(void) @@ -376,9 +376,12 @@ uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; uint8_t previous; while ((previous = __sync_lock_test_and_set(&write_locks[lock_idx], 1))) { - usleep(1); /* XXXXXX */ - if (!(previous = __sync_lock_test_and_set(&write_locks[lock_idx], 1))) - break; + /* XXXXXX */ + //_stm_start_semi_safe_point(); + usleep(1); + //_stm_stop_semi_safe_point(); + //if (!(previous = __sync_lock_test_and_set(&write_locks[lock_idx], 1))) + // break; stm_abort_transaction(); /* XXX: only abort if we are younger */ spin_loop(); @@ -583,13 +586,8 @@ void stm_setup(void) -{ - pthread_rwlockattr_t attr; - pthread_rwlockattr_init(&attr); - pthread_rwlockattr_setkind_np(&attr, - PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); - pthread_rwlock_init(&rwlock_shared, &attr); - pthread_rwlockattr_destroy(&attr); +{ + memset(&rw_shared_lock, 0, sizeof(rwticket)); /* Check that some values are acceptable */ assert(4096 <= ((uintptr_t)_STM_TL1)); @@ -685,8 +683,8 @@ void _stm_teardown_thread(void) { - assert(!pthread_rwlock_trywrlock(&rwlock_shared)); - assert(!pthread_rwlock_unlock(&rwlock_shared)); + assert(!rwticket_wrtrylock(&rw_shared_lock)); + 
assert(!rwticket_wrunlock(&rw_shared_lock)); stm_list_free(_STM_TL2->modified_objects); _STM_TL2->modified_objects = NULL; @@ -708,7 +706,6 @@ munmap(object_pages, TOTAL_MEMORY); memset(flag_page_private, 0, sizeof(flag_page_private)); memset(write_locks, 0, sizeof(write_locks)); - pthread_rwlock_destroy(&rwlock_shared); object_pages = NULL; } @@ -794,7 +791,7 @@ void stm_stop_transaction(void) { assert(_STM_TL2->running_transaction); - stm_stop_lock(); + stm_stop_shared_lock(); stm_start_exclusive_lock(); _STM_TL1->jmpbufptr = NULL; /* cannot abort any more */ @@ -899,7 +896,7 @@ /* } */ _STM_TL2->running_transaction = 0; - stm_stop_lock(); + stm_stop_exclusive_lock(); fprintf(stderr, "%c", 'C'+_STM_TL2->thread_num*32); } @@ -978,7 +975,7 @@ assert(_STM_TL1->jmpbufptr != NULL); assert(_STM_TL1->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ _STM_TL2->running_transaction = 0; - stm_stop_lock(); + stm_stop_shared_lock(); fprintf(stderr, "%c", 'A'+_STM_TL2->thread_num*32); /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ diff --git a/c7/demo2.c b/c7/demo2.c --- a/c7/demo2.c +++ b/c7/demo2.c @@ -57,7 +57,9 @@ r_n = r_n->next; stm_read((objptr_t)r_n); sum += r_n->value; - + + _stm_start_safe_point(); + _stm_stop_safe_point(); if (prev >= r_n->value) { stm_stop_transaction(); return -1; @@ -184,7 +186,6 @@ sem_post(&initialized); status = sem_wait(&go); assert(status == 0); - } while (check_sorted() == -1) { diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -7,9 +7,9 @@ parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) header_files = [os.path.join(parent_dir, _n) for _n in - "core.h pagecopy.h list.h".split()] + "core.h pagecopy.h list.h reader_writer_lock.h".split()] source_files = [os.path.join(parent_dir, _n) for _n in - "core.c pagecopy.c list.c".split()] + "core.c pagecopy.c list.c reader_writer_lock.c".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): From noreply at buildbot.pypy.org Sat Jan 18 12:15:53 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 18 Jan 2014 12:15:53 +0100 (CET) Subject: [pypy-commit] pypy default: Fix strbufobject. Message-ID: <20140118111553.CF9D51C1500@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68761:286528b6716e Date: 2014-01-18 12:16 +0100 http://bitbucket.org/pypy/pypy/changeset/286528b6716e/ Log: Fix strbufobject. 
diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py
--- a/pypy/objspace/std/bytesobject.py
+++ b/pypy/objspace/std/bytesobject.py
@@ -633,9 +633,15 @@
             return space.add(self_as_bytearray, w_other)
         if space.config.objspace.std.withstrbuf:
             from pypy.objspace.std.strbufobject import W_StringBufferObject
+            try:
+                other = self._op_val(space, w_other)
+            except OperationError, e:
+                if e.match(space, space.w_TypeError):
+                    return space.w_NotImplemented
+                raise
             builder = StringBuilder()
             builder.append(self._value)
-            builder.append(self._op_val(space, w_other))
+            builder.append(other)
             return W_StringBufferObject(builder)
         return self._StringMethods_descr_add(space, w_other)

From noreply at buildbot.pypy.org Sat Jan 18 13:44:34 2014
From: noreply at buildbot.pypy.org (Raemi)
Date: Sat, 18 Jan 2014 13:44:34 +0100 (CET)
Subject: [pypy-commit] stmgc c7: add files...
Message-ID: <20140118124434.60B9F1C153F@cobra.cs.uni-duesseldorf.de>

Author: Remi Meier
Branch: c7
Changeset: r639:cf5b0f66205f
Date: 2014-01-18 13:44 +0100
http://bitbucket.org/pypy/stmgc/changeset/cf5b0f66205f/

Log: add files...

diff --git a/c7/reader_writer_lock.c b/c7/reader_writer_lock.c
new file mode 100644
--- /dev/null
+++ b/c7/reader_writer_lock.c
@@ -0,0 +1,93 @@
+/* Taken from: http://locklessinc.com/articles/locks/
+
+   Sticking to semi-portable C code, we can still do a little better.
+   There exists a form of the ticket lock that is designed for read-write
+   locks. An example written in assembly was posted to the Linux kernel
+   mailing list in 2002 by David Howells from RedHat. This was a highly
+   optimized version of a read-write ticket lock developed at IBM in the
+   early 90's by Joseph Seigh. Note that a similar (but not identical)
+   algorithm was published by John Mellor-Crummey and Michael Scott in
+   their landmark paper "Scalable Reader-Writer Synchronization for
+   Shared-Memory Multiprocessors". Converting the algorithm from
+   assembly language to C yields:
+*/
+
+#include "reader_writer_lock.h"
+
+
+#define EBUSY 1
+#define atomic_xadd(P, V) __sync_fetch_and_add((P), (V))
+#define cmpxchg(P, O, N) __sync_val_compare_and_swap((P), (O), (N))
+#define atomic_inc(P) __sync_add_and_fetch((P), 1)
+#define atomic_dec(P) __sync_add_and_fetch((P), -1)
+#define atomic_add(P, V) __sync_add_and_fetch((P), (V))
+#define atomic_set_bit(P, V) __sync_or_and_fetch((P), 1<<(V))
+#define atomic_clear_bit(P, V) __sync_and_and_fetch((P), ~(1<<(V)))
+/* Compile read-write barrier */
+#define barrier() asm volatile("": : :"memory")
+
+/* Pause instruction to prevent excess processor bus usage */
+#define cpu_relax() asm volatile("pause\n": : :"memory")
+
+
+
+void rwticket_wrlock(rwticket *l)
+{
+    unsigned me = atomic_xadd(&l->u, (1<<16));
+    unsigned char val = me >> 16;
+
+    while (val != l->s.write) cpu_relax();
+}
+
+int rwticket_wrunlock(rwticket *l)
+{
+    rwticket t = *l;
+
+    barrier();
+
+    t.s.write++;
+    t.s.read++;
+
+    *(unsigned short *) l = t.us;
+    return 0;
+}
+
+int rwticket_wrtrylock(rwticket *l)
+{
+    unsigned me = l->s.users;
+    unsigned char menew = me + 1;
+    unsigned read = l->s.read << 8;
+    unsigned cmp = (me << 16) + read + me;
+    unsigned cmpnew = (menew << 16) + read + me;
+
+    if (cmpxchg(&l->u, cmp, cmpnew) == cmp) return 0;
+
+    return EBUSY;
+}
+
+void rwticket_rdlock(rwticket *l)
+{
+    unsigned me = atomic_xadd(&l->u, (1<<16));
+    unsigned char val = me >> 16;
+
+    while (val != l->s.read) cpu_relax();
+    l->s.read++;
+}
+
+void rwticket_rdunlock(rwticket *l)
+{
+    atomic_inc(&l->s.write);
+}
+
+int rwticket_rdtrylock(rwticket *l)
+{
+    unsigned me = l->s.users;
+    unsigned write = l->s.write;
+    unsigned char menew = me + 1;
+    unsigned cmp = (me << 16) + (me << 8) + write;
+    unsigned cmpnew = ((unsigned) menew << 16) + (menew << 8) + write;
+
+    if (cmpxchg(&l->u, cmp, cmpnew) == cmp) return 0;
+
+    return EBUSY;
+}
diff --git a/c7/reader_writer_lock.h b/c7/reader_writer_lock.h
new file mode 100644
--- /dev/null
+++ b/c7/reader_writer_lock.h
@@ -0,0 +1,22 @@
+
+typedef union rwticket rwticket;
+union rwticket
+{
+    unsigned u;
+    unsigned short us;
+    struct
+    {
+        unsigned char write;
+        unsigned char read;
+        unsigned char users;
+    } s;
+};
+
+void rwticket_wrlock(rwticket *l);
+int rwticket_wrunlock(rwticket *l);
+int rwticket_wrtrylock(rwticket *l);
+void rwticket_rdlock(rwticket *l);
+void rwticket_rdunlock(rwticket *l);
+int rwticket_rdtrylock(rwticket *l);
+
+

From noreply at buildbot.pypy.org Sat Jan 18 14:44:51 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sat, 18 Jan 2014 14:44:51 +0100 (CET)
Subject: [pypy-commit] pypy default: Copy the CPython-style error messages more closely
Message-ID: <20140118134451.345461C1500@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: 
Changeset: r68763:3f01d4d55bcc
Date: 2014-01-18 14:44 +0100
http://bitbucket.org/pypy/pypy/changeset/3f01d4d55bcc/

Log: Copy the CPython-style error messages more closely

diff --git
a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -153,7 +153,7 @@ # about the pos anymore and we just ignore the value if not charsleft: # there's only the start byte and nothing else - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'unexpected end of data', s, pos, pos+1) result.append(r) @@ -165,14 +165,14 @@ (ordch1 == 0xe0 and ordch2 < 0xa0)): # or (ordch1 == 0xed and ordch2 > 0x9f) # second byte invalid, take the first and continue - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue else: # second byte valid, but third byte missing - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'unexpected end of data', s, pos, pos+2) result.append(r) @@ -183,28 +183,28 @@ (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): # second byte invalid, take the first and continue - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue elif charsleft == 2 and ord(s[pos+2])>>6 != 0x2: # 0b10 # third byte invalid, take the first two and continue - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue else: # there's only 1 or 2 valid cb, but the others are missing - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'unexpected end of data', s, pos, pos+charsleft+1) result.append(r) break if n == 0: - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'invalid start byte', s, pos, pos+1) result.append(r) @@ -215,7 +215,7 @@ elif n == 2: ordch2 = ord(s[pos+1]) if ordch2>>6 != 0x2: # 0b10 - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) @@ -233,13 +233,13 @@ # surrogates shouldn't be valid UTF-8! 
or (not allow_surrogates and ordch1 == 0xed and ordch2 > 0x9f) ): - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue elif ordch3>>6 != 0x2: # 0b10 - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) result.append(r) @@ -257,19 +257,19 @@ if (ordch2>>6 != 0x2 or # 0b10 (ordch1 == 0xf0 and ordch2 < 0x90) or (ordch1 == 0xf4 and ordch2 > 0x8f)): - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) result.append(r) continue elif ordch3>>6 != 0x2: # 0b10 - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) result.append(r) continue elif ordch4>>6 != 0x2: # 0b10 - r, pos = errorhandler(errors, 'utf-8', + r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+3) result.append(r) @@ -337,7 +337,7 @@ _encodeUCS4(result, ch3) continue if not allow_surrogates: - ru, rs, pos = errorhandler(errors, 'utf-8', + ru, rs, pos = errorhandler(errors, 'utf8', 'surrogates not allowed', s, pos-1, pos) if rs is not None: @@ -348,7 +348,7 @@ if ord(ch) < 0x80: result.append(chr(ord(ch))) else: - errorhandler('strict', 'utf-8', + errorhandler('strict', 'utf8', 'surrogates not allowed', s, pos-1, pos) continue @@ -441,7 +441,7 @@ if len(s) - pos < 2: if not final: break - r, pos = errorhandler(errors, 'utf-16', "truncated data", + r, pos = errorhandler(errors, 'utf16', "truncated data", s, pos, len(s)) result.append(r) if len(s) - pos < 2: @@ -456,7 +456,7 @@ if not final: break errmsg = "unexpected end of data" - r, pos = errorhandler(errors, 'utf-16', errmsg, s, pos - 2, len(s)) + r, pos = errorhandler(errors, 'utf16', errmsg, s, pos - 2, len(s)) result.append(r) if len(s) - pos < 2: break @@ -472,12 +472,12 @@ (ch2 & 0x3FF)) + 0x10000)) continue else: - r, pos = errorhandler(errors, 'utf-16', + r, pos = errorhandler(errors, 'utf16', "illegal UTF-16 surrogate", s, pos - 4, pos - 2) result.append(r) else: - r, pos = errorhandler(errors, 'utf-16', + r, pos = errorhandler(errors, 'utf16', "illegal encoding", s, pos - 2, pos) result.append(r) @@ -609,7 +609,7 @@ if len(s) - pos < 4: if not final: break - r, pos = errorhandler(errors, 'utf-32', "truncated data", + r, pos = errorhandler(errors, 'utf32', "truncated data", s, pos, len(s)) result.append(r) if len(s) - pos < 4: @@ -618,7 +618,7 @@ ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) | (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]])) if ch >= 0x110000: - r, pos = errorhandler(errors, 'utf-32', "codepoint not in range(0x110000)", + r, pos = errorhandler(errors, 'utf32', "codepoint not in range(0x110000)", s, pos, len(s)) result.append(r) continue @@ -846,7 +846,7 @@ if base64bits >= 6: # We've seen at least one base-64 character msg = "partial character in shift sequence" - res, pos = errorhandler(errors, 'utf-7', + res, pos = errorhandler(errors, 'utf7', msg, s, pos-1, pos) result.append(res) continue @@ -854,7 +854,7 @@ # Some bits remain; they should be zero if base64buffer != 0: msg = "non-zero padding bits in shift sequence" - res, pos = errorhandler(errors, 'utf-7', + res, pos = errorhandler(errors, 'utf7', msg, s, pos-1, pos) result.append(res) continue @@ -883,7 +883,7 @@ else: pos += 1 msg = "unexpected special character" - res, pos = errorhandler(errors, 'utf-7', msg, s, pos-1, pos) + 
res, pos = errorhandler(errors, 'utf7', msg, s, pos-1, pos) result.append(res) # end of string @@ -894,7 +894,7 @@ base64bits >= 6 or (base64bits > 0 and base64buffer != 0)): msg = "unterminated shift sequence" - res, pos = errorhandler(errors, 'utf-7', msg, s, shiftOutStartPos, pos) + res, pos = errorhandler(errors, 'utf7', msg, s, shiftOutStartPos, pos) result.append(res) elif inShift: pos = shiftOutStartPos # back off output From noreply at buildbot.pypy.org Sat Jan 18 15:35:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 18 Jan 2014 15:35:35 +0100 (CET) Subject: [pypy-commit] stmgc c7: in-progress Message-ID: <20140118143535.A95081C153F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r640:d1bebba84ce9 Date: 2014-01-18 15:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/d1bebba84ce9/ Log: in-progress diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -219,6 +219,11 @@ return o; } +object_t *stm_allocate_prebuilt(size_t size) +{ + return _stm_allocate_old(size); /* XXX */ +} + static void _stm_privatize(uintptr_t pagenum) { diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -1,6 +1,7 @@ #ifndef _STM_CORE_H #define _STM_CORE_H +#include #include #include @@ -117,6 +118,8 @@ bool _stm_is_young(object_t *o); object_t *_stm_allocate_old(size_t size); +object_t *stm_allocate_prebuilt(size_t size); + void _stm_start_safe_point(void); void _stm_stop_safe_point(void); diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -1,11 +1,16 @@ + +C7SOURCES = ../c7/core.c \ + ../c7/pagecopy.c \ + ../c7/list.c \ + ../c7/reader_writer_lock.c all: duhton_debug duhton duhton: *.c *.h ../c4/*.c ../c4/*.h - gcc -pthread -g -O2 -o duhton *.c ../c4/stmgc.c -Wall -lrt + clang -pthread -g -O2 -o duhton *.c $(C7SOURCES) -Wall duhton_debug: *.c *.h ../c4/*.c ../c4/*.h - gcc -pthread -g -DDu_DEBUG -D_GC_DEBUGPRINTS=1 -DGC_NURSERY=2048 -o duhton_debug *.c ../c4/stmgc.c -Wall -lrt + clang -pthread -g -DDu_DEBUG -o duhton_debug *.c $(C7SOURCES) -Wall clean: rm -f duhton duhton_debug diff --git a/duhton/README b/duhton/README --- a/duhton/README +++ b/duhton/README @@ -16,3 +16,12 @@ There are demos: try "time duhton demo/many_square_roots.duh". For more general information see the PAPERS file. 
+ + + + + +XXX +=== + +* remove _du_read1() on immutable objects diff --git a/duhton/consobject.c b/duhton/consobject.c --- a/duhton/consobject.c +++ b/duhton/consobject.c @@ -1,10 +1,10 @@ #include "duhton.h" -void cons_trace(DuConsObject *ob, void visit(gcptr *)) +void cons_trace(struct DuConsObject_s *ob, void visit(object_t **)) { - visit(&ob->car); - visit(&ob->cdr); + visit((object_t **)&ob->car); + visit((object_t **)&ob->cdr); } void cons_print(DuConsObject *ob) diff --git a/duhton/containerobject.c b/duhton/containerobject.c --- a/duhton/containerobject.c +++ b/duhton/containerobject.c @@ -1,14 +1,14 @@ #include "duhton.h" -typedef struct { - DuOBJECT_HEAD +typedef TLPREFIX struct DuContainerObject_s { + DuOBJECT_HEAD1 DuObject *ob_reference; } DuContainerObject; -void container_trace(DuContainerObject *ob, void visit(gcptr *)) +void container_trace(struct DuContainerObject_s *ob, void visit(object_t **)) { - visit(&ob->ob_reference); + visit((object_t **)&ob->ob_reference); } void container_print(DuContainerObject *ob) diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -1,3 +1,4 @@ +#include #include "duhton.h" #define DEFAULT_NUM_THREADS 4 diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -1,18 +1,20 @@ #ifndef _DUHTON_H_ #define _DUHTON_H_ -#include "../c4/stmgc.h" -#include "../c4/fprintcolor.h" +#include "../c7/core.h" #include #include #include -typedef struct stm_object_s DuObject; +struct DuObject_s { + struct object_s header; + uint32_t type_id; +}; +typedef TLPREFIX struct DuObject_s DuObject; -#define DuOBJECT_HEAD DuObject ob_base; -#define DuOBJECT_HEAD_INIT(type) { type | PREBUILT_FLAGS, PREBUILT_REVISION } +#define DuOBJECT_HEAD1 DuObject ob_base; #ifdef __GNUC__ @@ -22,8 +24,8 @@ #endif -typedef void(*trace_fn)(DuObject *, void visit(gcptr *)); -typedef size_t(*bytesize_fn)(DuObject *); +typedef void(*trace_fn)(struct DuObject_s *, void visit(object_t **)); +typedef size_t(*bytesize_fn)(struct DuObject_s *); typedef void(*print_fn)(DuObject *); typedef DuObject *(*eval_fn)(DuObject *, DuObject *); typedef int(*len_fn)(DuObject *); @@ -71,10 +73,9 @@ int DuObject_Length(DuObject *ob); -extern DuObject _Du_NoneStruct; -#define Du_None (&_Du_NoneStruct) +extern DuObject *Du_None; -#define _DuObject_TypeNum(ob) stm_get_tid((DuObject*)(ob)) +#define _DuObject_TypeNum(ob) (((DuObject*)(ob))->type_id) #define Du_TYPE(ob) (Du_Types[_DuObject_TypeNum(ob)]) #define DuInt_Check(ob) (_DuObject_TypeNum(ob) == DUTYPE_INT) #define DuSymbol_Check(ob) (_DuObject_TypeNum(ob) == DUTYPE_SYMBOL) @@ -107,9 +108,10 @@ DuObject *DuSymbol_FromString(const char *name); char *DuSymbol_AsString(DuObject *ob); +int DuSymbol_Id(DuObject *ob); -typedef struct { - DuOBJECT_HEAD +typedef TLPREFIX struct DuConsObject_s { + DuOBJECT_HEAD1 DuObject *car, *cdr; } DuConsObject; @@ -136,11 +138,10 @@ DuObject *arglist, DuObject *progn); DuObject *_DuFrame_EvalCall(DuObject *frame, DuObject *symbol, DuObject *rest, int execute_now); -DuObject *_Du_GetGlobals(void); void Du_Initialize(int); void Du_Finalize(void); -#define Du_Globals (_Du_GetGlobals()) +extern DuObject *Du_Globals; void Du_TransactionAdd(DuObject *code, DuObject *frame); void Du_TransactionRun(void); @@ -160,23 +161,23 @@ p2 = (typeof(p2))_pop_root(), \ p1 = (typeof(p1))_pop_root()) -#define _du_read1(p1) (p1 = (typeof(p1))stm_read_barrier((DuObject *)(p1))) -#define _du_write1(p1) (p1 = (typeof(p1))stm_write_barrier((DuObject *)(p1))) +#define 
_du_read1(p1) stm_read((object_t *)(p1)) +#define _du_write1(p1) stm_write((object_t *)(p1)) #ifdef NDEBUG -# define _push_root(ob) stm_push_root(ob) +# define _push_root(ob) stm_push_root((object_t *)ob) # define _pop_root() stm_pop_root() #else # define _check_not_free(ob) \ - assert(stm_get_tid((DuObject *)(ob)) > DUTYPE_INVALID && \ - stm_get_tid((DuObject *)(ob)) < _DUTYPE_TOTAL) -static inline void _push_root(gcptr ob) { + assert(_DuObject_TypeNum(ob) > DUTYPE_INVALID && \ + _DuObject_TypeNum(ob) < _DUTYPE_TOTAL) +static inline void _push_root(DuObject *ob) { if (ob) _check_not_free(ob); - stm_push_root(ob); + stm_push_root((object_t *)ob); } -static inline gcptr _pop_root(void) { - gcptr ob = stm_pop_root(); +static inline object_t *_pop_root(void) { + object_t *ob = stm_pop_root(); if (ob) _check_not_free(ob); return ob; } diff --git a/duhton/frame.c b/duhton/frame.c --- a/duhton/frame.c +++ b/duhton/frame.c @@ -1,70 +1,74 @@ #include "duhton.h" #include -struct dictentry { - revision_t symbol_id; +typedef TLPREFIX struct dictentry_s { + int symbol_id; DuObject *symbol; DuObject *value; eval_fn builtin_macro; DuObject *func_arglist; DuObject *func_progn; -}; +} dictentry_t; -typedef struct { - DuOBJECT_HEAD +typedef TLPREFIX struct DuFrameNodeObject_s { + DuOBJECT_HEAD1 int ob_count; - struct dictentry ob_items[1]; + struct dictentry_s ob_items[1]; } DuFrameNodeObject; -void framenode_trace(DuFrameNodeObject *ob, void visit(gcptr *)) + +void framenode_trace(struct DuFrameNodeObject_s *ob, void visit(object_t **)) { int i; for (i=ob->ob_count-1; i>=0; i--) { - struct dictentry *e = &ob->ob_items[i]; - visit(&e->symbol); - visit(&e->value); - visit(&e->func_arglist); - visit(&e->func_progn); + struct dictentry_s *e = &ob->ob_items[i]; + visit((object_t **)&e->symbol); + visit((object_t **)&e->value); + visit((object_t **)&e->func_arglist); + visit((object_t **)&e->func_progn); } } -size_t framenode_bytesize(DuFrameNodeObject *ob) +size_t framenode_bytesize(struct DuFrameNodeObject_s *ob) { return (sizeof(DuFrameNodeObject) + - (ob->ob_count - 1) * sizeof(struct dictentry)); + (ob->ob_count - 1) * sizeof(struct dictentry_s)); } -typedef struct { - DuOBJECT_HEAD +typedef TLPREFIX struct DuFrameObject_s { + DuOBJECT_HEAD1 DuFrameNodeObject *ob_nodes; } DuFrameObject; -static DuFrameNodeObject du_empty_framenode = { - DuOBJECT_HEAD_INIT(DUTYPE_FRAMENODE), - 0, -}; +DuObject *Du_Globals; +static DuFrameNodeObject *du_empty_framenode; -DuFrameObject Du_GlobalsFrame = { - DuOBJECT_HEAD_INIT(DUTYPE_FRAME), - &du_empty_framenode, -}; +void init_prebuilt_frame_objects(void) +{ + du_empty_framenode = (DuFrameNodeObject *) + stm_allocate_prebuilt(sizeof(DuFrameNodeObject)); + du_empty_framenode->ob_base.type_id = DUTYPE_FRAMENODE; + du_empty_framenode->ob_count = 0; -DuObject *_Du_GetGlobals() -{ - return (DuObject *)&Du_GlobalsFrame; + DuFrameObject *g = (DuFrameObject *) + stm_allocate_prebuilt(sizeof(DuFrameObject)); + g->ob_base.type_id = DUTYPE_FRAME; + g->ob_nodes = du_empty_framenode; + Du_Globals = (DuObject *)g; } DuObject *DuFrame_New() { DuFrameObject *ob = (DuFrameObject *)DuObject_New(&DuFrame_Type); - ob->ob_nodes = &du_empty_framenode; + ob->ob_nodes = du_empty_framenode; return (DuObject *)ob; } #if 0 DuObject *DuFrame_Copy(DuObject *frame) { + XXX fix or kill DuFrame_Ensure("DuFrame_Copy", frame); int i; DuFrameObject *src = (DuFrameObject *)frame; @@ -84,9 +88,9 @@ } #endif -void frame_trace(DuFrameObject *ob, void visit(gcptr *)) +void frame_trace(struct DuFrameObject_s *ob, 
void visit(object_t **)) { - visit((gcptr *)&ob->ob_nodes); + visit((object_t **)&ob->ob_nodes); } void frame_print(DuFrameObject *ob) @@ -94,7 +98,7 @@ printf(""); } -static struct dictentry * +static dictentry_t * find_entry(DuFrameObject *frame, DuObject *symbol, int write_mode) { _du_read1(frame); @@ -103,21 +107,21 @@ _du_read1(ob); int left = 0; int right = ob->ob_count; - struct dictentry *entries = ob->ob_items; - revision_t search_id = stm_id(symbol); + dictentry_t *entries = ob->ob_items; + int search_id = DuSymbol_Id(symbol); #if 0 #ifdef _GC_DEBUG int j; for (j = 0; j < right; j++) { - dprintf(("\t%p\n", (gcptr)entries[j].symbol_id)); + dprintf(("\t%d\n", entries[j].symbol_id)); } #endif #endif while (right > left) { int middle = (left + right) / 2; - revision_t found_id = entries[middle].symbol_id; + int found_id = entries[middle].symbol_id; if (search_id < found_id) right = middle; else if (search_id == found_id) { @@ -137,22 +141,25 @@ else { int i; size_t size = (sizeof(DuFrameNodeObject) + - (ob->ob_count + 1 - 1)*sizeof(struct dictentry)); + (ob->ob_count + 1 - 1)*sizeof(dictentry_t)); DuFrameNodeObject *newob; _du_save3(ob, symbol, frame); - newob = (DuFrameNodeObject *)stm_allocate(size, DUTYPE_FRAMENODE); + newob = (DuFrameNodeObject *)stm_allocate(size); + newob->ob_base.type_id = DUTYPE_FRAMENODE; _du_restore3(ob, symbol, frame); newob->ob_count = ob->ob_count + 1; - struct dictentry *newentries = newob->ob_items; + dictentry_t *newentries = newob->ob_items; entries = ob->ob_items; for (i=0; ibuiltin_macro = func; } @@ -220,7 +227,7 @@ DuObject *_DuFrame_EvalCall(DuObject *frame, DuObject *symbol, DuObject *rest, int execute_now) { - struct dictentry *e; + dictentry_t *e; DuFrame_Ensure("_DuFrame_EvalCall", frame); e = find_entry((DuFrameObject *)frame, symbol, 0); @@ -269,7 +276,7 @@ DuObject *DuFrame_GetSymbol(DuObject *frame, DuObject *symbol) { - struct dictentry *e; + dictentry_t *e; DuFrame_Ensure("DuFrame_GetSymbol", frame); e = find_entry((DuFrameObject *)frame, symbol, 0); @@ -278,7 +285,7 @@ void DuFrame_SetSymbol(DuObject *frame, DuObject *symbol, DuObject *value) { - struct dictentry *e; + dictentry_t *e; DuFrame_Ensure("DuFrame_SetSymbol", frame); _du_save1(value); @@ -300,7 +307,7 @@ void DuFrame_SetUserFunction(DuObject *frame, DuObject *symbol, DuObject *arglist, DuObject *progn) { - struct dictentry *e; + dictentry_t *e; DuFrame_Ensure("DuFrame_SetUserFunction", frame); _du_save2(arglist, progn); diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -609,9 +609,14 @@ return Du_None; } +extern void init_prebuilt_frame_objects(void); + void Du_Initialize(int num_threads) { stm_initialize(); + + init_prebuilt_frame_objects(); + all_threads_count = num_threads; all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads); diff --git a/duhton/listobject.c b/duhton/listobject.c --- a/duhton/listobject.c +++ b/duhton/listobject.c @@ -185,10 +185,7 @@ (len_fn)list_length, }; -static DuTupleObject du_empty_tuple = { - DuOBJECT_HEAD_INIT(DUTYPE_TUPLE), - 0, -}; +static DuTupleObject *du_empty_tuple; DuObject *DuList_New() { diff --git a/duhton/object.c b/duhton/object.c --- a/duhton/object.c +++ b/duhton/object.c @@ -17,21 +17,22 @@ /* callback: get the size of an object */ -size_t stmcb_size(gcptr obj) +size_t stmcb_size(struct object_s *obj) { - DuType *tp = Du_TYPE(obj); + DuType *tp = Du_Types[((struct DuObject_s *)obj)->type_id]; size_t result = tp->dt_size; if (result == 0) - result = tp->dt_bytesize(obj); + result = 
tp->dt_bytesize((struct DuObject_s *)obj); return result; } /* callback: trace the content of an object */ -void stmcb_trace(gcptr obj, void visit(gcptr *)) +void stmcb_trace(struct object_s *obj, void visit(object_t **)) { - trace_fn trace = Du_TYPE(obj)->dt_trace; + DuType *tp = Du_Types[((struct DuObject_s *)obj)->type_id]; + trace_fn trace = tp->dt_trace; if (trace) - trace(obj, visit); + trace((struct DuObject_s *)obj, visit); } diff --git a/duhton/symbol.c b/duhton/symbol.c --- a/duhton/symbol.c +++ b/duhton/symbol.c @@ -4,6 +4,7 @@ typedef struct _Du_Symbol { DuOBJECT_HEAD + int myid; char *name; struct _Du_Symbol *next; } DuSymbolObject; @@ -50,6 +51,9 @@ (eval_fn)symbol_eval, }; + +static int next_id = 1; + DuObject *DuSymbol_FromString(const char *name) { DuSymbolObject *p, *head = &_Du_AllSymbols; @@ -61,6 +65,7 @@ } p = (DuSymbolObject *)DuObject_New(&DuSymbol_Type); p->name = strdup(name); + p->myid = __sync_fetch_and_add(&next_id, 1); _du_write1(head); p->next = head->next; @@ -76,6 +81,12 @@ return ((DuSymbolObject *)ob)->name; } +int DuSymbol_Id(DuObject *ob) +{ + DuSymbol_Ensure("DuSymbol_Id", ob); + return ((DuSymbolObject *)ob)->id; +} + void DuSymbol_Ensure(char *where, DuObject *ob) { if (!DuSymbol_Check(ob)) From noreply at buildbot.pypy.org Sat Jan 18 16:48:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 18 Jan 2014 16:48:11 +0100 (CET) Subject: [pypy-commit] stmgc c7: getting closer Message-ID: <20140118154811.4EC4C1C153F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r641:1de3c5245ed2 Date: 2014-01-18 16:29 +0100 http://bitbucket.org/pypy/stmgc/changeset/1de3c5245ed2/ Log: getting closer diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -124,7 +124,8 @@ void _stm_stop_safe_point(void); void stm_abort_transaction(void); + +#define stm_become_inevitable(msg) /* XXX implement me! 
*/ + + #endif - - - diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -42,17 +42,22 @@ printf("))) "); fflush(stdout); } + stm_start_transaction(NULL); DuObject *code = Du_Compile(filename, interactive); + stm_stop_transaction(); if (code == NULL) { printf("\n"); break; } /*Du_Print(code, 1); printf("\n");*/ + stm_start_transaction(NULL); DuObject *res = Du_Eval(code, Du_Globals); if (interactive) { Du_Print(res, 1); } + stm_stop_transaction(); + Du_TransactionRun(); if (!interactive) break; diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -610,16 +610,30 @@ } extern void init_prebuilt_frame_objects(void); +extern void init_prebuilt_list_objects(void); +extern void init_prebuilt_object_objects(void); +extern void init_prebuilt_symbol_objects(void); +extern void init_prebuilt_transaction_objects(void); void Du_Initialize(int num_threads) { - stm_initialize(); + assert(num_threads == 2); + stm_setup(); + stm_setup_thread(); + stm_setup_thread(); + _stm_restore_local_state(0); + + init_prebuilt_object_objects(); + init_prebuilt_symbol_objects(); + init_prebuilt_list_objects(); init_prebuilt_frame_objects(); + init_prebuilt_transaction_objects(); all_threads_count = num_threads; all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads); + stm_start_transaction(NULL); DuFrame_SetBuiltinMacro(Du_Globals, "progn", Du_Progn); DuFrame_SetBuiltinMacro(Du_Globals, "setq", du_setq); DuFrame_SetBuiltinMacro(Du_Globals, "print", du_print); @@ -655,9 +669,16 @@ DuFrame_SetBuiltinMacro(Du_Globals, "pair?", du_pair); DuFrame_SetBuiltinMacro(Du_Globals, "assert", du_assert); DuFrame_SetSymbolStr(Du_Globals, "None", Du_None); + stm_stop_transaction(); } void Du_Finalize(void) { - stm_finalize(); + _stm_restore_local_state(1); + _stm_teardown_thread(); + + _stm_restore_local_state(0); + _stm_teardown_thread(); + + _stm_teardown(); } diff --git a/duhton/intobject.c b/duhton/intobject.c --- a/duhton/intobject.c +++ b/duhton/intobject.c @@ -1,7 +1,7 @@ #include "duhton.h" -typedef struct { - DuOBJECT_HEAD +typedef TLPREFIX struct DuIntObject_s { + DuOBJECT_HEAD1 int ob_intval; } DuIntObject; diff --git a/duhton/listobject.c b/duhton/listobject.c --- a/duhton/listobject.c +++ b/duhton/listobject.c @@ -5,34 +5,34 @@ /* 'tuple' objects are only used internally as the current items of 'list' objects */ -typedef struct { - DuOBJECT_HEAD +typedef TLPREFIX struct DuTupleObject_s { + DuOBJECT_HEAD1 int ob_count; DuObject *ob_items[1]; } DuTupleObject; -typedef struct { - DuOBJECT_HEAD +typedef TLPREFIX struct DuListObject_s { + DuOBJECT_HEAD1 DuTupleObject *ob_tuple; } DuListObject; -void tuple_trace(DuTupleObject *ob, void visit(gcptr *)) +void tuple_trace(struct DuTupleObject_s *ob, void visit(object_t **)) { int i; for (i=ob->ob_count-1; i>=0; i--) { - visit(&ob->ob_items[i]); + visit((object_t **)&ob->ob_items[i]); } } -size_t tuple_bytesize(DuTupleObject *ob) +size_t tuple_bytesize(struct DuTupleObject_s *ob) { return sizeof(DuTupleObject) + (ob->ob_count - 1) * sizeof(DuObject *); } -void list_trace(DuListObject *ob, void visit(gcptr *)) +void list_trace(struct DuListObject_s *ob, void visit(object_t **)) { - visit((gcptr *)&ob->ob_tuple); + visit((object_t **)&ob->ob_tuple); } void list_print(DuListObject *ob) @@ -68,7 +68,8 @@ { DuTupleObject *ob; size_t size = sizeof(DuTupleObject) + (length-1)*sizeof(DuObject *); - ob = (DuTupleObject *)stm_allocate(size, DUTYPE_TUPLE); + ob = (DuTupleObject *)stm_allocate(size); + 
ob->ob_base.type_id = DUTYPE_TUPLE; ob->ob_count = length; return ob; } @@ -187,10 +188,18 @@ static DuTupleObject *du_empty_tuple; +void init_prebuilt_list_objects(void) +{ + du_empty_tuple = (DuTupleObject *) + stm_allocate_prebuilt(sizeof(DuTupleObject)); + du_empty_tuple->ob_base.type_id = DUTYPE_TUPLE; + du_empty_tuple->ob_count = 0; +} + DuObject *DuList_New() { DuListObject *ob = (DuListObject *)DuObject_New(&DuList_Type); - ob->ob_tuple = &du_empty_tuple; + ob->ob_tuple = du_empty_tuple; return (DuObject *)ob; } diff --git a/duhton/object.c b/duhton/object.c --- a/duhton/object.c +++ b/duhton/object.c @@ -39,8 +39,9 @@ DuObject *DuObject_New(DuType *tp) { assert(tp->dt_size >= sizeof(DuObject)); - DuObject *ob = stm_allocate(tp->dt_size, tp->dt_typeindex); + DuObject *ob = (DuObject *)stm_allocate(tp->dt_size); assert(ob); + ob->type_id = tp->dt_typeindex; return ob; } @@ -64,8 +65,13 @@ none_is_true, }; -DuObject _Du_NoneStruct = - DuOBJECT_HEAD_INIT(DUTYPE_NONE); +DuObject *Du_None; + +void init_prebuilt_object_objects(void) +{ + Du_None = (DuObject *)stm_allocate_prebuilt(sizeof(DuObject)); + Du_None->type_id = DUTYPE_NONE; +} void Du_FatalError(char *msg, ...) { diff --git a/duhton/symbol.c b/duhton/symbol.c --- a/duhton/symbol.c +++ b/duhton/symbol.c @@ -2,22 +2,21 @@ #include #include "duhton.h" -typedef struct _Du_Symbol { - DuOBJECT_HEAD +typedef TLPREFIX struct DuSymbolObject_s DuSymbolObject; + +struct DuSymbolObject_s { + DuOBJECT_HEAD1 int myid; char *name; - struct _Du_Symbol *next; -} DuSymbolObject; + DuSymbolObject *next; +}; -static DuSymbolObject _Du_AllSymbols = { - DuOBJECT_HEAD_INIT(DUTYPE_SYMBOL), - "", - NULL}; +static DuSymbolObject *_Du_AllSymbols; -void symbol_trace(DuSymbolObject *ob, void visit(gcptr *)) +void symbol_trace(struct DuSymbolObject_s *ob, void visit(object_t **)) { - visit((gcptr *)&ob->next); + visit((object_t **)&ob->next); } void symbol_print(DuSymbolObject *ob) @@ -54,9 +53,19 @@ static int next_id = 1; +void init_prebuilt_symbol_objects(void) +{ + _Du_AllSymbols = (DuSymbolObject *) + stm_allocate_prebuilt(sizeof(DuSymbolObject)); + _Du_AllSymbols->ob_base.type_id = DUTYPE_SYMBOL; + _Du_AllSymbols->myid = 0; + _Du_AllSymbols->name = ""; + _Du_AllSymbols->next = NULL; +} + DuObject *DuSymbol_FromString(const char *name) { - DuSymbolObject *p, *head = &_Du_AllSymbols; + DuSymbolObject *p, *head = _Du_AllSymbols; for (p=head; p != NULL; p=p->next) { _du_read1(p); if (strcmp(name, p->name) == 0) { @@ -84,7 +93,7 @@ int DuSymbol_Id(DuObject *ob) { DuSymbol_Ensure("DuSymbol_Id", ob); - return ((DuSymbolObject *)ob)->id; + return ((DuSymbolObject *)ob)->myid; } void DuSymbol_Ensure(char *where, DuObject *ob) diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -3,14 +3,21 @@ #include -static DuConsObject du_pending_transactions = { - DuOBJECT_HEAD_INIT(DUTYPE_CONS), - NULL, - Du_None, +static DuConsObject *du_pending_transactions; + +void init_prebuilt_transaction_objects(void) +{ + assert(Du_None); /* already created */ + + du_pending_transactions = (DuConsObject *) + stm_allocate_prebuilt(sizeof(DuConsObject)); + du_pending_transactions->ob_base.type_id = DUTYPE_CONS; + du_pending_transactions->car = NULL; + du_pending_transactions->cdr = Du_None; }; static pthread_mutex_t mutex_sleep = PTHREAD_MUTEX_INITIALIZER; -static int thread_sleeping = 0; +static int volatile thread_sleeping = 0; static void *run_thread(void *); /* forward */ @@ -18,9 +25,12 @@ { int i; for (i = 0; i < 
all_threads_count; i++) { - int status = pthread_create(&all_threads[i], NULL, run_thread, NULL); - if (status != 0) - stm_fatalerror("status != 0\n"); + int status = pthread_create(&all_threads[i], NULL, run_thread, + (void *)(uintptr_t)i); + if (status != 0) { + fprintf(stderr, "status != 0\n"); + abort(); + } } for (i = 0; i < all_threads_count; i++) { pthread_join(all_threads[i], NULL); @@ -29,16 +39,19 @@ /************************************************************/ +__thread DuObject *stm_thread_local_obj = NULL; /* XXX temp */ + + void Du_TransactionAdd(DuObject *code, DuObject *frame) { DuObject *cell = DuCons_New(code, frame); - DuObject *pending = (DuObject *)stm_thread_local_obj; + DuObject *pending = stm_thread_local_obj; if (pending == NULL) { pending = Du_None; } pending = DuCons_New(cell, pending); - stm_thread_local_obj = (gcptr)pending; + stm_thread_local_obj = pending; } void Du_TransactionRun(void) @@ -46,27 +59,32 @@ if (stm_thread_local_obj == NULL) return; - DuConsObject *root = &du_pending_transactions; + stm_start_transaction(NULL); + DuConsObject *root = du_pending_transactions; _du_write1(root); root->cdr = stm_thread_local_obj; + stm_stop_transaction(); + stm_thread_local_obj = NULL; - stm_commit_transaction(); run_all_threads(); - stm_begin_inevitable_transaction(); } /************************************************************/ static DuObject *next_cell(void) { - DuObject *pending = (DuObject *)stm_thread_local_obj; + DuObject *pending = stm_thread_local_obj; + jmpbufptr_t here; if (pending == NULL) { /* fish from the global list of pending transactions */ DuConsObject *root; + while (__builtin_setjmp(here) == 1) { } restart: - root = &du_pending_transactions; + stm_start_transaction(&here); + + root = du_pending_transactions; _du_read1(root); if (root->cdr != Du_None) { @@ -77,22 +95,30 @@ DuObject *result = _DuCons_CAR(cell); root->cdr = _DuCons_NEXT(cell); + stm_stop_transaction(); + return result; } else { + stm_stop_transaction(); + /* nothing to do, wait */ - thread_sleeping++; - if (thread_sleeping == all_threads_count) { + int ts = __sync_add_and_fetch(&thread_sleeping, 1); + if (ts == all_threads_count) { pthread_mutex_unlock(&mutex_sleep); } - stm_commit_transaction(); pthread_mutex_lock(&mutex_sleep); - stm_begin_inevitable_transaction(); - if (thread_sleeping == all_threads_count) { - pthread_mutex_unlock(&mutex_sleep); - return NULL; + + while (1) { + ts = thread_sleeping; + if (ts == all_threads_count) { + pthread_mutex_unlock(&mutex_sleep); + return NULL; + } + assert(ts > 0); + if (__sync_bool_compare_and_swap(&thread_sleeping, ts, ts - 1)) + break; } - thread_sleeping--; goto restart; } } @@ -100,6 +126,9 @@ /* we have at least one thread-local transaction pending */ stm_thread_local_obj = NULL; + while (__builtin_setjmp(here) == 1) { } + stm_start_transaction(&here); + _du_read1(pending); DuObject *result = _DuCons_CAR(pending); DuObject *next = _DuCons_NEXT(pending); @@ -116,36 +145,43 @@ tail = tailnext; } - DuConsObject * root = &du_pending_transactions; + DuConsObject * root = du_pending_transactions; _du_write1(tail); _du_write1(root); ((DuConsObject *)tail)->cdr = root->cdr; root->cdr = next; } + stm_stop_transaction(); + return result; } -int run_transaction(gcptr cell, int retry_counter) +void run_transaction(DuObject *cell) { DuObject *code = DuCons_Car(cell); DuObject *frame = DuCons_Cdr(cell); Du_Progn(code, frame); - return 0; } -void *run_thread(void *ignored) +void *run_thread(void *thread_id) { - stm_initialize(); + 
jmpbufptr_t here; + int thread_num = (uintptr_t)thread_id; + _stm_restore_local_state(thread_num); + stm_thread_local_obj = NULL; while (1) { - /* we are inevitable here */ DuObject *cell = next_cell(); if (cell == NULL) break; - stm_perform_transaction(cell, run_transaction); + assert(stm_thread_local_obj == NULL); + + while (__builtin_setjmp(here) == 1) { } + stm_start_transaction(&here); + run_transaction(cell); + stm_stop_transaction(); } - stm_finalize(); return NULL; } From noreply at buildbot.pypy.org Sat Jan 18 16:48:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 18 Jan 2014 16:48:12 +0100 (CET) Subject: [pypy-commit] stmgc c7: tweaks Message-ID: <20140118154812.630211C153F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r642:65db74df8ff6 Date: 2014-01-18 16:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/65db74df8ff6/ Log: tweaks diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -19,7 +19,7 @@ #define NB_PAGES (256*256) // 256MB #define NB_THREADS 2 #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) -#define LARGE_OBJECT_WORDS 36 +#define LARGE_OBJECT_WORDS 220 // XXX was 36 #define NB_NURSERY_PAGES 1024 #define LENGTH_SHADOW_STACK 163840 diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -4,12 +4,15 @@ ../c7/list.c \ ../c7/reader_writer_lock.c +C7HEADERS = ../c7/*.h + + all: duhton_debug duhton -duhton: *.c *.h ../c4/*.c ../c4/*.h +duhton: *.c *.h $(C7SOURCES) $(C7HEADERS) clang -pthread -g -O2 -o duhton *.c $(C7SOURCES) -Wall -duhton_debug: *.c *.h ../c4/*.c ../c4/*.h +duhton_debug: *.c *.h $(C7SOURCES) $(C7HEADERS) clang -pthread -g -DDu_DEBUG -o duhton_debug *.c $(C7SOURCES) -Wall clean: diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -1,7 +1,7 @@ #include #include "duhton.h" -#define DEFAULT_NUM_THREADS 4 +#define DEFAULT_NUM_THREADS 2 int main(int argc, char **argv) { diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -67,6 +67,8 @@ extern DuType *Du_Types[_DUTYPE_TOTAL]; +#define ROUND_UP(size) ((size) < 16 ? 
16 : ((size) + 7) & ~7) + DuObject *DuObject_New(DuType *tp); int DuObject_IsTrue(DuObject *ob); diff --git a/duhton/object.c b/duhton/object.c --- a/duhton/object.c +++ b/duhton/object.c @@ -23,7 +23,7 @@ size_t result = tp->dt_size; if (result == 0) result = tp->dt_bytesize((struct DuObject_s *)obj); - return result; + return ROUND_UP(result); } /* callback: trace the content of an object */ From noreply at buildbot.pypy.org Sat Jan 18 17:13:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 18 Jan 2014 17:13:40 +0100 (CET) Subject: [pypy-commit] stmgc c7: fixes until minimal.duh works Message-ID: <20140118161340.0D0A71C0178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r643:d3f5c236e429 Date: 2014-01-18 17:13 +0100 http://bitbucket.org/pypy/stmgc/changeset/d3f5c236e429/ Log: fixes until minimal.duh works diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -543,6 +543,7 @@ item->stm_flags |= GCFLAG_WRITE_BARRIER; stmcb_trace(real_address(item), trace_if_young); + old_objs = _STM_TL2->old_objects_to_trace; } diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -44,7 +44,9 @@ } stm_start_transaction(NULL); DuObject *code = Du_Compile(filename, interactive); + _du_save1(code); stm_stop_transaction(); + _du_restore1(code); if (code == NULL) { printf("\n"); break; diff --git a/duhton/object.c b/duhton/object.c --- a/duhton/object.c +++ b/duhton/object.c @@ -39,7 +39,7 @@ DuObject *DuObject_New(DuType *tp) { assert(tp->dt_size >= sizeof(DuObject)); - DuObject *ob = (DuObject *)stm_allocate(tp->dt_size); + DuObject *ob = (DuObject *)stm_allocate(ROUND_UP(tp->dt_size)); assert(ob); ob->type_id = tp->dt_typeindex; return ob; From noreply at buildbot.pypy.org Sat Jan 18 17:33:18 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sat, 18 Jan 2014 17:33:18 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix warning Message-ID: <20140118163318.E297B1C0178@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r644:ccf30dfc88db Date: 2014-01-18 17:14 +0100 http://bitbucket.org/pypy/stmgc/changeset/ccf30dfc88db/ Log: fix warning diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -14,7 +14,7 @@ typedef TLPREFIX struct DuObject_s DuObject; -#define DuOBJECT_HEAD1 DuObject ob_base; +#define DuOBJECT_HEAD1 struct DuObject_s ob_base; #ifdef __GNUC__ From noreply at buildbot.pypy.org Sat Jan 18 17:33:19 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sat, 18 Jan 2014 17:33:19 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix Message-ID: <20140118163319.F42071C0178@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r645:53410d7b096d Date: 2014-01-18 17:33 +0100 http://bitbucket.org/pypy/stmgc/changeset/53410d7b096d/ Log: fix diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -19,7 +19,7 @@ #define NB_PAGES (256*256) // 256MB #define NB_THREADS 2 #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) -#define LARGE_OBJECT_WORDS 220 // XXX was 36 +#define LARGE_OBJECT_WORDS 230 // XXX was 36 #define NB_NURSERY_PAGES 1024 #define LENGTH_SHADOW_STACK 163840 diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -58,7 +58,9 @@ if (interactive) { Du_Print(res, 1); } + _du_save1(stm_thread_local_obj); stm_stop_transaction(); + _du_restore1(stm_thread_local_obj); Du_TransactionRun(); if (!interactive) diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ 
b/duhton/duhton.h @@ -188,4 +188,5 @@ extern pthread_t *all_threads; extern int all_threads_count; +extern __thread DuObject *stm_thread_local_obj; /* XXX temp */ #endif /* _DUHTON_H_ */ diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -64,6 +64,7 @@ _du_write1(root); root->cdr = stm_thread_local_obj; stm_stop_transaction(); + stm_thread_local_obj = NULL; run_all_threads(); From noreply at buildbot.pypy.org Sat Jan 18 17:36:19 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sat, 18 Jan 2014 17:36:19 +0100 (CET) Subject: [pypy-commit] stmgc c7: push limit even higher Message-ID: <20140118163619.907CE1C0178@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r646:034be314c0b8 Date: 2014-01-18 17:36 +0100 http://bitbucket.org/pypy/stmgc/changeset/034be314c0b8/ Log: push limit even higher diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -19,7 +19,7 @@ #define NB_PAGES (256*256) // 256MB #define NB_THREADS 2 #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) -#define LARGE_OBJECT_WORDS 230 // XXX was 36 +#define LARGE_OBJECT_WORDS 232 // XXX was 36 #define NB_NURSERY_PAGES 1024 #define LENGTH_SHADOW_STACK 163840 From noreply at buildbot.pypy.org Sat Jan 18 17:44:37 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sat, 18 Jan 2014 17:44:37 +0100 (CET) Subject: [pypy-commit] stmgc c7: add some safepoint somewhere Message-ID: <20140118164437.668C51C0178@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r647:2296dea545e3 Date: 2014-01-18 17:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/2296dea545e3/ Log: add some safepoint somewhere diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -570,6 +570,8 @@ object_t *stm_allocate(size_t size) { + _stm_start_safe_point(); + _stm_stop_safe_point(); assert(_STM_TL2->running_transaction); assert(size % 8 == 0); size_t i = size / 8; From noreply at buildbot.pypy.org Sat Jan 18 18:39:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 18 Jan 2014 18:39:57 +0100 (CET) Subject: [pypy-commit] cffi default: An extra test Message-ID: <20140118173957.E96991C0178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1455:22a6af557fbc Date: 2014-01-14 14:17 +0100 http://bitbucket.org/cffi/cffi/changeset/22a6af557fbc/ Log: An extra test diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1855,3 +1855,24 @@ def test_various_calls_libffi(): _test_various_calls(force_libffi=True) + +def test_ptr_to_opaque(): + ffi = FFI() + ffi.cdef("typedef ... 
foo_t; int f1(foo_t*); foo_t *f2(int);") + lib = ffi.verify(""" + #include + typedef struct { int x; } foo_t; + int f1(foo_t* p) { + int x = p->x; + free(p); + return x; + } + foo_t *f2(int x) { + foo_t *p = malloc(sizeof(foo_t)); + p->x = x; + return p; + } + """) + p = lib.f2(42) + x = lib.f1(p) + assert x == 42 From noreply at buildbot.pypy.org Sat Jan 18 18:39:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 18 Jan 2014 18:39:59 +0100 (CET) Subject: [pypy-commit] cffi default: Fix Message-ID: <20140118173959.1D1571C0178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1456:7eb548fec961 Date: 2014-01-18 18:39 +0100 http://bitbucket.org/cffi/cffi/changeset/7eb548fec961/ Log: Fix diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3785,6 +3785,7 @@ PyErr_Format(PyExc_TypeError, "field '%s.%s' is declared with :0", ct->ct_name, PyText_AS_UTF8(fname)); + goto error; } if (!(sflags & SF_MSVC_BITFIELDS)) { /* GCC's notion of "ftype :0;" */ From noreply at buildbot.pypy.org Sat Jan 18 21:23:21 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 18 Jan 2014 21:23:21 +0100 (CET) Subject: [pypy-commit] pypy annotator: Rename FlowSpaceFrame to FlowContext Message-ID: <20140118202321.F1DB61C11ED@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68764:5a3e82f87ef9 Date: 2014-01-18 20:07 +0000 http://bitbucket.org/pypy/pypy/changeset/5a3e82f87ef9/ Log: Rename FlowSpaceFrame to FlowContext diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -23,13 +23,13 @@ class FlowingError(Exception): """ Signals invalid RPython in the function being analysed""" - frame = None + ctx = None def __str__(self): msg = ["\n"] msg += map(str, self.args) msg += [""] - msg += source_lines(self.frame.graph, None, offset=self.frame.last_instr) + msg += source_lines(self.ctx.graph, None, offset=self.ctx.last_instr) return "\n".join(msg) class StopFlowing(Exception): @@ -116,7 +116,7 @@ def append(self, operation): raise NotImplementedError - def guessbool(self, frame, w_condition): + def guessbool(self, ctx, w_condition): raise AssertionError("cannot guessbool(%s)" % (w_condition,)) @@ -132,13 +132,13 @@ def append(self, operation): self.crnt_block.operations.append(operation) - def guessbool(self, frame, w_condition): + def guessbool(self, ctx, w_condition): block = self.crnt_block vars = block.getvariables() links = [] for case in [False, True]: egg = EggBlock(vars, block, case) - frame.pendingblocks.append(egg) + ctx.pendingblocks.append(egg) link = Link(vars, egg, case) links.append(link) @@ -150,7 +150,7 @@ # block.exits[True] = ifLink. 
raise StopFlowing - def guessexception(self, frame, *cases): + def guessexception(self, ctx, *cases): block = self.crnt_block bvars = vars = vars2 = block.getvariables() links = [] @@ -167,7 +167,7 @@ vars.extend([last_exc, last_exc_value]) vars2.extend([Variable(), Variable()]) egg = EggBlock(vars2, block, case) - frame.pendingblocks.append(egg) + ctx.pendingblocks.append(egg) link = Link(vars, egg, case) if case is not None: link.extravars(last_exception=last_exc, last_exc_value=last_exc_value) @@ -198,14 +198,14 @@ [str(s) for s in self.listtoreplay[self.index:]])) self.index += 1 - def guessbool(self, frame, w_condition): + def guessbool(self, ctx, w_condition): assert self.index == len(self.listtoreplay) - frame.recorder = self.nextreplayer + ctx.recorder = self.nextreplayer return self.booloutcome - def guessexception(self, frame, *classes): + def guessexception(self, ctx, *classes): assert self.index == len(self.listtoreplay) - frame.recorder = self.nextreplayer + ctx.recorder = self.nextreplayer outcome = self.booloutcome if outcome is not None: egg = self.nextreplayer.crnt_block @@ -305,7 +305,7 @@ "cmp_exc_match", ] -class FlowSpaceFrame(object): +class FlowContext(object): opcode_method_names = host_bytecode_spec.method_names def __init__(self, graph, code): @@ -320,7 +320,6 @@ self.last_instr = 0 self.init_locals_stack(code) - self.w_locals = None # XXX: only for compatibility with PyFrame self.joinpoints = {} @@ -402,7 +401,7 @@ return FrameState(data, self.blockstack[:], next_pos) def setstate(self, state): - """ Reset the frame to the given state. """ + """ Reset the context to the given frame state. """ data = state.mergeable[:] recursively_unflatten(data) self.restore_locals_stack(data[:-2]) # Nones == undefined locals @@ -490,8 +489,8 @@ self.recorder.crnt_block.closeblock(link) except FlowingError as exc: - if exc.frame is None: - exc.frame = self + if exc.ctx is None: + exc.ctx = self raise self.recorder = None @@ -1316,9 +1315,9 @@ """Abstract base class for frame blocks from the blockstack, used by the SETUP_XXX and POP_BLOCK opcodes.""" - def __init__(self, frame, handlerposition): + def __init__(self, ctx, handlerposition): self.handlerposition = handlerposition - self.valuestackdepth = frame.valuestackdepth + self.valuestackdepth = ctx.valuestackdepth def __eq__(self, other): return (self.__class__ is other.__class__ and @@ -1331,10 +1330,10 @@ def __hash__(self): return hash((self.handlerposition, self.valuestackdepth)) - def cleanupstack(self, frame): - frame.dropvaluesuntil(self.valuestackdepth) + def cleanupstack(self, ctx): + ctx.dropvaluesuntil(self.valuestackdepth) - def handle(self, frame, unroller): + def handle(self, ctx, unroller): raise NotImplementedError class LoopBlock(FrameBlock): @@ -1342,16 +1341,16 @@ handles = (Break, Continue) - def handle(self, frame, unroller): + def handle(self, ctx, unroller): if isinstance(unroller, Continue): # re-push the loop block without cleaning up the value stack, # and jump to the beginning of the loop, stored in the # exception's argument - frame.blockstack.append(self) + ctx.blockstack.append(self) return unroller.jump_to else: # jump to the end of the loop - self.cleanupstack(frame) + self.cleanupstack(ctx) return self.handlerposition class ExceptBlock(FrameBlock): @@ -1359,19 +1358,19 @@ handles = Raise - def handle(self, frame, unroller): + def handle(self, ctx, unroller): # push the exception to the value stack for inspection by the # exception handler (the code after the except:) - self.cleanupstack(frame) + 
self.cleanupstack(ctx) assert isinstance(unroller, Raise) w_exc = unroller.w_exc # the stack setup is slightly different than in CPython: # instead of the traceback, we store the unroller object, # wrapped. - frame.pushvalue(unroller) - frame.pushvalue(w_exc.w_value) - frame.pushvalue(w_exc.w_type) - frame.last_exception = w_exc + ctx.pushvalue(unroller) + ctx.pushvalue(w_exc.w_value) + ctx.pushvalue(w_exc.w_type) + ctx.last_exception = w_exc return self.handlerposition # jump to the handler class FinallyBlock(FrameBlock): @@ -1379,15 +1378,15 @@ handles = FlowSignal - def handle(self, frame, unroller): + def handle(self, ctx, unroller): # any abnormal reason for unrolling a finally: triggers the end of # the block unrolling and the entering the finally: handler. - self.cleanupstack(frame) - frame.pushvalue(unroller) + self.cleanupstack(ctx) + ctx.pushvalue(unroller) return self.handlerposition # jump to the handler class WithBlock(FinallyBlock): - def handle(self, frame, unroller): - return FinallyBlock.handle(self, frame, unroller) + def handle(self, ctx, unroller): + return FinallyBlock.handle(self, ctx, unroller) diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -6,7 +6,7 @@ from rpython.flowspace.model import Variable, checkgraph from rpython.flowspace.bytecode import HostCode -from rpython.flowspace.flowcontext import (FlowSpaceFrame, fixeggblocks) +from rpython.flowspace.flowcontext import (FlowContext, fixeggblocks) from rpython.flowspace.generator import (tweak_generator_graph, bootstrap_generator) from rpython.flowspace.pygraph import PyGraph @@ -38,8 +38,8 @@ w_value.rename(name) return bootstrap_generator(graph) graph = PyGraph(func, code) - frame = FlowSpaceFrame(graph, code) - frame.build_flow() + ctx = FlowContext(graph, code) + ctx.build_flow() fixeggblocks(graph) checkgraph(graph) if code.is_generator: diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -76,15 +76,15 @@ @classmethod def make_sc(cls): - def sc_operator(frame, *args_w): - return cls(*args_w).eval(frame) + def sc_operator(ctx, *args_w): + return cls(*args_w).eval(ctx) return sc_operator - def eval(self, frame): + def eval(self, ctx): result = self.constfold() if result is not None: return result - return frame.do_op(self) + return ctx.do_op(self) def constfold(self): return None @@ -433,7 +433,7 @@ canraise = [] pyfunc = staticmethod(next) - def eval(self, frame): + def eval(self, ctx): w_iter, = self.args if isinstance(w_iter, Constant): it = w_iter.value @@ -444,10 +444,10 @@ from rpython.flowspace.flowcontext import Raise raise Raise(const(StopIteration())) else: - frame.replace_in_stack(it, next_unroller) + ctx.replace_in_stack(it, next_unroller) return const(v) - w_item = frame.do_op(self) - frame.guessexception([StopIteration, RuntimeError], force=True) + w_item = ctx.do_op(self) + ctx.guessexception([StopIteration, RuntimeError], force=True) return w_item class GetAttr(SingleDispatchMixin, HLOperation): @@ -496,7 +496,7 @@ class SimpleCall(SingleDispatchMixin, CallOp): opname = 'simple_call' - def eval(self, frame): + def eval(self, ctx): w_callable, args_w = self.args[0], self.args[1:] if isinstance(w_callable, Constant): fn = w_callable.value @@ -505,14 +505,14 @@ except (KeyError, TypeError): pass else: - return sc(frame, *args_w) - return frame.do_op(self) + return sc(ctx, *args_w) + return 
ctx.do_op(self) class CallArgs(SingleDispatchMixin, CallOp): opname = 'call_args' - def eval(self, frame): + def eval(self, ctx): w_callable = self.args[0] if isinstance(w_callable, Constant): fn = w_callable.value @@ -524,7 +524,7 @@ from rpython.flowspace.flowcontext import FlowingError raise FlowingError( "should not call %r with keyword arguments" % (fn,)) - return frame.do_op(self) + return ctx.do_op(self) # Other functions that get directly translated to SpaceOperators diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -7,7 +7,7 @@ """Decorator triggering special-case handling of ``func``. When the flow graph builder sees ``func``, it calls the decorated function - with ``decorated_func(frame, *args_w)``, where ``args_w`` is a sequence of + with ``decorated_func(ctx, *args_w)``, where ``args_w`` is a sequence of flow objects (Constants or Variables). """ def decorate(sc_func): @@ -15,10 +15,10 @@ return decorate @register_flow_sc(__import__) -def sc_import(frame, *args_w): +def sc_import(ctx, *args_w): assert all(isinstance(arg, Constant) for arg in args_w) args = [arg.value for arg in args_w] - return frame.import_name(*args) + return ctx.import_name(*args) @register_flow_sc(locals) def sc_locals(_, *args): @@ -31,34 +31,34 @@ "own project.") @register_flow_sc(isinstance) -def sc_isinstance(frame, w_instance, w_type): +def sc_isinstance(ctx, w_instance, w_type): if w_instance.foldable() and w_type.foldable(): return const(isinstance(w_instance.value, w_type.value)) - return frame.appcall(isinstance, w_instance, w_type) + return ctx.appcall(isinstance, w_instance, w_type) @register_flow_sc(getattr) -def sc_getattr(frame, w_obj, w_index, w_default=None): +def sc_getattr(ctx, w_obj, w_index, w_default=None): if w_default is not None: - return frame.appcall(getattr, w_obj, w_index, w_default) + return ctx.appcall(getattr, w_obj, w_index, w_default) else: from rpython.flowspace.operation import op - return op.getattr(w_obj, w_index).eval(frame) + return op.getattr(w_obj, w_index).eval(ctx) @register_flow_sc(open) -def sc_open(frame, *args_w): +def sc_open(ctx, *args_w): from rpython.rlib.rfile import create_file - return frame.appcall(create_file, *args_w) + return ctx.appcall(create_file, *args_w) @register_flow_sc(os.tmpfile) -def sc_os_tmpfile(frame): +def sc_os_tmpfile(ctx): from rpython.rlib.rfile import create_temp_rfile - return frame.appcall(create_temp_rfile) + return ctx.appcall(create_temp_rfile) @register_flow_sc(os.remove) -def sc_os_remove(frame, *args_w): +def sc_os_remove(ctx, *args_w): # on top of PyPy only: 'os.remove != os.unlink' # (on CPython they are '==', but not identical either) - return frame.appcall(os.unlink, *args_w) + return ctx.appcall(os.unlink, *args_w) # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs diff --git a/rpython/flowspace/test/test_framestate.py b/rpython/flowspace/test/test_framestate.py --- a/rpython/flowspace/test/test_framestate.py +++ b/rpython/flowspace/test/test_framestate.py @@ -1,96 +1,96 @@ from rpython.flowspace.model import * from rpython.rlib.unroll import SpecTag -from rpython.flowspace.flowcontext import FlowSpaceFrame +from rpython.flowspace.flowcontext import FlowContext from rpython.flowspace.bytecode import HostCode from rpython.flowspace.pygraph import PyGraph class TestFrameState: - def getframe(self, func): + def 
get_context(self, func): try: func = func.im_func except AttributeError: pass code = HostCode._from_code(func.func_code) graph = PyGraph(func, code) - frame = FlowSpaceFrame(graph, code) + ctx = FlowContext(graph, code) # hack the frame - frame.setstate(graph.startblock.framestate) - frame.locals_stack_w[frame.pycode.co_nlocals-1] = Constant(None) - return frame + ctx.setstate(graph.startblock.framestate) + ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Constant(None) + return ctx def func_simple(x): spam = 5 return spam def test_eq_framestate(self): - frame = self.getframe(self.func_simple) - fs1 = frame.getstate(0) - fs2 = frame.getstate(0) + ctx = self.get_context(self.func_simple) + fs1 = ctx.getstate(0) + fs2 = ctx.getstate(0) assert fs1 == fs2 def test_neq_hacked_framestate(self): - frame = self.getframe(self.func_simple) - fs1 = frame.getstate(0) - frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() - fs2 = frame.getstate(0) + ctx = self.get_context(self.func_simple) + fs1 = ctx.getstate(0) + ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Variable() + fs2 = ctx.getstate(0) assert fs1 != fs2 def test_union_on_equal_framestates(self): - frame = self.getframe(self.func_simple) - fs1 = frame.getstate(0) - fs2 = frame.getstate(0) + ctx = self.get_context(self.func_simple) + fs1 = ctx.getstate(0) + fs2 = ctx.getstate(0) assert fs1.union(fs2) == fs1 def test_union_on_hacked_framestates(self): - frame = self.getframe(self.func_simple) - fs1 = frame.getstate(0) - frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() - fs2 = frame.getstate(0) + ctx = self.get_context(self.func_simple) + fs1 = ctx.getstate(0) + ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Variable() + fs2 = ctx.getstate(0) assert fs1.union(fs2) == fs2 # fs2 is more general assert fs2.union(fs1) == fs2 # fs2 is more general def test_restore_frame(self): - frame = self.getframe(self.func_simple) - fs1 = frame.getstate(0) - frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() - frame.setstate(fs1) - assert fs1 == frame.getstate(0) + ctx = self.get_context(self.func_simple) + fs1 = ctx.getstate(0) + ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Variable() + ctx.setstate(fs1) + assert fs1 == ctx.getstate(0) def test_copy(self): - frame = self.getframe(self.func_simple) - fs1 = frame.getstate(0) + ctx = self.get_context(self.func_simple) + fs1 = ctx.getstate(0) fs2 = fs1.copy() assert fs1 == fs2 def test_getvariables(self): - frame = self.getframe(self.func_simple) - fs1 = frame.getstate(0) + ctx = self.get_context(self.func_simple) + fs1 = ctx.getstate(0) vars = fs1.getvariables() assert len(vars) == 1 def test_getoutputargs(self): - frame = self.getframe(self.func_simple) - fs1 = frame.getstate(0) - frame.locals_stack_w[frame.pycode.co_nlocals-1] = Variable() - fs2 = frame.getstate(0) + ctx = self.get_context(self.func_simple) + fs1 = ctx.getstate(0) + ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Variable() + fs2 = ctx.getstate(0) outputargs = fs1.getoutputargs(fs2) # 'x' -> 'x' is a Variable # locals_w[n-1] -> locals_w[n-1] is Constant(None) - assert outputargs == [frame.locals_stack_w[0], Constant(None)] + assert outputargs == [ctx.locals_stack_w[0], Constant(None)] def test_union_different_constants(self): - frame = self.getframe(self.func_simple) - fs1 = frame.getstate(0) - frame.locals_stack_w[frame.pycode.co_nlocals-1] = Constant(42) - fs2 = frame.getstate(0) + ctx = self.get_context(self.func_simple) + fs1 = ctx.getstate(0) + ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Constant(42) + fs2 = 
ctx.getstate(0) fs3 = fs1.union(fs2) - frame.setstate(fs3) - assert isinstance(frame.locals_stack_w[frame.pycode.co_nlocals-1], + ctx.setstate(fs3) + assert isinstance(ctx.locals_stack_w[ctx.pycode.co_nlocals-1], Variable) # generalized def test_union_spectag(self): - frame = self.getframe(self.func_simple) - fs1 = frame.getstate(0) - frame.locals_stack_w[frame.pycode.co_nlocals-1] = Constant(SpecTag()) - fs2 = frame.getstate(0) + ctx = self.get_context(self.func_simple) + fs1 = ctx.getstate(0) + ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Constant(SpecTag()) + fs2 = ctx.getstate(0) assert fs1.union(fs2) is None # UnionError diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -7,7 +7,7 @@ Constant, mkentrymap, c_last_exception, const) from rpython.translator.simplify import simplify_graph from rpython.flowspace.objspace import build_flow -from rpython.flowspace.flowcontext import FlowingError, FlowSpaceFrame +from rpython.flowspace.flowcontext import FlowingError, FlowContext from rpython.conftest import option from rpython.tool.stdlib_opcode import host_bytecode_spec @@ -54,7 +54,7 @@ def test_all_opcodes_defined(): opnames = set(host_bytecode_spec.method_names) - methods = set([name for name in dir(FlowSpaceFrame) if name.upper() == name]) + methods = set([name for name in dir(FlowContext) if name.upper() == name]) handled_elsewhere = set(['EXTENDED_ARG']) missing = opnames - methods - handled_elsewhere assert not missing diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -282,7 +282,7 @@ return False @register_flow_sc(we_are_translated) -def sc_we_are_translated(frame): +def sc_we_are_translated(ctx): return Constant(True) diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -516,12 +516,12 @@ r_uint = build_int('r_uint', False, LONG_BIT) @register_flow_sc(r_uint) -def sc_r_uint(frame, w_value): +def sc_r_uint(ctx, w_value): # (normally, the 32-bit constant is a long, and is not allowed to # show up in the flow graphs at all) if isinstance(w_value, Constant): return Constant(r_uint(w_value.value)) - return frame.appcall(r_uint, w_value) + return ctx.appcall(r_uint, w_value) r_longlong = build_int('r_longlong', True, 64) From noreply at buildbot.pypy.org Sun Jan 19 10:49:10 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sun, 19 Jan 2014 10:49:10 +0100 (CET) Subject: [pypy-commit] stmgc c7: pushing some roots in duhton and reset shadowstack on abort Message-ID: <20140119094910.467191C0356@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r648:1df99f66dc6d Date: 2014-01-19 10:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/1df99f66dc6d/ Log: pushing some roots in duhton and reset shadowstack on abort diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -58,7 +58,7 @@ struct stm_list_s *new_object_ranges; struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; localchar_t *nursery_current; - + object_t **old_shadow_stack; struct stm_list_s *old_objects_to_trace; /* pages newly allocated in the current transaction only containing uncommitted objects */ @@ -776,6 +776,8 @@ _STM_TL1->jmpbufptr = jmpbufptr; _STM_TL2->running_transaction = 1; _STM_TL2->need_abort = 0; + + _STM_TL2->old_shadow_stack = _STM_TL1->shadow_stack; } #if 0 @@ 
-961,6 +963,9 @@ _STM_TL2->nursery_current - nursery_base); _STM_TL2->nursery_current = nursery_base; + /* reset shadowstack */ + _STM_TL1->shadow_stack = _STM_TL2->old_shadow_stack; + /* unreserve uncommitted_pages and mark them as SHARED again */ /* STM_LIST_FOREACH(_STM_TL2->uncommitted_pages, ({ */ /* uintptr_t pagenum = (uintptr_t)item; */ diff --git a/c7/reader_writer_lock.c b/c7/reader_writer_lock.c --- a/c7/reader_writer_lock.c +++ b/c7/reader_writer_lock.c @@ -11,7 +11,7 @@ Shared-Memory Multiprocessors". Converting the algorithm from assembly language to C yields: */ - +#include #include "reader_writer_lock.h" @@ -54,11 +54,13 @@ int rwticket_wrtrylock(rwticket *l) { - unsigned me = l->s.users; + unsigned cmp = l->u; + + unsigned me = cmp & 0xff;//l->s.users; unsigned char menew = me + 1; - unsigned read = l->s.read << 8; - unsigned cmp = (me << 16) + read + me; - unsigned cmpnew = (menew << 16) + read + me; + // unsigned read = (cmp & 0xffff) >> 8;//l->s.read << 8; + //unsigned cmp = (me << 16) + read + me; + unsigned cmpnew = (menew << 16) | (cmp & 0x0000ffff); //(menew << 16) + read + me; if (cmpxchg(&l->u, cmp, cmpnew) == cmp) return 0; @@ -81,6 +83,8 @@ int rwticket_rdtrylock(rwticket *l) { + assert(0); + /* XXX implement like wrtrylock */ unsigned me = l->s.users; unsigned write = l->s.write; unsigned char menew = me + 1; diff --git a/duhton/demo/container_transaction.duh b/duhton/demo/container_transaction.duh --- a/duhton/demo/container_transaction.duh +++ b/duhton/demo/container_transaction.duh @@ -3,11 +3,11 @@ (defun g (thread n) (set c (+ (get c) 1)) - (if (> (get c) 2000) + (if (> (get c) 20000) (print (quote overflow) (get c)) - (if (< n 1000) + (if (< n 10000) (transaction f thread (+ n 1)) - (if (< (get c) 2000) + (if (< (get c) 20000) (print (quote not-enough)) (print (quote ok)))))) diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -181,7 +181,10 @@ while (__builtin_setjmp(here) == 1) { } stm_start_transaction(&here); run_transaction(cell); + _du_save1(stm_thread_local_obj); stm_stop_transaction(); + _du_restore1(stm_thread_local_obj); + } return NULL; From noreply at buildbot.pypy.org Sun Jan 19 12:17:38 2014 From: noreply at buildbot.pypy.org (timfel) Date: Sun, 19 Jan 2014 12:17:38 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: oops, forgot to include this Message-ID: <20140119111738.A3F641D24B4@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r592:89c1c5b8fe02 Date: 2014-01-19 12:12 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/89c1c5b8fe02/ Log: oops, forgot to include this diff --git a/spyvm/system.py b/spyvm/system.py new file mode 100644 --- /dev/null +++ b/spyvm/system.py @@ -0,0 +1,9 @@ +import sys +import os +import platform + +IS_POSIX = os.name == "posix" +IS_WINDOWS = os.name == "nt" +IS_LINUX = "linux" in sys.platform +IS_64BIT = "64bit" in platform.architecture()[0] +IS_CYGWIN = "cygwin" == sys.platform From noreply at buildbot.pypy.org Sun Jan 19 12:17:39 2014 From: noreply at buildbot.pypy.org (timfel) Date: Sun, 19 Jan 2014 12:17:39 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: add BitBlt, GrafPort, Form, and Canvas classes that work in pure smalltalk Message-ID: <20140119111739.B78511D24B4@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r593:83b3e917889b Date: 2014-01-19 12:13 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/83b3e917889b/ Log: add BitBlt, GrafPort, Form, and Canvas classes 
that work in pure smalltalk diff too long, truncating to 2000 out of 5739 lines diff --git a/images/BitBltPureSmalltalk.st b/images/BitBltPureSmalltalk.st new file mode 100644 --- /dev/null +++ b/images/BitBltPureSmalltalk.st @@ -0,0 +1,1 @@ +Object subclass: #BitBltPure instanceVariableNames: 'destForm sourceForm halftoneForm combinationRule destX destY width height sourceX sourceY clipX clipY clipWidth clipHeight colorMap sourceWidth sourceHeight sourceDepth sourcePitch sourceBits sourcePPW sourceMSB destWidth destHeight destDepth destPitch destBits destPPW destMSB bitCount skew mask1 mask2 preload nWords destMask hDir vDir sourceIndex sourceDelta destIndex destDelta sx sy dx dy bbW bbH halftoneHeight noSource noHalftone halftoneBase sourceAlpha srcBitShift dstBitShift bitBltOop affectedL affectedR affectedT affectedB opTable maskTable ditherMatrix4x4 ditherThresholds16 ditherValues16 hasSurfaceLock warpSrcShift warpSrcMask warpAlignShift warpAlignMask warpBitShiftTable querySurfaceFn lockSurfaceFn unlockSurfaceFn isWarping cmFlags cmMask cmShiftTable cmMaskTable cmLookupTable cmBitsPerColor dither8Lookup componentAlphaModeColor componentAlphaModeAlpha ungammaLookupTable gammaLookupTable' classVariableNames: 'AllOnes AlphaIndex BBClipHeightIndex BBClipWidthIndex BBClipXIndex BBClipYIndex BBColorMapIndex BBDestFormIndex BBDestXIndex BBDestYIndex BBHalftoneFormIndex BBHeightIndex BBLastIndex BBRuleIndex BBSourceFormIndex BBSourceXIndex BBSourceYIndex BBWarpBase BBWidthIndex BBXTableIndex BinaryPoint BlueIndex ColorMapFixedPart ColorMapIndexedPart ColorMapNewStyle ColorMapPresent CrossedX Dither8Lookup DitherMatrix4x4 DitherThresholds16 DitherValues16 EndOfRun FixedPt1 FormBitsIndex FormDepthIndex FormHeightIndex FormWidthIndex GreenIndex JitBltHookSize MaskTable OpTable OpTableSize RedIndex' poolDictionaries: '' category: 'BitBltPureSmalltalk'! !BitBltPure commentStamp: '' prior: 0! This class implements BitBlt, much as specified in the Blue Book spec. Performance has been enhanced through the use of pointer variables such as sourceIndex and destIndex, and by separating several special cases of the inner loop. Operation has been extended to color, with support for 1, 2, 4, 8, 16, and 32-bit pixel sizes. Conversion between different pixel sizes is facilitated by accepting an optional color map. 
In addition to the original 16 combination rules, this BitBlt supports 16 fail (for old paint mode) 17 fail (for old mask mode) 18 sourceWord + destinationWord 19 sourceWord - destinationWord 20 rgbAdd: sourceWord with: destinationWord 21 rgbSub: sourceWord with: destinationWord 22 OLDrgbDiff: sourceWord with: destinationWord 23 OLDtallyIntoMap: destinationWord -- old vers doesn't clip to bit boundary 24 alphaBlend: sourceWord with: destinationWord 25 pixPaint: sourceWord with: destinationWord 26 pixMask: sourceWord with: destinationWord 27 rgbMax: sourceWord with: destinationWord 28 rgbMin: sourceWord with: destinationWord 29 rgbMin: sourceWord bitInvert32 with: destinationWord 30 alphaBlendConst: sourceWord with: destinationWord -- alpha passed as an arg 31 alphaPaintConst: sourceWord with: destinationWord -- alpha passed as an arg 32 rgbDiff: sourceWord with: destinationWord 33 tallyIntoMap: destinationWord 34 alphaBlendScaled: sourceWord with: destinationWord 35 alphaBlendScaled: sourceWord with: "unused here - only used by FXBlt" 36 alphaBlendScaled: sourceWord with: "unused here - only used by FXBlt" 37 rgbMul: sourceWord with: destinationWord 38 pixSwap: sourceWord with: destinationWord 39 pixClear: sourceWord with: destinationWord 40 fixAlpha: sourceWord with: destinationWord 41 rgbComponentAlpha: sourceWord with: destinationWord This implementation has also been fitted with an experimental "warp drive" that allows abritrary scaling and rotation (and even limited affine deformations) with all BitBlt storage modes supported. To add a new rule to BitBlt... 1. add the new rule method or methods in the category 'combination rules' of BBSim 2. describe it in the class comment of BBSim and in the class comment for BitBlt 3. add refs to initializeRuleTable in proper positions 4. add refs to initBBOpTable, following the pattern ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! OLDrgbDiff: sourceWord with: destinationWord "Subract the pixels in the source and destination, color by color, and return the sum of the absolute value of all the differences. For non-rgb, XOR the two and return the number of differing pixels. Note that the region is not clipped to bit boundaries, but only to the nearest (enclosing) word. This is because copyLoop does not do pre-merge masking. For accurate results, you must subtract the values obtained from the left and right fringes." | diff pixMask | destDepth < 16 ifTrue: ["Just xor and count differing bits if not RGB" diff := sourceWord bitXor: destinationWord. pixMask := maskTable at: destDepth. [diff = 0] whileFalse: [(diff bitAnd: pixMask) ~= 0 ifTrue: [bitCount := bitCount + 1]. diff := diff >> destDepth]. ^ destinationWord "for no effect"]. destDepth = 16 ifTrue: [diff := (self partitionedSub: sourceWord from: destinationWord nBits: 5 nPartitions: 3). bitCount := bitCount + (diff bitAnd: 16r1F) + (diff>>5 bitAnd: 16r1F) + (diff>>10 bitAnd: 16r1F). diff := (self partitionedSub: sourceWord>>16 from: destinationWord>>16 nBits: 5 nPartitions: 3). bitCount := bitCount + (diff bitAnd: 16r1F) + (diff>>5 bitAnd: 16r1F) + (diff>>10 bitAnd: 16r1F)] ifFalse: [diff := (self partitionedSub: sourceWord from: destinationWord nBits: 8 nPartitions: 3). bitCount := bitCount + (diff bitAnd: 16rFF) + (diff>>8 bitAnd: 16rFF) + (diff>>16 bitAnd: 16rFF)]. ^ destinationWord "For no effect on dest"! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! 
OLDtallyIntoMap: sourceWord with: destinationWord "Tally pixels into the color map. Note that the source should be specified = destination, in order for the proper color map checks to be performed at setup. Note that the region is not clipped to bit boundaries, but only to the nearest (enclosing) word. This is because copyLoop does not do pre-merge masking. For accurate results, you must subtract the values obtained from the left and right fringes." | mapIndex pixMask shiftWord | (cmFlags bitAnd: (ColorMapPresent bitOr: ColorMapIndexedPart)) = (ColorMapPresent bitOr: ColorMapIndexedPart) ifFalse: [^ destinationWord "no op"]. destDepth < 16 ifTrue: ["loop through all packed pixels." pixMask := (maskTable at: destDepth) bitAnd: cmMask. shiftWord := destinationWord. 1 to: destPPW do: [:i | mapIndex := shiftWord bitAnd: pixMask. self tallyMapAt: mapIndex put: (self tallyMapAt: mapIndex) + 1. shiftWord := shiftWord >> destDepth]. ^ destinationWord]. destDepth = 16 ifTrue: ["Two pixels Tally the right half..." mapIndex := self rgbMap: (destinationWord bitAnd: 16rFFFF) from: 5 to: cmBitsPerColor. self tallyMapAt: mapIndex put: (self tallyMapAt: mapIndex) + 1. "... and then left half" mapIndex := self rgbMap: destinationWord>>16 from: 5 to: cmBitsPerColor. self tallyMapAt: mapIndex put: (self tallyMapAt: mapIndex) + 1] ifFalse: ["Just one pixel." mapIndex := self rgbMap: destinationWord from: 8 to: cmBitsPerColor. self tallyMapAt: mapIndex put: (self tallyMapAt: mapIndex) + 1]. ^ destinationWord "For no effect on dest"! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! addWord: sourceWord with: destinationWord ^sourceWord + destinationWord! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! alphaBlend: sourceWord with: destinationWord "Blend sourceWord with destinationWord, assuming both are 32-bit pixels. The source is assumed to have 255*alpha in the high 8 bits of each pixel, while the high 8 bits of the destinationWord will be ignored. The blend produced is alpha*source + (1-alpha)*dest, with the computation being performed independently on each color component. The high byte of the result will be 0." | alpha unAlpha colorMask result blend shift | alpha := sourceWord >> 24. "High 8 bits of source pixel" alpha = 0 ifTrue: [ ^ destinationWord ]. alpha = 255 ifTrue: [ ^ sourceWord ]. unAlpha := 255 - alpha. colorMask := 16rFF. result := 0. "red" shift := 0. blend := ((sourceWord >> shift bitAnd: colorMask) * alpha) + ((destinationWord>>shift bitAnd: colorMask) * unAlpha) + 254 // 255 bitAnd: colorMask. result := result bitOr: blend << shift. "green" shift := 8. blend := ((sourceWord >> shift bitAnd: colorMask) * alpha) + ((destinationWord>>shift bitAnd: colorMask) * unAlpha) + 254 // 255 bitAnd: colorMask. result := result bitOr: blend << shift. "blue" shift := 16. blend := ((sourceWord >> shift bitAnd: colorMask) * alpha) + ((destinationWord>>shift bitAnd: colorMask) * unAlpha) + 254 // 255 bitAnd: colorMask. result := result bitOr: blend << shift. "alpha (pre-multiplied)" shift := 24. blend := (alpha * 255) + ((destinationWord>>shift bitAnd: colorMask) * unAlpha) + 254 // 255 bitAnd: colorMask. result := result bitOr: blend << shift. ^ result ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! alphaBlendConst: sourceWord with: destinationWord ^ self alphaBlendConst: sourceWord with: destinationWord paintMode: false! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! 
alphaBlendConst: sourceWord with: destinationWord paintMode: paintMode "Blend sourceWord with destinationWord using a constant alpha. Alpha is encoded as 0 meaning 0.0, and 255 meaning 1.0. The blend produced is alpha*source + (1.0-alpha)*dest, with the computation being performed independently on each color component. This function could eventually blend into any depth destination, using the same color averaging and mapping as warpBlt. paintMode = true means do nothing if the source pixel value is zero." "This first implementation works with dest depths of 16 and 32 bits only. Normal color mapping will allow sources of lower depths in this case, and results can be mapped directly by truncation, so no extra color maps are needed. To allow storing into any depth will require subsequent addition of two other colormaps, as is the case with WarpBlt." | pixMask destShifted sourceShifted destPixVal rgbMask sourcePixVal unAlpha result pixBlend shift blend maskShifted bitsPerColor | destDepth < 16 ifTrue: [^ destinationWord "no-op"]. unAlpha := 255 - sourceAlpha. pixMask := maskTable at: destDepth. destDepth = 16 ifTrue: [bitsPerColor := 5] ifFalse:[bitsPerColor := 8]. rgbMask := (1<>shift bitAnd: rgbMask) * sourceAlpha) + ((destinationWord>>shift bitAnd: rgbMask) * unAlpha)) + 254 // 255 bitAnd: rgbMask. result := result bitOr: blend<>shift bitAnd: rgbMask) * sourceAlpha) + ((destPixVal>>shift bitAnd: rgbMask) * unAlpha)) + 254 // 255 bitAnd: rgbMask. pixBlend := pixBlend bitOr: blend<> destDepth. sourceShifted := sourceShifted >> destDepth. destShifted := destShifted >> destDepth]. ]. ^ result ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:02'! alphaBlendScaled: sourceWord with: destinationWord "Blend sourceWord with destinationWord using the alpha value from sourceWord. Alpha is encoded as 0 meaning 0.0, and 255 meaning 1.0. In contrast to alphaBlend:with: the color produced is srcColor + (1-srcAlpha) * dstColor e.g., it is assumed that the source color is already scaled." | unAlpha dstMask srcMask b g r a | unAlpha := 255 - (sourceWord >> 24). "High 8 bits of source pixel" dstMask := destinationWord. srcMask := sourceWord. b := (dstMask bitAnd: 255) * unAlpha >> 8 + (srcMask bitAnd: 255). b > 255 ifTrue:[b := 255]. dstMask := dstMask >> 8. srcMask := srcMask >> 8. g := (dstMask bitAnd: 255) * unAlpha >> 8 + (srcMask bitAnd: 255). g > 255 ifTrue:[g := 255]. dstMask := dstMask >> 8. srcMask := srcMask >> 8. r := (dstMask bitAnd: 255) * unAlpha >> 8 + (srcMask bitAnd: 255). r > 255 ifTrue:[r := 255]. dstMask := dstMask >> 8. srcMask := srcMask >> 8. a := (dstMask bitAnd: 255) * unAlpha >> 8 + (srcMask bitAnd: 255). a > 255 ifTrue:[a := 255]. ^(((((a << 8) + r) << 8) + g) << 8) + b! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! alphaPaintConst: sourceWord with: destinationWord sourceWord = 0 ifTrue: [^ destinationWord "opt for all-transparent source"]. ^ self alphaBlendConst: sourceWord with: destinationWord paintMode: true! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitAnd: sourceWord with: destinationWord ^sourceWord bitAnd: destinationWord! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitAndInvert: sourceWord with: destinationWord ^sourceWord bitAnd: destinationWord bitInvert32! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitInvertAnd: sourceWord with: destinationWord ^sourceWord bitInvert32 bitAnd: destinationWord! ! 
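For readers who do not follow the Smalltalk, the per-channel arithmetic of rule 24 (alphaBlend:with: above) translates almost one-to-one into Python. This is only an illustrative sketch of the blend formula, not part of the changeset:

    def alpha_blend(source_word, destination_word):
        # Rule 24 sketch: both arguments are 32-bit pixels, the source alpha
        # sits in the top byte, and each colour component is blended
        # independently with the same "+ 254 // 255" rounding as the method above.
        alpha = source_word >> 24
        if alpha == 0:
            return destination_word
        if alpha == 255:
            return source_word
        un_alpha = 255 - alpha
        result = 0
        for shift in (0, 8, 16):               # the three colour components
            s = (source_word >> shift) & 0xFF
            d = (destination_word >> shift) & 0xFF
            blend = ((s * alpha + d * un_alpha + 254) // 255) & 0xFF
            result |= blend << shift
        # alpha channel: blend 255*alpha against the destination's alpha byte
        d = (destination_word >> 24) & 0xFF
        blend = ((alpha * 255 + d * un_alpha + 254) // 255) & 0xFF
        return result | (blend << 24)

With alpha = 255 the formula reproduces the source word exactly, which is why the early-out for a fully opaque source is safe.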
!BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitInvertAndInvert: sourceWord with: destinationWord ^sourceWord bitInvert32 bitAnd: destinationWord bitInvert32! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitInvertDestination: sourceWord with: destinationWord ^destinationWord bitInvert32! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitInvertOr: sourceWord with: destinationWord ^sourceWord bitInvert32 bitOr: destinationWord! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitInvertOrInvert: sourceWord with: destinationWord ^sourceWord bitInvert32 bitOr: destinationWord bitInvert32! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitInvertSource: sourceWord with: destinationWord ^sourceWord bitInvert32! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitInvertXor: sourceWord with: destinationWord ^sourceWord bitInvert32 bitXor: destinationWord! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitOr: sourceWord with: destinationWord ^sourceWord bitOr: destinationWord! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitOrInvert: sourceWord with: destinationWord ^sourceWord bitOr: destinationWord bitInvert32! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! bitXor: sourceWord with: destinationWord ^sourceWord bitXor: destinationWord! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! clearWord: source with: destination ^ 0! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! destinationWord: sourceWord with: destinationWord ^destinationWord! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! fixAlpha: sourceWord with: destinationWord "For any non-zero pixel value in destinationWord with zero alpha channel take the alpha from sourceWord and fill it in. Intended for fixing alpha channels left at zero during 16->32 bpp conversions." destDepth = 32 ifFalse:[^destinationWord]. "no-op for non 32bpp" destinationWord = 0 ifTrue:[^0]. (destinationWord bitAnd: 16rFF000000) = 0 ifFalse:[^destinationWord]. ^destinationWord bitOr: (sourceWord bitAnd: 16rFF000000) ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:02'! merge: sourceWord with: destinationWord "Sender warpLoop is too big to include this in-line" ^ self mergeFn: sourceWord with: destinationWord! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 20:04'! mergeFn: arg1 with: arg2 ^ self perform: (OpTable at: combinationRule+1) with: arg1 with: arg2! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! partitionedAND: word1 to: word2 nBits: nBits nPartitions: nParts "AND word1 to word2 as nParts partitions of nBits each. Any field of word1 not all-ones is treated as all-zeroes. Used for erasing, eg, brush shapes prior to ORing in a color" | mask result | mask := maskTable at: nBits. "partition mask starts at the right" result := 0. 1 to: nParts do: [:i | (word1 bitAnd: mask) = mask ifTrue: [result := result bitOr: (word2 bitAnd: mask)]. mask := mask << nBits "slide left to next partition"]. ^ result ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:03'! partitionedAdd: word1 to: word2 nBits: nBits nPartitions: nParts "Add word1 to word2 as nParts partitions of nBits each. 
This is useful for packed pixels, or packed colors" | mask sum result maskedWord1 | "In C, most arithmetic operations answer the same bit pattern regardless of the operands being signed or unsigned ints (this is due to the way 2's complement numbers work). However, comparisions might fail. Add the proper declaration of words as unsigned int in those cases where comparisions are done (jmv)" mask := maskTable at: nBits. "partition mask starts at the right" result := 0. 1 to: nParts do: [:i | maskedWord1 := word1 bitAnd: mask. sum := maskedWord1 + (word2 bitAnd: mask). (sum <= mask "result must not carry out of partition" and: [ sum >= maskedWord1 ]) "This is needed because in C, integer arithmetic overflows silently!! (jmv)" ifTrue: [result := result bitOr: sum] ifFalse: [result := result bitOr: mask]. mask := mask << nBits "slide left to next partition"]. ^ result ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:03'! partitionedMax: word1 with: word2 nBits: nBits nPartitions: nParts "Max word1 to word2 as nParts partitions of nBits each" | mask result | "In C, most arithmetic operations answer the same bit pattern regardless of the operands being signed or unsigned ints (this is due to the way 2's complement numbers work). However, comparisions might fail. Add the proper declaration of words as unsigned int in those cases where comparisions are done (jmv)" mask := maskTable at: nBits. "partition mask starts at the right" result := 0. 1 to: nParts do: [:i | result := result bitOr: ((word2 bitAnd: mask) max: (word1 bitAnd: mask)). mask := mask << nBits "slide left to next partition"]. ^ result ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:03'! partitionedMin: word1 with: word2 nBits: nBits nPartitions: nParts "Min word1 to word2 as nParts partitions of nBits each" | mask result | "In C, most arithmetic operations answer the same bit pattern regardless of the operands being signed or unsigned ints (this is due to the way 2's complement numbers work). However, comparisions might fail. Add the proper declaration of words as unsigned int in those cases where comparisions are done (jmv)" mask := maskTable at: nBits. "partition mask starts at the right" result := 0. 1 to: nParts do: [:i | result := result bitOr: ((word2 bitAnd: mask) min: (word1 bitAnd: mask)). mask := mask << nBits "slide left to next partition"]. ^ result ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! partitionedMul: word1 with: word2 nBits: nBits nPartitions: nParts "Multiply word1 with word2 as nParts partitions of nBits each. This is useful for packed pixels, or packed colors. Bug in loop version when non-white background" | sMask product result dMask | "In C, integer multiplication might answer a wrong value if the unsigned values are declared as signed. This problem does not affect this method, because the most significant bit (i.e. the sign bit) will always be zero (jmv)" sMask := maskTable at: nBits. "partition mask starts at the right" dMask := sMask << nBits. result := (((word1 bitAnd: sMask)+1) * ((word2 bitAnd: sMask)+1) - 1 bitAnd: dMask) >> nBits. "optimized first step" nParts = 1 ifTrue: [ ^result ]. product := (((word1>>nBits bitAnd: sMask)+1) * ((word2>>nBits bitAnd: sMask)+1) - 1 bitAnd: dMask). result := result bitOr: product. nParts = 2 ifTrue: [ ^result ]. product := (((word1>>(2*nBits) bitAnd: sMask)+1) * ((word2>>(2*nBits) bitAnd: sMask)+1) - 1 bitAnd: dMask). result := result bitOr: product << nBits. 
nParts = 3 ifTrue: [ ^result ]. product := (((word1>>(3*nBits) bitAnd: sMask)+1) * ((word2>>(3*nBits) bitAnd: sMask)+1) - 1 bitAnd: dMask). result := result bitOr: product << (2*nBits). ^ result " | sMask product result dMask | sMask := maskTable at: nBits. 'partition mask starts at the right' dMask := sMask << nBits. result := (((word1 bitAnd: sMask)+1) * ((word2 bitAnd: sMask)+1) - 1 bitAnd: dMask) >> nBits. 'optimized first step' nBits to: nBits * (nParts-1) by: nBits do: [:ofs | product := (((word1>>ofs bitAnd: sMask)+1) * ((word2>>ofs bitAnd: sMask)+1) - 1 bitAnd: dMask). result := result bitOr: (product bitAnd: dMask) << (ofs-nBits)]. ^ result"! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! partitionedRgbComponentAlpha: sourceWord dest: destWord nBits: nBits nPartitions: nParts | mask result p1 p2 v | mask := maskTable at: nBits. "partition mask starts at the right" result := 0. 1 to: nParts do: [:i | p1 := (sourceWord bitAnd: mask) >> ((i - 1)*nBits). p2 := (destWord bitAnd: mask) >> ((i - 1)*nBits). nBits = 32 ifFalse:[ nBits = 16 ifTrue:[ p1 := (self rgbMap16To32: p1) bitOr: 16rFF000000. p2 := (self rgbMap16To32: p2) bitOr: 16rFF000000] ifFalse:[ p1 := (self rgbMap: p1 from: nBits to: 32) bitOr: 16rFF000000. p2 := (self rgbMap: p2 from: nBits to: 32) bitOr: 16rFF000000.]]. v := self rgbComponentAlpha32: p1 with: p2. nBits = 32 ifFalse:[ v := self rgbMap: v from: 32 to: nBits]. result := result bitOr: (v << ((i - 1)*nBits)). mask := mask << nBits "slide left to next partition"]. ^ result ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:03'! partitionedSub: word1 from: word2 nBits: nBits nPartitions: nParts "Subtract word1 from word2 as nParts partitions of nBits each. This is useful for packed pixels, or packed colors" | mask result p1 p2 | "In C, most arithmetic operations answer the same bit pattern regardless of the operands being signed or unsigned ints (this is due to the way 2's complement numbers work). However, comparisions might fail. Add the proper declaration of words as unsigned int in those cases where comparisions are done (jmv)" mask := maskTable at: nBits. "partition mask starts at the right" result := 0. 1 to: nParts do: [:i | p1 := word1 bitAnd: mask. p2 := word2 bitAnd: mask. p1 < p2 "result is really abs value of thedifference" ifTrue: [result := result bitOr: p2 - p1] ifFalse: [result := result bitOr: p1 - p2]. mask := mask << nBits "slide left to next partition"]. ^ result ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/18/2014 15:47'! pickSourcePixels: nPixels flags: mapperFlags srcMask: srcMask destMask: dstMask srcShiftInc: srcShiftInc dstShiftInc: dstShiftInc "Pick nPix pixels starting at srcBitIndex from the source, map by the color map, and justify them according to dstBitIndex in the resulting destWord." | sourceWord destWord sourcePix destPix srcShift dstShift nPix | sourceWord := self srcLongAt: sourceIndex. destWord := 0. srcShift := srcBitShift. "Hint: Keep in register" dstShift := dstBitShift. "Hint: Keep in register" nPix := nPixels. "always > 0 so we can use do { } while(--nPix);" (mapperFlags = (ColorMapPresent bitOr: ColorMapIndexedPart)) ifTrue:[ "a little optimization for (pretty crucial) blits using indexed lookups only" [ "grab, colormap and mix in pixel" sourcePix := sourceWord >> srcShift bitAnd: srcMask. destPix := cmLookupTable at: (sourcePix bitAnd: cmMask). destWord := destWord bitOr: (destPix bitAnd: dstMask) << dstShift. 
"adjust dest pix index" dstShift := dstShift + dstShiftInc. "adjust source pix index" ((srcShift := srcShift + srcShiftInc) bitAnd: 16rFFFFFFE0) = 0 ifFalse:[ sourceMSB ifTrue:[srcShift := srcShift + 32] ifFalse:[srcShift := srcShift - 32]. sourceWord := self srcLongAt: (sourceIndex := sourceIndex + 1)]. (nPix := nPix - 1) = 0] whileFalse. ] ifFalse:[ [ "grab, colormap and mix in pixel" sourcePix := sourceWord >> srcShift bitAnd: srcMask. destPix := self mapPixel: sourcePix flags: mapperFlags. destWord := destWord bitOr: (destPix bitAnd: dstMask) << dstShift. "adjust dest pix index" dstShift := dstShift + dstShiftInc. "adjust source pix index" ((srcShift := srcShift + srcShiftInc) bitAnd: 16rFFFFFFE0) = 0 ifFalse:[ sourceMSB ifTrue:[srcShift := srcShift + 32] ifFalse:[srcShift := srcShift - 32]. sourceWord := self srcLongAt: (sourceIndex := sourceIndex + 1)]. (nPix := nPix - 1) = 0] whileFalse. ]. srcBitShift := srcShift. "Store back" ^destWord ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:03'! pixClear: sourceWord with: destinationWord "Clear all pixels in destinationWord for which the pixels of sourceWord have the same values. Used to clear areas of some constant color to zero." | mask result nBits pv | destDepth = 32 ifTrue:[ sourceWord = destinationWord ifTrue:[^0] ifFalse:[^destinationWord]. ]. nBits := destDepth. mask := maskTable at: nBits. "partition mask starts at the right" result := 0. 1 to: destPPW do:[:i | pv := destinationWord bitAnd: mask. (sourceWord bitAnd: mask) = pv ifTrue:[pv := 0]. result := result bitOr: pv. mask := mask << nBits "slide left to next partition"]. ^ result! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:03'! pixMask: sourceWord with: destinationWord ^ self partitionedAND: sourceWord bitInvert32 to: destinationWord nBits: destDepth nPartitions: destPPW! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:03'! pixPaint: sourceWord with: destinationWord sourceWord = 0 ifTrue: [^ destinationWord]. ^ sourceWord bitOr: (self partitionedAND: sourceWord bitInvert32 to: destinationWord nBits: destDepth nPartitions: destPPW)! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:04'! pixSwap: sourceWord with: destWord "Swap the pixels in destWord" | result shift lowMask highMask | destPPW = 1 ifTrue:[^destWord]. "a single pixel per word" result := 0. lowMask := (1 << destDepth) - 1. "mask low pixel" highMask := lowMask << (destPPW-1 * destDepth). "mask high pixel" shift := 32 - destDepth. result := result bitOr: ( (destWord bitAnd: lowMask) << shift bitOr: (destWord bitAnd: highMask) >> shift). destPPW <= 2 ifTrue:[^result]. 2 to: destPPW // 2 do:[:i| lowMask := lowMask << destDepth. highMask := highMask >> destDepth. shift := shift - (destDepth * 2). result := result bitOr: ( (destWord bitAnd: lowMask) << shift bitOr: (destWord bitAnd: highMask) >> shift)]. ^result! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:04'! rgbAdd: sourceWord with: destinationWord destDepth < 16 ifTrue: ["Add each pixel separately" ^ self partitionedAdd: sourceWord to: destinationWord nBits: destDepth nPartitions: destPPW]. 
destDepth = 16 ifTrue: ["Add RGB components of each pixel separately" ^ (self partitionedAdd: sourceWord to: destinationWord nBits: 5 nPartitions: 3) + ((self partitionedAdd: sourceWord>>16 to: destinationWord>>16 nBits: 5 nPartitions: 3) << 16)] ifFalse: ["Add RGBA components of the pixel separately" ^ self partitionedAdd: sourceWord to: destinationWord nBits: 8 nPartitions: 4]! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/18/2014 15:05'! rgbComponentAlpha16 "This version assumes combinationRule = 41 sourcePixSize = 32 destPixSize = 16 sourceForm ~= destForm. " | srcIndex dstIndex sourceWord srcAlpha destWord deltaX deltaY srcY dstY dstMask srcShift ditherBase ditherIndex ditherThreshold | deltaY := bbH + 1. "So we can pre-decrement" srcY := sy. dstY := dy. srcShift := (dx bitAnd: 1) * 16. destMSB ifTrue:[srcShift := 16 - srcShift]. mask1 := 16rFFFF << (16 - srcShift). "This is the outer loop" [(deltaY := deltaY - 1) ~= 0] whileTrue:[ srcIndex := sourceBits + (srcY * sourcePitch) + (sx * 4). dstIndex := destBits + (dstY * destPitch) + (dx // 2 * 4). ditherBase := (dstY bitAnd: 3) * 4. ditherIndex := (sx bitAnd: 3) - 1. "For pre-increment" deltaX := bbW + 1. "So we can pre-decrement" dstMask := mask1. dstMask = 16rFFFF ifTrue:[srcShift := 16] ifFalse:[srcShift := 0]. "This is the inner loop" [(deltaX := deltaX - 1) ~= 0] whileTrue:[ ditherThreshold := ditherMatrix4x4 at: ditherBase + (ditherIndex := ditherIndex + 1 bitAnd: 3) + 1. sourceWord := self srcLongAt: srcIndex. srcAlpha := sourceWord bitAnd: 16rFFFFFF. srcAlpha = 0 ifFalse:[ "0 < srcAlpha" "If we have to mix colors then just copy a single word" destWord := self dstLongAt: dstIndex. destWord := destWord bitAnd: dstMask bitInvert32. destWord := destWord >> srcShift. "Expand from 16 to 32 bit by adding zero bits" destWord := (((destWord bitAnd: 16r7C00) bitShift: 9) bitOr: ((destWord bitAnd: 16r3E0) bitShift: 6)) bitOr: (((destWord bitAnd: 16r1F) bitShift: 3) bitOr: 16rFF000000). "Mix colors" sourceWord := self rgbComponentAlpha32: sourceWord with: destWord. "And dither" sourceWord := self dither32To16: sourceWord threshold: ditherThreshold. sourceWord = 0 ifTrue:[sourceWord := 1 << srcShift] ifFalse:[sourceWord := sourceWord << srcShift]. "Store back" self dstLongAt: dstIndex put: sourceWord mask: dstMask. ]. srcIndex := srcIndex + 4. destMSB ifTrue:[srcShift = 0 ifTrue:[dstIndex := dstIndex + 4]] ifFalse:[srcShift = 0 ifFalse:[dstIndex := dstIndex + 4]]. srcShift := srcShift bitXor: 16. "Toggle between 0 and 16" dstMask := dstMask bitInvert32. "Mask other half word" ]. srcY := srcY + 1. dstY := dstY + 1. ]. ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:04'! rgbComponentAlpha32 "This version assumes combinationRule = 41 sourcePixSize = destPixSize = 32 sourceForm ~= destForm. Note: The inner loop has been optimized for dealing with the special case of aR = aG = aB = 0 " | srcIndex dstIndex sourceWord srcAlpha destWord deltaX deltaY srcY dstY | deltaY := bbH + 1. "So we can pre-decrement" srcY := sy. dstY := dy. "This is the outer loop" [(deltaY := deltaY - 1) ~= 0] whileTrue:[ srcIndex := sourceBits + (srcY * sourcePitch) + (sx * 4). dstIndex := destBits + (dstY * destPitch) + (dx * 4). deltaX := bbW + 1. "So we can pre-decrement" "This is the inner loop" [(deltaX := deltaX - 1) ~= 0] whileTrue:[ sourceWord := self srcLongAt: srcIndex. srcAlpha := sourceWord bitAnd:16rFFFFFF. srcAlpha = 0 ifTrue:[ srcIndex := srcIndex + 4. dstIndex := dstIndex + 4. 
"Now skip as many words as possible," [(deltaX := deltaX - 1) ~= 0 and:[ ((sourceWord := self srcLongAt: srcIndex) bitAnd:16rFFFFFF) = 0]] whileTrue:[ srcIndex := srcIndex + 4. dstIndex := dstIndex + 4. ]. "Adjust deltaX" deltaX := deltaX + 1. ] ifFalse:[ "0 < srcAlpha" "If we have to mix colors then just copy a single word" destWord := self dstLongAt: dstIndex. destWord := self rgbComponentAlpha32: sourceWord with: destWord. self dstLongAt: dstIndex put: destWord. srcIndex := srcIndex + 4. dstIndex := dstIndex + 4. ]. ]. srcY := srcY + 1. dstY := dstY + 1. ].! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:05'! rgbComponentAlpha32: sourceWord with: destinationWord " componentAlphaModeColor is the color, sourceWord contains an alpha value for each component of RGB each of which is encoded as0 meaning 0.0 and 255 meaning 1.0 . the rule is... color = componentAlphaModeColor. colorAlpha = componentAlphaModeAlpha. mask = sourceWord. dst.A = colorAlpha + (1 - colorAlpha) * dst.A dst.R = color.R * mask.R * colorAlpha + (1 - (mask.R * colorAlpha)) * dst.R dst.G = color.G * mask.G * colorAlpha + (1 - (mask.G* colorAlpha)) * dst.G dst.B = color.B * mask.B * colorAlpha + (1 - (mask.B* colorAlpha)) * dst.B " | alpha dstMask srcColor srcAlpha b g r a aB aG aR aA answer s d | alpha := sourceWord. alpha = 0 ifTrue:[^destinationWord]. srcColor := componentAlphaModeColor. srcAlpha := componentAlphaModeAlpha bitAnd: 255. aB := alpha bitAnd: 255. alpha := alpha >> 8. aG := alpha bitAnd: 255. alpha := alpha >> 8. aR := alpha bitAnd: 255. alpha := alpha >> 8. aA := alpha bitAnd: 255. srcAlpha = 255 ifFalse:[ aA := aA * srcAlpha >> 8. aR := aR * srcAlpha >> 8. aG := aG * srcAlpha >> 8. aB := aB * srcAlpha >> 8]. dstMask := destinationWord. d := dstMask bitAnd: 255. s := srcColor bitAnd: 255. ungammaLookupTable == nil ifFalse:[ d := ungammaLookupTable at: d. s := ungammaLookupTable at: s.]. b := (d * (255 - aB) >> 8) + (s * aB >> 8). b > 255 ifTrue:[b := 255]. gammaLookupTable == nil ifFalse:[ b := gammaLookupTable at: b]. dstMask := dstMask >> 8. srcColor := srcColor >> 8. d := dstMask bitAnd: 255. s := srcColor bitAnd: 255. ungammaLookupTable == nil ifFalse:[ d := ungammaLookupTable at: d. s := ungammaLookupTable at: s.]. g := (d * (255 - aG) >> 8) + (s * aG >> 8). g > 255 ifTrue:[g := 255]. gammaLookupTable == nil ifFalse:[ g := gammaLookupTable at: g]. dstMask := dstMask >> 8. srcColor := srcColor >> 8. d := dstMask bitAnd: 255. s := srcColor bitAnd: 255. ungammaLookupTable == nil ifFalse:[ d := ungammaLookupTable at: d. s := ungammaLookupTable at: s.]. r := (d * (255 - aR) >> 8) + (s * aR >> 8). r > 255 ifTrue:[r := 255]. gammaLookupTable == nil ifFalse:[ r := gammaLookupTable at: r]. dstMask := dstMask >> 8. srcColor := srcColor >> 8. a := ((dstMask bitAnd: 255) * (255 - aA) >> 8) + aA. "no need to gamma correct alpha value ?" a > 255 ifTrue:[a := 255]. answer := (((((a << 8) + r) << 8) + g) << 8) + b. ^answer ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:05'! rgbComponentAlpha8 "This version assumes combinationRule = 41 sourcePixSize = 32 destPixSize = 8 sourceForm ~= destForm. Note: This is not real blending since we don't have the source colors available. " | srcIndex dstIndex sourceWord srcAlpha destWord deltaX deltaY srcY dstY dstMask srcShift adjust mappingTable mapperFlags | mappingTable := self default8To32Table. mapperFlags := cmFlags bitAnd: ColorMapNewStyle bitInvert32. deltaY := bbH + 1. "So we can pre-decrement" srcY := sy. dstY := dy. 
mask1 := ((dx bitAnd: 3) * 8). destMSB ifTrue:[mask1 := 24 - mask1]. mask2 := AllOnes bitXor:(16rFF << mask1). (dx bitAnd: 1) = 0 ifTrue:[adjust := 0] ifFalse:[adjust := 16r1F1F1F1F]. (dy bitAnd: 1) = 0 ifTrue:[adjust := adjust bitXor: 16r1F1F1F1F]. "This is the outer loop" [(deltaY := deltaY - 1) ~= 0] whileTrue:[ adjust := adjust bitXor: 16r1F1F1F1F. srcIndex := sourceBits + (srcY * sourcePitch) + (sx * 4). dstIndex := destBits + (dstY * destPitch) + (dx // 4 * 4). deltaX := bbW + 1. "So we can pre-decrement" srcShift := mask1. dstMask := mask2. "This is the inner loop" [(deltaX := deltaX - 1) ~= 0] whileTrue:[ sourceWord := ((self srcLongAt: srcIndex) bitAnd: (adjust bitInvert32)) + adjust. srcAlpha := sourceWord bitAnd: 16rFFFFFF. "set srcAlpha to the average of the 3 separate aR,Ag,AB values" srcAlpha := ((srcAlpha >> 16) + (srcAlpha >> 8 bitAnd: 16rFF) + (srcAlpha bitAnd: 16rFF)) // 3. srcAlpha > 31 ifTrue:["Everything below 31 is transparent" srcAlpha > 224 ifTrue: ["treat everything above 224 as opaque" sourceWord := 16rFFFFFFFF]. destWord := self dstLongAt: dstIndex. destWord := destWord bitAnd: dstMask bitInvert32. destWord := destWord >> srcShift. destWord := mappingTable at: destWord. sourceWord := self rgbComponentAlpha32: sourceWord with: destWord. sourceWord := self mapPixel: sourceWord flags: mapperFlags. sourceWord := sourceWord << srcShift. "Store back" self dstLongAt: dstIndex put: sourceWord mask: dstMask. ]. srcIndex := srcIndex + 4. destMSB ifTrue:[ srcShift = 0 ifTrue:[dstIndex := dstIndex + 4. srcShift := 24. dstMask := 16r00FFFFFF] ifFalse:[srcShift := srcShift - 8. dstMask := (dstMask >> 8) bitOr: 16rFF000000]. ] ifFalse:[ srcShift = 32 ifTrue:[dstIndex := dstIndex + 4. srcShift := 0. dstMask := 16rFFFFFF00] ifFalse:[srcShift := srcShift + 8. dstMask := dstMask << 8 bitOr: 255]. ]. adjust := adjust bitXor: 16r1F1F1F1F. ]. srcY := srcY + 1. dstY := dstY + 1. ]. ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:05'! rgbComponentAlpha: sourceWord with: destinationWord " componentAlphaModeColor is the color, sourceWord contains an alpha value for each component of RGB each of which is encoded as0 meaning 0.0 and 255 meaning 1.0 . the rule is... color = componentAlphaModeColor. colorAlpha = componentAlphaModeAlpha. mask = sourceWord. dst.A = colorAlpha + (1 - colorAlpha) * dst.A dst.R = color.R * mask.R * colorAlpha + (1 - (mask.R * colorAlpha)) * dst.R dst.G = color.G * mask.G * colorAlpha + (1 - (mask.G* colorAlpha)) * dst.G dst.B = color.B * mask.B * colorAlpha + (1 - (mask.B* colorAlpha)) * dst.B " | alpha | alpha := sourceWord. alpha = 0 ifTrue:[^destinationWord]. ^self partitionedRgbComponentAlpha: sourceWord dest: destinationWord nBits: destDepth nPartitions: destPPW.! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:05'! rgbDiff: sourceWord with: destinationWord "Subract the pixels in the source and destination, color by color, and return the sum of the absolute value of all the differences. For non-rgb, return the number of differing pixels." | pixMask destShifted sourceShifted destPixVal bitsPerColor rgbMask sourcePixVal diff maskShifted | pixMask := maskTable at: destDepth. destDepth = 16 ifTrue: [bitsPerColor := 5. rgbMask := 16r1F] ifFalse: [bitsPerColor := 8. rgbMask := 16rFF]. maskShifted := destMask. destShifted := destinationWord. sourceShifted := sourceWord. 
1 to: destPPW do: [:i | (maskShifted bitAnd: pixMask) > 0 ifTrue: ["Only tally pixels within the destination rectangle" destPixVal := destShifted bitAnd: pixMask. sourcePixVal := sourceShifted bitAnd: pixMask. destDepth < 16 ifTrue: [sourcePixVal = destPixVal ifTrue: [diff := 0] ifFalse: [diff := 1]] ifFalse: [diff := (self partitionedSub: sourcePixVal from: destPixVal nBits: bitsPerColor nPartitions: 3). diff := (diff bitAnd: rgbMask) + (diff>>bitsPerColor bitAnd: rgbMask) + ((diff>>bitsPerColor)>>bitsPerColor bitAnd: rgbMask)]. bitCount := bitCount + diff]. maskShifted := maskShifted >> destDepth. sourceShifted := sourceShifted >> destDepth. destShifted := destShifted >> destDepth]. ^ destinationWord "For no effect on dest" ! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:06'! rgbMax: sourceWord with: destinationWord destDepth < 16 ifTrue: ["Max each pixel separately" ^ self partitionedMax: sourceWord with: destinationWord nBits: destDepth nPartitions: destPPW]. destDepth = 16 ifTrue: ["Max RGB components of each pixel separately" ^ (self partitionedMax: sourceWord with: destinationWord nBits: 5 nPartitions: 3) + ((self partitionedMax: sourceWord>>16 with: destinationWord>>16 nBits: 5 nPartitions: 3) << 16)] ifFalse: ["Max RGBA components of the pixel separately" ^ self partitionedMax: sourceWord with: destinationWord nBits: 8 nPartitions: 4]! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:06'! rgbMin: sourceWord with: destinationWord destDepth < 16 ifTrue: ["Min each pixel separately" ^ self partitionedMin: sourceWord with: destinationWord nBits: destDepth nPartitions: destPPW]. destDepth = 16 ifTrue: ["Min RGB components of each pixel separately" ^ (self partitionedMin: sourceWord with: destinationWord nBits: 5 nPartitions: 3) + ((self partitionedMin: sourceWord>>16 with: destinationWord>>16 nBits: 5 nPartitions: 3) << 16)] ifFalse: ["Min RGBA components of the pixel separately" ^ self partitionedMin: sourceWord with: destinationWord nBits: 8 nPartitions: 4]! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:06'! rgbMinInvert: wordToInvert with: destinationWord | sourceWord | sourceWord := wordToInvert bitInvert32. destDepth < 16 ifTrue: ["Min each pixel separately" ^ self partitionedMin: sourceWord with: destinationWord nBits: destDepth nPartitions: destPPW]. destDepth = 16 ifTrue: ["Min RGB components of each pixel separately" ^ (self partitionedMin: sourceWord with: destinationWord nBits: 5 nPartitions: 3) + ((self partitionedMin: sourceWord>>16 with: destinationWord>>16 nBits: 5 nPartitions: 3) << 16)] ifFalse: ["Min RGBA components of the pixel separately" ^ self partitionedMin: sourceWord with: destinationWord nBits: 8 nPartitions: 4]! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:06'! rgbMul: sourceWord with: destinationWord destDepth < 16 ifTrue: ["Mul each pixel separately" ^ self partitionedMul: sourceWord with: destinationWord nBits: destDepth nPartitions: destPPW]. destDepth = 16 ifTrue: ["Mul RGB components of each pixel separately" ^ (self partitionedMul: sourceWord with: destinationWord nBits: 5 nPartitions: 3) + ((self partitionedMul: sourceWord>>16 with: destinationWord>>16 nBits: 5 nPartitions: 3) << 16)] ifFalse: ["Mul RGBA components of the pixel separately" ^ self partitionedMul: sourceWord with: destinationWord nBits: 8 nPartitions: 4] " | scanner | Display repaintMorphicDisplay. scanner := DisplayScanner quickPrintOn: Display. 
MessageTally time: [0 to: 760 by: 4 do: [:y |scanner drawString: 'qwrepoiuasfd=)(/&()=#!!°lkjzxv.,mn124+09857907QROIYTOAFDJZXNBNB,M-.,Mqwrepoiuasfd=)(/&()=#!!°lkjzxv.,mn124+09857907QROIYTOAFDJZXNBNB,M-.,M1234124356785678' at: 0 at y]]. "! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:06'! rgbSub: sourceWord with: destinationWord destDepth < 16 ifTrue: ["Sub each pixel separately" ^ self partitionedSub: sourceWord from: destinationWord nBits: destDepth nPartitions: destPPW]. destDepth = 16 ifTrue: ["Sub RGB components of each pixel separately" ^ (self partitionedSub: sourceWord from: destinationWord nBits: 5 nPartitions: 3) + ((self partitionedSub: sourceWord>>16 from: destinationWord>>16 nBits: 5 nPartitions: 3) << 16)] ifFalse: ["Sub RGBA components of the pixel separately" ^ self partitionedSub: sourceWord from: destinationWord nBits: 8 nPartitions: 4]! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! sourceWord: sourceWord with: destinationWord ^sourceWord! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:01'! subWord: sourceWord with: destinationWord ^sourceWord - destinationWord! ! !BitBltPure methodsFor: 'combination rules' stamp: 'tfel 1/17/2014 19:06'! tallyIntoMap: sourceWord with: destinationWord "Tally pixels into the color map. Those tallied are exactly those in the destination rectangle. Note that the source should be specified == destination, in order for the proper color map checks to be performed at setup." | mapIndex pixMask destShifted maskShifted pixVal | (cmFlags bitAnd: (ColorMapPresent bitOr: ColorMapIndexedPart)) = (ColorMapPresent bitOr: ColorMapIndexedPart) ifFalse: [^ destinationWord "no op"]. pixMask := maskTable at: destDepth. destShifted := destinationWord. maskShifted := destMask. 1 to: destPPW do: [:i | (maskShifted bitAnd: pixMask) = 0 ifFalse: ["Only tally pixels within the destination rectangle" pixVal := destShifted bitAnd: pixMask. destDepth < 16 ifTrue: [mapIndex := pixVal] ifFalse: [destDepth = 16 ifTrue: [mapIndex := self rgbMap: pixVal from: 5 to: cmBitsPerColor] ifFalse: [mapIndex := self rgbMap: pixVal from: 8 to: cmBitsPerColor]]. self tallyMapAt: mapIndex put: (self tallyMapAt: mapIndex) + 1]. maskShifted := maskShifted >> destDepth. destShifted := destShifted >> destDepth]. ^ destinationWord "For no effect on dest"! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! affectedB ^ affectedB! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! affectedB: anObject affectedB := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:01'! affectedBottom ^affectedB! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! affectedL ^ affectedL! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! affectedL: anObject affectedL := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:01'! affectedLeft ^affectedL! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! affectedR ^ affectedR! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! affectedR: anObject affectedR := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:01'! affectedRight ^affectedR! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! affectedT ^ affectedT! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! affectedT: anObject affectedT := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:01'! 
affectedTop ^affectedT! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! bbH ^ bbH! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! bbH: anObject bbH := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! bbW ^ bbW! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! bbW: anObject bbW := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! bitBltOop ^ bitBltOop! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! bitBltOop: anObject bitBltOop := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! bitCount ^ bitCount! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! bitCount: anObject bitCount := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! clipHeight ^ clipHeight! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! clipHeight: anObject clipHeight := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! clipWidth ^ clipWidth! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! clipWidth: anObject clipWidth := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! clipX ^ clipX! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! clipX: anObject clipX := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! clipY ^ clipY! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! clipY: anObject clipY := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmBitsPerColor ^ cmBitsPerColor! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmBitsPerColor: anObject cmBitsPerColor := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmFlags ^ cmFlags! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmFlags: anObject cmFlags := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmLookupTable ^ cmLookupTable! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmLookupTable: anObject cmLookupTable := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmMask ^ cmMask! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmMask: anObject cmMask := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmMaskTable ^ cmMaskTable! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmMaskTable: anObject cmMaskTable := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmShiftTable ^ cmShiftTable! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! cmShiftTable: anObject cmShiftTable := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:50'! colorMap ^ colorMap! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:50'! colorMap: anObject colorMap := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! combinationRule ^ combinationRule! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! combinationRule: anObject combinationRule := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! componentAlphaModeAlpha ^ componentAlphaModeAlpha! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! componentAlphaModeAlpha: anObject componentAlphaModeAlpha := anObject! ! 
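The partitioned* helpers shown earlier treat one 32-bit word as several packed colour fields and clamp each field instead of letting a carry spill into its neighbour. A rough Python equivalent of partitionedAdd:to:nBits:nPartitions:, given only to illustrate the idea:

    def partitioned_add(word1, word2, n_bits, n_parts):
        # Add two packed words field by field (n_parts fields of n_bits each),
        # saturating each field at its maximum so no carry leaks into the
        # neighbouring field.
        mask = (1 << n_bits) - 1               # partition mask starts at the right
        result = 0
        for _ in range(n_parts):
            field_sum = (word1 & mask) + (word2 & mask)
            result |= field_sum if field_sum <= mask else mask
            mask <<= n_bits                    # slide left to the next partition
        return result

With n_bits = 5 and n_parts = 3 this adds the R, G and B fields of one 16-bit pixel independently, which is how rgbAdd:with: uses it for 16-bit destinations.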
!BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! componentAlphaModeColor ^ componentAlphaModeColor! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! componentAlphaModeColor: anObject componentAlphaModeColor := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destBits ^ destBits! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destBits: anObject destBits := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destDelta ^ destDelta! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destDelta: anObject destDelta := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destDepth ^ destDepth! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destDepth: anObject destDepth := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destForm ^ destForm! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destForm: anObject destForm := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destHeight ^ destHeight! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destHeight: anObject destHeight := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destIndex ^ destIndex! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destIndex: anObject destIndex := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destMSB ^ destMSB! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destMSB: anObject destMSB := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destMask ^ destMask! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destMask: anObject destMask := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destPPW ^ destPPW! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destPPW: anObject destPPW := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destPitch ^ destPitch! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destPitch: anObject destPitch := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destWidth ^ destWidth! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destWidth: anObject destWidth := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destX ^ destX! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destX: anObject destX := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destY ^ destY! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! destY: anObject destY := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! dither8Lookup ^ dither8Lookup! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! dither8Lookup: anObject dither8Lookup := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! ditherMatrix4x4 ^ ditherMatrix4x4! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! ditherMatrix4x4: anObject ditherMatrix4x4 := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! ditherThresholds16 ^ ditherThresholds16! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! ditherThresholds16: anObject ditherThresholds16 := anObject! ! 
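Rule 41 (rgbComponentAlpha32:with: above) carries a separate alpha value per colour component, which is what component-alpha (sub-pixel) text rendering needs. Leaving out the optional gamma/ungamma lookup tables, the arithmetic looks roughly like this in Python; the argument names stand in for componentAlphaModeColor and componentAlphaModeAlpha and are not taken from the changeset:

    def rgb_component_alpha_32(mask_word, dest_word, mode_color, mode_alpha):
        # Rule 41 sketch without gamma correction: mask_word holds one alpha
        # per RGB component, mode_color is the drawing colour, mode_alpha a
        # global alpha in 0..255.
        if mask_word == 0:
            return dest_word
        a_b = mask_word & 0xFF
        a_g = (mask_word >> 8) & 0xFF
        a_r = (mask_word >> 16) & 0xFF
        a_a = (mask_word >> 24) & 0xFF
        src_alpha = mode_alpha & 0xFF
        if src_alpha != 255:                   # scale the per-component alphas
            a_b = a_b * src_alpha >> 8
            a_g = a_g * src_alpha >> 8
            a_r = a_r * src_alpha >> 8
            a_a = a_a * src_alpha >> 8
        dst, src = dest_word, mode_color
        channels = []
        for a in (a_b, a_g, a_r):              # blue, green, red bytes in turn
            d, s = dst & 0xFF, src & 0xFF
            channels.append(min((d * (255 - a) >> 8) + (s * a >> 8), 255))
            dst >>= 8
            src >>= 8
        alpha_out = min(((dst & 0xFF) * (255 - a_a) >> 8) + a_a, 255)
        b, g, r = channels
        return (((((alpha_out << 8) + r) << 8) + g) << 8) + b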
!BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! ditherValues16 ^ ditherValues16! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! ditherValues16: anObject ditherValues16 := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! dstBitShift ^ dstBitShift! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! dstBitShift: anObject dstBitShift := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! dx ^ dx! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! dx: anObject dx := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! dy ^ dy! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! dy: anObject dy := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! gammaLookupTable ^ gammaLookupTable! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! gammaLookupTable: anObject gammaLookupTable := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! hDir ^ hDir! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! hDir: anObject hDir := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! halftoneBase ^ halftoneBase! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! halftoneBase: anObject halftoneBase := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! halftoneForm ^ halftoneForm! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! halftoneForm: anObject halftoneForm := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! halftoneHeight ^ halftoneHeight! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! halftoneHeight: anObject halftoneHeight := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! hasSurfaceLock ^ hasSurfaceLock! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! hasSurfaceLock: anObject hasSurfaceLock := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! height ^ height! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! height: anObject height := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! isWarping ^ isWarping! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! isWarping: anObject isWarping := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! lockSurfaceFn ^ lockSurfaceFn! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! lockSurfaceFn: anObject lockSurfaceFn := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! mask1 ^ mask1! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! mask1: anObject mask1 := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! mask2 ^ mask2! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! mask2: anObject mask2 := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! maskTable ^ maskTable! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! maskTable: anObject maskTable := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! nWords ^ nWords! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! nWords: anObject nWords := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! 
noHalftone ^ noHalftone! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! noHalftone: anObject noHalftone := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! noSource ^ noSource! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! noSource: anObject noSource := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! opTable ^ opTable! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! opTable: anObject opTable := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! preload ^ preload! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! preload: anObject preload := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! querySurfaceFn ^ querySurfaceFn! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! querySurfaceFn: anObject querySurfaceFn := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! skew ^ skew! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! skew: anObject skew := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceAlpha ^ sourceAlpha! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceAlpha: anObject sourceAlpha := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceBits ^ sourceBits! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceBits: anObject sourceBits := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceDelta ^ sourceDelta! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceDelta: anObject sourceDelta := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceDepth ^ sourceDepth! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceDepth: anObject sourceDepth := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceForm ^ sourceForm! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceForm: anObject sourceForm := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceHeight ^ sourceHeight! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceHeight: anObject sourceHeight := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceIndex ^ sourceIndex! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceIndex: anObject sourceIndex := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceMSB ^ sourceMSB! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceMSB: anObject sourceMSB := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourcePPW ^ sourcePPW! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourcePPW: anObject sourcePPW := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourcePitch ^ sourcePitch! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourcePitch: anObject sourcePitch := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceWidth ^ sourceWidth! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceWidth: anObject sourceWidth := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceX ^ sourceX! ! 
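Several of the accessors above (sourcePPW, sourcePitch, maskTable and friends) describe the packed-pixel geometry that the inner loops index with; destPitch, for example, is used as a byte offset per scan line in rgbComponentAlpha16. Assuming the usual word-aligned Form layout, the derived quantities look roughly as follows; the helper name and the exact rounding are illustrative assumptions, not from the changeset:

    WORD_BITS = 32

    def pixel_geometry(depth, width):
        # Packed-pixel geometry of one Form: pixels per 32-bit word, bytes per
        # word-aligned scan line, and the maskTable entry that selects a
        # single pixel's bits.
        assert depth in (1, 2, 4, 8, 16, 32)
        ppw = WORD_BITS // depth
        pitch = (width + ppw - 1) // ppw * 4   # assumed: rows padded to a full word
        mask = (1 << depth) - 1
        return ppw, pitch, mask

For a 640-pixel-wide, 16-bit Form this would give 2 pixels per word, a 1280-byte pitch and a mask of 16rFFFF (0xFFFF).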
!BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceX: anObject sourceX := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceY ^ sourceY! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sourceY: anObject sourceY := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! srcBitShift ^ srcBitShift! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! srcBitShift: anObject srcBitShift := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sx ^ sx! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sx: anObject sx := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sy ^ sy! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! sy: anObject sy := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! ungammaLookupTable ^ ungammaLookupTable! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! ungammaLookupTable: anObject ungammaLookupTable := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! unlockSurfaceFn ^ unlockSurfaceFn! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! unlockSurfaceFn: anObject unlockSurfaceFn := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! vDir ^ vDir! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! vDir: anObject vDir := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! warpAlignMask ^ warpAlignMask! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! warpAlignMask: anObject warpAlignMask := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! warpAlignShift ^ warpAlignShift! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! warpAlignShift: anObject warpAlignShift := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! warpBitShiftTable ^ warpBitShiftTable! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! warpBitShiftTable: anObject warpBitShiftTable := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! warpSrcMask ^ warpSrcMask! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! warpSrcMask: anObject warpSrcMask := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! warpSrcShift ^ warpSrcShift! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! warpSrcShift: anObject warpSrcShift := anObject! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! width ^ width! ! !BitBltPure methodsFor: 'accessing' stamp: 'tfel 1/17/2014 19:47'! width: anObject width := anObject! ! !BitBltPure methodsFor: 'inner loop' stamp: 'tfel 1/18/2014 15:05'! alphaSourceBlendBits16 "This version assumes combinationRule = 34 sourcePixSize = 32 destPixSize = 16 sourceForm ~= destForm. " | srcIndex dstIndex sourceWord srcAlpha destWord deltaX deltaY srcY dstY dstMask srcShift ditherBase ditherIndex ditherThreshold | deltaY := bbH + 1. "So we can pre-decrement" srcY := sy. dstY := dy. srcShift := (dx bitAnd: 1) * 16. destMSB ifTrue:[srcShift := 16 - srcShift]. mask1 := 16rFFFF << (16 - srcShift). "This is the outer loop" [(deltaY := deltaY - 1) ~= 0] whileTrue:[ srcIndex := sourceBits + (srcY * sourcePitch) + (sx * 4). dstIndex := destBits + (dstY * destPitch) + (dx // 2 * 4). ditherBase := (dstY bitAnd: 3) * 4. 
ditherIndex := (sx bitAnd: 3) - 1. "For pre-increment" deltaX := bbW + 1. "So we can pre-decrement" dstMask := mask1. dstMask = 16rFFFF ifTrue:[srcShift := 16] ifFalse:[srcShift := 0]. "This is the inner loop" [(deltaX := deltaX - 1) ~= 0] whileTrue:[ ditherThreshold := ditherMatrix4x4 at: ditherBase + (ditherIndex := ditherIndex + 1 bitAnd: 3) + 1. sourceWord := self srcLongAt: srcIndex. srcAlpha := sourceWord >> 24. From noreply at buildbot.pypy.org Sun Jan 19 12:17:37 2014 From: noreply at buildbot.pypy.org (timfel) Date: Sun, 19 Jan 2014 12:17:37 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: add image that uses no BitBlt, MiscPrim, or LargInteger primitives Message-ID: <20140119111737.86DA21D24B4@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r591:b9e530ae3f46 Date: 2014-01-19 12:07 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/b9e530ae3f46/ Log: add image that uses no BitBlt, MiscPrim, or LargInteger primitives diff too long, truncating to 2000 out of 382194 lines diff --git a/images/Squeak4.5-noBitBlt.changes b/images/Squeak4.5-noBitBlt.changes new file mode 100644 --- /dev/null +++ b/images/Squeak4.5-noBitBlt.changes @@ -0,0 +1,39 @@ +'From Squeak4.1 of 17 April 2010 [latest update: #9957] on 17 April 2010 at 5:22:05 pm'! ----STARTUP----{17 April 2010 . 5:21:54 pm} as C:\Squeak\4.0\4.1-final\Squeak4.1.image! Smalltalk appendChangesTo: 'SqueakV41.sources'.! ----QUIT----{17 April 2010 . 5:22:11 pm} Squeak4.1.image priorSource: 89! ----STARTUP----{24 May 2010 . 8:07:26 pm} as C:\Squeak\4.2\Squeak4.1.image! ----SNAPSHOT----{24 May 2010 . 8:08:14 pm} Squeak4.2.image priorSource: 229! !HashedCollection commentStamp: 'ul 4/12/2010 22:37' prior: 0! I am an abstract collection of objects that implement hash and equality in a consitent way. This means that whenever two objects are equal, their hashes have to be equal too. If two objects are equal then I can only store one of them. Hashes are expected to be integers (preferably SmallIntegers). I also expect that the objects contained by me do not change their hashes. If that happens, hash invariants have to be re-established, which can be done by #rehash. Since I'm abstract, no instances of me should exist. My subclasses should implement #scanFor:, #fixCollisionsFrom: and #noCheckNoGrowFillFrom:. Instance Variables array: (typically Array or WeakArray) tally: (non-negative) array - An array whose size is a prime number, it's non-nil elements are the elements of the collection, and whose nil elements are empty slots. There is always at least one nil. In fact I try to keep my "load" at 75% or less so that hashing will work well. tally - The number of elements in the collection. The array size is always greater than this. Implementation details: I implement a hash table which uses open addressing with linear probing as the method of collision resolution. Searching for an element or a free slot for an element is done by #scanFor: which should return the index of the slot in array corresponding to it's argument. When an element is removed #fixCollisionsFrom: should rehash all elements in array between the original index of the removed element, wrapping around after the last slot until reaching an empty slot. My maximum load factor (75%) is hardcoded in #atNewIndex:put:, so it can only be changed by overriding that method. When my load factor reaches this limit I replace my array with a larger one (see #grow) ensuring that my load factor will be less than or equal to 50%. 
The new array is filled by #noCheckNoGrowFillFrom: which should use #scanForEmptySlotFor: instead of #scanFor: for better performance. I do not shrink. ! !WeakKeyDictionary methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Collection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:33' prior: 18816249! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger timesRepeat: [self add: newObject]. ^ newObject! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! compactAll "HashedCollection compactAll" self allSubclassesDo: #compactAllInstances! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! compactAllInstances "Do not use #allInstancesDo: because compact may create new instances." self allInstances do: #compact! ! !HashedCollection class methodsFor: 'sizing' stamp: 'ul 4/7/2010 00:17' prior: 55063414! goodPrimes "Answer a sorted array of prime numbers less than one billion that make good hash table sizes. Should be expanded as needed. See comments below code" ^#( 5 11 17 23 31 43 59 79 107 149 199 269 359 479 641 857 1151 1549 2069 2237 2423 2617 2797 2999 3167 3359 3539 3727 3911 4441 4787 5119 5471 5801 6143 6521 6827 7177 7517 7853 8783 9601 10243 10867 11549 12239 12919 13679 14293 15013 15731 17569 19051 20443 21767 23159 24611 25847 27397 28571 30047 31397 35771 38201 40841 43973 46633 48989 51631 54371 57349 60139 62969 70589 76091 80347 85843 90697 95791 101051 106261 111143 115777 120691 126311 140863 150523 160969 170557 181243 190717 201653 211891 221251 232591 242873 251443 282089 300869 321949 341227 362353 383681 401411 422927 443231 464951 482033 504011 562621 605779 647659 681607 723623 763307 808261 844709 886163 926623 967229 1014617 1121987 1201469 1268789 1345651 1429531 1492177 1577839 1651547 1722601 1800377 1878623 1942141 2028401 2242727 2399581 2559173 2686813 2836357 3005579 3144971 3283993 3460133 3582923 3757093 3903769 4061261 4455361 4783837 5068529 5418079 5680243 6000023 6292981 6611497 6884641 7211599 7514189 7798313 8077189 9031853 9612721 10226107 10745291 11338417 11939203 12567671 13212697 13816333 14337529 14938571 15595673 16147291 17851577 18993941 20180239 21228533 22375079 23450491 24635579 25683871 26850101 27921689 29090911 30153841 31292507 32467307 35817611 37983761 40234253 42457253 44750177 46957969 49175831 51442639 53726417 55954637 58126987 60365939 62666977 64826669 71582779 76039231 80534381 84995153 89500331 93956777 98470819 102879613 107400389 111856841 116365721 120819287 125246581 129732203 143163379 152076289 161031319 169981667 179000669 187913573 196826447 205826729 214748357 223713691 232679021 241591901 250504801 259470131 285162679 301939921 318717121 335494331 352271573 369148753 385926017 402603193 419480419 436157621 453034849 469712051 486589307 503366497 520043707 570475349 603929813 637584271 671138659 704693081 738247541 771801929 805356457 838910803 872365267 905919671 939574117 973128521 1006682977 1040137411 1073741833) "The above primes past 2069 were chosen carefully so that they do not interact badly with 1664525 (used by hashMultiply), and so that gcd(p, (256^k) +/- a) = 1, for 0 cost ifTrue: [ cost := newCost ] ]. cost ]."! ! 
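The HashedCollection class comment above describes the scheme concretely: open addressing with linear probing, where #scanFor: walks the prime-sized array from the hash-derived slot (wrapping around) until it finds either the element or an empty slot, #atNewIndex:put: grows the table once more than 75% of the slots are occupied, and #grow rebuilds into a larger array so the load factor drops back to 50% or less. The following Python sketch only illustrates that scheme under those stated rules; it is not the Squeak implementation, the class and method names are invented, and Squeak additionally keeps the array size prime (see #goodPrimes above) and always leaves at least one nil slot so probing is guaranteed to terminate.

    class OpenAddressedSet:
        """Toy open-addressing set with linear probing and grow-at-75%-load."""

        def __init__(self, capacity=7):
            self.array = [None] * capacity   # None plays the role of nil
            self.tally = 0                   # number of stored elements

        def _scan_for(self, obj):
            # Like #scanFor: -- start at the hash-derived slot and probe
            # linearly, wrapping around, until obj or an empty slot is found.
            size = len(self.array)
            index = hash(obj) % size
            for _ in range(size):
                slot = self.array[index]
                if slot is None or slot == obj:
                    return index
                index = (index + 1) % size
            raise RuntimeError("table is full; it should have grown earlier")

        def add(self, obj):
            index = self._scan_for(obj)
            if self.array[index] is None:                  # not present yet
                self.array[index] = obj
                self.tally += 1
                if self.tally * 4 > len(self.array) * 3:   # load factor > 75%
                    self._grow()
            return obj

        def __contains__(self, obj):
            return self.array[self._scan_for(obj)] == obj

        def _grow(self):
            # Rebuild into an array more than twice as large, so the load
            # factor ends up at 50% or less (cf. #grow / #noCheckNoGrowFillFrom:).
            old = [e for e in self.array if e is not None]
            self.array = [None] * (len(self.array) * 2 + 1)
            self.tally = 0
            for e in old:
                self.add(e)

With that picture, #fixCollisionsFrom: (also named in the comment) is the removal-side counterpart: after an element is deleted, every element between the freed slot and the next empty slot must be rehashed so that later probes can still reach it.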
!HashedCollection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:38' prior: 53647096! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger < 1 ifTrue: [ ^newObject ]. ^self add: newObject "I can only store an object once." ! ! !HashedCollection methodsFor: 'private' stamp: 'ul 4/12/2010 22:53'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: tally * 4 // 3. self growTo: newCapacity! ! !WeakSet methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Symbol class methodsFor: 'class initialization' stamp: 'ul 4/13/2010 00:00' prior: 30357901! compactSymbolTable "Reduce the size of the symbol table so that it holds all existing symbols with 25% free space." | oldSize | Smalltalk garbageCollect. oldSize := SymbolTable capacity. SymbolTable compact. ^(oldSize - SymbolTable capacity) printString, ' slot(s) reclaimed'! ! KeyedIdentitySet class removeSelector: #goodPrimes! WeakIdentityKeyDictionary class removeSelector: #goodPrimes! IdentitySet class removeSelector: #goodPrimes! IdentityDictionary class removeSelector: #goodPrimes! "Collections"! !HashedCollectionTest methodsFor: 'test - class - sizing' stamp: 'ul 4/7/2010 00:18' prior: 58761579! testPrimes: primes | badPrimes | badPrimes := #(3 5 71 139 479 5861 277421). "These primes are less than the hashMultiply constant (1664525) and 1664525 \\ prime is close to 0 (mod prime). The following snippet reproduces these numbers: | hashMultiplyConstant | hashMultiplyConstant := 1 hashMultiply. (Integer primesUpTo: hashMultiplyConstant) select: [ :each | | remainder | remainder := hashMultiplyConstant \\ each. remainder <= 1 or: [ remainder + 1 = each ] ]." self assert: primes isSorted. primes do: [ :each | self assert: each isPrime. self deny: (each > 2069 and: [ badPrimes includes: each ]) ]. self assert: ( primes select: [ :p | | result | result := false. p > 2069 ifTrue: [ 1 to: 8 do: [ :k | 1 to: 32 do: [ :a | (p gcd: (256 raisedTo: k) + a) = 1 ifFalse: [ result := true ]. (p gcd: (256 raisedTo: k) - a) = 1 ifFalse: [ result := true ] ] ] ]. result ]) isEmpty.! ! HashedCollectionTest removeSelector: #testGoodPrimesForIdentityBasedHashedCollections! "CollectionsTests"! !MCMczReader methodsFor: 'as yet unclassified' stamp: 'bf 4/18/2010 18:38' prior: 22938947! extractInfoFrom: dict ^MCWorkingCopy infoFromDictionary: dict cache: self infoCache! ! !MCWorkingCopy class methodsFor: 'as yet unclassified' stamp: 'bf 4/19/2010 00:39' prior: 23215403! infoFromDictionary: aDictionary cache: cache | id | id := (aDictionary at: #id) asString. ^ cache at: id ifAbsentPut: [MCVersionInfo name: (aDictionary at: #name ifAbsent: ['']) id: (UUID fromString: id) message: (aDictionary at: #message ifAbsent: ['']) date: ([Date fromString: (aDictionary at: #date)] ifError: [nil]) time: ([Time fromString: (aDictionary at: #time)] ifError: [nil]) author: (aDictionary at: #author ifAbsent: ['']) ancestors: (self ancestorsFromArray: (aDictionary at: #ancestors ifAbsent: []) cache: cache) stepChildren: (self ancestorsFromArray: (aDictionary at: #stepChildren ifAbsent: []) cache: cache)]! ! !MCVersionInfo methodsFor: 'converting' stamp: 'bf 4/18/2010 23:25' prior: 23175569! 
asDictionary ^ Dictionary new at: #name put: name; at: #id put: id asString; at: #message put: message; at: #date put: date; at: #time put: time; at: #author put: author; at: #ancestors put: (self ancestors collect: [:a | a asDictionary]); yourself! ! "Monticello"! !BlockContextTest methodsFor: 'running' stamp: 'md 9/6/2005 19:56' prior: 50431957! setUp super setUp. aBlockContext := [100 at 100 corner: 200 at 200]. contextOfaBlockContext := thisContext.! ! !BehaviorTest methodsFor: 'tests' stamp: 'md 2/18/2006 16:42' prior: 17365994! testBinding self assert: Object binding value = Object. self assert: Object binding key = #Object. self assert: Object class binding value = Object class. "returns nil for Metaclasses... like Encoder>>#associationFor:" self assert: Object class binding key = nil.! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53956757! testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (trailer kind == #EmbeddedSourceQCompress ). self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer sourceCode: code. self assert: (trailer kind == #EmbeddedSourceZip ). newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53957691! testEmbeddingTempNames | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer tempNames: code. newTrailer := trailer testEncoding. self assert: (trailer kind == #TempsNamesQCompress ). self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer tempNames: code. self assert: (trailer kind == #TempsNamesZip ). newTrailer := trailer testEncoding. self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:17' prior: 53958613! testEncodingNoTrailer | trailer | trailer := CompiledMethodTrailer new. "by default it should be a no-trailer" self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:14' prior: 53959109! testEncodingSourcePointer | trailer | trailer := CompiledMethodTrailer new. CompiledMethod allInstancesDo: [:method | | ptr | trailer method: method. self assert: ( (ptr := method sourcePointer) == trailer sourcePointer). "the last bytecode index must be at 0" ptr ~= 0 ifTrue: [ self assert: (method endPC = trailer endPC) ]. ].! ! 
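Between them, MCVersionInfo>>asDictionary and MCWorkingCopy class>>infoFromDictionary:cache: above define the dictionary shape Monticello uses to carry version metadata: #name, #id (a UUID rendered as a string), #message, #date, #time, #author, plus #ancestors (and #stepChildren on the reading side), where each ancestor is again a dictionary of the same shape, and the reader memoises the rebuilt version-info objects by id so shared ancestry becomes shared objects. The small Python sketch below only mirrors that shape and the id-keyed cache; the field values and the helper name are illustrative, not Monticello's API.

    # Illustrative values; the real code stores UUID, Date and Time objects
    # and falls back to defaults when a key is absent (ifAbsent:).
    info = {
        "id": "00000000-0000-0000-0000-000000000000",  # hypothetical UUID string
        "name": "SMBase-ar.42",                        # hypothetical version name
        "message": "commit comment",
        "date": "18 April 2010",
        "time": "11:25 pm",
        "author": "ar",
        "ancestors": [],        # dictionaries of this same shape, recursively
        "stepChildren": [],
    }

    cache = {}

    def info_from_dictionary(d, cache):
        # Mirrors 'cache at: id ifAbsentPut: [...]': the same id always
        # answers the same rebuilt info object.
        return cache.setdefault(d["id"], dict(d))

    assert info_from_dictionary(info, cache) is info_from_dictionary(info, cache)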
!CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53959564! testEncodingVarLengthSourcePointer | trailer newTrailer | trailer := CompiledMethodTrailer new. trailer sourcePointer: 1. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 1). trailer sourcePointer: 16r100000000000000. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 16r100000000000000). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53960108! testSourceByIdentifierEncoding | trailer id | trailer := CompiledMethodTrailer new. id := UUID new asString. trailer sourceIdentifier: id. self assert: (trailer kind == #SourceByStringIdentifier ). trailer := trailer testEncoding. self assert: (trailer kind == #SourceByStringIdentifier ). self assert: (trailer sourceIdentifier = id). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:49' prior: 53960643! testSourceBySelectorEncoding | trailer | trailer := CompiledMethodTrailer new. trailer setSourceBySelector. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CategorizerTest methodsFor: 'running' stamp: 'mtf 9/10/2007 10:10' prior: 18074036! setUp categorizer := Categorizer defaultList: #(a b c d e). categorizer classifyAll: #(a b c) under: 'abc'. categorizer addCategory: 'unreal'.! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074267! testClassifyNewElementNewCategory categorizer classify: #f under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') (''nice'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:18' prior: 18074541! testClassifyNewElementOldCategory categorizer classify: #f under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074806! testClassifyOldElementNewCategory categorizer classify: #e under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'') (''nice'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:54' prior: 18075078! testClassifyOldElementOldCategory categorizer classify: #e under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:22' prior: 18075341! testDefaultCategoryIsTransient "Test that category 'as yet unclassified' disapears when all it's elements are removed'" categorizer classifyAll: #(d e) under: #abc. self assert: categorizer printString = '(''abc'' a b c d e) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 15:15' prior: 18075669! testNullCategory "Test that category 'as yet unclassified' disapears when all it's elements are removed'" | aCategorizer | aCategorizer := Categorizer defaultList: #(). self assert: aCategorizer printString = '(''as yet unclassified'') '. 
self assert: aCategorizer categories = #('no messages'). aCategorizer classify: #a under: #b. self assert: aCategorizer printString = '(''b'' a) '. self assert: aCategorizer categories = #(b).! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18076194! testRemoveEmptyCategory categorizer removeCategory: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:55' prior: 18076430! testRemoveExistingElement categorizer removeElement: #a. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076673! testRemoveNonEmptyCategory self should: [categorizer removeCategory: #abc] raise: Error. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076950! testRemoveNonExistingCategory categorizer removeCategory: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18077203! testRemoveNonExistingElement categorizer removeElement: #f. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 14:49' prior: 18077451! testRemoveThenRename categorizer removeCategory: #unreal. categorizer renameCategory: #abc toBe: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''unreal'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:14' prior: 18077736! testUnchanged self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! "KernelTests"! !SmalltalkImage methodsFor: 'accessing' stamp: 'ul 4/18/2010 22:22'! at: key ifPresentAndInMemory: aBlock "Lookup the given key in the receiver. If it is present, answer the value of evaluating the given block with the value associated with the key. Otherwise, answer nil." ^globals at: key ifPresentAndInMemory: aBlock! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:45'! image "Answer the object to query about the current object memory and execution environment." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:47'! imageFormatVersion "Answer an integer identifying the type of image. The image version number may identify the format of the image (e.g. 32 or 64-bit word size) or specific requirements of the image (e.g. block closure support required). This invokes an optional primitive that may not be available on all virtual machines." "Smalltalk image imageFormatVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveImageFormatVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:38'! interpreterSourceVersion "Answer a string corresponding to the version of the interpreter source. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, as distinct from the external platform source code, typically written in C and managed separately for each platform. An optional primitive is invoked that may not be available on all virtual machines." 
"Smalltalk vm interpreterSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveInterpreterSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! platformSourceVersion "Answer a string corresponding to the version of the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines." "Smalltalk vm platformSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitivePlatformSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'image' stamp: 'md 5/16/2006 12:34' prior: 58536670! version "Answer the version of this release." ^SystemVersion current version! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! versionLabel "Answer a string corresponding to the version of virtual machine. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, in addition to the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines. See also vmVersion, which answers a string identifying the image from which virtual machine sources were generated." "Smalltalk vm versionLabel" self notify: 'This virtual machine does not support the optional primitive #primitiveVMVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:15'! vm "Answer the object to query about virtual machine." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 1/4/2010 21:40' prior: 58537225! wordSize "Answer the size in bytes of an object pointer or word in the object memory. The value does not change for a given image, but may be modified by a SystemTracer when converting the image to another format. The value is cached in WordSize to avoid the performance overhead of repeatedly consulting the VM." "Smalltalk wordSize" ^ WordSize ifNil: [WordSize := [SmalltalkImage current vmParameterAt: 40] on: Error do: [4]]! ! "System"! !SMLoaderPlus commentStamp: 'btr 12/1/2006 15:16' prior: 0! A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). It uses ToolBuilder to construct its window. You can open one with: SMLoaderPlus open Instance Variables categoriesToFilterIds: The set of categories to filter the packages list. filters: The set of filters to apply to the packages list. map: The model SqueakMap. packagesList: The list of packages from the map. selectedCategory: The current category. selectedItem: The selected package or release. window: The window, held only so we can reOpen.! !SMLoaderCategoricalPlus commentStamp: 'btr 12/4/2006 15:47' prior: 0! A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategoricalPlus open! !SMLoader commentStamp: 'btr 11/30/2006 18:00' prior: 27913009! A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). You can open one with: SMLoader open! !SMLoaderCategorical commentStamp: 'btr 12/1/2006 15:16' prior: 0! 
A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategorical open! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 18:06'! initialize Smalltalk at: #ToolBuilder ifPresent: [:tb | (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! openMenuString ^ 'SqueakMap Categories'! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! removeFromSystem (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self removeFromSystem: true! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString].! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:50'! buildFancyWith: aBuilder "Creates a variant of the window where the package pane is split between installed and uninstalled packages." | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight)); yourself); add: ((self buildNotInstalledPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ (horizDivide / 2)); yourself); add: ((self buildInstalledPackagesListWith: builder) frame: (vertDivide @ (horizDivide / 2) corner: 1 @ horizDivide); yourself); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1); yourself); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. self setUpdatablePanesFrom: #(#installedPackageList #notInstalledPackageList ). currentPackageList := #notInstalled. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:56'! buildInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #installedPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:52'! buildNotInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #notInstalledPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:55'! 
buildWith: aBuilder | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight))); add: ((self buildPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ horizDivide)); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1)); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! currentPackageList ^currentPackageList! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! currentPackageList: aSymbol currentPackageList := aSymbol. self changed: #installButtonLabel.! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:55'! defaultLabel ^ 'Categorical ' , super defaultLabel! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:58'! installButtonLabel ^ self currentPackageList = #notInstalled ifTrue: ['Install the above package'] ifFalse: ['Remove the above package']! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:52'! installedPackageList ^self packageList select: [:e | e isInstalled]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'! installedPackagesListIndex ^ self currentPackageList = #installed ifTrue: [self packagesListIndex] ifFalse: [0]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! installedPackagesListIndex: anObject packagesListIndex := anObject. self currentPackageList ~= #installed ifTrue: [self currentPackageList: #installed. self changed: #currentPackageList]. self noteChanged! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! isOn ^false! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:53'! notInstalledPackageList ^self packageList reject: [:e | e isInstalled]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'! notInstalledPackagesListIndex ^ self currentPackageList = #notInstalled ifTrue: [self packagesListIndex] ifFalse: [0]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:03'! notInstalledPackagesListIndex: anObject packagesListIndex := anObject. self currentPackageList ~= #notInstalled ifTrue: [self currentPackageList: #notInstalled. self changed: #currentPackageList]. self changed: #packagesListIndex. "update my selection" self noteChanged. self contentsChanged! ! !SMLoaderCategoricalPlus methodsFor: 'private' stamp: 'btr 12/1/2006 17:53'! noteChanged self changed: #installedPackageList. self changed: #notInstalledPackageList. super noteChanged." self changed: #packageNameList. self changed: #packagesListIndex. self changed: #categoriesForPackage. self contentsChanged."! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:34'! 
packageList ^ self packages select: [:e | (e categories anySatisfy: [:cat | cat = self selectedCategory]) and: [(filters ifNil: [#()]) allSatisfy: [:currFilter | (self perform: currFilter) value: e]]]! ! !SMLoaderPlus class methodsFor: 'parts bin' stamp: 'btr 11/22/2006 15:02'! descriptionForPartsBin ^self partName: 'Package Loader' categories: #(Tools) documentation: 'SqueakMap UI' ! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 12/1/2006 15:47'! initialize "Hook us up in the world menu." "self initialize" Smalltalk at: #ToolBuilder ifPresent: [:tb | self registerInFlapsRegistry. (Preferences windowColorFor: #SMLoader) = Color white "not set" ifTrue: [ Preferences setWindowColorFor: #SMLoader to: (Color colorFrom: self windowColorSpecification brightColor) ]. (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [| oldCmds | oldCmds := TheWorldMenu registry select: [:cmd | cmd first includesSubString: 'Package Loader']. oldCmds do: [:cmd | TheWorldMenu unregisterOpenCommand: cmd first]. TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]. DefaultFilters := OrderedCollection new. DefaultCategoriesToFilterIds := OrderedCollection new! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'! initializedInstance ^ (ToolBuilder open: self new) extent: 400 at 400! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'! new "Create a SqueakMap loader on the default map." ^self newOn: SMSqueakMap default! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'! newOn: aMap "Create a SqueakMap loader on given map." ^super new on: aMap; yourself! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'! newStandAlone ^ ToolBuilder open: self new! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:13'! open "Create and open a SqueakMap Loader." "SMLoaderPlus open" ^ (Smalltalk at: #ToolBuilder) open: self new! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'! openMenuString ^ 'SqueakMap Catalog'! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:21'! openOn: aSqueakMap "Create and open a SqueakMap Loader on a given map." "self openOn: SqueakMap default" ^ (Smalltalk at: #ToolBuilder) open: (self newOn: aSqueakMap)! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:18'! prototypicalToolWindow ^ ToolBuilder open: self new; applyModelExtent; yourself! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:02'! registerInFlapsRegistry "Register the receiver in the system's flaps registry." self environment at: #Flaps ifPresent: [:cl | (cl respondsTo: #registerQuad:forFlapNamed:) ifTrue: [cl registerQuad: #(#SMLoader #prototypicalToolWindow 'Package Loader' 'The SqueakMap Package Loader' ) forFlapNamed: 'Tools']]! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self environment at: #Flaps ifPresent: [:cl | cl unregisterQuadsWithReceiver: self] ! ! !SMLoaderPlus class methodsFor: 'window color' stamp: 'btr 11/22/2006 15:02'! windowColorSpecification "Answer a WindowColorSpec object that declares my preference." 
^WindowColorSpec classSymbol: self name wording: 'Package Loader' brightColor: Color yellow muchLighter duller pastelColor: Color yellow veryMuchLighter duller helpMessage: 'The SqueakMap Package Loader'! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! addFiltersToMenu: aMenu | filterSymbol help | self filterSpecs do: [:filterArray | filterSymbol := filterArray second. help := filterArray third. aMenu addUpdating: #showFilterString: target: self selector: #toggleFilterState: argumentList: (Array with: filterSymbol). aMenu balloonTextForLastItem: help]. aMenu addLine; addList: #(('Clear all filters' uncheckFilters 'Unchecks all filters to list all packages')) ! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! addSelectedCategoryAsFilter "Add a new filter that filters on the currently selected category. Make it enabled as default." categoriesToFilterIds add: self selectedCategory id! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 16:11'! askToLoadUpdates "Check how old the map is and ask to update it if it is older than 10 days or if there is no map on disk." | available | available := map isCheckpointAvailable. (available not or: [ (Date today subtractDate: (Date fromSeconds: (map directory directoryEntryFor: map lastCheckpointFilename) modificationTime)) > 3]) ifTrue: [ (self confirm: (available ifTrue: ['The map on disk is more than 10 days old, update it from the Internet?'] ifFalse: ['There is no map on disk, fetch it from the Internet?'])) ifTrue: [self loadUpdates]]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:43'! browseCacheDirectory "Open a FileList2 on the directory for the package or release." | item dir win | item := self selectedPackageOrRelease ifNil: [^ nil]. dir := item isPackage ifTrue: [map cache directoryForPackage: item] ifFalse: [map cache directoryForPackageRelease: item]. win := FileList2 morphicViewOnDirectory: dir. "withLabel: item name, ' cache directory'." win openInWorld! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'! buildButtonBarWith: aBuilder ^ aBuilder pluggablePanelSpec new model: self; layout: #horizontal; children: (self commandSpecs select: [ :spec | spec fourth includes: #all] thenCollect: [ :spec | aBuilder pluggableActionButtonSpec new model: self; label: spec first; action: spec second; help: spec third; enabled: ((spec fourth includes: #item) ifTrue: [#hasSelectedItem]); yourself]); name: #buttonBar; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/22/2006 15:02'! buildButtonNamed: labelText helpText: balloon action: action | btn | btn := PluggableButtonMorph on: self getState: nil action: action. btn color: Color transparent; hResizing: #shrinkWrap; vResizing: #spaceFill; label: labelText; setBalloonText: balloon; onColor: Color transparent offColor: Color transparent. ^ btn! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'! buildCategoriesListWith: aBuilder "Create the hierarchical list holding the category tree." ^ aBuilder pluggableTreeSpec new model: self; roots: #categoryList; getSelectedPath: #selectedCategoryPath; getChildren: #categoryChildren:; hasChildren: #categoryHasChildren:; setSelected: #selectedCategory:; menu: #categoriesMenu:; label: #categoryLabel:; autoDeselect: true; wantsDrop: true; name: #categoriesList; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildPackagePaneWith: aBuilder "Create the text area to the right in the loader." 
^ aBuilder pluggableTextSpec new model: self; getText: #itemDescription; name: #packagePane; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildPackagesListWith: aBuilder "Create the hierarchical list holding the packages and releases." ^ aBuilder pluggableTreeSpec new model: self; roots: #packageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; name: #packagesList; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildSearchPaneWith: aBuilder ^ aBuilder pluggableInputFieldSpec new model: self; selection: #searchSelection; getText: #searchText; setText: #findPackage:notifying:; name: #search; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:54'! buildWith: aBuilder "Create the package loader window." | buttonBarHeight vertDivide horizDivide | buttonBarHeight := 0.07. vertDivide := 0.6. horizDivide := 0.3. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight)); add: ((self buildSearchPaneWith: builder) frame: (0 @ buttonBarHeight corner: horizDivide @ (buttonBarHeight * 2))); add: ((self buildPackagesListWith: builder) frame: (0 @ (buttonBarHeight * 2) corner: horizDivide @ vertDivide)); add: ((self buildCategoriesListWith: builder) frame: (0 @ vertDivide corner: horizDivide @ 1)); add: ((self buildPackagePaneWith: builder) frame: (horizDivide @ buttonBarHeight corner: 1 @ 1)); yourself); yourself). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. window extent: self initialExtent. ^ window! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:38'! cachePackageReleaseAndOfferToCopy "Cache package release, then offer to copy it somewhere. Answer the chosen file's location after copy, or the cache location if no directory was chosen." | release installer newDir newName newFile oldFile oldName | release := self selectedPackageOrRelease. release isPackageRelease ifFalse: [ self error: 'Should be a package release!!']. installer := SMInstaller forPackageRelease: release. [UIManager default informUser: 'Caching ' , release asString during: [installer cache]] on: Error do: [:ex | | msg | msg := ex messageText ifNil: [ex asString]. self informException: ex msg: ('Error occurred during download:\', msg, '\') withCRs. ^nil ]. installer isCached ifFalse: [self inform: 'Download failed, see transcript for details'. ^nil]. oldName := installer fullFileName. newDir := FileList2 modalFolderSelector: installer directory. newDir ifNil: [ ^oldName ]. newDir = installer directory ifTrue: [ ^oldName ]. newName := newDir fullNameFor: installer fileName. newFile := FileStream newFileNamed: newName. newFile ifNil: [ ^oldName ]. newFile binary. oldFile := FileStream readOnlyFileNamed: oldName. oldFile ifNil: [ ^nil ]. oldFile binary. [[ newDir copyFile: oldFile toFile: newFile ] ensure: [ oldFile close. newFile close ]] on: Error do: [ :ex | ^oldName ]. ^newName! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! categoriesMenu: aMenu "Answer the categories-list menu." self selectedCategory ifNotNil: [aMenu addList: self categorySpecificOptions; addLine]. aMenu addList: self generalOptions. 
self addFiltersToMenu: aMenu. ^aMenu! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:44'! categoryChildren: aCategory ^ aCategory subCategories! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:45'! categoryHasChildren: aCategory ^ aCategory hasSubCategories! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:46'! categoryLabel: aCategory ^ aCategory name! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 11/30/2006 21:01'! categoryList "Create the category list for the hierarchical list. We sort the categories by name but ensure that 'Squeak versions' is first if it exists." | list first | list := (map categories select: [:each | each parent isNil]) asArray sort: [:c1 :c2 | c1 name <= c2 name]. first := list detect: [:any | any name = 'Squeak versions'] ifNone: []. first ifNotNil: [list := list copyWithout: first. list := {first} , list]. ^ list! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! categorySpecificOptions | choices | choices := OrderedCollection new. (categoriesToFilterIds includes: self selectedCategory id) ifTrue: [ choices add: #('Remove filter' #removeSelectedCategoryAsFilter 'Remove the filter for the selected category.')] ifFalse: [ choices add: #('Add as filter' #addSelectedCategoryAsFilter 'Add the selection as a filter to hide unrelated packages.')]. categoriesToFilterIds isEmpty ifFalse: [ choices add: #('Remove all filters' #removeCategoryFilters 'Remove all category filters.')]. ^ choices! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/22/2006 15:02'! changeFilters: anObject "Update my selection." | oldItem index | oldItem := self selectedPackageOrRelease. filters := anObject. self packagesListIndex: ((index := self packageList indexOf: oldItem) ifNil: [0] ifNotNil: [index]). self noteChanged! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:01'! commandSpecFor: selector ^ self commandSpecs detect: [:spec | spec second = selector]! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:00'! commandSpecs ^ #(('Install' installPackageRelease 'Install the latest version from the server.' (item all)) ('Email' emailPackageMaintainers 'Open an editor to send an email to the owner and co-maintainers of this package.' (item all)) ('Browse cache' browseCacheDirectory 'Browse cache directory of the selection.' (item all)) ('Copy from cache' cachePackageReleaseAndOfferToCopy 'Download selected release into cache first if needed, and then offer to copy it somewhere else.' (item)) ('Force download into cache' downloadPackageRelease 'Force a download of the selected release into the cache.' (item)) ('Update' loadUpdates 'Update the package index from the servers.' (all)) ('Upgrade All' upgradeInstalledPackagesConfirm 'Upgrade all installed packages (conf8irming each).' (all)) ('Upgrade all installed packages' upgradeInstalledPackagesNoConfirm '' (item)) ('Upgrade all installed packages confirming each' upgradeInstalledPackagesConfirm '' (item)) ('Copy list' listInPasteBuffer 'Puts the list as text into the clipboard.' (all)) ('Save filters' saveFiltersAsDefault 'Saves the current filters as default.' (all)) ('Help' help 'What is this?' (all)))! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/22/2006 15:02'! defaultButtonPaneHeight "Answer the user's preferred default height for new button panes." ^ Preferences parameterAt: #defaultButtonPaneHeight ifAbsentPut: [25]! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'! defaultLabel ^ 'SqueakMap Package Loader'! ! 
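SMLoaderPlus>>buildWith: above lays the window out purely with fractional frames: buttonBarHeight = 0.07, horizDivide = 0.3 and vertDivide = 0.6 are fractions of the window's bounds, so the proportions hold at whatever size the window is opened. As a worked example (a sketch only; real ToolBuilder/Morphic layout frames can also carry fixed pixel offsets, which are ignored here), this is what those fractions come to for the 500x400 extent the loader requests as its initialExtent:

    def frame_to_pixels(frame, width, height):
        # frame = (left, top, right, bottom), each a fraction in 0..1 of the
        # window's layout bounds; answers the corresponding pixel rectangle.
        left, top, right, bottom = frame
        return (round(left * width), round(top * height),
                round(right * width), round(bottom * height))

    # The fractions used by SMLoaderPlus>>buildWith:
    # (buttonBarHeight = 0.07, horizDivide = 0.3, vertDivide = 0.6).
    frames = {
        "buttonBar":      (0.0, 0.00, 1.0, 0.07),
        "search":         (0.0, 0.07, 0.3, 0.14),
        "packagesList":   (0.0, 0.14, 0.3, 0.60),
        "categoriesList": (0.0, 0.60, 0.3, 1.00),
        "packagePane":    (0.3, 0.07, 1.0, 1.00),
    }
    for name, frame in sorted(frames.items()):
        print(name, frame_to_pixels(frame, 500, 400))
    # packagesList -> (0, 56, 150, 240): the package tree gets the left 30%
    # of the window between the search field and the category tree, while the
    # description pane fills the remaining 70% on the right.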
!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:38'! downloadPackageRelease "Force a download of the selected package release into the cache." | release | release := self selectedPackageOrRelease. release isPackageRelease ifFalse: [ self error: 'Should be a package release!!']. [UIManager default informUser: 'Downloading ' , release asString during: [ (SMInstaller forPackageRelease: release) download] ] on: Error do: [:ex | | msg | msg := ex messageText ifNil: [ex asString]. self informException: ex msg: ('Error occurred during download:\', msg, '\') withCRs]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! emailPackageMaintainers "Send mail to package owner and co-maintainers." | item package toAddresses | item := self selectedPackageOrRelease ifNil: [^ nil]. package := item isPackageRelease ifTrue: [item package] ifFalse: [item]. "(this logic should be moved to MailMessage as soon as it can handle multiple To: addresses)" toAddresses := '<', package owner email, '>'. package maintainers ifNotNil: [ package maintainers do: [:maintainer | toAddresses := toAddresses, ', <', maintainer email, '>']]. SMUtilities sendMailTo: toAddresses regardingPackageRelease: item! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! filterAdd: anObject self changeFilters: (self filters copyWith: anObject) ! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterAutoInstall ^[:package | package isInstallable]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:42'! filterAvailable ^[:package | package isAvailable]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterInstalled ^[:package | package isInstalled]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterNotInstalledYet ^[:package | package isInstalled not]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:42'! filterNotUptoDate ^[:package | package isAvailable]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterPublished ^[:package | package isPublished]! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! filterRemove: anObject self changeFilters: (self filters copyWithout: anObject) ! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:43'! filterSafelyAvailable ^[:package | package isSafelyAvailable]! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/30/2006 21:07'! filterSpecs "Return a specification for the filter menu. Is called each time." | specs | specs := #(#('Auto-installable packages' #filterAutoInstall 'display only packages that can be installed automatically') #('New available packages' #filterAvailable 'display only packages that are not installed or that have newer releases available.') #('New safely-available packages' #filterSafelyAvailable 'display only packages that are not installed or that have newer releases available that are safe to install, meaning that they are published and meant for the current version of Squeak.') #('Installed packages' #filterInstalled 'Display only packages that are installed.') #('Published packages' #filterPublished 'Display only packages that have at least one published release.') ) asOrderedCollection. categoriesToFilterIds do: [:catId | specs add: {'Packages in ' , (map object: catId) name. catId. 'Display only packages that are in the category.'}]. ^ specs! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:43'! 
filterVersion "Ignore spaces in the version string, they're sometimes spurious. Not used anymore." ^[:package | package categories anySatisfy: [:cat | (cat name, '*') match: (Smalltalk version copyWithout: $ ) ]]! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! filters ^filters! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/24/2006 13:49'! findPackage: aString notifying: aView "Search and select a package with the given (sub) string in the name or description. " | index list match descriptions | match := aString asString asLowercase. index := self packagesListIndex. list := self packageNameList. list isEmpty ifTrue: [^ self]. descriptions := self packageList collect: [:e | e description]. index + 1 to: list size do: [:i | (((list at: i) includesSubstring: match caseSensitive: false) or: [(descriptions at: i) includesSubstring: match caseSensitive: false]) ifTrue: [^ self packagesListIndex: i]]. "wrap around" 1 to: index do: [:i | (((list at: i) includesSubstring: match caseSensitive: false) or: [(descriptions at: i) includesSubstring: match caseSensitive: false]) ifTrue: [^ self packagesListIndex: i]]. self inform: 'No package matching ' , aString asString! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! generalOptions ^#( #('Upgrade all installed packages' upgradeInstalledPackagesNoConfirm) #('Upgrade all installed packages confirming each' upgradeInstalledPackagesConfirm) #('Put list in paste buffer' listInPasteBuffer) #('Save filters as default' saveFiltersAsDefault) #- ) ! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 18:36'! hasSelectedItem ^ self selectedPackageOrRelease notNil! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:44'! help "Present help text. If there is a web server available, offer to open it. Use the WebBrowser registry if possible, or Scamper if available." | message browserClass | message := 'Welcome to the SqueakMap package loader. The names of packages are followed by versions: (installed -> latest). If there is no arrow, your installed version of the package is the latest. Bold packages and releases have been installed. The checkbox menu items modify which packages you''ll see. Take a look at them - only some packages are shown initially. The options available for a package depend on how it was packaged. Comment on a package by emailing the author or the squeak list.'. browserClass := Smalltalk at: #WebBrowser ifPresent: [ :registry | registry default ]. browserClass := browserClass ifNil: [ Smalltalk at: #Scamper ifAbsent: [ ^self inform: message ]]. (self confirm: message, ' Would you like to view more detailed help on the SqueakMap swiki page?') ifTrue: [ browserClass openOnUrl: 'http://wiki.squeak.org/2726' asUrl]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 15:02'! informException: ex msg: msg "Tell the user that an error has occurred. Offer to open debug notifier." (self confirm: msg, 'Would you like to open a debugger?') ifTrue: [ex pass]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 05:28'! initialExtent ^500 at 400! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! installPackageRelease "Install selected package or release. The cache is used." | item release | item := self selectedPackageOrRelease ifNil: [^ nil]. 
item isPackageRelease ifTrue: [ (item isPublished or: [self confirm: 'Selected release is not published yet, install anyway?']) ifTrue: [^self installPackageRelease: item]] ifFalse: [ release := item lastPublishedReleaseForCurrentSystemVersion. release ifNil: [ (self confirm: 'The package has no published release for your Squeak version, try releases for any Squeak version?') ifTrue: [ release := item lastPublishedRelease. release ifNil: [ (self confirm: 'The package has no published release at all, take the latest of the unpublished releases?') ifTrue: [release := item lastRelease]]]]. release ifNotNil: [^self installPackageRelease: release]]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 12/1/2006 01:53'! installPackageRelease: aRelease "Install a package release. The cache is used." | myRelease installer | aRelease isCompatibleWithCurrentSystemVersion ifFalse: [(self confirm: 'The package you are about to install is not listed as being compatible with your image version (', SystemVersion current majorMinorVersion, '), so the package may not work properly. Do you still want to proceed with the install?') ifFalse: [^ self]]. myRelease := self installedReleaseOfMe. installer := SMInstaller forPackageRelease: aRelease. [UIManager default informUser: 'Downloading ' , aRelease asString during: [installer download]. UIManager default informUser: 'Installing ' , aRelease asString during: [ installer install. myRelease = self installedReleaseOfMe ifFalse: [self reOpen] ifTrue: [self noteChanged]] ] on: Error do: [:ex | | msg | msg := ex messageText ifNil:[ex asString]. self informException: ex msg: ('Error occurred during install:\', msg, '\') withCRs].! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 15:02'! installedReleaseOfMe "Return the release of the installed package loader." ^SMSqueakMap default installedReleaseOf: (SMSqueakMap default packageWithId: '941c0108-4039-4071-9863-a8d7d2b3d4a3').! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:44'! itemChildren: anItem ^ anItem isPackage ifTrue: [anItem releases] ifFalse: [#()]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 19:56'! itemDescription ^ self selectedPackageOrRelease ifNil: [''] ifNotNilDo: [:item | item fullDescription]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:45'! itemHasChildren: anItem ^ anItem isPackage and: [anItem releases notEmpty]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:44'! itemLabel: anItem | label | label := anItem isPackage ifTrue: [anItem name , (anItem versionLabel ifEmpty: [''] ifNotEmptyDo: [:lbl | ' (' , anItem versionLabel , ')'])] ifFalse: [anItem smartVersion]. ^ anItem isInstalled ifTrue: [label asText allBold] ifFalse: [label]! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 11/24/2006 17:17'! label ^ self labelForShown: (packagesList ifNil: [self packageList])! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! labelForFilter: aFilterSymbol ^(self filterSpecs detect: [:fs | fs second = aFilterSymbol]) first! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'! labelForShown: packagesShown "Update the label of the window." ^ self defaultLabel , ' (', (packagesShown size < map packages size ifTrue: [packagesShown size printString, ' shown out of '] ifFalse: ['']) , map packages size printString, ' packages)'! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! listInPasteBuffer "Useful when talking with people etc. 
Uses the map to produce a nice String." Clipboard clipboardText: (String streamContents: [:s | packagesList do: [:p | s nextPutAll: p nameWithVersionLabel; cr ]]) asText! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:31'! loadUpdates [UIManager default informUser: 'Loading Updates' during: [ map loadUpdates. self noteChanged ] ] on: Error do: [:ex | self informException: ex msg: ('Error occurred when updating map:\', ex messageText, '\') withCRs]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/24/2006 14:05'! noteChanged filters ifNil: [^ self reOpen]. map ifNotNil: [packagesList := nil. selectedCategory := nil. self changed: #categoryList. self changed: #packageList. self changed: #packagesListIndex. "update my selection" self contentsChanged]! ! !SMLoaderPlus methodsFor: 'initialization' stamp: 'btr 11/22/2006 16:11'! on: aSqueakMap "Initialize instance." map := aSqueakMap. map synchWithDisk. filters := DefaultFilters copy. categoriesToFilterIds := DefaultCategoriesToFilterIds copy. self askToLoadUpdates! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! package: aPackage filteredByCategory: aCategory "Answer true if the package should be shown if we filter on . It should be shown if itself or any of its releases has the category." | releases | releases := aPackage releases. ^(aPackage hasCategoryOrSubCategoryOf: aCategory) or: [ releases anySatisfy: [:rel | rel hasCategoryOrSubCategoryOf: aCategory]]! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:49'! packageList "Return a list of the SMPackages that should be visible by applying all the filters. Also filter based on the currently selected category - if any." | list | list := packagesList ifNil: [packagesList := self packageListCalculated]. selectedCategory ifNotNil: [ list := list select: [:each | self package: each filteredByCategory: selectedCategory]]. self updateLabel: list. ^ list! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:49'! packageListCalculated "Return a list of the SMPackages that should be visible by applying all the filters. Also filter based on the currently selected category - if any." ^ self packages select: [:p | filters allSatisfy: [:currFilter | currFilter isSymbol ifTrue: [(self perform: currFilter) value: p] ifFalse: [self package: p filteredByCategory: (map object: currFilter)]]]! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'! packageNameList ^ self packageList collect: [:e | e name]! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:30'! packageSpecificOptions | choices packageOrRelease | packageOrRelease := self selectedPackageOrRelease. choices := OrderedCollection new. packageOrRelease isInstallable ifTrue: [ choices add: (self commandSpecFor: #installPackageRelease)]. (packageOrRelease isDownloadable and: [packageOrRelease isCached]) ifTrue: [ choices add: (self commandSpecFor: #browseCacheDirectory)]. (packageOrRelease isPackageRelease and: [packageOrRelease isDownloadable]) ifTrue: [ choices add: (self commandSpecFor: #cachePackageReleaseAndOfferToCopy). choices add: (self commandSpecFor: #downloadPackageRelease)]. choices add: (self commandSpecFor: #emailPackageMaintainers). ^ choices! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 16:11'! packages "We request the packages as sorted by name by default." ^map packagesByName asArray ! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:01'! packagesListIndex ^ self packageList indexOf: self selectedItem! ! 
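The filtering pipeline spread across #packageList, #packageListCalculated and #package:filteredByCategory: above boils down to one rule: a package is shown only if it satisfies every active filter, where a filter is either a selector (performed on the loader to obtain a one-argument test block, e.g. #filterInstalled) or the id of a category added via 'Add as filter', and the category currently selected in the tree, if any, narrows the result once more. A compact Python sketch of that rule follows; the package dictionaries and helper names are invented for illustration, and the category test is simplified to the package itself (the real #package:filteredByCategory: also inspects each release).

    def filter_installed(package):        # cf. #filterInstalled
        return package["installed"]

    def filter_published(package):        # cf. #filterPublished
        return package["published"]

    named_filters = {"filterInstalled": filter_installed,
                     "filterPublished": filter_published}

    def package_in_category(package, category_id):
        # Simplified stand-in for #package:filteredByCategory:.
        return category_id in package["categories"]

    def package_list(packages, filters, selected_category=None):
        # A package is listed only if *all* active filters accept it
        # (cf. #packageListCalculated); the selected category, if any,
        # narrows the result further (cf. #packageList).
        shown = []
        for p in packages:
            ok = all(named_filters[f](p) if f in named_filters
                     else package_in_category(p, f)
                     for f in filters)
            if ok and (selected_category is None
                       or package_in_category(p, selected_category)):
                shown.append(p)
        return shown

    demo = [
        {"name": "SqueakMap", "installed": True,  "published": True,
         "categories": {"tools"}},
        {"name": "Seaside",   "installed": False, "published": True,
         "categories": {"web"}},
    ]
    print([p["name"] for p in package_list(demo, ["filterInstalled"])])
    # -> ['SqueakMap']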
!SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:01'! packagesListIndex: anObject self selectedItem: (anObject = 0 ifFalse: [self packageList at: anObject])! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! packagesMenu: aMenu "Answer the packages-list menu." self selectedPackageOrRelease ifNotNil: [aMenu addList: self packageSpecificOptions; addLine]. aMenu addList: self generalOptions. self addFiltersToMenu: aMenu. ^aMenu! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:45'! perform: selector orSendTo: otherTarget "Selector was just chosen from a menu by a user. If can respond, then perform it on myself. If not, send it to otherTarget, presumably the editPane from which the menu was invoked." ^ (self respondsTo: selector) ifTrue: [self perform: selector] ifFalse: [super perform: selector orSendTo: otherTarget]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/26/2006 23:22'! reOpen "Close this package loader, probably because it has been updated, and open a new one." self inform: 'This package loader has been upgraded and will be closed and reopened to avoid strange side effects.'. window delete. (Smalltalk at: self class name) open! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! removeCategoryFilters "Remove all category filters." categoriesToFilterIds := OrderedCollection new! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! removeSelectedCategoryAsFilter "Remove the filter that filters on the currently selected category." categoriesToFilterIds remove: self selectedCategory id! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! saveFiltersAsDefault "Save the current filters as default so that they are selected the next time the loader is opened." DefaultFilters := filters copy. DefaultCategoriesToFilterIds := categoriesToFilterIds copy! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:35'! searchSelection "Selects all of the default search text so that a type-in overwrites it." ^ {1. self searchText size}! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:35'! searchText "A dummy default search text so that the field describes its purpose." ^ 'Search packages'! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:02'! selectedCategory "Return selected category." ^ selectedCategory! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:37'! selectedCategory: anSMCategory "Change the selected category." selectedCategory := anSMCategory. selectedCategory ifNotNil: [(selectedCategory objects includes: self selectedItem) ifFalse: [self selectedItem: nil]]. self changed: #selectedCategory. self changed: #packageList! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:52'! selectedCategoryPath "Return selected category's path." | path | path := #(). selectedCategory ifNotNil: [selectedCategory parent ifNotNilDo: [:p | path := path copyWith: p]. path := path copyWith: selectedCategory]. ^ path collect: [:cat | self categoryLabel: cat]! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:02'! selectedItem ^ selectedItem! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:27'! selectedItem: anItem "This == workaround protects us from recursion since ToolBuilder's tree widgets will always tell us that the selection has been updated when we tell it that the selection path has been updated. Cleaner solutions invited." anItem == selectedItem ifFalse: [ selectedItem := anItem. 
self changed: #selectedItemPath. self changed: #itemDescription. self changed: #hasSelectedItem]! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:16'! selectedItemPath | path | path := #(). (selectedItem isKindOf: SMPackageRelease) ifTrue: [path := path copyWith: selectedItem package]. selectedItem ifNotNil: [path := path copyWith: selectedItem]. ^ path! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:03'! selectedPackageOrRelease "Return selected package or package release." ^ selectedItem! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! showFilterString: aFilterSymbol ^(self stateForFilter: aFilterSymbol), (self labelForFilter: aFilterSymbol)! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! stateForFilter: aFilterSymbol ^(self filters includes: aFilterSymbol) ifTrue: [''] ifFalse: [''] ! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! toggleFilterState: aFilterSymbol ^(self filters includes: (aFilterSymbol)) ifTrue: [self filterRemove: aFilterSymbol] ifFalse: [self filterAdd: aFilterSymbol]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! uncheckFilters "Uncheck all filters." filters := OrderedCollection new. self noteChanged! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'! updateLabel: packagesShown "Update the label of the window." window ifNotNilDo: [:w | w setLabel: (self labelForShown: packagesShown)]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:29'! upgradeInstalledPackages "Tries to upgrade all installed packages to the latest published release for this version of Squeak. So this is a conservative approach." | installed old myRelease toUpgrade info | installed := map installedPackages. old := map oldPackages. old isEmpty ifTrue: [ ^self inform: 'All ', installed size printString, ' installed packages are up to date.']. toUpgrade := map upgradeableAndOldPackages. toUpgrade isEmpty ifTrue: [ ^self inform: 'None of the ', old size printString, ' old packages of the ', installed size printString, ' installed can be automatically upgraded. You need to upgrade them manually.']. info := old size < toUpgrade size ifTrue: [ 'Of the ', old size printString, ' old packages only ', toUpgrade size printString, ' can be upgraded. The following packages will not be upgraded: ', (String streamContents: [:s | (old removeAll: toUpgrade; yourself) do: [:p | s nextPutAll: p nameWithVersionLabel; cr]])] ifFalse: ['All old packages upgradeable.']. (self confirm: info, ' About to upgrade the following packages: ', (String streamContents: [:s | toUpgrade do: [:p | s nextPutAll: p nameWithVersionLabel; cr]]), 'Proceed?') ifTrue: [ myRelease := self installedReleaseOfMe. [UIManager default informUser: 'Upgrading Installed Packages' during: [ map upgradeOldPackages. self inform: toUpgrade size printString, ' packages successfully upgraded.'. myRelease = self installedReleaseOfMe ifFalse: [self reOpen] ifTrue: [self noteChanged]] ] on: Error do: [:ex | self informException: ex msg: ('Error occurred when upgrading old packages:\', ex messageText, '\') withCRs]]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! upgradeInstalledPackagesConfirm "Tries to upgrade all installed packages to the latest published release for this version of Squeak. Confirms on each upgrade." ^ self upgradeInstalledPackagesConfirm: true! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 12/1/2006 01:29'! 
upgradeInstalledPackagesConfirm: confirmEach "Tries to upgrade all installed packages to the latest published release for this version of Squeak. If confirmEach is true we ask for every upgrade. " | installed old myRelease toUpgrade info | installed := map installedPackages. old := map oldPackages. old isEmpty ifTrue: [^ self inform: 'All ' , installed size printString , ' installed packages are up to date.']. toUpgrade := map upgradeableAndOldPackages. toUpgrade isEmpty ifTrue: [^ self inform: 'None of the ' , old size printString , ' old packages of the ' , installed size printString , ' installed can be automatically upgraded. You need to upgrade them manually.']. info := old size < toUpgrade size ifTrue: ['Of the ' , old size printString , ' old packages only ' , toUpgrade size printString , ' can be upgraded. The following packages will not be upgraded: ' , (String streamContents: [:s | (old removeAll: toUpgrade; yourself) do: [:p | s nextPutAll: p nameWithVersionLabel; cr]])] ifFalse: ['All old packages upgradeable.']. (self confirm: info , ' About to upgrade the following packages: ' , (String streamContents: [:s | toUpgrade do: [:p | s nextPutAll: p nameWithVersionLabel; cr]]) , 'Proceed?') ifTrue: [myRelease := self installedReleaseOfMe. [UIManager default informUser: 'Upgrading Installed Packages' during: [confirmEach ifTrue: [map upgradeOldPackagesConfirmBlock: [:p | self confirm: 'Upgrade ' , p installedRelease packageNameWithVersion , ' to ' , (p lastPublishedReleaseForCurrentSystemVersionNewerThan: p installedRelease) listName , '?']] ifFalse: [map upgradeOldPackages]. self inform: toUpgrade size printString , ' packages successfully processed.'. myRelease = self installedReleaseOfMe ifTrue: [self noteChanged] ifFalse: [self reOpen]]] on: Error do: [:ex | self informException: ex msg: ('Error occurred when upgrading old packages:\' , ex messageText , '\') withCRs]]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! upgradeInstalledPackagesNoConfirm "Tries to upgrade all installed packages to the latest published release for this version of Squeak. No confirmation on each upgrade." ^ self upgradeInstalledPackagesConfirm: false! ! !SMPackageWrapper methodsFor: 'comparing' stamp: 'dvf 9/21/2003 16:25' prior: 27998626! = anObject ^self withoutListWrapper = anObject withoutListWrapper! ! !SMPackageWrapper methodsFor: 'converting' stamp: 'btr 11/22/2006 00:54' prior: 27998778! asString | string | string := item name, ' (', item versionLabel, ')'. item isInstalled ifTrue: [string := string asText allBold]. "(string includesSubString: '->') ifTrue: [string := string asText color: Color green]." ^ string! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'dvf 10/14/2003 18:58' prior: 27998902! contents ^item releases reversed collect: [:e | SMPackageReleaseWrapper with: e]! ! !SMPackageWrapper methodsFor: 'testing' stamp: 'dvf 9/21/2003 16:25' prior: 27999070! hash ^self withoutListWrapper hash! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:55'! help ^ 'This shows all packages with their releases that should be displayed according the current filter.'! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:49'! label ^ self asString! ! !SMPackageWrapper methodsFor: 'printing' stamp: 'dvf 9/21/2003 16:22' prior: 27999192! printOn: aStream aStream nextPutAll: 'wrapper for: ', item printString! ! !SMCategoryWrapper methodsFor: 'comparing' stamp: 'ar 2/9/2004 02:13' prior: 27849043! 
= anObject ^self withoutListWrapper = anObject withoutListWrapper! ! !SMCategoryWrapper methodsFor: 'converting' stamp: 'btr 11/30/2006 18:53' prior: 27849195! asString ^ item name , ' (' , self numberOfObjects printString , ')'! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'ar 2/9/2004 02:35' prior: 27849301! category ^item! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/30/2006 21:02' prior: 27849402! contents ^ item subCategories collect: [:n | self class with: n model: n]! ! !SMCategoryWrapper methodsFor: 'model access' stamp: 'btr 11/30/2006 21:02'! getList ^ Array with: (self class with: self contents model: model)! ! !SMCategoryWrapper methodsFor: 'testing' stamp: 'btr 11/30/2006 18:53'! hasContents ^ item hasSubCategories! ! !SMCategoryWrapper methodsFor: 'comparing' stamp: 'ar 2/9/2004 02:13' prior: 27849700! hash ^self withoutListWrapper hash! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:56'! help ^ 'The categories are structured in a tree. Packages and package releases belong to several categories. You can add one or more categories as filters and enable them in the menu.'! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'BJP 11/22/2002 14:17'! model ^model! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/30/2006 18:53'! numberOfObjects " | total | total _ 0. model allCategoriesDo: [:c | total _ total + c objects size]. ^total" ^item objects size! ! !SMPackageReleaseWrapper methodsFor: 'converting' stamp: 'btr 11/30/2006 21:30' prior: 27997393! asString "Show installed releases with a trailing asterisk." | string | string := item smartVersion. "Older SMBase versions don't have isInstalled.'" (item respondsTo: #isInstalled) ifTrue: [item isInstalled ifTrue: [string := (string , ' *') asText allBold]]. ^ string! ! !SMPackageReleaseWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 17:14'! contents ^ #()! ! !SMPackageReleaseWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:49'! label ^ self asString ! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 12/1/2006 15:47' prior: 27944626! initialize "Hook us up in the world menu." "self initialize" Smalltalk at: #ToolBuilder ifAbsent: [self registerInFlapsRegistry. (Preferences windowColorFor: #SMLoader) = Color white ifTrue: ["not set" Preferences setWindowColorFor: #SMLoader to: (Color colorFrom: self windowColorSpecification brightColor)]. (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [| oldCmds | oldCmds := TheWorldMenu registry select: [:cmd | cmd first includesSubString: 'Package Loader']. oldCmds do: [:cmd | TheWorldMenu unregisterOpenCommand: cmd first]. TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]. DefaultFilters := OrderedCollection new. DefaultCategoriesToFilterIds := OrderedCollection new! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:52'! openMenuString ^ 'SqueakMap Catalog'! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:52' prior: 27945298! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self environment at: #Flaps ifPresent: [:cl | cl unregisterQuadsWithReceiver: self] ! ! !SMLoader methodsFor: 'menus' stamp: 'btr 11/21/2006 16:08' prior: 54331069! addFiltersToMenu: aMenu | filterSymbol help | self filterSpecs do: [:filterArray | filterSymbol := filterArray second. help := filterArray third. 
aMenu addUpdating: #showFilterString: target: self selector: #toggleFilterState: argumentList: (Array with: filterSymbol). aMenu balloonTextForLastItem: help]. aMenu addLine; addList: #(('Clear all filters' uncheckFilters 'Unchecks all filters to list all packages')) ! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 01:15' prior: 27927912! browseCacheDirectory "Open a FileList2 on the directory for the package or release." | item dir win | item := self selectedPackageOrRelease ifNil: [^ nil]. item ifNil: [^nil]. dir := item isPackage ifTrue: [model cache directoryForPackage: item] ifFalse: [model cache directoryForPackageRelease: item]. win := FileList2 morphicViewOnDirectory: dir. " withLabel: item name, ' cache directory'." win openInWorld ! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 14:52'! buildButtonBar | aRow btn | aRow := AlignmentMorph newRow beSticky. aRow color: Color transparent; clipSubmorphs: true. self buttonSpecs do: [:spec | btn := self buildButtonNamed: spec first helpText: spec third action: spec second. aRow addMorphBack: btn] separatedBy: [aRow addTransparentSpacerOfSize: 3 at 0]. ^ aRow! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 01:27'! buildButtonNamed: labelText helpText: balloon action: action | btn | btn := PluggableButtonMorph on: self getState: nil action: action. btn color: Color transparent; hResizing: #shrinkWrap; vResizing: #spaceFill; label: labelText; setBalloonText: balloon; onColor: Color transparent offColor: Color transparent. ^ btn! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 19:04' prior: 27928394! buildMorphicCategoriesList "Create the hierarchical list holding the category tree." | list | list := (SimpleHierarchicalListMorph on: self list: #categoryWrapperList selected: #selectedCategoryWrapper changeSelected: #selectedCategoryWrapper: menu: #categoriesMenu: keystroke: nil) autoDeselect: true; enableDrag: false; enableDrop: true; yourself. list setBalloonText: 'The categories are structured in a tree. Packages and package releases belong to several categories. You can add one or more categories as filters and enable them in the menu.'. "list scroller submorphs do:[:each| list expandAll: each]." list adjustSubmorphPositions. ^ list! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 00:22' prior: 27929139! buildMorphicPackagesList "Create the hierarchical list holding the packages and releases." ^(SimpleHierarchicalListMorph on: self list: #packageWrapperList selected: #selectedItemWrapper changeSelected: #selectedItemWrapper: menu: #packagesMenu: keystroke: nil) autoDeselect: false; enableDrag: false; enableDrop: true; setBalloonText: 'This shows all packages with their releases that should be displayed according the current filter.'; yourself! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 21:13'! buildPackageButtonBar | aRow | "Somewhat patterned after IRCe's buttonRow method." aRow := AlignmentMorph newRow beSticky. aRow color: Color transparent; clipSubmorphs: true. ^ aRow! ! !SMLoader methodsFor: 'interface' stamp: 'gk 5/5/2006 02:05' prior: 27929686! buildPackagePane "Create the text area to the right in the loader." | ptm | ptm := PluggableTextMorph on: self text: #contents accept: nil readSelection: nil "#packageSelection " menu: nil. ptm setBalloonText: 'This is where the selected package or package release is displayed.'. ptm lock. ^ptm! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 21:08' prior: 27930070! 
buildSearchPane "Cribbed from MessageNames>>inMorphicWindowWithInitialSearchString:" | typeInView searchButton typeInPane | typeInView := PluggableTextMorph on: self text: nil accept: #findPackage:notifying: readSelection: nil menu: nil. typeInView acceptOnCR: true; vResizing: #spaceFill; hResizing: #spaceFill; setTextMorphToSelectAllOnMouseEnter; askBeforeDiscardingEdits: false; setProperty: #alwaysAccept toValue: true. (typeInView respondsTo: #hideScrollBarsIndefinitely) ifTrue: [typeInView hideScrollBarsIndefinitely] ifFalse: [typeInView hideScrollBarIndefinitely]. searchButton := SimpleButtonMorph new target: typeInView; color: Color white; label: 'Search'; actionSelector: #accept; arguments: #(); yourself. typeInPane := AlignmentMorph newRow. typeInPane vResizing: #shrinkWrap; hResizing: #shrinkWrap; listDirection: #leftToRight; addMorphFront: searchButton; addTransparentSpacerOfSize: 6 @ 0; addMorphBack: typeInView; setBalloonText: 'Type into the pane, then press Search (or hit RETURN) to visit the next package matching what you typed.'. ^ typeInPane! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 14:24'! buttonSpecs ^ #(('Install' installPackageRelease 'Install the latest version from the server.') ('Email' emailPackageMaintainers 'Open an editor to send an email to the owner and co-maintainers of this package.') ('Browse cache' browseCacheDirectory 'Browse cache directory of the selection.') ('Update' loadUpdates 'Update the package index from the servers.') ('Upgrade All' upgradeInstalledPackagesConfirm 'Upgrade all installed packages (confirming each).') ('Help' help 'What is this?'))! ! !SMLoader methodsFor: 'menus' stamp: 'btr 11/21/2006 16:11' prior: 27936393! categorySpecificOptions | choices | choices := OrderedCollection new. (categoriesToFilterIds includes: self selectedCategory id) ifTrue: [ choices add: #('Remove filter' #removeSelectedCategoryAsFilter 'Remove the filter for the selected category.')] ifFalse: [ choices add: #('Add as filter' #addSelectedCategoryAsFilter 'Add the selection as a filter to hide unrelated packages.')]. categoriesToFilterIds isEmpty ifFalse: [ choices add: #('Remove all filters' #removeCategoryFilters 'Remove all category filters.')]. ^ choices! ! !SMLoader methodsFor: 'lists' stamp: 'btr 11/30/2006 21:01' prior: 27933585! categoryWrapperList "Create the wrapper list for the hierarchical list. We sort the categories by name but ensure that 'Squeak versions' is first if it exists." | list first | list := (model categories select: [:each | each parent isNil]) asArray sort: [:c1 :c2 | c1 name <= c2 name]. first := list detect: [:any | any name = 'Squeak versions'] ifNone: []. first ifNotNil: [list := list copyWithout: first. list := {first} , list]. ^ list collect: [:cat | SMCategoryWrapper with: cat model: self]! ! !SMLoader methodsFor: 'filter utilities' stamp: 'gk 7/10/2004 15:45' prior: 27913226! changeFilters: anObject "Update my selection." | oldItem index | oldItem := self selectedPackageOrRelease. filters := anObject. self packagesListIndex: ((index := self packageList indexOf: oldItem) ifNil: [0] ifNotNil: [index]). self noteChanged! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 17:30' prior: 27930584! createWindow | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.3. horizDivide := 0.6. self addMorph: (self buildButtonBar borderWidth: 0) frame: (0.0 @ 0.0 corner: 1.0 @ buttonBarHeight). 
self addMorph: (self buildSearchPane borderWidth: 0) frame: (0.0 @ buttonBarHeight corner: vertDivide @ searchHeight). self addMorph: (self buildMorphicPackagesList borderWidth: 0) frame: (0.0 @ (buttonBarHeight + searchHeight) corner: vertDivide @ horizDivide). self addMorph: (self buildMorphicCategoriesList borderWidth: 0) frame: (0.0 @ horizDivide corner: vertDivide @ 1.0). self addMorph: (self buildPackagePane borderWidth: 0) frame: (vertDivide @ buttonBarHeight corner: 1.0 @ 1.0). self on: #mouseEnter send: #paneTransition: to: self. self on: #mouseLeave send: #paneTransition: to: self! ! !SMLoader methodsFor: 'interface' stamp: 'gk 7/12/2004 11:14' prior: 27931214! defaultButtonPaneHeight "Answer the user's preferred default height for new button panes." ^ Preferences parameterAt: #defaultButtonPaneHeight ifAbsentPut: [25]! ! !SMLoader methodsFor: 'interface' stamp: 'btr 12/1/2006 02:01'! defaultLabel ^'SqueakMap Package Loader'! ! !SMLoader methodsFor: 'actions' stamp: 'btr 11/22/2006 01:14' prior: 27917579! emailPackageMaintainers "Send mail to package owner and co-maintainers." | item package toAddresses | item := self selectedPackageOrRelease ifNil: [^ nil]. package := item isPackageRelease ifTrue: [item package] ifFalse: [item]. "(this logic should be moved to MailMessage as soon as it can handle multiple To: addresses)" toAddresses := '<', package owner email, '>'. package maintainers ifNotNil: [ package maintainers do: [:maintainer | toAddresses := toAddresses, ', <', maintainer email, '>']]. SMUtilities sendMailTo: toAddresses regardingPackageRelease: item! ! !SMLoader methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 00:14' prior: 27923782! filterSpecs "Return a specification for the filter menu. Is called each time." | specs | specs := #( #('Auto-installable packages' #filterAutoInstall 'display only packages that can be installed automatically') #('New available packages' #filterAvailable 'display only packages that are not installed or that have newer releases available.') #('New safely-available packages' #filterSafelyAvailable 'display only packages that are not installed or that have newer releases available that are safe to install, meaning that they are published and meant for the current version of Squeak.') From noreply at buildbot.pypy.org Sun Jan 19 18:04:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 19 Jan 2014 18:04:46 +0100 (CET) Subject: [pypy-commit] pypy default: Bah, thanks mjacob for spotting that 7812ad72a634 doesn't have Message-ID: <20140119170446.7E6551C1559@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68765:af2e2754fb20 Date: 2014-01-19 18:03 +0100 http://bitbucket.org/pypy/pypy/changeset/af2e2754fb20/ Log: Bah, thanks mjacob for spotting that 7812ad72a634 doesn't have any effect. 
diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py
--- a/rpython/translator/driver.py
+++ b/rpython/translator/driver.py
@@ -460,7 +460,7 @@
         targetdir = cbuilder.targetdir
         fname = dump_static_data_info(self.log, database, targetdir)
         dstname = self.compute_exe_name() + '.staticdata.info'
-        shutil.copy(str(fname), str(dstname))
+        shutil_copy(str(fname), str(dstname))
         self.log.info('Static data info written to %s' % dstname)
 
     def compute_exe_name(self):
@@ -476,11 +476,11 @@
         if self.exe_name is not None:
             exename = self.c_entryp
             newexename = mkexename(self.compute_exe_name())
-            shutil.copy(str(exename), str(newexename))
+            shutil_copy(str(exename), str(newexename))
             if self.cbuilder.shared_library_name is not None:
                 soname = self.cbuilder.shared_library_name
                 newsoname = newexename.new(basename=soname.basename)
-                shutil.copy(str(soname), str(newsoname))
+                shutil_copy(str(soname), str(newsoname))
                 self.log.info("copied: %s" % (newsoname,))
                 if sys.platform == 'win32':
                     # the import library is named python27.lib, according

From noreply at buildbot.pypy.org  Sun Jan 19 18:22:37 2014
From: noreply at buildbot.pypy.org (jerith)
Date: Sun, 19 Jan 2014 18:22:37 +0100 (CET)
Subject: [pypy-commit] pypy default: fix for issue #1671, ctypes array-in-struct keepalive
Message-ID: <20140119172237.E1D281C0178@cobra.cs.uni-duesseldorf.de>

Author: Jeremy Thurgood
Branch: 
Changeset: r68766:1329fe522dfb
Date: 2014-01-19 19:21 +0200
http://bitbucket.org/pypy/pypy/changeset/1329fe522dfb/

Log: fix for issue #1671, ctypes array-in-struct keepalive

diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py
--- a/lib_pypy/_ctypes/structure.py
+++ b/lib_pypy/_ctypes/structure.py
@@ -2,6 +2,8 @@
 import _rawffi
 from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\
      store_reference, ensure_objects, CArgObject
+from _ctypes.array import Array
+from _ctypes.pointer import _Pointer
 import inspect
 
 def names_and_fields(self, _fields_, superclass, anonymous_fields=None):
@@ -104,8 +106,11 @@
     def __set__(self, obj, value):
         fieldtype = self.ctype
         cobj = fieldtype.from_param(value)
-        if ensure_objects(cobj) is not None:
-            key = keepalive_key(self.num)
+        key = keepalive_key(self.num)
+        if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array):
+            # if our value is an Array we need the whole thing alive
+            store_reference(obj, key, cobj)
+        elif ensure_objects(cobj) is not None:
             store_reference(obj, key, cobj._objects)
         arg = cobj._get_buffer_value()
         if fieldtype._fficompositesize is not None:
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py b/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py
@@ -31,7 +31,17 @@
         assert p._objects == {}
         assert len(x._objects) == 1
         assert x._objects['0'] is p._objects
-
+
+    def test_simple_structure_and_pointer_with_array(self):
+        class X(Structure):
+            _fields_ = [('array', POINTER(c_int))]
+
+        x = X()
+        a = (c_int * 3)(1, 2, 3)
+        assert x._objects is None
+        x.array = a
+        assert x._objects['0'] is a
+
     def test_structure_with_pointers(self):
         class X(Structure):
             _fields_ = [('x', POINTER(c_int)),
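The changeset above only shows the bookkeeping inside PyPy's _ctypes; at the application level, the behaviour issue #1671 is about can be pictured with the following minimal sketch (ordinary ctypes code mirroring the new test in the diff; the class name X and the peek at the private _objects attribute are purely illustrative):

    from ctypes import Structure, POINTER, c_int

    class X(Structure):
        _fields_ = [('array', POINTER(c_int))]

    x = X()
    a = (c_int * 3)(1, 2, 3)
    x.array = a        # an Array object assigned to a POINTER field
    del a              # the structure is now the only thing referring to it
    # With the fix, the whole Array is stored in x._objects['0'], so the
    # memory behind the pointer stays alive as long as x does; before the
    # fix PyPy's _ctypes did not keep a reference to the Array itself and
    # the buffer could be reclaimed too early.
    assert x.array[2] == 3
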
From noreply at buildbot.pypy.org  Sun Jan 19 18:38:27 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Sun, 19 Jan 2014 18:38:27 +0100 (CET)
Subject: [pypy-commit] stmgc c7: Workarounds to avoid a bug of clang
Message-ID: <20140119173827.3374A1C0178@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: c7
Changeset: r649:92285a8ce7a6
Date: 2014-01-19 18:37 +0100
http://bitbucket.org/pypy/stmgc/changeset/92285a8ce7a6/

Log: Workarounds to avoid a bug of clang

diff --git a/duhton/frame.c b/duhton/frame.c
--- a/duhton/frame.c
+++ b/duhton/frame.c
@@ -1,4 +1,5 @@
 #include "duhton.h"
+#include 
 #include 
 
 typedef TLPREFIX struct dictentry_s {
@@ -98,6 +99,22 @@
     printf("");
 }
 
+static void _copy(dictentry_t *dst, dictentry_t *src)
+{
+    /* workaround for a bug in clang-3.4: cannot do "*dst = *src;" */
+    memcpy(_stm_real_address((object_t *)dst),
+           _stm_real_address((object_t *)src),
+           sizeof(dictentry_t));
+}
+
+static void _clear(dictentry_t *dst)
+{
+    /* workaround for a bug in clang-3.4: many "dst->field = NULL;"
+       turn into a single memset() call */
+    memset(_stm_real_address((object_t *)dst), 0,
+           sizeof(dictentry_t));
+}
+
 static dictentry_t *
 find_entry(DuFrameObject *frame, DuObject *symbol, int write_mode)
 {
@@ -154,21 +171,18 @@
         entries = ob->ob_items;
         for (i=0; iob_count; i++)
-            newentries[i] = entries[i-1];
+            _copy(&newentries[i], &entries[i-1]);
         _du_write1(frame);
         frame->ob_nodes = newob;

From noreply at buildbot.pypy.org  Sun Jan 19 20:06:10 2014
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Sun, 19 Jan 2014 20:06:10 +0100 (CET)
Subject: [pypy-commit] pypy default: Removed an unused feature of the translater and some long unused PPC assembler files
Message-ID: <20140119190610.37F151C1559@cobra.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: 
Changeset: r68767:681802e98b38
Date: 2014-01-19 13:05 -0600
http://bitbucket.org/pypy/pypy/changeset/681802e98b38/

Log: Removed an unused feature of the translater and some long unused PPC assembler files

- -EXTERNALS = {'LL_flush_icache': 'LL_flush_icache'} - -#______________________________________________________ def find_list_of_str(rtyper): for r in rtyper.reprs.itervalues(): @@ -80,14 +72,13 @@ return frags[0] for func, funcobj in db.externalfuncs.items(): - c_name = EXTERNALS[func] # construct a define LL_NEED_ to make it possible to isolate in-development externals and headers - modname = module_name(c_name) + modname = module_name(func) if modname not in modules: modules[modname] = True yield 'LL_NEED_%s' % modname.upper(), 1 funcptr = funcobj._as_ptr() - yield c_name, funcptr + yield func, funcptr def predeclare_exception_data(db, rtyper): # Exception-related types and constants diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -916,15 +916,6 @@ return sandbox_stub(fnobj, db) db.externalfuncs[fnobj._external_name] = fnobj return [] - elif fnobj._callable in extfunc.EXTERNALS: - # -- deprecated case -- - # 'fnobj' is one of the ll_xyz() functions with the suggested_primitive - # flag in rpython.rtyper.module.*. The corresponding C wrappers are - # written by hand in src/ll_*.h, and declared in extfunc.EXTERNALS. - if sandbox and not fnobj._name.startswith('ll_stack_'): # XXX!!! Temporary - return sandbox_stub(fnobj, db) - db.externalfuncs[fnobj._callable] = fnobj - return [] elif hasattr(fnobj, 'graph'): if sandbox and sandbox != "if_external": # apply the sandbox transformation diff --git a/rpython/translator/c/src/asm_ppc.c b/rpython/translator/c/src/asm_ppc.c deleted file mode 100644 --- a/rpython/translator/c/src/asm_ppc.c +++ /dev/null @@ -1,24 +0,0 @@ -#include "src/asm_ppc.h" - -#define __dcbst(base, index) \ - __asm__ ("dcbst %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") -#define __icbi(base, index) \ - __asm__ ("icbi %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") -#define __sync() __asm__ volatile ("sync") -#define __isync() \ - __asm__ volatile ("isync") - -void -LL_flush_icache(long base, long size) -{ - long i; - - for (i = 0; i < size; i += 32){ - __dcbst(base, i); - } - __sync(); - for (i = 0; i < size; i += 32){ - __icbi(base, i); - } - __isync(); -} diff --git a/rpython/translator/c/src/asm_ppc.h b/rpython/translator/c/src/asm_ppc.h deleted file mode 100644 --- a/rpython/translator/c/src/asm_ppc.h +++ /dev/null @@ -1,1 +0,0 @@ -void LL_flush_icache(long base, long size); From noreply at buildbot.pypy.org Sun Jan 19 21:03:20 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 19 Jan 2014 21:03:20 +0100 (CET) Subject: [pypy-commit] pypy refactor-translator: hg merge default Message-ID: <20140119200320.878951C1559@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-translator Changeset: r68768:9a92c5bf6242 Date: 2014-01-19 17:29 +0100 http://bitbucket.org/pypy/pypy/changeset/9a92c5bf6242/ Log: hg merge default diff too long, truncating to 2000 out of 10346 lines diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not 
supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -74,6 +74,10 @@ The actual details would be rather differen in PyPy, but we would like to have the same optimization implemented. +Or maybe not. We can also play around with the idea of using a single +representation: as a byte string in utf-8. (This idea needs some extra logic +for efficient indexing, like a cache.) + .. _`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/ Translation Toolchain diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -41,3 +41,6 @@ Fix 3 broken links on PyPy published papers in docs. .. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. 
diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -26,7 +26,12 @@ if branch == 'default': branch = 'trunk' -filename = 'pypy-c-jit-latest-%s.tar.bz2' % arch +if '--nojit' in sys.argv: + kind = 'nojit' +else: + kind = 'jit' + +filename = 'pypy-c-%s-latest-%s.tar.bz2' % (kind, arch) url = 'http://buildbot.pypy.org/nightly/%s/%s' % (branch, filename) tmp = py.path.local.mkdtemp() mydir = tmp.chdir() diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1234,6 +1234,8 @@ flags |= consts.CO_NESTED if scope.is_generator: flags |= consts.CO_GENERATOR + if scope.has_yield_inside_try: + flags |= consts.CO_YIELD_INSIDE_TRY if scope.has_variable_arg: flags |= consts.CO_VARARGS if scope.has_keywords_arg: diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -17,6 +17,7 @@ CO_FUTURE_UNICODE_LITERALS = 0x20000 #pypy specific: CO_KILL_DOCSTRING = 0x100000 +CO_YIELD_INSIDE_TRY = 0x200000 PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -43,6 +43,7 @@ self.child_has_free = False self.nested = False self.doc_removable = False + self._in_try_body_depth = 0 def lookup(self, name): """Find the scope of identifier 'name'.""" @@ -75,6 +76,14 @@ self.varnames.append(mangled) return mangled + def note_try_start(self, try_node): + """Called when a try is found, before visiting the body.""" + self._in_try_body_depth += 1 + + def note_try_end(self, try_node): + """Called after visiting a try body.""" + self._in_try_body_depth -= 1 + def note_yield(self, yield_node): """Called when a yield is found.""" raise SyntaxError("'yield' outside function", yield_node.lineno, @@ -210,6 +219,7 @@ self.has_variable_arg = False self.has_keywords_arg = False self.is_generator = False + self.has_yield_inside_try = False self.optimized = True self.return_with_value = False self.import_star = None @@ -220,6 +230,8 @@ raise SyntaxError("'return' with argument inside generator", self.ret.lineno, self.ret.col_offset) self.is_generator = True + if self._in_try_body_depth > 0: + self.has_yield_inside_try = True def note_return(self, ret): if ret.value: @@ -463,7 +475,12 @@ self.scope.new_temporary_name() if wih.optional_vars: self.scope.new_temporary_name() - ast.GenericASTVisitor.visit_With(self, wih) + wih.context_expr.walkabout(self) + if wih.optional_vars: + wih.optional_vars.walkabout(self) + self.scope.note_try_start(wih) + self.visit_sequence(wih.body) + self.scope.note_try_end(wih) def visit_arguments(self, arguments): scope = self.scope @@ -505,3 +522,16 @@ else: role = SYM_ASSIGNED self.note_symbol(name.id, role) + + def visit_TryExcept(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.handlers) + self.visit_sequence(node.orelse) + + def visit_TryFinally(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.finalbody) diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py 
b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -346,6 +346,25 @@ assert exc.msg == "'return' with argument inside generator" scp = self.func_scope("def f():\n return\n yield x") + def test_yield_inside_try(self): + scp = self.func_scope("def f(): yield x") + assert not scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n except: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n finally: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n with x: yield y") + assert scp.has_yield_inside_try + + def test_yield_outside_try(self): + for input in ("try: pass\n except: pass", + "try: pass\n except: yield y", + "try: pass\n finally: pass", + "try: pass\n finally: yield y", + "with x: pass"): + input = "def f():\n yield y\n %s\n yield y" % (input,) + assert not self.func_scope(input).has_yield_inside_try + def test_return(self): for input in ("class x: return", "return"): exc = py.test.raises(SyntaxError, self.func_scope, input).value diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -231,6 +231,11 @@ msg = "__int__ returned non-int (type '%T')" raise operationerrfmt(space.w_TypeError, msg, w_result) + def ord(self, space): + typename = space.type(self).getname(space) + msg = "ord() expected string of length 1, but %s found" + raise operationerrfmt(space.w_TypeError, msg, typename) + def __spacebind__(self, space): return self @@ -1396,6 +1401,9 @@ # This is here mostly just for gateway.int_unwrapping_space_method(). return bool(self.int_w(w_obj)) + def ord(self, w_obj): + return w_obj.ord(self) + # This is all interface for gateway.py. def gateway_int_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -520,12 +520,13 @@ # When a BuiltinCode is stored in a Function object, # you get the functionality of CPython's built-in function type. - def __init__(self, func, unwrap_spec=None, self_type=None, descrmismatch=None): + def __init__(self, func, unwrap_spec=None, self_type=None, + descrmismatch=None, doc=None): "NOT_RPYTHON" # 'implfunc' is the interpreter-level function. # Note that this uses a lot of (construction-time) introspection. 
Code.__init__(self, func.__name__) - self.docstring = func.__doc__ + self.docstring = doc or func.__doc__ self.identifier = "%s-%s-%s" % (func.__module__, func.__name__, getattr(self_type, '__name__', '*')) @@ -832,7 +833,7 @@ instancecache = {} def __new__(cls, f, app_name=None, unwrap_spec=None, descrmismatch=None, - as_classmethod=False): + as_classmethod=False, doc=None): "NOT_RPYTHON" # f must be a function whose name does NOT start with 'app_' @@ -861,7 +862,8 @@ cls.instancecache[key] = self self._code = BuiltinCode(f, unwrap_spec=unwrap_spec, self_type=self_type, - descrmismatch=descrmismatch) + descrmismatch=descrmismatch, + doc=doc) self.__name__ = f.func_name self.name = app_name self.as_classmethod = as_classmethod diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -155,20 +155,6 @@ code_name = self.pycode.co_name return space.wrap(code_name) - def __del__(self): - # Only bother enqueuing self to raise an exception if the frame is - # still not finished and finally or except blocks are present. - self.clear_all_weakrefs() - if self.frame is not None: - block = self.frame.lastblock - while block is not None: - if not isinstance(block, LoopBlock): - self.enqueue_for_destruction(self.space, - GeneratorIterator.descr_close, - "interrupting generator of ") - break - block = block.previous - # Results can be either an RPython list of W_Root, or it can be an # app-level W_ListObject, which also has an append() method, that's why we # generate 2 versions of the function and 2 jit drivers. @@ -211,3 +197,20 @@ return unpack_into unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() + + +class GeneratorIteratorWithDel(GeneratorIterator): + + def __del__(self): + # Only bother enqueuing self to raise an exception if the frame is + # still not finished and finally or except blocks are present. + self.clear_all_weakrefs() + if self.frame is not None: + block = self.frame.lastblock + while block is not None: + if not isinstance(block, LoopBlock): + self.enqueue_for_destruction(self.space, + GeneratorIterator.descr_close, + "interrupting generator of ") + break + block = block.previous diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR, CO_KILL_DOCSTRING) + CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash @@ -31,7 +31,7 @@ # Magic numbers for the bytecode version in code objects. # See comments in pypy/module/imp/importing. cpython_magic, = struct.unpack(" bytearray\n" + "\n" + "Create a bytearray object from a string of hexadecimal numbers.\n" + "Spaces between two numbers are accepted.\n" + "Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\\xb9\\x01\\xef')." 
+ hexstring = space.str_w(w_hexstring) + hexstring = hexstring.lower() + data = [] + length = len(hexstring) + i = -2 + while True: + i += 2 + while i < length and hexstring[i] == ' ': + i += 1 + if i >= length: + break + if i+1 == length: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % i)) + + top = _hex_digit_to_int(hexstring[i]) + if top == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % i)) + bot = _hex_digit_to_int(hexstring[i+1]) + if bot == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,))) + data.append(chr(top*16 + bot)) + + # in CPython bytearray.fromhex is a staticmethod, so + # we ignore w_type and always return a bytearray + return new_bytearray(space, space.w_bytearray, data) + + def descr_init(self, space, __args__): + # this is on the silly side + w_source, w_encoding, w_errors = __args__.parse_obj( + None, 'bytearray', init_signature, init_defaults) + + if w_source is None: + w_source = space.wrap('') + if w_encoding is None: + w_encoding = space.w_None + if w_errors is None: + w_errors = space.w_None + + # Unicode argument + if not space.is_w(w_encoding, space.w_None): + from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, encode_object + ) + encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + + # if w_source is an integer this correctly raises a TypeError + # the CPython error message is: "encoding or errors without a string argument" + # ours is: "expected unicode, got int object" + w_source = encode_object(space, w_source, encoding, errors) + + # Is it an int? + try: + count = space.int_w(w_source) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + if count < 0: + raise OperationError(space.w_ValueError, + space.wrap("bytearray negative count")) + self.data = ['\0'] * count + return + + data = makebytearraydata_w(space, w_source) + self.data = data + + def descr_repr(self, space): + s = self.data + + # Good default if there are no replacements. 
+ buf = StringBuilder(len("bytearray(b'')") + len(s)) + + buf.append("bytearray(b'") + + for i in range(len(s)): + c = s[i] + + if c == '\\' or c == "'": + buf.append('\\') + buf.append(c) + elif c == '\t': + buf.append('\\t') + elif c == '\r': + buf.append('\\r') + elif c == '\n': + buf.append('\\n') + elif not '\x20' <= c < '\x7f': + n = ord(c) + buf.append('\\x') + buf.append("0123456789abcdef"[n>>4]) + buf.append("0123456789abcdef"[n&0xF]) + else: + buf.append(c) + + buf.append("')") + + return space.wrap(buf.build()) + + def descr_str(self, space): + return space.wrap(''.join(self.data)) + + def descr_eq(self, space, w_other): + try: + return space.newbool(self._val(space) == self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_ne(self, space, w_other): + try: + return space.newbool(self._val(space) != self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_lt(self, space, w_other): + try: + return space.newbool(self._val(space) < self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_le(self, space, w_other): + try: + return space.newbool(self._val(space) <= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_gt(self, space, w_other): + try: + return space.newbool(self._val(space) > self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_ge(self, space, w_other): + try: + return space.newbool(self._val(space) >= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_buffer(self, space): + return BytearrayBuffer(self.data) + + def descr_inplace_add(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + self.data += w_other.data + else: + self.data += self._op_val(space, w_other) + return self + + def descr_inplace_mul(self, space, w_times): + try: + times = space.getindex_w(w_times, space.w_OverflowError) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + self.data *= times + return self + + def descr_setitem(self, space, w_index, w_other): + if isinstance(w_index, W_SliceObject): + oldsize = len(self.data) + start, stop, step, slicelength = w_index.indices4(space, oldsize) + sequence2 = makebytearraydata_w(space, w_other) + _setitem_slice_helper(space, self.data, start, step, + slicelength, sequence2, empty_elem='\x00') + else: + idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") + try: + self.data[idx] = getbytevalue(space, w_other) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("bytearray index out of range")) + + def descr_delitem(self, space, w_idx): + if isinstance(w_idx, W_SliceObject): + start, stop, step, slicelength = w_idx.indices4(space, + len(self.data)) + _delitem_slice_helper(space, self.data, start, step, slicelength) + else: + idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") + try: + del self.data[idx] + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("bytearray deletion index out of range")) + + def descr_append(self, space, 
w_item): + self.data.append(getbytevalue(space, w_item)) + + def descr_extend(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + self.data += w_other.data + else: + self.data += makebytearraydata_w(space, w_other) + return self + + def descr_insert(self, space, w_idx, w_other): + where = space.int_w(w_idx) + length = len(self.data) + index = get_positive_index(where, length) + val = getbytevalue(space, w_other) + self.data.insert(index, val) + return space.w_None + + @unwrap_spec(w_idx=WrappedDefault(-1)) + def descr_pop(self, space, w_idx): + index = space.int_w(w_idx) + try: + result = self.data.pop(index) + except IndexError: + if not self.data: + raise OperationError(space.w_IndexError, space.wrap( + "pop from empty bytearray")) + raise OperationError(space.w_IndexError, space.wrap( + "pop index out of range")) + return space.wrap(ord(result)) + + def descr_remove(self, space, w_char): + char = space.int_w(space.index(w_char)) + try: + self.data.remove(chr(char)) + except ValueError: + raise OperationError(space.w_ValueError, space.wrap( + "value not found in bytearray")) + + def descr_reverse(self, space): + self.data.reverse() + +def getbytevalue(space, w_value): + if space.isinstance_w(w_value, space.w_str): + string = space.str_w(w_value) + if len(string) != 1: + raise OperationError(space.w_ValueError, space.wrap( + "string must be of size 1")) + return string[0] + + value = space.getindex_w(w_value, None) + if not 0 <= value < 256: + # this includes the OverflowError in case the long is too large + raise OperationError(space.w_ValueError, space.wrap( + "byte must be in range(0, 256)")) + return chr(value) + +def new_bytearray(space, w_bytearraytype, data): + w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) + W_BytearrayObject.__init__(w_obj, data) + return w_obj + + +def makebytearraydata_w(space, w_source): + # String-like argument + try: + string = space.bufferstr_new_w(w_source) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + return [c for c in string] + + # sequence of bytes + w_iter = space.iter(w_source) + length_hint = space.length_hint(w_source, 0) + data = newlist_hint(length_hint) + extended = 0 + while True: + try: + w_item = space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + value = getbytevalue(space, w_item) + data.append(value) + extended += 1 + if extended < length_hint: + resizelist_hint(data, extended) + return data + +def _hex_digit_to_int(d): + val = ord(d) + if 47 < val < 58: + return val - 48 + if 96 < val < 103: + return val - 87 + return -1 + + +class BytearrayDocstrings: + """bytearray(iterable_of_ints) -> bytearray + bytearray(string, encoding[, errors]) -> bytearray + bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray + bytearray(memory_view) -> bytearray + + Construct an mutable bytearray object from: + - an iterable yielding integers in range(256) + - a text string encoded using the specified encoding + - a bytes or a bytearray object + - any object implementing the buffer API. + + bytearray(int) -> bytearray. + + Construct a zero-initialized bytearray of the given length. + + """ + + def __add__(): + """x.__add__(y) <==> x+y""" + + def __alloc__(): + """B.__alloc__() -> int + + Return the number of bytes actually allocated. 
+ """ + + def __contains__(): + """x.__contains__(y) <==> y in x""" + + def __delitem__(): + """x.__delitem__(y) <==> del x[y]""" + + def __eq__(): + """x.__eq__(y) <==> x==y""" + + def __ge__(): + """x.__ge__(y) <==> x>=y""" + + def __getattribute__(): + """x.__getattribute__('name') <==> x.name""" + + def __getitem__(): + """x.__getitem__(y) <==> x[y]""" + + def __gt__(): + """x.__gt__(y) <==> x>y""" + + def __iadd__(): + """x.__iadd__(y) <==> x+=y""" + + def __imul__(): + """x.__imul__(y) <==> x*=y""" + + def __init__(): + """x.__init__(...) initializes x; see help(type(x)) for signature""" + + def __iter__(): + """x.__iter__() <==> iter(x)""" + + def __le__(): + """x.__le__(y) <==> x<=y""" + + def __len__(): + """x.__len__() <==> len(x)""" + + def __lt__(): + """x.__lt__(y) <==> x x*n""" + + def __ne__(): + """x.__ne__(y) <==> x!=y""" + + def __reduce__(): + """Return state information for pickling.""" + + def __repr__(): + """x.__repr__() <==> repr(x)""" + + def __rmul__(): + """x.__rmul__(n) <==> n*x""" + + def __setitem__(): + """x.__setitem__(i, y) <==> x[i]=y""" + + def __sizeof__(): + """B.__sizeof__() -> int + + Returns the size of B in memory, in bytes + """ + + def __str__(): + """x.__str__() <==> str(x)""" + + def append(): + """B.append(int) -> None + + Append a single item to the end of B. + """ + + def capitalize(): + """B.capitalize() -> copy of B + + Return a copy of B with only its first character capitalized (ASCII) + and the rest lower-cased. + """ + + def center(): + """B.center(width[, fillchar]) -> copy of B + + Return B centered in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def count(): + """B.count(sub[, start[, end]]) -> int + + Return the number of non-overlapping occurrences of subsection sub in + bytes B[start:end]. Optional arguments start and end are interpreted + as in slice notation. + """ + + def decode(): + """B.decode(encoding=None, errors='strict') -> unicode + + Decode B using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' + as well as any other name registered with codecs.register_error that is + able to handle UnicodeDecodeErrors. + """ + + def endswith(): + """B.endswith(suffix[, start[, end]]) -> bool + + Return True if B ends with the specified suffix, False otherwise. + With optional start, test B beginning at that position. + With optional end, stop comparing B at that position. + suffix can also be a tuple of strings to try. + """ + + def expandtabs(): + """B.expandtabs([tabsize]) -> copy of B + + Return a copy of B where all tab characters are expanded using spaces. + If tabsize is not given, a tab size of 8 characters is assumed. + """ + + def extend(): + """B.extend(iterable_of_ints) -> None + + Append all the elements from the iterator or sequence to the + end of B. + """ + + def find(): + """B.find(sub[, start[, end]]) -> int + + Return the lowest index in B where subsection sub is found, + such that sub is contained within B[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def fromhex(): + """bytearray.fromhex(string) -> bytearray (static method) + + Create a bytearray object from a string of hexadecimal numbers. + Spaces between two numbers are accepted. 
+ Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\xb9\x01\xef'). + """ + + def index(): + """B.index(sub[, start[, end]]) -> int + + Like B.find() but raise ValueError when the subsection is not found. + """ + + def insert(): + """B.insert(index, int) -> None + + Insert a single item into the bytearray before the given index. + """ + + def isalnum(): + """B.isalnum() -> bool + + Return True if all characters in B are alphanumeric + and there is at least one character in B, False otherwise. + """ + + def isalpha(): + """B.isalpha() -> bool + + Return True if all characters in B are alphabetic + and there is at least one character in B, False otherwise. + """ + + def isdigit(): + """B.isdigit() -> bool + + Return True if all characters in B are digits + and there is at least one character in B, False otherwise. + """ + + def islower(): + """B.islower() -> bool + + Return True if all cased characters in B are lowercase and there is + at least one cased character in B, False otherwise. + """ + + def isspace(): + """B.isspace() -> bool + + Return True if all characters in B are whitespace + and there is at least one character in B, False otherwise. + """ + + def istitle(): + """B.istitle() -> bool + + Return True if B is a titlecased string and there is at least one + character in B, i.e. uppercase characters may only follow uncased + characters and lowercase characters only cased ones. Return False + otherwise. + """ + + def isupper(): + """B.isupper() -> bool + + Return True if all cased characters in B are uppercase and there is + at least one cased character in B, False otherwise. + """ + + def join(): + """B.join(iterable_of_bytes) -> bytearray + + Concatenate any number of str/bytearray objects, with B + in between each pair, and return the result as a new bytearray. + """ + + def ljust(): + """B.ljust(width[, fillchar]) -> copy of B + + Return B left justified in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def lower(): + """B.lower() -> copy of B + + Return a copy of B with all ASCII characters converted to lowercase. + """ + + def lstrip(): + """B.lstrip([bytes]) -> bytearray + + Strip leading bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip leading ASCII whitespace. + """ + + def partition(): + """B.partition(sep) -> (head, sep, tail) + + Search for the separator sep in B, and return the part before it, + the separator itself, and the part after it. If the separator is not + found, returns B and two empty bytearray objects. + """ + + def pop(): + """B.pop([index]) -> int + + Remove and return a single item from B. If no index + argument is given, will pop the last value. + """ + + def remove(): + """B.remove(int) -> None + + Remove the first occurrence of a value in B. + """ + + def replace(): + """B.replace(old, new[, count]) -> bytearray + + Return a copy of B with all occurrences of subsection + old replaced by new. If the optional argument count is + given, only the first count occurrences are replaced. + """ + + def reverse(): + """B.reverse() -> None + + Reverse the order of the values in B in place. + """ + + def rfind(): + """B.rfind(sub[, start[, end]]) -> int + + Return the highest index in B where subsection sub is found, + such that sub is contained within B[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. 
+ """ + + def rindex(): + """B.rindex(sub[, start[, end]]) -> int + + Like B.rfind() but raise ValueError when the subsection is not found. + """ + + def rjust(): + """B.rjust(width[, fillchar]) -> copy of B + + Return B right justified in a string of length width. Padding is + done using the specified fill character (default is a space) + """ + + def rpartition(): + """B.rpartition(sep) -> (head, sep, tail) + + Search for the separator sep in B, starting at the end of B, + and return the part before it, the separator itself, and the + part after it. If the separator is not found, returns two empty + bytearray objects and B. + """ + + def rsplit(): + """B.rsplit(sep=None, maxsplit=-1) -> list of bytearrays + + Return a list of the sections in B, using sep as the delimiter, + starting at the end of B and working to the front. + If sep is not given, B is split on ASCII whitespace characters + (space, tab, return, newline, formfeed, vertical tab). + If maxsplit is given, at most maxsplit splits are done. + """ + + def rstrip(): + """B.rstrip([bytes]) -> bytearray + + Strip trailing bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip trailing ASCII whitespace. + """ + + def split(): + """B.split(sep=None, maxsplit=-1) -> list of bytearrays + + Return a list of the sections in B, using sep as the delimiter. + If sep is not given, B is split on ASCII whitespace characters + (space, tab, return, newline, formfeed, vertical tab). + If maxsplit is given, at most maxsplit splits are done. + """ + + def splitlines(): + """B.splitlines(keepends=False) -> list of lines + + Return a list of the lines in B, breaking at line boundaries. + Line breaks are not included in the resulting list unless keepends + is given and true. + """ + + def startswith(): + """B.startswith(prefix[, start[, end]]) -> bool + + Return True if B starts with the specified prefix, False otherwise. + With optional start, test B beginning at that position. + With optional end, stop comparing B at that position. + prefix can also be a tuple of strings to try. + """ + + def strip(): + """B.strip([bytes]) -> bytearray + + Strip leading and trailing bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip ASCII whitespace. + """ + + def swapcase(): + """B.swapcase() -> copy of B + + Return a copy of B with uppercase ASCII characters converted + to lowercase ASCII and vice versa. + """ + + def title(): + """B.title() -> copy of B + + Return a titlecased version of B, i.e. ASCII words start with uppercase + characters, all remaining cased characters have lowercase. + """ + + def translate(): + """B.translate(table[, deletechars]) -> bytearray + + Return a copy of B, where all characters occurring in the + optional argument deletechars are removed, and the remaining + characters have been mapped through the given translation + table, which must be a bytes object of length 256. + """ + + def upper(): + """B.upper() -> copy of B + + Return a copy of B with all ASCII characters converted to uppercase. + """ + + def zfill(): + """B.zfill(width) -> copy of B + + Pad a numeric string B with zeros on the left, to fill a field + of the specified width. B is never truncated. 
+ """ + + +W_BytearrayObject.typedef = StdTypeDef( + "bytearray", + __doc__ = BytearrayDocstrings.__doc__, + __new__ = interp2app(W_BytearrayObject.descr_new), + __hash__ = None, + __reduce__ = interp2app(W_BytearrayObject.descr_reduce, + doc=BytearrayDocstrings.__reduce__.__doc__), + fromhex = interp2app(W_BytearrayObject.descr_fromhex, as_classmethod=True, + doc=BytearrayDocstrings.fromhex.__doc__), + + __repr__ = interp2app(W_BytearrayObject.descr_repr, + doc=BytearrayDocstrings.__repr__.__doc__), + __str__ = interp2app(W_BytearrayObject.descr_str, + doc=BytearrayDocstrings.__str__.__doc__), + + __eq__ = interp2app(W_BytearrayObject.descr_eq, + doc=BytearrayDocstrings.__eq__.__doc__), + __ne__ = interp2app(W_BytearrayObject.descr_ne, + doc=BytearrayDocstrings.__ne__.__doc__), + __lt__ = interp2app(W_BytearrayObject.descr_lt, + doc=BytearrayDocstrings.__lt__.__doc__), + __le__ = interp2app(W_BytearrayObject.descr_le, + doc=BytearrayDocstrings.__le__.__doc__), + __gt__ = interp2app(W_BytearrayObject.descr_gt, + doc=BytearrayDocstrings.__gt__.__doc__), + __ge__ = interp2app(W_BytearrayObject.descr_ge, + doc=BytearrayDocstrings.__ge__.__doc__), + + __len__ = interp2app(W_BytearrayObject.descr_len, + doc=BytearrayDocstrings.__len__.__doc__), + __contains__ = interp2app(W_BytearrayObject.descr_contains, + doc=BytearrayDocstrings.__contains__.__doc__), + + __add__ = interp2app(W_BytearrayObject.descr_add, + doc=BytearrayDocstrings.__add__.__doc__), + __mul__ = interp2app(W_BytearrayObject.descr_mul, + doc=BytearrayDocstrings.__mul__.__doc__), + __rmul__ = interp2app(W_BytearrayObject.descr_mul, + doc=BytearrayDocstrings.__rmul__.__doc__), + + __getitem__ = interp2app(W_BytearrayObject.descr_getitem, + doc=BytearrayDocstrings.__getitem__.__doc__), + + capitalize = interp2app(W_BytearrayObject.descr_capitalize, + doc=BytearrayDocstrings.capitalize.__doc__), + center = interp2app(W_BytearrayObject.descr_center, + doc=BytearrayDocstrings.center.__doc__), + count = interp2app(W_BytearrayObject.descr_count, + doc=BytearrayDocstrings.count.__doc__), + decode = interp2app(W_BytearrayObject.descr_decode, + doc=BytearrayDocstrings.decode.__doc__), + expandtabs = interp2app(W_BytearrayObject.descr_expandtabs, + doc=BytearrayDocstrings.expandtabs.__doc__), + find = interp2app(W_BytearrayObject.descr_find, + doc=BytearrayDocstrings.find.__doc__), + rfind = interp2app(W_BytearrayObject.descr_rfind, + doc=BytearrayDocstrings.rfind.__doc__), + index = interp2app(W_BytearrayObject.descr_index, + doc=BytearrayDocstrings.index.__doc__), + rindex = interp2app(W_BytearrayObject.descr_rindex, + doc=BytearrayDocstrings.rindex.__doc__), + isalnum = interp2app(W_BytearrayObject.descr_isalnum, + doc=BytearrayDocstrings.isalnum.__doc__), + isalpha = interp2app(W_BytearrayObject.descr_isalpha, + doc=BytearrayDocstrings.isalpha.__doc__), + isdigit = interp2app(W_BytearrayObject.descr_isdigit, + doc=BytearrayDocstrings.isdigit.__doc__), + islower = interp2app(W_BytearrayObject.descr_islower, + doc=BytearrayDocstrings.islower.__doc__), + isspace = interp2app(W_BytearrayObject.descr_isspace, + doc=BytearrayDocstrings.isspace.__doc__), + istitle = interp2app(W_BytearrayObject.descr_istitle, + doc=BytearrayDocstrings.istitle.__doc__), + isupper = interp2app(W_BytearrayObject.descr_isupper, + doc=BytearrayDocstrings.isupper.__doc__), + join = interp2app(W_BytearrayObject.descr_join, + doc=BytearrayDocstrings.join.__doc__), + ljust = interp2app(W_BytearrayObject.descr_ljust, + doc=BytearrayDocstrings.ljust.__doc__), + rjust = 
interp2app(W_BytearrayObject.descr_rjust, + doc=BytearrayDocstrings.rjust.__doc__), + lower = interp2app(W_BytearrayObject.descr_lower, + doc=BytearrayDocstrings.lower.__doc__), + partition = interp2app(W_BytearrayObject.descr_partition, + doc=BytearrayDocstrings.partition.__doc__), + rpartition = interp2app(W_BytearrayObject.descr_rpartition, + doc=BytearrayDocstrings.rpartition.__doc__), + replace = interp2app(W_BytearrayObject.descr_replace, + doc=BytearrayDocstrings.replace.__doc__), + split = interp2app(W_BytearrayObject.descr_split, + doc=BytearrayDocstrings.split.__doc__), + rsplit = interp2app(W_BytearrayObject.descr_rsplit, + doc=BytearrayDocstrings.rsplit.__doc__), + splitlines = interp2app(W_BytearrayObject.descr_splitlines, + doc=BytearrayDocstrings.splitlines.__doc__), + startswith = interp2app(W_BytearrayObject.descr_startswith, + doc=BytearrayDocstrings.startswith.__doc__), + endswith = interp2app(W_BytearrayObject.descr_endswith, + doc=BytearrayDocstrings.endswith.__doc__), + strip = interp2app(W_BytearrayObject.descr_strip, + doc=BytearrayDocstrings.strip.__doc__), + lstrip = interp2app(W_BytearrayObject.descr_lstrip, + doc=BytearrayDocstrings.lstrip.__doc__), + rstrip = interp2app(W_BytearrayObject.descr_rstrip, + doc=BytearrayDocstrings.rstrip.__doc__), + swapcase = interp2app(W_BytearrayObject.descr_swapcase, + doc=BytearrayDocstrings.swapcase.__doc__), + title = interp2app(W_BytearrayObject.descr_title, + doc=BytearrayDocstrings.title.__doc__), + translate = interp2app(W_BytearrayObject.descr_translate, + doc=BytearrayDocstrings.translate.__doc__), + upper = interp2app(W_BytearrayObject.descr_upper, + doc=BytearrayDocstrings.upper.__doc__), + zfill = interp2app(W_BytearrayObject.descr_zfill, + doc=BytearrayDocstrings.zfill.__doc__), + + __init__ = interp2app(W_BytearrayObject.descr_init, + doc=BytearrayDocstrings.__init__.__doc__), + __buffer__ = interp2app(W_BytearrayObject.descr_buffer), + + __iadd__ = interp2app(W_BytearrayObject.descr_inplace_add, + doc=BytearrayDocstrings.__iadd__.__doc__), + __imul__ = interp2app(W_BytearrayObject.descr_inplace_mul, + doc=BytearrayDocstrings.__imul__.__doc__), + __setitem__ = interp2app(W_BytearrayObject.descr_setitem, + doc=BytearrayDocstrings.__setitem__.__doc__), + __delitem__ = interp2app(W_BytearrayObject.descr_delitem, + doc=BytearrayDocstrings.__delitem__.__doc__), + + append = interp2app(W_BytearrayObject.descr_append, + doc=BytearrayDocstrings.append.__doc__), + extend = interp2app(W_BytearrayObject.descr_extend, + doc=BytearrayDocstrings.extend.__doc__), + insert = interp2app(W_BytearrayObject.descr_insert, + doc=BytearrayDocstrings.insert.__doc__), + pop = interp2app(W_BytearrayObject.descr_pop, + doc=BytearrayDocstrings.pop.__doc__), + remove = interp2app(W_BytearrayObject.descr_remove, + doc=BytearrayDocstrings.remove.__doc__), + reverse = interp2app(W_BytearrayObject.descr_reverse, + doc=BytearrayDocstrings.reverse.__doc__), +) init_signature = Signature(['source', 'encoding', 'errors'], None, None) init_defaults = [None, None, None] -def init__Bytearray(space, w_bytearray, __args__): - # this is on the silly side - w_source, w_encoding, w_errors = __args__.parse_obj( - None, 'bytearray', init_signature, init_defaults) - if w_source is None: - w_source = space.wrap('') - if w_encoding is None: - w_encoding = space.w_None - if w_errors is None: - w_errors = space.w_None - - # Unicode argument - if not space.is_w(w_encoding, space.w_None): - from pypy.objspace.std.unicodetype import ( - _get_encoding_and_errors, 
encode_object - ) - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) - - # if w_source is an integer this correctly raises a TypeError - # the CPython error message is: "encoding or errors without a string argument" - # ours is: "expected unicode, got int object" - w_source = encode_object(space, w_source, encoding, errors) - - # Is it an int? - try: - count = space.int_w(w_source) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - if count < 0: - raise OperationError(space.w_ValueError, - space.wrap("bytearray negative count")) - w_bytearray.data = ['\0'] * count - return - - data = makebytearraydata_w(space, w_source) - w_bytearray.data = data - -def len__Bytearray(space, w_bytearray): - result = len(w_bytearray.data) - return space.newint(result) - -def ord__Bytearray(space, w_bytearray): - if len(w_bytearray.data) != 1: - raise OperationError(space.w_TypeError, - space.wrap("expected a character, but string" - "of length %s found" % len(w_bytearray.data))) - return space.wrap(ord(w_bytearray.data[0])) - -def getitem__Bytearray_ANY(space, w_bytearray, w_index): - # getindex_w should get a second argument space.w_IndexError, - # but that doesn't exist the first time this is called. - try: - w_IndexError = space.w_IndexError - except AttributeError: - w_IndexError = None - index = space.getindex_w(w_index, w_IndexError, "bytearray index") - try: - return space.newint(ord(w_bytearray.data[index])) - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray index out of range")) - -def getitem__Bytearray_Slice(space, w_bytearray, w_slice): - data = w_bytearray.data - length = len(data) - start, stop, step, slicelength = w_slice.indices4(space, length) - assert slicelength >= 0 - if step == 1 and 0 <= start <= stop: - newdata = data[start:stop] - else: - newdata = _getitem_slice_multistep(data, start, step, slicelength) - return W_BytearrayObject(newdata) - -def _getitem_slice_multistep(data, start, step, slicelength): - return [data[start + i*step] for i in range(slicelength)] - -def contains__Bytearray_Int(space, w_bytearray, w_char): - char = space.int_w(w_char) - if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) - for c in w_bytearray.data: - if ord(c) == char: - return space.w_True - return space.w_False - -def contains__Bytearray_String(space, w_bytearray, w_str): - # XXX slow - copies, needs rewriting - w_str2 = str__Bytearray(space, w_bytearray) - return stringobject.contains__String_String(space, w_str2, w_str) - -def contains__Bytearray_ANY(space, w_bytearray, w_sub): - # XXX slow - copies, needs rewriting - w_str = space.wrap(space.bufferstr_new_w(w_sub)) - w_str2 = str__Bytearray(space, w_bytearray) - return stringobject.contains__String_String(space, w_str2, w_str) - -def add__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - return W_BytearrayObject(data1 + data2) - -def add__Bytearray_ANY(space, w_bytearray1, w_other): - data1 = w_bytearray1.data - data2 = [c for c in space.bufferstr_new_w(w_other)] - return W_BytearrayObject(data1 + data2) - -def add__String_Bytearray(space, w_str, w_bytearray): - data2 = w_bytearray.data - data1 = [c for c in space.str_w(w_str)] - return W_BytearrayObject(data1 + data2) - -def mul_bytearray_times(space, w_bytearray, w_times): - try: - times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: - if 
e.match(space, space.w_TypeError): - raise FailedToImplement - raise - data = w_bytearray.data - return W_BytearrayObject(data * times) - -def mul__Bytearray_ANY(space, w_bytearray, w_times): - return mul_bytearray_times(space, w_bytearray, w_times) - -def mul__ANY_Bytearray(space, w_times, w_bytearray): - return mul_bytearray_times(space, w_bytearray, w_times) - -def inplace_mul__Bytearray_ANY(space, w_bytearray, w_times): - try: - times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: - if e.match(space, space.w_TypeError): - raise FailedToImplement - raise - w_bytearray.data *= times - return w_bytearray - -def eq__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - if len(data1) != len(data2): - return space.w_False - for i in range(len(data1)): - if data1[i] != data2[i]: - return space.w_False - return space.w_True - -def String2Bytearray(space, w_str): - data = [c for c in space.str_w(w_str)] - return W_BytearrayObject(data) - -def eq__Bytearray_String(space, w_bytearray, w_other): - return space.eq(str__Bytearray(space, w_bytearray), w_other) - -def eq__Bytearray_Unicode(space, w_bytearray, w_other): - return space.w_False - -def eq__Unicode_Bytearray(space, w_other, w_bytearray): - return space.w_False - -def ne__Bytearray_String(space, w_bytearray, w_other): - return space.ne(str__Bytearray(space, w_bytearray), w_other) - -def ne__Bytearray_Unicode(space, w_bytearray, w_other): - return space.w_True - -def ne__Unicode_Bytearray(space, w_other, w_bytearray): - return space.w_True - -def _min(a, b): - if a < b: - return a - return b - -def lt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - ncmp = _min(len(data1), len(data2)) - # Search for the first index where items are different - for p in range(ncmp): - if data1[p] != data2[p]: - return space.newbool(data1[p] < data2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(data1) < len(data2)) - -def gt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - ncmp = _min(len(data1), len(data2)) - # Search for the first index where items are different - for p in range(ncmp): - if data1[p] != data2[p]: - return space.newbool(data1[p] > data2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(data1) > len(data2)) - -def str_translate__Bytearray_ANY_ANY(space, w_bytearray1, w_table, w_deletechars): - # XXX slow, copies *twice* needs proper implementation - w_str_copy = str__Bytearray(space, w_bytearray1) - w_res = stringobject.str_translate__String_ANY_ANY(space, w_str_copy, - w_table, w_deletechars) - return String2Bytearray(space, w_res) - -# Mostly copied from repr__String, but without the "smart quote" -# functionality. -def repr__Bytearray(space, w_bytearray): - s = w_bytearray.data - - # Good default if there are no replacements. 
- buf = StringBuilder(len("bytearray(b'')") + len(s)) - - buf.append("bytearray(b'") - - for i in range(len(s)): - c = s[i] - - if c == '\\' or c == "'": - buf.append('\\') - buf.append(c) - elif c == '\t': - buf.append('\\t') - elif c == '\r': - buf.append('\\r') - elif c == '\n': - buf.append('\\n') - elif not '\x20' <= c < '\x7f': - n = ord(c) - buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) - else: - buf.append(c) - - buf.append("')") - - return space.wrap(buf.build()) - -def str__Bytearray(space, w_bytearray): - return space.wrap(''.join(w_bytearray.data)) - -def str_count__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_count__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_index__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_index__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_rindex__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_rindex__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_find__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_find__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_rfind__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_rfind__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_startswith__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_prefix, w_start, w_stop): - if space.isinstance_w(w_prefix, space.w_tuple): - w_str = str__Bytearray(space, w_bytearray) - w_prefix = space.newtuple([space.wrap(space.bufferstr_new_w(w_entry)) for w_entry in - space.fixedview(w_prefix)]) - return stringobject.str_startswith__String_ANY_ANY_ANY(space, w_str, w_prefix, - w_start, w_stop) - - w_prefix = space.wrap(space.bufferstr_new_w(w_prefix)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_startswith__String_String_ANY_ANY(space, w_str, w_prefix, - w_start, w_stop) - -def str_endswith__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_suffix, w_start, w_stop): - if space.isinstance_w(w_suffix, space.w_tuple): - w_str = str__Bytearray(space, w_bytearray) - w_suffix = space.newtuple([space.wrap(space.bufferstr_new_w(w_entry)) for w_entry in - space.fixedview(w_suffix)]) - return stringobject.str_endswith__String_ANY_ANY_ANY(space, w_str, w_suffix, - w_start, w_stop) - w_suffix = space.wrap(space.bufferstr_new_w(w_suffix)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_endswith__String_String_ANY_ANY(space, w_str, w_suffix, - w_start, w_stop) - +# XXX consider moving to W_BytearrayObject or remove def str_join__Bytearray_ANY(space, w_self, w_list): list_w = space.listview(w_list) if not list_w: @@ -350,251 +1022,8 @@ newdata.extend([c for c in space.bufferstr_new_w(w_s)]) return W_BytearrayObject(newdata) -def str_decode__Bytearray_ANY_ANY(space, 
w_bytearray, w_encoding, w_errors): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_decode__String_ANY_ANY(space, w_str, w_encoding, w_errors) - -def str_islower__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_islower__String(space, w_str) - -def str_isupper__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_isupper__String(space, w_str) - -def str_isalpha__Bytearray(space, w_bytearray): From noreply at buildbot.pypy.org Sun Jan 19 21:03:21 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 19 Jan 2014 21:03:21 +0100 (CET) Subject: [pypy-commit] pypy refactor-translator: Rewrite main() in RPython. Add enough hacks to make translation of simple programs work. Remove some code in rpython/translator/c/extfunc.py that is unnecessary now. Message-ID: <20140119200321.D5BA01C1559@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-translator Changeset: r68769:e22c2e78adfb Date: 2014-01-19 21:03 +0100 http://bitbucket.org/pypy/pypy/changeset/e22c2e78adfb/ Log: Rewrite main() in RPython. Add enough hacks to make translation of simple programs work. Remove some code in rpython/translator/c/extfunc.py that is unnecessary now. diff --git a/rpython/translator/c/extfunc.py b/rpython/translator/c/extfunc.py --- a/rpython/translator/c/extfunc.py +++ b/rpython/translator/c/extfunc.py @@ -1,4 +1,5 @@ import types +from rpython.annotator import model as annmodel from rpython.flowspace.model import FunctionGraph from rpython.rtyper.lltypesystem import lltype from rpython.translator.c.support import cdecl @@ -14,61 +15,9 @@ #______________________________________________________ -def find_list_of_str(rtyper): - for r in rtyper.reprs.itervalues(): - if isinstance(r, rlist.ListRepr) and r.item_repr is rstr.string_repr: - return r.lowleveltype.TO - return None - def predeclare_common_types(db, rtyper): # Common types yield ('RPyString', STR) - LIST_OF_STR = find_list_of_str(rtyper) - if LIST_OF_STR is not None: - yield ('RPyListOfString', LIST_OF_STR) - -def predeclare_utility_functions(db, rtyper): - # Common utility functions - def RPyString_New(length=lltype.Signed): - return mallocstr(length) - - # !!! 
- # be extremely careful passing a gc tracked object - # from such an helper result to another one - # as argument, this could result in leaks - # Such result should be only from C code - # returned directly as results - - LIST_OF_STR = find_list_of_str(rtyper) - if LIST_OF_STR is not None: - p = lltype.Ptr(LIST_OF_STR) - - def _RPyListOfString_New(length=lltype.Signed): - return LIST_OF_STR.ll_newlist(length) - - def _RPyListOfString_SetItem(l=p, - index=lltype.Signed, - newstring=lltype.Ptr(STR)): - rlist.ll_setitem_nonneg(rlist.dum_nocheck, l, index, newstring) - - def _RPyListOfString_GetItem(l=p, - index=lltype.Signed): - return rlist.ll_getitem_fast(l, index) - - def _RPyListOfString_Length(l=p): - return rlist.ll_length(l) - - for fname, f in locals().items(): - if isinstance(f, types.FunctionType): - # XXX this is painful :( - if (LIST_OF_STR, fname) in db.helper2ptr: - yield (fname, db.helper2ptr[LIST_OF_STR, fname]) - else: - # hack: the defaults give the type of the arguments - graph = rtyper.annotate_helper(f, f.func_defaults) - db.helper2ptr[LIST_OF_STR, fname] = graph - yield (fname, graph) - def predeclare_extfuncs(db, rtyper): modules = {} @@ -106,17 +55,18 @@ for exccls in exceptiondata.standardexceptions: exc_llvalue = exceptiondata.get_standard_ll_exc_instance_by_class( exccls) + rtyper.getrepr(annmodel.lltype_to_annotation(lltype.typeOf(exc_llvalue))) # strange naming here because the macro name must be # a substring of PyExc_%s name = exccls.__name__ if exccls.__module__ != 'exceptions': name = '%s_%s' % (exccls.__module__.replace('.', '__'), name) yield ('RPyExc_%s' % name, exc_llvalue) + rtyper.call_all_setups() def predeclare_all(db, rtyper): for fn in [predeclare_common_types, - predeclare_utility_functions, predeclare_exception_data, predeclare_extfuncs, ]: @@ -126,7 +76,6 @@ def get_all(db, rtyper): for fn in [predeclare_common_types, - predeclare_utility_functions, predeclare_exception_data, predeclare_extfuncs, ]: diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -226,7 +226,6 @@ if db is None: db = self.build_database() - pf = self.getentrypointptr() if self.modulename is None: self.modulename = uniquemodulename('testing') modulename = self.modulename @@ -248,16 +247,14 @@ if not self.standalone: assert not self.config.translation.instrument else: - defines['PYPY_STANDALONE'] = db.get(pf) if self.config.translation.instrument: defines['PYPY_INSTRUMENT'] = 1 if CBuilder.have___thread: if not self.config.translation.no__thread: defines['USE___THREAD'] = 1 if self.config.translation.shared: - defines['PYPY_MAIN_FUNCTION'] = "pypy_main_startup" self.eci = self.eci.merge(ExternalCompilationInfo( - export_symbols=["pypy_main_startup", "pypy_debug_file"])) + export_symbols=["rpython_main", "pypy_debug_file"])) self.eci, cfile, extra = gen_source(db, modulename, targetdir, self.eci, defines=defines, split=self.split) @@ -303,10 +300,11 @@ and profbased[0] is ProfOpt) def getentrypointptr(self): - # XXX check that the entrypoint has the correct - # signature: list-of-strings -> int - bk = self.translator.annotator.bookkeeper - return getfunctionptr(bk.getdesc(self.entrypoint).getuniquegraph()) + from rpython.translator.gensupp import make_main + # XXX: only framework for now + setup = self.db.gctransformer.frameworkgc_setup_ptr.value + graph = make_main(self.translator, setup, self.entrypoint) + return getfunctionptr(graph) def cmdexec(self, args='', env=None, err=False, 
expect_crash=False): assert self._compiled @@ -742,8 +740,8 @@ def add_extra_files(eci): srcdir = py.path.local(__file__).join('..', 'src') files = [ - srcdir / 'entrypoint.c', # ifdef PYPY_STANDALONE - srcdir / 'allocator.c', # ifdef PYPY_STANDALONE + srcdir / 'main.c', + srcdir / 'allocator.c', srcdir / 'mem.c', srcdir / 'exception.c', srcdir / 'rtyper.c', diff --git a/rpython/translator/c/src/allocator.c b/rpython/translator/c/src/allocator.c --- a/rpython/translator/c/src/allocator.c +++ b/rpython/translator/c/src/allocator.c @@ -1,6 +1,5 @@ /* allocation functions */ #include "common_header.h" -#ifdef PYPY_STANDALONE #include #if defined(PYPY_USE_TRIVIAL_MALLOC) @@ -24,5 +23,3 @@ # include "src/obmalloc.c" #endif - -#endif /* PYPY_STANDALONE */ diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c deleted file mode 100644 --- a/rpython/translator/c/src/entrypoint.c +++ /dev/null @@ -1,89 +0,0 @@ -#include "common_header.h" -#ifdef PYPY_STANDALONE -#include "structdef.h" -#include "forwarddecl.h" -#include "preimpl.h" -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#ifdef __GNUC__ -/* Hack to prevent this function from being inlined. Helps asmgcc - because the main() function has often a different prologue/epilogue. */ -int pypy_main_function(int argc, char *argv[]) __attribute__((__noinline__)); -#endif - -# ifdef PYPY_USE_ASMGCC -# include "structdef.h" -# include "forwarddecl.h" -# endif - -int pypy_main_function(int argc, char *argv[]) -{ - char *errmsg; - int i, exitcode; - RPyListOfString *list; - -#ifdef PYPY_USE_ASMGCC - pypy_g_rpython_rtyper_lltypesystem_rffi_StackCounter.sc_inst_stacks_counter++; -#endif - pypy_asm_stack_bottom(); -#ifdef PYPY_X86_CHECK_SSE2_DEFINED - pypy_x86_check_sse2(); -#endif - instrument_setup(); - -#ifndef MS_WINDOWS - /* this message does no longer apply to win64 :-) */ - if (sizeof(void*) != SIZEOF_LONG) { - errmsg = "only support platforms where sizeof(void*) == sizeof(long)," - " for now"; - goto error; - } -#endif - - errmsg = RPython_StartupCode(); - if (errmsg) goto error; - - list = _RPyListOfString_New(argc); - if (RPyExceptionOccurred()) goto memory_out; - for (i=0; irs_chars.items, buf, length); - return rps; -} diff --git a/rpython/translator/c/src/rtyper.h b/rpython/translator/c/src/rtyper.h --- a/rpython/translator/c/src/rtyper.h +++ b/rpython/translator/c/src/rtyper.h @@ -11,4 +11,3 @@ char *RPyString_AsCharP(RPyString *rps); void RPyString_FreeCache(void); -RPyString *RPyString_FromString(char *buf); diff --git a/rpython/translator/c/src/support.c b/rpython/translator/c/src/support.c --- a/rpython/translator/c/src/support.c +++ b/rpython/translator/c/src/support.c @@ -1,5 +1,8 @@ #include "common_header.h" #include +#include +#include +#include /************************************************************/ /*** C header subsection: support functions ***/ @@ -23,3 +26,26 @@ abort(); } + +void rpython_special_startup() +{ +#ifdef PYPY_X86_CHECK_SSE2_DEFINED + pypy_x86_check_sse2(); +#endif + instrument_setup(); + +#ifndef MS_WINDOWS + /* this message does no longer apply to win64 :-) */ + if (sizeof(void*) != SIZEOF_LONG) { + fprintf(stderr, "Only support platforms where sizeof(void*) == " + "sizeof(long), for now\n"); + abort(); + } +#endif +} + +void rpython_special_shutdown() +{ + pypy_debug_alloc_results(); + pypy_malloc_counters_results(); +} diff --git a/rpython/translator/c/src/support.h b/rpython/translator/c/src/support.h --- 
a/rpython/translator/c/src/support.h +++ b/rpython/translator/c/src/support.h @@ -63,3 +63,6 @@ # define RPyNLenItem(array, index) ((array)->items[index]) # define RPyBareItem(array, index) ((array)[index]) #endif + +void rpython_special_startup(); +void rpython_special_shutdown(); diff --git a/rpython/translator/gensupp.py b/rpython/translator/gensupp.py --- a/rpython/translator/gensupp.py +++ b/rpython/translator/gensupp.py @@ -2,6 +2,13 @@ Some support for genxxx implementations of source generators. Another name could be genEric, but well... """ +from os import write + +from rpython.annotator import model as annmodel +from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator +from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem.lloperation import llop + def uniquemodulename(name, SEEN=set()): # never reuse the same module name within a Python session! @@ -116,3 +123,35 @@ self.mapping[name] = ret return ret + +rpython_special_startup = rffi.llexternal( + 'rpython_special_startup', [], lltype.Void, _nowrapper=True) +rpython_special_shutdown = rffi.llexternal( + 'rpython_special_shutdown', [], lltype.Void, _nowrapper=True) + +def make_main(translator, setup, entrypoint): + def rpython_main(argc, argv): + rffi.stackcounter.stacks_counter += 1 + llop.gc_stack_bottom(lltype.Void) + rpython_special_startup() + try: + if setup is not None: + setup() + args = [rffi.charp2str(argv[i]) for i in range(argc)] + exitcode = entrypoint(args) + except Exception as e: + write(2, 'DEBUG: An uncaught exception was raised in entrypoint: ' + + str(e) + '\n') + return 1 + rpython_special_shutdown(); + return exitcode + rpython_main.c_name = 'rpython_main' + + mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper) + arg1 = annmodel.lltype_to_annotation(rffi.INT) + arg2 = annmodel.lltype_to_annotation(rffi.CCHARPP) + res = annmodel.lltype_to_annotation(lltype.Signed) + graph = mixlevelannotator.getgraph(rpython_main, [arg1, arg2], res) + mixlevelannotator.finish() + mixlevelannotator.backend_optimize() + return graph diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -172,12 +172,6 @@ if shared: m.definition('SHARED_IMPORT_LIB', libname), - m.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup") - m.rule('main.c', '', - 'echo "' - 'int $(PYPY_MAIN_FUNCTION)(int, char*[]); ' - 'int main(int argc, char* argv[]) ' - '{ return $(PYPY_MAIN_FUNCTION)(argc, argv); }" > $@') m.rule('$(DEFAULT_TARGET)', ['$(TARGET)', 'main.o'], '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. 
-l$(SHARED_IMPORT_LIB) -o $@') From noreply at buildbot.pypy.org Sun Jan 19 21:24:38 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 19 Jan 2014 21:24:38 +0100 (CET) Subject: [pypy-commit] pypy refactor-translator: hg merge default Message-ID: <20140119202438.56BDB1C153F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-translator Changeset: r68770:128d10637045 Date: 2014-01-19 21:18 +0100 http://bitbucket.org/pypy/pypy/changeset/128d10637045/ Log: hg merge default diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -2,6 +2,8 @@ import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject +from _ctypes.array import Array +from _ctypes.pointer import _Pointer import inspect def names_and_fields(self, _fields_, superclass, anonymous_fields=None): @@ -104,8 +106,11 @@ def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = keepalive_key(self.num) + key = keepalive_key(self.num) + if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array): + # if our value is an Array we need the whole thing alive + store_reference(obj, key, cobj) + elif ensure_objects(cobj) is not None: store_reference(obj, key, cobj._objects) arg = cobj._get_buffer_value() if fieldtype._fficompositesize is not None: diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py b/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py @@ -31,7 +31,17 @@ assert p._objects == {} assert len(x._objects) == 1 assert x._objects['0'] is p._objects - + + def test_simple_structure_and_pointer_with_array(self): + class X(Structure): + _fields_ = [('array', POINTER(c_int))] + + x = X() + a = (c_int * 3)(1, 2, 3) + assert x._objects is None + x.array = a + assert x._objects['0'] is a + def test_structure_with_pointers(self): class X(Structure): _fields_ = [('x', POINTER(c_int)), diff --git a/rpython/translator/c/extfunc.py b/rpython/translator/c/extfunc.py --- a/rpython/translator/c/extfunc.py +++ b/rpython/translator/c/extfunc.py @@ -1,19 +1,9 @@ -import types from rpython.annotator import model as annmodel from rpython.flowspace.model import FunctionGraph from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.rstr import STR from rpython.translator.c.support import cdecl -from rpython.rtyper.lltypesystem.rstr import STR, mallocstr -from rpython.rtyper.lltypesystem import rstr -from rpython.rtyper.lltypesystem import rlist -# table of functions hand-written in src/ll_*.h -# Note about *.im_func: The annotator and the rtyper expect direct -# references to functions, so we cannot insert classmethods here. 
- -EXTERNALS = {'LL_flush_icache': 'LL_flush_icache'} - -#______________________________________________________ def predeclare_common_types(db, rtyper): # Common types @@ -29,14 +19,13 @@ return frags[0] for func, funcobj in db.externalfuncs.items(): - c_name = EXTERNALS[func] # construct a define LL_NEED_ to make it possible to isolate in-development externals and headers - modname = module_name(c_name) + modname = module_name(func) if modname not in modules: modules[modname] = True yield 'LL_NEED_%s' % modname.upper(), 1 funcptr = funcobj._as_ptr() - yield c_name, funcptr + yield func, funcptr def predeclare_exception_data(db, rtyper): # Exception-related types and constants diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -916,15 +916,6 @@ return sandbox_stub(fnobj, db) db.externalfuncs[fnobj._external_name] = fnobj return [] - elif fnobj._callable in extfunc.EXTERNALS: - # -- deprecated case -- - # 'fnobj' is one of the ll_xyz() functions with the suggested_primitive - # flag in rpython.rtyper.module.*. The corresponding C wrappers are - # written by hand in src/ll_*.h, and declared in extfunc.EXTERNALS. - if sandbox and not fnobj._name.startswith('ll_stack_'): # XXX!!! Temporary - return sandbox_stub(fnobj, db) - db.externalfuncs[fnobj._callable] = fnobj - return [] elif hasattr(fnobj, 'graph'): if sandbox and sandbox != "if_external": # apply the sandbox transformation diff --git a/rpython/translator/c/src/asm_ppc.c b/rpython/translator/c/src/asm_ppc.c deleted file mode 100644 --- a/rpython/translator/c/src/asm_ppc.c +++ /dev/null @@ -1,24 +0,0 @@ -#include "src/asm_ppc.h" - -#define __dcbst(base, index) \ - __asm__ ("dcbst %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") -#define __icbi(base, index) \ - __asm__ ("icbi %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory") -#define __sync() __asm__ volatile ("sync") -#define __isync() \ - __asm__ volatile ("isync") - -void -LL_flush_icache(long base, long size) -{ - long i; - - for (i = 0; i < size; i += 32){ - __dcbst(base, i); - } - __sync(); - for (i = 0; i < size; i += 32){ - __icbi(base, i); - } - __isync(); -} diff --git a/rpython/translator/c/src/asm_ppc.h b/rpython/translator/c/src/asm_ppc.h deleted file mode 100644 --- a/rpython/translator/c/src/asm_ppc.h +++ /dev/null @@ -1,1 +0,0 @@ -void LL_flush_icache(long base, long size); diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -89,7 +89,7 @@ targetdir = cbuilder.targetdir fname = dump_static_data_info(self.driver.log, database, targetdir) dstname = self.driver.compute_exe_name() + '.staticdata.info' - shutil.copy(str(fname), str(dstname)) + shutil_copy(str(fname), str(dstname)) self.driver.log.info('Static data info written to %s' % dstname) @taskdef("Compiling c source") @@ -119,11 +119,11 @@ newexename = self.driver.compute_exe_name() if sys.platform == 'win32': newexename = newexename.new(ext='exe') - shutil.copy(str(exename), str(newexename)) + shutil_copy(str(exename), str(newexename)) if self.cbuilder.shared_library_name is not None: soname = self.cbuilder.shared_library_name newsoname = newexename.new(basename=soname.basename) - shutil.copy(str(soname), str(newsoname)) + shutil_copy(str(soname), str(newsoname)) self.driver.log.info("copied: %s" % (newsoname,)) if sys.platform == 'win32': # the import library is named python27.lib, according From noreply 
at buildbot.pypy.org Sun Jan 19 21:24:39 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 19 Jan 2014 21:24:39 +0100 (CET) Subject: [pypy-commit] pypy refactor-translator: Kill predeclare_extfuncs(). Message-ID: <20140119202439.927211C153F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-translator Changeset: r68771:5893d92ec346 Date: 2014-01-19 21:23 +0100 http://bitbucket.org/pypy/pypy/changeset/5893d92ec346/ Log: Kill predeclare_extfuncs(). diff --git a/rpython/translator/c/extfunc.py b/rpython/translator/c/extfunc.py --- a/rpython/translator/c/extfunc.py +++ b/rpython/translator/c/extfunc.py @@ -9,24 +9,6 @@ # Common types yield ('RPyString', STR) -def predeclare_extfuncs(db, rtyper): - modules = {} - def module_name(c_name): - frags = c_name[3:].split('_') - if frags[0] == '': - return '_' + frags[1] - else: - return frags[0] - - for func, funcobj in db.externalfuncs.items(): - # construct a define LL_NEED_ to make it possible to isolate in-development externals and headers - modname = module_name(func) - if modname not in modules: - modules[modname] = True - yield 'LL_NEED_%s' % modname.upper(), 1 - funcptr = funcobj._as_ptr() - yield func, funcptr - def predeclare_exception_data(db, rtyper): # Exception-related types and constants exceptiondata = rtyper.exceptiondata @@ -57,7 +39,6 @@ def predeclare_all(db, rtyper): for fn in [predeclare_common_types, predeclare_exception_data, - predeclare_extfuncs, ]: for t in fn(db, rtyper): yield t @@ -66,7 +47,6 @@ def get_all(db, rtyper): for fn in [predeclare_common_types, predeclare_exception_data, - predeclare_extfuncs, ]: for t in fn(db, rtyper): yield t[1] From noreply at buildbot.pypy.org Sun Jan 19 21:47:33 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 19 Jan 2014 21:47:33 +0100 (CET) Subject: [pypy-commit] pypy refactor-translator: Kill db.externalfuncs. Message-ID: <20140119204733.957761C153F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-translator Changeset: r68772:5162c8617edc Date: 2014-01-19 21:30 +0100 http://bitbucket.org/pypy/pypy/changeset/5162c8617edc/ Log: Kill db.externalfuncs. diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -45,7 +45,6 @@ self.delayedfunctionptrs = [] self.completedcontainers = 0 self.containerstats = {} - self.externalfuncs = {} self.helper2ptr = {} # late_initializations is for when the value you want to diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -914,7 +914,6 @@ if hasattr(fnobj, '_external_name'): if sandbox: return sandbox_stub(fnobj, db) - db.externalfuncs[fnobj._external_name] = fnobj return [] elif hasattr(fnobj, 'graph'): if sandbox and sandbox != "if_external": From noreply at buildbot.pypy.org Sun Jan 19 21:47:34 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 19 Jan 2014 21:47:34 +0100 (CET) Subject: [pypy-commit] pypy refactor-translator: Kill db.helper2ptr. Message-ID: <20140119204734.D7F1B1C153F@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-translator Changeset: r68773:df1ca389cffa Date: 2014-01-19 21:40 +0100 http://bitbucket.org/pypy/pypy/changeset/df1ca389cffa/ Log: Kill db.helper2ptr. 
diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -45,7 +45,6 @@ self.delayedfunctionptrs = [] self.completedcontainers = 0 self.containerstats = {} - self.helper2ptr = {} # late_initializations is for when the value you want to # assign to a constant object is something C doesn't think is From noreply at buildbot.pypy.org Mon Jan 20 12:39:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 20 Jan 2014 12:39:27 +0100 (CET) Subject: [pypy-commit] pypy default: Oups, sorry. Message-ID: <20140120113927.6C8641C0356@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68774:9b55cdc1b2c5 Date: 2014-01-20 12:38 +0100 http://bitbucket.org/pypy/pypy/changeset/9b55cdc1b2c5/ Log: Oups, sorry. diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -99,7 +99,7 @@ called[0] += 1 if called[0] == 1: assert errors == "foo!" - assert enc == encoding + assert enc == encoding.replace('-', '') assert t is s assert start == startingpos assert stop == endingpos From noreply at buildbot.pypy.org Mon Jan 20 12:43:28 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 20 Jan 2014 12:43:28 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: hg merge default Message-ID: <20140120114328.19BCC1C0356@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68775:17a333f624d3 Date: 2014-01-20 09:53 +0100 http://bitbucket.org/pypy/pypy/changeset/17a333f624d3/ Log: hg merge default diff too long, truncating to 2000 out of 10318 lines diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. 
See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -2,6 +2,8 @@ import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject +from _ctypes.array import Array +from _ctypes.pointer import _Pointer import inspect def names_and_fields(self, _fields_, superclass, anonymous_fields=None): @@ -104,8 +106,11 @@ def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = keepalive_key(self.num) + key = keepalive_key(self.num) + if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array): + # if our value is an Array we need the whole thing alive + store_reference(obj, key, cobj) + elif ensure_objects(cobj) is not None: store_reference(obj, key, cobj._objects) arg = cobj._get_buffer_value() if fieldtype._fficompositesize is not None: diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -74,6 +74,10 @@ The actual details would be rather differen in PyPy, but we would like to have the same optimization implemented. +Or maybe not. We can also play around with the idea of using a single +representation: as a byte string in utf-8. (This idea needs some extra logic +for efficient indexing, like a cache.) + .. _`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/ Translation Toolchain diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -41,3 +41,6 @@ Fix 3 broken links on PyPy published papers in docs. .. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. 
diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -26,7 +26,12 @@ if branch == 'default': branch = 'trunk' -filename = 'pypy-c-jit-latest-%s.tar.bz2' % arch +if '--nojit' in sys.argv: + kind = 'nojit' +else: + kind = 'jit' + +filename = 'pypy-c-%s-latest-%s.tar.bz2' % (kind, arch) url = 'http://buildbot.pypy.org/nightly/%s/%s' % (branch, filename) tmp = py.path.local.mkdtemp() mydir = tmp.chdir() diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1234,6 +1234,8 @@ flags |= consts.CO_NESTED if scope.is_generator: flags |= consts.CO_GENERATOR + if scope.has_yield_inside_try: + flags |= consts.CO_YIELD_INSIDE_TRY if scope.has_variable_arg: flags |= consts.CO_VARARGS if scope.has_keywords_arg: diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -17,6 +17,7 @@ CO_FUTURE_UNICODE_LITERALS = 0x20000 #pypy specific: CO_KILL_DOCSTRING = 0x100000 +CO_YIELD_INSIDE_TRY = 0x200000 PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -43,6 +43,7 @@ self.child_has_free = False self.nested = False self.doc_removable = False + self._in_try_body_depth = 0 def lookup(self, name): """Find the scope of identifier 'name'.""" @@ -75,6 +76,14 @@ self.varnames.append(mangled) return mangled + def note_try_start(self, try_node): + """Called when a try is found, before visiting the body.""" + self._in_try_body_depth += 1 + + def note_try_end(self, try_node): + """Called after visiting a try body.""" + self._in_try_body_depth -= 1 + def note_yield(self, yield_node): """Called when a yield is found.""" raise SyntaxError("'yield' outside function", yield_node.lineno, @@ -210,6 +219,7 @@ self.has_variable_arg = False self.has_keywords_arg = False self.is_generator = False + self.has_yield_inside_try = False self.optimized = True self.return_with_value = False self.import_star = None @@ -220,6 +230,8 @@ raise SyntaxError("'return' with argument inside generator", self.ret.lineno, self.ret.col_offset) self.is_generator = True + if self._in_try_body_depth > 0: + self.has_yield_inside_try = True def note_return(self, ret): if ret.value: @@ -463,7 +475,12 @@ self.scope.new_temporary_name() if wih.optional_vars: self.scope.new_temporary_name() - ast.GenericASTVisitor.visit_With(self, wih) + wih.context_expr.walkabout(self) + if wih.optional_vars: + wih.optional_vars.walkabout(self) + self.scope.note_try_start(wih) + self.visit_sequence(wih.body) + self.scope.note_try_end(wih) def visit_arguments(self, arguments): scope = self.scope @@ -505,3 +522,16 @@ else: role = SYM_ASSIGNED self.note_symbol(name.id, role) + + def visit_TryExcept(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.handlers) + self.visit_sequence(node.orelse) + + def visit_TryFinally(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.finalbody) diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py 
b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -346,6 +346,25 @@ assert exc.msg == "'return' with argument inside generator" scp = self.func_scope("def f():\n return\n yield x") + def test_yield_inside_try(self): + scp = self.func_scope("def f(): yield x") + assert not scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n except: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n finally: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n with x: yield y") + assert scp.has_yield_inside_try + + def test_yield_outside_try(self): + for input in ("try: pass\n except: pass", + "try: pass\n except: yield y", + "try: pass\n finally: pass", + "try: pass\n finally: yield y", + "with x: pass"): + input = "def f():\n yield y\n %s\n yield y" % (input,) + assert not self.func_scope(input).has_yield_inside_try + def test_return(self): for input in ("class x: return", "return"): exc = py.test.raises(SyntaxError, self.func_scope, input).value diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -231,6 +231,11 @@ msg = "__int__ returned non-int (type '%T')" raise operationerrfmt(space.w_TypeError, msg, w_result) + def ord(self, space): + typename = space.type(self).getname(space) + msg = "ord() expected string of length 1, but %s found" + raise operationerrfmt(space.w_TypeError, msg, typename) + def __spacebind__(self, space): return self @@ -1396,6 +1401,9 @@ # This is here mostly just for gateway.int_unwrapping_space_method(). return bool(self.int_w(w_obj)) + def ord(self, w_obj): + return w_obj.ord(self) + # This is all interface for gateway.py. def gateway_int_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -520,12 +520,13 @@ # When a BuiltinCode is stored in a Function object, # you get the functionality of CPython's built-in function type. - def __init__(self, func, unwrap_spec=None, self_type=None, descrmismatch=None): + def __init__(self, func, unwrap_spec=None, self_type=None, + descrmismatch=None, doc=None): "NOT_RPYTHON" # 'implfunc' is the interpreter-level function. # Note that this uses a lot of (construction-time) introspection. 
Code.__init__(self, func.__name__) - self.docstring = func.__doc__ + self.docstring = doc or func.__doc__ self.identifier = "%s-%s-%s" % (func.__module__, func.__name__, getattr(self_type, '__name__', '*')) @@ -832,7 +833,7 @@ instancecache = {} def __new__(cls, f, app_name=None, unwrap_spec=None, descrmismatch=None, - as_classmethod=False): + as_classmethod=False, doc=None): "NOT_RPYTHON" # f must be a function whose name does NOT start with 'app_' @@ -861,7 +862,8 @@ cls.instancecache[key] = self self._code = BuiltinCode(f, unwrap_spec=unwrap_spec, self_type=self_type, - descrmismatch=descrmismatch) + descrmismatch=descrmismatch, + doc=doc) self.__name__ = f.func_name self.name = app_name self.as_classmethod = as_classmethod diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -155,20 +155,6 @@ code_name = self.pycode.co_name return space.wrap(code_name) - def __del__(self): - # Only bother enqueuing self to raise an exception if the frame is - # still not finished and finally or except blocks are present. - self.clear_all_weakrefs() - if self.frame is not None: - block = self.frame.lastblock - while block is not None: - if not isinstance(block, LoopBlock): - self.enqueue_for_destruction(self.space, - GeneratorIterator.descr_close, - "interrupting generator of ") - break - block = block.previous - # Results can be either an RPython list of W_Root, or it can be an # app-level W_ListObject, which also has an append() method, that's why we # generate 2 versions of the function and 2 jit drivers. @@ -211,3 +197,20 @@ return unpack_into unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() + + +class GeneratorIteratorWithDel(GeneratorIterator): + + def __del__(self): + # Only bother enqueuing self to raise an exception if the frame is + # still not finished and finally or except blocks are present. + self.clear_all_weakrefs() + if self.frame is not None: + block = self.frame.lastblock + while block is not None: + if not isinstance(block, LoopBlock): + self.enqueue_for_destruction(self.space, + GeneratorIterator.descr_close, + "interrupting generator of ") + break + block = block.previous diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR, CO_KILL_DOCSTRING) + CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash @@ -31,7 +31,7 @@ # Magic numbers for the bytecode version in code objects. # See comments in pypy/module/imp/importing. cpython_magic, = struct.unpack(" bytearray\n" + "\n" + "Create a bytearray object from a string of hexadecimal numbers.\n" + "Spaces between two numbers are accepted.\n" + "Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\\xb9\\x01\\xef')." 
+ hexstring = space.str_w(w_hexstring) + hexstring = hexstring.lower() + data = [] + length = len(hexstring) + i = -2 + while True: + i += 2 + while i < length and hexstring[i] == ' ': + i += 1 + if i >= length: + break + if i+1 == length: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % i)) + + top = _hex_digit_to_int(hexstring[i]) + if top == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % i)) + bot = _hex_digit_to_int(hexstring[i+1]) + if bot == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,))) + data.append(chr(top*16 + bot)) + + # in CPython bytearray.fromhex is a staticmethod, so + # we ignore w_type and always return a bytearray + return new_bytearray(space, space.w_bytearray, data) + + def descr_init(self, space, __args__): + # this is on the silly side + w_source, w_encoding, w_errors = __args__.parse_obj( + None, 'bytearray', init_signature, init_defaults) + + if w_source is None: + w_source = space.wrap('') + if w_encoding is None: + w_encoding = space.w_None + if w_errors is None: + w_errors = space.w_None + + # Unicode argument + if not space.is_w(w_encoding, space.w_None): + from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, encode_object + ) + encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + + # if w_source is an integer this correctly raises a TypeError + # the CPython error message is: "encoding or errors without a string argument" + # ours is: "expected unicode, got int object" + w_source = encode_object(space, w_source, encoding, errors) + + # Is it an int? + try: + count = space.int_w(w_source) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + if count < 0: + raise OperationError(space.w_ValueError, + space.wrap("bytearray negative count")) + self.data = ['\0'] * count + return + + data = makebytearraydata_w(space, w_source) + self.data = data + + def descr_repr(self, space): + s = self.data + + # Good default if there are no replacements. 
+ buf = StringBuilder(len("bytearray(b'')") + len(s)) + + buf.append("bytearray(b'") + + for i in range(len(s)): + c = s[i] + + if c == '\\' or c == "'": + buf.append('\\') + buf.append(c) + elif c == '\t': + buf.append('\\t') + elif c == '\r': + buf.append('\\r') + elif c == '\n': + buf.append('\\n') + elif not '\x20' <= c < '\x7f': + n = ord(c) + buf.append('\\x') + buf.append("0123456789abcdef"[n>>4]) + buf.append("0123456789abcdef"[n&0xF]) + else: + buf.append(c) + + buf.append("')") + + return space.wrap(buf.build()) + + def descr_str(self, space): + return space.wrap(''.join(self.data)) + + def descr_eq(self, space, w_other): + try: + return space.newbool(self._val(space) == self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_ne(self, space, w_other): + try: + return space.newbool(self._val(space) != self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_lt(self, space, w_other): + try: + return space.newbool(self._val(space) < self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_le(self, space, w_other): + try: + return space.newbool(self._val(space) <= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_gt(self, space, w_other): + try: + return space.newbool(self._val(space) > self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_ge(self, space, w_other): + try: + return space.newbool(self._val(space) >= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_buffer(self, space): + return BytearrayBuffer(self.data) + + def descr_inplace_add(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + self.data += w_other.data + else: + self.data += self._op_val(space, w_other) + return self + + def descr_inplace_mul(self, space, w_times): + try: + times = space.getindex_w(w_times, space.w_OverflowError) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + self.data *= times + return self + + def descr_setitem(self, space, w_index, w_other): + if isinstance(w_index, W_SliceObject): + oldsize = len(self.data) + start, stop, step, slicelength = w_index.indices4(space, oldsize) + sequence2 = makebytearraydata_w(space, w_other) + _setitem_slice_helper(space, self.data, start, step, + slicelength, sequence2, empty_elem='\x00') + else: + idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") + try: + self.data[idx] = getbytevalue(space, w_other) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("bytearray index out of range")) + + def descr_delitem(self, space, w_idx): + if isinstance(w_idx, W_SliceObject): + start, stop, step, slicelength = w_idx.indices4(space, + len(self.data)) + _delitem_slice_helper(space, self.data, start, step, slicelength) + else: + idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") + try: + del self.data[idx] + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("bytearray deletion index out of range")) + + def descr_append(self, space, 
w_item): + self.data.append(getbytevalue(space, w_item)) + + def descr_extend(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + self.data += w_other.data + else: + self.data += makebytearraydata_w(space, w_other) + return self + + def descr_insert(self, space, w_idx, w_other): + where = space.int_w(w_idx) + length = len(self.data) + index = get_positive_index(where, length) + val = getbytevalue(space, w_other) + self.data.insert(index, val) + return space.w_None + + @unwrap_spec(w_idx=WrappedDefault(-1)) + def descr_pop(self, space, w_idx): + index = space.int_w(w_idx) + try: + result = self.data.pop(index) + except IndexError: + if not self.data: + raise OperationError(space.w_IndexError, space.wrap( + "pop from empty bytearray")) + raise OperationError(space.w_IndexError, space.wrap( + "pop index out of range")) + return space.wrap(ord(result)) + + def descr_remove(self, space, w_char): + char = space.int_w(space.index(w_char)) + try: + self.data.remove(chr(char)) + except ValueError: + raise OperationError(space.w_ValueError, space.wrap( + "value not found in bytearray")) + + def descr_reverse(self, space): + self.data.reverse() + +def getbytevalue(space, w_value): + if space.isinstance_w(w_value, space.w_str): + string = space.str_w(w_value) + if len(string) != 1: + raise OperationError(space.w_ValueError, space.wrap( + "string must be of size 1")) + return string[0] + + value = space.getindex_w(w_value, None) + if not 0 <= value < 256: + # this includes the OverflowError in case the long is too large + raise OperationError(space.w_ValueError, space.wrap( + "byte must be in range(0, 256)")) + return chr(value) + +def new_bytearray(space, w_bytearraytype, data): + w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) + W_BytearrayObject.__init__(w_obj, data) + return w_obj + + +def makebytearraydata_w(space, w_source): + # String-like argument + try: + string = space.bufferstr_new_w(w_source) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + return [c for c in string] + + # sequence of bytes + w_iter = space.iter(w_source) + length_hint = space.length_hint(w_source, 0) + data = newlist_hint(length_hint) + extended = 0 + while True: + try: + w_item = space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + value = getbytevalue(space, w_item) + data.append(value) + extended += 1 + if extended < length_hint: + resizelist_hint(data, extended) + return data + +def _hex_digit_to_int(d): + val = ord(d) + if 47 < val < 58: + return val - 48 + if 96 < val < 103: + return val - 87 + return -1 + + +class BytearrayDocstrings: + """bytearray(iterable_of_ints) -> bytearray + bytearray(string, encoding[, errors]) -> bytearray + bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray + bytearray(memory_view) -> bytearray + + Construct an mutable bytearray object from: + - an iterable yielding integers in range(256) + - a text string encoded using the specified encoding + - a bytes or a bytearray object + - any object implementing the buffer API. + + bytearray(int) -> bytearray. + + Construct a zero-initialized bytearray of the given length. + + """ + + def __add__(): + """x.__add__(y) <==> x+y""" + + def __alloc__(): + """B.__alloc__() -> int + + Return the number of bytes actually allocated. 
+ """ + + def __contains__(): + """x.__contains__(y) <==> y in x""" + + def __delitem__(): + """x.__delitem__(y) <==> del x[y]""" + + def __eq__(): + """x.__eq__(y) <==> x==y""" + + def __ge__(): + """x.__ge__(y) <==> x>=y""" + + def __getattribute__(): + """x.__getattribute__('name') <==> x.name""" + + def __getitem__(): + """x.__getitem__(y) <==> x[y]""" + + def __gt__(): + """x.__gt__(y) <==> x>y""" + + def __iadd__(): + """x.__iadd__(y) <==> x+=y""" + + def __imul__(): + """x.__imul__(y) <==> x*=y""" + + def __init__(): + """x.__init__(...) initializes x; see help(type(x)) for signature""" + + def __iter__(): + """x.__iter__() <==> iter(x)""" + + def __le__(): + """x.__le__(y) <==> x<=y""" + + def __len__(): + """x.__len__() <==> len(x)""" + + def __lt__(): + """x.__lt__(y) <==> x x*n""" + + def __ne__(): + """x.__ne__(y) <==> x!=y""" + + def __reduce__(): + """Return state information for pickling.""" + + def __repr__(): + """x.__repr__() <==> repr(x)""" + + def __rmul__(): + """x.__rmul__(n) <==> n*x""" + + def __setitem__(): + """x.__setitem__(i, y) <==> x[i]=y""" + + def __sizeof__(): + """B.__sizeof__() -> int + + Returns the size of B in memory, in bytes + """ + + def __str__(): + """x.__str__() <==> str(x)""" + + def append(): + """B.append(int) -> None + + Append a single item to the end of B. + """ + + def capitalize(): + """B.capitalize() -> copy of B + + Return a copy of B with only its first character capitalized (ASCII) + and the rest lower-cased. + """ + + def center(): + """B.center(width[, fillchar]) -> copy of B + + Return B centered in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def count(): + """B.count(sub[, start[, end]]) -> int + + Return the number of non-overlapping occurrences of subsection sub in + bytes B[start:end]. Optional arguments start and end are interpreted + as in slice notation. + """ + + def decode(): + """B.decode(encoding=None, errors='strict') -> unicode + + Decode B using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' + as well as any other name registered with codecs.register_error that is + able to handle UnicodeDecodeErrors. + """ + + def endswith(): + """B.endswith(suffix[, start[, end]]) -> bool + + Return True if B ends with the specified suffix, False otherwise. + With optional start, test B beginning at that position. + With optional end, stop comparing B at that position. + suffix can also be a tuple of strings to try. + """ + + def expandtabs(): + """B.expandtabs([tabsize]) -> copy of B + + Return a copy of B where all tab characters are expanded using spaces. + If tabsize is not given, a tab size of 8 characters is assumed. + """ + + def extend(): + """B.extend(iterable_of_ints) -> None + + Append all the elements from the iterator or sequence to the + end of B. + """ + + def find(): + """B.find(sub[, start[, end]]) -> int + + Return the lowest index in B where subsection sub is found, + such that sub is contained within B[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def fromhex(): + """bytearray.fromhex(string) -> bytearray (static method) + + Create a bytearray object from a string of hexadecimal numbers. + Spaces between two numbers are accepted. 
+ Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\xb9\x01\xef'). + """ + + def index(): + """B.index(sub[, start[, end]]) -> int + + Like B.find() but raise ValueError when the subsection is not found. + """ + + def insert(): + """B.insert(index, int) -> None + + Insert a single item into the bytearray before the given index. + """ + + def isalnum(): + """B.isalnum() -> bool + + Return True if all characters in B are alphanumeric + and there is at least one character in B, False otherwise. + """ + + def isalpha(): + """B.isalpha() -> bool + + Return True if all characters in B are alphabetic + and there is at least one character in B, False otherwise. + """ + + def isdigit(): + """B.isdigit() -> bool + + Return True if all characters in B are digits + and there is at least one character in B, False otherwise. + """ + + def islower(): + """B.islower() -> bool + + Return True if all cased characters in B are lowercase and there is + at least one cased character in B, False otherwise. + """ + + def isspace(): + """B.isspace() -> bool + + Return True if all characters in B are whitespace + and there is at least one character in B, False otherwise. + """ + + def istitle(): + """B.istitle() -> bool + + Return True if B is a titlecased string and there is at least one + character in B, i.e. uppercase characters may only follow uncased + characters and lowercase characters only cased ones. Return False + otherwise. + """ + + def isupper(): + """B.isupper() -> bool + + Return True if all cased characters in B are uppercase and there is + at least one cased character in B, False otherwise. + """ + + def join(): + """B.join(iterable_of_bytes) -> bytearray + + Concatenate any number of str/bytearray objects, with B + in between each pair, and return the result as a new bytearray. + """ + + def ljust(): + """B.ljust(width[, fillchar]) -> copy of B + + Return B left justified in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def lower(): + """B.lower() -> copy of B + + Return a copy of B with all ASCII characters converted to lowercase. + """ + + def lstrip(): + """B.lstrip([bytes]) -> bytearray + + Strip leading bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip leading ASCII whitespace. + """ + + def partition(): + """B.partition(sep) -> (head, sep, tail) + + Search for the separator sep in B, and return the part before it, + the separator itself, and the part after it. If the separator is not + found, returns B and two empty bytearray objects. + """ + + def pop(): + """B.pop([index]) -> int + + Remove and return a single item from B. If no index + argument is given, will pop the last value. + """ + + def remove(): + """B.remove(int) -> None + + Remove the first occurrence of a value in B. + """ + + def replace(): + """B.replace(old, new[, count]) -> bytearray + + Return a copy of B with all occurrences of subsection + old replaced by new. If the optional argument count is + given, only the first count occurrences are replaced. + """ + + def reverse(): + """B.reverse() -> None + + Reverse the order of the values in B in place. + """ + + def rfind(): + """B.rfind(sub[, start[, end]]) -> int + + Return the highest index in B where subsection sub is found, + such that sub is contained within B[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. 
+ """ + + def rindex(): + """B.rindex(sub[, start[, end]]) -> int + + Like B.rfind() but raise ValueError when the subsection is not found. + """ + + def rjust(): + """B.rjust(width[, fillchar]) -> copy of B + + Return B right justified in a string of length width. Padding is + done using the specified fill character (default is a space) + """ + + def rpartition(): + """B.rpartition(sep) -> (head, sep, tail) + + Search for the separator sep in B, starting at the end of B, + and return the part before it, the separator itself, and the + part after it. If the separator is not found, returns two empty + bytearray objects and B. + """ + + def rsplit(): + """B.rsplit(sep=None, maxsplit=-1) -> list of bytearrays + + Return a list of the sections in B, using sep as the delimiter, + starting at the end of B and working to the front. + If sep is not given, B is split on ASCII whitespace characters + (space, tab, return, newline, formfeed, vertical tab). + If maxsplit is given, at most maxsplit splits are done. + """ + + def rstrip(): + """B.rstrip([bytes]) -> bytearray + + Strip trailing bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip trailing ASCII whitespace. + """ + + def split(): + """B.split(sep=None, maxsplit=-1) -> list of bytearrays + + Return a list of the sections in B, using sep as the delimiter. + If sep is not given, B is split on ASCII whitespace characters + (space, tab, return, newline, formfeed, vertical tab). + If maxsplit is given, at most maxsplit splits are done. + """ + + def splitlines(): + """B.splitlines(keepends=False) -> list of lines + + Return a list of the lines in B, breaking at line boundaries. + Line breaks are not included in the resulting list unless keepends + is given and true. + """ + + def startswith(): + """B.startswith(prefix[, start[, end]]) -> bool + + Return True if B starts with the specified prefix, False otherwise. + With optional start, test B beginning at that position. + With optional end, stop comparing B at that position. + prefix can also be a tuple of strings to try. + """ + + def strip(): + """B.strip([bytes]) -> bytearray + + Strip leading and trailing bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip ASCII whitespace. + """ + + def swapcase(): + """B.swapcase() -> copy of B + + Return a copy of B with uppercase ASCII characters converted + to lowercase ASCII and vice versa. + """ + + def title(): + """B.title() -> copy of B + + Return a titlecased version of B, i.e. ASCII words start with uppercase + characters, all remaining cased characters have lowercase. + """ + + def translate(): + """B.translate(table[, deletechars]) -> bytearray + + Return a copy of B, where all characters occurring in the + optional argument deletechars are removed, and the remaining + characters have been mapped through the given translation + table, which must be a bytes object of length 256. + """ + + def upper(): + """B.upper() -> copy of B + + Return a copy of B with all ASCII characters converted to uppercase. + """ + + def zfill(): + """B.zfill(width) -> copy of B + + Pad a numeric string B with zeros on the left, to fill a field + of the specified width. B is never truncated. 
+ """ + + +W_BytearrayObject.typedef = StdTypeDef( + "bytearray", + __doc__ = BytearrayDocstrings.__doc__, + __new__ = interp2app(W_BytearrayObject.descr_new), + __hash__ = None, + __reduce__ = interp2app(W_BytearrayObject.descr_reduce, + doc=BytearrayDocstrings.__reduce__.__doc__), + fromhex = interp2app(W_BytearrayObject.descr_fromhex, as_classmethod=True, + doc=BytearrayDocstrings.fromhex.__doc__), + + __repr__ = interp2app(W_BytearrayObject.descr_repr, + doc=BytearrayDocstrings.__repr__.__doc__), + __str__ = interp2app(W_BytearrayObject.descr_str, + doc=BytearrayDocstrings.__str__.__doc__), + + __eq__ = interp2app(W_BytearrayObject.descr_eq, + doc=BytearrayDocstrings.__eq__.__doc__), + __ne__ = interp2app(W_BytearrayObject.descr_ne, + doc=BytearrayDocstrings.__ne__.__doc__), + __lt__ = interp2app(W_BytearrayObject.descr_lt, + doc=BytearrayDocstrings.__lt__.__doc__), + __le__ = interp2app(W_BytearrayObject.descr_le, + doc=BytearrayDocstrings.__le__.__doc__), + __gt__ = interp2app(W_BytearrayObject.descr_gt, + doc=BytearrayDocstrings.__gt__.__doc__), + __ge__ = interp2app(W_BytearrayObject.descr_ge, + doc=BytearrayDocstrings.__ge__.__doc__), + + __len__ = interp2app(W_BytearrayObject.descr_len, + doc=BytearrayDocstrings.__len__.__doc__), + __contains__ = interp2app(W_BytearrayObject.descr_contains, + doc=BytearrayDocstrings.__contains__.__doc__), + + __add__ = interp2app(W_BytearrayObject.descr_add, + doc=BytearrayDocstrings.__add__.__doc__), + __mul__ = interp2app(W_BytearrayObject.descr_mul, + doc=BytearrayDocstrings.__mul__.__doc__), + __rmul__ = interp2app(W_BytearrayObject.descr_mul, + doc=BytearrayDocstrings.__rmul__.__doc__), + + __getitem__ = interp2app(W_BytearrayObject.descr_getitem, + doc=BytearrayDocstrings.__getitem__.__doc__), + + capitalize = interp2app(W_BytearrayObject.descr_capitalize, + doc=BytearrayDocstrings.capitalize.__doc__), + center = interp2app(W_BytearrayObject.descr_center, + doc=BytearrayDocstrings.center.__doc__), + count = interp2app(W_BytearrayObject.descr_count, + doc=BytearrayDocstrings.count.__doc__), + decode = interp2app(W_BytearrayObject.descr_decode, + doc=BytearrayDocstrings.decode.__doc__), + expandtabs = interp2app(W_BytearrayObject.descr_expandtabs, + doc=BytearrayDocstrings.expandtabs.__doc__), + find = interp2app(W_BytearrayObject.descr_find, + doc=BytearrayDocstrings.find.__doc__), + rfind = interp2app(W_BytearrayObject.descr_rfind, + doc=BytearrayDocstrings.rfind.__doc__), + index = interp2app(W_BytearrayObject.descr_index, + doc=BytearrayDocstrings.index.__doc__), + rindex = interp2app(W_BytearrayObject.descr_rindex, + doc=BytearrayDocstrings.rindex.__doc__), + isalnum = interp2app(W_BytearrayObject.descr_isalnum, + doc=BytearrayDocstrings.isalnum.__doc__), + isalpha = interp2app(W_BytearrayObject.descr_isalpha, + doc=BytearrayDocstrings.isalpha.__doc__), + isdigit = interp2app(W_BytearrayObject.descr_isdigit, + doc=BytearrayDocstrings.isdigit.__doc__), + islower = interp2app(W_BytearrayObject.descr_islower, + doc=BytearrayDocstrings.islower.__doc__), + isspace = interp2app(W_BytearrayObject.descr_isspace, + doc=BytearrayDocstrings.isspace.__doc__), + istitle = interp2app(W_BytearrayObject.descr_istitle, + doc=BytearrayDocstrings.istitle.__doc__), + isupper = interp2app(W_BytearrayObject.descr_isupper, + doc=BytearrayDocstrings.isupper.__doc__), + join = interp2app(W_BytearrayObject.descr_join, + doc=BytearrayDocstrings.join.__doc__), + ljust = interp2app(W_BytearrayObject.descr_ljust, + doc=BytearrayDocstrings.ljust.__doc__), + rjust = 
interp2app(W_BytearrayObject.descr_rjust, + doc=BytearrayDocstrings.rjust.__doc__), + lower = interp2app(W_BytearrayObject.descr_lower, + doc=BytearrayDocstrings.lower.__doc__), + partition = interp2app(W_BytearrayObject.descr_partition, + doc=BytearrayDocstrings.partition.__doc__), + rpartition = interp2app(W_BytearrayObject.descr_rpartition, + doc=BytearrayDocstrings.rpartition.__doc__), + replace = interp2app(W_BytearrayObject.descr_replace, + doc=BytearrayDocstrings.replace.__doc__), + split = interp2app(W_BytearrayObject.descr_split, + doc=BytearrayDocstrings.split.__doc__), + rsplit = interp2app(W_BytearrayObject.descr_rsplit, + doc=BytearrayDocstrings.rsplit.__doc__), + splitlines = interp2app(W_BytearrayObject.descr_splitlines, + doc=BytearrayDocstrings.splitlines.__doc__), + startswith = interp2app(W_BytearrayObject.descr_startswith, + doc=BytearrayDocstrings.startswith.__doc__), + endswith = interp2app(W_BytearrayObject.descr_endswith, + doc=BytearrayDocstrings.endswith.__doc__), + strip = interp2app(W_BytearrayObject.descr_strip, + doc=BytearrayDocstrings.strip.__doc__), + lstrip = interp2app(W_BytearrayObject.descr_lstrip, + doc=BytearrayDocstrings.lstrip.__doc__), + rstrip = interp2app(W_BytearrayObject.descr_rstrip, + doc=BytearrayDocstrings.rstrip.__doc__), + swapcase = interp2app(W_BytearrayObject.descr_swapcase, + doc=BytearrayDocstrings.swapcase.__doc__), + title = interp2app(W_BytearrayObject.descr_title, + doc=BytearrayDocstrings.title.__doc__), + translate = interp2app(W_BytearrayObject.descr_translate, + doc=BytearrayDocstrings.translate.__doc__), + upper = interp2app(W_BytearrayObject.descr_upper, + doc=BytearrayDocstrings.upper.__doc__), + zfill = interp2app(W_BytearrayObject.descr_zfill, + doc=BytearrayDocstrings.zfill.__doc__), + + __init__ = interp2app(W_BytearrayObject.descr_init, + doc=BytearrayDocstrings.__init__.__doc__), + __buffer__ = interp2app(W_BytearrayObject.descr_buffer), + + __iadd__ = interp2app(W_BytearrayObject.descr_inplace_add, + doc=BytearrayDocstrings.__iadd__.__doc__), + __imul__ = interp2app(W_BytearrayObject.descr_inplace_mul, + doc=BytearrayDocstrings.__imul__.__doc__), + __setitem__ = interp2app(W_BytearrayObject.descr_setitem, + doc=BytearrayDocstrings.__setitem__.__doc__), + __delitem__ = interp2app(W_BytearrayObject.descr_delitem, + doc=BytearrayDocstrings.__delitem__.__doc__), + + append = interp2app(W_BytearrayObject.descr_append, + doc=BytearrayDocstrings.append.__doc__), + extend = interp2app(W_BytearrayObject.descr_extend, + doc=BytearrayDocstrings.extend.__doc__), + insert = interp2app(W_BytearrayObject.descr_insert, + doc=BytearrayDocstrings.insert.__doc__), + pop = interp2app(W_BytearrayObject.descr_pop, + doc=BytearrayDocstrings.pop.__doc__), + remove = interp2app(W_BytearrayObject.descr_remove, + doc=BytearrayDocstrings.remove.__doc__), + reverse = interp2app(W_BytearrayObject.descr_reverse, + doc=BytearrayDocstrings.reverse.__doc__), +) init_signature = Signature(['source', 'encoding', 'errors'], None, None) init_defaults = [None, None, None] -def init__Bytearray(space, w_bytearray, __args__): - # this is on the silly side - w_source, w_encoding, w_errors = __args__.parse_obj( - None, 'bytearray', init_signature, init_defaults) - if w_source is None: - w_source = space.wrap('') - if w_encoding is None: - w_encoding = space.w_None - if w_errors is None: - w_errors = space.w_None - - # Unicode argument - if not space.is_w(w_encoding, space.w_None): - from pypy.objspace.std.unicodetype import ( - _get_encoding_and_errors, 
encode_object - ) - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) - - # if w_source is an integer this correctly raises a TypeError - # the CPython error message is: "encoding or errors without a string argument" - # ours is: "expected unicode, got int object" - w_source = encode_object(space, w_source, encoding, errors) - - # Is it an int? - try: - count = space.int_w(w_source) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - if count < 0: - raise OperationError(space.w_ValueError, - space.wrap("bytearray negative count")) - w_bytearray.data = ['\0'] * count - return - - data = makebytearraydata_w(space, w_source) - w_bytearray.data = data - -def len__Bytearray(space, w_bytearray): - result = len(w_bytearray.data) - return space.newint(result) - -def ord__Bytearray(space, w_bytearray): - if len(w_bytearray.data) != 1: - raise OperationError(space.w_TypeError, - space.wrap("expected a character, but string" - "of length %s found" % len(w_bytearray.data))) - return space.wrap(ord(w_bytearray.data[0])) - -def getitem__Bytearray_ANY(space, w_bytearray, w_index): - # getindex_w should get a second argument space.w_IndexError, - # but that doesn't exist the first time this is called. - try: - w_IndexError = space.w_IndexError - except AttributeError: - w_IndexError = None - index = space.getindex_w(w_index, w_IndexError, "bytearray index") - try: - return space.newint(ord(w_bytearray.data[index])) - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray index out of range")) - -def getitem__Bytearray_Slice(space, w_bytearray, w_slice): - data = w_bytearray.data - length = len(data) - start, stop, step, slicelength = w_slice.indices4(space, length) - assert slicelength >= 0 - if step == 1 and 0 <= start <= stop: - newdata = data[start:stop] - else: - newdata = _getitem_slice_multistep(data, start, step, slicelength) - return W_BytearrayObject(newdata) - -def _getitem_slice_multistep(data, start, step, slicelength): - return [data[start + i*step] for i in range(slicelength)] - -def contains__Bytearray_Int(space, w_bytearray, w_char): - char = space.int_w(w_char) - if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) - for c in w_bytearray.data: - if ord(c) == char: - return space.w_True - return space.w_False - -def contains__Bytearray_String(space, w_bytearray, w_str): - # XXX slow - copies, needs rewriting - w_str2 = str__Bytearray(space, w_bytearray) - return stringobject.contains__String_String(space, w_str2, w_str) - -def contains__Bytearray_ANY(space, w_bytearray, w_sub): - # XXX slow - copies, needs rewriting - w_str = space.wrap(space.bufferstr_new_w(w_sub)) - w_str2 = str__Bytearray(space, w_bytearray) - return stringobject.contains__String_String(space, w_str2, w_str) - -def add__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - return W_BytearrayObject(data1 + data2) - -def add__Bytearray_ANY(space, w_bytearray1, w_other): - data1 = w_bytearray1.data - data2 = [c for c in space.bufferstr_new_w(w_other)] - return W_BytearrayObject(data1 + data2) - -def add__String_Bytearray(space, w_str, w_bytearray): - data2 = w_bytearray.data - data1 = [c for c in space.str_w(w_str)] - return W_BytearrayObject(data1 + data2) - -def mul_bytearray_times(space, w_bytearray, w_times): - try: - times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: - if 
e.match(space, space.w_TypeError): - raise FailedToImplement - raise - data = w_bytearray.data - return W_BytearrayObject(data * times) - -def mul__Bytearray_ANY(space, w_bytearray, w_times): - return mul_bytearray_times(space, w_bytearray, w_times) - -def mul__ANY_Bytearray(space, w_times, w_bytearray): - return mul_bytearray_times(space, w_bytearray, w_times) - -def inplace_mul__Bytearray_ANY(space, w_bytearray, w_times): - try: - times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: - if e.match(space, space.w_TypeError): - raise FailedToImplement - raise - w_bytearray.data *= times - return w_bytearray - -def eq__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - if len(data1) != len(data2): - return space.w_False - for i in range(len(data1)): - if data1[i] != data2[i]: - return space.w_False - return space.w_True - -def String2Bytearray(space, w_str): - data = [c for c in space.str_w(w_str)] - return W_BytearrayObject(data) - -def eq__Bytearray_String(space, w_bytearray, w_other): - return space.eq(str__Bytearray(space, w_bytearray), w_other) - -def eq__Bytearray_Unicode(space, w_bytearray, w_other): - return space.w_False - -def eq__Unicode_Bytearray(space, w_other, w_bytearray): - return space.w_False - -def ne__Bytearray_String(space, w_bytearray, w_other): - return space.ne(str__Bytearray(space, w_bytearray), w_other) - -def ne__Bytearray_Unicode(space, w_bytearray, w_other): - return space.w_True - -def ne__Unicode_Bytearray(space, w_other, w_bytearray): - return space.w_True - -def _min(a, b): - if a < b: - return a - return b - -def lt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - ncmp = _min(len(data1), len(data2)) - # Search for the first index where items are different - for p in range(ncmp): - if data1[p] != data2[p]: - return space.newbool(data1[p] < data2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(data1) < len(data2)) - -def gt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - ncmp = _min(len(data1), len(data2)) - # Search for the first index where items are different - for p in range(ncmp): - if data1[p] != data2[p]: - return space.newbool(data1[p] > data2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(data1) > len(data2)) - -def str_translate__Bytearray_ANY_ANY(space, w_bytearray1, w_table, w_deletechars): - # XXX slow, copies *twice* needs proper implementation - w_str_copy = str__Bytearray(space, w_bytearray1) - w_res = stringobject.str_translate__String_ANY_ANY(space, w_str_copy, - w_table, w_deletechars) - return String2Bytearray(space, w_res) - -# Mostly copied from repr__String, but without the "smart quote" -# functionality. -def repr__Bytearray(space, w_bytearray): - s = w_bytearray.data - - # Good default if there are no replacements. 
- buf = StringBuilder(len("bytearray(b'')") + len(s)) - - buf.append("bytearray(b'") - - for i in range(len(s)): - c = s[i] - - if c == '\\' or c == "'": - buf.append('\\') - buf.append(c) - elif c == '\t': - buf.append('\\t') - elif c == '\r': - buf.append('\\r') - elif c == '\n': - buf.append('\\n') - elif not '\x20' <= c < '\x7f': - n = ord(c) - buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) - else: - buf.append(c) - - buf.append("')") - - return space.wrap(buf.build()) - -def str__Bytearray(space, w_bytearray): - return space.wrap(''.join(w_bytearray.data)) - -def str_count__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_count__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_index__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_index__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_rindex__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_rindex__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_find__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_find__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_rfind__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrap(space.bufferstr_new_w(w_char)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_rfind__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) - -def str_startswith__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_prefix, w_start, w_stop): - if space.isinstance_w(w_prefix, space.w_tuple): - w_str = str__Bytearray(space, w_bytearray) - w_prefix = space.newtuple([space.wrap(space.bufferstr_new_w(w_entry)) for w_entry in - space.fixedview(w_prefix)]) - return stringobject.str_startswith__String_ANY_ANY_ANY(space, w_str, w_prefix, - w_start, w_stop) - - w_prefix = space.wrap(space.bufferstr_new_w(w_prefix)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_startswith__String_String_ANY_ANY(space, w_str, w_prefix, - w_start, w_stop) - -def str_endswith__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_suffix, w_start, w_stop): - if space.isinstance_w(w_suffix, space.w_tuple): - w_str = str__Bytearray(space, w_bytearray) - w_suffix = space.newtuple([space.wrap(space.bufferstr_new_w(w_entry)) for w_entry in - space.fixedview(w_suffix)]) - return stringobject.str_endswith__String_ANY_ANY_ANY(space, w_str, w_suffix, - w_start, w_stop) - w_suffix = space.wrap(space.bufferstr_new_w(w_suffix)) - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_endswith__String_String_ANY_ANY(space, w_str, w_suffix, - w_start, w_stop) - +# XXX consider moving to W_BytearrayObject or remove def str_join__Bytearray_ANY(space, w_self, w_list): list_w = space.listview(w_list) if not list_w: @@ -350,251 +1022,8 @@ newdata.extend([c for c in space.bufferstr_new_w(w_s)]) return W_BytearrayObject(newdata) -def str_decode__Bytearray_ANY_ANY(space, 
w_bytearray, w_encoding, w_errors): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_decode__String_ANY_ANY(space, w_str, w_encoding, w_errors) - -def str_islower__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_islower__String(space, w_str) - -def str_isupper__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_isupper__String(space, w_str) - -def str_isalpha__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_isalpha__String(space, w_str) - -def str_isalnum__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_isalnum__String(space, w_str) - -def str_isdigit__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_isdigit__String(space, w_str) - -def str_istitle__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_istitle__String(space, w_str) - -def str_isspace__Bytearray(space, w_bytearray): - w_str = str__Bytearray(space, w_bytearray) - return stringobject.str_isspace__String(space, w_str) - -def bytearray_insert__Bytearray_Int_ANY(space, w_bytearray, w_idx, w_other): - where = space.int_w(w_idx) - length = len(w_bytearray.data) - index = get_positive_index(where, length) - val = getbytevalue(space, w_other) - w_bytearray.data.insert(index, val) - return space.w_None - -def bytearray_pop__Bytearray_Int(space, w_bytearray, w_idx): - index = space.int_w(w_idx) - try: - result = w_bytearray.data.pop(index) - except IndexError: - if not w_bytearray.data: - raise OperationError(space.w_IndexError, space.wrap( - "pop from empty bytearray")) - raise OperationError(space.w_IndexError, space.wrap( - "pop index out of range")) - return space.wrap(ord(result)) - -def bytearray_remove__Bytearray_ANY(space, w_bytearray, w_char): - char = space.int_w(space.index(w_char)) - try: - result = w_bytearray.data.remove(chr(char)) - except ValueError: - raise OperationError(space.w_ValueError, space.wrap( - "value not found in bytearray")) - -def bytearray_reverse__Bytearray(space, w_bytearray): - w_bytearray.data.reverse() - return space.w_None - _space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) -def bytearray_strip__Bytearray_None(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, _space_chars, 1, 1) - -def bytearray_strip__Bytearray_ANY(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, space.bufferstr_new_w(w_chars), 1, 1) - -def bytearray_lstrip__Bytearray_None(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, _space_chars, 1, 0) From noreply at buildbot.pypy.org Mon Jan 20 12:43:29 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 20 Jan 2014 12:43:29 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Add const qualifiers here. Message-ID: <20140120114329.64B211C0356@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68776:48ff5edc93a6 Date: 2014-01-20 10:09 +0100 http://bitbucket.org/pypy/pypy/changeset/48ff5edc93a6/ Log: Add const qualifiers here. 
diff --git a/rpython/translator/llvm/PyPyGC.cpp b/rpython/translator/llvm/PyPyGC.cpp --- a/rpython/translator/llvm/PyPyGC.cpp +++ b/rpython/translator/llvm/PyPyGC.cpp @@ -41,10 +41,10 @@ unsigned PtrSize = AP.TM.getDataLayout()->getPointerSize(); SmallPtrSet GCStackBottoms; - GlobalVariable *GV = getModule().getGlobalVariable("gc_stack_bottoms"); - ConstantArray *Inits = dyn_cast(GV->getInitializer()); + const GlobalVariable *GV = getModule().getGlobalVariable("gc_stack_bottoms"); + const ConstantArray *Inits = dyn_cast(GV->getInitializer()); for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i) - if (Function *F = + if (const Function *F = dyn_cast(Inits->getOperand(i)->stripPointerCasts())) GCStackBottoms.insert(F); From noreply at buildbot.pypy.org Mon Jan 20 12:43:30 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 20 Jan 2014 12:43:30 +0100 (CET) Subject: [pypy-commit] pypy default: Copy the skip logic from 57de6303e2f4 to test__rawffi.py. Message-ID: <20140120114330.A9E271C0356@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68777:05dc946fc44a Date: 2014-01-20 12:42 +0100 http://bitbucket.org/pypy/pypy/changeset/05dc946fc44a/ Log: Copy the skip logic from 57de6303e2f4 to test__rawffi.py. diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1,5 +1,6 @@ from rpython.translator.platform import platform from rpython.translator.tool.cbuild import ExternalCompilationInfo +from pypy.conftest import option from pypy.module._rawffi.interp_rawffi import TYPEMAP, TYPEMAP_FLOAT_LETTERS from pypy.module._rawffi.tracker import Tracker @@ -1130,6 +1131,15 @@ def setup_class(cls): cls.w_sizes_and_alignments = cls.space.wrap(dict( [(k, (v.c_size, v.c_alignment)) for k,v in TYPEMAP.iteritems()])) + # + # detect if we're running on PyPy with DO_TRACING not compiled in + if option.runappdirect: + try: + import _rawffi + _rawffi._num_of_allocated_objects() + except (ImportError, RuntimeError), e: + py.test.skip(str(e)) + # Tracker.DO_TRACING = True def test_structure_autofree(self): From noreply at buildbot.pypy.org Mon Jan 20 12:43:31 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 20 Jan 2014 12:43:31 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge Message-ID: <20140120114331.E17FD1C0356@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68778:1f4ae51fadd0 Date: 2014-01-20 12:43 +0100 http://bitbucket.org/pypy/pypy/changeset/1f4ae51fadd0/ Log: hg merge diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -99,7 +99,7 @@ called[0] += 1 if called[0] == 1: assert errors == "foo!" - assert enc == encoding + assert enc == encoding.replace('-', '') assert t is s assert start == startingpos assert stop == endingpos From noreply at buildbot.pypy.org Mon Jan 20 12:50:20 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 20 Jan 2014 12:50:20 +0100 (CET) Subject: [pypy-commit] pypy default: Document lldebug0 option. Message-ID: <20140120115020.77C301C3CDC@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68779:9b769b1c433c Date: 2014-01-20 12:49 +0100 http://bitbucket.org/pypy/pypy/changeset/9b769b1c433c/ Log: Document lldebug0 option. 
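As a hedged illustration of the new translation.lldebug0 option documented in the diff below (the RPython config API shown here is assumed rather than part of this changeset, and the command-line spelling may differ):

    from rpython.config.translationoption import get_combined_translation_config

    config = get_combined_translation_config(translating=True)
    config.translation.lldebug0 = True   # like lldebug, but C files are also compiled with -O0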
diff --git a/pypy/doc/config/translation.lldebug0.txt b/pypy/doc/config/translation.lldebug0.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/translation.lldebug0.txt @@ -0,0 +1,1 @@ +Like lldebug, but in addition compile C files with -O0 From noreply at buildbot.pypy.org Mon Jan 20 13:03:26 2014 From: noreply at buildbot.pypy.org (jerith) Date: Mon, 20 Jan 2014 13:03:26 +0100 (CET) Subject: [pypy-commit] pypy default: update whatsnew for remove-del-from-generatoriterator branch Message-ID: <20140120120326.475C01C3CDC@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: Changeset: r68780:200729d3aed6 Date: 2014-01-20 14:02 +0200 http://bitbucket.org/pypy/pypy/changeset/200729d3aed6/ Log: update whatsnew for remove-del-from-generatoriterator branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,3 +44,7 @@ .. branch: refactor-str-types Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. From noreply at buildbot.pypy.org Mon Jan 20 14:13:25 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 20 Jan 2014 14:13:25 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge Message-ID: <20140120131325.88EEA1C0356@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68783:818e2590ea5f Date: 2014-01-20 14:13 +0100 http://bitbucket.org/pypy/pypy/changeset/818e2590ea5f/ Log: hg merge diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,3 +44,7 @@ .. branch: refactor-str-types Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. From noreply at buildbot.pypy.org Mon Jan 20 14:13:23 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 20 Jan 2014 14:13:23 +0100 (CET) Subject: [pypy-commit] pypy default: Update _backend_test_c.py. Message-ID: <20140120131323.09C931C0356@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68781:1e4e288a86b9 Date: 2014-01-20 13:00 +0100 http://bitbucket.org/pypy/pypy/changeset/1e4e288a86b9/ Log: Update _backend_test_c.py. 
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3137,6 +3137,34 @@ p = newp(BArray, None) assert sizeof(p[2:9]) == 7 * sizeof(BInt) +def test_packed(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BShort = new_primitive_type("short") + BStruct = new_struct_type("struct foo") + complete_struct_or_union(BStruct, [('a1', BLong, -1), + ('a2', BChar, -1), + ('a3', BShort, -1)], + None, -1, -1, 8) # SF_PACKED==8 + d = BStruct.fields + assert len(d) == 3 + assert d[0][0] == 'a1' + assert d[0][1].type is BLong + assert d[0][1].offset == 0 + assert d[0][1].bitshift == -1 + assert d[0][1].bitsize == -1 + assert d[1][0] == 'a2' + assert d[1][1].type is BChar + assert d[1][1].offset == sizeof(BLong) + assert d[1][1].bitshift == -1 + assert d[1][1].bitsize == -1 + assert d[2][0] == 'a3' + assert d[2][1].type is BShort + assert d[2][1].offset == sizeof(BLong) + sizeof(BChar) + assert d[2][1].bitshift == -1 + assert d[2][1].bitsize == -1 + assert sizeof(BStruct) == sizeof(BLong) + sizeof(BChar) + sizeof(BShort) + assert alignof(BStruct) == 1 def test_version(): # this test is here mostly for PyPy From noreply at buildbot.pypy.org Mon Jan 20 14:13:24 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 20 Jan 2014 14:13:24 +0100 (CET) Subject: [pypy-commit] pypy default: Add support for packed structs in _cffi_backend (ported from cffi/c5e17441bc96). Message-ID: <20140120131324.579F91C0356@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68782:4916f1d812ea Date: 2014-01-20 14:12 +0100 http://bitbucket.org/pypy/pypy/changeset/4916f1d812ea/ Log: Add support for packed structs in _cffi_backend (ported from cffi/c5e17441bc96). 
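A hedged sketch of what the new flag does at the low-level _cffi_backend API, mirroring the test_packed test added above (the import alias is the only assumption; 8 is the SF_PACKED value passed as the last, sflags argument of complete_struct_or_union):

    import _cffi_backend as B

    BLong   = B.new_primitive_type("long")
    BChar   = B.new_primitive_type("char")
    BShort  = B.new_primitive_type("short")
    BStruct = B.new_struct_type("struct foo")
    # sflags=8 (SF_PACKED): fields are laid out back-to-back, without padding
    B.complete_struct_or_union(BStruct, [('a1', BLong, -1),
                                         ('a2', BChar, -1),
                                         ('a3', BShort, -1)],
                               None, -1, -1, 8)
    assert B.sizeof(BStruct) == B.sizeof(BLong) + B.sizeof(BChar) + B.sizeof(BShort)
    assert B.alignof(BStruct) == 1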
diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -118,6 +118,7 @@ SF_MSVC_BITFIELDS = 1 SF_GCC_ARM_BITFIELDS = 2 SF_GCC_BIG_ENDIAN = 4 +SF_PACKED = 8 if sys.platform == 'win32': DEFAULT_SFLAGS = SF_MSVC_BITFIELDS @@ -190,8 +191,8 @@ boffset = 0 # reset each field at offset 0 # # update the total alignment requirement, but skip it if the - # field is an anonymous bitfield - falign = ftype.alignof() + # field is an anonymous bitfield or if SF_PACKED + falign = 1 if sflags & SF_PACKED else ftype.alignof() do_align = True if (sflags & SF_GCC_ARM_BITFIELDS) == 0 and fbitsize >= 0: if (sflags & SF_MSVC_BITFIELDS) == 0: From noreply at buildbot.pypy.org Mon Jan 20 15:19:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 20 Jan 2014 15:19:11 +0100 (CET) Subject: [pypy-commit] cffi default: Raise a NotImplementedError in one messy corner case Message-ID: <20140120141911.8D0361C34FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1457:ca6d6a337335 Date: 2014-01-20 15:18 +0100 http://bitbucket.org/cffi/cffi/changeset/ca6d6a337335/ Log: Raise a NotImplementedError in one messy corner case diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3817,6 +3817,14 @@ if (bits_already_occupied + fbitsize > 8 * ftype->ct_size) { /* it would not fit, we need to start at the next allowed position */ + if ((sflags & SF_PACKED) && + (bits_already_occupied & 7)) { + PyErr_Format(PyExc_NotImplementedError, + "with 'packed', gcc would compile field " + "'%s.%s' to reuse some bits in the previous " + "field", ct->ct_name, PyText_AS_UTF8(fname)); + goto error; + } field_offset_bytes += falign; assert(boffset < field_offset_bytes * 8); boffset = field_offset_bytes * 8; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3177,6 +3177,16 @@ assert sizeof(BStruct) == sizeof(BLong) + sizeof(BChar) + sizeof(BShort) assert alignof(BStruct) == 1 +def test_packed_with_bitfields(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BStruct = new_struct_type("struct foo") + py.test.raises(NotImplementedError, + complete_struct_or_union, + BStruct, [('a1', BLong, 30), + ('a2', BChar, 5)], + None, -1, -1, 8) # SF_PACKED==8 + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.8" From noreply at buildbot.pypy.org Mon Jan 20 15:22:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 20 Jan 2014 15:22:47 +0100 (CET) Subject: [pypy-commit] pypy default: Update to cffi/ca6d6a337335 Message-ID: <20140120142247.5CD631C34FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68784:f4b9171da548 Date: 2014-01-20 15:22 +0100 http://bitbucket.org/pypy/pypy/changeset/f4b9171da548/ Log: Update to cffi/ca6d6a337335 diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -306,6 +306,12 @@ if bits_already_occupied + fbitsize > 8 * ftype.size: # it would not fit, we need to start at the next # allowed position + if ((sflags & SF_PACKED) != 0 and + (bits_already_occupied & 7) != 0): + raise operationerrfmt(space.w_NotImplementedError, + "with 'packed', gcc would compile field " + "'%s.%s' to reuse some bits in the previous " + "field", w_ctype.name, fname) field_offset_bytes += falign assert boffset < field_offset_bytes * 
8 boffset = field_offset_bytes * 8 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3166,6 +3166,16 @@ assert sizeof(BStruct) == sizeof(BLong) + sizeof(BChar) + sizeof(BShort) assert alignof(BStruct) == 1 +def test_packed_with_bitfields(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BStruct = new_struct_type("struct foo") + py.test.raises(NotImplementedError, + complete_struct_or_union, + BStruct, [('a1', BLong, 30), + ('a2', BChar, 5)], + None, -1, -1, 8) # SF_PACKED==8 + def test_version(): # this test is here mostly for PyPy assert __version__ == "0.8" From noreply at buildbot.pypy.org Mon Jan 20 15:48:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 20 Jan 2014 15:48:31 +0100 (CET) Subject: [pypy-commit] pypy default: Support buffers instead of only strings here Message-ID: <20140120144831.314C31C3CD9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68785:da89455f80b5 Date: 2014-01-20 15:47 +0100 http://bitbucket.org/pypy/pypy/changeset/da89455f80b5/ Log: Support buffers instead of only strings here diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -579,7 +579,7 @@ s = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, address), maxlength) return space.wrap(s) - at unwrap_spec(address=r_uint, newcontent=str) + at unwrap_spec(address=r_uint, newcontent='bufferstr') def rawstring2charp(space, address, newcontent): from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -330,6 +330,8 @@ a = A(10, 'x'*10) _rawffi.rawstring2charp(a.buffer, "foobar") assert ''.join([a[i] for i in range(10)]) == "foobarxxxx" + _rawffi.rawstring2charp(a.buffer, buffer("baz")) + assert ''.join([a[i] for i in range(10)]) == "bazbarxxxx" a.free() def test_raw_callable(self): From noreply at buildbot.pypy.org Mon Jan 20 15:48:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 20 Jan 2014 15:48:32 +0100 (CET) Subject: [pypy-commit] pypy default: Extra tests Message-ID: <20140120144832.8D1BD1C3CD9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68786:f64ca9169226 Date: 2014-01-20 15:47 +0100 http://bitbucket.org/pypy/pypy/changeset/f64ca9169226/ Log: Extra tests diff --git a/pypy/module/test_lib_pypy/ctypes_tests/support.py b/pypy/module/test_lib_pypy/ctypes_tests/support.py --- a/pypy/module/test_lib_pypy/ctypes_tests/support.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/support.py @@ -37,6 +37,8 @@ pass def teardown_class(cls): + if not hasattr(sys, 'pypy_translation_info'): + return if sys.pypy_translation_info['translation.gc'] == 'boehm': return # it seems that boehm has problems with __del__, so not # everything is freed diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py b/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py @@ -38,6 +38,16 @@ buf.raw = "Hello, World" assert buf.value == "Hello, World" + def 
test_c_buffer_raw_from_buffer(self): + buf = c_buffer(32) + buf.raw = buffer("Hello, World") + assert buf.value == "Hello, World" + + def test_c_buffer_raw_from_memoryview(self): + buf = c_buffer(32) + buf.raw = memoryview("Hello, World") + assert buf.value == "Hello, World" + def test_param_1(self): BUF = c_char * 4 buf = BUF() From noreply at buildbot.pypy.org Mon Jan 20 15:52:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 20 Jan 2014 15:52:21 +0100 (CET) Subject: [pypy-commit] pypy default: Support these tests in -A mode when running on a PyPy where DO_TRACING Message-ID: <20140120145221.30F091C3CD9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68787:7de9113f7079 Date: 2014-01-20 15:51 +0100 http://bitbucket.org/pypy/pypy/changeset/7de9113f7079/ Log: Support these tests in -A mode when running on a PyPy where DO_TRACING is not enabled. diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1149,24 +1149,32 @@ gc.collect() gc.collect() S = _rawffi.Structure([('x', 'i')]) - oldnum = _rawffi._num_of_allocated_objects() + try: + oldnum = _rawffi._num_of_allocated_objects() + except RuntimeError: + oldnum = '?' s = S(autofree=True) s.x = 3 s = None gc.collect() - assert oldnum == _rawffi._num_of_allocated_objects() + if oldnum != '?': + assert oldnum == _rawffi._num_of_allocated_objects() def test_array_autofree(self): import gc, _rawffi gc.collect() - oldnum = _rawffi._num_of_allocated_objects() + try: + oldnum = _rawffi._num_of_allocated_objects() + except RuntimeError: + oldnum = '?' A = _rawffi.Array('c') a = A(6, 'xxyxx\x00', autofree=True) assert _rawffi.charp2string(a.buffer) == 'xxyxx' a = None gc.collect() - assert oldnum == _rawffi._num_of_allocated_objects() + if oldnum != '?': + assert oldnum == _rawffi._num_of_allocated_objects() def teardown_class(cls): Tracker.DO_TRACING = False From noreply at buildbot.pypy.org Mon Jan 20 17:26:52 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 20 Jan 2014 17:26:52 +0100 (CET) Subject: [pypy-commit] pypy default: Back out changeset 05dc946fc44a. Message-ID: <20140120162652.F3DEC1C34FF@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68788:00643528b116 Date: 2014-01-20 17:26 +0100 http://bitbucket.org/pypy/pypy/changeset/00643528b116/ Log: Back out changeset 05dc946fc44a. 
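For context around the test__rawffi changes above and below, a minimal sketch of the runtime probe these tests rely on (mirroring commit 7de9113f7079; the allocation in the middle is elided), which degrades gracefully on a PyPy built without DO_TRACING:

    import gc, _rawffi

    gc.collect()
    try:
        oldnum = _rawffi._num_of_allocated_objects()
    except RuntimeError:      # DO_TRACING not compiled in
        oldnum = '?'
    # ... allocate and release _rawffi objects here ...
    gc.collect()
    if oldnum != '?':
        assert oldnum == _rawffi._num_of_allocated_objects()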
diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1,6 +1,5 @@ from rpython.translator.platform import platform from rpython.translator.tool.cbuild import ExternalCompilationInfo -from pypy.conftest import option from pypy.module._rawffi.interp_rawffi import TYPEMAP, TYPEMAP_FLOAT_LETTERS from pypy.module._rawffi.tracker import Tracker @@ -1133,15 +1132,6 @@ def setup_class(cls): cls.w_sizes_and_alignments = cls.space.wrap(dict( [(k, (v.c_size, v.c_alignment)) for k,v in TYPEMAP.iteritems()])) - # - # detect if we're running on PyPy with DO_TRACING not compiled in - if option.runappdirect: - try: - import _rawffi - _rawffi._num_of_allocated_objects() - except (ImportError, RuntimeError), e: - py.test.skip(str(e)) - # Tracker.DO_TRACING = True def test_structure_autofree(self): From noreply at buildbot.pypy.org Mon Jan 20 18:11:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 20 Jan 2014 18:11:37 +0100 (CET) Subject: [pypy-commit] pypy default: The split of the __del__ in a subclass of GeneratorIterator triggers Message-ID: <20140120171137.D2D241C10B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68789:62c039ea5e31 Date: 2014-01-20 17:10 +0000 http://bitbucket.org/pypy/pypy/changeset/62c039ea5e31/ Log: The split of the __del__ in a subclass of GeneratorIterator triggers an unidentified JIT bug. Disable it for now. Investigating... diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -167,7 +167,7 @@ def run(self): """Start this frame's execution.""" if self.getcode().co_flags & pycode.CO_GENERATOR: - if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: + if 1:# self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: from pypy.interpreter.generator import GeneratorIteratorWithDel return self.space.wrap(GeneratorIteratorWithDel(self)) else: From noreply at buildbot.pypy.org Mon Jan 20 18:39:15 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 20 Jan 2014 18:39:15 +0100 (CET) Subject: [pypy-commit] pypy default: Move rpython/rtyper/raisingops/raisingops.py to rpython/rtyper/raisingops.py. Message-ID: <20140120173915.0E64C1C039A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68790:4f968d7a8486 Date: 2014-01-20 18:39 +0100 http://bitbucket.org/pypy/pypy/changeset/4f968d7a8486/ Log: Move rpython/rtyper/raisingops/raisingops.py to rpython/rtyper/raisingops.py. diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -167,7 +167,7 @@ # # This list corresponds to the operations implemented by the LLInterpreter. # Note that many exception-raising operations can be replaced by calls -# to helper functions in rpython.rtyper.raisingops.raisingops. +# to helper functions in rpython.rtyper.raisingops. # ***** Run test_lloperation after changes. 
***** LL_OPERATIONS = { diff --git a/rpython/rtyper/raisingops/raisingops.py b/rpython/rtyper/raisingops.py rename from rpython/rtyper/raisingops/raisingops.py rename to rpython/rtyper/raisingops.py diff --git a/rpython/rtyper/raisingops/__init__.py b/rpython/rtyper/raisingops/__init__.py deleted file mode 100644 diff --git a/rpython/translator/backendopt/raisingop2direct_call.py b/rpython/translator/backendopt/raisingop2direct_call.py --- a/rpython/translator/backendopt/raisingop2direct_call.py +++ b/rpython/translator/backendopt/raisingop2direct_call.py @@ -1,5 +1,5 @@ from rpython.translator.backendopt.support import log, all_operations, annotate -import rpython.rtyper.raisingops.raisingops +import rpython.rtyper.raisingops log = log.raisingop2directcall @@ -15,7 +15,7 @@ def raisingop2direct_call(translator, graphs=None): """search for operations that could raise an exception and change that - operation into a direct_call to a function from the raisingops directory. + operation into a direct_call to a function from the raisingops module. This function also needs to be annotated and specialized. note: this could be extended to allow for any operation to be changed into @@ -30,7 +30,7 @@ for op in all_operations(graphs): if not is_raisingop(op): continue - func = getattr(rpython.rtyper.raisingops.raisingops, op.opname, None) + func = getattr(rpython.rtyper.raisingops, op.opname, None) if not func: log.warning("%s not found" % op.opname) continue From noreply at buildbot.pypy.org Mon Jan 20 23:38:12 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 20 Jan 2014 23:38:12 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt test for 30065a062e2c Message-ID: <20140120223812.B34171C34FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68791:28a49af1b6e6 Date: 2014-01-16 12:18 -0800 http://bitbucket.org/pypy/pypy/changeset/28a49af1b6e6/ Log: adapt test for 30065a062e2c diff --git a/lib-python/3/test/test_ssl.py b/lib-python/3/test/test_ssl.py --- a/lib-python/3/test/test_ssl.py +++ b/lib-python/3/test/test_ssl.py @@ -366,9 +366,7 @@ @skip_if_broken_ubuntu_ssl def test_options(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) - # OP_ALL is the default value - self.assertEqual(ssl.OP_ALL, ctx.options) - ctx.options |= ssl.OP_NO_SSLv2 + # OP_ALL | OP_NO_SSLv2 is the default value self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2, ctx.options) ctx.options |= ssl.OP_NO_SSLv3 From noreply at buildbot.pypy.org Mon Jan 20 23:38:14 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 20 Jan 2014 23:38:14 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to py3 Message-ID: <20140120223814.068381C34FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68792:f9287a74eb8e Date: 2014-01-20 14:37 -0800 http://bitbucket.org/pypy/pypy/changeset/f9287a74eb8e/ Log: adapt to py3 diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -327,9 +327,9 @@ def test_rawstring2charp(self): import _rawffi A = _rawffi.Array('c') - a = A(10, 'x'*10) - _rawffi.rawstring2charp(a.buffer, "foobar") - assert ''.join([a[i] for i in range(10)]) == "foobarxxxx" + a = A(10, b'x'*10) + _rawffi.rawstring2charp(a.buffer, b"foobar") + assert b''.join([a[i] for i in range(10)]) == b"foobarxxxx" a.free() def test_raw_callable(self): From noreply at buildbot.pypy.org Mon Jan 20 23:38:15 2014 From: noreply at buildbot.pypy.org (pjenvey) 
Date: Mon, 20 Jan 2014 23:38:15 +0100 (CET) Subject: [pypy-commit] pypy py3k: add print's help text Message-ID: <20140120223815.497A91C34FC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68793:6e982b92251a Date: 2014-01-20 14:37 -0800 http://bitbucket.org/pypy/pypy/changeset/6e982b92251a/ Log: add print's help text diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -57,7 +57,14 @@ return line def print_(*args, **kwargs): - """The new-style print function from py3k.""" + r"""print(value, ..., sep=' ', end='\n', file=sys.stdout) + + Prints the values to a stream, or to sys.stdout by default. + Optional keyword arguments: + file: a file-like object (stream); defaults to the current sys.stdout. + sep: string inserted between values, default a space. + end: string appended after the last value, default a newline. + """ fp = kwargs.pop("file", None) if fp is None: fp = sys.stdout From noreply at buildbot.pypy.org Tue Jan 21 00:10:29 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 00:10:29 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Remove commented out lines. Message-ID: <20140120231029.E66E41C0356@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68794:b2d92652cfec Date: 2014-01-20 19:28 +0100 http://bitbucket.org/pypy/pypy/changeset/b2d92652cfec/ Log: Remove commented out lines. diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -45,9 +45,6 @@ "llvm": [("translation.type_system", "lltype"), ("translation.backendopt.raisingop2direct_call", True)], }, - #suggests={ - # "llvm": [("translation.gcrootfinder", "llvmgcroot")] - # }, cmdline="-b --backend"), BoolOption("shared", "Build as a shared library", From noreply at buildbot.pypy.org Tue Jan 21 00:10:31 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 00:10:31 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Kill __init__ of test mixin class because it's not supported by pytest and a very bad idea anyway. Message-ID: <20140120231031.4EDF61C0356@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68795:df3aa52cdd7b Date: 2014-01-21 00:09 +0100 http://bitbucket.org/pypy/pypy/changeset/df3aa52cdd7b/ Log: Kill __init__ of test mixin class because it's not supported by pytest and a very bad idea anyway. 
diff --git a/rpython/translator/llvm/test/test_genllvm.py b/rpython/translator/llvm/test/test_genllvm.py --- a/rpython/translator/llvm/test/test_genllvm.py +++ b/rpython/translator/llvm/test/test_genllvm.py @@ -325,17 +325,15 @@ _func = None _types = None - def __init__(self): - self.config_override = {} - self.annotator_policy = None - - def getcompiled(self, func, argtypes, gcpolicy='ref', backendopt=True): + def getcompiled(self, func, argtypes, gcpolicy='ref', backendopt=True, + annotator_policy=None, no_gcremovetypeptr=False): config = get_pypy_config(translating=True) config.translation.backendopt.raisingop2direct_call = True config.translation.gc = gcpolicy - config.override(self.config_override) + if no_gcremovetypeptr: + config.translation.gcremovetypeptr = False t = self._translator = TranslationContext(config=config) - a = t.buildannotator(self.annotator_policy) + a = t.buildannotator(annotator_policy) a.build_types(func, argtypes) a.simplify() t.buildrtyper().specialize() @@ -363,9 +361,9 @@ def _compile(self, func, args, policy=None, gcpolicy=None): types = [lltype.typeOf(arg) for arg in args] if not (func == self._func and types == self._types): - self.config_override['translation.gcremovetypeptr'] = False - self.annotator_policy = policy - self._compiled = self.getcompiled(func, types, gcpolicy=gcpolicy) + self._compiled = self.getcompiled(func, types, gcpolicy=gcpolicy, + annotator_policy=policy, + no_gcremovetypeptr=True) self._compiled.convert = False self._func = func self._types = types From noreply at buildbot.pypy.org Tue Jan 21 03:49:22 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 21 Jan 2014 03:49:22 +0100 (CET) Subject: [pypy-commit] pypy default: These aren't used any longer Message-ID: <20140121024922.E4BBC1C3CD9@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68796:0a1f658e1e4a Date: 2014-01-20 20:48 -0600 http://bitbucket.org/pypy/pypy/changeset/0a1f658e1e4a/ Log: These aren't used any longer diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -109,6 +109,4 @@ .. _`rpython/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ .. _`rpython/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/ .. _`rpython/translator/c/src/stacklet/stacklet.h`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/stacklet.h -.. _`rpython/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/cli/ -.. _`rpython/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/jvm/ .. 
_`rpython/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/ From noreply at buildbot.pypy.org Tue Jan 21 05:20:09 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 21 Jan 2014 05:20:09 +0100 (CET) Subject: [pypy-commit] pypy default: Move struct.Struct to be RPYthon so we can mark fields as immutable Message-ID: <20140121042009.D0B521C10B1@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68797:496deb64bcef Date: 2014-01-20 22:19 -0600 http://bitbucket.org/pypy/pypy/changeset/496deb64bcef/ Log: Move struct.Struct to be RPYthon so we can mark fields as immutable diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -49,11 +49,12 @@ 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', 'unpack': 'interp_struct.unpack', - } + + 'Struct': 'interp_struct.W_Struct', + } appleveldefs = { 'error': 'app_struct.error', 'pack_into': 'app_struct.pack_into', 'unpack_from': 'app_struct.unpack_from', - 'Struct': 'app_struct.Struct', - } + } diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py --- a/pypy/module/struct/app_struct.py +++ b/pypy/module/struct/app_struct.py @@ -4,6 +4,7 @@ """ import struct + class error(Exception): """Exception raised on various occasions; argument is a string describing what is wrong.""" @@ -21,21 +22,3 @@ raise error("unpack_from requires a buffer of at least %d bytes" % (size,)) return struct.unpack(fmt, data) - -# XXX inefficient -class Struct(object): - def __init__(self, format): - self.format = format - self.size = struct.calcsize(format) - - def pack(self, *args): - return struct.pack(self.format, *args) - - def unpack(self, s): - return struct.unpack(self.format, s) - - def pack_into(self, buffer, offset, *args): - return pack_into(self.format, buffer, offset, *args) - - def unpack_from(self, buffer, offset=0): - return unpack_from(self.format, buffer, offset) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -1,15 +1,23 @@ -from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import OperationError -from pypy.module.struct.formatiterator import PackFormatIterator, UnpackFormatIterator from rpython.rlib import jit from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import OperationError +from pypy.interpreter.typedef import ( + TypeDef, interp_attrproperty, interp_attrproperty_w +) +from pypy.module.struct.formatiterator import ( + PackFormatIterator, UnpackFormatIterator +) + @unwrap_spec(format=str) def calcsize(space, format): return space.wrap(_calcsize(space, format)) + def _calcsize(space, format): fmtiter = CalcSizeFormatIterator() try: @@ -52,3 +60,44 @@ w_error = space.getattr(w_module, space.wrap('error')) raise OperationError(w_error, space.wrap(e.msg)) return space.newtuple(fmtiter.result_w[:]) + + +class W_Struct(W_Root): + _immutable_fields_ = ["format", "size"] + + def __init__(self, space, format): + self.format = format + self.size = _calcsize(space, format) + + @unwrap_spec(format=str) + def descr__new__(space, w_subtype, format): + self = 
space.allocate_instance(W_Struct, w_subtype) + W_Struct.__init__(self, space, format) + return self + + def wrap_struct_method(name): + def impl(self, space, __args__): + w_module = space.getbuiltinmodule('struct') + w_method = space.getattr(w_module, space.wrap(name)) + return space.call_obj_args( + w_method, space.wrap(self.format), __args__ + ) + + return impl + + descr_pack = wrap_struct_method("pack") + descr_unpack = wrap_struct_method("unpack") + descr_pack_into = wrap_struct_method("pack_into") + descr_unpack_from = wrap_struct_method("unpack_from") + + +W_Struct.typedef = TypeDef("Struct", + __new__=interp2app(W_Struct.descr__new__.im_func), + format=interp_attrproperty("format", cls=W_Struct), + size=interp_attrproperty("size", cls=W_Struct), + + pack=interp2app(W_Struct.descr_pack), + unpack=interp2app(W_Struct.descr_unpack), + pack_into=interp2app(W_Struct.descr_pack_into), + unpack_from=interp2app(W_Struct.descr_unpack_from), +) From noreply at buildbot.pypy.org Tue Jan 21 05:56:20 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 21 Jan 2014 05:56:20 +0100 (CET) Subject: [pypy-commit] pypy default: Translation fix Message-ID: <20140121045620.A51EF1C34FC@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68798:375a6e084bb1 Date: 2014-01-20 22:55 -0600 http://bitbucket.org/pypy/pypy/changeset/375a6e084bb1/ Log: Translation fix diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -1,13 +1,12 @@ from rpython.rlib import jit from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator +from rpython.tool.sourcetools import func_with_new_name from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import OperationError -from pypy.interpreter.typedef import ( - TypeDef, interp_attrproperty, interp_attrproperty_w -) +from pypy.interpreter.typedef import TypeDef, interp_attrproperty from pypy.module.struct.formatiterator import ( PackFormatIterator, UnpackFormatIterator ) @@ -83,7 +82,7 @@ w_method, space.wrap(self.format), __args__ ) - return impl + return func_with_new_name(impl, 'descr_' + name) descr_pack = wrap_struct_method("pack") descr_unpack = wrap_struct_method("unpack") From noreply at buildbot.pypy.org Tue Jan 21 10:14:31 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 21 Jan 2014 10:14:31 +0100 (CET) Subject: [pypy-commit] stmgc contention-counter: do penalty/contention based scheduling Message-ID: <20140121091431.9156B1C3391@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: contention-counter Changeset: r651:ce3149d0935f Date: 2014-01-10 13:20 +0100 http://bitbucket.org/pypy/stmgc/changeset/ce3149d0935f/ Log: do penalty/contention based scheduling diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -99,16 +99,16 @@ /* CONTENTION COUNTER THINGS */ #define RPY_STM_CONT_RMA_SAMPLES 64 +gcptr get_original_of(gcptr P) +{ + if (UNLIKELY(!(P->h_tid & GCFLAG_PREBUILT_ORIGINAL)) && P->h_original) + return (gcptr)P->h_original; + return P; +} + void abort_because_of(gcptr L) { - gcptr obj = (gcptr)L->h_original; - if (!obj || (L->h_tid & GCFLAG_PREBUILT_ORIGINAL)) { - obj = L; - - /* abort-object should never be a priv_from_prot - *without* an original */ - assert(!(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)); - } + gcptr obj = 
get_original_of(L); //g->h_contention += (g->h_contention + 1) << 2; revision_t old = (RPY_STM_CONT_RMA_SAMPLES - 1) * obj->h_contention; @@ -119,9 +119,7 @@ void commit_object(gcptr L) { - gcptr obj = L; - if (!(L->h_tid & GCFLAG_PREBUILT_ORIGINAL) && L->h_original) - obj = (gcptr)L->h_original; + gcptr obj = get_original_of(L); revision_t old = obj->h_contention; revision_t old_rma = (RPY_STM_CONT_RMA_SAMPLES - 1) * old; @@ -129,6 +127,8 @@ obj->h_contention = old_rma / RPY_STM_CONT_RMA_SAMPLES; } + + /************************************************************/ static void ValidateNow(struct tx_descriptor *); @@ -306,6 +306,10 @@ added just now by a parallel thread during stealing... */ /*assert(!(P->h_tid & GCFLAG_MOVED));*/ fxcache_add(&d->recent_reads_cache, P); + + /* update penalty for reading */ + gcptr o = get_original_of(P); + d->penalty += (o->h_contention >> 1) + 1; return P; follow_stub:; @@ -1156,6 +1160,7 @@ assert(!g2l_any_entry(&d->public_to_private)); assert(d->old_thread_local_obj == NULL); + d->penalty = 0; d->count_reads = 1; fxcache_clear(&d->recent_reads_cache); gcptrlist_clear(&d->abortinfo); diff --git a/c4/et.h b/c4/et.h --- a/c4/et.h +++ b/c4/et.h @@ -173,6 +173,8 @@ /* sync with pypy stmgc: */ NURSERY_FIELDS_DECL + revision_t penalty; + long atomic; /* 0 = not atomic, > 0 atomic */ unsigned long count_reads; unsigned long reads_size_limit; /* see should_break_tr. */ diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -46,7 +46,8 @@ assert(d->reads_size_limit_nonatomic == 0); #endif - return (sync_required | d->count_reads) >= d->reads_size_limit; + return (sync_required | d->penalty) >= d->reads_size_limit; + /* return (sync_required | d->count_reads) >= d->reads_size_limit; */ } static void init_shadowstack(void) @@ -179,12 +180,12 @@ When such a shortened transaction succeeds, the next one will see its length limit doubled, up to the maximum. 
*/ if (counter == 0 && stm_active != 2) { - unsigned long limit = d->reads_size_limit_nonatomic; - if (limit != 0 && limit < (stm_regular_length_limit >> 1)) - limit = (limit << 1) | 1; - else - limit = stm_regular_length_limit; - d->reads_size_limit_nonatomic = limit; + /* unsigned long limit = d->reads_size_limit_nonatomic; */ + /* if (limit != 0 && limit < (stm_regular_length_limit >> 1)) */ + /* limit = (limit << 1) | 1; */ + /* else */ + /* limit = stm_regular_length_limit; */ + /* d->reads_size_limit_nonatomic = limit; */ } if (!d->atomic) { stm_begin_transaction(&_jmpbuf, NULL); From noreply at buildbot.pypy.org Tue Jan 21 10:14:30 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 21 Jan 2014 10:14:30 +0100 (CET) Subject: [pypy-commit] stmgc default: add a valgrind target to the makefile Message-ID: <20140121091430.5037C1C3391@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r650:266b6fe74c32 Date: 2014-01-10 12:52 +0100 http://bitbucket.org/pypy/stmgc/changeset/266b6fe74c32/ Log: add a valgrind target to the makefile diff --git a/c4/Makefile b/c4/Makefile --- a/c4/Makefile +++ b/c4/Makefile @@ -36,5 +36,9 @@ release-%: %.c ${H_FILES} ${C_FILES} stmgc.c gcc -pthread -DNDEBUG -O2 -g $< -o release-$* -Wall stmgc.c -lrt +valgrind-%: %.c ${H_FILES} ${C_FILES} stmgc.c + gcc -pthread -DNDEBUG -O1 -g $< -o valgrind-$* -Wall stmgc.c -lrt + + test-%: ./$* 2>/dev/null | grep "check ok" diff --git a/c7/core.c b/c7/core.c new file mode 100644 --- /dev/null +++ b/c7/core.c @@ -0,0 +1,721 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core.h" +#include "list.h" +#include "pagecopy.h" + + +/* number of pages per thread: */ +#define NB_PAGES (256*256) // 256MB + +#define NB_THREADS 2 +#define MAP_PAGES_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE) +#define LARGE_OBJECT_WORDS 36 + +#if defined(__i386__) || defined(__x86_64__) +# define HAVE_FULL_EXCHANGE_INSN +#endif + + +typedef TLPREFIX char localchar_t; +typedef TLPREFIX struct alloc_for_size_s alloc_for_size_t; +typedef TLPREFIX struct _thread_local2_s _thread_local2_t; + + +struct alloc_for_size_s { + localchar_t *next; + uint16_t start, stop; + bool flag_partial_page; +}; + +struct _thread_local2_s { + struct _thread_local1_s _tl1; + int thread_num; + char *thread_base; + struct stm_list_s *modified_objects; + struct stm_list_s *new_object_ranges; + struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; +}; +#define _STM_TL2 ((_thread_local2_t *)_STM_TL1) + +/* Logical page number (offset) must be offset by thread_num*NB_PAGES to get + the real page number */ +enum { + /* shared read-only page, this (logical) page is shared between threads */ + SHARED_PAGE=0, + /* this page is private for all (2) threads */ + REMAPPING_PAGE, + /* page is already private for all (2) threads */ + PRIVATE_PAGE +}; /* flag_page_private */ + + +/* all pages for all threads: */ +static char *object_pages; +/* pages for the undo-log that contains copies for objs modified by the leader */ +static char *undo_log_pages; +static char *undo_log_current; + +static int num_threads_started; +/* the thread which may be the current leader (als check for global_history!=0) */ +static int leader_thread_num; +/* next free page to allocate objs from */ +static uintptr_t index_page_never_used; +/* the next global write version. 
incremented by transaction starts, set + to 0 by collections */ +static int next_write_version; +/* protects the undo log */ +static int undo_lock; +/* list of objs modified by the leader */ +static struct stm_list_s *global_history; +/* approximate range to check if an obj needs to be added to the undo_log + because it may be in the global_history */ +static uint16_t gh_write_version_first; +static uint16_t gh_write_version_last; +/* stores the state of a page (xxx_PAGE constants above) */ +static uint8_t flag_page_private[NB_PAGES]; + + +/************************************************************/ + +static void spin_loop(void) +{ + asm("pause" : : : "memory"); +} + +static void acquire_lock(int *lock) +{ + while (__sync_lock_test_and_set(lock, 1) != 0) { + while (*lock != 0) + spin_loop(); + } +} + +#define ACQUIRE_LOCK_IF(lock, condition) \ +({ \ + bool _acquired = false; \ + while (condition) { \ + if (__sync_lock_test_and_set(lock, 1) == 0) { \ + if (condition) \ + _acquired = true; \ + else \ + __sync_lock_release(lock); \ + break; \ + } \ + spin_loop(); \ + } \ + _acquired; \ +}) + + +static void release_lock(int *lock) +{ + __sync_lock_release(lock); +} + +static void write_fence(void) +{ +#if defined(__amd64__) || defined(__i386__) + asm("" : : : "memory"); +#else +# error "Define write_fence() for your architecture" +#endif +} + +/* check if obj was read in current transaction */ +static bool _stm_was_read(object_t *obj) +{ + read_marker_t *marker = (read_marker_t *)(((uintptr_t)obj) >> 4); + return (marker->rm == _STM_TL1->transaction_read_version); +} + + +/* 2-thread version to privatize a page. A (logical) page is either shared + by the 2 threads, or private for both. Needs more logic (e.g. ref-count) + for more threads. */ +static void _stm_privatize(uintptr_t pagenum) +{ + /* pagenum is a logical pagenum < NB_PAGES */ + + if (flag_page_private[pagenum] == PRIVATE_PAGE) + return; + +#ifdef HAVE_FULL_EXCHANGE_INSN + /* use __sync_lock_test_and_set() as a cheaper alternative to + __sync_bool_compare_and_swap(). 
*/ + int previous = __sync_lock_test_and_set(&flag_page_private[pagenum], + REMAPPING_PAGE); + if (previous == PRIVATE_PAGE) { + flag_page_private[pagenum] = PRIVATE_PAGE; + return; + } + bool was_shared = (previous == SHARED_PAGE); +#else + bool was_shared = __sync_bool_compare_and_swap(&flag_page_private[pagenum], + SHARED_PAGE, REMAPPING_PAGE); +#endif + if (!was_shared) { + while (flag_page_private[pagenum] == REMAPPING_PAGE) + spin_loop(); + return; + } + + /* 2 threads for now: thread_num = 0 or 1 */ + ssize_t pgoff1 = pagenum; + ssize_t pgoff2 = pagenum + NB_PAGES; + ssize_t localpgoff = pgoff1 + NB_PAGES * _STM_TL2->thread_num; + ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - _STM_TL2->thread_num); + + void *localpg = object_pages + localpgoff * 4096UL; + void *otherpg = object_pages + otherpgoff * 4096UL; + + int res = remap_file_pages(localpg, 4096, 0, pgoff2, 0); + if (res < 0) { + perror("remap_file_pages"); + abort(); + } + pagecopy(localpg, otherpg); + write_fence(); + assert(flag_page_private[pagenum] == REMAPPING_PAGE); + flag_page_private[pagenum] = PRIVATE_PAGE; +} + + +#define REAL_ADDRESS(object_pages, src) ((object_pages) + (uintptr_t)(src)) + +static char *real_address(uintptr_t src) +{ + return REAL_ADDRESS(_STM_TL2->thread_base, src); +} + +static char *get_thread_base(long thread_num) +{ + return object_pages + thread_num * (NB_PAGES * 4096UL); +} + +void stm_abort_transaction(void); + +enum detect_conflicts_e { CANNOT_CONFLICT, CAN_CONFLICT }; + +/* copy current versions of objs from the leader's object space */ +static void update_to_current_version(enum detect_conflicts_e check_conflict) +{ + /* XXX this can be done by acquiring the undo_lock for much less time, + but it needs to be carefully synchronized with _stm_write_slowpath(). + For now it must be called with the undo_lock acquired. */ + + /* Loop over objects in 'global_history': if they have been + read by the current transaction, the current transaction must + abort; then copy them out of the leader's object space --- + which may have been modified by the leader's uncommitted + transaction; this case will be fixed afterwards. + */ + bool conflict_found_or_dont_check = (check_conflict == CANNOT_CONFLICT); + char *local_base = _STM_TL2->thread_base; + char *remote_base = get_thread_base(1 - _STM_TL2->thread_num); + struct stm_list_s *gh, *gh_next; + + assert(leader_thread_num != _STM_TL2->thread_num); + + for (gh = global_history; gh != NULL; gh = gh_next) { + + STM_LIST_FOREACH(gh, ({ + + if (!conflict_found_or_dont_check) + conflict_found_or_dont_check = _stm_was_read(item); + + char *dst = REAL_ADDRESS(local_base, item); + char *src = REAL_ADDRESS(remote_base, item); + char *src_rebased = src - (uintptr_t)local_base; + size_t size = stm_object_size_rounded_up((object_t *)src_rebased); + + memcpy(dst + sizeof(char *), + src + sizeof(char *), + size - sizeof(char *)); + })); + + gh_next = gh->nextlist; + stm_list_free(gh); + } + global_history = NULL; + gh_write_version_first = 0xffff; + gh_write_version_last = 0; + + /* Finally, loop over objects modified by the leader, + and copy them out of the undo log. 
+ */ + char *undo = undo_log_pages; + char *undo_end = undo_log_current; + + while (undo < undo_end) { + + char *src = undo; + char *dst = *(char **)src; + char *src_rebased = src - (uintptr_t)local_base; + + *(char **)src = *(char **)dst; /* fix the first word of the object in + the undo log, for stm_object_size() */ + size_t size = stm_object_size_rounded_up((object_t *)src_rebased); + + memcpy(dst + sizeof(char *), + src + sizeof(char *), + size - sizeof(char *)); + + undo += size; + } + undo_log_current = undo_log_pages; /* make empty again */ + + if (conflict_found_or_dont_check && check_conflict == CAN_CONFLICT) { + release_lock(&undo_lock); + stm_abort_transaction(); + } +} + + +/* if we are not leader and there is a global_history, we check + for conflicts and update our pages */ +static void maybe_update(enum detect_conflicts_e check_conflict) +{ + if (leader_thread_num != _STM_TL2->thread_num && global_history != NULL) { + acquire_lock(&undo_lock); + update_to_current_version(check_conflict); + release_lock(&undo_lock); + } +} + + +void _stm_write_slowpath(object_t *obj) +{ + maybe_update(CAN_CONFLICT); + + _stm_privatize(((uintptr_t)obj) / 4096); + + stm_read(obj); + + _STM_TL2->modified_objects = stm_list_append( + _STM_TL2->modified_objects, obj); + + uint16_t wv = obj->write_version; + obj->write_version = _STM_TL1->transaction_write_version; + + /* We only need to store a copy of the current version of the object if: + - we are the leader; + - the object is present in the global_history. + The second condition is approximated by the following range check. + Storing a few more objects than strictly needed is not really a problem. + */ + /* XXX this can be done without acquiring the undo_lock at all, + but we need more care in update_to_current_version(). */ + + /* XXX can we avoid writing an unbounded number of copies of the + same object in case we run a lot of transactions while the other + thread is busy? Unlikely case but in theory annoying. Should + we anyway bound the undo log's size to much less than NB_PAGES, + and if full here, sleep? Should the bound also count the size + taken by the global_history lists? */ + if (ACQUIRE_LOCK_IF(&undo_lock, + wv <= gh_write_version_last && wv >= gh_write_version_first + && leader_thread_num == _STM_TL2->thread_num)) { + /* record in the undo log a copy of the content of the object */ + size_t size = stm_object_size_rounded_up(obj); + char *source = real_address((uintptr_t)obj); + char *undo = undo_log_current; + *((object_t **)undo) = obj; + memcpy(undo + sizeof(object_t *), + source + sizeof(object_t *), + size - sizeof(object_t *)); + /*write_fence();*/ + undo_log_current = undo + size; + release_lock(&undo_lock); + } +} + + +uintptr_t _stm_reserve_page(void) +{ + /* Grab a free page, initially shared between the threads. */ + + // XXX look in some free list first + + /* Return the index'th object page, which is so far never used. */ + uintptr_t index = __sync_fetch_and_add(&index_page_never_used, 1); + if (index >= NB_PAGES) { + fprintf(stderr, "Out of mmap'ed memory!\n"); + abort(); + } + return index; +} + +#define TO_RANGE(range, start, stop) \ + ((range) = (object_t *)((start) | (((uintptr_t)(stop)) << 16))) + +#define FROM_RANGE(start, stop, range) \ + ((start) = (uint16_t)(uintptr_t)(range), \ + (stop) = ((uintptr_t)(range)) >> 16) + +localchar_t *_stm_alloc_next_page(size_t i) +{ + /* 'alloc->next' points to where the next allocation should go. 
The + present function is called instead when this next allocation is + equal to 'alloc->stop'. As we know that 'start', 'next' and + 'stop' are always nearby pointers, we play tricks and only store + the lower 16 bits of 'start' and 'stop', so that the three + variables plus some flags fit in 16 bytes. + + 'flag_partial_page' is *cleared* to mean that the 'alloc' + describes a complete page, so that it needs not be listed inside + 'new_object_ranges'. In all other cases it is *set*. + */ + uintptr_t page; + localchar_t *result; + alloc_for_size_t *alloc = &_STM_TL2->alloc[i]; + size_t size = i * 8; + + if (alloc->flag_partial_page) { + /* record this range in 'new_object_ranges' */ + localchar_t *ptr1 = alloc->next - size - 1; + object_t *range; + TO_RANGE(range, alloc->start, alloc->stop); + page = ((uintptr_t)ptr1) / 4096; + _STM_TL2->new_object_ranges = stm_list_append( + _STM_TL2->new_object_ranges, (object_t *)page); + _STM_TL2->new_object_ranges = stm_list_append( + _STM_TL2->new_object_ranges, range); + } + + /* reserve a fresh new page */ + page = _stm_reserve_page(); + + result = (localchar_t *)(page * 4096UL); + alloc->start = (uintptr_t)result; + alloc->stop = alloc->start + (4096 / size) * size; + alloc->next = result + size; + alloc->flag_partial_page = false; + return result; +} + +object_t *stm_allocate(size_t size) +{ + assert(size % 8 == 0); + size_t i = size / 8; + assert(2 <= i && i < LARGE_OBJECT_WORDS);//XXX + alloc_for_size_t *alloc = &_STM_TL2->alloc[i]; + + localchar_t *p = alloc->next; + alloc->next = p + size; + if ((uint16_t)(uintptr_t)p == alloc->stop) + p = _stm_alloc_next_page(i); + + object_t *result = (object_t *)p; + result->write_version = _STM_TL1->transaction_write_version; + return result; +} + + +#define TOTAL_MEMORY (NB_PAGES * 4096UL * (NB_THREADS + 1)) +#define READMARKER_END ((NB_PAGES * 4096UL) >> 4) +#define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) +#define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) +#define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) + +void stm_setup(void) +{ + /* Check that some values are acceptable */ + assert(4096 <= ((uintptr_t)_STM_TL1)); + assert(((uintptr_t)_STM_TL1) == ((uintptr_t)_STM_TL2)); + assert(((uintptr_t)_STM_TL2) + sizeof(*_STM_TL2) <= 8192); + assert(2 <= FIRST_READMARKER_PAGE); + assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); + assert(READMARKER_START < READMARKER_END); + assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); + assert(FIRST_OBJECT_PAGE < NB_PAGES); + + object_pages = mmap(NULL, TOTAL_MEMORY, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS, -1, 0); + if (object_pages == MAP_FAILED) { + perror("object_pages mmap"); + abort(); + } + + long i; + for (i = 0; i < NB_THREADS; i++) { + char *thread_base = get_thread_base(i); + + /* In each thread's section, the first page is where TLPREFIX'ed + NULL accesses land. We mprotect it so that accesses fail. 
*/ + mprotect(thread_base, 4096, PROT_NONE); + + /* Fill the TLS page (page 1) with 0xDD */ + memset(REAL_ADDRESS(thread_base, 4096), 0xDD, 4096); + /* Make a "hole" at _STM_TL1 / _STM_TL2 */ + memset(REAL_ADDRESS(thread_base, _STM_TL2), 0, sizeof(*_STM_TL2)); + + /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ + if (FIRST_READMARKER_PAGE > 2) + mprotect(thread_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, + PROT_NONE); + + _STM_TL2->thread_num = i; + _STM_TL2->thread_base = thread_base; + + if (i > 0) { + int res; + res = remap_file_pages(thread_base + FIRST_OBJECT_PAGE * 4096UL, + (NB_PAGES - FIRST_OBJECT_PAGE) * 4096UL, + 0, FIRST_OBJECT_PAGE, 0); + if (res != 0) { + perror("remap_file_pages"); + abort(); + } + } + } + + undo_log_pages = get_thread_base(NB_THREADS); + mprotect(undo_log_pages, 4096, PROT_NONE); + mprotect(undo_log_pages + (NB_PAGES - 1) * 4096UL, 4096, PROT_NONE); + undo_log_pages += 4096; + undo_log_current = undo_log_pages; + + num_threads_started = 0; + index_page_never_used = FIRST_OBJECT_PAGE; + next_write_version = 1; + leader_thread_num = 0; + global_history = NULL; + gh_write_version_first = 0xffff; + gh_write_version_last = 0; +} + +#define INVALID_GS_VALUE 0xDDDDDDDDDDDDDDDDUL + +static void set_gs_register(uint64_t value) +{ + int result = syscall(SYS_arch_prctl, ARCH_SET_GS, value); + assert(result == 0); +} + +void stm_setup_thread(void) +{ + int thread_num = __sync_fetch_and_add(&num_threads_started, 1); + assert(thread_num < 2); /* only 2 threads for now */ + + char *thread_base = get_thread_base(thread_num); + set_gs_register((uintptr_t)thread_base); + + assert(_STM_TL2->thread_num == thread_num); + assert(_STM_TL2->thread_base == thread_base); + + _STM_TL2->modified_objects = stm_list_create(); +} + +void _stm_teardown_thread(void) +{ + stm_list_free(_STM_TL2->modified_objects); + _STM_TL2->modified_objects = NULL; + + set_gs_register(INVALID_GS_VALUE); +} + +void _stm_teardown(void) +{ + munmap(object_pages, TOTAL_MEMORY); + object_pages = NULL; + undo_log_pages = NULL; + undo_log_current = NULL; +} + + +static void reset_transaction_read_version(void) +{ + /* force-reset all read markers to 0 */ + + /* XXX measure the time taken by this madvise() and the following + zeroing of pages done lazily by the kernel; compare it with using + 16-bit read_versions. + */ + /* XXX try to use madvise() on smaller ranges of memory. In my + measures, we could gain a factor 2 --- not really more, even if + the range of virtual addresses below is very large, as long as it + is already mostly non-reserved pages. (The following call keeps + them non-reserved; apparently the kernel just skips them very + quickly.) + */ + int res = madvise(real_address(FIRST_READMARKER_PAGE * 4096UL), + (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) * 4096UL, + MADV_DONTNEED); + if (res < 0) { + perror("madvise"); + abort(); + } + _STM_TL1->transaction_read_version = 0; +} + +void stm_major_collection(void) +{ + abort(); +} + +void stm_start_transaction(jmp_buf *jmpbufptr) +{ + if (_STM_TL1->transaction_read_version == 0xff) + reset_transaction_read_version(); + _STM_TL1->transaction_read_version++; + _STM_TL1->jmpbufptr = NULL; + + while (1) { + int wv = __sync_fetch_and_add(&next_write_version, 1); + if (LIKELY(wv <= 0xffff)) { + _STM_TL1->transaction_write_version = wv; + break; + } + /* We run out of 16-bit numbers before we do the next major + collection, which resets it. XXX This case seems unlikely + for now, but check if it could become a bottleneck at some + point. 
*/ + stm_major_collection(); + } + assert(stm_list_is_empty(_STM_TL2->modified_objects)); + assert(stm_list_is_empty(_STM_TL2->new_object_ranges)); + + maybe_update(CANNOT_CONFLICT); /* no read object: cannot conflict */ + + _STM_TL1->jmpbufptr = jmpbufptr; +} + +static void update_new_objects_in_other_threads(uintptr_t pagenum, + uint16_t start, uint16_t stop) +{ + size_t size = (uint16_t)(stop - start); + assert(size <= 4096 - (start & 4095)); + assert((start & ~4095) == (uint16_t)(pagenum * 4096)); + + int thread_num = _STM_TL2->thread_num; + uintptr_t local_src = (pagenum * 4096UL) + (start & 4095); + char *dst = REAL_ADDRESS(get_thread_base(1 - thread_num), local_src); + char *src = REAL_ADDRESS(_STM_TL2->thread_base, local_src); + + memcpy(dst, src, size); +} + +void stm_stop_transaction(void) +{ + write_fence(); /* see later in this function for why */ + + acquire_lock(&undo_lock); + + if (leader_thread_num != _STM_TL2->thread_num) { + /* non-leader thread */ + if (global_history != NULL) { + update_to_current_version(CAN_CONFLICT); + assert(global_history == NULL); + } + + /* steal leadership now */ + leader_thread_num = _STM_TL2->thread_num; + } + + /* now we are the leader thread. the leader can always commit */ + _STM_TL1->jmpbufptr = NULL; /* cannot abort any more */ + undo_log_current = undo_log_pages; /* throw away the content */ + + /* add these objects to the global_history */ + _STM_TL2->modified_objects->nextlist = global_history; + global_history = _STM_TL2->modified_objects; + _STM_TL2->modified_objects = stm_list_create(); + + uint16_t wv = _STM_TL1->transaction_write_version; + if (wv < gh_write_version_last) gh_write_version_last = wv; + if (wv > gh_write_version_first) gh_write_version_first = wv; + + /* walk the new_object_ranges and manually copy the new objects + to the other thread's pages in the (hopefully rare) case that + the page they belong to is already unshared */ + long i; + struct stm_list_s *lst = _STM_TL2->new_object_ranges; + for (i = stm_list_count(lst); i > 0; ) { + i -= 2; + uintptr_t pagenum = (uintptr_t)stm_list_item(lst, i); + + /* NB. the read next line should work even against a parallel + thread, thanks to the lock acquisition we do earlier (see the + beginning of this function). Indeed, if this read returns + SHARED_PAGE, then we know that the real value in memory was + actually SHARED_PAGE at least at the time of the + acquire_lock(). It may have been modified afterwards by a + compare_and_swap() in the other thread, but then we know for + sure that the other thread is seeing the last, up-to-date + version of our data --- this is the reason of the + write_fence() just before the acquire_lock(). + */ + if (flag_page_private[pagenum] != SHARED_PAGE) { + object_t *range = stm_list_item(lst, i + 1); + uint16_t start, stop; + FROM_RANGE(start, stop, range); + update_new_objects_in_other_threads(pagenum, start, stop); + } + } + + /* do the same for the partially-allocated pages */ + long j; + for (j = 2; j < LARGE_OBJECT_WORDS; j++) { + alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; + uint16_t start = alloc->start; + uint16_t cur = (uintptr_t)alloc->next; + + if (start == cur) { + /* nothing to do: this page (or fraction thereof) was left + empty by the previous transaction, and starts empty as + well in the new transaction. 'flag_partial_page' is + unchanged. 
*/ + } + else { + uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; + /* for the new transaction, it will start here: */ + alloc->start = cur; + + if (alloc->flag_partial_page) { + if (flag_page_private[pagenum] != SHARED_PAGE) { + update_new_objects_in_other_threads(pagenum, start, cur); + } + } + else { + /* we can skip checking flag_page_private[] in non-debug + builds, because the whole page can only contain + objects made by the just-finished transaction. */ + assert(flag_page_private[pagenum] == SHARED_PAGE); + + /* the next transaction will start with this page + containing objects that are now committed, so + we need to set this flag now */ + alloc->flag_partial_page = true; + } + } + } + + release_lock(&undo_lock); +} + +void stm_abort_transaction(void) +{ + long j; + for (j = 2; j < LARGE_OBJECT_WORDS; j++) { + alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; + uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; + alloc->next -= num_allocated; + } + stm_list_clear(_STM_TL2->new_object_ranges); + stm_list_clear(_STM_TL2->modified_objects); + assert(_STM_TL1->jmpbufptr != NULL); + assert(_STM_TL1->jmpbufptr != (jmp_buf *)-1); /* for tests only */ + longjmp(*_STM_TL1->jmpbufptr, 1); +} + diff --git a/c7/core.h b/c7/core.h new file mode 100644 --- /dev/null +++ b/c7/core.h @@ -0,0 +1,78 @@ +#ifndef _STM_CORE_H +#define _STM_CORE_H + +#include +#include +#include + + +#define TLPREFIX __attribute__((address_space(256))) + +typedef TLPREFIX struct _thread_local1_s _thread_local1_t; +typedef TLPREFIX struct object_s object_t; +typedef TLPREFIX struct read_marker_s read_marker_t; + + +/* Structure of objects + -------------------- + + Objects manipulated by the user program, and managed by this library, + must start with a "struct object_s" field. Pointers to any user object + must use the "TLPREFIX struct foo *" type --- don't forget TLPREFIX. + The best is to use typedefs like above. + + The object_s part contains some fields reserved for the STM library, + as well as a 32-bit integer field that can be freely used by the user + program. However, right now this field must be read-only --- i.e. it + must never be modified on any object that may already belong to a + past transaction; you can only set it on just-allocated objects. The + best is to consider it as a field that is written to only once on + newly allocated objects. 
+*/ + +struct object_s { + uint16_t write_version; /* reserved for the STM library */ + /*uint8_t stm_flags;*/ + uint32_t header; /* for the user program -- only write in + newly allocated objects */ +}; + +struct read_marker_s { + uint8_t rm; +}; + +struct _thread_local1_s { + jmp_buf *jmpbufptr; + uint8_t transaction_read_version; + uint16_t transaction_write_version; +}; +#define _STM_TL1 ((_thread_local1_t *)4352) + + +/* this should use llvm's coldcc calling convention, + but it's not exposed to C code so far */ +void _stm_write_slowpath(object_t *); + +#define LIKELY(x) __builtin_expect(x, true) +#define UNLIKELY(x) __builtin_expect(x, false) + +/* invisible read, simply add to read-set */ +static inline void stm_read(object_t *obj) +{ + ((read_marker_t *)(((uintptr_t)obj) >> 4))->rm = + _STM_TL1->transaction_read_version; +} + +/* open object for writing, eagerly detects write-write conflicts */ +static inline void stm_write(object_t *obj) +{ + if (UNLIKELY(obj->write_version != _STM_TL1->transaction_write_version)) + _stm_write_slowpath(obj); +} + + +/* must be provided by the user of this library */ +extern size_t stm_object_size_rounded_up(object_t *); + + +#endif From noreply at buildbot.pypy.org Tue Jan 21 10:14:32 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 21 Jan 2014 10:14:32 +0100 (CET) Subject: [pypy-commit] stmgc contention-counter: fixes? Message-ID: <20140121091432.AA0C61C30AA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: contention-counter Changeset: r652:004e8c4916f3 Date: 2014-01-15 12:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/004e8c4916f3/ Log: fixes? diff --git a/c4/demo2.c b/c4/demo2.c --- a/c4/demo2.c +++ b/c4/demo2.c @@ -8,7 +8,7 @@ #include "fprintcolor.h" -#define LIST_LENGTH 200 +#define LIST_LENGTH 4000 #define NUMTHREADS 4 diff --git a/c4/et.c b/c4/et.c --- a/c4/et.c +++ b/c4/et.c @@ -101,7 +101,7 @@ gcptr get_original_of(gcptr P) { - if (UNLIKELY(!(P->h_tid & GCFLAG_PREBUILT_ORIGINAL)) && P->h_original) + if (!(P->h_tid & GCFLAG_PREBUILT_ORIGINAL) && P->h_original) return (gcptr)P->h_original; return P; } @@ -112,7 +112,7 @@ //g->h_contention += (g->h_contention + 1) << 2; revision_t old = (RPY_STM_CONT_RMA_SAMPLES - 1) * obj->h_contention; - old += 1000000; + old += 1000; obj->h_contention = old / RPY_STM_CONT_RMA_SAMPLES + ((old % RPY_STM_CONT_RMA_SAMPLES) != 0); } @@ -172,6 +172,7 @@ revision_t v; d->count_reads++; + assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), stmgc_is_in_nursery(d, P))); assert(G->h_revision != 0); @@ -309,7 +310,8 @@ /* update penalty for reading */ gcptr o = get_original_of(P); - d->penalty += (o->h_contention >> 1) + 1; + d->penalty += /* (o->h_contention >> 1) + */ 1; + return P; follow_stub:; @@ -1030,14 +1032,14 @@ so far. This should ensure that, assuming the retry does the same thing, it will commit just before it reaches the conflicting point. Note that we should never *increase* the read length limit here. 
*/ - limit = d->count_reads; - if (limit > d->reads_size_limit_nonatomic) { /* can occur if atomic */ - limit = d->reads_size_limit_nonatomic; - } - if (limit > 0) { - limit -= (limit >> 4); - d->reads_size_limit_nonatomic = limit; - } + /* limit = d->count_reads; */ + /* if (limit > d->reads_size_limit_nonatomic) { /\* can occur if atomic *\/ */ + /* limit = d->reads_size_limit_nonatomic; */ + /* } */ + /* if (limit > 0) { */ + /* limit -= (limit >> 4); */ + /* d->reads_size_limit_nonatomic = limit; */ + /* } */ AbortPrivateFromProtected(d); gcptrlist_clear(&d->list_of_read_objects); @@ -1160,7 +1162,7 @@ assert(!g2l_any_entry(&d->public_to_private)); assert(d->old_thread_local_obj == NULL); - d->penalty = 0; + d->penalty = 1; d->count_reads = 1; fxcache_clear(&d->recent_reads_cache); gcptrlist_clear(&d->abortinfo); diff --git a/c4/stmsync.c b/c4/stmsync.c --- a/c4/stmsync.c +++ b/c4/stmsync.c @@ -179,13 +179,13 @@ has configured 'reads_size_limit_nonatomic' to a smaller value. When such a shortened transaction succeeds, the next one will see its length limit doubled, up to the maximum. */ - if (counter == 0 && stm_active != 2) { - /* unsigned long limit = d->reads_size_limit_nonatomic; */ + if (/* counter == 0 && */stm_active != 2) { + unsigned long limit = d->reads_size_limit_nonatomic; /* if (limit != 0 && limit < (stm_regular_length_limit >> 1)) */ /* limit = (limit << 1) | 1; */ /* else */ - /* limit = stm_regular_length_limit; */ - /* d->reads_size_limit_nonatomic = limit; */ + limit = stm_regular_length_limit; + d->reads_size_limit_nonatomic = limit; } if (!d->atomic) { stm_begin_transaction(&_jmpbuf, NULL); From noreply at buildbot.pypy.org Tue Jan 21 10:14:33 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 21 Jan 2014 10:14:33 +0100 (CET) Subject: [pypy-commit] stmgc c7: add a test that fills the nursery Message-ID: <20140121091433.C1C1D1C30AA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r653:a9cb16635de9 Date: 2014-01-21 09:27 +0100 http://bitbucket.org/pypy/stmgc/changeset/a9cb16635de9/ Log: add a test that fills the nursery diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -311,6 +311,30 @@ stm_start_transaction() assert stm_get_char(lp1) == 'a' + def test_many_allocs(self): + # assumes NB_NURSERY_PAGES 1024 + obj_size = 1024 + num = 5000 # more than what fits in the nursery (4MB) + + stm_start_transaction() + for i in range(num): + new = stm_allocate(obj_size) + stm_push_root(new) + + old = [] + young = [] + for _ in range(num): + r = stm_pop_root() + if is_in_nursery(r): + young.append(r) + else: + old.append(r) + + assert old + assert young + + + # def test_resolve_write_write_no_conflict(self): # stm_start_transaction() From noreply at buildbot.pypy.org Tue Jan 21 10:14:34 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 21 Jan 2014 10:14:34 +0100 (CET) Subject: [pypy-commit] stmgc c7: allocation of large objs < nursery_size Message-ID: <20140121091434.DED211C30AA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r654:0e22a7939f9e Date: 2014-01-21 10:08 +0100 http://bitbucket.org/pypy/stmgc/changeset/0e22a7939f9e/ Log: allocation of large objs < nursery_size diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -19,7 +19,7 @@ #define NB_PAGES (256*256) // 256MB #define NB_THREADS 2 #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) -#define LARGE_OBJECT_WORDS 232 // XXX was 36 +#define 
LARGE_OBJECT_WORDS 36 #define NB_NURSERY_PAGES 1024 #define LENGTH_SHADOW_STACK 163840 @@ -91,7 +91,7 @@ static uint8_t write_locks[READMARKER_END - READMARKER_START]; /************************************************************/ -uintptr_t _stm_reserve_page(void); +uintptr_t _stm_reserve_pages(int num); void stm_abort_transaction(void); localchar_t *_stm_alloc_next_page(size_t i); void mark_page_as_uncommitted(uintptr_t pagenum); @@ -212,8 +212,9 @@ object_t *_stm_allocate_old(size_t size) { - assert(size <= 4096); - localchar_t* addr = (localchar_t*)(_stm_reserve_page() * 4096); + int pages = (size + 4095) / 4096; + localchar_t* addr = (localchar_t*)(_stm_reserve_pages(pages) * 4096); + object_t* o = (object_t*)addr; o->stm_flags |= GCFLAG_WRITE_BARRIER; return o; @@ -400,16 +401,21 @@ } -uintptr_t _stm_reserve_page(void) +uintptr_t _stm_reserve_pages(int num) { /* Grab a free page, initially shared between the threads. */ // XXX look in some free list first /* Return the index'th object page, which is so far never used. */ - uintptr_t index = __sync_fetch_and_add(&index_page_never_used, 1); + uintptr_t index = __sync_fetch_and_add(&index_page_never_used, num); + + int i; + for (i = 0; i < num; i++) { + assert(flag_page_private[index+i] == SHARED_PAGE); + } assert(flag_page_private[index] == SHARED_PAGE); - if (index >= NB_PAGES) { + if (index + num >= NB_PAGES) { fprintf(stderr, "Out of mmap'ed memory!\n"); abort(); } @@ -425,17 +431,23 @@ localchar_t *_stm_alloc_old(size_t size) { + localchar_t *result; size_t size_class = size / 8; - alloc_for_size_t *alloc = &_STM_TL2->alloc[size_class]; - localchar_t *result; + assert(size_class >= 2); - if ((uint16_t)((uintptr_t)alloc->next) == alloc->stop) - result = _stm_alloc_next_page(size_class); - else { - result = alloc->next; - alloc->next += size; + if (size_class >= LARGE_OBJECT_WORDS) { + result = (localchar_t*)_stm_allocate_old(size); + ((object_t*)result)->stm_flags &= ~GCFLAG_WRITE_BARRIER; /* added by _stm_allocate_old... */ + } else { + alloc_for_size_t *alloc = &_STM_TL2->alloc[size_class]; + + if ((uint16_t)((uintptr_t)alloc->next) == alloc->stop) + result = _stm_alloc_next_page(size_class); + else { + result = alloc->next; + alloc->next += size; + } } - return result; } @@ -470,7 +482,7 @@ /* } */ /* reserve a fresh new page */ - page = _stm_reserve_page(); + page = _stm_reserve_pages(1); /* mark as UNCOMMITTED_... */ mark_page_as_uncommitted(page); @@ -557,6 +569,11 @@ _STM_TL2->nursery_current = nursery_base; } +void _stm_minor_collect() +{ + minor_collect(); +} + localchar_t *collect_and_reserve(size_t size) { _stm_start_safe_point(); @@ -574,9 +591,7 @@ _stm_stop_safe_point(); assert(_STM_TL2->running_transaction); assert(size % 8 == 0); - size_t i = size / 8; - assert(2 <= i && i < LARGE_OBJECT_WORDS);//XXX - assert(2 <= i && i < NB_NURSERY_PAGES * 4096);//XXX + assert(16 <= size && size < NB_NURSERY_PAGES * 4096);//XXX localchar_t *current = _STM_TL2->nursery_current; localchar_t *new_current = current + size; diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -125,6 +125,8 @@ void stm_abort_transaction(void); +void _stm_minor_collect(); + #define stm_become_inevitable(msg) /* XXX implement me! 
*/ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -66,6 +66,7 @@ void _set_ptr(object_t *obj, int n, object_t *v); object_t * _get_ptr(object_t *obj, int n); +void _stm_minor_collect(); bool _stm_check_abort_transaction(void); @@ -302,6 +303,9 @@ if lib._stm_check_stop_safe_point(): raise Conflict() +def stm_minor_collect(): + lib._stm_minor_collect() + class BaseTest(object): diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -332,7 +332,26 @@ assert old assert young - + + def test_large_obj_alloc(self): + # test obj which doesn't fit into the size_classes + # for now, we will still allocate it in the nursery. + # expects: LARGE_OBJECT_WORDS 36 + size_class = 1000 # too big + obj_size = size_class * 8 + assert obj_size > 4096 # we want more than 1 page + assert obj_size < 4096 * 1024 # in the nursery + + stm_start_transaction() + new = stm_allocate(obj_size) + assert is_in_nursery(new) + stm_push_root(new) + stm_minor_collect() + stm_minor_collect() + new = stm_pop_root() + + assert not is_in_nursery(new) + From noreply at buildbot.pypy.org Tue Jan 21 10:14:35 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 21 Jan 2014 10:14:35 +0100 (CET) Subject: [pypy-commit] stmgc c7: failing test for writing to 2nd page of an object Message-ID: <20140121091435.E2F4A1C30AA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r655:0624b1cef158 Date: 2014-01-21 10:15 +0100 http://bitbucket.org/pypy/stmgc/changeset/0624b1cef158/ Log: failing test for writing to 2nd page of an object diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -347,11 +347,38 @@ assert is_in_nursery(new) stm_push_root(new) stm_minor_collect() - stm_minor_collect() new = stm_pop_root() assert not is_in_nursery(new) + + def test_large_obj_write(self): + # test obj which doesn't fit into the size_classes + # expects: LARGE_OBJECT_WORDS 36 + size_class = 1000 # too big + obj_size = size_class * 8 + assert obj_size > 4096 # we want more than 1 page + assert obj_size < 4096 * 1024 # in the nursery + + stm_start_transaction() + new = stm_allocate(obj_size) + assert is_in_nursery(new) + stm_push_root(new) + stm_stop_transaction() + new = stm_pop_root() + + stm_start_transaction() + stm_write(new) + # write to 2nd page of object!! 
+ wnew = stm_get_real_address(new) + wnew[4097] = 'x' + + self.switch(1) + stm_start_transaction() + stm_read(new) + rnew = stm_get_real_address(new) + assert rnew[4097] == '\0' + From noreply at buildbot.pypy.org Tue Jan 21 10:40:14 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 21 Jan 2014 10:40:14 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix some page flags Message-ID: <20140121094014.73DC01C039A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r656:51d9b831ecdb Date: 2014-01-21 10:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/51d9b831ecdb/ Log: fix some page flags diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -66,21 +66,6 @@ }; #define _STM_TL2 ((_thread_local2_t *)_STM_TL1) -enum { - /* unprivatized page seen by all threads */ - SHARED_PAGE=0, - - /* page being in the process of privatization */ - REMAPPING_PAGE, - - /* page private for each thread */ - PRIVATE_PAGE, - - /* set for SHARED pages that only contain objects belonging - to the current transaction, so the whole page is not - visible yet for other threads */ - UNCOMMITTED_SHARED_PAGE, -}; /* flag_page_private */ static char *object_pages; @@ -96,6 +81,11 @@ localchar_t *_stm_alloc_next_page(size_t i); void mark_page_as_uncommitted(uintptr_t pagenum); +uint8_t _stm_get_page_flag(int pagenum) +{ + return flag_page_private[pagenum]; +} + static void spin_loop(void) { asm("pause" : : : "memory"); @@ -438,6 +428,13 @@ if (size_class >= LARGE_OBJECT_WORDS) { result = (localchar_t*)_stm_allocate_old(size); ((object_t*)result)->stm_flags &= ~GCFLAG_WRITE_BARRIER; /* added by _stm_allocate_old... */ + + int page = ((uintptr_t)result) / 4096; + int pages = (size + 4095) / 4096; + int i; + for (i = 0; i < pages; i++) { + flag_page_private[page + i] = UNCOMMITTED_SHARED_PAGE; + } } else { alloc_for_size_t *alloc = &_STM_TL2->alloc[size_class]; @@ -668,6 +665,10 @@ } } + for (i = FIRST_NURSERY_PAGE; i < FIRST_AFTER_NURSERY_PAGE; i++) + flag_page_private[i] = PRIVATE_PAGE; /* nursery is private. + or should it be UNCOMMITTED??? */ + num_threads_started = 0; index_page_never_used = FIRST_AFTER_NURSERY_PAGE; pending_updates = NULL; diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -43,6 +43,23 @@ GCFLAG_MOVED = (1 << 2), }; +enum { + /* unprivatized page seen by all threads */ + SHARED_PAGE=0, + + /* page being in the process of privatization */ + REMAPPING_PAGE, + + /* page private for each thread */ + PRIVATE_PAGE, + + /* set for SHARED pages that only contain objects belonging + to the current transaction, so the whole page is not + visible yet for other threads */ + UNCOMMITTED_SHARED_PAGE, +}; /* flag_page_private */ + + struct object_s { uint8_t stm_flags; /* reserved for the STM library */ uint8_t stm_write_lock; /* 1 if writeable by some thread */ @@ -126,7 +143,7 @@ void stm_abort_transaction(void); void _stm_minor_collect(); - +uint8_t _stm_get_page_flag(int pagenum); #define stm_become_inevitable(msg) /* XXX implement me! 
*/ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -71,6 +71,16 @@ bool _stm_check_abort_transaction(void); void *memset(void *s, int c, size_t n); +extern size_t stmcb_size(struct object_s *); +extern void stmcb_trace(struct object_s *, void (object_t **)); + +enum { + SHARED_PAGE=0, + REMAPPING_PAGE, + PRIVATE_PAGE, + UNCOMMITTED_SHARED_PAGE, +}; /* flag_page_private */ +uint8_t _stm_get_page_flag(int pagenum); """) lib = ffi.verify(''' @@ -306,6 +316,17 @@ def stm_minor_collect(): lib._stm_minor_collect() +def stm_get_page_flag(pagenum): + return lib._stm_get_page_flag(pagenum) + +def stm_get_obj_size(o): + return lib.stmcb_size(stm_get_real_address(o)) + +def stm_get_obj_pages(o): + start = int(ffi.cast('uintptr_t', o)) + startp = start // 4096 + return range(startp, startp + stm_get_obj_size(o) // 4096 + 1) + class BaseTest(object): diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -345,10 +345,17 @@ stm_start_transaction() new = stm_allocate(obj_size) assert is_in_nursery(new) + assert len(stm_get_obj_pages(new)) == 2 + assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] + == [lib.PRIVATE_PAGE]*2) stm_push_root(new) stm_minor_collect() new = stm_pop_root() + assert len(stm_get_obj_pages(new)) == 2 + assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] + == [lib.UNCOMMITTED_SHARED_PAGE]*2) + assert not is_in_nursery(new) def test_large_obj_write(self): From noreply at buildbot.pypy.org Tue Jan 21 10:47:10 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 21 Jan 2014 10:47:10 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix privatization of pages in write-barrier slowpath Message-ID: <20140121094710.D2A401C039A@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r657:a9bedcca21e2 Date: 2014-01-21 10:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/a9bedcca21e2/ Log: fix privatization of pages in write-barrier slowpath diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -366,7 +366,10 @@ } /* privatize if SHARED_PAGE */ - _stm_privatize(pagenum); + /* xxx stmcb_size() is probably too slow */ + int pages = stmcb_size(real_address(obj)) / 4096; + for (; pages >= 0; pages--) + _stm_privatize(pagenum + pages); /* claim the write-lock for this object */ uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; @@ -433,7 +436,7 @@ int pages = (size + 4095) / 4096; int i; for (i = 0; i < pages; i++) { - flag_page_private[page + i] = UNCOMMITTED_SHARED_PAGE; + mark_page_as_uncommitted(page + i); } } else { alloc_for_size_t *alloc = &_STM_TL2->alloc[size_class]; diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -373,8 +373,14 @@ stm_stop_transaction() new = stm_pop_root() + assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] + == [lib.SHARED_PAGE]*2) + stm_start_transaction() stm_write(new) + assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] + == [lib.PRIVATE_PAGE]*2) + # write to 2nd page of object!! wnew = stm_get_real_address(new) wnew[4097] = 'x' From noreply at buildbot.pypy.org Tue Jan 21 11:22:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 11:22:44 +0100 (CET) Subject: [pypy-commit] pypy default: Move the "_ffi" module into the "_rawffi.alt" submodule. 
This is an Message-ID: <20140121102244.6AEC11C302F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68799:3e844dad3e26 Date: 2014-01-21 10:12 +0100 http://bitbucket.org/pypy/pypy/changeset/3e844dad3e26/ Log: Move the "_ffi" module into the "_rawffi.alt" submodule. This is an attempt to reduce namespace conflicts with the potentially confusing name "_ffi", as well as moving this half-finished module (which will likely never be finished now) out of the way. diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys try: from __pypy__ import builtinify diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -5,7 +5,7 @@ from _ctypes.basics import is_struct_shape from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys import traceback diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import sizeof, byref, as_ffi_pointer diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi import weakref import sys diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,2 @@ +# Backward compatibility hack +from _rawffi.alt import * diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -34,14 +34,14 @@ "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "micronumpy", "_ffi", + "_collections", "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "_md5", "cStringIO", "array", "_ffi", + "struct", "_md5", "cStringIO", "array", "binascii", # the following are needed for pyrepl (and hence for the # interactive prompt/pdb) @@ -96,7 +96,6 @@ # no _rawffi if importing rpython.rlib.clibffi raises ImportError # or CompilationError or py.test.skip.Exception "_rawffi" : ["rpython.rlib.clibffi"], - "_ffi" : ["rpython.rlib.clibffi"], "zlib" : ["rpython.rlib.rzlib"], "bz2" : ["pypy.module.bz2.interp_bz2"], diff --git a/pypy/module/_multiprocessing/test/test_memory.py b/pypy/module/_multiprocessing/test/test_memory.py --- a/pypy/module/_multiprocessing/test/test_memory.py +++ 
b/pypy/module/_multiprocessing/test/test_memory.py @@ -1,6 +1,6 @@ class AppTestMemory: spaceconfig = dict(usemodules=('_multiprocessing', 'mmap', - '_rawffi', '_ffi', 'itertools')) + '_rawffi', 'itertools')) def test_address_of(self): import _multiprocessing diff --git a/pypy/module/_rawffi/__init__.py b/pypy/module/_rawffi/__init__.py --- a/pypy/module/_rawffi/__init__.py +++ b/pypy/module/_rawffi/__init__.py @@ -2,6 +2,7 @@ """ from pypy.interpreter.mixedmodule import MixedModule +from pypy.module._rawffi import alt class Module(MixedModule): interpleveldefs = { @@ -33,6 +34,10 @@ appleveldefs = { } + submodules = { + 'alt': alt.Module, + } + def buildloaders(cls): from pypy.module._rawffi import interp_rawffi diff --git a/pypy/module/_ffi/__init__.py b/pypy/module/_rawffi/alt/__init__.py rename from pypy/module/_ffi/__init__.py rename to pypy/module/_rawffi/alt/__init__.py diff --git a/pypy/module/_ffi/app_struct.py b/pypy/module/_rawffi/alt/app_struct.py rename from pypy/module/_ffi/app_struct.py rename to pypy/module/_rawffi/alt/app_struct.py --- a/pypy/module/_ffi/app_struct.py +++ b/pypy/module/_rawffi/alt/app_struct.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt class MetaStructure(type): @@ -11,7 +11,7 @@ fields = dic.get('_fields_') if fields is None: return - struct_descr = _ffi._StructDescr(name, fields) + struct_descr = alt._StructDescr(name, fields) for field in fields: dic[field.name] = field dic['_struct_'] = struct_descr diff --git a/pypy/module/_ffi/interp_ffitype.py b/pypy/module/_rawffi/alt/interp_ffitype.py rename from pypy/module/_ffi/interp_ffitype.py rename to pypy/module/_rawffi/alt/interp_ffitype.py --- a/pypy/module/_ffi/interp_ffitype.py +++ b/pypy/module/_rawffi/alt/interp_ffitype.py @@ -116,7 +116,7 @@ types = [ # note: most of the type name directly come from the C equivalent, # with the exception of bytes: in C, ubyte and char are equivalent, - # but for _ffi the first expects a number while the second a 1-length + # but for here the first expects a number while the second a 1-length # string W_FFIType('slong', libffi.types.slong), W_FFIType('sint', libffi.types.sint), diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py rename from pypy/module/_ffi/interp_funcptr.py rename to pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_ffi/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -3,7 +3,7 @@ operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from pypy.module._ffi.interp_ffitype import W_FFIType +from pypy.module._rawffi.alt.interp_ffitype import W_FFIType # from rpython.rtyper.lltypesystem import lltype, rffi # @@ -13,7 +13,7 @@ from rpython.rlib.rdynload import DLOpenError from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter +from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror import os @@ -302,7 +302,7 @@ W_FuncPtr.typedef = TypeDef( - '_ffi.FuncPtr', + '_rawffi.alt.FuncPtr', __call__ = interp2app(W_FuncPtr.call), getaddr = interp2app(W_FuncPtr.getaddr), free_temp_buffers = interp2app(W_FuncPtr.free_temp_buffers), @@ -346,7 +346,7 @@ W_CDLL.typedef = TypeDef( - '_ffi.CDLL', + '_rawffi.alt.CDLL', __new__ = interp2app(descr_new_cdll), getfunc = 
interp2app(W_CDLL.getfunc), getaddressindll = interp2app(W_CDLL.getaddressindll), @@ -363,7 +363,7 @@ W_WinDLL.typedef = TypeDef( - '_ffi.WinDLL', + '_rawffi.alt.WinDLL', __new__ = interp2app(descr_new_windll), getfunc = interp2app(W_WinDLL.getfunc), getaddressindll = interp2app(W_WinDLL.getaddressindll), diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_rawffi/alt/interp_struct.py rename from pypy/module/_ffi/interp_struct.py rename to pypy/module/_rawffi/alt/interp_struct.py --- a/pypy/module/_ffi/interp_struct.py +++ b/pypy/module/_rawffi/alt/interp_struct.py @@ -8,8 +8,8 @@ from pypy.interpreter.typedef import TypeDef, interp_attrproperty from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import operationerrfmt -from pypy.module._ffi.interp_ffitype import W_FFIType -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter +from pypy.module._rawffi.alt.interp_ffitype import W_FFIType +from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter class W_Field(W_Root): diff --git a/pypy/module/_ffi/test/__init__.py b/pypy/module/_rawffi/alt/test/__init__.py rename from pypy/module/_ffi/test/__init__.py rename to pypy/module/_rawffi/alt/test/__init__.py diff --git a/pypy/module/_ffi/test/test_ffitype.py b/pypy/module/_rawffi/alt/test/test_ffitype.py rename from pypy/module/_ffi/test/test_ffitype.py rename to pypy/module/_rawffi/alt/test/test_ffitype.py --- a/pypy/module/_ffi/test/test_ffitype.py +++ b/pypy/module/_rawffi/alt/test/test_ffitype.py @@ -1,21 +1,21 @@ -from pypy.module._ffi.test.test_funcptr import BaseAppTestFFI +from pypy.module._rawffi.alt.test.test_funcptr import BaseAppTestFFI class AppTestFFIType(BaseAppTestFFI): def test_simple_types(self): - from _ffi import types + from _rawffi.alt import types assert str(types.sint) == "" assert str(types.uint) == "" assert types.sint.name == 'sint' assert types.uint.name == 'uint' def test_sizeof(self): - from _ffi import types + from _rawffi.alt import types assert types.sbyte.sizeof() == 1 assert types.sint.sizeof() == 4 def test_typed_pointer(self): - from _ffi import types + from _rawffi.alt import types intptr = types.Pointer(types.sint) # create a typed pointer to sint assert intptr.deref_pointer() is types.sint assert str(intptr) == '' @@ -23,7 +23,7 @@ raises(TypeError, "types.Pointer(42)") def test_pointer_identity(self): - from _ffi import types + from _rawffi.alt import types x = types.Pointer(types.slong) y = types.Pointer(types.slong) z = types.Pointer(types.char) @@ -31,7 +31,7 @@ assert x is not z def test_char_p_cached(self): - from _ffi import types + from _rawffi.alt import types x = types.Pointer(types.char) assert x is types.char_p x = types.Pointer(types.unichar) diff --git a/pypy/module/_ffi/test/test_funcptr.py b/pypy/module/_rawffi/alt/test/test_funcptr.py rename from pypy/module/_ffi/test/test_funcptr.py rename to pypy/module/_rawffi/alt/test/test_funcptr.py --- a/pypy/module/_ffi/test/test_funcptr.py +++ b/pypy/module/_rawffi/alt/test/test_funcptr.py @@ -7,7 +7,7 @@ import sys, py class BaseAppTestFFI(object): - spaceconfig = dict(usemodules=('_ffi', '_rawffi')) + spaceconfig = dict(usemodules=('_rawffi',)) @classmethod def prepare_c_example(cls): @@ -62,17 +62,17 @@ cls.w_f_12_34_plus_56_78 = space.wrap(f_result) def test_libload(self): - import _ffi - _ffi.CDLL(self.libc_name) + import _rawffi.alt + _rawffi.alt.CDLL(self.libc_name) def test_libload_fail(self): - import _ffi - raises(OSError, 
_ffi.CDLL, "xxxxx_this_name_does_not_exist_xxxxx") + import _rawffi.alt + raises(OSError, _rawffi.alt.CDLL, "xxxxx_this_name_does_not_exist_xxxxx") def test_libload_None(self): if self.iswin32: skip("unix specific") - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types # this should return *all* loaded libs, dlopen(NULL) dll = CDLL(None) # libm should be loaded @@ -80,20 +80,20 @@ assert res == 1.0 def test_callfunc(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libm = CDLL(self.libm_name) pow = libm.getfunc('pow', [types.double, types.double], types.double) assert pow(2, 3) == 8 def test_getaddr(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libm = CDLL(self.libm_name) pow = libm.getfunc('pow', [types.double, types.double], types.double) assert pow.getaddr() == self.pow_addr def test_getaddressindll(self): import sys - from _ffi import CDLL + from _rawffi.alt import CDLL libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') fff = sys.maxint*2-1 @@ -102,7 +102,7 @@ assert pow_addr == self.pow_addr & fff def test_func_fromaddr(self): - from _ffi import CDLL, types, FuncPtr + from _rawffi.alt import CDLL, types, FuncPtr libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') pow = FuncPtr.fromaddr(pow_addr, 'pow', [types.double, types.double], @@ -117,7 +117,7 @@ } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy', [types.sint, types.sint], types.sint) assert sum_xy(30, 12) == 42 @@ -129,7 +129,7 @@ DLLEXPORT void set_dummy(int val) { dummy = val; } DLLEXPORT int get_dummy() { return dummy; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) set_dummy = libfoo.getfunc('set_dummy', [types.sint], types.void) get_dummy = libfoo.getfunc('get_dummy', [], types.sint) @@ -144,7 +144,7 @@ DLLEXPORT int* get_dummy_ptr() { return &dummy; } DLLEXPORT void set_val_to_ptr(int* ptr, int val) { *ptr = val; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) get_dummy = libfoo.getfunc('get_dummy', [], types.sint) get_dummy_ptr = libfoo.getfunc('get_dummy_ptr', [], types.void_p) @@ -163,7 +163,7 @@ DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types class MyPointerWrapper(object): def __init__(self, value): @@ -197,7 +197,7 @@ return len; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) mystrlen = libfoo.getfunc('mystrlen', [types.char_p], types.slong) @@ -223,7 +223,7 @@ return len; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) mystrlen = libfoo.getfunc('mystrlen_u', [types.unichar_p], types.slong) @@ -247,7 +247,7 @@ return s; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) do_nothing = libfoo.getfunc('do_nothing', [types.char_p], types.char_p) @@ -264,7 +264,7 @@ DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) intptr = types.Pointer(types.sint) @@ -283,7 +283,7 @@ 
DLLEXPORT long is_null_ptr(void* ptr) { return ptr == NULL; } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) is_null_ptr = libfoo.getfunc('is_null_ptr', [types.void_p], types.ulong) assert not is_null_ptr(sys.maxint+1) @@ -296,7 +296,7 @@ } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_ul', [types.ulong, types.ulong], types.ulong) @@ -313,7 +313,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_us', [types.ushort, types.ushort], types.ushort) @@ -327,7 +327,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_us', [types.ubyte, types.ubyte], types.ubyte) @@ -342,7 +342,7 @@ } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types maxint32 = 2147483647 libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_ui', [types.uint, types.uint], @@ -357,7 +357,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_sb', [types.sbyte, types.sbyte], types.sbyte) @@ -371,7 +371,7 @@ return x - ('a'-'A'); } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) my_toupper = libfoo.getfunc('my_toupper', [types.char], types.char) @@ -385,7 +385,7 @@ return x + y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_wc', [types.unichar, types.unichar], types.unichar) @@ -400,7 +400,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_float', [types.float, types.float], types.float) @@ -415,7 +415,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types maxint32 = 2147483647 # we cannot really go above maxint on 64 bits # (and we would not test anything, as there long # is the same as long long) @@ -437,7 +437,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types maxint64 = 9223372036854775807 # maxint64+1 does not fit into a # longlong, but it does into a # ulonglong @@ -464,7 +464,7 @@ return p.x + p.y; } """ - from _ffi import CDLL, types, _StructDescr, Field + from _rawffi.alt import CDLL, types, _StructDescr, Field Point = _StructDescr('Point', [ Field('x', types.slong), Field('y', types.slong), @@ -487,7 +487,7 @@ return p; } """ - from _ffi import CDLL, types, _StructDescr, Field + from _rawffi.alt import CDLL, types, _StructDescr, Field Point = _StructDescr('Point', [ Field('x', types.slong), Field('y', types.slong), @@ -500,9 +500,9 @@ assert p.getfield('x') == 12 assert p.getfield('y') == 34 - # XXX: support for _rawffi structures should be killed as soon as we - # implement ctypes.Structure on top of _ffi. In the meantime, we support - # both + # XXX: long ago the plan was to kill _rawffi structures in favor of + # _rawffi.alt structures. The plan never went anywhere, so we're + # stuck with both. 
def test_byval_argument__rawffi(self): """ // defined above @@ -510,7 +510,7 @@ DLLEXPORT long sum_point(struct Point p); """ import _rawffi - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types POINT = _rawffi.Structure([('x', 'l'), ('y', 'l')]) ffi_point = POINT.get_ffi_type() libfoo = CDLL(self.libfoo_name) @@ -529,7 +529,7 @@ DLLEXPORT struct Point make_point(long x, long y); """ import _rawffi - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types POINT = _rawffi.Structure([('x', 'l'), ('y', 'l')]) ffi_point = POINT.get_ffi_type() libfoo = CDLL(self.libfoo_name) @@ -542,23 +542,23 @@ def test_TypeError_numargs(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy', [types.sint, types.sint], types.sint) raises(TypeError, "sum_xy(1, 2, 3)") raises(TypeError, "sum_xy(1)") def test_TypeError_voidarg(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) raises(TypeError, "libfoo.getfunc('sum_xy', [types.void], types.sint)") def test_OSError_loading(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types raises(OSError, "CDLL('I do not exist')") def test_AttributeError_missing_function(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) raises(AttributeError, "libfoo.getfunc('I_do_not_exist', [], types.void)") if self.iswin32: @@ -569,7 +569,7 @@ def test_calling_convention1(self): if not self.iswin32: skip("windows specific") - from _ffi import WinDLL, types + from _rawffi.alt import WinDLL, types libm = WinDLL(self.libm_name) pow = libm.getfunc('pow', [types.double, types.double], types.double) try: @@ -582,7 +582,7 @@ def test_calling_convention2(self): if not self.iswin32: skip("windows specific") - from _ffi import WinDLL, types + from _rawffi.alt import WinDLL, types kernel = WinDLL('Kernel32.dll') sleep = kernel.getfunc('Sleep', [types.uint], types.void) sleep(10) @@ -590,7 +590,7 @@ def test_calling_convention3(self): if not self.iswin32: skip("windows specific") - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types wrong_kernel = CDLL('Kernel32.dll') wrong_sleep = wrong_kernel.getfunc('Sleep', [types.uint], types.void) try: @@ -603,7 +603,7 @@ def test_func_fromaddr2(self): if not self.iswin32: skip("windows specific") - from _ffi import CDLL, types, FuncPtr + from _rawffi.alt import CDLL, types, FuncPtr from _rawffi import FUNCFLAG_STDCALL libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') @@ -619,7 +619,7 @@ def test_func_fromaddr3(self): if not self.iswin32: skip("windows specific") - from _ffi import WinDLL, types, FuncPtr + from _rawffi.alt import WinDLL, types, FuncPtr from _rawffi import FUNCFLAG_STDCALL kernel = WinDLL('Kernel32.dll') sleep_addr = kernel.getaddressindll('Sleep') @@ -636,7 +636,7 @@ """ if not self.iswin32: skip("windows specific") - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) f_name = libfoo.getfunc('AAA_first_ordinal_function', [], types.sint) f_ordinal = libfoo.getfunc(1, [], types.sint) diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_rawffi/alt/test/test_struct.py rename from pypy/module/_ffi/test/test_struct.py rename to pypy/module/_rawffi/alt/test/test_struct.py --- a/pypy/module/_ffi/test/test_struct.py +++ b/pypy/module/_rawffi/alt/test/test_struct.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import 
interp2app, unwrap_spec -from pypy.module._ffi.interp_ffitype import app_types, W_FFIType -from pypy.module._ffi.interp_struct import compute_size_and_alignement, W_Field -from pypy.module._ffi.test.test_funcptr import BaseAppTestFFI +from pypy.module._rawffi.alt.interp_ffitype import app_types, W_FFIType +from pypy.module._rawffi.alt.interp_struct import compute_size_and_alignement, W_Field +from pypy.module._rawffi.alt.test.test_funcptr import BaseAppTestFFI class TestStruct(object): @@ -69,7 +69,7 @@ cls.w_runappdirect = cls.space.wrap(cls.runappdirect) def test__StructDescr(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -80,7 +80,7 @@ assert descr.ffitype.name == 'struct foo' def test_alignment(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.sbyte), @@ -92,7 +92,7 @@ assert fields[1].offset == longsize # aligned to WORD def test_missing_field(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -106,7 +106,7 @@ def test_unknown_type(self): if self.runappdirect: skip('cannot use self.dummy_type with -A') - from _ffi import _StructDescr, Field + from _rawffi.alt import _StructDescr, Field fields = [ Field('x', self.dummy_type), ] @@ -116,7 +116,7 @@ raises(TypeError, "struct.setfield('x', 42)") def test_getfield_setfield(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -133,7 +133,7 @@ def test_getfield_setfield_signed_types(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('sbyte', types.sbyte), @@ -156,7 +156,7 @@ def test_getfield_setfield_unsigned_types(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('ubyte', types.ubyte), @@ -188,7 +188,7 @@ def test_getfield_setfield_longlong(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('slonglong', types.slonglong), @@ -205,7 +205,7 @@ def test_getfield_setfield_float(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.double), @@ -219,7 +219,7 @@ def test_getfield_setfield_singlefloat(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.float), @@ -237,7 +237,7 @@ assert mem == [123.5] def test_define_fields(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -255,7 +255,7 @@ raises(ValueError, "descr.define_fields(fields)") def test_pointer_to_incomplete_struct(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ 
-271,7 +271,7 @@ assert types.Pointer(descr.ffitype) is foo_p def test_nested_structure(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() foo_fields = [ Field('x', types.slong), @@ -310,7 +310,7 @@ def test_compute_shape(self): - from _ffi import Structure, Field, types + from _rawffi.alt import Structure, Field, types class Point(Structure): _fields_ = [ Field('x', types.slong), diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_rawffi/alt/test/test_type_converter.py rename from pypy/module/_ffi/test/test_type_converter.py rename to pypy/module/_rawffi/alt/test/test_type_converter.py --- a/pypy/module/_ffi/test/test_type_converter.py +++ b/pypy/module/_rawffi/alt/test/test_type_converter.py @@ -1,8 +1,8 @@ import sys from rpython.rlib.rarithmetic import r_uint, r_singlefloat, r_longlong, r_ulonglong from rpython.rlib.libffi import IS_32_BIT -from pypy.module._ffi.interp_ffitype import app_types, descr_new_pointer -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter +from pypy.module._rawffi.alt.interp_ffitype import app_types, descr_new_pointer +from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter class DummyFromAppLevelConverter(FromAppLevelConverter): @@ -29,7 +29,7 @@ class TestFromAppLevel(object): - spaceconfig = dict(usemodules=('_ffi',)) + spaceconfig = dict(usemodules=('_rawffi',)) def setup_class(cls): converter = DummyFromAppLevelConverter(cls.space) @@ -104,12 +104,12 @@ def test__as_ffi_pointer_(self): space = self.space w_MyPointerWrapper = space.appexec([], """(): - import _ffi + from _rawffi.alt import types class MyPointerWrapper(object): def __init__(self, value): self.value = value def _as_ffi_pointer_(self, ffitype): - assert ffitype is _ffi.types.void_p + assert ffitype is types.void_p return self.value return MyPointerWrapper @@ -151,7 +151,7 @@ class TestToAppLevel(object): - spaceconfig = dict(usemodules=('_ffi',)) + spaceconfig = dict(usemodules=('_rawffi',)) def setup_class(cls): converter = DummyToAppLevelConverter(cls.space) diff --git a/pypy/module/_ffi/test/test_ztranslation.py b/pypy/module/_rawffi/alt/test/test_ztranslation.py rename from pypy/module/_ffi/test/test_ztranslation.py rename to pypy/module/_rawffi/alt/test/test_ztranslation.py --- a/pypy/module/_ffi/test/test_ztranslation.py +++ b/pypy/module/_rawffi/alt/test/test_ztranslation.py @@ -1,4 +1,4 @@ -from pypy.objspace.fake.checkmodule import checkmodule - -def test__ffi_translates(): - checkmodule('_ffi', '_rawffi') +from pypy.objspace.fake.checkmodule import checkmodule + +def test__ffi_translates(): + checkmodule('_rawffi') diff --git a/pypy/module/_ffi/type_converter.py b/pypy/module/_rawffi/alt/type_converter.py rename from pypy/module/_ffi/type_converter.py rename to pypy/module/_rawffi/alt/type_converter.py --- a/pypy/module/_ffi/type_converter.py +++ b/pypy/module/_rawffi/alt/type_converter.py @@ -3,7 +3,7 @@ from rpython.rlib.rarithmetic import r_uint from pypy.interpreter.error import operationerrfmt, OperationError from pypy.module._rawffi.structure import W_StructureInstance, W_Structure -from pypy.module._ffi.interp_ffitype import app_types +from pypy.module._rawffi.alt.interp_ffitype import app_types class FromAppLevelConverter(object): """ @@ -17,7 +17,7 @@ self.space = space def unwrap_and_do(self, w_ffitype, w_obj): - from pypy.module._ffi.interp_struct import W__StructInstance + from 
pypy.module._rawffi.alt.interp_struct import W__StructInstance space = self.space if w_ffitype.is_longlong(): # note that we must check for longlong first, because either @@ -194,7 +194,7 @@ self.space = space def do_and_wrap(self, w_ffitype): - from pypy.module._ffi.interp_struct import W__StructDescr + from pypy.module._rawffi.alt.interp_struct import W__StructDescr space = self.space if w_ffitype.is_longlong(): # note that we must check for longlong first, because either diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -311,10 +311,7 @@ raise NotImplementedError def descr_get_ffi_type(self, space): - # XXX: this assumes that you have the _ffi module enabled. In the long - # term, probably we will move the code for build structures and arrays - # from _rawffi to _ffi - from pypy.module._ffi.interp_ffitype import W_FFIType + from pypy.module._rawffi.alt.interp_ffitype import W_FFIType return W_FFIType('', self.get_basic_ffi_type(), self) @unwrap_spec(n=int) diff --git a/pypy/module/cppyy/test/test_crossing.py b/pypy/module/cppyy/test/test_crossing.py --- a/pypy/module/cppyy/test/test_crossing.py +++ b/pypy/module/cppyy/test/test_crossing.py @@ -65,7 +65,7 @@ return str(pydname) class AppTestCrossing(AppTestCpythonExtensionBase): - spaceconfig = dict(usemodules=['cpyext', 'cppyy', 'thread', '_rawffi', '_ffi', + spaceconfig = dict(usemodules=['cpyext', 'cppyy', 'thread', '_rawffi', 'array', 'itertools', 'rctime', 'binascii']) def setup_class(cls): diff --git a/pypy/module/cpyext/test/test_ztranslation.py b/pypy/module/cpyext/test/test_ztranslation.py --- a/pypy/module/cpyext/test/test_ztranslation.py +++ b/pypy/module/cpyext/test/test_ztranslation.py @@ -1,4 +1,4 @@ from pypy.objspace.fake.checkmodule import checkmodule def test_cpyext_translates(): - checkmodule('cpyext', '_ffi') + checkmodule('cpyext') diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -105,7 +105,7 @@ else: rest = '' if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', - 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', + 'imp', 'sys', 'array', 'itertools', 'operator', 'posix', '_socket', '_sre', '_lsprof', '_weakref', '__pypy__', 'cStringIO', '_collections', 'struct', 'mmap', 'marshal', '_codecs', 'rctime', 'cppyy', diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -2,7 +2,7 @@ class AppTestGrp: - spaceconfig = dict(usemodules=('binascii', '_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('binascii', '_rawffi', 'itertools')) def setup_class(cls): cls.w_grp = import_lib_pypy(cls.space, 'grp', diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -10,7 +10,7 @@ class AppTestOsWait: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('_rawffi', 'itertools')) def setup_class(cls): if not hasattr(os, "fork"): diff --git a/pypy/module/test_lib_pypy/test_pwd.py b/pypy/module/test_lib_pypy/test_pwd.py --- a/pypy/module/test_lib_pypy/test_pwd.py +++ b/pypy/module/test_lib_pypy/test_pwd.py @@ -1,7 +1,7 @@ import py, sys class 
AppTestPwd: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools', 'binascii')) + spaceconfig = dict(usemodules=('_rawffi', 'itertools', 'binascii')) def setup_class(cls): if sys.platform == 'win32': diff --git a/pypy/module/test_lib_pypy/test_resource.py b/pypy/module/test_lib_pypy/test_resource.py --- a/pypy/module/test_lib_pypy/test_resource.py +++ b/pypy/module/test_lib_pypy/test_resource.py @@ -9,7 +9,7 @@ class AppTestResource: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('_rawffi', 'itertools')) def setup_class(cls): rebuild.rebuild_one('resource.ctc.py') diff --git a/pypy/tool/pypyjit.py b/pypy/tool/pypyjit.py --- a/pypy/tool/pypyjit.py +++ b/pypy/tool/pypyjit.py @@ -37,7 +37,7 @@ config.objspace.usemodules._sre = False config.objspace.usemodules._lsprof = False # -config.objspace.usemodules._ffi = False +config.objspace.usemodules._rawffi = False config.objspace.usemodules.micronumpy = False # set_pypy_opt_level(config, level='jit') From noreply at buildbot.pypy.org Tue Jan 21 11:22:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 11:22:45 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140121102245.E83D01C302F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68800:4da44a7d1989 Date: 2014-01-21 11:21 +0100 http://bitbucket.org/pypy/pypy/changeset/4da44a7d1989/ Log: merge heads diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -109,6 +109,4 @@ .. _`rpython/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ .. _`rpython/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/ .. _`rpython/translator/c/src/stacklet/stacklet.h`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/stacklet.h -.. _`rpython/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/cli/ -.. _`rpython/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/jvm/ .. 
_`rpython/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/ diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -167,7 +167,7 @@ def run(self): """Start this frame's execution.""" if self.getcode().co_flags & pycode.CO_GENERATOR: - if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: + if 1:# self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: from pypy.interpreter.generator import GeneratorIteratorWithDel return self.space.wrap(GeneratorIteratorWithDel(self)) else: diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1,6 +1,5 @@ from rpython.translator.platform import platform from rpython.translator.tool.cbuild import ExternalCompilationInfo -from pypy.conftest import option from pypy.module._rawffi.interp_rawffi import TYPEMAP, TYPEMAP_FLOAT_LETTERS from pypy.module._rawffi.tracker import Tracker @@ -1133,15 +1132,6 @@ def setup_class(cls): cls.w_sizes_and_alignments = cls.space.wrap(dict( [(k, (v.c_size, v.c_alignment)) for k,v in TYPEMAP.iteritems()])) - # - # detect if we're running on PyPy with DO_TRACING not compiled in - if option.runappdirect: - try: - import _rawffi - _rawffi._num_of_allocated_objects() - except (ImportError, RuntimeError), e: - py.test.skip(str(e)) - # Tracker.DO_TRACING = True def test_structure_autofree(self): diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -49,11 +49,12 @@ 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', 'unpack': 'interp_struct.unpack', - } + + 'Struct': 'interp_struct.W_Struct', + } appleveldefs = { 'error': 'app_struct.error', 'pack_into': 'app_struct.pack_into', 'unpack_from': 'app_struct.unpack_from', - 'Struct': 'app_struct.Struct', - } + } diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py --- a/pypy/module/struct/app_struct.py +++ b/pypy/module/struct/app_struct.py @@ -4,6 +4,7 @@ """ import struct + class error(Exception): """Exception raised on various occasions; argument is a string describing what is wrong.""" @@ -21,21 +22,3 @@ raise error("unpack_from requires a buffer of at least %d bytes" % (size,)) return struct.unpack(fmt, data) - -# XXX inefficient -class Struct(object): - def __init__(self, format): - self.format = format - self.size = struct.calcsize(format) - - def pack(self, *args): - return struct.pack(self.format, *args) - - def unpack(self, s): - return struct.unpack(self.format, s) - - def pack_into(self, buffer, offset, *args): - return pack_into(self.format, buffer, offset, *args) - - def unpack_from(self, buffer, offset=0): - return unpack_from(self.format, buffer, offset) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -1,15 +1,22 @@ -from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import OperationError -from pypy.module.struct.formatiterator import PackFormatIterator, UnpackFormatIterator from rpython.rlib import jit from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator +from rpython.tool.sourcetools import func_with_new_name + +from 
pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import OperationError +from pypy.interpreter.typedef import TypeDef, interp_attrproperty +from pypy.module.struct.formatiterator import ( + PackFormatIterator, UnpackFormatIterator +) @unwrap_spec(format=str) def calcsize(space, format): return space.wrap(_calcsize(space, format)) + def _calcsize(space, format): fmtiter = CalcSizeFormatIterator() try: @@ -52,3 +59,44 @@ w_error = space.getattr(w_module, space.wrap('error')) raise OperationError(w_error, space.wrap(e.msg)) return space.newtuple(fmtiter.result_w[:]) + + +class W_Struct(W_Root): + _immutable_fields_ = ["format", "size"] + + def __init__(self, space, format): + self.format = format + self.size = _calcsize(space, format) + + @unwrap_spec(format=str) + def descr__new__(space, w_subtype, format): + self = space.allocate_instance(W_Struct, w_subtype) + W_Struct.__init__(self, space, format) + return self + + def wrap_struct_method(name): + def impl(self, space, __args__): + w_module = space.getbuiltinmodule('struct') + w_method = space.getattr(w_module, space.wrap(name)) + return space.call_obj_args( + w_method, space.wrap(self.format), __args__ + ) + + return func_with_new_name(impl, 'descr_' + name) + + descr_pack = wrap_struct_method("pack") + descr_unpack = wrap_struct_method("unpack") + descr_pack_into = wrap_struct_method("pack_into") + descr_unpack_from = wrap_struct_method("unpack_from") + + +W_Struct.typedef = TypeDef("Struct", + __new__=interp2app(W_Struct.descr__new__.im_func), + format=interp_attrproperty("format", cls=W_Struct), + size=interp_attrproperty("size", cls=W_Struct), + + pack=interp2app(W_Struct.descr_pack), + unpack=interp2app(W_Struct.descr_unpack), + pack_into=interp2app(W_Struct.descr_pack_into), + unpack_from=interp2app(W_Struct.descr_unpack_from), +) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -167,7 +167,7 @@ # # This list corresponds to the operations implemented by the LLInterpreter. # Note that many exception-raising operations can be replaced by calls -# to helper functions in rpython.rtyper.raisingops.raisingops. +# to helper functions in rpython.rtyper.raisingops. # ***** Run test_lloperation after changes. ***** LL_OPERATIONS = { diff --git a/rpython/rtyper/raisingops/raisingops.py b/rpython/rtyper/raisingops.py rename from rpython/rtyper/raisingops/raisingops.py rename to rpython/rtyper/raisingops.py diff --git a/rpython/rtyper/raisingops/__init__.py b/rpython/rtyper/raisingops/__init__.py deleted file mode 100644 diff --git a/rpython/translator/backendopt/raisingop2direct_call.py b/rpython/translator/backendopt/raisingop2direct_call.py --- a/rpython/translator/backendopt/raisingop2direct_call.py +++ b/rpython/translator/backendopt/raisingop2direct_call.py @@ -1,5 +1,5 @@ from rpython.translator.backendopt.support import log, all_operations, annotate -import rpython.rtyper.raisingops.raisingops +import rpython.rtyper.raisingops log = log.raisingop2directcall @@ -15,7 +15,7 @@ def raisingop2direct_call(translator, graphs=None): """search for operations that could raise an exception and change that - operation into a direct_call to a function from the raisingops directory. + operation into a direct_call to a function from the raisingops module. This function also needs to be annotated and specialized. 
note: this could be extended to allow for any operation to be changed into @@ -30,7 +30,7 @@ for op in all_operations(graphs): if not is_raisingop(op): continue - func = getattr(rpython.rtyper.raisingops.raisingops, op.opname, None) + func = getattr(rpython.rtyper.raisingops, op.opname, None) if not func: log.warning("%s not found" % op.opname) continue From noreply at buildbot.pypy.org Tue Jan 21 11:22:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 11:22:47 +0100 (CET) Subject: [pypy-commit] pypy default: Move some "for" loops out of the fast path, to allow the JIT to look Message-ID: <20140121102247.287BF1C302F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68801:8f53215f2dc2 Date: 2014-01-21 11:21 +0100 http://bitbucket.org/pypy/pypy/changeset/8f53215f2dc2/ Log: Move some "for" loops out of the fast path, to allow the JIT to look inside the functions. diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -35,13 +35,7 @@ if (isinstance(self, W_BytearrayObject) and space.isinstance_w(w_sub, space.w_int)): char = space.int_w(w_sub) - if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) - for c in self.data: - if ord(c) == char: - return space.w_True - return space.w_False + return _descr_contains_bytearray(self.data, space, char) return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) def descr_add(self, space, w_other): @@ -79,7 +73,7 @@ assert start >= 0 and stop >= 0 return self._sliced(space, selfvalue, start, stop, self) else: - ret = [selfvalue[start + i*step] for i in range(sl)] + ret = _descr_getslice_slowpath(selfvalue, start, step, sl) return self._new_from_list(ret) index = space.getindex_w(w_index, space.w_IndexError, "string index") @@ -253,17 +247,21 @@ return self._is_generic(space, '_isdigit') # this is only for bytes and bytesarray: unicodeobject overrides it + def _descr_islower_slowpath(self, space, v): + cased = False + for idx in range(len(v)): + if self._isupper(v[idx]): + return False + elif not cased and self._islower(v[idx]): + cased = True + return cased + def descr_islower(self, space): v = self._val(space) if len(v) == 1: c = v[0] return space.newbool(self._islower(c)) - cased = False - for idx in range(len(v)): - if self._isupper(v[idx]): - return space.w_False - elif not cased and self._islower(v[idx]): - cased = True + cased = self._descr_islower_slowpath(space, v) return space.newbool(cased) def descr_isspace(self, space): @@ -291,17 +289,21 @@ return space.newbool(cased) # this is only for bytes and bytesarray: unicodeobject overrides it + def _descr_isupper_slowpath(self, space, v): + cased = False + for idx in range(len(v)): + if self._islower(v[idx]): + return False + elif not cased and self._isupper(v[idx]): + cased = True + return cased + def descr_isupper(self, space): v = self._val(space) if len(v) == 1: c = v[0] return space.newbool(self._isupper(c)) - cased = False - for idx in range(len(v)): - if self._islower(v[idx]): - return space.w_False - elif not cased and self._isupper(v[idx]): - cased = True + cased = self._descr_isupper_slowpath(space, v) return space.newbool(cased) def descr_join(self, space, w_list): @@ -677,3 +679,19 @@ def descr_getnewargs(self, space): return space.newtuple([self._new(self._val(space))]) + +# ____________________________________________________________ +# helpers for slow 
paths, moved out because they contain loops + +def _descr_contains_bytearray(data, space, char): + if not 0 <= char < 256: + raise OperationError(space.w_ValueError, + space.wrap("byte must be in range(0, 256)")) + for c in data: + if ord(c) == char: + return space.w_True + return space.w_False + + at specialize.argtype(0) +def _descr_getslice_slowpath(selfvalue, start, step, sl): + return [selfvalue[start + i*step] for i in range(sl)] From noreply at buildbot.pypy.org Tue Jan 21 11:50:03 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 11:50:03 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Instead of marking every llexternal that calls a macro with a macro flag, we generate call wrappers for external functions by default. A call wrapper is a short C snippet that just calls the external function. This way we work on the API level instead of the ABI level, because the C compiler generates the actual code to call the external function. These functions are inlined because we use link-time optimization. One problem is currently that the call wrappers are not stripped from the binary. Message-ID: <20140121105003.831F21C3360@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68802:e4ab9d710ba1 Date: 2014-01-21 11:50 +0100 http://bitbucket.org/pypy/pypy/changeset/e4ab9d710ba1/ Log: Instead of marking every llexternal that calls a macro with a macro flag, we generate call wrappers for external functions by default. A call wrapper is a short C snippet that just calls the external function. This way we work on the API level instead of the ABI level, because the C compiler generates the actual code to call the external function. These functions are inlined because we use link- time optimization. One problem is currently that the call wrappers are not stripped from the binary. diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -238,7 +238,7 @@ _is_nonnull_longdouble = rffi.llexternal( "pypy__is_nonnull_longdouble", [rffi.LONGDOUBLE], lltype.Bool, compilation_info=eci, _nowrapper=True, elidable_function=True, - sandboxsafe=True, macro=True) + sandboxsafe=True) # split here for JIT backends that don't support floats/longlongs/etc. 
def is_nonnull_longdouble(cdata): diff --git a/rpython/jit/backend/x86/valgrind.py b/rpython/jit/backend/x86/valgrind.py --- a/rpython/jit/backend/x86/valgrind.py +++ b/rpython/jit/backend/x86/valgrind.py @@ -21,8 +21,7 @@ lltype.Void, compilation_info=eci, _nowrapper=True, - sandboxsafe=True, - macro=True) + sandboxsafe=True) # ____________________________________________________________ diff --git a/rpython/rlib/longlong2float.py b/rpython/rlib/longlong2float.py --- a/rpython/rlib/longlong2float.py +++ b/rpython/rlib/longlong2float.py @@ -68,14 +68,12 @@ uint2singlefloat = rffi.llexternal( "pypy__uint2singlefloat", [rffi.UINT], rffi.FLOAT, _callable=uint2singlefloat_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - llvm_wrapper=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) singlefloat2uint = rffi.llexternal( "pypy__singlefloat2uint", [rffi.FLOAT], rffi.UINT, _callable=singlefloat2uint_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, sandboxsafe=True, - llvm_wrapper=True) + _nowrapper=True, elidable_function=True, sandboxsafe=True) class Float2LongLongEntry(ExtRegistryEntry): diff --git a/rpython/rlib/rmd5.py b/rpython/rlib/rmd5.py --- a/rpython/rlib/rmd5.py +++ b/rpython/rlib/rmd5.py @@ -51,7 +51,7 @@ _rotateLeft = rffi.llexternal( "pypy__rotateLeft", [lltype.Unsigned, lltype.Signed], lltype.Unsigned, _callable=_rotateLeft_emulator, compilation_info=eci, - _nowrapper=True, elidable_function=True, llvm_wrapper=True) + _nowrapper=True, elidable_function=True) # we expect the function _rotateLeft to be actually inlined diff --git a/rpython/rlib/rstack.py b/rpython/rlib/rstack.py --- a/rpython/rlib/rstack.py +++ b/rpython/rlib/rstack.py @@ -20,15 +20,15 @@ includes=['src/stack.h'], separate_module_files=[srcdir / 'stack.c', srcdir / 'threadlocal.c']) -def llexternal(name, args, res, _callable=None, macro=None): +def llexternal(name, args, res, _callable=None): return rffi.llexternal(name, args, res, compilation_info=compilation_info, sandboxsafe=True, _nowrapper=True, - _callable=_callable, macro=macro) + _callable=_callable) _stack_get_end = llexternal('LL_stack_get_end', [], lltype.Signed, - lambda: 0, True) + lambda: 0) _stack_get_length = llexternal('LL_stack_get_length', [], lltype.Signed, - lambda: 1, True) + lambda: 1) _stack_set_length_fraction = llexternal('LL_stack_set_length_fraction', [lltype.Float], lltype.Void, lambda frac: None) @@ -36,17 +36,15 @@ [lltype.Signed], lltype.Char, lambda cur: '\x00') # the following is used by the JIT -_stack_get_end_adr = llexternal('LL_stack_get_end_adr', [], lltype.Signed, - macro=True) -_stack_get_length_adr= llexternal('LL_stack_get_length_adr',[], lltype.Signed, - macro=True) +_stack_get_end_adr = llexternal('LL_stack_get_end_adr', [], lltype.Signed) +_stack_get_length_adr= llexternal('LL_stack_get_length_adr',[], lltype.Signed) # the following is also used by the JIT: "critical code" paths are paths in # which we should not raise StackOverflow at all, but just ignore the stack limit _stack_criticalcode_start = llexternal('LL_stack_criticalcode_start', [], - lltype.Void, lambda: None, True) + lltype.Void, lambda: None) _stack_criticalcode_stop = llexternal('LL_stack_criticalcode_stop', [], - lltype.Void, lambda: None, True) + lltype.Void, lambda: None) def stack_check(): if not we_are_translated(): diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ 
b/rpython/rtyper/lltypesystem/llarena.py @@ -602,7 +602,6 @@ lltype.Signed, sandboxsafe=True, _nowrapper=True, - macro=True, compilation_info=_eci) register_external(_round_up_for_allocation, [int, int], int, 'll_arena.round_up_for_allocation', diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -63,8 +63,7 @@ sandboxsafe=False, releasegil='auto', _nowrapper=False, calling_conv='c', elidable_function=False, macro=None, - random_effects_on_gcobjs='auto', - llvm_wrapper=None): + random_effects_on_gcobjs='auto'): """Build an external function that will invoke the C function 'name' with the given 'args' types and 'result' type. @@ -84,14 +83,9 @@ don't bother releasing the GIL. An explicit True or False overrides this logic. macro: whether to write a macro wrapper for this function. This is - necessary for calling macros in tests or when using the llvm - translation backend. Setting it to True generates a macro wrapper - named '_rpy_call_wrapper_{name}'. Setting it to a string - generates a macro wrapper named '_rpy_call_wrapper_{macro}'. - llvm_wrapper: same semantics as macro but for calling ordinary functions - that the llvm translation backend can't handle, for example - static functions defined in ExternalCompilationInfo's - post_include_bits or functions with varargs. + necessary for calling macros in tests. Setting it to True generates + a macro wrapper named '_rpy_call_wrapper_{name}'. Setting it to a + string generates a macro wrapper named '_rpy_call_wrapper_{macro}'. """ if _callable is not None: assert callable(_callable) @@ -130,11 +124,6 @@ invoke_around_handlers or # because it can release the GIL has_callback) # because the callback can do it - if llvm_wrapper is None: - llvm_wrapper = macro - if llvm_wrapper is not None: - kwds['llvm_wrapper'] = llvm_wrapper - funcptr = lltype.functionptr(ext_type, name, external='C', compilation_info=compilation_info, _callable=_callable, diff --git a/rpython/rtyper/lltypesystem/test/test_rffi.py b/rpython/rtyper/lltypesystem/test/test_rffi.py --- a/rpython/rtyper/lltypesystem/test/test_rffi.py +++ b/rpython/rtyper/lltypesystem/test/test_rffi.py @@ -47,7 +47,7 @@ eci = ExternalCompilationInfo(includes=['stuff.h'], include_dirs=[udir]) - z = llexternal('X', [Signed], Signed, compilation_info=eci, macro=True) + z = llexternal('X', [Signed], Signed, compilation_info=eci) def f(): return z(8) @@ -298,7 +298,7 @@ STUFFP = COpaquePtr(typedef='stuff_ptr', compilation_info=eci) ll_get = llexternal('get', [STUFFP], lltype.Signed, - compilation_info=eci, llvm_wrapper=True) + compilation_info=eci) def f(): return ll_get(lltype.nullptr(STUFFP.TO)) diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -22,7 +22,7 @@ from rpython.rtyper.tool import rffi_platform as platform from rpython.rlib import rposix from rpython.translator.tool.cbuild import ExternalCompilationInfo -from rpython.rtyper.lltypesystem.llmemory import itemoffsetof, offsetof, Address +from rpython.rtyper.lltypesystem.llmemory import itemoffsetof, offsetof from rpython.rtyper.lltypesystem.rstr import STR from rpython.rlib.objectmodel import specialize @@ -977,8 +977,7 @@ @registering_if(os, 'makedev') def register_os_makedev(self): - c_makedev = self.llexternal('makedev', [rffi.INT, rffi.INT], rffi.INT, - macro=True) + c_makedev = self.llexternal('makedev', [rffi.INT, 
rffi.INT], rffi.INT) def makedev_llimpl(maj, min): return c_makedev(maj, min) return extdef([int, int], int, @@ -986,7 +985,7 @@ @registering_if(os, 'major') def register_os_major(self): - c_major = self.llexternal('major', [rffi.INT], rffi.INT, macro=True) + c_major = self.llexternal('major', [rffi.INT], rffi.INT) def major_llimpl(dev): return c_major(dev) return extdef([int], int, @@ -994,7 +993,7 @@ @registering_if(os, 'minor') def register_os_minor(self): - c_minor = self.llexternal('minor', [rffi.INT], rffi.INT, macro=True) + c_minor = self.llexternal('minor', [rffi.INT], rffi.INT) def minor_llimpl(dev): return c_minor(dev) return extdef([int], int, @@ -1030,7 +1029,7 @@ @registering(os.write) def register_os_write(self): os_write = self.llexternal(UNDERSCORE_ON_WIN32 + 'write', - [rffi.INT, Address, rffi.SIZE_T], + [rffi.INT, rffi.VOIDP, rffi.SIZE_T], rffi.SIZE_T) def os_write_llimpl(fd, data): @@ -1040,8 +1039,7 @@ try: written = rffi.cast(lltype.Signed, os_write( rffi.cast(rffi.INT, fd), - rffi.cast(Address, buf), - rffi.cast(rffi.SIZE_T, count))) + buf, rffi.cast(rffi.SIZE_T, count))) if written < 0: raise OSError(rposix.get_errno(), "os_write failed") finally: diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -656,35 +656,18 @@ def repr_ref(self, ptr_type, obj): if getattr(obj, 'external', None) == 'C': - if hasattr(obj, 'llvm_wrapper'): + if obj._name.startswith('llvm'): + name = '@' + obj._name + else: wrapper_name, source = rffi._write_call_wrapper( - obj._name, obj.llvm_wrapper, obj._TYPE) + obj._name, database.unique_name(obj._name, False), + obj._TYPE) name = '@' + wrapper_name database.genllvm.sources.append(source) - else: - name = '@' + obj._name - # Hack to support functions with different function signatures - prev_type = database.external_declared.get(name) - if prev_type is None: - database.external_declared[name] = self - elif prev_type is self: - ptr_type.refs[obj] = name - return - else: - ptr_type.refs[obj] = 'bitcast({}* {} to {}*)'.format( - prev_type.repr_type(), name, self.repr_type()) - return ptr_type.refs[obj] = name - if obj.calling_conv == 'c': - calling_conv = '' - elif obj.calling_conv == 'win': - # assume that x86_stdcallcc implies dllimport - calling_conv = 'dllimport x86_stdcallcc ' - else: - raise NotImplementedError - database.f.write('declare {}{} {}({})\n'.format( - calling_conv, self.result.repr_type(), name, + database.f.write('declare {} {}({})\n'.format( + self.result.repr_type(), name, ', '.join(arg.repr_type() for arg in self.args))) database.genllvm.ecis.append(obj.compilation_info) else: @@ -740,7 +723,6 @@ self.f = f self.names_counter = {} self.types = PRIMITIVES.copy() - self.external_declared = {} self.hashes = [] self.stack_bottoms = [] @@ -764,14 +746,15 @@ .get_gc_fields_lltype() # hint for ll2ctypes return ret - def unique_name(self, name): + def unique_name(self, name, llvm_name=True): if name not in self.names_counter: self.names_counter[name] = 0 - if self.identifier_regex.match(name) is None: + if llvm_name and self.identifier_regex.match(name) is None: return '{}"{}"'.format(name[0], name[1:]) return name self.names_counter[name] += 1 - return self.unique_name('{}_{}'.format(name, self.names_counter[name])) + return self.unique_name('{}_{}'.format(name, self.names_counter[name]), + llvm_name) OPS = { @@ -1148,19 +1131,13 @@ tmp.append('{arg.TV}'.format(arg=arg)) args = ', '.join(tmp) - if (isinstance(fn, 
ConstantRepr) and - getattr(fn.value._obj, 'calling_conv', None) == 'win'): - calling_conv = 'x86_stdcallcc ' - else: - calling_conv = '' - if result.type_ is LLVMVoid: - fmt = 'call {calling_conv}void {fn.V}({args})' + fmt = 'call void {fn.V}({args})' elif (isinstance(result.type_, PtrType) and isinstance(result.type_.to, FuncType)): - fmt = '{result.V} = call {calling_conv}{fn.TV}({args})' + fmt = '{result.V} = call {fn.TV}({args})' else: - fmt = '{result.V} = call {calling_conv}{result.T} {fn.V}({args})' + fmt = '{result.V} = call {result.T} {fn.V}({args})' self.w(fmt.format(**locals())) op_indirect_call = op_direct_call @@ -1764,15 +1741,14 @@ exports.clear() def _compile(self, shared=False): - self.sources.append(py.code.Source(r''' + self.sources.append(str(py.code.Source(r''' void pypy_debug_catch_fatal_exception(void) { fprintf(stderr, "Fatal RPython error\n"); abort(); - } - ''')) + }'''))) eci = ExternalCompilationInfo( includes=['stdio.h', 'stdlib.h'], - separate_module_sources=self.sources, + separate_module_sources=['\n'.join(self.sources)], post_include_bits=['typedef _Bool bool_t;'] ).merge(*self.ecis).convert_sources_to_files() From noreply at buildbot.pypy.org Tue Jan 21 12:49:02 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 21 Jan 2014 12:49:02 +0100 (CET) Subject: [pypy-commit] stmgc c7: add failing test for partially uncommitted pages / uncommitted objects Message-ID: <20140121114902.E12801C302F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r658:70275e7c7c74 Date: 2014-01-21 12:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/70275e7c7c74/ Log: add failing test for partially uncommitted pages / uncommitted objects diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -39,7 +39,8 @@ or PRIVATE), but not committed yet. So only visible from this transaction. 
*/ GCFLAG_NOT_COMMITTED = (1 << 1), - + /* only used during collections to mark an obj as moved out of the + generation it was in */ GCFLAG_MOVED = (1 << 2), }; @@ -62,7 +63,6 @@ struct object_s { uint8_t stm_flags; /* reserved for the STM library */ - uint8_t stm_write_lock; /* 1 if writeable by some thread */ /* make sure it doesn't get bigger than 4 bytes for performance reasons */ }; diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -74,13 +74,22 @@ extern size_t stmcb_size(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); +uint8_t _stm_get_flags(object_t *obj); +uint8_t _stm_get_page_flag(int pagenum); enum { SHARED_PAGE=0, REMAPPING_PAGE, PRIVATE_PAGE, UNCOMMITTED_SHARED_PAGE, }; /* flag_page_private */ -uint8_t _stm_get_page_flag(int pagenum); + +enum { + GCFLAG_WRITE_BARRIER = 1, + GCFLAG_NOT_COMMITTED = 2, + GCFLAG_MOVED = 4 +}; + + """) lib = ffi.verify(''' @@ -101,6 +110,11 @@ } +uint8_t _stm_get_flags(object_t *obj) { + return obj->stm_flags; +} + + bool _checked_stm_write(object_t *object) { jmpbufptr_t here; if (__builtin_setjmp(here) == 0) { // returned directly @@ -327,6 +341,9 @@ startp = start // 4096 return range(startp, startp + stm_get_obj_size(o) // 4096 + 1) +def stm_get_flags(o): + return lib._stm_get_flags(o) + class BaseTest(object): diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -391,8 +391,18 @@ rnew = stm_get_real_address(new) assert rnew[4097] == '\0' + def test_partial_alloced_pages(self): + stm_start_transaction() + new = stm_allocate(16) + stm_push_root(new) + stm_minor_collect() + new = stm_pop_root() + assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE + assert stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED - + stm_stop_transaction() + assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.SHARED_PAGE + assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) # def test_resolve_write_write_no_conflict(self): From noreply at buildbot.pypy.org Tue Jan 21 13:16:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 13:16:47 +0100 (CET) Subject: [pypy-commit] cffi default: Maybe a better error message Message-ID: <20140121121647.1FC151C039A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1458:61c4b449086c Date: 2014-01-21 13:16 +0100 http://bitbucket.org/cffi/cffi/changeset/61c4b449086c/ Log: Maybe a better error message diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -835,7 +835,7 @@ return new_simple_cdata(ptrdata, ct); } else if (ct->ct_flags & CT_IS_OPAQUE) { - PyErr_Format(PyExc_TypeError, "cannot return a cdata '%s'", + PyErr_Format(PyExc_TypeError, "cdata '%s' is opaque", ct->ct_name); return NULL; } From noreply at buildbot.pypy.org Tue Jan 21 13:18:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 13:18:09 +0100 (CET) Subject: [pypy-commit] pypy default: Maybe a better error message Message-ID: <20140121121809.CF65E1C039A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68803:dfca95ea34f0 Date: 2014-01-21 13:17 +0100 http://bitbucket.org/pypy/pypy/changeset/dfca95ea34f0/ Log: Maybe a better error message diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -33,7 +33,7 @@ if 
self.fields_dict is None: space = self.space raise operationerrfmt(w_errorcls or space.w_TypeError, - "'%s' is not completed yet", self.name) + "'%s' is opaque or not completed yet", self.name) def _alignof(self): self.check_complete(w_errorcls=self.space.w_ValueError) From noreply at buildbot.pypy.org Tue Jan 21 14:22:20 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 14:22:20 +0100 (CET) Subject: [pypy-commit] pypy default: Port fix for ropenssl from the llvm branch to default. Message-ID: <20140121132220.B2FBA1C00E3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68804:c077b2ff29c7 Date: 2014-01-21 14:10 +0100 http://bitbucket.org/pypy/pypy/changeset/c077b2ff29c7/ Log: Port fix for ropenssl from the llvm branch to default. diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -56,9 +56,17 @@ ASN1_STRING = lltype.Ptr(lltype.ForwardReference()) ASN1_ITEM = rffi.COpaquePtr('ASN1_ITEM') -ASN1_ITEM_EXP = lltype.Ptr(lltype.FuncType([], ASN1_ITEM)) X509_NAME = rffi.COpaquePtr('X509_NAME') +class CConfigBootstrap: + _compilation_info_ = eci + OPENSSL_EXPORT_VAR_AS_FUNCTION = rffi_platform.Defined( + "OPENSSL_EXPORT_VAR_AS_FUNCTION") +if rffi_platform.configure(CConfigBootstrap)["OPENSSL_EXPORT_VAR_AS_FUNCTION"]: + ASN1_ITEM_EXP = lltype.Ptr(lltype.FuncType([], ASN1_ITEM)) +else: + ASN1_ITEM_EXP = ASN1_ITEM + class CConfig: _compilation_info_ = eci @@ -128,8 +136,6 @@ ('block_size', rffi.INT)]) EVP_MD_SIZE = rffi_platform.SizeOf('EVP_MD') EVP_MD_CTX_SIZE = rffi_platform.SizeOf('EVP_MD_CTX') - OPENSSL_EXPORT_VAR_AS_FUNCTION = rffi_platform.Defined( - "OPENSSL_EXPORT_VAR_AS_FUNCTION") OBJ_NAME_st = rffi_platform.Struct( 'OBJ_NAME', @@ -259,10 +265,7 @@ ssl_external('i2a_ASN1_INTEGER', [BIO, ASN1_INTEGER], rffi.INT) ssl_external('ASN1_item_d2i', [rffi.VOIDP, rffi.CCHARPP, rffi.LONG, ASN1_ITEM], rffi.VOIDP) -if OPENSSL_EXPORT_VAR_AS_FUNCTION: - ssl_external('ASN1_ITEM_ptr', [ASN1_ITEM_EXP], ASN1_ITEM, macro=True) -else: - ssl_external('ASN1_ITEM_ptr', [rffi.VOIDP], ASN1_ITEM, macro=True) +ssl_external('ASN1_ITEM_ptr', [ASN1_ITEM_EXP], ASN1_ITEM, macro=True) ssl_external('sk_GENERAL_NAME_num', [GENERAL_NAMES], rffi.INT, macro=True) From noreply at buildbot.pypy.org Tue Jan 21 14:22:21 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 14:22:21 +0100 (CET) Subject: [pypy-commit] pypy default: Port another minor fix from the llvm branch. Message-ID: <20140121132221.E89FE1C00E3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68805:f6615f79d1cc Date: 2014-01-21 14:18 +0100 http://bitbucket.org/pypy/pypy/changeset/f6615f79d1cc/ Log: Port another minor fix from the llvm branch. 
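
For reference, the ropenssl change above relies on a two-stage rffi_platform pattern: a first configure() pass answers a single preprocessor question, and its answer decides how a later type is declared. A sketch of that pattern in isolation, modeled directly on the lines above (the include name here is an assumption, not taken from the commit):

    from rpython.rtyper.lltypesystem import lltype, rffi
    from rpython.rtyper.tool import rffi_platform
    from rpython.translator.tool.cbuild import ExternalCompilationInfo

    eci = ExternalCompilationInfo(includes=['openssl/ssl.h'])  # assumed include

    class CConfigBootstrap:
        _compilation_info_ = eci
        OPENSSL_EXPORT_VAR_AS_FUNCTION = rffi_platform.Defined(
            "OPENSSL_EXPORT_VAR_AS_FUNCTION")

    ASN1_ITEM = rffi.COpaquePtr('ASN1_ITEM')

    if rffi_platform.configure(CConfigBootstrap)["OPENSSL_EXPORT_VAR_AS_FUNCTION"]:
        # the header exposes the item through an accessor function
        ASN1_ITEM_EXP = lltype.Ptr(lltype.FuncType([], ASN1_ITEM))
    else:
        # the header exposes it as a plain variable
        ASN1_ITEM_EXP = ASN1_ITEM

The point of the bootstrap class is simply that the answer is needed before the main CConfig class can be written down, which is why it cannot live in the same configure() pass.
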
diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -454,17 +454,19 @@ unicode_ofs_length = self.unicode_descr.lendescr.offset def malloc_str(length): + type_id = llop.extract_ushort(llgroup.HALFWORD, str_type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - str_type_id, length, str_basesize, str_itemsize, + type_id, length, str_basesize, str_itemsize, str_ofs_length) self.generate_function('malloc_str', malloc_str, [lltype.Signed]) def malloc_unicode(length): + type_id = llop.extract_ushort(llgroup.HALFWORD, unicode_type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize, unicode_itemsize, + type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) self.generate_function('malloc_unicode', malloc_unicode, [lltype.Signed]) From noreply at buildbot.pypy.org Tue Jan 21 14:38:47 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 14:38:47 +0100 (CET) Subject: [pypy-commit] pypy default: Simplify code. Message-ID: <20140121133847.9C3061C010E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68806:709d090d1c17 Date: 2014-01-21 14:31 +0100 http://bitbucket.org/pypy/pypy/changeset/709d090d1c17/ Log: Simplify code. diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -33,13 +33,6 @@ # TODO: # sanity-checks using states -_BACKEND_TO_TYPESYSTEM = { - 'c': 'lltype', -} - -def backend_to_typesystem(backend): - return _BACKEND_TO_TYPESYSTEM[backend] - # set of translation steps to profile PROFILE = set([]) @@ -132,7 +125,7 @@ if backend == postfix: expose_task(task, explicit_task) elif ts: - if ts == backend_to_typesystem(postfix): + if ts == 'lltype': expose_task(explicit_task) else: expose_task(explicit_task) From noreply at buildbot.pypy.org Tue Jan 21 14:38:48 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 14:38:48 +0100 (CET) Subject: [pypy-commit] pypy default: Remove dead code. Message-ID: <20140121133848.B8F4B1C010E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68807:89c0969caec9 Date: 2014-01-21 14:35 +0100 http://bitbucket.org/pypy/pypy/changeset/89c0969caec9/ Log: Remove dead code. 
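
The driver change that follows trims how translation tasks are exposed per backend. For orientation, these tasks are the ones reachable through the interactive Translation wrapper; a minimal sketch (the entry point 'add' is made up for illustration, the method names are the ones exercised by rpython/translator/test/test_interactive.py later in this series):

    from rpython.translator.interactive import Translation

    def add(x, y):             # hypothetical toy entry point
        return x + y

    t = Translation(add, [int, int], backend='c')
    t.annotate()               # run the 'annotate' task
    t.rtype()                  # rtype the annotated graphs
    t.source_c()               # emit the C sources for the 'c' backend

Each call corresponds to one goal in the driver's task list; the code below only changes which of those goals get exposed for a given backend, not what they do.
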
diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -33,9 +33,6 @@ # TODO: # sanity-checks using states -# set of translation steps to profile -PROFILE = set([]) - class Instrument(Exception): pass @@ -248,15 +245,6 @@ def info(self, msg): log.info(msg) - def _profile(self, goal, func): - from cProfile import Profile - from rpython.tool.lsprofcalltree import KCacheGrind - d = {'func':func} - prof = Profile() - prof.runctx("res = func()", globals(), d) - KCacheGrind(prof).output(open(goal + ".out", "w")) - return d['res'] - def _do(self, goal, func, *args, **kwds): title = func.task_title if goal in self.done: @@ -270,10 +258,7 @@ try: instrument = False try: - if goal in PROFILE: - res = self._profile(goal, func) - else: - res = func() + res = func() except Instrument: instrument = True if not func.task_idempotent: From noreply at buildbot.pypy.org Tue Jan 21 14:44:50 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 14:44:50 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: I don't think this is still necessary. Message-ID: <20140121134450.A953D1C039A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68808:803c70128eeb Date: 2014-01-21 14:42 +0100 http://bitbucket.org/pypy/pypy/changeset/803c70128eeb/ Log: I don't think this is still necessary. diff --git a/rpython/translator/c/src/obmalloc.c b/rpython/translator/c/src/obmalloc.c --- a/rpython/translator/c/src/obmalloc.c +++ b/rpython/translator/c/src/obmalloc.c @@ -222,8 +222,7 @@ #define uchar unsigned char /* assuming == 8 bits */ #undef uint -typedef unsigned int uint; -//#define uint unsigned int /* assuming >= 16 bits */ +#define uint unsigned int /* assuming >= 16 bits */ #undef ulong #define ulong Unsigned /* assuming >= 32 bits */ From noreply at buildbot.pypy.org Tue Jan 21 14:44:52 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 14:44:52 +0100 (CET) Subject: [pypy-commit] pypy default: Back out changeset 89c0969caec9. Message-ID: <20140121134452.0684A1C039A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68809:3f632ce16792 Date: 2014-01-21 14:44 +0100 http://bitbucket.org/pypy/pypy/changeset/3f632ce16792/ Log: Back out changeset 89c0969caec9. 
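
The helper dropped by "Remove dead code" above (and restored again by the back-out that follows) boils down to a generic cProfile-to-KCachegrind dump. As a standalone sketch, usable for profiling any callable ('profile_call' and 'outname' are made-up names; the KCacheGrind class is the one the driver itself imports):

    from cProfile import Profile
    from rpython.tool.lsprofcalltree import KCacheGrind

    def profile_call(outname, func):
        # run func() under cProfile and dump a KCachegrind-readable file
        d = {'func': func}
        prof = Profile()
        prof.runctx("res = func()", globals(), d)
        with open(outname + ".out", "w") as f:
            KCacheGrind(prof).output(f)
        return d['res']
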
diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -33,6 +33,9 @@ # TODO: # sanity-checks using states +# set of translation steps to profile +PROFILE = set([]) + class Instrument(Exception): pass @@ -245,6 +248,15 @@ def info(self, msg): log.info(msg) + def _profile(self, goal, func): + from cProfile import Profile + from rpython.tool.lsprofcalltree import KCacheGrind + d = {'func':func} + prof = Profile() + prof.runctx("res = func()", globals(), d) + KCacheGrind(prof).output(open(goal + ".out", "w")) + return d['res'] + def _do(self, goal, func, *args, **kwds): title = func.task_title if goal in self.done: @@ -258,7 +270,10 @@ try: instrument = False try: - res = func() + if goal in PROFILE: + res = self._profile(goal, func) + else: + res = func() except Instrument: instrument = True if not func.task_idempotent: From noreply at buildbot.pypy.org Tue Jan 21 15:07:54 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 21 Jan 2014 15:07:54 +0100 (CET) Subject: [pypy-commit] stmgc c7: change test, still failing Message-ID: <20140121140755.0A5141C3973@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r659:754f18d24407 Date: 2014-01-21 15:09 +0100 http://bitbucket.org/pypy/stmgc/changeset/754f18d24407/ Log: change test, still failing diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -398,12 +398,27 @@ stm_minor_collect() new = stm_pop_root() assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE - assert stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED + assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) stm_stop_transaction() assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.SHARED_PAGE assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) + stm_start_transaction() + newer = stm_allocate(16) + stm_push_root(newer) + stm_minor_collect() + newer = stm_pop_root() + # 'new' is still in shared_page and committed + assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.SHARED_PAGE + assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) + # 'newer' is now part of the SHARED page with 'new', but + # marked as UNCOMMITTED, so no privatization has to take place: + assert stm_get_obj_pages(new) == stm_get_obj_pages(newer) + assert stm_get_flags(newer) & lib.GCFLAG_NOT_COMMITTED + stm_write(newer) # does not privatize + assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == lib.SHARED_PAGE + # def test_resolve_write_write_no_conflict(self): # stm_start_transaction() From noreply at buildbot.pypy.org Tue Jan 21 15:33:26 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 15:33:26 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Remove trailing whitespace. Message-ID: <20140121143326.535CC1C33B0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68810:df2a63368e88 Date: 2014-01-21 14:49 +0100 http://bitbucket.org/pypy/pypy/changeset/df2a63368e88/ Log: Remove trailing whitespace. 
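
Once restored by the back-out above, the hook is driven by the module-level PROFILE set; enabling it looks roughly like this (the goal name 'annotate' is an assumed example):

    from rpython.translator import driver
    driver.PROFILE.add('annotate')   # write 'annotate.out' when that goal runs
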
diff --git a/rpython/translator/llvm/common_header.h b/rpython/translator/llvm/common_header.h --- a/rpython/translator/llvm/common_header.h +++ b/rpython/translator/llvm/common_header.h @@ -1,7 +1,7 @@ #ifdef _WIN64 typedef __int64 Signed; typedef unsigned __int64 Unsigned; -# define SIGNED_MIN LLONG_MIN +# define SIGNED_MIN LLONG_MIN #else typedef long Signed; typedef unsigned long Unsigned; From noreply at buildbot.pypy.org Tue Jan 21 15:33:27 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 15:33:27 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Remove more trailing whitespace. Message-ID: <20140121143327.A94DF1C33B0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68811:51c2f9a02d22 Date: 2014-01-21 14:52 +0100 http://bitbucket.org/pypy/pypy/changeset/51c2f9a02d22/ Log: Remove more trailing whitespace. diff --git a/rpython/translator/test/test_interactive.py b/rpython/translator/test/test_interactive.py --- a/rpython/translator/test/test_interactive.py +++ b/rpython/translator/test/test_interactive.py @@ -61,11 +61,11 @@ t = Translation(f, [int, int], backend='llvm') t.source(gc='boehm') assert 'source_llvm' in t.driver.done - + t = Translation(f, [int, int]) t.source_llvm() assert 'source_llvm' in t.driver.done - + def test_disable_logic(): def f(x,y): From noreply at buildbot.pypy.org Tue Jan 21 15:33:29 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 15:33:29 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: hg merge default Message-ID: <20140121143329.AC9C81C33B0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68812:67ac13633b48 Date: 2014-01-21 14:54 +0100 http://bitbucket.org/pypy/pypy/changeset/67ac13633b48/ Log: hg merge default diff too long, truncating to 2000 out of 2137 lines diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys try: from __pypy__ import builtinify diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -5,7 +5,7 @@ from _ctypes.basics import is_struct_shape from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys import traceback diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import sizeof, byref, as_ffi_pointer diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi import weakref import sys diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new 
file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,2 @@ +# Backward compatibility hack +from _rawffi.alt import * diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -34,14 +34,14 @@ "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "micronumpy", "_ffi", + "_collections", "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "_md5", "cStringIO", "array", "_ffi", + "struct", "_md5", "cStringIO", "array", "binascii", # the following are needed for pyrepl (and hence for the # interactive prompt/pdb) @@ -96,7 +96,6 @@ # no _rawffi if importing rpython.rlib.clibffi raises ImportError # or CompilationError or py.test.skip.Exception "_rawffi" : ["rpython.rlib.clibffi"], - "_ffi" : ["rpython.rlib.clibffi"], "zlib" : ["rpython.rlib.rzlib"], "bz2" : ["pypy.module.bz2.interp_bz2"], diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -109,6 +109,4 @@ .. _`rpython/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ .. _`rpython/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/ .. _`rpython/translator/c/src/stacklet/stacklet.h`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/stacklet.h -.. _`rpython/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/cli/ -.. _`rpython/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/jvm/ .. _`rpython/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/ diff --git a/pypy/doc/config/translation.lldebug0.txt b/pypy/doc/config/translation.lldebug0.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/translation.lldebug0.txt @@ -0,0 +1,1 @@ +Like lldebug, but in addition compile C files with -O0 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,3 +44,7 @@ .. branch: refactor-str-types Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. 
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -167,7 +167,7 @@ def run(self): """Start this frame's execution.""" if self.getcode().co_flags & pycode.CO_GENERATOR: - if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: + if 1:# self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: from pypy.interpreter.generator import GeneratorIteratorWithDel return self.space.wrap(GeneratorIteratorWithDel(self)) else: diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -33,7 +33,7 @@ if self.fields_dict is None: space = self.space raise operationerrfmt(w_errorcls or space.w_TypeError, - "'%s' is not completed yet", self.name) + "'%s' is opaque or not completed yet", self.name) def _alignof(self): self.check_complete(w_errorcls=self.space.w_ValueError) diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -118,6 +118,7 @@ SF_MSVC_BITFIELDS = 1 SF_GCC_ARM_BITFIELDS = 2 SF_GCC_BIG_ENDIAN = 4 +SF_PACKED = 8 if sys.platform == 'win32': DEFAULT_SFLAGS = SF_MSVC_BITFIELDS @@ -190,8 +191,8 @@ boffset = 0 # reset each field at offset 0 # # update the total alignment requirement, but skip it if the - # field is an anonymous bitfield - falign = ftype.alignof() + # field is an anonymous bitfield or if SF_PACKED + falign = 1 if sflags & SF_PACKED else ftype.alignof() do_align = True if (sflags & SF_GCC_ARM_BITFIELDS) == 0 and fbitsize >= 0: if (sflags & SF_MSVC_BITFIELDS) == 0: @@ -305,6 +306,12 @@ if bits_already_occupied + fbitsize > 8 * ftype.size: # it would not fit, we need to start at the next # allowed position + if ((sflags & SF_PACKED) != 0 and + (bits_already_occupied & 7) != 0): + raise operationerrfmt(space.w_NotImplementedError, + "with 'packed', gcc would compile field " + "'%s.%s' to reuse some bits in the previous " + "field", w_ctype.name, fname) field_offset_bytes += falign assert boffset < field_offset_bytes * 8 boffset = field_offset_bytes * 8 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3137,6 +3137,44 @@ p = newp(BArray, None) assert sizeof(p[2:9]) == 7 * sizeof(BInt) +def test_packed(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BShort = new_primitive_type("short") + BStruct = new_struct_type("struct foo") + complete_struct_or_union(BStruct, [('a1', BLong, -1), + ('a2', BChar, -1), + ('a3', BShort, -1)], + None, -1, -1, 8) # SF_PACKED==8 + d = BStruct.fields + assert len(d) == 3 + assert d[0][0] == 'a1' + assert d[0][1].type is BLong + assert d[0][1].offset == 0 + assert d[0][1].bitshift == -1 + assert d[0][1].bitsize == -1 + assert d[1][0] == 'a2' + assert d[1][1].type is BChar + assert d[1][1].offset == sizeof(BLong) + assert d[1][1].bitshift == -1 + assert d[1][1].bitsize == -1 + assert d[2][0] == 'a3' + assert d[2][1].type is BShort + assert d[2][1].offset == sizeof(BLong) + sizeof(BChar) + assert d[2][1].bitshift == -1 + assert d[2][1].bitsize == -1 + assert sizeof(BStruct) == sizeof(BLong) + sizeof(BChar) + sizeof(BShort) + assert alignof(BStruct) == 1 + +def test_packed_with_bitfields(): + BLong = 
new_primitive_type("long") + BChar = new_primitive_type("char") + BStruct = new_struct_type("struct foo") + py.test.raises(NotImplementedError, + complete_struct_or_union, + BStruct, [('a1', BLong, 30), + ('a2', BChar, 5)], + None, -1, -1, 8) # SF_PACKED==8 def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/_multiprocessing/test/test_memory.py b/pypy/module/_multiprocessing/test/test_memory.py --- a/pypy/module/_multiprocessing/test/test_memory.py +++ b/pypy/module/_multiprocessing/test/test_memory.py @@ -1,6 +1,6 @@ class AppTestMemory: spaceconfig = dict(usemodules=('_multiprocessing', 'mmap', - '_rawffi', '_ffi', 'itertools')) + '_rawffi', 'itertools')) def test_address_of(self): import _multiprocessing diff --git a/pypy/module/_rawffi/__init__.py b/pypy/module/_rawffi/__init__.py --- a/pypy/module/_rawffi/__init__.py +++ b/pypy/module/_rawffi/__init__.py @@ -2,6 +2,7 @@ """ from pypy.interpreter.mixedmodule import MixedModule +from pypy.module._rawffi import alt class Module(MixedModule): interpleveldefs = { @@ -33,6 +34,10 @@ appleveldefs = { } + submodules = { + 'alt': alt.Module, + } + def buildloaders(cls): from pypy.module._rawffi import interp_rawffi diff --git a/pypy/module/_ffi/__init__.py b/pypy/module/_rawffi/alt/__init__.py rename from pypy/module/_ffi/__init__.py rename to pypy/module/_rawffi/alt/__init__.py diff --git a/pypy/module/_ffi/app_struct.py b/pypy/module/_rawffi/alt/app_struct.py rename from pypy/module/_ffi/app_struct.py rename to pypy/module/_rawffi/alt/app_struct.py --- a/pypy/module/_ffi/app_struct.py +++ b/pypy/module/_rawffi/alt/app_struct.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt class MetaStructure(type): @@ -11,7 +11,7 @@ fields = dic.get('_fields_') if fields is None: return - struct_descr = _ffi._StructDescr(name, fields) + struct_descr = alt._StructDescr(name, fields) for field in fields: dic[field.name] = field dic['_struct_'] = struct_descr diff --git a/pypy/module/_ffi/interp_ffitype.py b/pypy/module/_rawffi/alt/interp_ffitype.py rename from pypy/module/_ffi/interp_ffitype.py rename to pypy/module/_rawffi/alt/interp_ffitype.py --- a/pypy/module/_ffi/interp_ffitype.py +++ b/pypy/module/_rawffi/alt/interp_ffitype.py @@ -116,7 +116,7 @@ types = [ # note: most of the type name directly come from the C equivalent, # with the exception of bytes: in C, ubyte and char are equivalent, - # but for _ffi the first expects a number while the second a 1-length + # but for here the first expects a number while the second a 1-length # string W_FFIType('slong', libffi.types.slong), W_FFIType('sint', libffi.types.sint), diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py rename from pypy/module/_ffi/interp_funcptr.py rename to pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_ffi/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -3,7 +3,7 @@ operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from pypy.module._ffi.interp_ffitype import W_FFIType +from pypy.module._rawffi.alt.interp_ffitype import W_FFIType # from rpython.rtyper.lltypesystem import lltype, rffi # @@ -13,7 +13,7 @@ from rpython.rlib.rdynload import DLOpenError from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter +from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, 
ToAppLevelConverter from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror import os @@ -302,7 +302,7 @@ W_FuncPtr.typedef = TypeDef( - '_ffi.FuncPtr', + '_rawffi.alt.FuncPtr', __call__ = interp2app(W_FuncPtr.call), getaddr = interp2app(W_FuncPtr.getaddr), free_temp_buffers = interp2app(W_FuncPtr.free_temp_buffers), @@ -346,7 +346,7 @@ W_CDLL.typedef = TypeDef( - '_ffi.CDLL', + '_rawffi.alt.CDLL', __new__ = interp2app(descr_new_cdll), getfunc = interp2app(W_CDLL.getfunc), getaddressindll = interp2app(W_CDLL.getaddressindll), @@ -363,7 +363,7 @@ W_WinDLL.typedef = TypeDef( - '_ffi.WinDLL', + '_rawffi.alt.WinDLL', __new__ = interp2app(descr_new_windll), getfunc = interp2app(W_WinDLL.getfunc), getaddressindll = interp2app(W_WinDLL.getaddressindll), diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_rawffi/alt/interp_struct.py rename from pypy/module/_ffi/interp_struct.py rename to pypy/module/_rawffi/alt/interp_struct.py --- a/pypy/module/_ffi/interp_struct.py +++ b/pypy/module/_rawffi/alt/interp_struct.py @@ -8,8 +8,8 @@ from pypy.interpreter.typedef import TypeDef, interp_attrproperty from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import operationerrfmt -from pypy.module._ffi.interp_ffitype import W_FFIType -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter +from pypy.module._rawffi.alt.interp_ffitype import W_FFIType +from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter class W_Field(W_Root): diff --git a/pypy/module/_ffi/test/__init__.py b/pypy/module/_rawffi/alt/test/__init__.py rename from pypy/module/_ffi/test/__init__.py rename to pypy/module/_rawffi/alt/test/__init__.py diff --git a/pypy/module/_ffi/test/test_ffitype.py b/pypy/module/_rawffi/alt/test/test_ffitype.py rename from pypy/module/_ffi/test/test_ffitype.py rename to pypy/module/_rawffi/alt/test/test_ffitype.py --- a/pypy/module/_ffi/test/test_ffitype.py +++ b/pypy/module/_rawffi/alt/test/test_ffitype.py @@ -1,21 +1,21 @@ -from pypy.module._ffi.test.test_funcptr import BaseAppTestFFI +from pypy.module._rawffi.alt.test.test_funcptr import BaseAppTestFFI class AppTestFFIType(BaseAppTestFFI): def test_simple_types(self): - from _ffi import types + from _rawffi.alt import types assert str(types.sint) == "" assert str(types.uint) == "" assert types.sint.name == 'sint' assert types.uint.name == 'uint' def test_sizeof(self): - from _ffi import types + from _rawffi.alt import types assert types.sbyte.sizeof() == 1 assert types.sint.sizeof() == 4 def test_typed_pointer(self): - from _ffi import types + from _rawffi.alt import types intptr = types.Pointer(types.sint) # create a typed pointer to sint assert intptr.deref_pointer() is types.sint assert str(intptr) == '' @@ -23,7 +23,7 @@ raises(TypeError, "types.Pointer(42)") def test_pointer_identity(self): - from _ffi import types + from _rawffi.alt import types x = types.Pointer(types.slong) y = types.Pointer(types.slong) z = types.Pointer(types.char) @@ -31,7 +31,7 @@ assert x is not z def test_char_p_cached(self): - from _ffi import types + from _rawffi.alt import types x = types.Pointer(types.char) assert x is types.char_p x = types.Pointer(types.unichar) diff --git a/pypy/module/_ffi/test/test_funcptr.py b/pypy/module/_rawffi/alt/test/test_funcptr.py rename from pypy/module/_ffi/test/test_funcptr.py rename to pypy/module/_rawffi/alt/test/test_funcptr.py --- a/pypy/module/_ffi/test/test_funcptr.py +++ 
b/pypy/module/_rawffi/alt/test/test_funcptr.py @@ -7,7 +7,7 @@ import sys, py class BaseAppTestFFI(object): - spaceconfig = dict(usemodules=('_ffi', '_rawffi')) + spaceconfig = dict(usemodules=('_rawffi',)) @classmethod def prepare_c_example(cls): @@ -62,17 +62,17 @@ cls.w_f_12_34_plus_56_78 = space.wrap(f_result) def test_libload(self): - import _ffi - _ffi.CDLL(self.libc_name) + import _rawffi.alt + _rawffi.alt.CDLL(self.libc_name) def test_libload_fail(self): - import _ffi - raises(OSError, _ffi.CDLL, "xxxxx_this_name_does_not_exist_xxxxx") + import _rawffi.alt + raises(OSError, _rawffi.alt.CDLL, "xxxxx_this_name_does_not_exist_xxxxx") def test_libload_None(self): if self.iswin32: skip("unix specific") - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types # this should return *all* loaded libs, dlopen(NULL) dll = CDLL(None) # libm should be loaded @@ -80,20 +80,20 @@ assert res == 1.0 def test_callfunc(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libm = CDLL(self.libm_name) pow = libm.getfunc('pow', [types.double, types.double], types.double) assert pow(2, 3) == 8 def test_getaddr(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libm = CDLL(self.libm_name) pow = libm.getfunc('pow', [types.double, types.double], types.double) assert pow.getaddr() == self.pow_addr def test_getaddressindll(self): import sys - from _ffi import CDLL + from _rawffi.alt import CDLL libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') fff = sys.maxint*2-1 @@ -102,7 +102,7 @@ assert pow_addr == self.pow_addr & fff def test_func_fromaddr(self): - from _ffi import CDLL, types, FuncPtr + from _rawffi.alt import CDLL, types, FuncPtr libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') pow = FuncPtr.fromaddr(pow_addr, 'pow', [types.double, types.double], @@ -117,7 +117,7 @@ } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy', [types.sint, types.sint], types.sint) assert sum_xy(30, 12) == 42 @@ -129,7 +129,7 @@ DLLEXPORT void set_dummy(int val) { dummy = val; } DLLEXPORT int get_dummy() { return dummy; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) set_dummy = libfoo.getfunc('set_dummy', [types.sint], types.void) get_dummy = libfoo.getfunc('get_dummy', [], types.sint) @@ -144,7 +144,7 @@ DLLEXPORT int* get_dummy_ptr() { return &dummy; } DLLEXPORT void set_val_to_ptr(int* ptr, int val) { *ptr = val; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) get_dummy = libfoo.getfunc('get_dummy', [], types.sint) get_dummy_ptr = libfoo.getfunc('get_dummy_ptr', [], types.void_p) @@ -163,7 +163,7 @@ DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types class MyPointerWrapper(object): def __init__(self, value): @@ -197,7 +197,7 @@ return len; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) mystrlen = libfoo.getfunc('mystrlen', [types.char_p], types.slong) @@ -223,7 +223,7 @@ return len; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) mystrlen = libfoo.getfunc('mystrlen_u', [types.unichar_p], types.slong) @@ -247,7 +247,7 @@ return s; } 
""" - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) do_nothing = libfoo.getfunc('do_nothing', [types.char_p], types.char_p) @@ -264,7 +264,7 @@ DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) intptr = types.Pointer(types.sint) @@ -283,7 +283,7 @@ DLLEXPORT long is_null_ptr(void* ptr) { return ptr == NULL; } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) is_null_ptr = libfoo.getfunc('is_null_ptr', [types.void_p], types.ulong) assert not is_null_ptr(sys.maxint+1) @@ -296,7 +296,7 @@ } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_ul', [types.ulong, types.ulong], types.ulong) @@ -313,7 +313,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_us', [types.ushort, types.ushort], types.ushort) @@ -327,7 +327,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_us', [types.ubyte, types.ubyte], types.ubyte) @@ -342,7 +342,7 @@ } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types maxint32 = 2147483647 libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_ui', [types.uint, types.uint], @@ -357,7 +357,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_sb', [types.sbyte, types.sbyte], types.sbyte) @@ -371,7 +371,7 @@ return x - ('a'-'A'); } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) my_toupper = libfoo.getfunc('my_toupper', [types.char], types.char) @@ -385,7 +385,7 @@ return x + y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_wc', [types.unichar, types.unichar], types.unichar) @@ -400,7 +400,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_float', [types.float, types.float], types.float) @@ -415,7 +415,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types maxint32 = 2147483647 # we cannot really go above maxint on 64 bits # (and we would not test anything, as there long # is the same as long long) @@ -437,7 +437,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types maxint64 = 9223372036854775807 # maxint64+1 does not fit into a # longlong, but it does into a # ulonglong @@ -464,7 +464,7 @@ return p.x + p.y; } """ - from _ffi import CDLL, types, _StructDescr, Field + from _rawffi.alt import CDLL, types, _StructDescr, Field Point = _StructDescr('Point', [ Field('x', types.slong), Field('y', types.slong), @@ -487,7 +487,7 @@ return p; } """ - from _ffi import CDLL, types, _StructDescr, Field + from _rawffi.alt import CDLL, types, _StructDescr, Field Point = _StructDescr('Point', [ Field('x', types.slong), Field('y', types.slong), @@ -500,9 +500,9 @@ assert p.getfield('x') == 12 assert p.getfield('y') == 34 - # 
XXX: support for _rawffi structures should be killed as soon as we - # implement ctypes.Structure on top of _ffi. In the meantime, we support - # both + # XXX: long ago the plan was to kill _rawffi structures in favor of + # _rawffi.alt structures. The plan never went anywhere, so we're + # stuck with both. def test_byval_argument__rawffi(self): """ // defined above @@ -510,7 +510,7 @@ DLLEXPORT long sum_point(struct Point p); """ import _rawffi - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types POINT = _rawffi.Structure([('x', 'l'), ('y', 'l')]) ffi_point = POINT.get_ffi_type() libfoo = CDLL(self.libfoo_name) @@ -529,7 +529,7 @@ DLLEXPORT struct Point make_point(long x, long y); """ import _rawffi - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types POINT = _rawffi.Structure([('x', 'l'), ('y', 'l')]) ffi_point = POINT.get_ffi_type() libfoo = CDLL(self.libfoo_name) @@ -542,23 +542,23 @@ def test_TypeError_numargs(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy', [types.sint, types.sint], types.sint) raises(TypeError, "sum_xy(1, 2, 3)") raises(TypeError, "sum_xy(1)") def test_TypeError_voidarg(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) raises(TypeError, "libfoo.getfunc('sum_xy', [types.void], types.sint)") def test_OSError_loading(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types raises(OSError, "CDLL('I do not exist')") def test_AttributeError_missing_function(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) raises(AttributeError, "libfoo.getfunc('I_do_not_exist', [], types.void)") if self.iswin32: @@ -569,7 +569,7 @@ def test_calling_convention1(self): if not self.iswin32: skip("windows specific") - from _ffi import WinDLL, types + from _rawffi.alt import WinDLL, types libm = WinDLL(self.libm_name) pow = libm.getfunc('pow', [types.double, types.double], types.double) try: @@ -582,7 +582,7 @@ def test_calling_convention2(self): if not self.iswin32: skip("windows specific") - from _ffi import WinDLL, types + from _rawffi.alt import WinDLL, types kernel = WinDLL('Kernel32.dll') sleep = kernel.getfunc('Sleep', [types.uint], types.void) sleep(10) @@ -590,7 +590,7 @@ def test_calling_convention3(self): if not self.iswin32: skip("windows specific") - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types wrong_kernel = CDLL('Kernel32.dll') wrong_sleep = wrong_kernel.getfunc('Sleep', [types.uint], types.void) try: @@ -603,7 +603,7 @@ def test_func_fromaddr2(self): if not self.iswin32: skip("windows specific") - from _ffi import CDLL, types, FuncPtr + from _rawffi.alt import CDLL, types, FuncPtr from _rawffi import FUNCFLAG_STDCALL libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') @@ -619,7 +619,7 @@ def test_func_fromaddr3(self): if not self.iswin32: skip("windows specific") - from _ffi import WinDLL, types, FuncPtr + from _rawffi.alt import WinDLL, types, FuncPtr from _rawffi import FUNCFLAG_STDCALL kernel = WinDLL('Kernel32.dll') sleep_addr = kernel.getaddressindll('Sleep') @@ -636,7 +636,7 @@ """ if not self.iswin32: skip("windows specific") - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) f_name = libfoo.getfunc('AAA_first_ordinal_function', [], types.sint) f_ordinal = libfoo.getfunc(1, [], types.sint) diff --git 
a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_rawffi/alt/test/test_struct.py rename from pypy/module/_ffi/test/test_struct.py rename to pypy/module/_rawffi/alt/test/test_struct.py --- a/pypy/module/_ffi/test/test_struct.py +++ b/pypy/module/_rawffi/alt/test/test_struct.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.module._ffi.interp_ffitype import app_types, W_FFIType -from pypy.module._ffi.interp_struct import compute_size_and_alignement, W_Field -from pypy.module._ffi.test.test_funcptr import BaseAppTestFFI +from pypy.module._rawffi.alt.interp_ffitype import app_types, W_FFIType +from pypy.module._rawffi.alt.interp_struct import compute_size_and_alignement, W_Field +from pypy.module._rawffi.alt.test.test_funcptr import BaseAppTestFFI class TestStruct(object): @@ -69,7 +69,7 @@ cls.w_runappdirect = cls.space.wrap(cls.runappdirect) def test__StructDescr(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -80,7 +80,7 @@ assert descr.ffitype.name == 'struct foo' def test_alignment(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.sbyte), @@ -92,7 +92,7 @@ assert fields[1].offset == longsize # aligned to WORD def test_missing_field(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -106,7 +106,7 @@ def test_unknown_type(self): if self.runappdirect: skip('cannot use self.dummy_type with -A') - from _ffi import _StructDescr, Field + from _rawffi.alt import _StructDescr, Field fields = [ Field('x', self.dummy_type), ] @@ -116,7 +116,7 @@ raises(TypeError, "struct.setfield('x', 42)") def test_getfield_setfield(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -133,7 +133,7 @@ def test_getfield_setfield_signed_types(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('sbyte', types.sbyte), @@ -156,7 +156,7 @@ def test_getfield_setfield_unsigned_types(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('ubyte', types.ubyte), @@ -188,7 +188,7 @@ def test_getfield_setfield_longlong(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('slonglong', types.slonglong), @@ -205,7 +205,7 @@ def test_getfield_setfield_float(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.double), @@ -219,7 +219,7 @@ def test_getfield_setfield_singlefloat(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.float), @@ -237,7 +237,7 @@ assert mem == [123.5] def test_define_fields(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = 
types.slong.sizeof() fields = [ Field('x', types.slong), @@ -255,7 +255,7 @@ raises(ValueError, "descr.define_fields(fields)") def test_pointer_to_incomplete_struct(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -271,7 +271,7 @@ assert types.Pointer(descr.ffitype) is foo_p def test_nested_structure(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() foo_fields = [ Field('x', types.slong), @@ -310,7 +310,7 @@ def test_compute_shape(self): - from _ffi import Structure, Field, types + from _rawffi.alt import Structure, Field, types class Point(Structure): _fields_ = [ Field('x', types.slong), diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_rawffi/alt/test/test_type_converter.py rename from pypy/module/_ffi/test/test_type_converter.py rename to pypy/module/_rawffi/alt/test/test_type_converter.py --- a/pypy/module/_ffi/test/test_type_converter.py +++ b/pypy/module/_rawffi/alt/test/test_type_converter.py @@ -1,8 +1,8 @@ import sys from rpython.rlib.rarithmetic import r_uint, r_singlefloat, r_longlong, r_ulonglong from rpython.rlib.libffi import IS_32_BIT -from pypy.module._ffi.interp_ffitype import app_types, descr_new_pointer -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter +from pypy.module._rawffi.alt.interp_ffitype import app_types, descr_new_pointer +from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter class DummyFromAppLevelConverter(FromAppLevelConverter): @@ -29,7 +29,7 @@ class TestFromAppLevel(object): - spaceconfig = dict(usemodules=('_ffi',)) + spaceconfig = dict(usemodules=('_rawffi',)) def setup_class(cls): converter = DummyFromAppLevelConverter(cls.space) @@ -104,12 +104,12 @@ def test__as_ffi_pointer_(self): space = self.space w_MyPointerWrapper = space.appexec([], """(): - import _ffi + from _rawffi.alt import types class MyPointerWrapper(object): def __init__(self, value): self.value = value def _as_ffi_pointer_(self, ffitype): - assert ffitype is _ffi.types.void_p + assert ffitype is types.void_p return self.value return MyPointerWrapper @@ -151,7 +151,7 @@ class TestToAppLevel(object): - spaceconfig = dict(usemodules=('_ffi',)) + spaceconfig = dict(usemodules=('_rawffi',)) def setup_class(cls): converter = DummyToAppLevelConverter(cls.space) diff --git a/pypy/module/_ffi/test/test_ztranslation.py b/pypy/module/_rawffi/alt/test/test_ztranslation.py rename from pypy/module/_ffi/test/test_ztranslation.py rename to pypy/module/_rawffi/alt/test/test_ztranslation.py --- a/pypy/module/_ffi/test/test_ztranslation.py +++ b/pypy/module/_rawffi/alt/test/test_ztranslation.py @@ -1,4 +1,4 @@ -from pypy.objspace.fake.checkmodule import checkmodule - -def test__ffi_translates(): - checkmodule('_ffi', '_rawffi') +from pypy.objspace.fake.checkmodule import checkmodule + +def test__ffi_translates(): + checkmodule('_rawffi') diff --git a/pypy/module/_ffi/type_converter.py b/pypy/module/_rawffi/alt/type_converter.py rename from pypy/module/_ffi/type_converter.py rename to pypy/module/_rawffi/alt/type_converter.py --- a/pypy/module/_ffi/type_converter.py +++ b/pypy/module/_rawffi/alt/type_converter.py @@ -3,7 +3,7 @@ from rpython.rlib.rarithmetic import r_uint from pypy.interpreter.error import operationerrfmt, OperationError from pypy.module._rawffi.structure import 
W_StructureInstance, W_Structure -from pypy.module._ffi.interp_ffitype import app_types +from pypy.module._rawffi.alt.interp_ffitype import app_types class FromAppLevelConverter(object): """ @@ -17,7 +17,7 @@ self.space = space def unwrap_and_do(self, w_ffitype, w_obj): - from pypy.module._ffi.interp_struct import W__StructInstance + from pypy.module._rawffi.alt.interp_struct import W__StructInstance space = self.space if w_ffitype.is_longlong(): # note that we must check for longlong first, because either @@ -194,7 +194,7 @@ self.space = space def do_and_wrap(self, w_ffitype): - from pypy.module._ffi.interp_struct import W__StructDescr + from pypy.module._rawffi.alt.interp_struct import W__StructDescr space = self.space if w_ffitype.is_longlong(): # note that we must check for longlong first, because either diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -311,10 +311,7 @@ raise NotImplementedError def descr_get_ffi_type(self, space): - # XXX: this assumes that you have the _ffi module enabled. In the long - # term, probably we will move the code for build structures and arrays - # from _rawffi to _ffi - from pypy.module._ffi.interp_ffitype import W_FFIType + from pypy.module._rawffi.alt.interp_ffitype import W_FFIType return W_FFIType('', self.get_basic_ffi_type(), self) @unwrap_spec(n=int) @@ -579,7 +576,7 @@ s = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, address), maxlength) return space.wrap(s) - at unwrap_spec(address=r_uint, newcontent=str) + at unwrap_spec(address=r_uint, newcontent='bufferstr') def rawstring2charp(space, address, newcontent): from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -329,6 +329,8 @@ a = A(10, 'x'*10) _rawffi.rawstring2charp(a.buffer, "foobar") assert ''.join([a[i] for i in range(10)]) == "foobarxxxx" + _rawffi.rawstring2charp(a.buffer, buffer("baz")) + assert ''.join([a[i] for i in range(10)]) == "bazbarxxxx" a.free() def test_raw_callable(self): @@ -1137,24 +1139,32 @@ gc.collect() gc.collect() S = _rawffi.Structure([('x', 'i')]) - oldnum = _rawffi._num_of_allocated_objects() + try: + oldnum = _rawffi._num_of_allocated_objects() + except RuntimeError: + oldnum = '?' s = S(autofree=True) s.x = 3 s = None gc.collect() - assert oldnum == _rawffi._num_of_allocated_objects() + if oldnum != '?': + assert oldnum == _rawffi._num_of_allocated_objects() def test_array_autofree(self): import gc, _rawffi gc.collect() - oldnum = _rawffi._num_of_allocated_objects() + try: + oldnum = _rawffi._num_of_allocated_objects() + except RuntimeError: + oldnum = '?' 
A = _rawffi.Array('c') a = A(6, 'xxyxx\x00', autofree=True) assert _rawffi.charp2string(a.buffer) == 'xxyxx' a = None gc.collect() - assert oldnum == _rawffi._num_of_allocated_objects() + if oldnum != '?': + assert oldnum == _rawffi._num_of_allocated_objects() def teardown_class(cls): Tracker.DO_TRACING = False diff --git a/pypy/module/cppyy/test/test_crossing.py b/pypy/module/cppyy/test/test_crossing.py --- a/pypy/module/cppyy/test/test_crossing.py +++ b/pypy/module/cppyy/test/test_crossing.py @@ -65,7 +65,7 @@ return str(pydname) class AppTestCrossing(AppTestCpythonExtensionBase): - spaceconfig = dict(usemodules=['cpyext', 'cppyy', 'thread', '_rawffi', '_ffi', + spaceconfig = dict(usemodules=['cpyext', 'cppyy', 'thread', '_rawffi', 'array', 'itertools', 'rctime', 'binascii']) def setup_class(cls): diff --git a/pypy/module/cpyext/test/test_ztranslation.py b/pypy/module/cpyext/test/test_ztranslation.py --- a/pypy/module/cpyext/test/test_ztranslation.py +++ b/pypy/module/cpyext/test/test_ztranslation.py @@ -1,4 +1,4 @@ from pypy.objspace.fake.checkmodule import checkmodule def test_cpyext_translates(): - checkmodule('cpyext', '_ffi') + checkmodule('cpyext') diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -105,7 +105,7 @@ else: rest = '' if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', - 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', + 'imp', 'sys', 'array', 'itertools', 'operator', 'posix', '_socket', '_sre', '_lsprof', '_weakref', '__pypy__', 'cStringIO', '_collections', 'struct', 'mmap', 'marshal', '_codecs', 'rctime', 'cppyy', diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -49,11 +49,12 @@ 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', 'unpack': 'interp_struct.unpack', - } + + 'Struct': 'interp_struct.W_Struct', + } appleveldefs = { 'error': 'app_struct.error', 'pack_into': 'app_struct.pack_into', 'unpack_from': 'app_struct.unpack_from', - 'Struct': 'app_struct.Struct', - } + } diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py --- a/pypy/module/struct/app_struct.py +++ b/pypy/module/struct/app_struct.py @@ -4,6 +4,7 @@ """ import struct + class error(Exception): """Exception raised on various occasions; argument is a string describing what is wrong.""" @@ -21,21 +22,3 @@ raise error("unpack_from requires a buffer of at least %d bytes" % (size,)) return struct.unpack(fmt, data) - -# XXX inefficient -class Struct(object): - def __init__(self, format): - self.format = format - self.size = struct.calcsize(format) - - def pack(self, *args): - return struct.pack(self.format, *args) - - def unpack(self, s): - return struct.unpack(self.format, s) - - def pack_into(self, buffer, offset, *args): - return pack_into(self.format, buffer, offset, *args) - - def unpack_from(self, buffer, offset=0): - return unpack_from(self.format, buffer, offset) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -1,15 +1,22 @@ -from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import OperationError -from pypy.module.struct.formatiterator import PackFormatIterator, UnpackFormatIterator from rpython.rlib import jit from rpython.rlib.rstruct.error import StructError, 
StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator +from rpython.tool.sourcetools import func_with_new_name + +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import OperationError +from pypy.interpreter.typedef import TypeDef, interp_attrproperty +from pypy.module.struct.formatiterator import ( + PackFormatIterator, UnpackFormatIterator +) @unwrap_spec(format=str) def calcsize(space, format): return space.wrap(_calcsize(space, format)) + def _calcsize(space, format): fmtiter = CalcSizeFormatIterator() try: @@ -52,3 +59,44 @@ w_error = space.getattr(w_module, space.wrap('error')) raise OperationError(w_error, space.wrap(e.msg)) return space.newtuple(fmtiter.result_w[:]) + + +class W_Struct(W_Root): + _immutable_fields_ = ["format", "size"] + + def __init__(self, space, format): + self.format = format + self.size = _calcsize(space, format) + + @unwrap_spec(format=str) + def descr__new__(space, w_subtype, format): + self = space.allocate_instance(W_Struct, w_subtype) + W_Struct.__init__(self, space, format) + return self + + def wrap_struct_method(name): + def impl(self, space, __args__): + w_module = space.getbuiltinmodule('struct') + w_method = space.getattr(w_module, space.wrap(name)) + return space.call_obj_args( + w_method, space.wrap(self.format), __args__ + ) + + return func_with_new_name(impl, 'descr_' + name) + + descr_pack = wrap_struct_method("pack") + descr_unpack = wrap_struct_method("unpack") + descr_pack_into = wrap_struct_method("pack_into") + descr_unpack_from = wrap_struct_method("unpack_from") + + +W_Struct.typedef = TypeDef("Struct", + __new__=interp2app(W_Struct.descr__new__.im_func), + format=interp_attrproperty("format", cls=W_Struct), + size=interp_attrproperty("size", cls=W_Struct), + + pack=interp2app(W_Struct.descr_pack), + unpack=interp2app(W_Struct.descr_unpack), + pack_into=interp2app(W_Struct.descr_pack_into), + unpack_from=interp2app(W_Struct.descr_unpack_from), +) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/support.py b/pypy/module/test_lib_pypy/ctypes_tests/support.py --- a/pypy/module/test_lib_pypy/ctypes_tests/support.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/support.py @@ -37,6 +37,8 @@ pass def teardown_class(cls): + if not hasattr(sys, 'pypy_translation_info'): + return if sys.pypy_translation_info['translation.gc'] == 'boehm': return # it seems that boehm has problems with __del__, so not # everything is freed diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py b/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py @@ -38,6 +38,16 @@ buf.raw = "Hello, World" assert buf.value == "Hello, World" + def test_c_buffer_raw_from_buffer(self): + buf = c_buffer(32) + buf.raw = buffer("Hello, World") + assert buf.value == "Hello, World" + + def test_c_buffer_raw_from_memoryview(self): + buf = c_buffer(32) + buf.raw = memoryview("Hello, World") + assert buf.value == "Hello, World" + def test_param_1(self): BUF = c_char * 4 buf = BUF() diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -2,7 +2,7 @@ class AppTestGrp: - spaceconfig = dict(usemodules=('binascii', '_ffi', '_rawffi', 'itertools')) + spaceconfig = 
dict(usemodules=('binascii', '_rawffi', 'itertools')) def setup_class(cls): cls.w_grp = import_lib_pypy(cls.space, 'grp', diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -10,7 +10,7 @@ class AppTestOsWait: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('_rawffi', 'itertools')) def setup_class(cls): if not hasattr(os, "fork"): diff --git a/pypy/module/test_lib_pypy/test_pwd.py b/pypy/module/test_lib_pypy/test_pwd.py --- a/pypy/module/test_lib_pypy/test_pwd.py +++ b/pypy/module/test_lib_pypy/test_pwd.py @@ -1,7 +1,7 @@ import py, sys class AppTestPwd: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools', 'binascii')) + spaceconfig = dict(usemodules=('_rawffi', 'itertools', 'binascii')) def setup_class(cls): if sys.platform == 'win32': diff --git a/pypy/module/test_lib_pypy/test_resource.py b/pypy/module/test_lib_pypy/test_resource.py --- a/pypy/module/test_lib_pypy/test_resource.py +++ b/pypy/module/test_lib_pypy/test_resource.py @@ -9,7 +9,7 @@ class AppTestResource: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('_rawffi', 'itertools')) def setup_class(cls): rebuild.rebuild_one('resource.ctc.py') diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -35,13 +35,7 @@ if (isinstance(self, W_BytearrayObject) and space.isinstance_w(w_sub, space.w_int)): char = space.int_w(w_sub) - if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) - for c in self.data: - if ord(c) == char: - return space.w_True - return space.w_False + return _descr_contains_bytearray(self.data, space, char) return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) def descr_add(self, space, w_other): @@ -79,7 +73,7 @@ assert start >= 0 and stop >= 0 return self._sliced(space, selfvalue, start, stop, self) else: - ret = [selfvalue[start + i*step] for i in range(sl)] + ret = _descr_getslice_slowpath(selfvalue, start, step, sl) return self._new_from_list(ret) index = space.getindex_w(w_index, space.w_IndexError, "string index") @@ -253,17 +247,21 @@ return self._is_generic(space, '_isdigit') # this is only for bytes and bytesarray: unicodeobject overrides it + def _descr_islower_slowpath(self, space, v): + cased = False + for idx in range(len(v)): + if self._isupper(v[idx]): + return False + elif not cased and self._islower(v[idx]): + cased = True + return cased + def descr_islower(self, space): v = self._val(space) if len(v) == 1: c = v[0] return space.newbool(self._islower(c)) - cased = False - for idx in range(len(v)): - if self._isupper(v[idx]): - return space.w_False - elif not cased and self._islower(v[idx]): - cased = True + cased = self._descr_islower_slowpath(space, v) return space.newbool(cased) def descr_isspace(self, space): @@ -291,17 +289,21 @@ return space.newbool(cased) # this is only for bytes and bytesarray: unicodeobject overrides it + def _descr_isupper_slowpath(self, space, v): + cased = False + for idx in range(len(v)): + if self._islower(v[idx]): + return False + elif not cased and self._isupper(v[idx]): + cased = True + return cased + def descr_isupper(self, space): v = self._val(space) if len(v) == 1: c = v[0] return space.newbool(self._isupper(c)) - 
cased = False - for idx in range(len(v)): - if self._islower(v[idx]): - return space.w_False - elif not cased and self._isupper(v[idx]): - cased = True + cased = self._descr_isupper_slowpath(space, v) return space.newbool(cased) def descr_join(self, space, w_list): @@ -677,3 +679,19 @@ def descr_getnewargs(self, space): return space.newtuple([self._new(self._val(space))]) + +# ____________________________________________________________ +# helpers for slow paths, moved out because they contain loops + +def _descr_contains_bytearray(data, space, char): + if not 0 <= char < 256: + raise OperationError(space.w_ValueError, + space.wrap("byte must be in range(0, 256)")) + for c in data: + if ord(c) == char: + return space.w_True + return space.w_False + + at specialize.argtype(0) +def _descr_getslice_slowpath(selfvalue, start, step, sl): + return [selfvalue[start + i*step] for i in range(sl)] diff --git a/pypy/tool/pypyjit.py b/pypy/tool/pypyjit.py --- a/pypy/tool/pypyjit.py +++ b/pypy/tool/pypyjit.py @@ -37,7 +37,7 @@ config.objspace.usemodules._sre = False config.objspace.usemodules._lsprof = False # -config.objspace.usemodules._ffi = False +config.objspace.usemodules._rawffi = False config.objspace.usemodules.micronumpy = False # set_pypy_opt_level(config, level='jit') diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -58,7 +58,6 @@ ASN1_ITEM = rffi.COpaquePtr('ASN1_ITEM') X509_NAME = rffi.COpaquePtr('X509_NAME') -# maybe there's a better way but this is correct and fixes LLVM translation class CConfigBootstrap: _compilation_info_ = eci OPENSSL_EXPORT_VAR_AS_FUNCTION = rffi_platform.Defined( diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -99,7 +99,7 @@ called[0] += 1 if called[0] == 1: assert errors == "foo!" - assert enc == encoding + assert enc == encoding.replace('-', '') assert t is s assert start == startingpos assert stop == endingpos diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -167,7 +167,7 @@ # # This list corresponds to the operations implemented by the LLInterpreter. # Note that many exception-raising operations can be replaced by calls -# to helper functions in rpython.rtyper.raisingops.raisingops. +# to helper functions in rpython.rtyper.raisingops. # ***** Run test_lloperation after changes. ***** LL_OPERATIONS = { diff --git a/rpython/rtyper/raisingops.py b/rpython/rtyper/raisingops.py new file mode 100644 --- /dev/null +++ b/rpython/rtyper/raisingops.py @@ -0,0 +1,295 @@ +import sys +from rpython.rlib.rarithmetic import r_longlong, r_uint, intmask +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem.lltype import Signed, SignedLongLong, \ + UnsignedLongLong + +#XXX original SIGNED_RIGHT_SHIFT_ZERO_FILLS not taken into account +#XXX assuming HAVE_LONG_LONG (int_mul_ovf) +#XXX should int_mod and int_floordiv return an intmask(...) instead? 
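#  (These helpers are the functions that the backendopt pass
#   raisingop2direct_call rewrites exception-raising ll operations into
#   direct_calls to; see that module's docstring further down in this
#   archive.)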
+ +LONG_MAX = sys.maxint +LONG_MIN = -sys.maxint-1 + +LLONG_MAX = r_longlong(2 ** (r_longlong.BITS-1) - 1) +LLONG_MIN = -LLONG_MAX-1 + +def int_floordiv_zer(x, y): + '''#define OP_INT_FLOORDIV_ZER(x,y,r,err) \ + if ((y)) { OP_INT_FLOORDIV(x,y,r,err); } \ + else FAIL_ZER(err, "integer division") + ''' + if y: + return llop.int_floordiv(Signed, x, y) + else: + raise ZeroDivisionError("integer division") + +def uint_floordiv_zer(x, y): + '''#define OP_UINT_FLOORDIV_ZER(x,y,r,err) \ + if ((y)) { OP_UINT_FLOORDIV(x,y,r,err); } \ + else FAIL_ZER(err, "unsigned integer division") + ''' + if y: + return x / y + else: + raise ZeroDivisionError("unsigned integer division") + +def llong_floordiv_zer(x, y): + '''#define OP_LLONG_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_LLONG_FLOORDIV(x,y,r); } \ + else FAIL_ZER("integer division") + ''' + if y: + return llop.llong_floordiv(SignedLongLong, x, y) + else: + raise ZeroDivisionError("integer division") + +def ullong_floordiv_zer(x, y): + '''#define OP_ULLONG_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_ULLONG_FLOORDIV(x,y,r); } \ + else FAIL_ZER("unsigned integer division") + ''' + if y: + return llop.llong_floordiv(UnsignedLongLong, x, y) + else: + raise ZeroDivisionError("unsigned integer division") + + +def int_neg_ovf(x): + if x == LONG_MIN: + raise OverflowError("integer negate") + return -x + +def llong_neg_ovf(x): + if x == LLONG_MIN: + raise OverflowError("integer negate") + return -x + +def int_abs_ovf(x): + if x == LONG_MIN: + raise OverflowError("integer absolute") + if x < 0: + return -x + else: + return x + +def llong_abs_ovf(x): + if x == LLONG_MIN: + raise OverflowError("integer absolute") + if x < 0: + return -x + else: + return x + +def _int_add_ovf(x, y): + '''#define OP_INT_ADD_OVF(x,y,r,err) \ + OP_INT_ADD(x,y,r,err); \ + if ((r^(x)) >= 0 || (r^(y)) >= 0); \ + else FAIL_OVF(err, "integer addition") + ''' + r = x + y + if r^x >= 0 or r^y >= 0: + return r + else: + raise OverflowError("integer addition") + +def _int_add_nonneg_ovf(x, y): + ''' + OP_INT_ADD(x,y,r); \ + if (r >= (x)); \ + else FAIL_OVF("integer addition") + ''' + r = x + y + if r >= x: + return r + else: + raise OverflowError("integer addition") + +def _int_sub_ovf(x, y): + '''#define OP_INT_SUB_OVF(x,y,r,err) \ + OP_INT_SUB(x,y,r,err); \ + if ((r^(x)) >= 0 || (r^~(y)) >= 0); \ + else FAIL_OVF(err, "integer subtraction") + ''' + r = x - y + if r^x >= 0 or r^~y >= 0: + return r + else: + raise OverflowError("integer subtraction") + +def int_lshift_ovf(x, y): + '''#define OP_INT_LSHIFT_OVF(x,y,r,err) \ + OP_INT_LSHIFT(x,y,r,err); \ + if ((x) != Py_ARITHMETIC_RIGHT_SHIFT(long, r, (y))) \ + FAIL_OVF(err, "x<= 0) { OP_INT_RSHIFT(x,y,r,err); } \ + else FAIL_VAL(err, "negative shift count") + ''' + if y >= 0: + return _Py_ARITHMETIC_RIGHT_SHIFT(x, y) + else: + raise ValueError("negative shift count") + +def int_lshift_val(x, y): + '''#define OP_INT_LSHIFT_VAL(x,y,r,err) \ + if ((y) >= 0) { OP_INT_LSHIFT(x,y,r,err); } \ + else FAIL_VAL(err, "negative shift count") + ''' + if y >= 0: + return x << y + else: + raise ValueError("negative shift count") + +def int_lshift_ovf_val(x, y): + '''#define OP_INT_LSHIFT_OVF_VAL(x,y,r,err) \ + if ((y) >= 0) { OP_INT_LSHIFT_OVF(x,y,r,err); } \ + else FAIL_VAL(err, "negative shift count") + ''' + if y >= 0: + return int_lshift_ovf(x, y) + else: + raise ValueError("negative shift count") + +def int_floordiv_ovf(x, y): + '''#define OP_INT_FLOORDIV_OVF(x,y,r,err) \ + if ((y) == -1 && (x) < 0 && ((unsigned long)(x) << 1) == 0) \ + FAIL_OVF(err, "integer 
division"); \ + OP_INT_FLOORDIV(x,y,r,err) + ''' + if y == -1 and x < 0 and (r_uint(x) << 1) == 0: + raise OverflowError("integer division") + else: + return llop.int_floordiv(Signed, x, y) + +def int_floordiv_ovf_zer(x, y): + '''#define OP_INT_FLOORDIV_OVF_ZER(x,y,r,err) \ + if ((y)) { OP_INT_FLOORDIV_OVF(x,y,r,err); } \ + else FAIL_ZER(err, "integer division") + ''' + if y: + return int_floordiv_ovf(x, y) + else: + raise ZeroDivisionError("integer division") + +def int_mod_ovf(x, y): + '''#define OP_INT_MOD_OVF(x,y,r,err) \ + if ((y) == -1 && (x) < 0 && ((unsigned long)(x) << 1) == 0) \ + FAIL_OVF(err, "integer modulo"); \ + OP_INT_MOD(x,y,r,err) + ''' + if y == -1 and x < 0 and (r_uint(x) << 1) == 0: + raise OverflowError("integer modulo") + else: + return llop.int_mod(Signed, x, y) + +def int_mod_zer(x, y): + '''#define OP_INT_MOD_ZER(x,y,r,err) \ + if ((y)) { OP_INT_MOD(x,y,r,err); } \ + else FAIL_ZER(err, "integer modulo") + ''' + if y: + return llop.int_mod(Signed, x, y) + else: + raise ZeroDivisionError("integer modulo") + +def uint_mod_zer(x, y): + '''#define OP_UINT_MOD_ZER(x,y,r,err) \ + if ((y)) { OP_UINT_MOD(x,y,r,err); } \ + else FAIL_ZER(err, "unsigned integer modulo") + ''' + if y: + return x % y + else: + raise ZeroDivisionError("unsigned integer modulo") + +def int_mod_ovf_zer(x, y): + '''#define OP_INT_MOD_OVF_ZER(x,y,r,err) \ + if ((y)) { OP_INT_MOD_OVF(x,y,r,err); } \ + else FAIL_ZER(err, "integer modulo") + ''' + if y: + return int_mod_ovf(x, y) + else: + raise ZeroDivisionError("integer modulo") + +def llong_mod_zer(x, y): + '''#define OP_LLONG_MOD_ZER(x,y,r) \ + if ((y)) { OP_LLONG_MOD(x,y,r); } \ + else FAIL_ZER("integer modulo") + ''' + if y: + return llop.int_mod(SignedLongLong, x, y) + else: + raise ZeroDivisionError("integer modulo") + +# Helpers... + +def _Py_ARITHMETIC_RIGHT_SHIFT(i, j): + ''' +// Py_ARITHMETIC_RIGHT_SHIFT +// C doesn't define whether a right-shift of a signed integer sign-extends +// or zero-fills. Here a macro to force sign extension: +// Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J) +// Return I >> J, forcing sign extension. +// Requirements: +// I is of basic signed type TYPE (char, short, int, long, or long long). +// TYPE is one of char, short, int, long, or long long, although long long +// must not be used except on platforms that support it. +// J is an integer >= 0 and strictly less than the number of bits in TYPE +// (because C doesn't define what happens for J outside that range either). +// Caution: +// I may be evaluated more than once. + +#ifdef SIGNED_RIGHT_SHIFT_ZERO_FILLS + #define Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J) \ + ((I) < 0 ? ~((~(unsigned TYPE)(I)) >> (J)) : (I) >> (J)) +#else + #define Py_ARITHMETIC_RIGHT_SHIFT(TYPE, I, J) ((I) >> (J)) +#endif + ''' + return i >> j + +#XXX some code from src/int.h seems missing +#def int_mul_ovf(x, y): #HAVE_LONG_LONG version +# '''{ \ +# PY_LONG_LONG lr = (PY_LONG_LONG)(x) * (PY_LONG_LONG)(y); \ +# r = (long)lr; \ +# if ((PY_LONG_LONG)r == lr); \ +# else FAIL_OVF(err, "integer multiplication"); \ +# } +# ''' +# lr = r_longlong(x) * r_longlong(y); +# r = intmask(lr) +# if r_longlong(r) == lr: +# return r +# else: +# raise OverflowError("integer multiplication") + +#not HAVE_LONG_LONG version +def _int_mul_ovf(a, b): #long a, long b, long *longprod): + longprod = a * b + doubleprod = float(a) * float(b) + doubled_longprod = float(longprod) + + # Fast path for normal case: small multiplicands, and no info is lost in either method. 
+ if doubled_longprod == doubleprod: + return longprod + + # Somebody somewhere lost info. Close enough, or way off? Note + # that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0). + # The difference either is or isn't significant compared to the + # true value (of which doubleprod is a good approximation). + # absdiff/absprod <= 1/32 iff 32 * absdiff <= absprod -- 5 good bits is "close enough" + if 32.0 * abs(doubled_longprod - doubleprod) <= abs(doubleprod): + return longprod + + raise OverflowError("integer multiplication") diff --git a/rpython/rtyper/raisingops/__init__.py b/rpython/rtyper/raisingops/__init__.py deleted file mode 100644 diff --git a/rpython/rtyper/raisingops/raisingops.py b/rpython/rtyper/raisingops/raisingops.py deleted file mode 100644 --- a/rpython/rtyper/raisingops/raisingops.py +++ /dev/null @@ -1,295 +0,0 @@ -import sys -from rpython.rlib.rarithmetic import r_longlong, r_uint, intmask -from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.lltypesystem.lltype import Signed, SignedLongLong, \ - UnsignedLongLong - -#XXX original SIGNED_RIGHT_SHIFT_ZERO_FILLS not taken into account -#XXX assuming HAVE_LONG_LONG (int_mul_ovf) -#XXX should int_mod and int_floordiv return an intmask(...) instead? - -LONG_MAX = sys.maxint -LONG_MIN = -sys.maxint-1 - -LLONG_MAX = r_longlong(2 ** (r_longlong.BITS-1) - 1) -LLONG_MIN = -LLONG_MAX-1 - -def int_floordiv_zer(x, y): - '''#define OP_INT_FLOORDIV_ZER(x,y,r,err) \ - if ((y)) { OP_INT_FLOORDIV(x,y,r,err); } \ - else FAIL_ZER(err, "integer division") - ''' - if y: - return llop.int_floordiv(Signed, x, y) - else: - raise ZeroDivisionError("integer division") - -def uint_floordiv_zer(x, y): - '''#define OP_UINT_FLOORDIV_ZER(x,y,r,err) \ - if ((y)) { OP_UINT_FLOORDIV(x,y,r,err); } \ - else FAIL_ZER(err, "unsigned integer division") - ''' - if y: - return x / y - else: - raise ZeroDivisionError("unsigned integer division") - -def llong_floordiv_zer(x, y): - '''#define OP_LLONG_FLOORDIV_ZER(x,y,r) \ - if ((y)) { OP_LLONG_FLOORDIV(x,y,r); } \ - else FAIL_ZER("integer division") - ''' - if y: - return llop.llong_floordiv(SignedLongLong, x, y) - else: - raise ZeroDivisionError("integer division") - -def ullong_floordiv_zer(x, y): - '''#define OP_ULLONG_FLOORDIV_ZER(x,y,r) \ - if ((y)) { OP_ULLONG_FLOORDIV(x,y,r); } \ - else FAIL_ZER("unsigned integer division") - ''' - if y: - return llop.llong_floordiv(UnsignedLongLong, x, y) - else: - raise ZeroDivisionError("unsigned integer division") - - -def int_neg_ovf(x): - if x == LONG_MIN: - raise OverflowError("integer negate") - return -x - -def llong_neg_ovf(x): - if x == LLONG_MIN: - raise OverflowError("integer negate") - return -x - -def int_abs_ovf(x): - if x == LONG_MIN: - raise OverflowError("integer absolute") - if x < 0: - return -x - else: - return x - -def llong_abs_ovf(x): - if x == LLONG_MIN: - raise OverflowError("integer absolute") - if x < 0: - return -x - else: - return x - -def _int_add_ovf(x, y): - '''#define OP_INT_ADD_OVF(x,y,r,err) \ - OP_INT_ADD(x,y,r,err); \ - if ((r^(x)) >= 0 || (r^(y)) >= 0); \ - else FAIL_OVF(err, "integer addition") - ''' - r = x + y - if r^x >= 0 or r^y >= 0: - return r - else: - raise OverflowError("integer addition") - -def _int_add_nonneg_ovf(x, y): - ''' - OP_INT_ADD(x,y,r); \ - if (r >= (x)); \ - else FAIL_OVF("integer addition") - ''' - r = x + y - if r >= x: - return r - else: - raise OverflowError("integer addition") - -def _int_sub_ovf(x, y): - '''#define OP_INT_SUB_OVF(x,y,r,err) \ - OP_INT_SUB(x,y,r,err); 
\ - if ((r^(x)) >= 0 || (r^~(y)) >= 0); \ - else FAIL_OVF(err, "integer subtraction") - ''' - r = x - y - if r^x >= 0 or r^~y >= 0: - return r - else: - raise OverflowError("integer subtraction") - -def int_lshift_ovf(x, y): - '''#define OP_INT_LSHIFT_OVF(x,y,r,err) \ - OP_INT_LSHIFT(x,y,r,err); \ - if ((x) != Py_ARITHMETIC_RIGHT_SHIFT(long, r, (y))) \ - FAIL_OVF(err, "x<= 0) { OP_INT_RSHIFT(x,y,r,err); } \ - else FAIL_VAL(err, "negative shift count") - ''' - if y >= 0: - return _Py_ARITHMETIC_RIGHT_SHIFT(x, y) - else: - raise ValueError("negative shift count") - -def int_lshift_val(x, y): - '''#define OP_INT_LSHIFT_VAL(x,y,r,err) \ - if ((y) >= 0) { OP_INT_LSHIFT(x,y,r,err); } \ - else FAIL_VAL(err, "negative shift count") - ''' - if y >= 0: - return x << y - else: - raise ValueError("negative shift count") - -def int_lshift_ovf_val(x, y): - '''#define OP_INT_LSHIFT_OVF_VAL(x,y,r,err) \ - if ((y) >= 0) { OP_INT_LSHIFT_OVF(x,y,r,err); } \ - else FAIL_VAL(err, "negative shift count") - ''' - if y >= 0: - return int_lshift_ovf(x, y) - else: - raise ValueError("negative shift count") - -def int_floordiv_ovf(x, y): - '''#define OP_INT_FLOORDIV_OVF(x,y,r,err) \ - if ((y) == -1 && (x) < 0 && ((unsigned long)(x) << 1) == 0) \ - FAIL_OVF(err, "integer division"); \ - OP_INT_FLOORDIV(x,y,r,err) - ''' - if y == -1 and x < 0 and (r_uint(x) << 1) == 0: - raise OverflowError("integer division") - else: - return llop.int_floordiv(Signed, x, y) - -def int_floordiv_ovf_zer(x, y): - '''#define OP_INT_FLOORDIV_OVF_ZER(x,y,r,err) \ - if ((y)) { OP_INT_FLOORDIV_OVF(x,y,r,err); } \ - else FAIL_ZER(err, "integer division") - ''' - if y: - return int_floordiv_ovf(x, y) - else: - raise ZeroDivisionError("integer division") - -def int_mod_ovf(x, y): - '''#define OP_INT_MOD_OVF(x,y,r,err) \ - if ((y) == -1 && (x) < 0 && ((unsigned long)(x) << 1) == 0) \ - FAIL_OVF(err, "integer modulo"); \ - OP_INT_MOD(x,y,r,err) - ''' - if y == -1 and x < 0 and (r_uint(x) << 1) == 0: - raise OverflowError("integer modulo") - else: - return llop.int_mod(Signed, x, y) - -def int_mod_zer(x, y): - '''#define OP_INT_MOD_ZER(x,y,r,err) \ - if ((y)) { OP_INT_MOD(x,y,r,err); } \ - else FAIL_ZER(err, "integer modulo") - ''' - if y: - return llop.int_mod(Signed, x, y) - else: - raise ZeroDivisionError("integer modulo") - -def uint_mod_zer(x, y): - '''#define OP_UINT_MOD_ZER(x,y,r,err) \ - if ((y)) { OP_UINT_MOD(x,y,r,err); } \ - else FAIL_ZER(err, "unsigned integer modulo") - ''' - if y: - return x % y - else: - raise ZeroDivisionError("unsigned integer modulo") - From noreply at buildbot.pypy.org Tue Jan 21 15:33:30 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 15:33:30 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Remove this hack until something better comes into my mind. Message-ID: <20140121143330.D93751C33B0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68813:ac260e03714e Date: 2014-01-21 15:32 +0100 http://bitbucket.org/pypy/pypy/changeset/ac260e03714e/ Log: Remove this hack until something better comes into my mind. 
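As a minimal illustration (not part of any changeset above): the sign-trick test
used by int_add_ovf in rpython/rtyper/raisingops.py can be restated in plain
Python on an explicit word size. The helper name and the 64-bit width are chosen
only for this sketch.

    def add_ovf_demo(x, y, bits=64):
        # emulate a C-style signed addition of width 'bits', then apply the
        # same check as int_add_ovf: no overflow iff the result keeps the
        # sign of at least one operand, i.e. (r^x) >= 0 or (r^y) >= 0
        mask = (1 << bits) - 1
        r = (x + y) & mask
        if r >= (1 << (bits - 1)):
            r -= 1 << bits        # reinterpret as a negative two's-complement value
        if (r ^ x) >= 0 or (r ^ y) >= 0:
            return r
        raise OverflowError("integer addition")

    assert add_ovf_demo(3, 4) == 7
    assert add_ovf_demo(-2**63, 1) == -2**63 + 1
    try:
        add_ovf_demo(2**63 - 1, 1)    # LONG_MAX + 1
    except OverflowError:
        pass                          # expected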
diff --git a/rpython/rtyper/raisingops.py b/rpython/rtyper/raisingops.py --- a/rpython/rtyper/raisingops.py +++ b/rpython/rtyper/raisingops.py @@ -81,7 +81,7 @@ else: return x -def _int_add_ovf(x, y): +def int_add_ovf(x, y): '''#define OP_INT_ADD_OVF(x,y,r,err) \ OP_INT_ADD(x,y,r,err); \ if ((r^(x)) >= 0 || (r^(y)) >= 0); \ @@ -93,7 +93,7 @@ else: raise OverflowError("integer addition") -def _int_add_nonneg_ovf(x, y): +def int_add_nonneg_ovf(x, y): ''' OP_INT_ADD(x,y,r); \ if (r >= (x)); \ @@ -105,7 +105,7 @@ else: raise OverflowError("integer addition") -def _int_sub_ovf(x, y): +def int_sub_ovf(x, y): '''#define OP_INT_SUB_OVF(x,y,r,err) \ OP_INT_SUB(x,y,r,err); \ if ((r^(x)) >= 0 || (r^~(y)) >= 0); \ @@ -275,7 +275,7 @@ # raise OverflowError("integer multiplication") #not HAVE_LONG_LONG version -def _int_mul_ovf(a, b): #long a, long b, long *longprod): +def int_mul_ovf(a, b): #long a, long b, long *longprod): longprod = a * b doubleprod = float(a) * float(b) doubled_longprod = float(longprod) From noreply at buildbot.pypy.org Tue Jan 21 15:38:40 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 15:38:40 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Simplify this and make sure it doesn't modify the argument. Message-ID: <20140121143840.295301C010E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68814:bdde2236b903 Date: 2014-01-21 15:38 +0100 http://bitbucket.org/pypy/pypy/changeset/bdde2236b903/ Log: Simplify this and make sure it doesn't modify the argument. diff --git a/rpython/translator/backendopt/all.py b/rpython/translator/backendopt/all.py --- a/rpython/translator/backendopt/all.py +++ b/rpython/translator/backendopt/all.py @@ -41,8 +41,7 @@ config = translator.config.translation.backendopt.copy(as_default=True) config.set(**kwds) - translator_graphs = graphs is None - if translator_graphs: + if graphs is None: graphs = translator.graphs for graph in graphs: assert not hasattr(graph, '_seen_by_the_backend') @@ -53,10 +52,8 @@ if config.raisingop2direct_call: additional_graphs = raisingop2direct_call(translator, graphs) - if translator_graphs: - graphs = translator.graphs - else: - graphs.extend(additional_graphs) + if graphs is not translator.graphs: + graphs = graphs + additional_graphs if config.remove_asserts: constfold(config, graphs) From noreply at buildbot.pypy.org Tue Jan 21 16:19:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 16:19:31 +0100 (CET) Subject: [pypy-commit] pypy default: Have "rpython -Ojit --jittest" work again, step 1 Message-ID: <20140121151931.D77121C319A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68815:d3bab2233245 Date: 2014-01-21 15:04 +0100 http://bitbucket.org/pypy/pypy/changeset/d3bab2233245/ Log: Have "rpython -Ojit --jittest" work again, step 1 diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -73,7 +73,7 @@ translator = interp.typer.annotator.translator try: translator.config.translation.gc = "boehm" - except ConfigError: + except (ConfigError, TypeError): pass try: translator.config.translation.list_comprehension_operations = True diff --git a/rpython/jit/tl/jittest.py b/rpython/jit/tl/jittest.py --- a/rpython/jit/tl/jittest.py +++ b/rpython/jit/tl/jittest.py @@ -4,20 +4,20 @@ only after the '---> Checkpoint' fork. 
""" -from rpython.conftest import option +from rpython import conftest from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.annlowlevel import llstr from rpython.jit.metainterp import warmspot -from rpython.rlib.jit import OPTIMIZER_FULL -ARGS = ["jittest", "100"] +ARGS = ["--jit", "trace_eagerness=18,threshold=50", "-S", + "/home/arigo/pypysrc/32compiled/z.py"] def jittest(driver): - graph = driver.translator.graphs[0] - interp = LLInterpreter(driver.translator.rtyper, malloc_check=False) + graph = driver.translator._graphof(driver.entry_point) + interp = LLInterpreter(driver.translator.rtyper) def returns_null(T, *args, **kwds): return lltype.nullptr(T) @@ -32,12 +32,15 @@ def apply_jit(policy, interp, graph, CPUClass): print 'warmspot.jittify_and_run() started...' - option.view = True + if conftest.option is None: + class MyOpt: + pass + conftest.option = MyOpt() + conftest.option.view = True LIST = graph.getargs()[0].concretetype lst = LIST.TO.ll_newlist(len(ARGS)) for i, arg in enumerate(ARGS): lst.ll_setitem_fast(i, llstr(arg)) warmspot.jittify_and_run(interp, graph, [lst], policy=policy, listops=True, CPUClass=CPUClass, - backendopt=True, inline=True, - optimizer=OPTIMIZER_FULL) + backendopt=True, inline=True) From noreply at buildbot.pypy.org Tue Jan 21 16:19:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 16:19:32 +0100 (CET) Subject: [pypy-commit] pypy default: More jittest-ability. Message-ID: <20140121151932.F2AE51C319A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68816:1ae31b6d89d1 Date: 2014-01-21 16:18 +0100 http://bitbucket.org/pypy/pypy/changeset/1ae31b6d89d1/ Log: More jittest-ability. diff --git a/rpython/jit/backend/llgraph/support.py b/rpython/jit/backend/llgraph/support.py --- a/rpython/jit/backend/llgraph/support.py +++ b/rpython/jit/backend/llgraph/support.py @@ -67,10 +67,11 @@ if isinstance(TYPE, lltype.Ptr): if isinstance(x, (int, long, llmemory.AddressAsInt)): x = llmemory.cast_int_to_adr(x) - #if repr(x.ptr).startswith('<* Checkpoint' fork. """ +import os from rpython import conftest from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.annlowlevel import llstr from rpython.jit.metainterp import warmspot +from rpython.tool import runsubprocess +os.environ['PYPY_DONT_RUN_SUBPROCESS'] = '1' +reload(runsubprocess) -ARGS = ["--jit", "trace_eagerness=18,threshold=50", "-S", - "/home/arigo/pypysrc/32compiled/z.py"] + +ARGS = ["--jit", "threshold=100000,trace_eagerness=100000", + "-S", "/home/arigo/pypysrc/32compiled/z.py"] def jittest(driver): @@ -36,7 +41,8 @@ class MyOpt: pass conftest.option = MyOpt() - conftest.option.view = True + conftest.option.view = False + conftest.option.viewloops = True # XXX doesn't seem to work LIST = graph.getargs()[0].concretetype lst = LIST.TO.ll_newlist(len(ARGS)) for i, arg in enumerate(ARGS): From noreply at buildbot.pypy.org Tue Jan 21 16:19:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 16:19:34 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140121151934.372261C319A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68817:cda44e90c717 Date: 2014-01-21 16:18 +0100 http://bitbucket.org/pypy/pypy/changeset/cda44e90c717/ Log: merge heads diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -109,6 +109,4 @@ .. 
_`rpython/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ .. _`rpython/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/ .. _`rpython/translator/c/src/stacklet/stacklet.h`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/stacklet.h -.. _`rpython/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/cli/ -.. _`rpython/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/jvm/ .. _`rpython/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/ diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -167,7 +167,7 @@ def run(self): """Start this frame's execution.""" if self.getcode().co_flags & pycode.CO_GENERATOR: - if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: + if 1:# self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: from pypy.interpreter.generator import GeneratorIteratorWithDel return self.space.wrap(GeneratorIteratorWithDel(self)) else: diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -33,7 +33,7 @@ if self.fields_dict is None: space = self.space raise operationerrfmt(w_errorcls or space.w_TypeError, - "'%s' is not completed yet", self.name) + "'%s' is opaque or not completed yet", self.name) def _alignof(self): self.check_complete(w_errorcls=self.space.w_ValueError) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1,6 +1,5 @@ from rpython.translator.platform import platform from rpython.translator.tool.cbuild import ExternalCompilationInfo -from pypy.conftest import option from pypy.module._rawffi.interp_rawffi import TYPEMAP, TYPEMAP_FLOAT_LETTERS from pypy.module._rawffi.tracker import Tracker @@ -1133,15 +1132,6 @@ def setup_class(cls): cls.w_sizes_and_alignments = cls.space.wrap(dict( [(k, (v.c_size, v.c_alignment)) for k,v in TYPEMAP.iteritems()])) - # - # detect if we're running on PyPy with DO_TRACING not compiled in - if option.runappdirect: - try: - import _rawffi - _rawffi._num_of_allocated_objects() - except (ImportError, RuntimeError), e: - py.test.skip(str(e)) - # Tracker.DO_TRACING = True def test_structure_autofree(self): diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -49,11 +49,12 @@ 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', 'unpack': 'interp_struct.unpack', - } + + 'Struct': 'interp_struct.W_Struct', + } appleveldefs = { 'error': 'app_struct.error', 'pack_into': 'app_struct.pack_into', 'unpack_from': 'app_struct.unpack_from', - 'Struct': 'app_struct.Struct', - } + } diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py --- a/pypy/module/struct/app_struct.py +++ b/pypy/module/struct/app_struct.py @@ -4,6 +4,7 @@ """ import struct + class error(Exception): """Exception raised on various occasions; argument is a string describing what is wrong.""" @@ -21,21 +22,3 @@ raise error("unpack_from requires a buffer of at least %d bytes" % (size,)) return struct.unpack(fmt, data) - -# XXX inefficient -class Struct(object): - def 
__init__(self, format): - self.format = format - self.size = struct.calcsize(format) - - def pack(self, *args): - return struct.pack(self.format, *args) - - def unpack(self, s): - return struct.unpack(self.format, s) - - def pack_into(self, buffer, offset, *args): - return pack_into(self.format, buffer, offset, *args) - - def unpack_from(self, buffer, offset=0): - return unpack_from(self.format, buffer, offset) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -1,15 +1,22 @@ -from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import OperationError -from pypy.module.struct.formatiterator import PackFormatIterator, UnpackFormatIterator from rpython.rlib import jit from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator +from rpython.tool.sourcetools import func_with_new_name + +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import OperationError +from pypy.interpreter.typedef import TypeDef, interp_attrproperty +from pypy.module.struct.formatiterator import ( + PackFormatIterator, UnpackFormatIterator +) @unwrap_spec(format=str) def calcsize(space, format): return space.wrap(_calcsize(space, format)) + def _calcsize(space, format): fmtiter = CalcSizeFormatIterator() try: @@ -52,3 +59,44 @@ w_error = space.getattr(w_module, space.wrap('error')) raise OperationError(w_error, space.wrap(e.msg)) return space.newtuple(fmtiter.result_w[:]) + + +class W_Struct(W_Root): + _immutable_fields_ = ["format", "size"] + + def __init__(self, space, format): + self.format = format + self.size = _calcsize(space, format) + + @unwrap_spec(format=str) + def descr__new__(space, w_subtype, format): + self = space.allocate_instance(W_Struct, w_subtype) + W_Struct.__init__(self, space, format) + return self + + def wrap_struct_method(name): + def impl(self, space, __args__): + w_module = space.getbuiltinmodule('struct') + w_method = space.getattr(w_module, space.wrap(name)) + return space.call_obj_args( + w_method, space.wrap(self.format), __args__ + ) + + return func_with_new_name(impl, 'descr_' + name) + + descr_pack = wrap_struct_method("pack") + descr_unpack = wrap_struct_method("unpack") + descr_pack_into = wrap_struct_method("pack_into") + descr_unpack_from = wrap_struct_method("unpack_from") + + +W_Struct.typedef = TypeDef("Struct", + __new__=interp2app(W_Struct.descr__new__.im_func), + format=interp_attrproperty("format", cls=W_Struct), + size=interp_attrproperty("size", cls=W_Struct), + + pack=interp2app(W_Struct.descr_pack), + unpack=interp2app(W_Struct.descr_unpack), + pack_into=interp2app(W_Struct.descr_pack_into), + unpack_from=interp2app(W_Struct.descr_unpack_from), +) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -35,13 +35,7 @@ if (isinstance(self, W_BytearrayObject) and space.isinstance_w(w_sub, space.w_int)): char = space.int_w(w_sub) - if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) - for c in self.data: - if ord(c) == char: - return space.w_True - return space.w_False + return _descr_contains_bytearray(self.data, space, char) return 
space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) def descr_add(self, space, w_other): @@ -79,7 +73,7 @@ assert start >= 0 and stop >= 0 return self._sliced(space, selfvalue, start, stop, self) else: - ret = [selfvalue[start + i*step] for i in range(sl)] + ret = _descr_getslice_slowpath(selfvalue, start, step, sl) return self._new_from_list(ret) index = space.getindex_w(w_index, space.w_IndexError, "string index") @@ -253,17 +247,21 @@ return self._is_generic(space, '_isdigit') # this is only for bytes and bytesarray: unicodeobject overrides it + def _descr_islower_slowpath(self, space, v): + cased = False + for idx in range(len(v)): + if self._isupper(v[idx]): + return False + elif not cased and self._islower(v[idx]): + cased = True + return cased + def descr_islower(self, space): v = self._val(space) if len(v) == 1: c = v[0] return space.newbool(self._islower(c)) - cased = False - for idx in range(len(v)): - if self._isupper(v[idx]): - return space.w_False - elif not cased and self._islower(v[idx]): - cased = True + cased = self._descr_islower_slowpath(space, v) return space.newbool(cased) def descr_isspace(self, space): @@ -291,17 +289,21 @@ return space.newbool(cased) # this is only for bytes and bytesarray: unicodeobject overrides it + def _descr_isupper_slowpath(self, space, v): + cased = False + for idx in range(len(v)): + if self._islower(v[idx]): + return False + elif not cased and self._isupper(v[idx]): + cased = True + return cased + def descr_isupper(self, space): v = self._val(space) if len(v) == 1: c = v[0] return space.newbool(self._isupper(c)) - cased = False - for idx in range(len(v)): - if self._islower(v[idx]): - return space.w_False - elif not cased and self._isupper(v[idx]): - cased = True + cased = self._descr_isupper_slowpath(space, v) return space.newbool(cased) def descr_join(self, space, w_list): @@ -677,3 +679,19 @@ def descr_getnewargs(self, space): return space.newtuple([self._new(self._val(space))]) + +# ____________________________________________________________ +# helpers for slow paths, moved out because they contain loops + +def _descr_contains_bytearray(data, space, char): + if not 0 <= char < 256: + raise OperationError(space.w_ValueError, + space.wrap("byte must be in range(0, 256)")) + for c in data: + if ord(c) == char: + return space.w_True + return space.w_False + + at specialize.argtype(0) +def _descr_getslice_slowpath(selfvalue, start, step, sl): + return [selfvalue[start + i*step] for i in range(sl)] diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -454,17 +454,19 @@ unicode_ofs_length = self.unicode_descr.lendescr.offset def malloc_str(length): + type_id = llop.extract_ushort(llgroup.HALFWORD, str_type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - str_type_id, length, str_basesize, str_itemsize, + type_id, length, str_basesize, str_itemsize, str_ofs_length) self.generate_function('malloc_str', malloc_str, [lltype.Signed]) def malloc_unicode(length): + type_id = llop.extract_ushort(llgroup.HALFWORD, unicode_type_id) return llop1.do_malloc_varsize_clear( llmemory.GCREF, - unicode_type_id, length, unicode_basesize, unicode_itemsize, + type_id, length, unicode_basesize, unicode_itemsize, unicode_ofs_length) self.generate_function('malloc_unicode', malloc_unicode, [lltype.Signed]) diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ 
b/rpython/rlib/ropenssl.py @@ -56,9 +56,17 @@ ASN1_STRING = lltype.Ptr(lltype.ForwardReference()) ASN1_ITEM = rffi.COpaquePtr('ASN1_ITEM') -ASN1_ITEM_EXP = lltype.Ptr(lltype.FuncType([], ASN1_ITEM)) X509_NAME = rffi.COpaquePtr('X509_NAME') +class CConfigBootstrap: + _compilation_info_ = eci + OPENSSL_EXPORT_VAR_AS_FUNCTION = rffi_platform.Defined( + "OPENSSL_EXPORT_VAR_AS_FUNCTION") +if rffi_platform.configure(CConfigBootstrap)["OPENSSL_EXPORT_VAR_AS_FUNCTION"]: + ASN1_ITEM_EXP = lltype.Ptr(lltype.FuncType([], ASN1_ITEM)) +else: + ASN1_ITEM_EXP = ASN1_ITEM + class CConfig: _compilation_info_ = eci @@ -128,8 +136,6 @@ ('block_size', rffi.INT)]) EVP_MD_SIZE = rffi_platform.SizeOf('EVP_MD') EVP_MD_CTX_SIZE = rffi_platform.SizeOf('EVP_MD_CTX') - OPENSSL_EXPORT_VAR_AS_FUNCTION = rffi_platform.Defined( - "OPENSSL_EXPORT_VAR_AS_FUNCTION") OBJ_NAME_st = rffi_platform.Struct( 'OBJ_NAME', @@ -259,10 +265,7 @@ ssl_external('i2a_ASN1_INTEGER', [BIO, ASN1_INTEGER], rffi.INT) ssl_external('ASN1_item_d2i', [rffi.VOIDP, rffi.CCHARPP, rffi.LONG, ASN1_ITEM], rffi.VOIDP) -if OPENSSL_EXPORT_VAR_AS_FUNCTION: - ssl_external('ASN1_ITEM_ptr', [ASN1_ITEM_EXP], ASN1_ITEM, macro=True) -else: - ssl_external('ASN1_ITEM_ptr', [rffi.VOIDP], ASN1_ITEM, macro=True) +ssl_external('ASN1_ITEM_ptr', [ASN1_ITEM_EXP], ASN1_ITEM, macro=True) ssl_external('sk_GENERAL_NAME_num', [GENERAL_NAMES], rffi.INT, macro=True) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -167,7 +167,7 @@ # # This list corresponds to the operations implemented by the LLInterpreter. # Note that many exception-raising operations can be replaced by calls -# to helper functions in rpython.rtyper.raisingops.raisingops. +# to helper functions in rpython.rtyper.raisingops. # ***** Run test_lloperation after changes. ***** LL_OPERATIONS = { diff --git a/rpython/rtyper/raisingops/raisingops.py b/rpython/rtyper/raisingops.py rename from rpython/rtyper/raisingops/raisingops.py rename to rpython/rtyper/raisingops.py diff --git a/rpython/rtyper/raisingops/__init__.py b/rpython/rtyper/raisingops/__init__.py deleted file mode 100644 diff --git a/rpython/translator/backendopt/raisingop2direct_call.py b/rpython/translator/backendopt/raisingop2direct_call.py --- a/rpython/translator/backendopt/raisingop2direct_call.py +++ b/rpython/translator/backendopt/raisingop2direct_call.py @@ -1,5 +1,5 @@ from rpython.translator.backendopt.support import log, all_operations, annotate -import rpython.rtyper.raisingops.raisingops +import rpython.rtyper.raisingops log = log.raisingop2directcall @@ -15,7 +15,7 @@ def raisingop2direct_call(translator, graphs=None): """search for operations that could raise an exception and change that - operation into a direct_call to a function from the raisingops directory. + operation into a direct_call to a function from the raisingops module. This function also needs to be annotated and specialized. 
note: this could be extended to allow for any operation to be changed into @@ -30,7 +30,7 @@ for op in all_operations(graphs): if not is_raisingop(op): continue - func = getattr(rpython.rtyper.raisingops.raisingops, op.opname, None) + func = getattr(rpython.rtyper.raisingops, op.opname, None) if not func: log.warning("%s not found" % op.opname) continue diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -33,13 +33,6 @@ # TODO: # sanity-checks using states -_BACKEND_TO_TYPESYSTEM = { - 'c': 'lltype', -} - -def backend_to_typesystem(backend): - return _BACKEND_TO_TYPESYSTEM[backend] - # set of translation steps to profile PROFILE = set([]) @@ -132,7 +125,7 @@ if backend == postfix: expose_task(task, explicit_task) elif ts: - if ts == backend_to_typesystem(postfix): + if ts == 'lltype': expose_task(explicit_task) else: expose_task(explicit_task) From noreply at buildbot.pypy.org Tue Jan 21 16:20:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 16:20:45 +0100 (CET) Subject: [pypy-commit] pypy default: A failing test about heapcache Message-ID: <20140121152045.427FE1C319A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68818:a7b725461c8a Date: 2014-01-21 16:19 +0100 http://bitbucket.org/pypy/pypy/changeset/a7b725461c8a/ Log: A failing test about heapcache diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -3952,3 +3952,21 @@ res = self.interp_operations(f, []) assert res == 2 self.check_operations_history(call_release_gil=1, call_may_force=0) + + def test_unescaped_write_zero(self): + class A: + pass + def g(): + return A() + @dont_look_inside + def escape(): + print "hi!" + def f(n): + a = g() + a.x = n + escape() + a.x = 0 + escape() + return a.x + res = self.interp_operations(f, [42]) + assert res == 0 From noreply at buildbot.pypy.org Tue Jan 21 16:20:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 16:20:46 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140121152046.797A31C319A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68819:7d0474323a1d Date: 2014-01-21 16:19 +0100 http://bitbucket.org/pypy/pypy/changeset/7d0474323a1d/ Log: merge heads diff --git a/rpython/jit/backend/llgraph/support.py b/rpython/jit/backend/llgraph/support.py --- a/rpython/jit/backend/llgraph/support.py +++ b/rpython/jit/backend/llgraph/support.py @@ -67,10 +67,11 @@ if isinstance(TYPE, lltype.Ptr): if isinstance(x, (int, long, llmemory.AddressAsInt)): x = llmemory.cast_int_to_adr(x) - #if repr(x.ptr).startswith('<* Checkpoint' fork. 
""" -from rpython.conftest import option +import os +from rpython import conftest from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.annlowlevel import llstr from rpython.jit.metainterp import warmspot -from rpython.rlib.jit import OPTIMIZER_FULL +from rpython.tool import runsubprocess +os.environ['PYPY_DONT_RUN_SUBPROCESS'] = '1' +reload(runsubprocess) -ARGS = ["jittest", "100"] + +ARGS = ["--jit", "threshold=100000,trace_eagerness=100000", + "-S", "/home/arigo/pypysrc/32compiled/z.py"] def jittest(driver): - graph = driver.translator.graphs[0] - interp = LLInterpreter(driver.translator.rtyper, malloc_check=False) + graph = driver.translator._graphof(driver.entry_point) + interp = LLInterpreter(driver.translator.rtyper) def returns_null(T, *args, **kwds): return lltype.nullptr(T) @@ -32,12 +37,16 @@ def apply_jit(policy, interp, graph, CPUClass): print 'warmspot.jittify_and_run() started...' - option.view = True + if conftest.option is None: + class MyOpt: + pass + conftest.option = MyOpt() + conftest.option.view = False + conftest.option.viewloops = True # XXX doesn't seem to work LIST = graph.getargs()[0].concretetype lst = LIST.TO.ll_newlist(len(ARGS)) for i, arg in enumerate(ARGS): lst.ll_setitem_fast(i, llstr(arg)) warmspot.jittify_and_run(interp, graph, [lst], policy=policy, listops=True, CPUClass=CPUClass, - backendopt=True, inline=True, - optimizer=OPTIMIZER_FULL) + backendopt=True, inline=True) diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -56,9 +56,17 @@ ASN1_STRING = lltype.Ptr(lltype.ForwardReference()) ASN1_ITEM = rffi.COpaquePtr('ASN1_ITEM') -ASN1_ITEM_EXP = lltype.Ptr(lltype.FuncType([], ASN1_ITEM)) X509_NAME = rffi.COpaquePtr('X509_NAME') +class CConfigBootstrap: + _compilation_info_ = eci + OPENSSL_EXPORT_VAR_AS_FUNCTION = rffi_platform.Defined( + "OPENSSL_EXPORT_VAR_AS_FUNCTION") +if rffi_platform.configure(CConfigBootstrap)["OPENSSL_EXPORT_VAR_AS_FUNCTION"]: + ASN1_ITEM_EXP = lltype.Ptr(lltype.FuncType([], ASN1_ITEM)) +else: + ASN1_ITEM_EXP = ASN1_ITEM + class CConfig: _compilation_info_ = eci @@ -128,8 +136,6 @@ ('block_size', rffi.INT)]) EVP_MD_SIZE = rffi_platform.SizeOf('EVP_MD') EVP_MD_CTX_SIZE = rffi_platform.SizeOf('EVP_MD_CTX') - OPENSSL_EXPORT_VAR_AS_FUNCTION = rffi_platform.Defined( - "OPENSSL_EXPORT_VAR_AS_FUNCTION") OBJ_NAME_st = rffi_platform.Struct( 'OBJ_NAME', @@ -259,10 +265,7 @@ ssl_external('i2a_ASN1_INTEGER', [BIO, ASN1_INTEGER], rffi.INT) ssl_external('ASN1_item_d2i', [rffi.VOIDP, rffi.CCHARPP, rffi.LONG, ASN1_ITEM], rffi.VOIDP) -if OPENSSL_EXPORT_VAR_AS_FUNCTION: - ssl_external('ASN1_ITEM_ptr', [ASN1_ITEM_EXP], ASN1_ITEM, macro=True) -else: - ssl_external('ASN1_ITEM_ptr', [rffi.VOIDP], ASN1_ITEM, macro=True) +ssl_external('ASN1_ITEM_ptr', [ASN1_ITEM_EXP], ASN1_ITEM, macro=True) ssl_external('sk_GENERAL_NAME_num', [GENERAL_NAMES], rffi.INT, macro=True) diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -33,13 +33,6 @@ # TODO: # sanity-checks using states -_BACKEND_TO_TYPESYSTEM = { - 'c': 'lltype', -} - -def backend_to_typesystem(backend): - return _BACKEND_TO_TYPESYSTEM[backend] - # set of translation steps to profile PROFILE = set([]) @@ -132,7 +125,7 @@ if backend == postfix: expose_task(task, explicit_task) elif ts: - if ts == backend_to_typesystem(postfix): + if ts == 'lltype': expose_task(explicit_task) 
else: expose_task(explicit_task) From noreply at buildbot.pypy.org Tue Jan 21 16:32:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 16:32:26 +0100 (CET) Subject: [pypy-commit] pypy default: Bah, we can also get AttributeError. Give up and catch Exception Message-ID: <20140121153226.4DF131D23D0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68820:f84c2a8494be Date: 2014-01-21 16:31 +0100 http://bitbucket.org/pypy/pypy/changeset/f84c2a8494be/ Log: Bah, we can also get AttributeError. Give up and catch Exception diff --git a/rpython/jit/backend/llgraph/support.py b/rpython/jit/backend/llgraph/support.py --- a/rpython/jit/backend/llgraph/support.py +++ b/rpython/jit/backend/llgraph/support.py @@ -69,7 +69,7 @@ x = llmemory.cast_int_to_adr(x) try: # pom pom pom return llmemory.cast_adr_to_ptr(x, TYPE) - except (TypeError, RuntimeError, NotImplementedError, ValueError): + except Exception: # assume that we want a "C-style" cast, without typechecking the value return rffi.cast(TYPE, x) elif TYPE == llmemory.Address: diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -594,9 +594,9 @@ if tobox is not None: # sanity check: see whether the current struct value # corresponds to what the cache thinks the value is - #resbox = executor.execute(self.metainterp.cpu, self.metainterp, - # rop.GETFIELD_GC, fielddescr, box) - # XXX the sanity check does not seem to do anything, remove? + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC, fielddescr, box) + assert resbox.constbox().same_constant(tobox.constbox()) return tobox resbox = self.execute_with_descr(opnum, fielddescr, box) self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox) From noreply at buildbot.pypy.org Tue Jan 21 16:41:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 16:41:11 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the bug for now by letting the SETFIELD_GC operation through. Message-ID: <20140121154111.662081C00E3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68821:d865b2499a5f Date: 2014-01-21 16:38 +0100 http://bitbucket.org/pypy/pypy/changeset/d865b2499a5f/ Log: Fix the bug for now by letting the SETFIELD_GC operation through. I believe that it should be killed by the optimizers anyway. diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -623,7 +623,7 @@ tobox = self.metainterp.heapcache.getfield(box, fielddescr) if tobox is valuebox: return - if tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): + if 1: # tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) self.metainterp.heapcache.setfield(box, valuebox, fielddescr) opimpl_setfield_gc_i = _opimpl_setfield_gc_any From noreply at buildbot.pypy.org Tue Jan 21 16:41:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 16:41:12 +0100 (CET) Subject: [pypy-commit] pypy default: Skip this test for now. 
Message-ID: <20140121154112.9D1491C00E3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68822:17941d88a490 Date: 2014-01-21 16:40 +0100 http://bitbucket.org/pypy/pypy/changeset/17941d88a490/ Log: Skip this test for now. diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -1,4 +1,4 @@ -import sys +import sys, py from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib import jit @@ -647,6 +647,7 @@ self.check_operations_history(guard_class=0) def test_dont_record_setfield_gc_zeros(self): + py.test.skip("see test_unescaped_write_zero in test_ajit") class A(object): pass From noreply at buildbot.pypy.org Tue Jan 21 16:49:05 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 21 Jan 2014 16:49:05 +0100 (CET) Subject: [pypy-commit] stmgc c7: make test pass Message-ID: <20140121154905.94D371C010E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r660:33eb0c7ce3c8 Date: 2014-01-21 16:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/33eb0c7ce3c8/ Log: make test pass diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -55,7 +55,8 @@ bool need_abort; char *thread_base; struct stm_list_s *modified_objects; - struct stm_list_s *new_object_ranges; + struct stm_list_s *uncommitted_objects; + struct stm_list_s *uncommitted_object_ranges; struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; localchar_t *nursery_current; object_t **old_shadow_stack; @@ -345,6 +346,36 @@ } } +static void push_uncommitted_to_other_threads() +{ + /* WE HAVE THE EXCLUSIVE LOCK HERE */ + + struct stm_list_s *uncommitted = _STM_TL2->uncommitted_objects; + char *local_base = _STM_TL2->thread_base; + char *remote_base = get_thread_base(1 - _STM_TL2->thread_num); + + STM_LIST_FOREACH( + uncommitted, + ({ + /* write-lock always cleared for these objects */ + uintptr_t lock_idx; + assert(lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START); + assert(!write_locks[lock_idx]); + + /* remove the flag (they are now committed) */ + item->stm_flags &= ~GCFLAG_NOT_COMMITTED; + + uintptr_t pagenum = ((uintptr_t)item) / 4096UL; + if (flag_page_private[pagenum] == PRIVATE_PAGE) { + /* page was privatized... */ + char *src = REAL_ADDRESS(local_base, item); + char *dst = REAL_ADDRESS(remote_base, item); + size_t size = stmcb_size((struct object_s*)src); + memcpy(dst, src, size); + } + })); +} + @@ -353,9 +384,8 @@ { uintptr_t pagenum = ((uintptr_t)obj) / 4096; assert(pagenum < NB_PAGES); - - _STM_TL2->old_objects_to_trace = stm_list_append - (_STM_TL2->old_objects_to_trace, obj); + + LIST_APPEND(_STM_TL2->old_objects_to_trace, obj); /* for old objects from the same transaction we don't need to privatize the page */ @@ -389,8 +419,7 @@ obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; stm_read(obj); - _STM_TL2->modified_objects = stm_list_append - (_STM_TL2->modified_objects, obj); + LIST_APPEND(_STM_TL2->modified_objects, obj); } @@ -422,15 +451,15 @@ ((start) = (uint16_t)(uintptr_t)(range), \ (stop) = ((uintptr_t)(range)) >> 16) -localchar_t *_stm_alloc_old(size_t size) +object_t *_stm_alloc_old(size_t size) { - localchar_t *result; + object_t *result; size_t size_class = size / 8; assert(size_class >= 2); if (size_class >= LARGE_OBJECT_WORDS) { - result = (localchar_t*)_stm_allocate_old(size); - ((object_t*)result)->stm_flags &= ~GCFLAG_WRITE_BARRIER; /* added by _stm_allocate_old... 
*/ + result = _stm_allocate_old(size); + result->stm_flags &= ~GCFLAG_WRITE_BARRIER; /* added by _stm_allocate_old... */ int page = ((uintptr_t)result) / 4096; int pages = (size + 4095) / 4096; @@ -440,18 +469,22 @@ } } else { alloc_for_size_t *alloc = &_STM_TL2->alloc[size_class]; - - if ((uint16_t)((uintptr_t)alloc->next) == alloc->stop) - result = _stm_alloc_next_page(size_class); - else { - result = alloc->next; + + if ((uint16_t)((uintptr_t)alloc->next) == alloc->stop) { + result = (object_t *)_stm_alloc_next_page(size_class); + } else { + result = (object_t *)alloc->next; alloc->next += size; + if (alloc->flag_partial_page) { + LIST_APPEND(_STM_TL2->uncommitted_objects, result); + result->stm_flags |= GCFLAG_NOT_COMMITTED; + } } } return result; } -localchar_t *_stm_alloc_next_page(size_t i) +localchar_t *_stm_alloc_next_page(size_t size_class) { /* 'alloc->next' points to where the next allocation should go. The present function is called instead when this next allocation is @@ -459,27 +492,11 @@ 'stop' are always nearby pointers, we play tricks and only store the lower 16 bits of 'start' and 'stop', so that the three variables plus some flags fit in 16 bytes. - - 'flag_partial_page' is *cleared* to mean that the 'alloc' - describes a complete page, so that it needs not be listed inside - 'new_object_ranges'. In all other cases it is *set*. */ uintptr_t page; localchar_t *result; - alloc_for_size_t *alloc = &_STM_TL2->alloc[i]; - size_t size = i * 8; - - /* if (alloc->flag_partial_page) { */ - /* /\* record this range in 'new_object_ranges' *\/ */ - /* localchar_t *ptr1 = alloc->next - size - 1; */ - /* object_t *range; */ - /* TO_RANGE(range, alloc->start, alloc->stop); */ - /* page = ((uintptr_t)ptr1) / 4096; */ - /* _STM_TL2->new_object_ranges = stm_list_append( */ - /* _STM_TL2->new_object_ranges, (object_t *)page); */ - /* _STM_TL2->new_object_ranges = stm_list_append( */ - /* _STM_TL2->new_object_ranges, range); */ - /* } */ + alloc_for_size_t *alloc = &_STM_TL2->alloc[size_class]; + size_t size = size_class * 8; /* reserve a fresh new page */ page = _stm_reserve_pages(1); @@ -499,8 +516,7 @@ void mark_page_as_uncommitted(uintptr_t pagenum) { flag_page_private[pagenum] = UNCOMMITTED_SHARED_PAGE; - _STM_TL2->uncommitted_pages = stm_list_append - (_STM_TL2->uncommitted_pages, (object_t*)pagenum); + LIST_APPEND(_STM_TL2->uncommitted_pages, (object_t*)pagenum); } void trace_if_young(object_t **pobj) @@ -521,6 +537,10 @@ /* move obj to somewhere else */ size_t size = stmcb_size(real_address(*pobj)); object_t *moved = (object_t*)_stm_alloc_old(size); + + if (moved->stm_flags & GCFLAG_NOT_COMMITTED) + (*pobj)->stm_flags |= GCFLAG_NOT_COMMITTED; /* XXX: memcpy below overwrites this otherwise. 
+ find better solution.*/ memcpy((void*)real_address(moved), (void*)real_address(*pobj), @@ -530,8 +550,7 @@ *pforwarded = moved; *pobj = moved; - _STM_TL2->old_objects_to_trace = stm_list_append - (_STM_TL2->old_objects_to_trace, moved); + LIST_APPEND(_STM_TL2->old_objects_to_trace, moved); } void minor_collect() @@ -700,6 +719,7 @@ _STM_TL2->uncommitted_pages = stm_list_create(); _STM_TL2->modified_objects = stm_list_create(); + _STM_TL2->uncommitted_objects = stm_list_create(); assert(!_STM_TL2->running_transaction); } @@ -716,6 +736,10 @@ stm_list_free(_STM_TL2->modified_objects); _STM_TL2->modified_objects = NULL; + assert(stm_list_is_empty(_STM_TL2->uncommitted_objects)); + stm_list_free(_STM_TL2->uncommitted_objects); + _STM_TL2->uncommitted_objects = NULL; + assert(_STM_TL1->shadow_stack == _STM_TL1->shadow_stack_base); free(_STM_TL1->shadow_stack); @@ -831,6 +855,10 @@ push_modified_to_other_threads(); stm_list_clear(_STM_TL2->modified_objects); + /* uncommitted objects / partially COMMITTED pages */ + push_uncommitted_to_other_threads(); + stm_list_clear(_STM_TL2->uncommitted_objects); + /* uncommitted_pages */ long j; for (j = 2; j < LARGE_OBJECT_WORDS; j++) { @@ -841,12 +869,8 @@ continue; uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; if (flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE) { - /* mark it as empty so it doesn't get used in the next - transaction */ - /* XXX: flag_partial_page!! */ - alloc->start = 0; - alloc->next = 0; - alloc->stop = 0; + /* becomes a SHARED (s.b.) partially used page */ + alloc->flag_partial_page = 1; } } @@ -858,12 +882,14 @@ })); stm_list_clear(_STM_TL2->uncommitted_pages); + - /* /\* walk the new_object_ranges and manually copy the new objects */ + + /* /\* walk the uncommitted_object_ranges and manually copy the new objects */ /* to the other thread's pages in the (hopefully rare) case that */ /* the page they belong to is already unshared *\/ */ /* long i; */ - /* struct stm_list_s *lst = _STM_TL2->new_object_ranges; */ + /* struct stm_list_s *lst = _STM_TL2->uncommitted_object_ranges; */ /* for (i = stm_list_count(lst); i > 0; ) { */ /* i -= 2; */ /* uintptr_t pagenum = (uintptr_t)stm_list_item(lst, i); */ @@ -1001,7 +1027,7 @@ /* uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; */ /* alloc->next -= num_allocated; */ /* } */ - /* stm_list_clear(_STM_TL2->new_object_ranges); */ + /* stm_list_clear(_STM_TL2->uncommitted_object_ranges); */ assert(_STM_TL1->jmpbufptr != NULL); diff --git a/c7/list.h b/c7/list.h --- a/c7/list.h +++ b/c7/list.h @@ -33,6 +33,10 @@ return lst; } +#define LIST_APPEND(lst, e) { \ + lst = stm_list_append(lst, e); \ + } + static inline void stm_list_clear(struct stm_list_s *lst) { lst->count = 0; diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -418,6 +418,11 @@ assert stm_get_flags(newer) & lib.GCFLAG_NOT_COMMITTED stm_write(newer) # does not privatize assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == lib.SHARED_PAGE + stm_stop_transaction() + + assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == lib.SHARED_PAGE + assert not (stm_get_flags(newer) & lib.GCFLAG_NOT_COMMITTED) + # def test_resolve_write_write_no_conflict(self): From noreply at buildbot.pypy.org Tue Jan 21 16:50:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 16:50:10 +0100 (CET) Subject: [pypy-commit] pypy default: Add comment Message-ID: <20140121155010.933531C3360@cobra.cs.uni-duesseldorf.de> Author: Armin 
Rigo Branch: Changeset: r68823:68a4e65eba06 Date: 2014-01-21 16:49 +0100 http://bitbucket.org/pypy/pypy/changeset/68a4e65eba06/ Log: Add comment diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -623,6 +623,10 @@ tobox = self.metainterp.heapcache.getfield(box, fielddescr) if tobox is valuebox: return + # The following test is disabled because buggy. It is supposed + # to be: not(we're writing null into a freshly allocated object) + # but the bug is that is_unescaped() can be True even after the + # field cache is cleared --- see test_ajit:test_unescaped_write_zero if 1: # tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) self.metainterp.heapcache.setfield(box, valuebox, fielddescr) From noreply at buildbot.pypy.org Tue Jan 21 17:16:54 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 21 Jan 2014 17:16:54 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: Merge default Message-ID: <20140121161654.629AB1C3970@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68824:2c1b044dd0a4 Date: 2014-01-21 17:15 +0100 http://bitbucket.org/pypy/pypy/changeset/2c1b044dd0a4/ Log: Merge default diff too long, truncating to 2000 out of 12268 lines diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. 
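To make the memoryview test change above concrete, the behaviour being described is roughly the following; the CPython result comes from the assertions that are now guarded by check_impl_detail(cpython=True), and the PyPy result is what the new comment claims, so treat this as an illustration of the test rather than independently verified output.

    m = memoryview("abcdef")
    # CPython 2.7: False (these are the assertions now guarded by
    # check_impl_detail); PyPy, per the comment in the diff: True
    print(m == u"abcdef")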
See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys try: from __pypy__ import builtinify diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -5,7 +5,7 @@ from _ctypes.basics import is_struct_shape from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys import traceback diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import sizeof, byref, as_ffi_pointer diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi import weakref import sys diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -2,6 +2,8 @@ import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject +from _ctypes.array import Array +from 
_ctypes.pointer import _Pointer import inspect def names_and_fields(self, _fields_, superclass, anonymous_fields=None): @@ -104,8 +106,11 @@ def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = keepalive_key(self.num) + key = keepalive_key(self.num) + if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array): + # if our value is an Array we need the whole thing alive + store_reference(obj, key, cobj) + elif ensure_objects(cobj) is not None: store_reference(obj, key, cobj._objects) arg = cobj._get_buffer_value() if fieldtype._fficompositesize is not None: diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,2 @@ +# Backward compatibility hack +from _rawffi.alt import * diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -34,14 +34,14 @@ "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "micronumpy", "_ffi", + "_collections", "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "_md5", "cStringIO", "array", "_ffi", + "struct", "_md5", "cStringIO", "array", "binascii", # the following are needed for pyrepl (and hence for the # interactive prompt/pdb) @@ -96,7 +96,6 @@ # no _rawffi if importing rpython.rlib.clibffi raises ImportError # or CompilationError or py.test.skip.Exception "_rawffi" : ["rpython.rlib.clibffi"], - "_ffi" : ["rpython.rlib.clibffi"], "zlib" : ["rpython.rlib.rzlib"], "bz2" : ["pypy.module.bz2.interp_bz2"], diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -109,6 +109,4 @@ .. _`rpython/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ .. _`rpython/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/ .. _`rpython/translator/c/src/stacklet/stacklet.h`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/stacklet.h -.. _`rpython/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/cli/ -.. _`rpython/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/jvm/ .. _`rpython/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/ diff --git a/pypy/doc/config/translation.lldebug0.txt b/pypy/doc/config/translation.lldebug0.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/translation.lldebug0.txt @@ -0,0 +1,1 @@ +Like lldebug, but in addition compile C files with -O0 diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -74,6 +74,10 @@ The actual details would be rather differen in PyPy, but we would like to have the same optimization implemented. +Or maybe not. We can also play around with the idea of using a single +representation: as a byte string in utf-8. (This idea needs some extra logic +for efficient indexing, like a cache.) + .. 
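The "extra logic for efficient indexing" mentioned in the project-ideas.rst hunk just above could look roughly like the sketch below: keep the string as utf-8 bytes plus a single cached (character index, byte offset) pair, so that forward iteration and repeated nearby indexing stay cheap. This is only an illustration of the idea, not code from PyPy; all names are invented.

    def _char_len(lead_byte):
        # number of utf-8 bytes taken by the character starting at this byte
        b = ord(lead_byte)
        if b < 0x80:
            return 1
        if b < 0xE0:
            return 2
        if b < 0xF0:
            return 3
        return 4

    class Utf8Str(object):
        """A unicode string kept as utf-8 bytes, with a one-entry index cache."""

        def __init__(self, u):
            self._bytes = u.encode('utf-8')
            self._cache = (0, 0)        # (character index, byte offset)

        def char_at(self, index):
            start_index, offset = self._cache
            if index < start_index:     # the cache only helps when going forward
                start_index, offset = 0, 0
            for _ in range(index - start_index):
                offset += _char_len(self._bytes[offset:offset + 1])
            self._cache = (index, offset)
            end = offset + _char_len(self._bytes[offset:offset + 1])
            return self._bytes[offset:end].decode('utf-8')

For example, Utf8Str(u"h\xe9llo").char_at(1) returns u"\xe9" without decoding the whole string, and a loop that walks characters in order reuses the cached offset at every step instead of rescanning from the start.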
_`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/ Translation Toolchain diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -41,3 +41,10 @@ Fix 3 broken links on PyPy published papers in docs. .. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -26,7 +26,12 @@ if branch == 'default': branch = 'trunk' -filename = 'pypy-c-jit-latest-%s.tar.bz2' % arch +if '--nojit' in sys.argv: + kind = 'nojit' +else: + kind = 'jit' + +filename = 'pypy-c-%s-latest-%s.tar.bz2' % (kind, arch) url = 'http://buildbot.pypy.org/nightly/%s/%s' % (branch, filename) tmp = py.path.local.mkdtemp() mydir = tmp.chdir() diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1234,6 +1234,8 @@ flags |= consts.CO_NESTED if scope.is_generator: flags |= consts.CO_GENERATOR + if scope.has_yield_inside_try: + flags |= consts.CO_YIELD_INSIDE_TRY if scope.has_variable_arg: flags |= consts.CO_VARARGS if scope.has_keywords_arg: diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -17,6 +17,7 @@ CO_FUTURE_UNICODE_LITERALS = 0x20000 #pypy specific: CO_KILL_DOCSTRING = 0x100000 +CO_YIELD_INSIDE_TRY = 0x200000 PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -43,6 +43,7 @@ self.child_has_free = False self.nested = False self.doc_removable = False + self._in_try_body_depth = 0 def lookup(self, name): """Find the scope of identifier 'name'.""" @@ -75,6 +76,14 @@ self.varnames.append(mangled) return mangled + def note_try_start(self, try_node): + """Called when a try is found, before visiting the body.""" + self._in_try_body_depth += 1 + + def note_try_end(self, try_node): + """Called after visiting a try body.""" + self._in_try_body_depth -= 1 + def note_yield(self, yield_node): """Called when a yield is found.""" raise SyntaxError("'yield' outside function", yield_node.lineno, @@ -210,6 +219,7 @@ self.has_variable_arg = False self.has_keywords_arg = False self.is_generator = False + self.has_yield_inside_try = False self.optimized = True self.return_with_value = False self.import_star = None @@ -220,6 +230,8 @@ raise SyntaxError("'return' with argument inside generator", self.ret.lineno, self.ret.col_offset) self.is_generator = True + if self._in_try_body_depth > 0: + self.has_yield_inside_try = True def note_return(self, ret): if ret.value: @@ -463,7 +475,12 @@ self.scope.new_temporary_name() if wih.optional_vars: self.scope.new_temporary_name() - ast.GenericASTVisitor.visit_With(self, wih) + wih.context_expr.walkabout(self) + if wih.optional_vars: + wih.optional_vars.walkabout(self) + self.scope.note_try_start(wih) + self.visit_sequence(wih.body) + self.scope.note_try_end(wih) def 
visit_arguments(self, arguments): scope = self.scope @@ -505,3 +522,16 @@ else: role = SYM_ASSIGNED self.note_symbol(name.id, role) + + def visit_TryExcept(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.handlers) + self.visit_sequence(node.orelse) + + def visit_TryFinally(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.finalbody) diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -346,6 +346,25 @@ assert exc.msg == "'return' with argument inside generator" scp = self.func_scope("def f():\n return\n yield x") + def test_yield_inside_try(self): + scp = self.func_scope("def f(): yield x") + assert not scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n except: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n finally: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n with x: yield y") + assert scp.has_yield_inside_try + + def test_yield_outside_try(self): + for input in ("try: pass\n except: pass", + "try: pass\n except: yield y", + "try: pass\n finally: pass", + "try: pass\n finally: yield y", + "with x: pass"): + input = "def f():\n yield y\n %s\n yield y" % (input,) + assert not self.func_scope(input).has_yield_inside_try + def test_return(self): for input in ("class x: return", "return"): exc = py.test.raises(SyntaxError, self.func_scope, input).value diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -231,6 +231,11 @@ msg = "__int__ returned non-int (type '%T')" raise operationerrfmt(space.w_TypeError, msg, w_result) + def ord(self, space): + typename = space.type(self).getname(space) + msg = "ord() expected string of length 1, but %s found" + raise operationerrfmt(space.w_TypeError, msg, typename) + def __spacebind__(self, space): return self @@ -1396,6 +1401,9 @@ # This is here mostly just for gateway.int_unwrapping_space_method(). return bool(self.int_w(w_obj)) + def ord(self, w_obj): + return w_obj.ord(self) + # This is all interface for gateway.py. def gateway_int_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -520,12 +520,13 @@ # When a BuiltinCode is stored in a Function object, # you get the functionality of CPython's built-in function type. - def __init__(self, func, unwrap_spec=None, self_type=None, descrmismatch=None): + def __init__(self, func, unwrap_spec=None, self_type=None, + descrmismatch=None, doc=None): "NOT_RPYTHON" # 'implfunc' is the interpreter-level function. # Note that this uses a lot of (construction-time) introspection. 
Code.__init__(self, func.__name__) - self.docstring = func.__doc__ + self.docstring = doc or func.__doc__ self.identifier = "%s-%s-%s" % (func.__module__, func.__name__, getattr(self_type, '__name__', '*')) @@ -832,7 +833,7 @@ instancecache = {} def __new__(cls, f, app_name=None, unwrap_spec=None, descrmismatch=None, - as_classmethod=False): + as_classmethod=False, doc=None): "NOT_RPYTHON" # f must be a function whose name does NOT start with 'app_' @@ -861,7 +862,8 @@ cls.instancecache[key] = self self._code = BuiltinCode(f, unwrap_spec=unwrap_spec, self_type=self_type, - descrmismatch=descrmismatch) + descrmismatch=descrmismatch, + doc=doc) self.__name__ = f.func_name self.name = app_name self.as_classmethod = as_classmethod diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -155,20 +155,6 @@ code_name = self.pycode.co_name return space.wrap(code_name) - def __del__(self): - # Only bother enqueuing self to raise an exception if the frame is - # still not finished and finally or except blocks are present. - self.clear_all_weakrefs() - if self.frame is not None: - block = self.frame.lastblock - while block is not None: - if not isinstance(block, LoopBlock): - self.enqueue_for_destruction(self.space, - GeneratorIterator.descr_close, - "interrupting generator of ") - break - block = block.previous - # Results can be either an RPython list of W_Root, or it can be an # app-level W_ListObject, which also has an append() method, that's why we # generate 2 versions of the function and 2 jit drivers. @@ -211,3 +197,20 @@ return unpack_into unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() + + +class GeneratorIteratorWithDel(GeneratorIterator): + + def __del__(self): + # Only bother enqueuing self to raise an exception if the frame is + # still not finished and finally or except blocks are present. + self.clear_all_weakrefs() + if self.frame is not None: + block = self.frame.lastblock + while block is not None: + if not isinstance(block, LoopBlock): + self.enqueue_for_destruction(self.space, + GeneratorIterator.descr_close, + "interrupting generator of ") + break + block = block.previous diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR, CO_KILL_DOCSTRING) + CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash @@ -31,7 +31,7 @@ # Magic numbers for the bytecode version in code objects. # See comments in pypy/module/imp/importing. 
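The generator.py and symtable.py changes above implement the whatsnew entry quoted earlier in this diff: only a generator whose frame can be suspended inside a try or with block (exactly what the new has_yield_inside_try flag records) needs the __del__-based cleanup now isolated in GeneratorIteratorWithDel, because abandoning such a generator mid-iteration must still run the pending finally/exit code. A plain-Python illustration of the two cases the flag distinguishes:

    def plain_gen(items):
        for x in items:
            yield x            # no try/with around a yield: abandoning this
                               # generator mid-iteration needs no cleanup call

    def guarded_gen(path):
        f = open(path)
        try:
            for line in f:
                yield line     # suspended inside try/finally: if the generator
        finally:               # is dropped here, close() must still be invoked
            f.close()          # so that the finally block runs

Presumably the CO_YIELD_INSIDE_TRY flag is what lets the interpreter pick the plain GeneratorIterator (without __del__) for the first kind and reserve the clear_all_weakrefs/enqueue_for_destruction machinery for the second; the code that makes that choice is not part of the hunks shown here.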
cpython_magic, = struct.unpack("= 0: if (sflags & SF_MSVC_BITFIELDS) == 0: @@ -305,6 +306,12 @@ if bits_already_occupied + fbitsize > 8 * ftype.size: # it would not fit, we need to start at the next # allowed position + if ((sflags & SF_PACKED) != 0 and + (bits_already_occupied & 7) != 0): + raise operationerrfmt(space.w_NotImplementedError, + "with 'packed', gcc would compile field " + "'%s.%s' to reuse some bits in the previous " + "field", w_ctype.name, fname) field_offset_bytes += falign assert boffset < field_offset_bytes * 8 boffset = field_offset_bytes * 8 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3137,6 +3137,44 @@ p = newp(BArray, None) assert sizeof(p[2:9]) == 7 * sizeof(BInt) +def test_packed(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BShort = new_primitive_type("short") + BStruct = new_struct_type("struct foo") + complete_struct_or_union(BStruct, [('a1', BLong, -1), + ('a2', BChar, -1), + ('a3', BShort, -1)], + None, -1, -1, 8) # SF_PACKED==8 + d = BStruct.fields + assert len(d) == 3 + assert d[0][0] == 'a1' + assert d[0][1].type is BLong + assert d[0][1].offset == 0 + assert d[0][1].bitshift == -1 + assert d[0][1].bitsize == -1 + assert d[1][0] == 'a2' + assert d[1][1].type is BChar + assert d[1][1].offset == sizeof(BLong) + assert d[1][1].bitshift == -1 + assert d[1][1].bitsize == -1 + assert d[2][0] == 'a3' + assert d[2][1].type is BShort + assert d[2][1].offset == sizeof(BLong) + sizeof(BChar) + assert d[2][1].bitshift == -1 + assert d[2][1].bitsize == -1 + assert sizeof(BStruct) == sizeof(BLong) + sizeof(BChar) + sizeof(BShort) + assert alignof(BStruct) == 1 + +def test_packed_with_bitfields(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BStruct = new_struct_type("struct foo") + py.test.raises(NotImplementedError, + complete_struct_or_union, + BStruct, [('a1', BLong, 30), + ('a2', BChar, 5)], + None, -1, -1, 8) # SF_PACKED==8 def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -709,7 +709,7 @@ @unwrap_spec(data=str, errors='str_or_None') def escape_encode(space, data, errors='strict'): - from pypy.objspace.std.stringobject import string_escape_encode + from pypy.objspace.std.bytesobject import string_escape_encode result = string_escape_encode(data, quote="'") start = 1 end = len(result) - 1 diff --git a/pypy/module/_multiprocessing/test/test_memory.py b/pypy/module/_multiprocessing/test/test_memory.py --- a/pypy/module/_multiprocessing/test/test_memory.py +++ b/pypy/module/_multiprocessing/test/test_memory.py @@ -1,6 +1,6 @@ class AppTestMemory: spaceconfig = dict(usemodules=('_multiprocessing', 'mmap', - '_rawffi', '_ffi', 'itertools')) + '_rawffi', 'itertools')) def test_address_of(self): import _multiprocessing diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -5,7 +5,7 @@ from pypy.interpreter.module import Module from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pytraceback import PyTraceback -from pypy.interpreter.generator import GeneratorIterator +from pypy.interpreter.generator import 
GeneratorIteratorWithDel from rpython.rlib.objectmodel import instantiate from pypy.interpreter.gateway import unwrap_spec from pypy.objspace.std.iterobject import W_SeqIterObject, W_ReverseSeqIterObject @@ -60,7 +60,7 @@ return space.wrap(tb) def generator_new(space): - new_generator = instantiate(GeneratorIterator) + new_generator = instantiate(GeneratorIteratorWithDel) return space.wrap(new_generator) @unwrap_spec(current=int, remaining=int, step=int) diff --git a/pypy/module/_rawffi/__init__.py b/pypy/module/_rawffi/__init__.py --- a/pypy/module/_rawffi/__init__.py +++ b/pypy/module/_rawffi/__init__.py @@ -2,6 +2,7 @@ """ from pypy.interpreter.mixedmodule import MixedModule +from pypy.module._rawffi import alt class Module(MixedModule): interpleveldefs = { @@ -19,6 +20,7 @@ 'wcharp2unicode' : 'interp_rawffi.wcharp2unicode', 'charp2rawstring' : 'interp_rawffi.charp2rawstring', 'wcharp2rawunicode' : 'interp_rawffi.wcharp2rawunicode', + 'rawstring2charp' : 'interp_rawffi.rawstring2charp', 'CallbackPtr' : 'callback.W_CallbackPtr', '_num_of_allocated_objects' : 'tracker.num_of_allocated_objects', 'get_libc' : 'interp_rawffi.get_libc', @@ -32,6 +34,10 @@ appleveldefs = { } + submodules = { + 'alt': alt.Module, + } + def buildloaders(cls): from pypy.module._rawffi import interp_rawffi diff --git a/pypy/module/_ffi/__init__.py b/pypy/module/_rawffi/alt/__init__.py rename from pypy/module/_ffi/__init__.py rename to pypy/module/_rawffi/alt/__init__.py diff --git a/pypy/module/_ffi/app_struct.py b/pypy/module/_rawffi/alt/app_struct.py rename from pypy/module/_ffi/app_struct.py rename to pypy/module/_rawffi/alt/app_struct.py --- a/pypy/module/_ffi/app_struct.py +++ b/pypy/module/_rawffi/alt/app_struct.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt class MetaStructure(type): @@ -11,7 +11,7 @@ fields = dic.get('_fields_') if fields is None: return - struct_descr = _ffi._StructDescr(name, fields) + struct_descr = alt._StructDescr(name, fields) for field in fields: dic[field.name] = field dic['_struct_'] = struct_descr diff --git a/pypy/module/_ffi/interp_ffitype.py b/pypy/module/_rawffi/alt/interp_ffitype.py rename from pypy/module/_ffi/interp_ffitype.py rename to pypy/module/_rawffi/alt/interp_ffitype.py --- a/pypy/module/_ffi/interp_ffitype.py +++ b/pypy/module/_rawffi/alt/interp_ffitype.py @@ -116,7 +116,7 @@ types = [ # note: most of the type name directly come from the C equivalent, # with the exception of bytes: in C, ubyte and char are equivalent, - # but for _ffi the first expects a number while the second a 1-length + # but for here the first expects a number while the second a 1-length # string W_FFIType('slong', libffi.types.slong), W_FFIType('sint', libffi.types.sint), diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py rename from pypy/module/_ffi/interp_funcptr.py rename to pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_ffi/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -3,7 +3,7 @@ operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from pypy.module._ffi.interp_ffitype import W_FFIType +from pypy.module._rawffi.alt.interp_ffitype import W_FFIType # from rpython.rtyper.lltypesystem import lltype, rffi # @@ -13,7 +13,7 @@ from rpython.rlib.rdynload import DLOpenError from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated -from pypy.module._ffi.type_converter import FromAppLevelConverter, 
ToAppLevelConverter +from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror import os @@ -302,7 +302,7 @@ W_FuncPtr.typedef = TypeDef( - '_ffi.FuncPtr', + '_rawffi.alt.FuncPtr', __call__ = interp2app(W_FuncPtr.call), getaddr = interp2app(W_FuncPtr.getaddr), free_temp_buffers = interp2app(W_FuncPtr.free_temp_buffers), @@ -346,7 +346,7 @@ W_CDLL.typedef = TypeDef( - '_ffi.CDLL', + '_rawffi.alt.CDLL', __new__ = interp2app(descr_new_cdll), getfunc = interp2app(W_CDLL.getfunc), getaddressindll = interp2app(W_CDLL.getaddressindll), @@ -363,7 +363,7 @@ W_WinDLL.typedef = TypeDef( - '_ffi.WinDLL', + '_rawffi.alt.WinDLL', __new__ = interp2app(descr_new_windll), getfunc = interp2app(W_WinDLL.getfunc), getaddressindll = interp2app(W_WinDLL.getaddressindll), diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_rawffi/alt/interp_struct.py rename from pypy/module/_ffi/interp_struct.py rename to pypy/module/_rawffi/alt/interp_struct.py --- a/pypy/module/_ffi/interp_struct.py +++ b/pypy/module/_rawffi/alt/interp_struct.py @@ -8,8 +8,8 @@ from pypy.interpreter.typedef import TypeDef, interp_attrproperty from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import operationerrfmt -from pypy.module._ffi.interp_ffitype import W_FFIType -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter +from pypy.module._rawffi.alt.interp_ffitype import W_FFIType +from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter class W_Field(W_Root): diff --git a/pypy/module/_ffi/test/__init__.py b/pypy/module/_rawffi/alt/test/__init__.py rename from pypy/module/_ffi/test/__init__.py rename to pypy/module/_rawffi/alt/test/__init__.py diff --git a/pypy/module/_ffi/test/test_ffitype.py b/pypy/module/_rawffi/alt/test/test_ffitype.py rename from pypy/module/_ffi/test/test_ffitype.py rename to pypy/module/_rawffi/alt/test/test_ffitype.py --- a/pypy/module/_ffi/test/test_ffitype.py +++ b/pypy/module/_rawffi/alt/test/test_ffitype.py @@ -1,21 +1,21 @@ -from pypy.module._ffi.test.test_funcptr import BaseAppTestFFI +from pypy.module._rawffi.alt.test.test_funcptr import BaseAppTestFFI class AppTestFFIType(BaseAppTestFFI): def test_simple_types(self): - from _ffi import types + from _rawffi.alt import types assert str(types.sint) == "" assert str(types.uint) == "" assert types.sint.name == 'sint' assert types.uint.name == 'uint' def test_sizeof(self): - from _ffi import types + from _rawffi.alt import types assert types.sbyte.sizeof() == 1 assert types.sint.sizeof() == 4 def test_typed_pointer(self): - from _ffi import types + from _rawffi.alt import types intptr = types.Pointer(types.sint) # create a typed pointer to sint assert intptr.deref_pointer() is types.sint assert str(intptr) == '' @@ -23,7 +23,7 @@ raises(TypeError, "types.Pointer(42)") def test_pointer_identity(self): - from _ffi import types + from _rawffi.alt import types x = types.Pointer(types.slong) y = types.Pointer(types.slong) z = types.Pointer(types.char) @@ -31,7 +31,7 @@ assert x is not z def test_char_p_cached(self): - from _ffi import types + from _rawffi.alt import types x = types.Pointer(types.char) assert x is types.char_p x = types.Pointer(types.unichar) diff --git a/pypy/module/_ffi/test/test_funcptr.py b/pypy/module/_rawffi/alt/test/test_funcptr.py rename from pypy/module/_ffi/test/test_funcptr.py rename to 
pypy/module/_rawffi/alt/test/test_funcptr.py --- a/pypy/module/_ffi/test/test_funcptr.py +++ b/pypy/module/_rawffi/alt/test/test_funcptr.py @@ -7,7 +7,7 @@ import sys, py class BaseAppTestFFI(object): - spaceconfig = dict(usemodules=('_ffi', '_rawffi')) + spaceconfig = dict(usemodules=('_rawffi',)) @classmethod def prepare_c_example(cls): @@ -62,17 +62,17 @@ cls.w_f_12_34_plus_56_78 = space.wrap(f_result) def test_libload(self): - import _ffi - _ffi.CDLL(self.libc_name) + import _rawffi.alt + _rawffi.alt.CDLL(self.libc_name) def test_libload_fail(self): - import _ffi - raises(OSError, _ffi.CDLL, "xxxxx_this_name_does_not_exist_xxxxx") + import _rawffi.alt + raises(OSError, _rawffi.alt.CDLL, "xxxxx_this_name_does_not_exist_xxxxx") def test_libload_None(self): if self.iswin32: skip("unix specific") - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types # this should return *all* loaded libs, dlopen(NULL) dll = CDLL(None) # libm should be loaded @@ -80,20 +80,20 @@ assert res == 1.0 def test_callfunc(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libm = CDLL(self.libm_name) pow = libm.getfunc('pow', [types.double, types.double], types.double) assert pow(2, 3) == 8 def test_getaddr(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libm = CDLL(self.libm_name) pow = libm.getfunc('pow', [types.double, types.double], types.double) assert pow.getaddr() == self.pow_addr def test_getaddressindll(self): import sys - from _ffi import CDLL + from _rawffi.alt import CDLL libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') fff = sys.maxint*2-1 @@ -102,7 +102,7 @@ assert pow_addr == self.pow_addr & fff def test_func_fromaddr(self): - from _ffi import CDLL, types, FuncPtr + from _rawffi.alt import CDLL, types, FuncPtr libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') pow = FuncPtr.fromaddr(pow_addr, 'pow', [types.double, types.double], @@ -117,7 +117,7 @@ } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy', [types.sint, types.sint], types.sint) assert sum_xy(30, 12) == 42 @@ -129,7 +129,7 @@ DLLEXPORT void set_dummy(int val) { dummy = val; } DLLEXPORT int get_dummy() { return dummy; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) set_dummy = libfoo.getfunc('set_dummy', [types.sint], types.void) get_dummy = libfoo.getfunc('get_dummy', [], types.sint) @@ -144,7 +144,7 @@ DLLEXPORT int* get_dummy_ptr() { return &dummy; } DLLEXPORT void set_val_to_ptr(int* ptr, int val) { *ptr = val; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) get_dummy = libfoo.getfunc('get_dummy', [], types.sint) get_dummy_ptr = libfoo.getfunc('get_dummy_ptr', [], types.void_p) @@ -163,7 +163,7 @@ DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types class MyPointerWrapper(object): def __init__(self, value): @@ -197,7 +197,7 @@ return len; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) mystrlen = libfoo.getfunc('mystrlen', [types.char_p], types.slong) @@ -223,7 +223,7 @@ return len; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) mystrlen = 
libfoo.getfunc('mystrlen_u', [types.unichar_p], types.slong) @@ -247,7 +247,7 @@ return s; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) do_nothing = libfoo.getfunc('do_nothing', [types.char_p], types.char_p) @@ -264,7 +264,7 @@ DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) intptr = types.Pointer(types.sint) @@ -283,7 +283,7 @@ DLLEXPORT long is_null_ptr(void* ptr) { return ptr == NULL; } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) is_null_ptr = libfoo.getfunc('is_null_ptr', [types.void_p], types.ulong) assert not is_null_ptr(sys.maxint+1) @@ -296,7 +296,7 @@ } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_ul', [types.ulong, types.ulong], types.ulong) @@ -313,7 +313,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_us', [types.ushort, types.ushort], types.ushort) @@ -327,7 +327,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_us', [types.ubyte, types.ubyte], types.ubyte) @@ -342,7 +342,7 @@ } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types maxint32 = 2147483647 libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_ui', [types.uint, types.uint], @@ -357,7 +357,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_sb', [types.sbyte, types.sbyte], types.sbyte) @@ -371,7 +371,7 @@ return x - ('a'-'A'); } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) my_toupper = libfoo.getfunc('my_toupper', [types.char], types.char) @@ -385,7 +385,7 @@ return x + y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_wc', [types.unichar, types.unichar], types.unichar) @@ -400,7 +400,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_float', [types.float, types.float], types.float) @@ -415,7 +415,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types maxint32 = 2147483647 # we cannot really go above maxint on 64 bits # (and we would not test anything, as there long # is the same as long long) @@ -437,7 +437,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types maxint64 = 9223372036854775807 # maxint64+1 does not fit into a # longlong, but it does into a # ulonglong @@ -464,7 +464,7 @@ return p.x + p.y; } """ - from _ffi import CDLL, types, _StructDescr, Field + from _rawffi.alt import CDLL, types, _StructDescr, Field Point = _StructDescr('Point', [ Field('x', types.slong), Field('y', types.slong), @@ -487,7 +487,7 @@ return p; } """ - from _ffi import CDLL, types, _StructDescr, Field + from _rawffi.alt import CDLL, types, _StructDescr, Field Point = _StructDescr('Point', [ Field('x', types.slong), Field('y', 
types.slong), @@ -500,9 +500,9 @@ assert p.getfield('x') == 12 assert p.getfield('y') == 34 - # XXX: support for _rawffi structures should be killed as soon as we - # implement ctypes.Structure on top of _ffi. In the meantime, we support - # both + # XXX: long ago the plan was to kill _rawffi structures in favor of + # _rawffi.alt structures. The plan never went anywhere, so we're + # stuck with both. def test_byval_argument__rawffi(self): """ // defined above @@ -510,7 +510,7 @@ DLLEXPORT long sum_point(struct Point p); """ import _rawffi - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types POINT = _rawffi.Structure([('x', 'l'), ('y', 'l')]) ffi_point = POINT.get_ffi_type() libfoo = CDLL(self.libfoo_name) @@ -529,7 +529,7 @@ DLLEXPORT struct Point make_point(long x, long y); """ import _rawffi - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types POINT = _rawffi.Structure([('x', 'l'), ('y', 'l')]) ffi_point = POINT.get_ffi_type() libfoo = CDLL(self.libfoo_name) @@ -542,23 +542,23 @@ def test_TypeError_numargs(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy', [types.sint, types.sint], types.sint) raises(TypeError, "sum_xy(1, 2, 3)") raises(TypeError, "sum_xy(1)") def test_TypeError_voidarg(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) raises(TypeError, "libfoo.getfunc('sum_xy', [types.void], types.sint)") def test_OSError_loading(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types raises(OSError, "CDLL('I do not exist')") def test_AttributeError_missing_function(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) raises(AttributeError, "libfoo.getfunc('I_do_not_exist', [], types.void)") if self.iswin32: @@ -569,7 +569,7 @@ def test_calling_convention1(self): if not self.iswin32: skip("windows specific") - from _ffi import WinDLL, types + from _rawffi.alt import WinDLL, types libm = WinDLL(self.libm_name) pow = libm.getfunc('pow', [types.double, types.double], types.double) try: @@ -582,7 +582,7 @@ def test_calling_convention2(self): if not self.iswin32: skip("windows specific") - from _ffi import WinDLL, types + from _rawffi.alt import WinDLL, types kernel = WinDLL('Kernel32.dll') sleep = kernel.getfunc('Sleep', [types.uint], types.void) sleep(10) @@ -590,7 +590,7 @@ def test_calling_convention3(self): if not self.iswin32: skip("windows specific") - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types wrong_kernel = CDLL('Kernel32.dll') wrong_sleep = wrong_kernel.getfunc('Sleep', [types.uint], types.void) try: @@ -603,7 +603,7 @@ def test_func_fromaddr2(self): if not self.iswin32: skip("windows specific") - from _ffi import CDLL, types, FuncPtr + from _rawffi.alt import CDLL, types, FuncPtr from _rawffi import FUNCFLAG_STDCALL libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') @@ -619,7 +619,7 @@ def test_func_fromaddr3(self): if not self.iswin32: skip("windows specific") - from _ffi import WinDLL, types, FuncPtr + from _rawffi.alt import WinDLL, types, FuncPtr from _rawffi import FUNCFLAG_STDCALL kernel = WinDLL('Kernel32.dll') sleep_addr = kernel.getaddressindll('Sleep') @@ -636,7 +636,7 @@ """ if not self.iswin32: skip("windows specific") - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) f_name = libfoo.getfunc('AAA_first_ordinal_function', [], 
types.sint) f_ordinal = libfoo.getfunc(1, [], types.sint) diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_rawffi/alt/test/test_struct.py rename from pypy/module/_ffi/test/test_struct.py rename to pypy/module/_rawffi/alt/test/test_struct.py --- a/pypy/module/_ffi/test/test_struct.py +++ b/pypy/module/_rawffi/alt/test/test_struct.py @@ -1,7 +1,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.module._ffi.interp_ffitype import app_types, W_FFIType -from pypy.module._ffi.interp_struct import compute_size_and_alignement, W_Field -from pypy.module._ffi.test.test_funcptr import BaseAppTestFFI +from pypy.module._rawffi.alt.interp_ffitype import app_types, W_FFIType +from pypy.module._rawffi.alt.interp_struct import compute_size_and_alignement, W_Field +from pypy.module._rawffi.alt.test.test_funcptr import BaseAppTestFFI class TestStruct(object): @@ -69,7 +69,7 @@ cls.w_runappdirect = cls.space.wrap(cls.runappdirect) def test__StructDescr(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -80,7 +80,7 @@ assert descr.ffitype.name == 'struct foo' def test_alignment(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.sbyte), @@ -92,7 +92,7 @@ assert fields[1].offset == longsize # aligned to WORD def test_missing_field(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -106,7 +106,7 @@ def test_unknown_type(self): if self.runappdirect: skip('cannot use self.dummy_type with -A') - from _ffi import _StructDescr, Field + from _rawffi.alt import _StructDescr, Field fields = [ Field('x', self.dummy_type), ] @@ -116,7 +116,7 @@ raises(TypeError, "struct.setfield('x', 42)") def test_getfield_setfield(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -133,7 +133,7 @@ def test_getfield_setfield_signed_types(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('sbyte', types.sbyte), @@ -156,7 +156,7 @@ def test_getfield_setfield_unsigned_types(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('ubyte', types.ubyte), @@ -188,7 +188,7 @@ def test_getfield_setfield_longlong(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('slonglong', types.slonglong), @@ -205,7 +205,7 @@ def test_getfield_setfield_float(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.double), @@ -219,7 +219,7 @@ def test_getfield_setfield_singlefloat(self): import sys - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.float), @@ -237,7 +237,7 @@ assert mem == [123.5] def test_define_fields(self): - from _ffi import _StructDescr, Field, types + from 
_rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -255,7 +255,7 @@ raises(ValueError, "descr.define_fields(fields)") def test_pointer_to_incomplete_struct(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() fields = [ Field('x', types.slong), @@ -271,7 +271,7 @@ assert types.Pointer(descr.ffitype) is foo_p def test_nested_structure(self): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types longsize = types.slong.sizeof() foo_fields = [ Field('x', types.slong), @@ -310,7 +310,7 @@ def test_compute_shape(self): - from _ffi import Structure, Field, types + from _rawffi.alt import Structure, Field, types class Point(Structure): _fields_ = [ Field('x', types.slong), diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_rawffi/alt/test/test_type_converter.py rename from pypy/module/_ffi/test/test_type_converter.py rename to pypy/module/_rawffi/alt/test/test_type_converter.py --- a/pypy/module/_ffi/test/test_type_converter.py +++ b/pypy/module/_rawffi/alt/test/test_type_converter.py @@ -1,8 +1,8 @@ import sys from rpython.rlib.rarithmetic import r_uint, r_singlefloat, r_longlong, r_ulonglong from rpython.rlib.libffi import IS_32_BIT -from pypy.module._ffi.interp_ffitype import app_types, descr_new_pointer -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter +from pypy.module._rawffi.alt.interp_ffitype import app_types, descr_new_pointer +from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter class DummyFromAppLevelConverter(FromAppLevelConverter): @@ -29,7 +29,7 @@ class TestFromAppLevel(object): - spaceconfig = dict(usemodules=('_ffi',)) + spaceconfig = dict(usemodules=('_rawffi',)) def setup_class(cls): converter = DummyFromAppLevelConverter(cls.space) @@ -104,12 +104,12 @@ def test__as_ffi_pointer_(self): space = self.space w_MyPointerWrapper = space.appexec([], """(): - import _ffi + from _rawffi.alt import types class MyPointerWrapper(object): def __init__(self, value): self.value = value def _as_ffi_pointer_(self, ffitype): - assert ffitype is _ffi.types.void_p + assert ffitype is types.void_p return self.value return MyPointerWrapper @@ -151,7 +151,7 @@ class TestToAppLevel(object): - spaceconfig = dict(usemodules=('_ffi',)) + spaceconfig = dict(usemodules=('_rawffi',)) def setup_class(cls): converter = DummyToAppLevelConverter(cls.space) diff --git a/pypy/module/_ffi/test/test_ztranslation.py b/pypy/module/_rawffi/alt/test/test_ztranslation.py rename from pypy/module/_ffi/test/test_ztranslation.py rename to pypy/module/_rawffi/alt/test/test_ztranslation.py --- a/pypy/module/_ffi/test/test_ztranslation.py +++ b/pypy/module/_rawffi/alt/test/test_ztranslation.py @@ -1,4 +1,4 @@ -from pypy.objspace.fake.checkmodule import checkmodule - -def test__ffi_translates(): - checkmodule('_ffi', '_rawffi') +from pypy.objspace.fake.checkmodule import checkmodule + +def test__ffi_translates(): + checkmodule('_rawffi') diff --git a/pypy/module/_ffi/type_converter.py b/pypy/module/_rawffi/alt/type_converter.py rename from pypy/module/_ffi/type_converter.py rename to pypy/module/_rawffi/alt/type_converter.py --- a/pypy/module/_ffi/type_converter.py +++ b/pypy/module/_rawffi/alt/type_converter.py @@ -3,7 +3,7 @@ from rpython.rlib.rarithmetic import r_uint from pypy.interpreter.error import operationerrfmt, OperationError 
from pypy.module._rawffi.structure import W_StructureInstance, W_Structure -from pypy.module._ffi.interp_ffitype import app_types +from pypy.module._rawffi.alt.interp_ffitype import app_types class FromAppLevelConverter(object): """ @@ -17,7 +17,7 @@ self.space = space def unwrap_and_do(self, w_ffitype, w_obj): - from pypy.module._ffi.interp_struct import W__StructInstance + from pypy.module._rawffi.alt.interp_struct import W__StructInstance space = self.space if w_ffitype.is_longlong(): # note that we must check for longlong first, because either @@ -194,7 +194,7 @@ self.space = space def do_and_wrap(self, w_ffitype): - from pypy.module._ffi.interp_struct import W__StructDescr + from pypy.module._rawffi.alt.interp_struct import W__StructDescr space = self.space if w_ffitype.is_longlong(): # note that we must check for longlong first, because either diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -311,10 +311,7 @@ raise NotImplementedError def descr_get_ffi_type(self, space): - # XXX: this assumes that you have the _ffi module enabled. In the long - # term, probably we will move the code for build structures and arrays - # from _rawffi to _ffi - from pypy.module._ffi.interp_ffitype import W_FFIType + from pypy.module._rawffi.alt.interp_ffitype import W_FFIType return W_FFIType('', self.get_basic_ffi_type(), self) @unwrap_spec(n=int) @@ -579,6 +576,13 @@ s = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, address), maxlength) return space.wrap(s) +@unwrap_spec(address=r_uint, newcontent='bufferstr') +def rawstring2charp(space, address, newcontent): + from rpython.rtyper.annlowlevel import llstr + from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw + array = rffi.cast(rffi.CCHARP, address) + copy_string_to_raw(llstr(newcontent), array, 0, len(newcontent)) + if _MS_WINDOWS: @unwrap_spec(code=int) def FormatError(space, code): diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -323,6 +323,16 @@ assert res == u'xx' a.free() + def test_rawstring2charp(self): + import _rawffi + A = _rawffi.Array('c') + a = A(10, 'x'*10) + _rawffi.rawstring2charp(a.buffer, "foobar") + assert ''.join([a[i] for i in range(10)]) == "foobarxxxx" + _rawffi.rawstring2charp(a.buffer, buffer("baz")) + assert ''.join([a[i] for i in range(10)]) == "bazbarxxxx" + a.free() + def test_raw_callable(self): import _rawffi lib = _rawffi.CDLL(self.lib_name) @@ -1129,24 +1139,32 @@ gc.collect() gc.collect() S = _rawffi.Structure([('x', 'i')]) - oldnum = _rawffi._num_of_allocated_objects() + try: + oldnum = _rawffi._num_of_allocated_objects() + except RuntimeError: + oldnum = '?' s = S(autofree=True) s.x = 3 s = None gc.collect() - assert oldnum == _rawffi._num_of_allocated_objects() + if oldnum != '?': + assert oldnum == _rawffi._num_of_allocated_objects() def test_array_autofree(self): import gc, _rawffi gc.collect() - oldnum = _rawffi._num_of_allocated_objects() + try: + oldnum = _rawffi._num_of_allocated_objects() + except RuntimeError: + oldnum = '?'
A = _rawffi.Array('c') a = A(6, 'xxyxx\x00', autofree=True) assert _rawffi.charp2string(a.buffer) == 'xxyxx' a = None gc.collect() - assert oldnum == _rawffi._num_of_allocated_objects() + if oldnum != '?': + assert oldnum == _rawffi._num_of_allocated_objects() def teardown_class(cls): Tracker.DO_TRACING = False diff --git a/pypy/module/cppyy/test/test_crossing.py b/pypy/module/cppyy/test/test_crossing.py --- a/pypy/module/cppyy/test/test_crossing.py +++ b/pypy/module/cppyy/test/test_crossing.py @@ -65,7 +65,7 @@ return str(pydname) class AppTestCrossing(AppTestCpythonExtensionBase): - spaceconfig = dict(usemodules=['cpyext', 'cppyy', 'thread', '_rawffi', '_ffi', + spaceconfig = dict(usemodules=['cpyext', 'cppyy', 'thread', '_rawffi', 'array', 'itertools', 'rctime', 'binascii']) def setup_class(cls): diff --git a/pypy/module/cpyext/test/test_ztranslation.py b/pypy/module/cpyext/test/test_ztranslation.py --- a/pypy/module/cpyext/test/test_ztranslation.py +++ b/pypy/module/cpyext/test/test_ztranslation.py @@ -1,4 +1,4 @@ from pypy.objspace.fake.checkmodule import checkmodule def test_cpyext_translates(): - checkmodule('cpyext', '_ffi') + checkmodule('cpyext') diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -12,7 +12,7 @@ from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.module._codecs.interp_codecs import CodecState -from pypy.objspace.std import unicodeobject, unicodetype +from pypy.objspace.std import unicodeobject from rpython.rlib import rstring, runicode from rpython.tool.sourcetools import func_renamer import sys @@ -262,7 +262,7 @@ def PyUnicode_GetDefaultEncoding(space): """Returns the currently active default encoding.""" if default_encoding[0] == '\x00': - encoding = unicodetype.getdefaultencoding(space) + encoding = unicodeobject.getdefaultencoding(space) i = 0 while i < len(encoding) and i < DEFAULT_ENCODING_SIZE: default_encoding[i] = encoding[i] @@ -295,7 +295,7 @@ encoding = rffi.charp2str(llencoding) if llerrors: errors = rffi.charp2str(llerrors) - return unicodetype.encode_object(space, w_unicode, encoding, errors) + return unicodeobject.encode_object(space, w_unicode, encoding, errors) @cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_AsEncodedString(space, w_unicode, llencoding, llerrors): @@ -318,7 +318,7 @@ if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) - return unicodetype.encode_object(space, w_unicode, 'unicode-escape', 'strict') + return unicodeobject.encode_object(space, w_unicode, 'unicode-escape', 'strict') @cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) def PyUnicode_FromUnicode(space, wchar_p, length): @@ -471,7 +471,7 @@ exception was raised by the codec.""" if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) - return unicodetype.encode_object(space, w_unicode, encoding, "strict") + return unicodeobject.encode_object(space, w_unicode, encoding, "strict") @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING], PyObject) @func_renamer('PyUnicode_Decode%s' % suffix) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -837,11 +837,11 @@ # CPython leaves a gap of 10 when it increases its own magic number. 
# To avoid assigning exactly the same numbers as CPython, we can pick # any number between CPython + 2 and CPython + 9. Right now, -# default_magic = CPython + 6. +# default_magic = CPython + 7. # -# default_magic - 6 -- used by CPython without the -U option -# default_magic - 5 -- used by CPython with the -U option -# default_magic -- used by PyPy [because of CALL_METHOD] +# CPython + 0 -- used by CPython without the -U option +# CPython + 1 -- used by CPython with the -U option +# CPython + 7 = default_magic -- used by PyPy (incompatible!) # from pypy.interpreter.pycode import default_magic MARSHAL_VERSION_FOR_PYC = 2 diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -2,9 +2,9 @@ from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.floattype import float_typedef -from pypy.objspace.std.stringtype import str_typedef -from pypy.objspace.std.unicodetype import unicode_typedef, unicode_from_object +from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.inttype import int_typedef from pypy.objspace.std.complextype import complex_typedef from rpython.rlib.rarithmetic import LONG_BIT @@ -510,7 +510,7 @@ from pypy.module.micronumpy.interp_dtype import new_unicode_dtype - arg = space.unicode_w(unicode_from_object(space, w_arg)) + arg = space.unicode_w(space.unicode_from_object(w_arg)) # XXX size computations, we need tests anyway arr = VoidBoxStorage(len(arg), new_unicode_dtype(space, len(arg))) # XXX not this way, we need store @@ -773,13 +773,13 @@ __module__ = "numpy", ) -W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, str_typedef), +W_StringBox.typedef = TypeDef("string_", (W_CharacterBox.typedef, W_BytesObject.typedef), __module__ = "numpy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), __len__ = interp2app(W_StringBox.descr_len), ) -W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, unicode_typedef), +W_UnicodeBox.typedef = TypeDef("unicode_", (W_CharacterBox.typedef, W_UnicodeObject.typedef), __module__ = "numpy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), __len__ = interp2app(W_UnicodeBox.descr_len), diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -105,7 +105,7 @@ else: rest = '' if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions', - 'imp', 'sys', 'array', '_ffi', 'itertools', 'operator', + 'imp', 'sys', 'array', 'itertools', 'operator', 'posix', '_socket', '_sre', '_lsprof', '_weakref', '__pypy__', 'cStringIO', '_collections', 'struct', 'mmap', 'marshal', '_codecs', 'rctime', 'cppyy', diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -49,11 +49,12 @@ 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', 'unpack': 'interp_struct.unpack', - } + + 'Struct': 'interp_struct.W_Struct', + } appleveldefs = { 'error': 'app_struct.error', 'pack_into': 'app_struct.pack_into', 'unpack_from': 'app_struct.unpack_from', - 'Struct': 'app_struct.Struct', - } + } diff --git a/pypy/module/struct/app_struct.py 
b/pypy/module/struct/app_struct.py --- a/pypy/module/struct/app_struct.py +++ b/pypy/module/struct/app_struct.py @@ -4,6 +4,7 @@ """ import struct + class error(Exception): """Exception raised on various occasions; argument is a string describing what is wrong.""" @@ -21,21 +22,3 @@ raise error("unpack_from requires a buffer of at least %d bytes" % (size,)) return struct.unpack(fmt, data) - -# XXX inefficient -class Struct(object): - def __init__(self, format): - self.format = format - self.size = struct.calcsize(format) - - def pack(self, *args): - return struct.pack(self.format, *args) - - def unpack(self, s): - return struct.unpack(self.format, s) - - def pack_into(self, buffer, offset, *args): - return pack_into(self.format, buffer, offset, *args) - - def unpack_from(self, buffer, offset=0): - return unpack_from(self.format, buffer, offset) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -1,15 +1,22 @@ -from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import OperationError -from pypy.module.struct.formatiterator import PackFormatIterator, UnpackFormatIterator from rpython.rlib import jit from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator +from rpython.tool.sourcetools import func_with_new_name + +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import OperationError +from pypy.interpreter.typedef import TypeDef, interp_attrproperty +from pypy.module.struct.formatiterator import ( + PackFormatIterator, UnpackFormatIterator +) @unwrap_spec(format=str) def calcsize(space, format): return space.wrap(_calcsize(space, format)) + def _calcsize(space, format): fmtiter = CalcSizeFormatIterator() try: @@ -52,3 +59,44 @@ w_error = space.getattr(w_module, space.wrap('error')) raise OperationError(w_error, space.wrap(e.msg)) return space.newtuple(fmtiter.result_w[:]) + + +class W_Struct(W_Root): + _immutable_fields_ = ["format", "size"] + + def __init__(self, space, format): + self.format = format + self.size = _calcsize(space, format) + + @unwrap_spec(format=str) + def descr__new__(space, w_subtype, format): + self = space.allocate_instance(W_Struct, w_subtype) + W_Struct.__init__(self, space, format) + return self + + def wrap_struct_method(name): + def impl(self, space, __args__): + w_module = space.getbuiltinmodule('struct') + w_method = space.getattr(w_module, space.wrap(name)) + return space.call_obj_args( + w_method, space.wrap(self.format), __args__ + ) + + return func_with_new_name(impl, 'descr_' + name) + + descr_pack = wrap_struct_method("pack") + descr_unpack = wrap_struct_method("unpack") + descr_pack_into = wrap_struct_method("pack_into") + descr_unpack_from = wrap_struct_method("unpack_from") + + +W_Struct.typedef = TypeDef("Struct", + __new__=interp2app(W_Struct.descr__new__.im_func), + format=interp_attrproperty("format", cls=W_Struct), + size=interp_attrproperty("size", cls=W_Struct), + + pack=interp2app(W_Struct.descr_pack), + unpack=interp2app(W_Struct.descr_unpack), + pack_into=interp2app(W_Struct.descr_pack_into), + unpack_from=interp2app(W_Struct.descr_unpack_from), +) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/support.py b/pypy/module/test_lib_pypy/ctypes_tests/support.py --- a/pypy/module/test_lib_pypy/ctypes_tests/support.py +++ 
b/pypy/module/test_lib_pypy/ctypes_tests/support.py @@ -31,10 +31,14 @@ import gc for _ in range(4): gc.collect() - cls.old_num = _rawffi._num_of_allocated_objects() - + try: + cls.old_num = _rawffi._num_of_allocated_objects() + except RuntimeError: + pass def teardown_class(cls): + if not hasattr(sys, 'pypy_translation_info'): + return if sys.pypy_translation_info['translation.gc'] == 'boehm': return # it seems that boehm has problems with __del__, so not # everything is freed diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py b/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py @@ -31,7 +31,17 @@ assert p._objects == {} assert len(x._objects) == 1 assert x._objects['0'] is p._objects - + + def test_simple_structure_and_pointer_with_array(self): + class X(Structure): + _fields_ = [('array', POINTER(c_int))] + + x = X() + a = (c_int * 3)(1, 2, 3) + assert x._objects is None + x.array = a + assert x._objects['0'] is a + def test_structure_with_pointers(self): class X(Structure): _fields_ = [('x', POINTER(c_int)), diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py b/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py @@ -38,6 +38,16 @@ buf.raw = "Hello, World" assert buf.value == "Hello, World" + def test_c_buffer_raw_from_buffer(self): + buf = c_buffer(32) + buf.raw = buffer("Hello, World") + assert buf.value == "Hello, World" + + def test_c_buffer_raw_from_memoryview(self): + buf = c_buffer(32) + buf.raw = memoryview("Hello, World") + assert buf.value == "Hello, World" + def test_param_1(self): BUF = c_char * 4 buf = BUF() diff --git a/pypy/module/test_lib_pypy/test_grp_extra.py b/pypy/module/test_lib_pypy/test_grp_extra.py --- a/pypy/module/test_lib_pypy/test_grp_extra.py +++ b/pypy/module/test_lib_pypy/test_grp_extra.py @@ -2,7 +2,7 @@ class AppTestGrp: - spaceconfig = dict(usemodules=('binascii', '_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('binascii', '_rawffi', 'itertools')) def setup_class(cls): cls.w_grp = import_lib_pypy(cls.space, 'grp', diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py --- a/pypy/module/test_lib_pypy/test_os_wait.py +++ b/pypy/module/test_lib_pypy/test_os_wait.py @@ -10,7 +10,7 @@ class AppTestOsWait: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('_rawffi', 'itertools')) def setup_class(cls): if not hasattr(os, "fork"): diff --git a/pypy/module/test_lib_pypy/test_pwd.py b/pypy/module/test_lib_pypy/test_pwd.py --- a/pypy/module/test_lib_pypy/test_pwd.py +++ b/pypy/module/test_lib_pypy/test_pwd.py @@ -1,7 +1,7 @@ import py, sys class AppTestPwd: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools', 'binascii')) + spaceconfig = dict(usemodules=('_rawffi', 'itertools', 'binascii')) def setup_class(cls): if sys.platform == 'win32': diff --git a/pypy/module/test_lib_pypy/test_resource.py b/pypy/module/test_lib_pypy/test_resource.py --- a/pypy/module/test_lib_pypy/test_resource.py +++ b/pypy/module/test_lib_pypy/test_resource.py @@ -9,7 +9,7 @@ class AppTestResource: - spaceconfig = dict(usemodules=('_ffi', '_rawffi', 'itertools')) + spaceconfig = dict(usemodules=('_rawffi', 'itertools')) def setup_class(cls): rebuild.rebuild_one('resource.ctc.py') 
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -290,6 +290,9 @@ ec._py_repr = None return ec + def unicode_from_object(self, w_obj): + return w_some_obj() + # ---------- def translates(self, func=None, argtypes=None, seeobj_w=[], **kwds): diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1,25 +1,23 @@ """The builtin bytearray implementation""" +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.signature import Signature -from pypy.objspace.std import stringobject -from pypy.objspace.std.bytearraytype import ( - getbytevalue, makebytearraydata_w, new_bytearray) -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.model import W_Object, registerimplementation -from pypy.objspace.std.multimethod import FailedToImplement -from pypy.objspace.std.noneobject import W_NoneObject From noreply at buildbot.pypy.org Tue Jan 21 17:20:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 17:20:21 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140121162021.ED6711C33B0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68826:5ceff9c38289 Date: 2014-01-21 17:19 +0100 http://bitbucket.org/pypy/pypy/changeset/5ceff9c38289/ Log: merge heads diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -623,6 +623,10 @@ tobox = self.metainterp.heapcache.getfield(box, fielddescr) if tobox is valuebox: return + # The following test is disabled because buggy. It is supposed + # to be: not(we're writing null into a freshly allocated object) + # but the bug is that is_unescaped() can be True even after the + # field cache is cleared --- see test_ajit:test_unescaped_write_zero if 1: # tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) self.metainterp.heapcache.setfield(box, valuebox, fielddescr) From noreply at buildbot.pypy.org Tue Jan 21 17:20:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 Jan 2014 17:20:20 +0100 (CET) Subject: [pypy-commit] pypy default: Redo the GeneratorIterator/WithDel optimization, which should work now Message-ID: <20140121162020.C1E621C33B0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68825:db6d61fc43ed Date: 2014-01-21 16:41 +0100 http://bitbucket.org/pypy/pypy/changeset/db6d61fc43ed/ Log: Redo the GeneratorIterator/WithDel optimization, which should work now that the JIT bug was fixed. 
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -167,7 +167,7 @@ def run(self): """Start this frame's execution.""" if self.getcode().co_flags & pycode.CO_GENERATOR: - if 1:# self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: + if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: from pypy.interpreter.generator import GeneratorIteratorWithDel return self.space.wrap(GeneratorIteratorWithDel(self)) else: From noreply at buildbot.pypy.org Tue Jan 21 17:47:42 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 21 Jan 2014 17:47:42 +0100 (CET) Subject: [pypy-commit] pypy annotator: update flowspace module descriptions Message-ID: <20140121164742.E8C701C00E3@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68827:ad6dac580b69 Date: 2014-01-21 16:30 +0000 http://bitbucket.org/pypy/pypy/changeset/ad6dac580b69/ Log: update flowspace module descriptions diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -1,5 +1,4 @@ -"""Implements the core parts of flow graph creation, in tandem -with rpython.flowspace.objspace. +"""Implements the core parts of flow graph creation. """ import sys diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -1,5 +1,4 @@ -"""Implements the core parts of flow graph creation, in tandem -with rpython.flowspace.flowcontext. +"""Implements the main interface for flow graph creation: build_flow(). """ from inspect import CO_NEWLOCALS diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -1,6 +1,5 @@ """ -This module defines mappings between operation names and Python's -built-in functions (or type constructors) implementing them. +This module defines all the SpaceOeprations used in rpython.flowspace. 
""" import __builtin__ From noreply at buildbot.pypy.org Tue Jan 21 17:47:44 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 21 Jan 2014 17:47:44 +0100 (CET) Subject: [pypy-commit] pypy annotator: small clean-up in rpython.flowspace.operation Message-ID: <20140121164744.1781E1C00E3@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68828:684f44b83918 Date: 2014-01-21 16:46 +0000 http://bitbucket.org/pypy/pypy/changeset/684f44b83918/ Log: small clean-up in rpython.flowspace.operation diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -42,7 +42,8 @@ } -class _OpHolder(object): pass +class _OpHolder(object): + pass op = _OpHolder() func2op = {} @@ -130,6 +131,7 @@ class OverflowingOperation(PureOperation): can_overflow = True + def ovfchecked(self): ovf = self.ovf_variant(*self.args) ovf.offset = self.offset @@ -137,12 +139,14 @@ class SingleDispatchMixin(object): dispatch = 1 + def consider(self, annotator, arg, *other_args): impl = getattr(arg, self.opname) return impl(*other_args) class DoubleDispatchMixin(object): dispatch = 2 + def consider(self, annotator, arg1, arg2, *other_args): impl = getattr(pair(arg1, arg2), self.opname) return impl(*other_args) @@ -360,9 +364,6 @@ add_operator('userdel', 1, pyfunc=userdel) add_operator('buffer', 1, pyfunc=buffer, pure=True) # see buffer.py add_operator('yield_', 1) -add_operator('newdict', 0) -add_operator('newtuple', None, pure=True, pyfunc=lambda *args:args) -add_operator('newlist', None) add_operator('newslice', 3) add_operator('hint', None, dispatch=1) @@ -379,6 +380,7 @@ class NewDict(HLOperation): opname = 'newdict' canraise = [] + def consider(self, annotator, *args): return annotator.bookkeeper.newdict() @@ -387,6 +389,7 @@ opname = 'newtuple' pyfunc = staticmethod(lambda *args: args) canraise = [] + def consider(self, annotator, *args): return SomeTuple(items=args) @@ -394,6 +397,7 @@ class NewList(HLOperation): opname = 'newlist' canraise = [] + def consider(self, annotator, *args): return annotator.bookkeeper.newlist(*args) @@ -487,7 +491,7 @@ types.BuiltinMethodType, types.ClassType, types.TypeType)) and - c.__module__ in ['__builtin__', 'exceptions']): + c.__module__ in ['__builtin__', 'exceptions']): return builtins_exceptions.get(c, []) # *any* exception for non-builtins return [Exception] @@ -545,7 +549,7 @@ KeyError: 'key', ZeroDivisionError: 'zer', ValueError: 'val', - } +} # specifying IndexError, and KeyError beyond Exception, # allows the annotator to be more precise, see test_reraiseAnything/KeyError in @@ -571,7 +575,7 @@ # duplicate exceptions and add OverflowError for name in names.split(): oper = getattr(op, name) - oper_ovf = getattr(op, name+'_ovf') + oper_ovf = getattr(op, name + '_ovf') oper_ovf.canraise = list(oper.canraise) oper_ovf.canraise.append(OverflowError) @@ -583,9 +587,9 @@ _add_exceptions("""truediv divmod inplace_add inplace_sub inplace_mul inplace_truediv inplace_floordiv inplace_div inplace_mod inplace_pow - inplace_lshift""", OverflowError) # without a _ovf version + inplace_lshift""", OverflowError) # without a _ovf version _add_except_ovf("""neg abs add sub mul floordiv div mod lshift""") # with a _ovf version _add_exceptions("""pow""", - OverflowError) # for the float case + OverflowError) # for the float case del _add_exceptions, _add_except_ovf From noreply at buildbot.pypy.org Tue Jan 21 18:20:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 21 
Jan 2014 18:20:54 +0100 (CET) Subject: [pypy-commit] pypy default: Add an XXX comment Message-ID: <20140121172054.EB29A1C00E3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68829:cdeeb9213ba3 Date: 2014-01-21 18:20 +0100 http://bitbucket.org/pypy/pypy/changeset/cdeeb9213ba3/ Log: Add an XXX comment diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -196,6 +196,9 @@ del boxes[box] return + # XXX when is it useful to clear() the complete dictionaries? + # isn't it enough in all cases to do the same as the two + # loops just above? self.heap_cache.clear() self.heap_array_cache.clear() From noreply at buildbot.pypy.org Tue Jan 21 20:34:46 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 21 Jan 2014 20:34:46 +0100 (CET) Subject: [pypy-commit] pypy default: document branch Message-ID: <20140121193446.B8FEA1D23D0@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68834:6c97b595273c Date: 2014-01-21 19:33 +0000 http://bitbucket.org/pypy/pypy/changeset/6c97b595273c/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -48,3 +48,7 @@ .. branch: remove-del-from-generatoriterator Speed up generators that don't yield inside try or wait blocks by skipping unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. From noreply at buildbot.pypy.org Tue Jan 21 20:34:44 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 21 Jan 2014 20:34:44 +0100 (CET) Subject: [pypy-commit] pypy annotator: close branch before merging Message-ID: <20140121193444.064591D23D0@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: annotator Changeset: r68832:7045f8e50def Date: 2014-01-21 19:30 +0000 http://bitbucket.org/pypy/pypy/changeset/7045f8e50def/ Log: close branch before merging From noreply at buildbot.pypy.org Tue Jan 21 20:34:45 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 21 Jan 2014 20:34:45 +0100 (CET) Subject: [pypy-commit] pypy default: merge branch annotator Message-ID: <20140121193445.94B521D23D0@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68833:f5a09827c242 Date: 2014-01-21 19:30 +0000 http://bitbucket.org/pypy/pypy/changeset/f5a09827c242/ Log: merge branch annotator diff too long, truncating to 2000 out of 2641 lines diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -9,7 +9,7 @@ from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform -from rpython.annotator import model as annmodel, signature, unaryop, binaryop +from rpython.annotator import model as annmodel, signature from rpython.annotator.bookkeeper import Bookkeeper import py @@ -455,12 +455,12 @@ # occour for this specific, typed operation. 
if block.exitswitch == c_last_exception: op = block.operations[-1] - if op.opname in binaryop.BINARY_OPERATIONS: + if op.dispatch == 2: arg1 = self.binding(op.args[0]) arg2 = self.binding(op.args[1]) binop = getattr(pair(arg1, arg2), op.opname, None) can_only_throw = annmodel.read_can_only_throw(binop, arg1, arg2) - elif op.opname in unaryop.UNARY_OPERATIONS: + elif op.dispatch == 1: arg1 = self.binding(op.args[0]) opname = op.opname if opname == 'contains': opname = 'op_contains' @@ -611,44 +611,6 @@ def noreturnvalue(self, op): return annmodel.s_ImpossibleValue # no return value (hook method) - # XXX "contains" clash with SomeObject method - def consider_op_contains(self, seq, elem): - self.bookkeeper.count("contains", seq) - return seq.op_contains(elem) - - def consider_op_newtuple(self, *args): - return annmodel.SomeTuple(items = args) - - def consider_op_newlist(self, *args): - return self.bookkeeper.newlist(*args) - - def consider_op_newdict(self): - return self.bookkeeper.newdict() - - - def _registeroperations(cls, unary_ops, binary_ops): - # All unary operations - d = {} - for opname in unary_ops: - fnname = 'consider_op_' + opname - exec py.code.Source(""" -def consider_op_%s(self, arg, *args): - return arg.%s(*args) -""" % (opname, opname)).compile() in globals(), d - setattr(cls, fnname, d[fnname]) - # All binary operations - for opname in binary_ops: - fnname = 'consider_op_' + opname - exec py.code.Source(""" -def consider_op_%s(self, arg1, arg2, *args): - return pair(arg1,arg2).%s(*args) -""" % (opname, opname)).compile() in globals(), d - setattr(cls, fnname, d[fnname]) - _registeroperations = classmethod(_registeroperations) - -# register simple operations handling -RPythonAnnotator._registeroperations(unaryop.UNARY_OPERATIONS, binaryop.BINARY_OPERATIONS) - class BlockedInference(Exception): """This exception signals the type inference engine that the situation diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -12,10 +12,11 @@ SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, - missing_operation, read_can_only_throw, add_knowntypedata, + read_can_only_throw, add_knowntypedata, merge_knowntypedata,) from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant +from rpython.flowspace.operation import op from rpython.rlib import rarithmetic from rpython.annotator.model import AnnotatorError @@ -23,28 +24,9 @@ def immutablevalue(x): return getbookkeeper().immutablevalue(x) -# XXX unify this with ObjSpace.MethodTable -BINARY_OPERATIONS = set(['add', 'sub', 'mul', 'div', 'mod', - 'truediv', 'floordiv', 'divmod', - 'and_', 'or_', 'xor', - 'lshift', 'rshift', - 'getitem', 'setitem', 'delitem', - 'getitem_idx', 'getitem_key', 'getitem_idx_key', - 'inplace_add', 'inplace_sub', 'inplace_mul', - 'inplace_truediv', 'inplace_floordiv', 'inplace_div', - 'inplace_mod', - 'inplace_lshift', 'inplace_rshift', - 'inplace_and', 'inplace_or', 'inplace_xor', - 'lt', 'le', 'eq', 'ne', 'gt', 'ge', 'is_', 'cmp', - 'coerce', - ] - +[opname+'_ovf' for opname in - """add sub mul floordiv div mod lshift - """.split() - ]) +BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() + if oper.dispatch == 2]) -for opname in BINARY_OPERATIONS: - missing_operation(pairtype(SomeObject, SomeObject), 
opname) class __extend__(pairtype(SomeObject, SomeObject)): @@ -78,46 +60,39 @@ if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const < obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def le((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const <= obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def eq((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const == obj2.const) else: - getbookkeeper().count("non_int_eq", obj1, obj2) return s_Bool def ne((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const != obj2.const) else: - getbookkeeper().count("non_int_eq", obj1, obj2) return s_Bool def gt((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const > obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def ge((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const >= obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def cmp((obj1, obj2)): - getbookkeeper().count("cmp", obj1, obj2) if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(cmp(obj1.const, obj2.const)) else: @@ -163,13 +138,19 @@ return r def divmod((obj1, obj2)): - getbookkeeper().count("divmod", obj1, obj2) return SomeTuple([pair(obj1, obj2).div(), pair(obj1, obj2).mod()]) def coerce((obj1, obj2)): - getbookkeeper().count("coerce", obj1, obj2) return pair(obj1, obj2).union() # reasonable enough + def getitem((obj1, obj2)): + return s_ImpossibleValue + add = sub = mul = truediv = floordiv = div = mod = getitem + lshift = rshift = and_ = or_ = xor = delitem = getitem + + def setitem((obj1, obj2), _): + return s_ImpossibleValue + # approximation of an annotation intersection, the result should be the annotation obj or # the intersection of obj and improvement def improve((obj, improvement)): @@ -466,7 +447,6 @@ SomeUnicodeString))): raise AnnotatorError( "string formatting mixing strings and unicode not supported") - getbookkeeper().count('strformat', s_string, s_tuple) no_nul = s_string.no_nul for s_item in s_tuple.items: if isinstance(s_item, SomeFloat): @@ -484,7 +464,6 @@ pairtype(SomeUnicodeString, SomeObject)): def mod((s_string, args)): - getbookkeeper().count('strformat', s_string, args) return s_string.__class__() class __extend__(pairtype(SomeFloat, SomeFloat)): @@ -586,19 +565,16 @@ return [KeyError] def getitem((dic1, obj2)): - getbookkeeper().count("dict_getitem", dic1) dic1.dictdef.generalize_key(obj2) return dic1.dictdef.read_value() getitem.can_only_throw = _can_only_throw def setitem((dic1, obj2), s_value): - getbookkeeper().count("dict_setitem", dic1) dic1.dictdef.generalize_key(obj2) dic1.dictdef.generalize_value(s_value) setitem.can_only_throw = _can_only_throw def delitem((dic1, obj2)): - getbookkeeper().count("dict_delitem", dic1) dic1.dictdef.generalize_key(obj2) delitem.can_only_throw = _can_only_throw @@ -612,7 +588,6 @@ except IndexError: return s_ImpossibleValue else: - getbookkeeper().count("tuple_random_getitem", tup1) return unionof(*tup1.items) getitem.can_only_throw = [IndexError] @@ -623,74 +598,63 @@ return lst1.listdef.offspring() def getitem((lst1, int2)): - getbookkeeper().count("list_getitem", 
int2) return lst1.listdef.read_item() getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((lst1, int2)): - getbookkeeper().count("list_getitem", int2) return lst1.listdef.read_item() getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def setitem((lst1, int2), s_value): - getbookkeeper().count("list_setitem", int2) lst1.listdef.mutate() lst1.listdef.generalize(s_value) setitem.can_only_throw = [IndexError] def delitem((lst1, int2)): - getbookkeeper().count("list_delitem", int2) lst1.listdef.resize() delitem.can_only_throw = [IndexError] class __extend__(pairtype(SomeString, SomeInteger)): def getitem((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeChar(no_nul=str1.no_nul) getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeChar(no_nul=str1.no_nul) getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def mul((str1, int2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str1, int2) return SomeString(no_nul=str1.no_nul) class __extend__(pairtype(SomeUnicodeString, SomeInteger)): def getitem((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeUnicodeCodePoint() getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeUnicodeCodePoint() getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def mul((str1, int2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str1, int2) return SomeUnicodeString() class __extend__(pairtype(SomeInteger, SomeString), pairtype(SomeInteger, SomeUnicodeString)): def mul((int1, str2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str2, int1) return str2.basestringclass() class __extend__(pairtype(SomeUnicodeCodePoint, SomeUnicodeString), diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -25,112 +25,6 @@ from rpython.rtyper import extregistry -class Stats(object): - - def __init__(self, bookkeeper): - self.bookkeeper = bookkeeper - self.classify = {} - - def count(self, category, *args): - for_category = self.classify.setdefault(category, {}) - classifier = getattr(self, 'consider_%s' % category, self.consider_generic) - outcome = classifier(*args) - for_category[self.bookkeeper.position_key] = outcome - - def indexrepr(self, idx): - if idx.is_constant(): - if idx.const is None: - return '' - if isinstance(idx, SomeInteger): - if idx.const >=0: - return 'pos-constant' - else: - return 'Neg-constant' - return idx.const - else: - if isinstance(idx, SomeInteger): - if idx.nonneg: - return "non-neg" - else: - return "MAYBE-NEG" - else: - return self.typerepr(idx) - - def steprepr(self, stp): - if stp.is_constant(): - if stp.const in (1, None): - return 'step=1' - else: - return 'step=%s?' 
% stp.const - else: - return 'non-const-step %s' % self.typerepr(stp) - - def consider_generic(self, *args): - return tuple([self.typerepr(x) for x in args]) - - def consider_list_list_eq(self, obj1, obj2): - return obj1, obj2 - - def consider_contains(self, seq): - return seq - - def consider_non_int_eq(self, obj1, obj2): - if obj1.knowntype == obj2.knowntype == list: - self.count("list_list_eq", obj1, obj2) - return self.typerepr(obj1), self.typerepr(obj2) - - def consider_non_int_comp(self, obj1, obj2): - return self.typerepr(obj1), self.typerepr(obj2) - - def typerepr(self, obj): - if isinstance(obj, SomeInstance): - return obj.classdef.name - else: - return obj.knowntype.__name__ - - def consider_tuple_random_getitem(self, tup): - return tuple([self.typerepr(x) for x in tup.items]) - - def consider_list_index(self): - return '!' - - def consider_list_getitem(self, idx): - return self.indexrepr(idx) - - def consider_list_setitem(self, idx): - return self.indexrepr(idx) - - def consider_list_delitem(self, idx): - return self.indexrepr(idx) - - def consider_str_join(self, s): - if s.is_constant(): - return repr(s.const) - else: - return "NON-CONSTANT" - - def consider_str_getitem(self, idx): - return self.indexrepr(idx) - - def consider_strformat(self, str, args): - if str.is_constant(): - s = repr(str.const) - else: - s = "?!!!!!!" - if isinstance(args, SomeTuple): - return (s, tuple([self.typerepr(x) for x in args.items])) - else: - return (s, self.typerepr(args)) - - def consider_dict_getitem(self, dic): - return dic - - def consider_dict_setitem(self, dic): - return dic - - def consider_dict_delitem(self, dic): - return dic - class Bookkeeper(object): """The log of choices that have been made while analysing the operations. It ensures that the same 'choice objects' will be returned if we ask @@ -165,13 +59,8 @@ self.needs_generic_instantiate = {} - self.stats = Stats(self) - delayed_imports() - def count(self, category, *args): - self.stats.count(category, *args) - def enter(self, position_key): """Start of an operation. 
The operation is uniquely identified by the given key.""" diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -347,9 +347,6 @@ def test(*args): return s_Bool -def import_func(*args): - return SomeObject() - # collect all functions import __builtin__ BUILTIN_ANALYZERS = {} @@ -397,9 +394,6 @@ else: BUILTIN_ANALYZERS[object.__init__] = object_init -# import -BUILTIN_ANALYZERS[__import__] = import_func - # annotation of low-level types from rpython.annotator.model import SomePtr from rpython.rtyper.lltypesystem import lltype diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -799,21 +799,6 @@ assert 0, "couldn't get to commonbase of %r and %r" % (cls1, cls2) -def missing_operation(cls, name): - def default_op(*args): - if args and isinstance(args[0], tuple): - flattened = tuple(args[0]) + args[1:] - else: - flattened = args - for arg in flattened: - if arg.__class__ is SomeObject and arg.knowntype is not type: - return SomeObject() - bookkeeper = rpython.annotator.bookkeeper.getbookkeeper() - bookkeeper.warning("no precise annotation supplied for %s%r" % (name, args)) - return s_ImpossibleValue - setattr(cls, name, default_op) - - class HarmlesslyBlocked(Exception): """Raised by the unaryop/binaryop to signal a harmless kind of BlockedInference: the current block is blocked, but not in a way diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -14,7 +14,8 @@ from rpython.rlib.rarithmetic import r_uint, base_int, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import objectmodel -from rpython.flowspace.objspace import build_flow, FlowingError +from rpython.flowspace.objspace import build_flow +from rpython.flowspace.flowcontext import FlowingError from rpython.flowspace.operation import op from rpython.translator.test import snippet diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -5,11 +5,12 @@ from __future__ import absolute_import from types import MethodType +from rpython.flowspace.operation import op from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, SomePBC, SomeTypedAddressAccess, SomeAddress, SomeType, s_ImpossibleValue, - s_Bool, s_None, unionof, missing_operation, add_knowntypedata, + s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin @@ -20,17 +21,8 @@ def immutablevalue(x): return getbookkeeper().immutablevalue(x) -UNARY_OPERATIONS = set(['len', 'bool', 'getattr', 'setattr', 'delattr', - 'simple_call', 'call_args', 'str', 'repr', - 'iter', 'next', 'invert', 'type', 'issubtype', - 'pos', 'neg', 'abs', 'hex', 'oct', - 'ord', 'int', 'float', 'long', - 'hash', 'id', # <== not supported any more - 'getslice', 'setslice', 'delslice', - 'neg_ovf', 'abs_ovf', 'hint', 'unicode', 'unichr']) - -for opname in UNARY_OPERATIONS: - missing_operation(SomeObject, opname) +UNARY_OPERATIONS = set([oper.opname for oper in 
op.__dict__.values() + if oper.dispatch == 1]) class __extend__(SomeObject): @@ -84,23 +76,18 @@ raise AnnotatorError("cannot use hash() in RPython") def str(self): - getbookkeeper().count('str', self) return SomeString() def unicode(self): - getbookkeeper().count('unicode', self) return SomeUnicodeString() def repr(self): - getbookkeeper().count('repr', self) return SomeString() def hex(self): - getbookkeeper().count('hex', self) return SomeString() def oct(self): - getbookkeeper().count('oct', self) return SomeString() def id(self): @@ -144,6 +131,9 @@ raise AnnotatorError("Cannot find attribute %r on %r" % (attr, self)) getattr.can_only_throw = [] + def setattr(self, *args): + return s_ImpossibleValue + def bind_callables_under(self, classdef, name): return self # default unbound __get__ implementation @@ -163,6 +153,20 @@ def hint(self, *args_s): return self + def getslice(self, *args): + return s_ImpossibleValue + + def setslice(self, *args): + return s_ImpossibleValue + + def delslice(self, *args): + return s_ImpossibleValue + + def pos(self): + return s_ImpossibleValue + neg = abs = ord = invert = long = iter = next = pos + + class __extend__(SomeFloat): def pos(self): @@ -237,7 +241,6 @@ return immutablevalue(len(self.items)) def iter(self): - getbookkeeper().count("tuple_iter", self) return SomeIterator(self) iter.can_only_throw = [] @@ -281,7 +284,6 @@ method_pop.can_only_throw = [IndexError] def method_index(self, s_value): - getbookkeeper().count("list_index") self.listdef.generalize(s_value) return SomeInteger(nonneg=True) @@ -472,7 +474,6 @@ def method_join(self, s_list): if s_None.contains(s_list): return SomeImpossibleValue() - getbookkeeper().count("str_join", self) s_item = s_list.listdef.read_item() if s_None.contains(s_item): if isinstance(self, SomeUnicodeString): @@ -489,7 +490,6 @@ return self.basecharclass() def method_split(self, patt, max=-1): - getbookkeeper().count("str_split", self, patt) if max == -1 and patt.is_constant() and patt.const == "\0": no_nul = True else: @@ -498,7 +498,6 @@ return getbookkeeper().newlist(s_item) def method_rsplit(self, patt, max=-1): - getbookkeeper().count("str_rsplit", self, patt) s_item = self.basestringclass(no_nul=self.no_nul) return getbookkeeper().newlist(s_item) @@ -709,8 +708,6 @@ if self.s_self is not None: return self.analyser(self.s_self, *args) else: - if self.methodname: - getbookkeeper().count(self.methodname.replace('.', '_'), *args) return self.analyser(*args) simple_call.can_only_throw = _can_only_throw diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -1,12 +1,14 @@ -"""Implements the core parts of flow graph creation, in tandem -with rpython.flowspace.objspace. +"""Implements the core parts of flow graph creation. 
""" import sys import collections +import types +import __builtin__ from rpython.tool.error import source_lines from rpython.tool.stdlib_opcode import host_bytecode_spec +from rpython.rlib import rstackovf from rpython.flowspace.argument import CallSpec from rpython.flowspace.model import (Constant, Variable, Block, Link, c_last_exception, const, FSException) @@ -14,17 +16,19 @@ recursively_flatten) from rpython.flowspace.specialcase import (rpython_print_item, rpython_print_newline) +from rpython.flowspace.operation import op +w_None = const(None) class FlowingError(Exception): """ Signals invalid RPython in the function being analysed""" - frame = None + ctx = None def __str__(self): msg = ["\n"] msg += map(str, self.args) msg += [""] - msg += source_lines(self.frame.graph, None, offset=self.frame.last_instr) + msg += source_lines(self.ctx.graph, None, offset=self.ctx.last_instr) return "\n".join(msg) class StopFlowing(Exception): @@ -111,7 +115,7 @@ def append(self, operation): raise NotImplementedError - def guessbool(self, frame, w_condition): + def guessbool(self, ctx, w_condition): raise AssertionError("cannot guessbool(%s)" % (w_condition,)) @@ -127,13 +131,13 @@ def append(self, operation): self.crnt_block.operations.append(operation) - def guessbool(self, frame, w_condition): + def guessbool(self, ctx, w_condition): block = self.crnt_block vars = block.getvariables() links = [] for case in [False, True]: egg = EggBlock(vars, block, case) - frame.pendingblocks.append(egg) + ctx.pendingblocks.append(egg) link = Link(vars, egg, case) links.append(link) @@ -145,7 +149,7 @@ # block.exits[True] = ifLink. raise StopFlowing - def guessexception(self, frame, *cases): + def guessexception(self, ctx, *cases): block = self.crnt_block bvars = vars = vars2 = block.getvariables() links = [] @@ -162,7 +166,7 @@ vars.extend([last_exc, last_exc_value]) vars2.extend([Variable(), Variable()]) egg = EggBlock(vars2, block, case) - frame.pendingblocks.append(egg) + ctx.pendingblocks.append(egg) link = Link(vars, egg, case) if case is not None: link.extravars(last_exception=last_exc, last_exc_value=last_exc_value) @@ -193,14 +197,14 @@ [str(s) for s in self.listtoreplay[self.index:]])) self.index += 1 - def guessbool(self, frame, w_condition): + def guessbool(self, ctx, w_condition): assert self.index == len(self.listtoreplay) - frame.recorder = self.nextreplayer + ctx.recorder = self.nextreplayer return self.booloutcome - def guessexception(self, frame, *classes): + def guessexception(self, ctx, *classes): assert self.index == len(self.listtoreplay) - frame.recorder = self.nextreplayer + ctx.recorder = self.nextreplayer outcome = self.booloutcome if outcome is not None: egg = self.nextreplayer.crnt_block @@ -213,60 +217,55 @@ # ____________________________________________________________ _unary_ops = [ - ('UNARY_POSITIVE', "pos"), - ('UNARY_NEGATIVE', "neg"), - ('UNARY_NOT', "not_"), - ('UNARY_CONVERT', "repr"), - ('UNARY_INVERT', "invert"), + ('UNARY_POSITIVE', op.pos), + ('UNARY_NEGATIVE', op.neg), + ('UNARY_CONVERT', op.repr), + ('UNARY_INVERT', op.invert), ] -def unaryoperation(OPCODE, op): +def unaryoperation(OPCODE, operation): def UNARY_OP(self, *ignored): - operation = getattr(self.space, op) w_1 = self.popvalue() - w_result = operation(w_1) + w_result = operation(w_1).eval(self) self.pushvalue(w_result) - UNARY_OP.unaryop = op UNARY_OP.func_name = OPCODE return UNARY_OP _binary_ops = [ - ('BINARY_MULTIPLY', "mul"), - ('BINARY_TRUE_DIVIDE', "truediv"), - ('BINARY_FLOOR_DIVIDE', "floordiv"), - 
('BINARY_DIVIDE', "div"), - ('BINARY_MODULO', "mod"), - ('BINARY_ADD', "add"), - ('BINARY_SUBTRACT', "sub"), - ('BINARY_SUBSCR', "getitem"), - ('BINARY_LSHIFT', "lshift"), - ('BINARY_RSHIFT', "rshift"), - ('BINARY_AND', "and_"), - ('BINARY_XOR', "xor"), - ('BINARY_OR', "or_"), - ('INPLACE_MULTIPLY', "inplace_mul"), - ('INPLACE_TRUE_DIVIDE', "inplace_truediv"), - ('INPLACE_FLOOR_DIVIDE', "inplace_floordiv"), - ('INPLACE_DIVIDE', "inplace_div"), - ('INPLACE_MODULO', "inplace_mod"), - ('INPLACE_ADD', "inplace_add"), - ('INPLACE_SUBTRACT', "inplace_sub"), - ('INPLACE_LSHIFT', "inplace_lshift"), - ('INPLACE_RSHIFT', "inplace_rshift"), - ('INPLACE_AND', "inplace_and"), - ('INPLACE_XOR', "inplace_xor"), - ('INPLACE_OR', "inplace_or"), + ('BINARY_MULTIPLY', op.mul), + ('BINARY_TRUE_DIVIDE', op.truediv), + ('BINARY_FLOOR_DIVIDE', op.floordiv), + ('BINARY_DIVIDE', op.div), + ('BINARY_MODULO', op.mod), + ('BINARY_ADD', op.add), + ('BINARY_SUBTRACT', op.sub), + ('BINARY_SUBSCR', op.getitem), + ('BINARY_LSHIFT', op.lshift), + ('BINARY_RSHIFT', op.rshift), + ('BINARY_AND', op.and_), + ('BINARY_XOR', op.xor), + ('BINARY_OR', op.or_), + ('INPLACE_MULTIPLY', op.inplace_mul), + ('INPLACE_TRUE_DIVIDE', op.inplace_truediv), + ('INPLACE_FLOOR_DIVIDE', op.inplace_floordiv), + ('INPLACE_DIVIDE', op.inplace_div), + ('INPLACE_MODULO', op.inplace_mod), + ('INPLACE_ADD', op.inplace_add), + ('INPLACE_SUBTRACT', op.inplace_sub), + ('INPLACE_LSHIFT', op.inplace_lshift), + ('INPLACE_RSHIFT', op.inplace_rshift), + ('INPLACE_AND', op.inplace_and), + ('INPLACE_XOR', op.inplace_xor), + ('INPLACE_OR', op.inplace_or), ] -def binaryoperation(OPCODE, op): +def binaryoperation(OPCODE, operation): """NOT_RPYTHON""" - def BINARY_OP(self, *ignored): - operation = getattr(self.space, op) + def BINARY_OP(self, _): w_2 = self.popvalue() w_1 = self.popvalue() - w_result = operation(w_1, w_2) + w_result = operation(w_1, w_2).eval(self) self.pushvalue(w_result) - BINARY_OP.binop = op BINARY_OP.func_name = OPCODE return BINARY_OP @@ -305,14 +304,13 @@ "cmp_exc_match", ] -class FlowSpaceFrame(object): +class FlowContext(object): opcode_method_names = host_bytecode_spec.method_names - def __init__(self, space, graph, code): + def __init__(self, graph, code): self.graph = graph func = graph.func self.pycode = code - self.space = space self.w_globals = Constant(func.func_globals) self.blockstack = [] @@ -321,7 +319,6 @@ self.last_instr = 0 self.init_locals_stack(code) - self.w_locals = None # XXX: only for compatibility with PyFrame self.joinpoints = {} @@ -403,7 +400,7 @@ return FrameState(data, self.blockstack[:], next_pos) def setstate(self, state): - """ Reset the frame to the given state. """ + """ Reset the context to the given frame state. 
""" data = state.mergeable[:] recursively_unflatten(data) self.restore_locals_stack(data[:-2]) # Nones == undefined locals @@ -476,7 +473,7 @@ except Raise as e: w_exc = e.w_exc - if w_exc.w_type == self.space.w_ImportError: + if w_exc.w_type == const(ImportError): msg = 'import statement always raises %s' % e raise ImportError(msg) link = Link([w_exc.w_type, w_exc.w_value], self.graph.exceptblock) @@ -491,8 +488,8 @@ self.recorder.crnt_block.closeblock(link) except FlowingError as exc: - if exc.frame is None: - exc.frame = self + if exc.ctx is None: + exc.ctx = self raise self.recorder = None @@ -576,6 +573,11 @@ def getname_w(self, index): return Constant(self.pycode.names[index]) + def appcall(self, func, *args_w): + """Call an app-level RPython function directly""" + w_func = const(func) + return self.do_op(op.simple_call(w_func, *args_w)) + def BAD_OPCODE(self, _): raise FlowingError("This operation is not RPython") @@ -585,38 +587,67 @@ def CONTINUE_LOOP(self, startofloop): raise Continue(startofloop) + def not_(self, w_obj): + w_bool = op.bool(w_obj).eval(self) + return const(not self.guessbool(w_bool)) + + def UNARY_NOT(self, _): + w_obj = self.popvalue() + self.pushvalue(self.not_(w_obj)) + def cmp_lt(self, w_1, w_2): - return self.space.lt(w_1, w_2) + return op.lt(w_1, w_2).eval(self) def cmp_le(self, w_1, w_2): - return self.space.le(w_1, w_2) + return op.le(w_1, w_2).eval(self) def cmp_eq(self, w_1, w_2): - return self.space.eq(w_1, w_2) + return op.eq(w_1, w_2).eval(self) def cmp_ne(self, w_1, w_2): - return self.space.ne(w_1, w_2) + return op.ne(w_1, w_2).eval(self) def cmp_gt(self, w_1, w_2): - return self.space.gt(w_1, w_2) + return op.gt(w_1, w_2).eval(self) def cmp_ge(self, w_1, w_2): - return self.space.ge(w_1, w_2) + return op.ge(w_1, w_2).eval(self) def cmp_in(self, w_1, w_2): - return self.space.contains(w_2, w_1) + return op.contains(w_2, w_1).eval(self) def cmp_not_in(self, w_1, w_2): - return self.space.not_(self.space.contains(w_2, w_1)) + return self.not_(self.cmp_in(w_1, w_2)) def cmp_is(self, w_1, w_2): - return self.space.is_(w_1, w_2) + return op.is_(w_1, w_2).eval(self) def cmp_is_not(self, w_1, w_2): - return self.space.not_(self.space.is_(w_1, w_2)) + return self.not_(op.is_(w_1, w_2).eval(self)) + + def exception_match(self, w_exc_type, w_check_class): + """Checks if the given exception type matches 'w_check_class'.""" + if not isinstance(w_check_class, Constant): + raise FlowingError("Non-constant except guard.") + check_class = w_check_class.value + if check_class in (NotImplementedError, AssertionError): + raise FlowingError( + "Catching %s is not valid in RPython" % check_class.__name__) + if not isinstance(check_class, tuple): + # the simple case + return self.guessbool(op.issubtype(w_exc_type, w_check_class).eval(self)) + # special case for StackOverflow (see rlib/rstackovf.py) + if check_class == rstackovf.StackOverflow: + w_real_class = const(rstackovf._StackOverflow) + return self.guessbool(op.issubtype(w_exc_type, w_real_class).eval(self)) + # checking a tuple of classes + for klass in w_check_class.value: + if self.exception_match(w_exc_type, const(klass)): + return True + return False def cmp_exc_match(self, w_1, w_2): - return self.space.newbool(self.space.exception_match(w_1, w_2)) + return const(self.exception_match(w_1, w_2)) def COMPARE_OP(self, testnum): w_2 = self.popvalue() @@ -624,8 +655,37 @@ w_result = getattr(self, compare_method[testnum])(w_1, w_2) self.pushvalue(w_result) + def exc_from_raise(self, w_arg1, w_arg2): + """ + Create 
a wrapped exception from the arguments of a raise statement. + + Returns an FSException object whose w_value is an instance of w_type. + """ + w_is_type = op.simple_call(const(isinstance), w_arg1, const(type)).eval(self) + if self.guessbool(w_is_type): + # this is for all cases of the form (Class, something) + if self.guessbool(op.is_(w_arg2, w_None).eval(self)): + # raise Type: we assume we have to instantiate Type + w_value = op.simple_call(w_arg1).eval(self) + else: + w_valuetype = op.type(w_arg2).eval(self) + if self.guessbool(op.issubtype(w_valuetype, w_arg1).eval(self)): + # raise Type, Instance: let etype be the exact type of value + w_value = w_arg2 + else: + # raise Type, X: assume X is the constructor argument + w_value = op.simple_call(w_arg1, w_arg2).eval(self) + else: + # the only case left here is (inst, None), from a 'raise inst'. + if not self.guessbool(op.is_(w_arg2, const(None)).eval(self)): + exc = TypeError("instance exception may not have a " + "separate value") + raise Raise(const(exc)) + w_value = w_arg1 + w_type = op.type(w_value).eval(self) + return FSException(w_type, w_value) + def RAISE_VARARGS(self, nbargs): - space = self.space if nbargs == 0: if self.last_exception is not None: w_exc = self.last_exception @@ -639,28 +699,40 @@ if nbargs >= 2: w_value = self.popvalue() w_type = self.popvalue() - operror = space.exc_from_raise(w_type, w_value) + operror = self.exc_from_raise(w_type, w_value) else: w_type = self.popvalue() - if isinstance(w_type, FSException): - operror = w_type - else: - operror = space.exc_from_raise(w_type, space.w_None) + operror = self.exc_from_raise(w_type, w_None) raise Raise(operror) + def import_name(self, name, glob=None, loc=None, frm=None, level=-1): + try: + mod = __import__(name, glob, loc, frm, level) + except ImportError as e: + raise Raise(const(e)) + return const(mod) + def IMPORT_NAME(self, nameindex): - space = self.space modulename = self.getname_u(nameindex) glob = self.w_globals.value fromlist = self.popvalue().value level = self.popvalue().value - w_obj = space.import_name(modulename, glob, None, fromlist, level) + w_obj = self.import_name(modulename, glob, None, fromlist, level) self.pushvalue(w_obj) + def import_from(self, w_module, w_name): + assert isinstance(w_module, Constant) + assert isinstance(w_name, Constant) + try: + return op.getattr(w_module, w_name).eval(self) + except FlowingError: + exc = ImportError("cannot import name '%s'" % w_name.value) + raise Raise(const(exc)) + def IMPORT_FROM(self, nameindex): w_name = self.getname_w(nameindex) w_module = self.peekvalue() - self.pushvalue(self.space.import_from(w_module, w_name)) + self.pushvalue(self.import_from(w_module, w_name)) def RETURN_VALUE(self, oparg): w_returnvalue = self.popvalue() @@ -677,7 +749,7 @@ # item (unlike CPython which can have 1, 2 or 3 items): # [subclass of FlowSignal] w_top = self.popvalue() - if w_top == self.space.w_None: + if w_top == w_None: # finally: block with no unroller active return elif isinstance(w_top, FlowSignal): @@ -699,7 +771,7 @@ def YIELD_VALUE(self, _): assert self.pycode.is_generator w_result = self.popvalue() - self.space.yield_(w_result) + op.yield_(w_result).eval(self) # XXX yield expressions not supported. This will blow up if the value # isn't popped straightaway. 
self.pushvalue(None) @@ -710,11 +782,11 @@ def PRINT_ITEM(self, oparg): w_item = self.popvalue() - w_s = self.space.str(w_item) - self.space.appcall(rpython_print_item, w_s) + w_s = op.str(w_item).eval(self) + self.appcall(rpython_print_item, w_s) def PRINT_NEWLINE(self, oparg): - self.space.appcall(rpython_print_newline) + self.appcall(rpython_print_newline) def JUMP_FORWARD(self, target): return target @@ -722,34 +794,35 @@ def JUMP_IF_FALSE(self, target): # Python <= 2.6 only w_cond = self.peekvalue() - if not self.guessbool(self.space.bool(w_cond)): + if not self.guessbool(op.bool(w_cond).eval(self)): return target def JUMP_IF_TRUE(self, target): # Python <= 2.6 only w_cond = self.peekvalue() - if self.guessbool(self.space.bool(w_cond)): + if self.guessbool(op.bool(w_cond).eval(self)): return target def POP_JUMP_IF_FALSE(self, target): w_value = self.popvalue() - if not self.guessbool(self.space.bool(w_value)): + if not self.guessbool(op.bool(w_value).eval(self)): return target def POP_JUMP_IF_TRUE(self, target): w_value = self.popvalue() - if self.guessbool(self.space.bool(w_value)): + if self.guessbool(op.bool(w_value).eval(self)): return target def JUMP_IF_FALSE_OR_POP(self, target): w_value = self.peekvalue() - if not self.guessbool(self.space.bool(w_value)): + if not self.guessbool(op.bool(w_value).eval(self)): return target self.popvalue() def JUMP_IF_TRUE_OR_POP(self, target): w_value = self.peekvalue() - if self.guessbool(self.space.bool(w_value)): + if self.guessbool(op.bool(w_value).eval(self)): + return target return target self.popvalue() @@ -758,23 +831,20 @@ def GET_ITER(self, oparg): w_iterable = self.popvalue() - w_iterator = self.space.iter(w_iterable) + w_iterator = op.iter(w_iterable).eval(self) self.pushvalue(w_iterator) def FOR_ITER(self, target): w_iterator = self.peekvalue() try: - w_nextitem = self.space.next(w_iterator) + w_nextitem = op.next(w_iterator).eval(self) + self.pushvalue(w_nextitem) except Raise as e: - w_exc = e.w_exc - if not self.space.exception_match(w_exc.w_type, - self.space.w_StopIteration): + if self.exception_match(e.w_exc.w_type, const(StopIteration)): + self.popvalue() + return target + else: raise - # iterator exhausted - self.popvalue() - return target - else: - self.pushvalue(w_nextitem) def SETUP_LOOP(self, target): block = LoopBlock(self, target) @@ -793,9 +863,10 @@ # directly call manager.__enter__(), don't use special lookup functions # which don't make sense on the RPython type system. w_manager = self.peekvalue() - w_exit = self.space.getattr(w_manager, const("__exit__")) + w_exit = op.getattr(w_manager, const("__exit__")).eval(self) self.settopvalue(w_exit) - w_result = self.space.call_method(w_manager, "__enter__") + w_enter = op.getattr(w_manager, const('__enter__')).eval(self) + w_result = op.simple_call(w_enter).eval(self) block = WithBlock(self, target) self.blockstack.append(block) self.pushvalue(w_result) @@ -812,15 +883,14 @@ w_exitfunc = self.popvalue() unroller = self.peekvalue(0) - w_None = self.space.w_None if isinstance(unroller, Raise): w_exc = unroller.w_exc # The annotator won't allow to merge exception types with None. # Replace it with the exception value... 
- self.space.call_function(w_exitfunc, - w_exc.w_value, w_exc.w_value, w_None) + op.simple_call(w_exitfunc, w_exc.w_value, w_exc.w_value, w_None + ).eval(self) else: - self.space.call_function(w_exitfunc, w_None, w_None, w_None) + op.simple_call(w_exitfunc, w_None, w_None, w_None).eval(self) def LOAD_FAST(self, varindex): w_value = self.locals_stack_w[varindex] @@ -832,8 +902,19 @@ w_const = self.getconstant_w(constindex) self.pushvalue(w_const) + def find_global(self, w_globals, varname): + try: + value = w_globals.value[varname] + except KeyError: + # not in the globals, now look in the built-ins + try: + value = getattr(__builtin__, varname) + except AttributeError: + raise FlowingError("global name '%s' is not defined" % varname) + return const(value) + def LOAD_GLOBAL(self, nameindex): - w_result = self.space.find_global(self.w_globals, self.getname_u(nameindex)) + w_result = self.find_global(self.w_globals, self.getname_u(nameindex)) self.pushvalue(w_result) LOAD_NAME = LOAD_GLOBAL @@ -841,7 +922,7 @@ "obj.attributename" w_obj = self.popvalue() w_attributename = self.getname_w(nameindex) - w_value = self.space.getattr(w_obj, w_attributename) + w_value = op.getattr(w_obj, w_attributename).eval(self) self.pushvalue(w_value) LOOKUP_METHOD = LOAD_ATTR @@ -917,7 +998,7 @@ # This opcode was added with pypy-1.8. Here is a simpler # version, enough for annotation. last_val = self.popvalue() - self.pushvalue(self.space.newlist()) + self.pushvalue(op.newlist().eval(self)) self.pushvalue(last_val) def call_function(self, oparg, w_star=None, w_starstar=None): @@ -934,8 +1015,12 @@ arguments = self.popvalues(n_arguments) args = CallSpec(arguments, keywords, w_star) w_function = self.popvalue() - w_result = self.space.call(w_function, args) - self.pushvalue(w_result) + if args.keywords or isinstance(args.w_stararg, Variable): + shape, args_w = args.flatten() + hlop = op.call_args(w_function, Constant(shape), *args_w) + else: + hlop = op.simple_call(w_function, *args.as_list()) + self.pushvalue(hlop.eval(self)) def CALL_FUNCTION(self, oparg): self.call_function(oparg) @@ -954,10 +1039,20 @@ w_varargs = self.popvalue() self.call_function(oparg, w_varargs, w_varkw) + def newfunction(self, w_code, defaults_w): + if not all(isinstance(value, Constant) for value in defaults_w): + raise FlowingError("Dynamically created function must" + " have constant default values.") + code = w_code.value + globals = self.w_globals.value + defaults = tuple([default.value for default in defaults_w]) + fn = types.FunctionType(code, globals, code.co_name, defaults) + return Constant(fn) + def MAKE_FUNCTION(self, numdefaults): w_codeobj = self.popvalue() defaults = self.popvalues(numdefaults) - fn = self.space.newfunction(w_codeobj, self.w_globals, defaults) + fn = self.newfunction(w_codeobj, defaults) self.pushvalue(fn) def STORE_ATTR(self, nameindex): @@ -965,29 +1060,38 @@ w_attributename = self.getname_w(nameindex) w_obj = self.popvalue() w_newvalue = self.popvalue() - self.space.setattr(w_obj, w_attributename, w_newvalue) + op.setattr(w_obj, w_attributename, w_newvalue).eval(self) + + def unpack_sequence(self, w_iterable, expected_length): + w_len = op.len(w_iterable).eval(self) + w_correct = op.eq(w_len, const(expected_length)).eval(self) + if not self.guessbool(op.bool(w_correct).eval(self)): + w_exc = self.exc_from_raise(const(ValueError), const(None)) + raise Raise(w_exc) + return [op.getitem(w_iterable, const(i)).eval(self) + for i in range(expected_length)] def UNPACK_SEQUENCE(self, itemcount): w_iterable = 
self.popvalue() - items = self.space.unpack_sequence(w_iterable, itemcount) + items = self.unpack_sequence(w_iterable, itemcount) for w_item in reversed(items): self.pushvalue(w_item) def slice(self, w_start, w_end): w_obj = self.popvalue() - w_result = self.space.getslice(w_obj, w_start, w_end) + w_result = op.getslice(w_obj, w_start, w_end).eval(self) self.pushvalue(w_result) def SLICE_0(self, oparg): - self.slice(self.space.w_None, self.space.w_None) + self.slice(w_None, w_None) def SLICE_1(self, oparg): w_start = self.popvalue() - self.slice(w_start, self.space.w_None) + self.slice(w_start, w_None) def SLICE_2(self, oparg): w_end = self.popvalue() - self.slice(self.space.w_None, w_end) + self.slice(w_None, w_end) def SLICE_3(self, oparg): w_end = self.popvalue() @@ -997,18 +1101,18 @@ def storeslice(self, w_start, w_end): w_obj = self.popvalue() w_newvalue = self.popvalue() - self.space.setslice(w_obj, w_start, w_end, w_newvalue) + op.setslice(w_obj, w_start, w_end, w_newvalue).eval(self) def STORE_SLICE_0(self, oparg): - self.storeslice(self.space.w_None, self.space.w_None) + self.storeslice(w_None, w_None) def STORE_SLICE_1(self, oparg): w_start = self.popvalue() - self.storeslice(w_start, self.space.w_None) + self.storeslice(w_start, w_None) def STORE_SLICE_2(self, oparg): w_end = self.popvalue() - self.storeslice(self.space.w_None, w_end) + self.storeslice(w_None, w_end) def STORE_SLICE_3(self, oparg): w_end = self.popvalue() @@ -1017,18 +1121,18 @@ def deleteslice(self, w_start, w_end): w_obj = self.popvalue() - self.space.delslice(w_obj, w_start, w_end) + op.delslice(w_obj, w_start, w_end).eval(self) def DELETE_SLICE_0(self, oparg): - self.deleteslice(self.space.w_None, self.space.w_None) + self.deleteslice(w_None, w_None) def DELETE_SLICE_1(self, oparg): w_start = self.popvalue() - self.deleteslice(w_start, self.space.w_None) + self.deleteslice(w_start, w_None) def DELETE_SLICE_2(self, oparg): w_end = self.popvalue() - self.deleteslice(self.space.w_None, w_end) + self.deleteslice(w_None, w_end) def DELETE_SLICE_3(self, oparg): w_end = self.popvalue() @@ -1036,12 +1140,13 @@ self.deleteslice(w_start, w_end) def LIST_APPEND(self, oparg): - w = self.popvalue() + w_value = self.popvalue() if sys.version_info < (2, 7): - v = self.popvalue() + w_list = self.popvalue() else: - v = self.peekvalue(oparg - 1) - self.space.call_method(v, 'append', w) + w_list = self.peekvalue(oparg - 1) + w_append_meth = op.getattr(w_list, const('append')).eval(self) + op.simple_call(w_append_meth, w_value).eval(self) def DELETE_FAST(self, varindex): if self.locals_stack_w[varindex] is None: @@ -1054,45 +1159,45 @@ w_key = self.popvalue() w_value = self.popvalue() w_dict = self.peekvalue() - self.space.setitem(w_dict, w_key, w_value) + op.setitem(w_dict, w_key, w_value).eval(self) def STORE_SUBSCR(self, oparg): "obj[subscr] = newvalue" w_subscr = self.popvalue() w_obj = self.popvalue() w_newvalue = self.popvalue() - self.space.setitem(w_obj, w_subscr, w_newvalue) + op.setitem(w_obj, w_subscr, w_newvalue).eval(self) def BUILD_SLICE(self, numargs): if numargs == 3: w_step = self.popvalue() elif numargs == 2: - w_step = self.space.w_None + w_step = w_None else: raise BytecodeCorruption w_end = self.popvalue() w_start = self.popvalue() - w_slice = self.space.newslice(w_start, w_end, w_step) + w_slice = op.newslice(w_start, w_end, w_step).eval(self) self.pushvalue(w_slice) def DELETE_SUBSCR(self, oparg): "del obj[subscr]" w_subscr = self.popvalue() w_obj = self.popvalue() - self.space.delitem(w_obj, w_subscr) + 
op.delitem(w_obj, w_subscr).eval(self) def BUILD_TUPLE(self, itemcount): items = self.popvalues(itemcount) - w_tuple = self.space.newtuple(*items) + w_tuple = op.newtuple(*items).eval(self) self.pushvalue(w_tuple) def BUILD_LIST(self, itemcount): items = self.popvalues(itemcount) - w_list = self.space.newlist(*items) + w_list = op.newlist(*items).eval(self) self.pushvalue(w_list) def BUILD_MAP(self, itemcount): - w_dict = self.space.newdict() + w_dict = op.newdict().eval(self) self.pushvalue(w_dict) def NOP(self, *args): @@ -1209,9 +1314,9 @@ """Abstract base class for frame blocks from the blockstack, used by the SETUP_XXX and POP_BLOCK opcodes.""" - def __init__(self, frame, handlerposition): + def __init__(self, ctx, handlerposition): self.handlerposition = handlerposition - self.valuestackdepth = frame.valuestackdepth + self.valuestackdepth = ctx.valuestackdepth def __eq__(self, other): return (self.__class__ is other.__class__ and @@ -1224,10 +1329,10 @@ def __hash__(self): return hash((self.handlerposition, self.valuestackdepth)) - def cleanupstack(self, frame): - frame.dropvaluesuntil(self.valuestackdepth) + def cleanupstack(self, ctx): + ctx.dropvaluesuntil(self.valuestackdepth) - def handle(self, frame, unroller): + def handle(self, ctx, unroller): raise NotImplementedError class LoopBlock(FrameBlock): @@ -1235,16 +1340,16 @@ handles = (Break, Continue) - def handle(self, frame, unroller): + def handle(self, ctx, unroller): if isinstance(unroller, Continue): # re-push the loop block without cleaning up the value stack, # and jump to the beginning of the loop, stored in the # exception's argument - frame.blockstack.append(self) + ctx.blockstack.append(self) return unroller.jump_to else: # jump to the end of the loop - self.cleanupstack(frame) + self.cleanupstack(ctx) return self.handlerposition class ExceptBlock(FrameBlock): @@ -1252,19 +1357,19 @@ handles = Raise - def handle(self, frame, unroller): + def handle(self, ctx, unroller): # push the exception to the value stack for inspection by the # exception handler (the code after the except:) - self.cleanupstack(frame) + self.cleanupstack(ctx) assert isinstance(unroller, Raise) w_exc = unroller.w_exc # the stack setup is slightly different than in CPython: # instead of the traceback, we store the unroller object, # wrapped. - frame.pushvalue(unroller) - frame.pushvalue(w_exc.w_value) - frame.pushvalue(w_exc.w_type) - frame.last_exception = w_exc + ctx.pushvalue(unroller) + ctx.pushvalue(w_exc.w_value) + ctx.pushvalue(w_exc.w_type) + ctx.last_exception = w_exc return self.handlerposition # jump to the handler class FinallyBlock(FrameBlock): @@ -1272,15 +1377,15 @@ handles = FlowSignal - def handle(self, frame, unroller): + def handle(self, ctx, unroller): # any abnormal reason for unrolling a finally: triggers the end of # the block unrolling and the entering the finally: handler. 
- self.cleanupstack(frame) - frame.pushvalue(unroller) + self.cleanupstack(ctx) + ctx.pushvalue(unroller) return self.handlerposition # jump to the handler class WithBlock(FinallyBlock): - def handle(self, frame, unroller): - return FinallyBlock.handle(self, frame, unroller) + def handle(self, ctx, unroller): + return FinallyBlock.handle(self, ctx, unroller) diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -400,6 +400,8 @@ type_with_bad_introspection = type(complex.real.__get__) def const(obj): + if hasattr(obj, "_flowspace_rewrite_directly_as_"): + obj = obj._flowspace_rewrite_directly_as_ if isinstance(obj, (Variable, Constant)): raise TypeError("already wrapped: " + repr(obj)) # method-wrapper have ill-defined comparison and introspection diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -1,25 +1,14 @@ -"""Implements the core parts of flow graph creation, in tandem -with rpython.flowspace.flowcontext. +"""Implements the main interface for flow graph creation: build_flow(). """ -import __builtin__ -import sys -import types from inspect import CO_NEWLOCALS -from rpython.flowspace.argument import CallSpec -from rpython.flowspace.model import (Constant, Variable, checkgraph, const, - FSException) +from rpython.flowspace.model import Variable, checkgraph from rpython.flowspace.bytecode import HostCode -from rpython.flowspace.operation import op, NOT_REALLY_CONST -from rpython.flowspace.flowcontext import (FlowSpaceFrame, fixeggblocks, - FlowingError, Raise) +from rpython.flowspace.flowcontext import (FlowContext, fixeggblocks) from rpython.flowspace.generator import (tweak_generator_graph, bootstrap_generator) from rpython.flowspace.pygraph import PyGraph -from rpython.flowspace.specialcase import SPECIAL_CASES -from rpython.rlib import rstackovf - def _assert_rpythonic(func): @@ -33,190 +22,7 @@ "the flag CO_NEWLOCALS set.") -# ______________________________________________________________________ -class FlowObjSpace(object): - """NOT_RPYTHON. - The flow objspace space is used to produce a flow graph by recording - the space operations that the interpreter generates when it interprets - (the bytecode of) some function. 
- """ - w_None = Constant(None) - sys = Constant(sys) - w_False = Constant(False) - w_True = Constant(True) - w_type = Constant(type) - w_tuple = Constant(tuple) - for exc in [KeyError, ValueError, IndexError, StopIteration, - AssertionError, TypeError, AttributeError, ImportError]: - clsname = exc.__name__ - locals()['w_' + clsname] = Constant(exc) - - # the following exceptions should not show up - # during flow graph construction - w_NameError = 'NameError' - w_UnboundLocalError = 'UnboundLocalError' - specialcases = SPECIAL_CASES - - def build_flow(self, func): - return build_flow(func, self) - - def newbool(self, b): - if b: - return self.w_True - else: - return self.w_False - - def newfunction(self, w_code, w_globals, defaults_w): - if not all(isinstance(value, Constant) for value in defaults_w): - raise FlowingError("Dynamically created function must" - " have constant default values.") - code = w_code.value - globals = w_globals.value - defaults = tuple([default.value for default in defaults_w]) - fn = types.FunctionType(code, globals, code.co_name, defaults) - return Constant(fn) - - def exception_match(self, w_exc_type, w_check_class): - """Checks if the given exception type matches 'w_check_class'.""" - frame = self.frame - if not isinstance(w_check_class, Constant): - raise FlowingError("Non-constant except guard.") - check_class = w_check_class.value - if check_class in (NotImplementedError, AssertionError): - raise FlowingError( - "Catching %s is not valid in RPython" % check_class.__name__) - if not isinstance(check_class, tuple): - # the simple case - return frame.guessbool(self.issubtype(w_exc_type, w_check_class)) - # special case for StackOverflow (see rlib/rstackovf.py) - if check_class == rstackovf.StackOverflow: - w_real_class = const(rstackovf._StackOverflow) - return frame.guessbool(self.issubtype(w_exc_type, w_real_class)) - # checking a tuple of classes - for klass in w_check_class.value: - if self.exception_match(w_exc_type, const(klass)): - return True - return False - - def exc_from_raise(self, w_arg1, w_arg2): - """ - Create a wrapped exception from the arguments of a raise statement. - - Returns an FSException object whose w_value is an instance of w_type. - """ - frame = self.frame - if frame.guessbool(self.call_function(const(isinstance), w_arg1, - self.w_type)): - # this is for all cases of the form (Class, something) - if frame.guessbool(self.is_(w_arg2, self.w_None)): - # raise Type: we assume we have to instantiate Type - w_value = self.call_function(w_arg1) - else: - w_valuetype = self.type(w_arg2) - if frame.guessbool(self.issubtype(w_valuetype, w_arg1)): - # raise Type, Instance: let etype be the exact type of value - w_value = w_arg2 - else: - # raise Type, X: assume X is the constructor argument - w_value = self.call_function(w_arg1, w_arg2) - else: - # the only case left here is (inst, None), from a 'raise inst'. 
- if not frame.guessbool(self.is_(w_arg2, self.w_None)): - exc = TypeError("instance exception may not have a " - "separate value") - raise Raise(const(exc)) - w_value = w_arg1 - w_type = self.type(w_value) - return FSException(w_type, w_value) - - def unpack_sequence(self, w_iterable, expected_length): - if isinstance(w_iterable, Constant): - l = list(w_iterable.value) - if len(l) != expected_length: - raise ValueError - return [const(x) for x in l] - else: - w_len = self.len(w_iterable) - w_correct = self.eq(w_len, const(expected_length)) - if not self.frame.guessbool(self.bool(w_correct)): - w_exc = self.exc_from_raise(self.w_ValueError, self.w_None) - raise Raise(w_exc) - return [self.getitem(w_iterable, const(i)) - for i in range(expected_length)] - - # ____________________________________________________________ - def not_(self, w_obj): - return const(not self.frame.guessbool(self.bool(w_obj))) - - def import_name(self, name, glob=None, loc=None, frm=None, level=-1): - try: - mod = __import__(name, glob, loc, frm, level) - except ImportError as e: - raise Raise(const(e)) - return const(mod) - - def import_from(self, w_module, w_name): - assert isinstance(w_module, Constant) - assert isinstance(w_name, Constant) - try: - return self.getattr(w_module, w_name) - except FlowingError: - exc = ImportError("cannot import name '%s'" % w_name.value) - raise Raise(const(exc)) - - def call_method(self, w_obj, methname, *arg_w): - w_meth = self.getattr(w_obj, const(methname)) - return self.call_function(w_meth, *arg_w) - - def call_function(self, w_func, *args_w): - args = CallSpec(list(args_w)) - return self.call(w_func, args) - - def appcall(self, func, *args_w): - """Call an app-level RPython function directly""" - w_func = const(func) - return op.simple_call(w_func, *args_w).eval(self.frame) - - def call(self, w_callable, args): - if isinstance(w_callable, Constant): - fn = w_callable.value - if hasattr(fn, "_flowspace_rewrite_directly_as_"): - fn = fn._flowspace_rewrite_directly_as_ - w_callable = const(fn) - try: - sc = self.specialcases[fn] # TypeError if 'fn' not hashable - except (KeyError, TypeError): - pass - else: - if args.keywords: - raise FlowingError( - "should not call %r with keyword arguments" % (fn,)) - return sc(self, *args.as_list()) - - if args.keywords or isinstance(args.w_stararg, Variable): - shape, args_w = args.flatten() - hlop = op.call_args(w_callable, Constant(shape), *args_w) - else: - hlop = op.simple_call(w_callable, *args.as_list()) - return self.frame.do_op(hlop) - - def find_global(self, w_globals, varname): - try: - value = w_globals.value[varname] - except KeyError: - # not in the globals, now look in the built-ins - try: - value = getattr(__builtin__, varname) - except AttributeError: - raise FlowingError("global name '%s' is not defined" % varname) - return const(value) - -for cls in op.__dict__.values(): - if getattr(FlowObjSpace, cls.opname, None) is None: - setattr(FlowObjSpace, cls.opname, cls.make_sc()) - - -def build_flow(func, space=FlowObjSpace()): +def build_flow(func): """ Create the flow graph for the function. 
""" @@ -231,8 +37,8 @@ w_value.rename(name) return bootstrap_generator(graph) graph = PyGraph(func, code) - frame = space.frame = FlowSpaceFrame(space, graph, code) - frame.build_flow() + ctx = FlowContext(graph, code) + ctx.build_flow() fixeggblocks(graph) checkgraph(graph) if code.is_generator: diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -1,6 +1,5 @@ """ -This module defines mappings between operation names and Python's -built-in functions (or type constructors) implementing them. +This module defines all the SpaceOeprations used in rpython.flowspace. """ import __builtin__ @@ -8,11 +7,15 @@ import operator import sys import types +from rpython.tool.pairtype import pair from rpython.rlib.unroll import unrolling_iterable, _unroller from rpython.tool.sourcetools import compile2 from rpython.flowspace.model import (Constant, WrapException, const, Variable, SpaceOperation) from rpython.flowspace.specialcase import register_flow_sc +from rpython.annotator.model import SomeTuple +from rpython.flowspace.specialcase import SPECIAL_CASES + NOT_REALLY_CONST = { Constant(sys): { @@ -39,7 +42,8 @@ } -class _OpHolder(object): pass +class _OpHolder(object): + pass op = _OpHolder() func2op = {} @@ -54,6 +58,8 @@ __metaclass__ = HLOperationMeta pure = False can_overflow = False + dispatch = None # number of arguments to dispatch on + # (None means special handling) def __init__(self, *args): self.args = list(args) @@ -70,15 +76,15 @@ @classmethod def make_sc(cls): - def sc_operator(space, *args_w): - return cls(*args_w).eval(space.frame) + def sc_operator(ctx, *args_w): + return cls(*args_w).eval(ctx) return sc_operator - def eval(self, frame): + def eval(self, ctx): result = self.constfold() if result is not None: return result - return frame.do_op(self) + return ctx.do_op(self) def constfold(self): return None @@ -125,14 +131,35 @@ class OverflowingOperation(PureOperation): can_overflow = True + def ovfchecked(self): ovf = self.ovf_variant(*self.args) ovf.offset = self.offset return ovf +class SingleDispatchMixin(object): + dispatch = 1 -def add_operator(name, arity, pyfunc=None, pure=False, ovf=False): + def consider(self, annotator, arg, *other_args): + impl = getattr(arg, self.opname) + return impl(*other_args) + +class DoubleDispatchMixin(object): + dispatch = 2 + + def consider(self, annotator, arg1, arg2, *other_args): + impl = getattr(pair(arg1, arg2), self.opname) + return impl(*other_args) + + +def add_operator(name, arity, dispatch=None, pyfunc=None, pure=False, ovf=False): operator_func = getattr(operator, name, None) + if dispatch == 1: + bases = [SingleDispatchMixin] + elif dispatch == 2: + bases = [DoubleDispatchMixin] + else: + bases = [] if ovf: assert pure base_cls = OverflowingOperation @@ -140,8 +167,10 @@ base_cls = PureOperation else: base_cls = HLOperation - cls = HLOperationMeta(name, (base_cls,), {'opname': name, 'arity': arity, - 'canraise': []}) + bases.append(base_cls) + cls = HLOperationMeta(name, tuple(bases), {'opname': name, 'arity': arity, + 'canraise': [], + 'dispatch': dispatch}) if pyfunc is not None: func2op[pyfunc] = cls if operator_func: @@ -153,7 +182,7 @@ if ovf: from rpython.rlib.rarithmetic import ovfcheck ovf_func = lambda *args: ovfcheck(cls.pyfunc(*args)) - add_operator(name + '_ovf', arity, pyfunc=ovf_func) + add_operator(name + '_ovf', arity, dispatch, pyfunc=ovf_func) cls.ovf_variant = getattr(op, name + '_ovf') # 
____________________________________________________________ @@ -260,74 +289,74 @@ raise ValueError("this is not supported") -add_operator('is_', 2, pure=True) -add_operator('id', 1, pyfunc=id) -add_operator('type', 1, pyfunc=new_style_type, pure=True) -add_operator('issubtype', 2, pyfunc=issubclass, pure=True) # not for old-style classes -add_operator('repr', 1, pyfunc=repr, pure=True) -add_operator('str', 1, pyfunc=str, pure=True) +add_operator('is_', 2, dispatch=2, pure=True) +add_operator('id', 1, dispatch=1, pyfunc=id) +add_operator('type', 1, dispatch=1, pyfunc=new_style_type, pure=True) +add_operator('issubtype', 2, dispatch=1, pyfunc=issubclass, pure=True) # not for old-style classes +add_operator('repr', 1, dispatch=1, pyfunc=repr, pure=True) +add_operator('str', 1, dispatch=1, pyfunc=str, pure=True) add_operator('format', 2, pyfunc=unsupported) -add_operator('len', 1, pyfunc=len, pure=True) -add_operator('hash', 1, pyfunc=hash) -add_operator('setattr', 3, pyfunc=setattr) -add_operator('delattr', 2, pyfunc=delattr) -add_operator('getitem', 2, pure=True) -add_operator('getitem_idx', 2, pure=True) -add_operator('getitem_key', 2, pure=True) -add_operator('getitem_idx_key', 2, pure=True) -add_operator('setitem', 3) -add_operator('delitem', 2) -add_operator('getslice', 3, pyfunc=do_getslice, pure=True) -add_operator('setslice', 4, pyfunc=do_setslice) -add_operator('delslice', 3, pyfunc=do_delslice) +add_operator('len', 1, dispatch=1, pyfunc=len, pure=True) +add_operator('hash', 1, dispatch=1, pyfunc=hash) +add_operator('setattr', 3, dispatch=1, pyfunc=setattr) +add_operator('delattr', 2, dispatch=1, pyfunc=delattr) +add_operator('getitem', 2, dispatch=2, pure=True) +add_operator('getitem_idx', 2, dispatch=2, pure=True) +add_operator('getitem_key', 2, dispatch=2, pure=True) +add_operator('getitem_idx_key', 2, dispatch=2, pure=True) +add_operator('setitem', 3, dispatch=2) +add_operator('delitem', 2, dispatch=2) +add_operator('getslice', 3, dispatch=1, pyfunc=do_getslice, pure=True) +add_operator('setslice', 4, dispatch=1, pyfunc=do_setslice) +add_operator('delslice', 3, dispatch=1, pyfunc=do_delslice) add_operator('trunc', 1, pyfunc=unsupported) -add_operator('pos', 1, pure=True) -add_operator('neg', 1, pure=True, ovf=True) -add_operator('bool', 1, pyfunc=bool, pure=True) +add_operator('pos', 1, dispatch=1, pure=True) +add_operator('neg', 1, dispatch=1, pure=True, ovf=True) +add_operator('bool', 1, dispatch=1, pyfunc=bool, pure=True) op.is_true = op.nonzero = op.bool # for llinterp -add_operator('abs', 1, pyfunc=abs, pure=True, ovf=True) -add_operator('hex', 1, pyfunc=hex, pure=True) -add_operator('oct', 1, pyfunc=oct, pure=True) -add_operator('ord', 1, pyfunc=ord, pure=True) -add_operator('invert', 1, pure=True) -add_operator('add', 2, pure=True, ovf=True) -add_operator('sub', 2, pure=True, ovf=True) -add_operator('mul', 2, pure=True, ovf=True) -add_operator('truediv', 2, pure=True) -add_operator('floordiv', 2, pure=True, ovf=True) -add_operator('div', 2, pure=True, ovf=True) -add_operator('mod', 2, pure=True, ovf=True) +add_operator('abs', 1, dispatch=1, pyfunc=abs, pure=True, ovf=True) +add_operator('hex', 1, dispatch=1, pyfunc=hex, pure=True) +add_operator('oct', 1, dispatch=1, pyfunc=oct, pure=True) +add_operator('ord', 1, dispatch=1, pyfunc=ord, pure=True) +add_operator('invert', 1, dispatch=1, pure=True) +add_operator('add', 2, dispatch=2, pure=True, ovf=True) +add_operator('sub', 2, dispatch=2, pure=True, ovf=True) +add_operator('mul', 2, dispatch=2, pure=True, ovf=True) 
+add_operator('truediv', 2, dispatch=2, pure=True) +add_operator('floordiv', 2, dispatch=2, pure=True, ovf=True) +add_operator('div', 2, dispatch=2, pure=True, ovf=True) +add_operator('mod', 2, dispatch=2, pure=True, ovf=True) add_operator('divmod', 2, pyfunc=divmod, pure=True) -add_operator('lshift', 2, pure=True, ovf=True) -add_operator('rshift', 2, pure=True) -add_operator('and_', 2, pure=True) -add_operator('or_', 2, pure=True) -add_operator('xor', 2, pure=True) -add_operator('int', 1, pyfunc=do_int, pure=True) +add_operator('lshift', 2, dispatch=2, pure=True, ovf=True) +add_operator('rshift', 2, dispatch=2, pure=True) +add_operator('and_', 2, dispatch=2, pure=True) +add_operator('or_', 2, dispatch=2, pure=True) +add_operator('xor', 2, dispatch=2, pure=True) From noreply at buildbot.pypy.org Tue Jan 21 20:55:21 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 21 Jan 2014 20:55:21 +0100 (CET) Subject: [pypy-commit] pypy default: Do some (hopefully) uncontroversiol style changes. Message-ID: <20140121195521.0DB871C33B0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68835:534c06c36a58 Date: 2014-01-21 20:55 +0100 http://bitbucket.org/pypy/pypy/changeset/534c06c36a58/ Log: Do some (hopefully) uncontroversiol style changes. diff --git a/rpython/flowspace/bytecode.py b/rpython/flowspace/bytecode.py --- a/rpython/flowspace/bytecode.py +++ b/rpython/flowspace/bytecode.py @@ -34,8 +34,8 @@ opnames = host_bytecode_spec.method_names def __init__(self, argcount, nlocals, stacksize, flags, - code, consts, names, varnames, filename, - name, firstlineno, lnotab, freevars): + code, consts, names, varnames, filename, + name, firstlineno, lnotab, freevars): """Initialize a new code object""" assert nlocals >= 0 self.co_argcount = argcount @@ -58,18 +58,18 @@ """Initialize the code object from a real (CPython) one. """ return cls(code.co_argcount, - code.co_nlocals, - code.co_stacksize, - code.co_flags, - code.co_code, - list(code.co_consts), - list(code.co_names), - list(code.co_varnames), - code.co_filename, - code.co_name, - code.co_firstlineno, - code.co_lnotab, - list(code.co_freevars)) + code.co_nlocals, + code.co_stacksize, + code.co_flags, + code.co_code, + list(code.co_consts), + list(code.co_names), + list(code.co_varnames), + code.co_filename, + code.co_name, + code.co_firstlineno, + code.co_lnotab, + list(code.co_freevars)) @property def formalargcount(self): diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -436,7 +436,7 @@ if not exceptions: return if not force and not any(isinstance(block, (ExceptBlock, FinallyBlock)) - for block in self.blockstack): + for block in self.blockstack): # The implicit exception wouldn't be caught and would later get # removed, so don't bother creating it. 
return @@ -1042,7 +1042,7 @@ def newfunction(self, w_code, defaults_w): if not all(isinstance(value, Constant) for value in defaults_w): raise FlowingError("Dynamically created function must" - " have constant default values.") + " have constant default values.") code = w_code.value globals = self.w_globals.value defaults = tuple([default.value for default in defaults_w]) @@ -1069,7 +1069,7 @@ w_exc = self.exc_from_raise(const(ValueError), const(None)) raise Raise(w_exc) return [op.getitem(w_iterable, const(i)).eval(self) - for i in range(expected_length)] + for i in range(expected_length)] def UNPACK_SEQUENCE(self, itemcount): w_iterable = self.popvalue() diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -35,17 +35,17 @@ class FunctionGraph(object): def __init__(self, name, startblock, return_var=None): - self.name = name # function name (possibly mangled already) - self.startblock = startblock + self.name = name # function name (possibly mangled already) + self.startblock = startblock # build default returnblock self.returnblock = Block([return_var or Variable()]) self.returnblock.operations = () - self.returnblock.exits = () + self.returnblock.exits = () # block corresponding to exception results self.exceptblock = Block([Variable('etype'), # exception class Variable('evalue')]) # exception value self.exceptblock.operations = () - self.exceptblock.exits = () + self.exceptblock.exits = () self.tag = None def getargs(self): @@ -187,7 +187,7 @@ self.operations = [] # list of SpaceOperation(s) self.exitswitch = None # a variable or # Constant(last_exception), see below - self.exits = [] # list of Link(s) + self.exits = [] # list of Link(s) def at(self): if self.operations and self.operations[0].offset >= 0: @@ -276,7 +276,7 @@ __slots__ = ["_name", "_nr", "concretetype"] dummyname = 'v' - namesdict = {dummyname : (dummyname, 0)} + namesdict = {dummyname: (dummyname, 0)} @property def name(self): @@ -338,7 +338,7 @@ class Constant(Hashable): __slots__ = ["concretetype"] - def __init__(self, value, concretetype = None): + def __init__(self, value, concretetype=None): Hashable.__init__(self, value) if concretetype is not None: self.concretetype = concretetype @@ -416,7 +416,7 @@ def __init__(self, opname, args, result, offset=-1): self.opname = intern(opname) # operation name - self.args = list(args) # mixed list of var/const + self.args = list(args) # mixed list of var/const self.result = result # either Variable or Constant instance self.offset = offset # offset in code string @@ -430,7 +430,7 @@ return not (self == other) def __hash__(self): - return hash((self.opname,tuple(self.args),self.result)) + return hash((self.opname, tuple(self.args), self.result)) def __repr__(self): return "%r = %s(%s)" % (self.result, self.opname, @@ -443,7 +443,7 @@ class Atom(object): def __init__(self, name): - self.__name__ = name # make save_global happy + self.__name__ = name # make save_global happy def __repr__(self): return self.__name__ @@ -470,7 +470,8 @@ try: for atom in flattenobj(*arg): yield atom - except: yield arg + except: + yield arg def mkentrymap(funcgraph): "Returns a dict mapping Blocks to lists of Links." 
diff --git a/rpython/flowspace/objspace.py b/rpython/flowspace/objspace.py --- a/rpython/flowspace/objspace.py +++ b/rpython/flowspace/objspace.py @@ -19,7 +19,7 @@ raise ValueError("RPython functions cannot create closures") if not (func.func_code.co_flags & CO_NEWLOCALS): raise ValueError("The code object for a RPython function should have " - "the flag CO_NEWLOCALS set.") + "the flag CO_NEWLOCALS set.") def build_flow(func): diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py --- a/rpython/flowspace/operation.py +++ b/rpython/flowspace/operation.py @@ -169,8 +169,8 @@ base_cls = HLOperation bases.append(base_cls) cls = HLOperationMeta(name, tuple(bases), {'opname': name, 'arity': arity, - 'canraise': [], - 'dispatch': dispatch}) + 'canraise': [], + 'dispatch': dispatch}) if pyfunc is not None: func2op[pyfunc] = cls if operator_func: diff --git a/rpython/flowspace/pygraph.py b/rpython/flowspace/pygraph.py --- a/rpython/flowspace/pygraph.py +++ b/rpython/flowspace/pygraph.py @@ -31,4 +31,3 @@ for c in "<>&!": name = name.replace(c, '_') return name - From noreply at buildbot.pypy.org Wed Jan 22 00:19:24 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 22 Jan 2014 00:19:24 +0100 (CET) Subject: [pypy-commit] pypy default: make my editor happy Message-ID: <20140121231925.06AE51C010E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68836:bc308796c275 Date: 2014-01-20 14:43 -0800 http://bitbucket.org/pypy/pypy/changeset/bc308796c275/ Log: make my editor happy diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -531,7 +531,7 @@ """x.__getitem__(y) <==> x[y]""" def __getnewargs__(): - """""" + "" def __getslice__(): """x.__getslice__(i, j) <==> x[i:j] From noreply at buildbot.pypy.org Wed Jan 22 00:19:26 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 22 Jan 2014 00:19:26 +0100 (CET) Subject: [pypy-commit] pypy default: rename the String strategies and {getitems, listview, newlist}_str -> bytes to be Message-ID: <20140121231926.4AD6D1C010E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68837:0fb4c2282ffc Date: 2014-01-21 14:25 -0800 http://bitbucket.org/pypy/pypy/changeset/0fb4c2282ffc/ Log: rename the String strategies and {getitems,listview,newlist}_str -> bytes to be consistent with W_BytesObject diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -910,7 +910,7 @@ """ return self.unpackiterable(w_iterable, expected_length) - def listview_str(self, w_list): + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. May return None anyway. 
@@ -944,7 +944,7 @@ """ return (None, None) - def newlist_str(self, list_s): + def newlist_bytes(self, list_s): return self.newlist([self.wrap(s) for s in list_s]) def newlist_unicode(self, list_u): diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -51,7 +51,9 @@ l = [1, 2, 3] assert list_strategy(l) == "int" l = ["a", "b", "c"] - assert list_strategy(l) == "str" + assert list_strategy(l) == "bytes" + l = [u"a", u"b", u"c"] + assert list_strategy(l) == "unicode" l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -213,12 +213,14 @@ Build an extension module linked against the cpyext api library. """ if not space.is_none(w_separate_module_files): - separate_module_files = space.listview_str(w_separate_module_files) + separate_module_files = space.listview_bytes( + w_separate_module_files) assert separate_module_files is not None else: separate_module_files = [] if not space.is_none(w_separate_module_sources): - separate_module_sources = space.listview_str(w_separate_module_sources) + separate_module_sources = space.listview_bytes( + w_separate_module_sources) assert separate_module_sources is not None else: separate_module_sources = [] diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -441,8 +441,8 @@ def str_w(self, space): return self._value - def listview_str(self): - return _create_list_from_string(self._value) + def listview_bytes(self): + return _create_list_from_bytes(self._value) def ord(self, space): if len(self._value) != 1: @@ -518,7 +518,7 @@ _title = _upper def _newlist_unwrapped(self, space, lst): - return space.newlist_str(lst) + return space.newlist_bytes(lst) @staticmethod @unwrap_spec(w_object = WrappedDefault("")) @@ -725,9 +725,9 @@ return tformat.formatter_field_name_split() -def _create_list_from_string(value): +def _create_list_from_bytes(value): # need this helper function to allow the jit to look inside and inline - # listview_str + # listview_bytes return [s for s in value] W_BytesObject.EMPTY = W_BytesObject('') diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -127,7 +127,7 @@ def w_keys(self, w_dict): space = self.space l = self.unerase(w_dict.dstorage).keys() - return space.newlist_str(l) + return space.newlist_bytes(l) def values(self, w_dict): iterator = self.unerase(w_dict.dstorage).itervalues diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -58,7 +58,7 @@ strategy = space.fromcache(MapDictStrategy) elif instance or strdict or module: assert w_type is None - strategy = space.fromcache(StringDictStrategy) + strategy = space.fromcache(BytesDictStrategy) elif kwargs: assert w_type is None from pypy.objspace.std.kwargsdict import EmptyKwargsDictStrategy @@ -117,9 +117,9 @@ if space.is_w(w_type, space.w_dict): w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) - strlist = space.listview_str(w_keys) - if strlist is not None: - for key in strlist: + byteslist = 
space.listview_bytes(w_keys) + if byteslist is not None: + for key in byteslist: w_dict.setitem_str(key, w_fill) else: for w_key in space.listview(w_keys): @@ -333,7 +333,7 @@ popitem delitem clear \ length w_keys values items \ iterkeys itervalues iteritems \ - listview_str listview_unicode listview_int \ + listview_bytes listview_unicode listview_int \ view_as_kwargs".split() def make_method(method): @@ -482,7 +482,7 @@ w_dict.strategy = strategy w_dict.dstorage = storage - def listview_str(self, w_dict): + def listview_bytes(self, w_dict): return None def listview_unicode(self, w_dict): @@ -506,7 +506,7 @@ def switch_to_correct_strategy(self, w_dict, w_key): withidentitydict = self.space.config.objspace.std.withidentitydict if type(w_key) is self.space.StringObjectCls: - self.switch_to_string_strategy(w_dict) + self.switch_to_bytes_strategy(w_dict) return elif type(w_key) is self.space.UnicodeObjectCls: self.switch_to_unicode_strategy(w_dict) @@ -519,8 +519,8 @@ else: self.switch_to_object_strategy(w_dict) - def switch_to_string_strategy(self, w_dict): - strategy = self.space.fromcache(StringDictStrategy) + def switch_to_bytes_strategy(self, w_dict): + strategy = self.space.fromcache(BytesDictStrategy) storage = strategy.get_empty_storage() w_dict.strategy = strategy w_dict.dstorage = storage @@ -572,7 +572,7 @@ w_dict.setitem(w_key, w_value) def setitem_str(self, w_dict, key, w_value): - self.switch_to_string_strategy(w_dict) + self.switch_to_bytes_strategy(w_dict) w_dict.setitem_str(key, w_value) def delitem(self, w_dict, w_key): @@ -874,8 +874,8 @@ create_iterator_classes(ObjectDictStrategy) -class StringDictStrategy(AbstractTypedStrategy, DictStrategy): - erase, unerase = rerased.new_erasing_pair("string") +class BytesDictStrategy(AbstractTypedStrategy, DictStrategy): + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -913,11 +913,11 @@ assert key is not None return self.unerase(w_dict.dstorage).get(key, None) - def listview_str(self, w_dict): + def listview_bytes(self, w_dict): return self.unerase(w_dict.dstorage).keys() def w_keys(self, w_dict): - return self.space.newlist_str(self.listview_str(w_dict)) + return self.space.newlist_bytes(self.listview_bytes(w_dict)) def wrapkey(space, key): return space.wrap(key) @@ -935,7 +935,7 @@ i += 1 return keys, values -create_iterator_classes(StringDictStrategy) +create_iterator_classes(BytesDictStrategy) class UnicodeDictStrategy(AbstractTypedStrategy, DictStrategy): @@ -961,7 +961,7 @@ def _never_equal_to(self, w_lookup_type): return _never_equal_to_string(self.space, w_lookup_type) - # we should implement the same shortcuts as we do for StringDictStrategy + # we should implement the same shortcuts as we do for BytesDictStrategy ## def setitem_str(self, w_dict, key, w_value): ## assert key is not None @@ -983,7 +983,7 @@ return self.unerase(w_dict.dstorage).keys() ## def w_keys(self, w_dict): - ## return self.space.newlist_str(self.listview_str(w_dict)) + ## return self.space.newlist_bytes(self.listview_bytes(w_dict)) def wrapkey(space, key): return space.wrap(key) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -84,7 +84,7 @@ def w_keys(self, w_dict): space = self.space - return space.newlist_str(self.unerase(w_dict.dstorage).dict_w.keys()) + return space.newlist_bytes(self.unerase(w_dict.dstorage).dict_w.keys()) def values(self, w_dict): return 
[unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -2,15 +2,13 @@ ## dict strategy (see dictmultiobject.py) from rpython.rlib import rerased, jit -from pypy.objspace.std.dictmultiobject import (DictStrategy, - create_iterator_classes, - EmptyDictStrategy, - ObjectDictStrategy, - StringDictStrategy) +from pypy.objspace.std.dictmultiobject import ( + BytesDictStrategy, DictStrategy, EmptyDictStrategy, ObjectDictStrategy, + create_iterator_classes) class EmptyKwargsDictStrategy(EmptyDictStrategy): - def switch_to_string_strategy(self, w_dict): + def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() w_dict.strategy = strategy @@ -61,7 +59,7 @@ else: # limit the size so that the linear searches don't become too long if len(keys) >= 16: - self.switch_to_string_strategy(w_dict) + self.switch_to_bytes_strategy(w_dict) w_dict.setitem_str(key, w_value) else: keys.append(key) @@ -111,7 +109,7 @@ def w_keys(self, w_dict): l = self.unerase(w_dict.dstorage)[0] - return self.space.newlist_str(l[:]) + return self.space.newlist_bytes(l[:]) def values(self, w_dict): return self.unerase(w_dict.dstorage)[1][:] # to make non-resizable @@ -142,8 +140,8 @@ w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) - def switch_to_string_strategy(self, w_dict): - strategy = self.space.fromcache(StringDictStrategy) + def switch_to_bytes_strategy(self, w_dict): + strategy = self.space.fromcache(BytesDictStrategy) keys, values_w = self.unerase(w_dict.dstorage) storage = strategy.get_empty_storage() d_new = strategy.unerase(storage) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -81,7 +81,7 @@ if not type(w_obj) is W_BytesObject: break else: - return space.fromcache(StringListStrategy) + return space.fromcache(BytesListStrategy) # check for unicode for w_obj in list_w: @@ -162,8 +162,8 @@ return self @staticmethod - def newlist_str(space, list_s): - strategy = space.fromcache(StringListStrategy) + def newlist_bytes(space, list_s): + strategy = space.fromcache(BytesListStrategy) storage = strategy.erase(list_s) return W_ListObject.from_storage_and_strategy(space, storage, strategy) @@ -278,10 +278,10 @@ ObjectListStrategy.""" return self.strategy.getitems_copy(self) - def getitems_str(self): + def getitems_bytes(self): """Return the items in the list as unwrapped strings. If the list does not use the list strategy, return None.""" - return self.strategy.getitems_str(self) + return self.strategy.getitems_bytes(self) def getitems_unicode(self): """Return the items in the list as unwrapped unicodes. 
If the list does @@ -753,7 +753,7 @@ def getitems_copy(self, w_list): raise NotImplementedError - def getitems_str(self, w_list): + def getitems_bytes(self, w_list): return None def getitems_unicode(self, w_list): @@ -897,7 +897,7 @@ if type(w_item) is W_IntObject: strategy = self.space.fromcache(IntegerListStrategy) elif type(w_item) is W_BytesObject: - strategy = self.space.fromcache(StringListStrategy) + strategy = self.space.fromcache(BytesListStrategy) elif type(w_item) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeListStrategy) elif type(w_item) is W_FloatObject: @@ -962,11 +962,11 @@ w_list.lstorage = strategy.erase(floatlist) return - strlist = space.listview_str(w_iterable) - if strlist is not None: - w_list.strategy = strategy = space.fromcache(StringListStrategy) + byteslist = space.listview_bytes(w_iterable) + if byteslist is not None: + w_list.strategy = strategy = space.fromcache(BytesListStrategy) # need to copy because intlist can share with w_iterable - w_list.lstorage = strategy.erase(strlist[:]) + w_list.lstorage = strategy.erase(byteslist[:]) return unilist = space.listview_unicode(w_iterable) @@ -1592,11 +1592,11 @@ return self.unerase(w_list.lstorage) -class StringListStrategy(ListStrategy): +class BytesListStrategy(ListStrategy): import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "str" + _applevel_repr = "bytes" def wrap(self, stringval): return self.space.wrap(stringval) @@ -1604,7 +1604,7 @@ def unwrap(self, w_string): return self.space.str_w(w_string) - erase, unerase = rerased.new_erasing_pair("string") + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1612,7 +1612,7 @@ return type(w_obj) is W_BytesObject def list_is_correct_type(self, w_list): - return w_list.strategy is self.space.fromcache(StringListStrategy) + return w_list.strategy is self.space.fromcache(BytesListStrategy) def sort(self, w_list, reverse): l = self.unerase(w_list.lstorage) @@ -1621,7 +1621,7 @@ if reverse: l.reverse() - def getitems_str(self, w_list): + def getitems_bytes(self, w_list): return self.unerase(w_list.lstorage) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -700,7 +700,7 @@ self.delitem(w_dict, w_key) return (w_key, w_value) - # XXX could implement a more efficient w_keys based on space.newlist_str + # XXX could implement a more efficient w_keys based on space.newlist_bytes def iterkeys(self, w_dict): return MapDictIteratorKeys(self.space, self, w_dict) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -292,8 +292,8 @@ assert not list_w or sizehint == -1 return W_ListObject(self, list_w, sizehint) - def newlist_str(self, list_s): - return W_ListObject.newlist_str(self, list_s) + def newlist_bytes(self, list_s): + return W_ListObject.newlist_bytes(self, list_s) def newlist_unicode(self, list_u): return W_ListObject.newlist_unicode(self, list_u) @@ -431,19 +431,19 @@ raise self._wrap_expected_length(expected_length, len(t)) return t - def listview_str(self, w_obj): + def listview_bytes(self, w_obj): # note: uses exact type checking for objects with strategies, # and isinstance() for others. See test_listobject.test_uses_custom... 
if type(w_obj) is W_ListObject: - return w_obj.getitems_str() + return w_obj.getitems_bytes() if type(w_obj) is W_DictMultiObject: - return w_obj.listview_str() + return w_obj.listview_bytes() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: - return w_obj.listview_str() + return w_obj.listview_bytes() if isinstance(w_obj, W_BytesObject) and self._uses_no_iter(w_obj): - return w_obj.listview_str() + return w_obj.listview_bytes() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): - return w_obj.getitems_str() + return w_obj.getitems_bytes() return None def listview_unicode(self, w_obj): diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -79,9 +79,9 @@ """ Returns a dict with all elements of the set. Needed only for switching to ObjectSetStrategy. """ return self.strategy.getdict_w(self) - def listview_str(self): + def listview_bytes(self): """ If this is a string set return its contents as a list of uwnrapped strings. Otherwise return None. """ - return self.strategy.listview_str(self) + return self.strategy.listview_bytes(self) def listview_unicode(self): """ If this is a unicode set return its contents as a list of uwnrapped unicodes. Otherwise return None. """ @@ -669,7 +669,7 @@ """ Returns an empty storage (erased) object. Used to initialize an empty set.""" raise NotImplementedError - def listview_str(self, w_set): + def listview_bytes(self, w_set): return None def listview_unicode(self, w_set): @@ -776,7 +776,7 @@ if type(w_key) is W_IntObject: strategy = self.space.fromcache(IntegerSetStrategy) elif type(w_key) is W_BytesObject: - strategy = self.space.fromcache(StringSetStrategy) + strategy = self.space.fromcache(BytesSetStrategy) elif type(w_key) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeSetStrategy) elif self.space.type(w_key).compares_by_identity(): @@ -1196,8 +1196,8 @@ return self.wrap(result[0]) -class StringSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): - erase, unerase = rerased.new_erasing_pair("string") +class BytesSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1207,7 +1207,7 @@ def get_empty_dict(self): return {} - def listview_str(self, w_set): + def listview_bytes(self, w_set): return self.unerase(w_set.sstorage).keys() def is_correct_type(self, w_key): @@ -1229,7 +1229,7 @@ return self.space.wrap(item) def iter(self, w_set): - return StringIteratorImplementation(self.space, self, w_set) + return BytesIteratorImplementation(self.space, self, w_set) class UnicodeSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): @@ -1286,7 +1286,7 @@ return type(w_key) is W_IntObject def may_contain_equal_elements(self, strategy): - if strategy is self.space.fromcache(StringSetStrategy): + if strategy is self.space.fromcache(BytesSetStrategy): return False elif strategy is self.space.fromcache(UnicodeSetStrategy): return False @@ -1371,7 +1371,7 @@ return False if strategy is self.space.fromcache(IntegerSetStrategy): return False - if strategy is self.space.fromcache(StringSetStrategy): + if strategy is self.space.fromcache(BytesSetStrategy): return False if strategy is self.space.fromcache(UnicodeSetStrategy): return False @@ -1436,7 +1436,7 @@ return None -class StringIteratorImplementation(IteratorImplementation): +class BytesIteratorImplementation(IteratorImplementation): def __init__(self, 
space, strategy, w_set): IteratorImplementation.__init__(self, space, strategy, w_set) d = strategy.unerase(w_set.sstorage) @@ -1546,11 +1546,11 @@ w_set.sstorage = w_iterable.get_storage_copy() return - stringlist = space.listview_str(w_iterable) - if stringlist is not None: - strategy = space.fromcache(StringSetStrategy) + byteslist = space.listview_bytes(w_iterable) + if byteslist is not None: + strategy = space.fromcache(BytesSetStrategy) w_set.strategy = strategy - w_set.sstorage = strategy.get_storage_from_unwrapped_list(stringlist) + w_set.sstorage = strategy.get_storage_from_unwrapped_list(byteslist) return unicodelist = space.listview_unicode(w_iterable) @@ -1593,7 +1593,7 @@ if type(w_item) is not W_BytesObject: break else: - w_set.strategy = space.fromcache(StringSetStrategy) + w_set.strategy = space.fromcache(BytesSetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) return diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -311,7 +311,7 @@ from pypy.objspace.std.unicodeobject import W_UnicodeObject if isinstance(self, W_BytesObject): - l = space.listview_str(w_list) + l = space.listview_bytes(w_list) if l is not None: if len(l) == 1: return space.wrap(l[0]) diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -80,9 +80,9 @@ w_slice = space.newslice(w(1), w_None, w(2)) assert self.space.eq_w(space.getitem(w_str, w_slice), w('el')) - def test_listview_str(self): + def test_listview_bytes(self): w_str = self.space.wrap('abcd') - assert self.space.listview_str(w_str) == list("abcd") + assert self.space.listview_bytes(w_str) == list("abcd") class AppTestBytesObject: diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -2,7 +2,7 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - StringDictStrategy, ObjectDictStrategy) + BytesDictStrategy, ObjectDictStrategy) class TestW_DictObject(object): @@ -134,11 +134,11 @@ assert space.eq_w(w_d.getitem_str("a"), space.w_None) assert space.eq_w(w_d.getitem_str("b"), space.w_None) - def test_listview_str_dict(self): + def test_listview_bytes_dict(self): w = self.space.wrap w_d = self.space.newdict() w_d.initialize_content([(w("a"), w(1)), (w("b"), w(2))]) - assert self.space.listview_str(w_d) == ["a", "b"] + assert self.space.listview_bytes(w_d) == ["a", "b"] def test_listview_unicode_dict(self): w = self.space.wrap @@ -160,7 +160,7 @@ w_l = self.space.call_method(w_d, "keys") assert sorted(self.space.listview_int(w_l)) == [1,2] - # make sure that .keys() calls newlist_str for string dicts + # make sure that .keys() calls newlist_bytes for string dicts def not_allowed(*args): assert False, 'should not be called' monkeypatch.setattr(self.space, 'newlist', not_allowed) @@ -168,7 +168,7 @@ w_d = self.space.newdict() w_d.initialize_content([(w("a"), w(1)), (w("b"), w(6))]) w_l = self.space.call_method(w_d, "keys") - assert sorted(self.space.listview_str(w_l)) == ["a", "b"] + assert sorted(self.space.listview_bytes(w_l)) == ["a", "b"] # XXX: it would be nice if the test passed without monkeypatch.undo(), # but we need space.newlist_unicode for it @@ -944,7 +944,7 @@ d = {} 
assert "EmptyDictStrategy" in self.get_strategy(d) d["a"] = 1 - assert "StringDictStrategy" in self.get_strategy(d) + assert "BytesDictStrategy" in self.get_strategy(d) class O(object): pass @@ -952,7 +952,7 @@ d = o.__dict__ = {} assert "EmptyDictStrategy" in self.get_strategy(d) o.a = 1 - assert "StringDictStrategy" in self.get_strategy(d) + assert "BytesDictStrategy" in self.get_strategy(d) def test_empty_to_unicode(self): d = {} @@ -1033,7 +1033,7 @@ eq_w = eq def newlist(self, l): return l - def newlist_str(self, l): + def newlist_bytes(self, l): return l DictObjectCls = W_DictMultiObject def type(self, w_obj): @@ -1275,9 +1275,9 @@ assert "s" not in d.w_keys() assert F() not in d.w_keys() -class TestStrDictImplementation(BaseTestRDictImplementation): - StrategyClass = StringDictStrategy - #ImplementionClass = StrDictImplementation +class TestBytesDictImplementation(BaseTestRDictImplementation): + StrategyClass = BytesDictStrategy + #ImplementionClass = BytesDictImplementation def test_str_shortcut(self): self.fill_impl() @@ -1301,12 +1301,12 @@ def check_not_devolved(self): pass -class TestDevolvedStrDictImplementation(BaseTestDevolvedDictImplementation): - StrategyClass = StringDictStrategy +class TestDevolvedBytesDictImplementation(BaseTestDevolvedDictImplementation): + StrategyClass = BytesDictStrategy def test_module_uses_strdict(): fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is StringDictStrategy + assert type(d.strategy) is BytesDictStrategy diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -73,7 +73,7 @@ for i in range(100): assert d.setitem_str("d%s" % i, 4) is None assert d.strategy is not strategy - assert "StringDictStrategy" == d.strategy.__class__.__name__ + assert "BytesDictStrategy" == d.strategy.__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,5 +1,5 @@ import sys -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, StringListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, BytesListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -13,7 +13,7 @@ assert isinstance(W_ListObject(space, [w(1),w(2),w(3)]).strategy, IntegerListStrategy) assert isinstance(W_ListObject(space, [w('a'), w('b')]).strategy, - StringListStrategy) + BytesListStrategy) assert isinstance(W_ListObject(space, [w(u'a'), w(u'b')]).strategy, UnicodeListStrategy) assert isinstance(W_ListObject(space, [w(u'a'), w('b')]).strategy, @@ -35,7 +35,7 @@ l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.append(w('a')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -59,9 +59,9 @@ def test_string_to_any(self): l = W_ListObject(self.space, 
[self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.append(self.space.wrap('d')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.append(self.space.wrap(3)) assert isinstance(l.strategy, ObjectListStrategy) @@ -92,7 +92,7 @@ l.setitem(0, w('d')) assert space.eq_w(l.getitem(0), w('d')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) # IntStrategy to ObjectStrategy l = W_ListObject(space, [w(1),w(2),w(3)]) @@ -100,9 +100,9 @@ l.setitem(0, w('d')) assert isinstance(l.strategy, ObjectListStrategy) - # StringStrategy to ObjectStrategy + # BytesStrategy to ObjectStrategy l = W_ListObject(space, [w('a'),w('b'),w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.setitem(0, w(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -127,9 +127,9 @@ l.insert(3, w(4)) assert isinstance(l.strategy, IntegerListStrategy) - # StringStrategy + # BytesStrategy l = W_ListObject(space, [w('a'),w('b'),w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.insert(3, w(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -155,7 +155,7 @@ l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.insert(0, w('a')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -207,9 +207,9 @@ l.setslice(0, 1, 2, W_ListObject(space, [w('a'), w('b'), w('c')])) assert isinstance(l.strategy, ObjectListStrategy) - # StringStrategy to ObjectStrategy + # BytesStrategy to ObjectStrategy l = W_ListObject(space, [w('a'), w('b'), w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.setslice(0, 1, 2, W_ListObject(space, [w(1), w(2), w(3)])) assert isinstance(l.strategy, ObjectListStrategy) @@ -261,7 +261,7 @@ l = W_ListObject(space, wrapitems(["a","b","c","d","e"])) other = W_ListObject(space, wrapitems(["a", "b", "c"])) keep_other_strategy(l, 0, 2, other.length(), other) - assert l.strategy is space.fromcache(StringListStrategy) + assert l.strategy is space.fromcache(BytesListStrategy) l = W_ListObject(space, wrapitems([u"a",u"b",u"c",u"d",u"e"])) other = W_ListObject(space, wrapitems([u"a", u"b", u"c"])) @@ -330,7 +330,7 @@ empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(space, [w("a"), w("b"), w("c")])) - assert isinstance(empty.strategy, StringListStrategy) + assert isinstance(empty.strategy, BytesListStrategy) empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) @@ -514,17 +514,17 @@ def test_unicode(self): l1 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap("zwei")]) - assert isinstance(l1.strategy, StringListStrategy) + assert isinstance(l1.strategy, BytesListStrategy) l2 = W_ListObject(self.space, [self.space.wrap(u"eins"), self.space.wrap(u"zwei")]) assert isinstance(l2.strategy, UnicodeListStrategy) l3 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap(u"zwei")]) assert isinstance(l3.strategy, ObjectListStrategy) - def test_listview_str(self): + def test_listview_bytes(self): space = self.space - assert space.listview_str(space.wrap(1)) 
== None + assert space.listview_bytes(space.wrap(1)) == None w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) - assert space.listview_str(w_l) == ["a", "b"] + assert space.listview_bytes(w_l) == ["a", "b"] def test_listview_unicode(self): space = self.space @@ -532,7 +532,7 @@ w_l = self.space.newlist([self.space.wrap(u'a'), self.space.wrap(u'b')]) assert space.listview_unicode(w_l) == [u"a", u"b"] - def test_string_join_uses_listview_str(self): + def test_string_join_uses_listview_bytes(self): space = self.space w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) w_l.getitems = None @@ -556,14 +556,14 @@ w_l.getitems = None assert space.is_w(space.call_method(space.wrap(u" -- "), "join", w_l), w_text) - def test_newlist_str(self): + def test_newlist_bytes(self): space = self.space l = ['a', 'b'] - w_l = self.space.newlist_str(l) - assert isinstance(w_l.strategy, StringListStrategy) - assert space.listview_str(w_l) is l + w_l = self.space.newlist_bytes(l) + assert isinstance(w_l.strategy, BytesListStrategy) + assert space.listview_bytes(w_l) is l - def test_string_uses_newlist_str(self): + def test_string_uses_newlist_bytes(self): space = self.space w_s = space.wrap("a b c") space.newlist = None @@ -574,10 +574,10 @@ w_l4 = space.call_method(w_s, "rsplit", space.wrap(" ")) finally: del space.newlist - assert space.listview_str(w_l) == ["a", "b", "c"] - assert space.listview_str(w_l2) == ["a", "b", "c"] - assert space.listview_str(w_l3) == ["a", "b", "c"] - assert space.listview_str(w_l4) == ["a", "b", "c"] + assert space.listview_bytes(w_l) == ["a", "b", "c"] + assert space.listview_bytes(w_l2) == ["a", "b", "c"] + assert space.listview_bytes(w_l3) == ["a", "b", "c"] + assert space.listview_bytes(w_l4) == ["a", "b", "c"] def test_unicode_uses_newlist_unicode(self): space = self.space @@ -630,10 +630,10 @@ assert space.eq_w(w_l, w_l2) - def test_listview_str_list(self): + def test_listview_bytes_list(self): space = self.space w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b")]) - assert self.space.listview_str(w_l) == ["a", "b"] + assert self.space.listview_bytes(w_l) == ["a", "b"] def test_listview_unicode_list(self): space = self.space diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -82,7 +82,7 @@ def test_create_set_from_list(self): from pypy.interpreter.baseobjspace import W_Root - from pypy.objspace.std.setobject import ObjectSetStrategy, StringSetStrategy, UnicodeSetStrategy + from pypy.objspace.std.setobject import BytesSetStrategy, ObjectSetStrategy, UnicodeSetStrategy from pypy.objspace.std.floatobject import W_FloatObject w = self.space.wrap @@ -100,7 +100,7 @@ w_list = W_ListObject(self.space, [w("1"), w("2"), w("3")]) w_set = W_SetObject(self.space) _initialize_set(self.space, w_set, w_list) - assert w_set.strategy is self.space.fromcache(StringSetStrategy) + assert w_set.strategy is self.space.fromcache(BytesSetStrategy) assert w_set.strategy.unerase(w_set.sstorage) == {"1":None, "2":None, "3":None} w_list = self.space.iter(W_ListObject(self.space, [w(u"1"), w(u"2"), w(u"3")])) @@ -126,18 +126,18 @@ # changed cached object, need to change it back for other tests to pass intstr.get_storage_from_list = tmp_func - def test_listview_str_int_on_set(self): + def test_listview_bytes_int_on_set(self): w = self.space.wrap w_a = W_SetObject(self.space) _initialize_set(self.space, w_a, 
w("abcdefg")) - assert sorted(self.space.listview_str(w_a)) == list("abcdefg") + assert sorted(self.space.listview_bytes(w_a)) == list("abcdefg") assert self.space.listview_int(w_a) is None w_b = W_SetObject(self.space) _initialize_set(self.space, w_b, self.space.newlist([w(1),w(2),w(3),w(4),w(5)])) assert sorted(self.space.listview_int(w_b)) == [1,2,3,4,5] - assert self.space.listview_str(w_b) is None + assert self.space.listview_bytes(w_b) is None class AppTestAppSetTest: diff --git a/pypy/objspace/std/test/test_setstrategies.py b/pypy/objspace/std/test/test_setstrategies.py --- a/pypy/objspace/std/test/test_setstrategies.py +++ b/pypy/objspace/std/test/test_setstrategies.py @@ -1,10 +1,8 @@ from pypy.objspace.std.setobject import W_SetObject -from pypy.objspace.std.setobject import (IntegerSetStrategy, ObjectSetStrategy, - EmptySetStrategy, StringSetStrategy, - UnicodeSetStrategy, - IntegerIteratorImplementation, - StringIteratorImplementation, - UnicodeIteratorImplementation) +from pypy.objspace.std.setobject import ( + BytesIteratorImplementation, BytesSetStrategy, EmptySetStrategy, + IntegerIteratorImplementation, IntegerSetStrategy, ObjectSetStrategy, + UnicodeIteratorImplementation, UnicodeSetStrategy) from pypy.objspace.std.listobject import W_ListObject class TestW_SetStrategies: @@ -26,7 +24,7 @@ assert s.strategy is self.space.fromcache(EmptySetStrategy) s = W_SetObject(self.space, self.wrapped(["a", "b"])) - assert s.strategy is self.space.fromcache(StringSetStrategy) + assert s.strategy is self.space.fromcache(BytesSetStrategy) s = W_SetObject(self.space, self.wrapped([u"a", u"b"])) assert s.strategy is self.space.fromcache(UnicodeSetStrategy) @@ -126,7 +124,7 @@ # s = W_SetObject(space, self.wrapped(["a", "b"])) it = s.iter() - assert isinstance(it, StringIteratorImplementation) + assert isinstance(it, BytesIteratorImplementation) assert space.unwrap(it.next()) == "a" assert space.unwrap(it.next()) == "b" # @@ -142,7 +140,7 @@ assert sorted(space.listview_int(s)) == [1, 2] # s = W_SetObject(space, self.wrapped(["a", "b"])) - assert sorted(space.listview_str(s)) == ["a", "b"] + assert sorted(space.listview_bytes(s)) == ["a", "b"] # s = W_SetObject(space, self.wrapped([u"a", u"b"])) assert sorted(space.listview_unicode(s)) == [u"a", u"b"] From noreply at buildbot.pypy.org Wed Jan 22 09:45:00 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 22 Jan 2014 09:45:00 +0100 (CET) Subject: [pypy-commit] stmgc c7: add and fix test Message-ID: <20140122084500.355781C0291@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r661:c44a0c4106ec Date: 2014-01-22 09:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/c44a0c4106ec/ Log: add and fix test diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -453,6 +453,7 @@ object_t *_stm_alloc_old(size_t size) { + /* may return uninitialized objects */ object_t *result; size_t size_class = size / 8; assert(size_class >= 2); @@ -486,6 +487,8 @@ localchar_t *_stm_alloc_next_page(size_t size_class) { + /* may return uninitialized pages */ + /* 'alloc->next' points to where the next allocation should go. The present function is called instead when this next allocation is equal to 'alloc->stop'. 
        As we know that 'start', 'next' and
@@ -867,6 +870,8 @@
         uint16_t cur = (uintptr_t)alloc->next;
         if (start == cur)
             continue;
+
+        alloc->start = cur;   /* next transaction starts there */
         uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL;
         if (flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE) {
             /* becomes a SHARED (s.b.) partially used page */
@@ -1021,13 +1026,12 @@
     /* XXX: forget about GCFLAG_UNCOMMITTED objects */
-    /* long j; */
-    /* for (j = 2; j < LARGE_OBJECT_WORDS; j++) { */
-    /*     alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; */
-    /*     uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; */
-    /*     alloc->next -= num_allocated; */
-    /* } */
-    /* stm_list_clear(_STM_TL2->uncommitted_object_ranges); */
+    long j;
+    for (j = 2; j < LARGE_OBJECT_WORDS; j++) {
+        alloc_for_size_t *alloc = &_STM_TL2->alloc[j];
+        uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start;
+        alloc->next -= num_allocated;
+    }
 
     assert(_STM_TL1->jmpbufptr != NULL);

diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py
--- a/c7/test/test_basic.py
+++ b/c7/test/test_basic.py
@@ -423,7 +423,23 @@
         assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == lib.SHARED_PAGE
         assert not (stm_get_flags(newer) & lib.GCFLAG_NOT_COMMITTED)
 
+    def test_reset_partial_alloc_pages(self):
+        stm_start_transaction()
+        new = stm_allocate(16)
+        stm_set_char(new, 'a')
+        stm_push_root(new)
+        stm_minor_collect()
+        new = stm_pop_root()
+        stm_abort_transaction()
+        stm_start_transaction()
+        newer = stm_allocate(16)
+        stm_push_root(newer)
+        stm_minor_collect()
+        newer = stm_pop_root()
+        assert stm_get_real_address(new) == stm_get_real_address(newer)
+        assert stm_get_char(newer) == '\0'
+
     # def test_resolve_write_write_no_conflict(self):
     #     stm_start_transaction()

From noreply at buildbot.pypy.org Wed Jan 22 10:10:21 2014
From: noreply at buildbot.pypy.org (Remi Meier)
Date: Wed, 22 Jan 2014 10:10:21 +0100 (CET)
Subject: [pypy-commit] stmgc c7: some cleanup and a fix
Message-ID: <20140122091021.E2BBC1C00E3@cobra.cs.uni-duesseldorf.de>

Author: Remi Meier
Branch: c7
Changeset: r662:edd92edaa89f
Date: 2014-01-22 10:09 +0100
http://bitbucket.org/pypy/stmgc/changeset/edd92edaa89f/

Log:    some cleanup and a fix

diff --git a/c7/core.c b/c7/core.c
--- a/c7/core.c
+++ b/c7/core.c
@@ -453,14 +453,18 @@
 object_t *_stm_alloc_old(size_t size)
 {
-    /* may return uninitialized objects */
+    /* may return uninitialized objects. except for the
+       GCFLAG_NOT_COMMITTED, it is set exactly if
+       we allocated the object in a SHARED and partially
+       committed page. (XXX: add the flag in some other place)
+    */
     object_t *result;
     size_t size_class = size / 8;
     assert(size_class >= 2);
 
     if (size_class >= LARGE_OBJECT_WORDS) {
         result = _stm_allocate_old(size);
-        result->stm_flags &= ~GCFLAG_WRITE_BARRIER; /* added by _stm_allocate_old... */
+        result->stm_flags &= ~GCFLAG_NOT_COMMITTED; /* page may be non-zeroed */
         int page = ((uintptr_t)result) / 4096;
         int pages = (size + 4095) / 4096;
@@ -468,6 +472,8 @@
         for (i = 0; i < pages; i++) {
             mark_page_as_uncommitted(page + i);
         }
+        /* make sure the flag is not set (page is not zeroed!) */
+        result->stm_flags &= ~GCFLAG_NOT_COMMITTED;
     } else {
         alloc_for_size_t *alloc = &_STM_TL2->alloc[size_class];
@@ -479,6 +485,9 @@
         if (alloc->flag_partial_page) {
             LIST_APPEND(_STM_TL2->uncommitted_objects, result);
             result->stm_flags |= GCFLAG_NOT_COMMITTED;
         } else {
+            /* make sure the flag is not set (page is not zeroed!)
*/ + result->stm_flags &= ~GCFLAG_NOT_COMMITTED; } } } @@ -826,23 +835,6 @@ _STM_TL2->old_shadow_stack = _STM_TL1->shadow_stack; } -#if 0 -static void update_new_objects_in_other_threads(uintptr_t pagenum, - uint16_t start, uint16_t stop) -{ - size_t size = (uint16_t)(stop - start); - assert(size <= 4096 - (start & 4095)); - assert((start & ~4095) == (uint16_t)(pagenum * 4096)); - - int thread_num = _STM_TL2->thread_num; - uintptr_t local_src = (pagenum * 4096UL) + (start & 4095); - char *dst = REAL_ADDRESS(get_thread_base(1 - thread_num), local_src); - char *src = REAL_ADDRESS(_STM_TL2->thread_base, local_src); - - memcpy(dst, src, size); - abort(); -} -#endif void stm_stop_transaction(void) { @@ -868,13 +860,16 @@ alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; uint16_t start = alloc->start; uint16_t cur = (uintptr_t)alloc->next; + if (start == cur) - continue; + continue; /* page full -> will be replaced automatically */ - alloc->start = cur; /* next transaction starts there */ + alloc->start = cur; /* next transaction has different 'start' to + reset in case of an abort */ + uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; if (flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE) { - /* becomes a SHARED (s.b.) partially used page */ + /* becomes a SHARED (done below) partially used page */ alloc->flag_partial_page = 1; } } @@ -888,73 +883,6 @@ stm_list_clear(_STM_TL2->uncommitted_pages); - - - /* /\* walk the uncommitted_object_ranges and manually copy the new objects */ - /* to the other thread's pages in the (hopefully rare) case that */ - /* the page they belong to is already unshared *\/ */ - /* long i; */ - /* struct stm_list_s *lst = _STM_TL2->uncommitted_object_ranges; */ - /* for (i = stm_list_count(lst); i > 0; ) { */ - /* i -= 2; */ - /* uintptr_t pagenum = (uintptr_t)stm_list_item(lst, i); */ - - /* /\* NB. the read next line should work even against a parallel */ - /* thread, thanks to the lock acquisition we do earlier (see the */ - /* beginning of this function). Indeed, if this read returns */ - /* SHARED_PAGE, then we know that the real value in memory was */ - /* actually SHARED_PAGE at least at the time of the */ - /* acquire_lock(). It may have been modified afterwards by a */ - /* compare_and_swap() in the other thread, but then we know for */ - /* sure that the other thread is seeing the last, up-to-date */ - /* version of our data --- this is the reason of the */ - /* write_fence() just before the acquire_lock(). */ - /* *\/ */ - /* if (flag_page_private[pagenum] != SHARED_PAGE) { */ - /* object_t *range = stm_list_item(lst, i + 1); */ - /* uint16_t start, stop; */ - /* FROM_RANGE(start, stop, range); */ - /* update_new_objects_in_other_threads(pagenum, start, stop); */ - /* } */ - /* } */ - - /* /\* do the same for the partially-allocated pages *\/ */ - /* long j; */ - /* for (j = 2; j < LARGE_OBJECT_WORDS; j++) { */ - /* alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; */ - /* uint16_t start = alloc->start; */ - /* uint16_t cur = (uintptr_t)alloc->next; */ - - /* if (start == cur) { */ - /* /\* nothing to do: this page (or fraction thereof) was left */ - /* empty by the previous transaction, and starts empty as */ - /* well in the new transaction. 'flag_partial_page' is */ - /* unchanged. 
*\/ */ - /* } */ - /* else { */ - /* uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; */ - /* /\* for the new transaction, it will start here: *\/ */ - /* alloc->start = cur; */ - - /* if (alloc->flag_partial_page) { */ - /* if (flag_page_private[pagenum] != SHARED_PAGE) { */ - /* update_new_objects_in_other_threads(pagenum, start, cur); */ - /* } */ - /* } */ - /* else { */ - /* /\* we can skip checking flag_page_private[] in non-debug */ - /* builds, because the whole page can only contain */ - /* objects made by the just-finished transaction. *\/ */ - /* assert(flag_page_private[pagenum] == SHARED_PAGE); */ - - /* /\* the next transaction will start with this page */ - /* containing objects that are now committed, so */ - /* we need to set this flag now *\/ */ - /* alloc->flag_partial_page = true; */ - /* } */ - /* } */ - /* } */ - _STM_TL2->running_transaction = 0; stm_stop_exclusive_lock(); fprintf(stderr, "%c", 'C'+_STM_TL2->thread_num*32); @@ -974,7 +902,7 @@ modified, ({ /* note: same as push_modified_to... but src/dst swapped - XXX: unify both... */ + TODO: unify both... */ char *dst = REAL_ADDRESS(local_base, item); char *src = REAL_ADDRESS(remote_base, item); @@ -1024,12 +952,13 @@ stm_list_clear(_STM_TL2->uncommitted_pages); - /* XXX: forget about GCFLAG_UNCOMMITTED objects */ - + /* forget about GCFLAG_NOT_COMMITTED objects by + resetting alloc-pages */ long j; for (j = 2; j < LARGE_OBJECT_WORDS; j++) { alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; + /* forget about all non-committed objects */ alloc->next -= num_allocated; } From noreply at buildbot.pypy.org Wed Jan 22 10:36:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Jan 2014 10:36:44 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix: the parser would use UTF-16 and produce surrogates Message-ID: <20140122093644.BE0771C315D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68838:574aa48e4875 Date: 2014-01-22 10:33 +0100 http://bitbucket.org/pypy/pypy/changeset/574aa48e4875/ Log: Test and fix: the parser would use UTF-16 and produce surrogates from the source code, even if we're a UTF-32 version of pypy. diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.interpreter.astcompiler import codegen, astbuilder, symtable, optimize from pypy.interpreter.pyparser import pyparse from pypy.interpreter.pyparser.test import expressions @@ -867,6 +867,9 @@ class AppTestCompiler: + def setup_class(cls): + cls.w_maxunicode = cls.space.wrap(sys.maxunicode) + def test_docstring_not_loaded(self): import StringIO, dis, sys ns = {} @@ -911,7 +914,17 @@ l = [a for a in Foo()] assert hint_called[0] assert l == list(range(5)) - + + def test_unicode_in_source(self): + import sys + d = {} + exec '# -*- coding: utf-8 -*-\n\nu = u"\xf0\x9f\x92\x8b"' in d + if sys.maxunicode > 65535 and self.maxunicode > 65535: + expected_length = 1 + else: + expected_length = 2 + assert len(d['u']) == expected_length + class TestOptimizations: def count_instructions(self, source): diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -15,7 +15,6 @@ Yes, it's very inefficient. 
Yes, CPython has very similar code. """ - # we use ps as "pointer to s" # q is the virtual last char index of the string ps = 0 @@ -54,42 +53,10 @@ if unicode_literal: # XXX Py_UnicodeFlag is ignored for now if encoding is None or encoding == "iso-8859-1": # 'unicode_escape' expects latin-1 bytes, string is ready. - buf = s - bufp = ps - bufq = q - u = None + assert 0 <= ps <= q + substr = s[ps:q] else: - # String is utf8-encoded, but 'unicode_escape' expects - # latin-1; So multibyte sequences must be escaped. - lis = [] # using a list to assemble the value - end = q - # Worst case: "\XX" may become "\u005c\uHHLL" (12 bytes) - while ps < end: - if s[ps] == '\\': - lis.append(s[ps]) - ps += 1 - if ord(s[ps]) & 0x80: - # A multibyte sequence will follow, it will be - # escaped like \u1234. To avoid confusion with - # the backslash we just wrote, we emit "\u005c" - # instead. - lis.append("u005c") - if ord(s[ps]) & 0x80: # XXX inefficient - w, ps = decode_utf8(space, s, ps, end, "utf-16-be") - rn = len(w) - assert rn % 2 == 0 - for i in range(0, rn, 2): - lis.append('\\u') - lis.append(hexbyte(ord(w[i]))) - lis.append(hexbyte(ord(w[i+1]))) - else: - lis.append(s[ps]) - ps += 1 - buf = ''.join(lis) - bufp = 0 - bufq = len(buf) - assert 0 <= bufp <= bufq - substr = buf[bufp:bufq] + substr = decode_unicode_utf8(space, s, ps, q) if rawmode: v = unicodehelper.decode_raw_unicode_escape(space, substr) else: @@ -121,6 +88,39 @@ result = "0" + result return result +def decode_unicode_utf8(space, s, ps, q): + # ****The Python 2.7 version, producing UTF-32 escapes**** + # String is utf8-encoded, but 'unicode_escape' expects + # latin-1; So multibyte sequences must be escaped. + lis = [] # using a list to assemble the value + end = q + # Worst case: + # "<92><195><164>" may become "\u005c\U000000E4" (16 bytes) + while ps < end: + if s[ps] == '\\': + lis.append(s[ps]) + ps += 1 + if ord(s[ps]) & 0x80: + # A multibyte sequence will follow, it will be + # escaped like \u1234. To avoid confusion with + # the backslash we just wrote, we emit "\u005c" + # instead. + lis.append("u005c") + if ord(s[ps]) & 0x80: # XXX inefficient + w, ps = decode_utf8(space, s, ps, end, "utf-32-be") + rn = len(w) + assert rn % 4 == 0 + for i in range(0, rn, 4): + lis.append('\\U') + lis.append(hexbyte(ord(w[i]))) + lis.append(hexbyte(ord(w[i+1]))) + lis.append(hexbyte(ord(w[i+2]))) + lis.append(hexbyte(ord(w[i+3]))) + else: + lis.append(s[ps]) + ps += 1 + return ''.join(lis) + def PyString_DecodeEscape(space, s, recode_encoding): """ Unescape a backslash-escaped string. 
If recode_encoding is non-zero, diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -1,5 +1,5 @@ from pypy.interpreter.pyparser import parsestring -import py +import py, sys class TestParsetring: def parse_and_compare(self, literal, value): @@ -91,3 +91,18 @@ input = ["'", 'x', ' ', chr(0xc3), chr(0xa9), ' ', chr(92), 'n', "'"] w_ret = parsestring.parsestr(space, 'utf8', ''.join(input)) assert space.str_w(w_ret) == ''.join(expected) + + def test_wide_unicode_in_source(self): + if sys.maxunicode == 65535: + py.test.skip("requires a wide-unicode host") + self.parse_and_compare('u"\xf0\x9f\x92\x8b"', + unichr(0x1f48b), + encoding='utf-8') + + def test_decode_unicode_utf8(self): + buf = parsestring.decode_unicode_utf8(self.space, + 'u"\xf0\x9f\x92\x8b"', 2, 6) + if sys.maxunicode == 65535: + assert buf == r"\U0000d83d\U0000dc8b" + else: + assert buf == r"\U0001f48b" From noreply at buildbot.pypy.org Wed Jan 22 10:36:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Jan 2014 10:36:47 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140122093647.3CAE31C315D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68839:7f07524f0084 Date: 2014-01-22 10:35 +0100 http://bitbucket.org/pypy/pypy/changeset/7f07524f0084/ Log: merge heads diff too long, truncating to 2000 out of 3801 lines diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -48,3 +48,7 @@ .. branch: remove-del-from-generatoriterator Speed up generators that don't yield inside try or wait blocks by skipping unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -910,7 +910,7 @@ """ return self.unpackiterable(w_iterable, expected_length) - def listview_str(self, w_list): + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. May return None anyway. @@ -944,7 +944,7 @@ """ return (None, None) - def newlist_str(self, list_s): + def newlist_bytes(self, list_s): return self.newlist([self.wrap(s) for s in list_s]) def newlist_unicode(self, list_u): diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -51,7 +51,9 @@ l = [1, 2, 3] assert list_strategy(l) == "int" l = ["a", "b", "c"] - assert list_strategy(l) == "str" + assert list_strategy(l) == "bytes" + l = [u"a", u"b", u"c"] + assert list_strategy(l) == "unicode" l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -213,12 +213,14 @@ Build an extension module linked against the cpyext api library. 
""" if not space.is_none(w_separate_module_files): - separate_module_files = space.listview_str(w_separate_module_files) + separate_module_files = space.listview_bytes( + w_separate_module_files) assert separate_module_files is not None else: separate_module_files = [] if not space.is_none(w_separate_module_sources): - separate_module_sources = space.listview_str(w_separate_module_sources) + separate_module_sources = space.listview_bytes( + w_separate_module_sources) assert separate_module_sources is not None else: separate_module_sources = [] diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -441,8 +441,8 @@ def str_w(self, space): return self._value - def listview_str(self): - return _create_list_from_string(self._value) + def listview_bytes(self): + return _create_list_from_bytes(self._value) def ord(self, space): if len(self._value) != 1: @@ -518,7 +518,7 @@ _title = _upper def _newlist_unwrapped(self, space, lst): - return space.newlist_str(lst) + return space.newlist_bytes(lst) @staticmethod @unwrap_spec(w_object = WrappedDefault("")) @@ -725,9 +725,9 @@ return tformat.formatter_field_name_split() -def _create_list_from_string(value): +def _create_list_from_bytes(value): # need this helper function to allow the jit to look inside and inline - # listview_str + # listview_bytes return [s for s in value] W_BytesObject.EMPTY = W_BytesObject('') diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -127,7 +127,7 @@ def w_keys(self, w_dict): space = self.space l = self.unerase(w_dict.dstorage).keys() - return space.newlist_str(l) + return space.newlist_bytes(l) def values(self, w_dict): iterator = self.unerase(w_dict.dstorage).itervalues diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -58,7 +58,7 @@ strategy = space.fromcache(MapDictStrategy) elif instance or strdict or module: assert w_type is None - strategy = space.fromcache(StringDictStrategy) + strategy = space.fromcache(BytesDictStrategy) elif kwargs: assert w_type is None from pypy.objspace.std.kwargsdict import EmptyKwargsDictStrategy @@ -117,9 +117,9 @@ if space.is_w(w_type, space.w_dict): w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) - strlist = space.listview_str(w_keys) - if strlist is not None: - for key in strlist: + byteslist = space.listview_bytes(w_keys) + if byteslist is not None: + for key in byteslist: w_dict.setitem_str(key, w_fill) else: for w_key in space.listview(w_keys): @@ -333,7 +333,7 @@ popitem delitem clear \ length w_keys values items \ iterkeys itervalues iteritems \ - listview_str listview_unicode listview_int \ + listview_bytes listview_unicode listview_int \ view_as_kwargs".split() def make_method(method): @@ -482,7 +482,7 @@ w_dict.strategy = strategy w_dict.dstorage = storage - def listview_str(self, w_dict): + def listview_bytes(self, w_dict): return None def listview_unicode(self, w_dict): @@ -506,7 +506,7 @@ def switch_to_correct_strategy(self, w_dict, w_key): withidentitydict = self.space.config.objspace.std.withidentitydict if type(w_key) is self.space.StringObjectCls: - self.switch_to_string_strategy(w_dict) + self.switch_to_bytes_strategy(w_dict) return elif type(w_key) is self.space.UnicodeObjectCls: self.switch_to_unicode_strategy(w_dict) 
@@ -519,8 +519,8 @@ else: self.switch_to_object_strategy(w_dict) - def switch_to_string_strategy(self, w_dict): - strategy = self.space.fromcache(StringDictStrategy) + def switch_to_bytes_strategy(self, w_dict): + strategy = self.space.fromcache(BytesDictStrategy) storage = strategy.get_empty_storage() w_dict.strategy = strategy w_dict.dstorage = storage @@ -572,7 +572,7 @@ w_dict.setitem(w_key, w_value) def setitem_str(self, w_dict, key, w_value): - self.switch_to_string_strategy(w_dict) + self.switch_to_bytes_strategy(w_dict) w_dict.setitem_str(key, w_value) def delitem(self, w_dict, w_key): @@ -874,8 +874,8 @@ create_iterator_classes(ObjectDictStrategy) -class StringDictStrategy(AbstractTypedStrategy, DictStrategy): - erase, unerase = rerased.new_erasing_pair("string") +class BytesDictStrategy(AbstractTypedStrategy, DictStrategy): + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -913,11 +913,11 @@ assert key is not None return self.unerase(w_dict.dstorage).get(key, None) - def listview_str(self, w_dict): + def listview_bytes(self, w_dict): return self.unerase(w_dict.dstorage).keys() def w_keys(self, w_dict): - return self.space.newlist_str(self.listview_str(w_dict)) + return self.space.newlist_bytes(self.listview_bytes(w_dict)) def wrapkey(space, key): return space.wrap(key) @@ -935,7 +935,7 @@ i += 1 return keys, values -create_iterator_classes(StringDictStrategy) +create_iterator_classes(BytesDictStrategy) class UnicodeDictStrategy(AbstractTypedStrategy, DictStrategy): @@ -961,7 +961,7 @@ def _never_equal_to(self, w_lookup_type): return _never_equal_to_string(self.space, w_lookup_type) - # we should implement the same shortcuts as we do for StringDictStrategy + # we should implement the same shortcuts as we do for BytesDictStrategy ## def setitem_str(self, w_dict, key, w_value): ## assert key is not None @@ -983,7 +983,7 @@ return self.unerase(w_dict.dstorage).keys() ## def w_keys(self, w_dict): - ## return self.space.newlist_str(self.listview_str(w_dict)) + ## return self.space.newlist_bytes(self.listview_bytes(w_dict)) def wrapkey(space, key): return space.wrap(key) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -84,7 +84,7 @@ def w_keys(self, w_dict): space = self.space - return space.newlist_str(self.unerase(w_dict.dstorage).dict_w.keys()) + return space.newlist_bytes(self.unerase(w_dict.dstorage).dict_w.keys()) def values(self, w_dict): return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -2,15 +2,13 @@ ## dict strategy (see dictmultiobject.py) from rpython.rlib import rerased, jit -from pypy.objspace.std.dictmultiobject import (DictStrategy, - create_iterator_classes, - EmptyDictStrategy, - ObjectDictStrategy, - StringDictStrategy) +from pypy.objspace.std.dictmultiobject import ( + BytesDictStrategy, DictStrategy, EmptyDictStrategy, ObjectDictStrategy, + create_iterator_classes) class EmptyKwargsDictStrategy(EmptyDictStrategy): - def switch_to_string_strategy(self, w_dict): + def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() w_dict.strategy = strategy @@ -61,7 +59,7 @@ else: # limit the size 
so that the linear searches don't become too long if len(keys) >= 16: - self.switch_to_string_strategy(w_dict) + self.switch_to_bytes_strategy(w_dict) w_dict.setitem_str(key, w_value) else: keys.append(key) @@ -111,7 +109,7 @@ def w_keys(self, w_dict): l = self.unerase(w_dict.dstorage)[0] - return self.space.newlist_str(l[:]) + return self.space.newlist_bytes(l[:]) def values(self, w_dict): return self.unerase(w_dict.dstorage)[1][:] # to make non-resizable @@ -142,8 +140,8 @@ w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) - def switch_to_string_strategy(self, w_dict): - strategy = self.space.fromcache(StringDictStrategy) + def switch_to_bytes_strategy(self, w_dict): + strategy = self.space.fromcache(BytesDictStrategy) keys, values_w = self.unerase(w_dict.dstorage) storage = strategy.get_empty_storage() d_new = strategy.unerase(storage) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -81,7 +81,7 @@ if not type(w_obj) is W_BytesObject: break else: - return space.fromcache(StringListStrategy) + return space.fromcache(BytesListStrategy) # check for unicode for w_obj in list_w: @@ -162,8 +162,8 @@ return self @staticmethod - def newlist_str(space, list_s): - strategy = space.fromcache(StringListStrategy) + def newlist_bytes(space, list_s): + strategy = space.fromcache(BytesListStrategy) storage = strategy.erase(list_s) return W_ListObject.from_storage_and_strategy(space, storage, strategy) @@ -278,10 +278,10 @@ ObjectListStrategy.""" return self.strategy.getitems_copy(self) - def getitems_str(self): + def getitems_bytes(self): """Return the items in the list as unwrapped strings. If the list does not use the list strategy, return None.""" - return self.strategy.getitems_str(self) + return self.strategy.getitems_bytes(self) def getitems_unicode(self): """Return the items in the list as unwrapped unicodes. 
If the list does @@ -753,7 +753,7 @@ def getitems_copy(self, w_list): raise NotImplementedError - def getitems_str(self, w_list): + def getitems_bytes(self, w_list): return None def getitems_unicode(self, w_list): @@ -897,7 +897,7 @@ if type(w_item) is W_IntObject: strategy = self.space.fromcache(IntegerListStrategy) elif type(w_item) is W_BytesObject: - strategy = self.space.fromcache(StringListStrategy) + strategy = self.space.fromcache(BytesListStrategy) elif type(w_item) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeListStrategy) elif type(w_item) is W_FloatObject: @@ -962,11 +962,11 @@ w_list.lstorage = strategy.erase(floatlist) return - strlist = space.listview_str(w_iterable) - if strlist is not None: - w_list.strategy = strategy = space.fromcache(StringListStrategy) + byteslist = space.listview_bytes(w_iterable) + if byteslist is not None: + w_list.strategy = strategy = space.fromcache(BytesListStrategy) # need to copy because intlist can share with w_iterable - w_list.lstorage = strategy.erase(strlist[:]) + w_list.lstorage = strategy.erase(byteslist[:]) return unilist = space.listview_unicode(w_iterable) @@ -1592,11 +1592,11 @@ return self.unerase(w_list.lstorage) -class StringListStrategy(ListStrategy): +class BytesListStrategy(ListStrategy): import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "str" + _applevel_repr = "bytes" def wrap(self, stringval): return self.space.wrap(stringval) @@ -1604,7 +1604,7 @@ def unwrap(self, w_string): return self.space.str_w(w_string) - erase, unerase = rerased.new_erasing_pair("string") + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1612,7 +1612,7 @@ return type(w_obj) is W_BytesObject def list_is_correct_type(self, w_list): - return w_list.strategy is self.space.fromcache(StringListStrategy) + return w_list.strategy is self.space.fromcache(BytesListStrategy) def sort(self, w_list, reverse): l = self.unerase(w_list.lstorage) @@ -1621,7 +1621,7 @@ if reverse: l.reverse() - def getitems_str(self, w_list): + def getitems_bytes(self, w_list): return self.unerase(w_list.lstorage) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -700,7 +700,7 @@ self.delitem(w_dict, w_key) return (w_key, w_value) - # XXX could implement a more efficient w_keys based on space.newlist_str + # XXX could implement a more efficient w_keys based on space.newlist_bytes def iterkeys(self, w_dict): return MapDictIteratorKeys(self.space, self, w_dict) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -292,8 +292,8 @@ assert not list_w or sizehint == -1 return W_ListObject(self, list_w, sizehint) - def newlist_str(self, list_s): - return W_ListObject.newlist_str(self, list_s) + def newlist_bytes(self, list_s): + return W_ListObject.newlist_bytes(self, list_s) def newlist_unicode(self, list_u): return W_ListObject.newlist_unicode(self, list_u) @@ -431,19 +431,19 @@ raise self._wrap_expected_length(expected_length, len(t)) return t - def listview_str(self, w_obj): + def listview_bytes(self, w_obj): # note: uses exact type checking for objects with strategies, # and isinstance() for others. See test_listobject.test_uses_custom... 
if type(w_obj) is W_ListObject: - return w_obj.getitems_str() + return w_obj.getitems_bytes() if type(w_obj) is W_DictMultiObject: - return w_obj.listview_str() + return w_obj.listview_bytes() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: - return w_obj.listview_str() + return w_obj.listview_bytes() if isinstance(w_obj, W_BytesObject) and self._uses_no_iter(w_obj): - return w_obj.listview_str() + return w_obj.listview_bytes() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): - return w_obj.getitems_str() + return w_obj.getitems_bytes() return None def listview_unicode(self, w_obj): diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -79,9 +79,9 @@ """ Returns a dict with all elements of the set. Needed only for switching to ObjectSetStrategy. """ return self.strategy.getdict_w(self) - def listview_str(self): + def listview_bytes(self): """ If this is a string set return its contents as a list of uwnrapped strings. Otherwise return None. """ - return self.strategy.listview_str(self) + return self.strategy.listview_bytes(self) def listview_unicode(self): """ If this is a unicode set return its contents as a list of uwnrapped unicodes. Otherwise return None. """ @@ -669,7 +669,7 @@ """ Returns an empty storage (erased) object. Used to initialize an empty set.""" raise NotImplementedError - def listview_str(self, w_set): + def listview_bytes(self, w_set): return None def listview_unicode(self, w_set): @@ -776,7 +776,7 @@ if type(w_key) is W_IntObject: strategy = self.space.fromcache(IntegerSetStrategy) elif type(w_key) is W_BytesObject: - strategy = self.space.fromcache(StringSetStrategy) + strategy = self.space.fromcache(BytesSetStrategy) elif type(w_key) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeSetStrategy) elif self.space.type(w_key).compares_by_identity(): @@ -1196,8 +1196,8 @@ return self.wrap(result[0]) -class StringSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): - erase, unerase = rerased.new_erasing_pair("string") +class BytesSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1207,7 +1207,7 @@ def get_empty_dict(self): return {} - def listview_str(self, w_set): + def listview_bytes(self, w_set): return self.unerase(w_set.sstorage).keys() def is_correct_type(self, w_key): @@ -1229,7 +1229,7 @@ return self.space.wrap(item) def iter(self, w_set): - return StringIteratorImplementation(self.space, self, w_set) + return BytesIteratorImplementation(self.space, self, w_set) class UnicodeSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): @@ -1286,7 +1286,7 @@ return type(w_key) is W_IntObject def may_contain_equal_elements(self, strategy): - if strategy is self.space.fromcache(StringSetStrategy): + if strategy is self.space.fromcache(BytesSetStrategy): return False elif strategy is self.space.fromcache(UnicodeSetStrategy): return False @@ -1371,7 +1371,7 @@ return False if strategy is self.space.fromcache(IntegerSetStrategy): return False - if strategy is self.space.fromcache(StringSetStrategy): + if strategy is self.space.fromcache(BytesSetStrategy): return False if strategy is self.space.fromcache(UnicodeSetStrategy): return False @@ -1436,7 +1436,7 @@ return None -class StringIteratorImplementation(IteratorImplementation): +class BytesIteratorImplementation(IteratorImplementation): def __init__(self, 
space, strategy, w_set): IteratorImplementation.__init__(self, space, strategy, w_set) d = strategy.unerase(w_set.sstorage) @@ -1546,11 +1546,11 @@ w_set.sstorage = w_iterable.get_storage_copy() return - stringlist = space.listview_str(w_iterable) - if stringlist is not None: - strategy = space.fromcache(StringSetStrategy) + byteslist = space.listview_bytes(w_iterable) + if byteslist is not None: + strategy = space.fromcache(BytesSetStrategy) w_set.strategy = strategy - w_set.sstorage = strategy.get_storage_from_unwrapped_list(stringlist) + w_set.sstorage = strategy.get_storage_from_unwrapped_list(byteslist) return unicodelist = space.listview_unicode(w_iterable) @@ -1593,7 +1593,7 @@ if type(w_item) is not W_BytesObject: break else: - w_set.strategy = space.fromcache(StringSetStrategy) + w_set.strategy = space.fromcache(BytesSetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) return diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -311,7 +311,7 @@ from pypy.objspace.std.unicodeobject import W_UnicodeObject if isinstance(self, W_BytesObject): - l = space.listview_str(w_list) + l = space.listview_bytes(w_list) if l is not None: if len(l) == 1: return space.wrap(l[0]) diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -80,9 +80,9 @@ w_slice = space.newslice(w(1), w_None, w(2)) assert self.space.eq_w(space.getitem(w_str, w_slice), w('el')) - def test_listview_str(self): + def test_listview_bytes(self): w_str = self.space.wrap('abcd') - assert self.space.listview_str(w_str) == list("abcd") + assert self.space.listview_bytes(w_str) == list("abcd") class AppTestBytesObject: diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -2,7 +2,7 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - StringDictStrategy, ObjectDictStrategy) + BytesDictStrategy, ObjectDictStrategy) class TestW_DictObject(object): @@ -134,11 +134,11 @@ assert space.eq_w(w_d.getitem_str("a"), space.w_None) assert space.eq_w(w_d.getitem_str("b"), space.w_None) - def test_listview_str_dict(self): + def test_listview_bytes_dict(self): w = self.space.wrap w_d = self.space.newdict() w_d.initialize_content([(w("a"), w(1)), (w("b"), w(2))]) - assert self.space.listview_str(w_d) == ["a", "b"] + assert self.space.listview_bytes(w_d) == ["a", "b"] def test_listview_unicode_dict(self): w = self.space.wrap @@ -160,7 +160,7 @@ w_l = self.space.call_method(w_d, "keys") assert sorted(self.space.listview_int(w_l)) == [1,2] - # make sure that .keys() calls newlist_str for string dicts + # make sure that .keys() calls newlist_bytes for string dicts def not_allowed(*args): assert False, 'should not be called' monkeypatch.setattr(self.space, 'newlist', not_allowed) @@ -168,7 +168,7 @@ w_d = self.space.newdict() w_d.initialize_content([(w("a"), w(1)), (w("b"), w(6))]) w_l = self.space.call_method(w_d, "keys") - assert sorted(self.space.listview_str(w_l)) == ["a", "b"] + assert sorted(self.space.listview_bytes(w_l)) == ["a", "b"] # XXX: it would be nice if the test passed without monkeypatch.undo(), # but we need space.newlist_unicode for it @@ -944,7 +944,7 @@ d = {} 
assert "EmptyDictStrategy" in self.get_strategy(d) d["a"] = 1 - assert "StringDictStrategy" in self.get_strategy(d) + assert "BytesDictStrategy" in self.get_strategy(d) class O(object): pass @@ -952,7 +952,7 @@ d = o.__dict__ = {} assert "EmptyDictStrategy" in self.get_strategy(d) o.a = 1 - assert "StringDictStrategy" in self.get_strategy(d) + assert "BytesDictStrategy" in self.get_strategy(d) def test_empty_to_unicode(self): d = {} @@ -1033,7 +1033,7 @@ eq_w = eq def newlist(self, l): return l - def newlist_str(self, l): + def newlist_bytes(self, l): return l DictObjectCls = W_DictMultiObject def type(self, w_obj): @@ -1275,9 +1275,9 @@ assert "s" not in d.w_keys() assert F() not in d.w_keys() -class TestStrDictImplementation(BaseTestRDictImplementation): - StrategyClass = StringDictStrategy - #ImplementionClass = StrDictImplementation +class TestBytesDictImplementation(BaseTestRDictImplementation): + StrategyClass = BytesDictStrategy + #ImplementionClass = BytesDictImplementation def test_str_shortcut(self): self.fill_impl() @@ -1301,12 +1301,12 @@ def check_not_devolved(self): pass -class TestDevolvedStrDictImplementation(BaseTestDevolvedDictImplementation): - StrategyClass = StringDictStrategy +class TestDevolvedBytesDictImplementation(BaseTestDevolvedDictImplementation): + StrategyClass = BytesDictStrategy def test_module_uses_strdict(): fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is StringDictStrategy + assert type(d.strategy) is BytesDictStrategy diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -73,7 +73,7 @@ for i in range(100): assert d.setitem_str("d%s" % i, 4) is None assert d.strategy is not strategy - assert "StringDictStrategy" == d.strategy.__class__.__name__ + assert "BytesDictStrategy" == d.strategy.__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,5 +1,5 @@ import sys -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, StringListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, BytesListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -13,7 +13,7 @@ assert isinstance(W_ListObject(space, [w(1),w(2),w(3)]).strategy, IntegerListStrategy) assert isinstance(W_ListObject(space, [w('a'), w('b')]).strategy, - StringListStrategy) + BytesListStrategy) assert isinstance(W_ListObject(space, [w(u'a'), w(u'b')]).strategy, UnicodeListStrategy) assert isinstance(W_ListObject(space, [w(u'a'), w('b')]).strategy, @@ -35,7 +35,7 @@ l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.append(w('a')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -59,9 +59,9 @@ def test_string_to_any(self): l = W_ListObject(self.space, 
[self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.append(self.space.wrap('d')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.append(self.space.wrap(3)) assert isinstance(l.strategy, ObjectListStrategy) @@ -92,7 +92,7 @@ l.setitem(0, w('d')) assert space.eq_w(l.getitem(0), w('d')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) # IntStrategy to ObjectStrategy l = W_ListObject(space, [w(1),w(2),w(3)]) @@ -100,9 +100,9 @@ l.setitem(0, w('d')) assert isinstance(l.strategy, ObjectListStrategy) - # StringStrategy to ObjectStrategy + # BytesStrategy to ObjectStrategy l = W_ListObject(space, [w('a'),w('b'),w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.setitem(0, w(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -127,9 +127,9 @@ l.insert(3, w(4)) assert isinstance(l.strategy, IntegerListStrategy) - # StringStrategy + # BytesStrategy l = W_ListObject(space, [w('a'),w('b'),w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.insert(3, w(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -155,7 +155,7 @@ l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.insert(0, w('a')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -207,9 +207,9 @@ l.setslice(0, 1, 2, W_ListObject(space, [w('a'), w('b'), w('c')])) assert isinstance(l.strategy, ObjectListStrategy) - # StringStrategy to ObjectStrategy + # BytesStrategy to ObjectStrategy l = W_ListObject(space, [w('a'), w('b'), w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.setslice(0, 1, 2, W_ListObject(space, [w(1), w(2), w(3)])) assert isinstance(l.strategy, ObjectListStrategy) @@ -261,7 +261,7 @@ l = W_ListObject(space, wrapitems(["a","b","c","d","e"])) other = W_ListObject(space, wrapitems(["a", "b", "c"])) keep_other_strategy(l, 0, 2, other.length(), other) - assert l.strategy is space.fromcache(StringListStrategy) + assert l.strategy is space.fromcache(BytesListStrategy) l = W_ListObject(space, wrapitems([u"a",u"b",u"c",u"d",u"e"])) other = W_ListObject(space, wrapitems([u"a", u"b", u"c"])) @@ -330,7 +330,7 @@ empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(space, [w("a"), w("b"), w("c")])) - assert isinstance(empty.strategy, StringListStrategy) + assert isinstance(empty.strategy, BytesListStrategy) empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) @@ -514,17 +514,17 @@ def test_unicode(self): l1 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap("zwei")]) - assert isinstance(l1.strategy, StringListStrategy) + assert isinstance(l1.strategy, BytesListStrategy) l2 = W_ListObject(self.space, [self.space.wrap(u"eins"), self.space.wrap(u"zwei")]) assert isinstance(l2.strategy, UnicodeListStrategy) l3 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap(u"zwei")]) assert isinstance(l3.strategy, ObjectListStrategy) - def test_listview_str(self): + def test_listview_bytes(self): space = self.space - assert space.listview_str(space.wrap(1)) 
== None + assert space.listview_bytes(space.wrap(1)) == None w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) - assert space.listview_str(w_l) == ["a", "b"] + assert space.listview_bytes(w_l) == ["a", "b"] def test_listview_unicode(self): space = self.space @@ -532,7 +532,7 @@ w_l = self.space.newlist([self.space.wrap(u'a'), self.space.wrap(u'b')]) assert space.listview_unicode(w_l) == [u"a", u"b"] - def test_string_join_uses_listview_str(self): + def test_string_join_uses_listview_bytes(self): space = self.space w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) w_l.getitems = None @@ -556,14 +556,14 @@ w_l.getitems = None assert space.is_w(space.call_method(space.wrap(u" -- "), "join", w_l), w_text) - def test_newlist_str(self): + def test_newlist_bytes(self): space = self.space l = ['a', 'b'] - w_l = self.space.newlist_str(l) - assert isinstance(w_l.strategy, StringListStrategy) - assert space.listview_str(w_l) is l + w_l = self.space.newlist_bytes(l) + assert isinstance(w_l.strategy, BytesListStrategy) + assert space.listview_bytes(w_l) is l - def test_string_uses_newlist_str(self): + def test_string_uses_newlist_bytes(self): space = self.space w_s = space.wrap("a b c") space.newlist = None @@ -574,10 +574,10 @@ w_l4 = space.call_method(w_s, "rsplit", space.wrap(" ")) finally: del space.newlist - assert space.listview_str(w_l) == ["a", "b", "c"] - assert space.listview_str(w_l2) == ["a", "b", "c"] - assert space.listview_str(w_l3) == ["a", "b", "c"] - assert space.listview_str(w_l4) == ["a", "b", "c"] + assert space.listview_bytes(w_l) == ["a", "b", "c"] + assert space.listview_bytes(w_l2) == ["a", "b", "c"] + assert space.listview_bytes(w_l3) == ["a", "b", "c"] + assert space.listview_bytes(w_l4) == ["a", "b", "c"] def test_unicode_uses_newlist_unicode(self): space = self.space @@ -630,10 +630,10 @@ assert space.eq_w(w_l, w_l2) - def test_listview_str_list(self): + def test_listview_bytes_list(self): space = self.space w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b")]) - assert self.space.listview_str(w_l) == ["a", "b"] + assert self.space.listview_bytes(w_l) == ["a", "b"] def test_listview_unicode_list(self): space = self.space diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -82,7 +82,7 @@ def test_create_set_from_list(self): from pypy.interpreter.baseobjspace import W_Root - from pypy.objspace.std.setobject import ObjectSetStrategy, StringSetStrategy, UnicodeSetStrategy + from pypy.objspace.std.setobject import BytesSetStrategy, ObjectSetStrategy, UnicodeSetStrategy from pypy.objspace.std.floatobject import W_FloatObject w = self.space.wrap @@ -100,7 +100,7 @@ w_list = W_ListObject(self.space, [w("1"), w("2"), w("3")]) w_set = W_SetObject(self.space) _initialize_set(self.space, w_set, w_list) - assert w_set.strategy is self.space.fromcache(StringSetStrategy) + assert w_set.strategy is self.space.fromcache(BytesSetStrategy) assert w_set.strategy.unerase(w_set.sstorage) == {"1":None, "2":None, "3":None} w_list = self.space.iter(W_ListObject(self.space, [w(u"1"), w(u"2"), w(u"3")])) @@ -126,18 +126,18 @@ # changed cached object, need to change it back for other tests to pass intstr.get_storage_from_list = tmp_func - def test_listview_str_int_on_set(self): + def test_listview_bytes_int_on_set(self): w = self.space.wrap w_a = W_SetObject(self.space) _initialize_set(self.space, w_a, 
w("abcdefg")) - assert sorted(self.space.listview_str(w_a)) == list("abcdefg") + assert sorted(self.space.listview_bytes(w_a)) == list("abcdefg") assert self.space.listview_int(w_a) is None w_b = W_SetObject(self.space) _initialize_set(self.space, w_b, self.space.newlist([w(1),w(2),w(3),w(4),w(5)])) assert sorted(self.space.listview_int(w_b)) == [1,2,3,4,5] - assert self.space.listview_str(w_b) is None + assert self.space.listview_bytes(w_b) is None class AppTestAppSetTest: diff --git a/pypy/objspace/std/test/test_setstrategies.py b/pypy/objspace/std/test/test_setstrategies.py --- a/pypy/objspace/std/test/test_setstrategies.py +++ b/pypy/objspace/std/test/test_setstrategies.py @@ -1,10 +1,8 @@ from pypy.objspace.std.setobject import W_SetObject -from pypy.objspace.std.setobject import (IntegerSetStrategy, ObjectSetStrategy, - EmptySetStrategy, StringSetStrategy, - UnicodeSetStrategy, - IntegerIteratorImplementation, - StringIteratorImplementation, - UnicodeIteratorImplementation) +from pypy.objspace.std.setobject import ( + BytesIteratorImplementation, BytesSetStrategy, EmptySetStrategy, + IntegerIteratorImplementation, IntegerSetStrategy, ObjectSetStrategy, + UnicodeIteratorImplementation, UnicodeSetStrategy) from pypy.objspace.std.listobject import W_ListObject class TestW_SetStrategies: @@ -26,7 +24,7 @@ assert s.strategy is self.space.fromcache(EmptySetStrategy) s = W_SetObject(self.space, self.wrapped(["a", "b"])) - assert s.strategy is self.space.fromcache(StringSetStrategy) + assert s.strategy is self.space.fromcache(BytesSetStrategy) s = W_SetObject(self.space, self.wrapped([u"a", u"b"])) assert s.strategy is self.space.fromcache(UnicodeSetStrategy) @@ -126,7 +124,7 @@ # s = W_SetObject(space, self.wrapped(["a", "b"])) it = s.iter() - assert isinstance(it, StringIteratorImplementation) + assert isinstance(it, BytesIteratorImplementation) assert space.unwrap(it.next()) == "a" assert space.unwrap(it.next()) == "b" # @@ -142,7 +140,7 @@ assert sorted(space.listview_int(s)) == [1, 2] # s = W_SetObject(space, self.wrapped(["a", "b"])) - assert sorted(space.listview_str(s)) == ["a", "b"] + assert sorted(space.listview_bytes(s)) == ["a", "b"] # s = W_SetObject(space, self.wrapped([u"a", u"b"])) assert sorted(space.listview_unicode(s)) == [u"a", u"b"] diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -531,7 +531,7 @@ """x.__getitem__(y) <==> x[y]""" def __getnewargs__(): - """""" + "" def __getslice__(): """x.__getslice__(i, j) <==> x[i:j] diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -9,7 +9,7 @@ from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform -from rpython.annotator import model as annmodel, signature, unaryop, binaryop +from rpython.annotator import model as annmodel, signature from rpython.annotator.bookkeeper import Bookkeeper import py @@ -455,12 +455,12 @@ # occour for this specific, typed operation. 
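The hunk that follows drops the annotator's dependence on hand-maintained operation-name tables: every operation object in rpython.flowspace.operation now carries a 'dispatch' attribute giving its arity (1 for unary, 2 for binary), and the binaryop.py/unaryop.py hunks further down rebuild BINARY_OPERATIONS and UNARY_OPERATIONS from it. A stand-alone toy model of that pattern, using made-up operation objects rather than the real 'op' namespace:

    class _Op(object):
        def __init__(self, opname, dispatch):
            self.opname = opname        # e.g. 'add'
            self.dispatch = dispatch    # arity: 1 = unary, 2 = binary

    class op(object):                   # toy stand-in for flowspace's 'op'
        add = _Op('add', 2)
        mul = _Op('mul', 2)
        neg = _Op('neg', 1)
        bool = _Op('bool', 1)

    BINARY_OPERATIONS = set(o.opname for o in vars(op).values()
                            if isinstance(o, _Op) and o.dispatch == 2)
    UNARY_OPERATIONS = set(o.opname for o in vars(op).values()
                           if isinstance(o, _Op) and o.dispatch == 1)
    assert BINARY_OPERATIONS == set(['add', 'mul'])
    assert UNARY_OPERATIONS == set(['neg', 'bool'])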
if block.exitswitch == c_last_exception: op = block.operations[-1] - if op.opname in binaryop.BINARY_OPERATIONS: + if op.dispatch == 2: arg1 = self.binding(op.args[0]) arg2 = self.binding(op.args[1]) binop = getattr(pair(arg1, arg2), op.opname, None) can_only_throw = annmodel.read_can_only_throw(binop, arg1, arg2) - elif op.opname in unaryop.UNARY_OPERATIONS: + elif op.dispatch == 1: arg1 = self.binding(op.args[0]) opname = op.opname if opname == 'contains': opname = 'op_contains' @@ -611,44 +611,6 @@ def noreturnvalue(self, op): return annmodel.s_ImpossibleValue # no return value (hook method) - # XXX "contains" clash with SomeObject method - def consider_op_contains(self, seq, elem): - self.bookkeeper.count("contains", seq) - return seq.op_contains(elem) - - def consider_op_newtuple(self, *args): - return annmodel.SomeTuple(items = args) - - def consider_op_newlist(self, *args): - return self.bookkeeper.newlist(*args) - - def consider_op_newdict(self): - return self.bookkeeper.newdict() - - - def _registeroperations(cls, unary_ops, binary_ops): - # All unary operations - d = {} - for opname in unary_ops: - fnname = 'consider_op_' + opname - exec py.code.Source(""" -def consider_op_%s(self, arg, *args): - return arg.%s(*args) -""" % (opname, opname)).compile() in globals(), d - setattr(cls, fnname, d[fnname]) - # All binary operations - for opname in binary_ops: - fnname = 'consider_op_' + opname - exec py.code.Source(""" -def consider_op_%s(self, arg1, arg2, *args): - return pair(arg1,arg2).%s(*args) -""" % (opname, opname)).compile() in globals(), d - setattr(cls, fnname, d[fnname]) - _registeroperations = classmethod(_registeroperations) - -# register simple operations handling -RPythonAnnotator._registeroperations(unaryop.UNARY_OPERATIONS, binaryop.BINARY_OPERATIONS) - class BlockedInference(Exception): """This exception signals the type inference engine that the situation diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -12,10 +12,11 @@ SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, - missing_operation, read_can_only_throw, add_knowntypedata, + read_can_only_throw, add_knowntypedata, merge_knowntypedata,) from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant +from rpython.flowspace.operation import op from rpython.rlib import rarithmetic from rpython.annotator.model import AnnotatorError @@ -23,28 +24,9 @@ def immutablevalue(x): return getbookkeeper().immutablevalue(x) -# XXX unify this with ObjSpace.MethodTable -BINARY_OPERATIONS = set(['add', 'sub', 'mul', 'div', 'mod', - 'truediv', 'floordiv', 'divmod', - 'and_', 'or_', 'xor', - 'lshift', 'rshift', - 'getitem', 'setitem', 'delitem', - 'getitem_idx', 'getitem_key', 'getitem_idx_key', - 'inplace_add', 'inplace_sub', 'inplace_mul', - 'inplace_truediv', 'inplace_floordiv', 'inplace_div', - 'inplace_mod', - 'inplace_lshift', 'inplace_rshift', - 'inplace_and', 'inplace_or', 'inplace_xor', - 'lt', 'le', 'eq', 'ne', 'gt', 'ge', 'is_', 'cmp', - 'coerce', - ] - +[opname+'_ovf' for opname in - """add sub mul floordiv div mod lshift - """.split() - ]) +BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() + if oper.dispatch == 2]) -for opname in BINARY_OPERATIONS: - missing_operation(pairtype(SomeObject, SomeObject), 
opname) class __extend__(pairtype(SomeObject, SomeObject)): @@ -78,46 +60,39 @@ if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const < obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def le((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const <= obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def eq((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const == obj2.const) else: - getbookkeeper().count("non_int_eq", obj1, obj2) return s_Bool def ne((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const != obj2.const) else: - getbookkeeper().count("non_int_eq", obj1, obj2) return s_Bool def gt((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const > obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def ge((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const >= obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def cmp((obj1, obj2)): - getbookkeeper().count("cmp", obj1, obj2) if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(cmp(obj1.const, obj2.const)) else: @@ -163,13 +138,19 @@ return r def divmod((obj1, obj2)): - getbookkeeper().count("divmod", obj1, obj2) return SomeTuple([pair(obj1, obj2).div(), pair(obj1, obj2).mod()]) def coerce((obj1, obj2)): - getbookkeeper().count("coerce", obj1, obj2) return pair(obj1, obj2).union() # reasonable enough + def getitem((obj1, obj2)): + return s_ImpossibleValue + add = sub = mul = truediv = floordiv = div = mod = getitem + lshift = rshift = and_ = or_ = xor = delitem = getitem + + def setitem((obj1, obj2), _): + return s_ImpossibleValue + # approximation of an annotation intersection, the result should be the annotation obj or # the intersection of obj and improvement def improve((obj, improvement)): @@ -466,7 +447,6 @@ SomeUnicodeString))): raise AnnotatorError( "string formatting mixing strings and unicode not supported") - getbookkeeper().count('strformat', s_string, s_tuple) no_nul = s_string.no_nul for s_item in s_tuple.items: if isinstance(s_item, SomeFloat): @@ -484,7 +464,6 @@ pairtype(SomeUnicodeString, SomeObject)): def mod((s_string, args)): - getbookkeeper().count('strformat', s_string, args) return s_string.__class__() class __extend__(pairtype(SomeFloat, SomeFloat)): @@ -586,19 +565,16 @@ return [KeyError] def getitem((dic1, obj2)): - getbookkeeper().count("dict_getitem", dic1) dic1.dictdef.generalize_key(obj2) return dic1.dictdef.read_value() getitem.can_only_throw = _can_only_throw def setitem((dic1, obj2), s_value): - getbookkeeper().count("dict_setitem", dic1) dic1.dictdef.generalize_key(obj2) dic1.dictdef.generalize_value(s_value) setitem.can_only_throw = _can_only_throw def delitem((dic1, obj2)): - getbookkeeper().count("dict_delitem", dic1) dic1.dictdef.generalize_key(obj2) delitem.can_only_throw = _can_only_throw @@ -612,7 +588,6 @@ except IndexError: return s_ImpossibleValue else: - getbookkeeper().count("tuple_random_getitem", tup1) return unionof(*tup1.items) getitem.can_only_throw = [IndexError] @@ -623,74 +598,63 @@ return lst1.listdef.offspring() def getitem((lst1, int2)): - getbookkeeper().count("list_getitem", 
int2) return lst1.listdef.read_item() getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((lst1, int2)): - getbookkeeper().count("list_getitem", int2) return lst1.listdef.read_item() getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def setitem((lst1, int2), s_value): - getbookkeeper().count("list_setitem", int2) lst1.listdef.mutate() lst1.listdef.generalize(s_value) setitem.can_only_throw = [IndexError] def delitem((lst1, int2)): - getbookkeeper().count("list_delitem", int2) lst1.listdef.resize() delitem.can_only_throw = [IndexError] class __extend__(pairtype(SomeString, SomeInteger)): def getitem((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeChar(no_nul=str1.no_nul) getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeChar(no_nul=str1.no_nul) getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def mul((str1, int2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str1, int2) return SomeString(no_nul=str1.no_nul) class __extend__(pairtype(SomeUnicodeString, SomeInteger)): def getitem((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeUnicodeCodePoint() getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeUnicodeCodePoint() getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def mul((str1, int2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str1, int2) return SomeUnicodeString() class __extend__(pairtype(SomeInteger, SomeString), pairtype(SomeInteger, SomeUnicodeString)): def mul((int1, str2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str2, int1) return str2.basestringclass() class __extend__(pairtype(SomeUnicodeCodePoint, SomeUnicodeString), diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -25,112 +25,6 @@ from rpython.rtyper import extregistry -class Stats(object): - - def __init__(self, bookkeeper): - self.bookkeeper = bookkeeper - self.classify = {} - - def count(self, category, *args): - for_category = self.classify.setdefault(category, {}) - classifier = getattr(self, 'consider_%s' % category, self.consider_generic) - outcome = classifier(*args) - for_category[self.bookkeeper.position_key] = outcome - - def indexrepr(self, idx): - if idx.is_constant(): - if idx.const is None: - return '' - if isinstance(idx, SomeInteger): - if idx.const >=0: - return 'pos-constant' - else: - return 'Neg-constant' - return idx.const - else: - if isinstance(idx, SomeInteger): - if idx.nonneg: - return "non-neg" - else: - return "MAYBE-NEG" - else: - return self.typerepr(idx) - - def steprepr(self, stp): - if stp.is_constant(): - if stp.const in (1, None): - return 'step=1' - else: - return 'step=%s?' 
% stp.const - else: - return 'non-const-step %s' % self.typerepr(stp) - - def consider_generic(self, *args): - return tuple([self.typerepr(x) for x in args]) - - def consider_list_list_eq(self, obj1, obj2): - return obj1, obj2 - - def consider_contains(self, seq): - return seq - - def consider_non_int_eq(self, obj1, obj2): - if obj1.knowntype == obj2.knowntype == list: - self.count("list_list_eq", obj1, obj2) - return self.typerepr(obj1), self.typerepr(obj2) - - def consider_non_int_comp(self, obj1, obj2): - return self.typerepr(obj1), self.typerepr(obj2) - - def typerepr(self, obj): - if isinstance(obj, SomeInstance): - return obj.classdef.name - else: - return obj.knowntype.__name__ - - def consider_tuple_random_getitem(self, tup): - return tuple([self.typerepr(x) for x in tup.items]) - - def consider_list_index(self): - return '!' - - def consider_list_getitem(self, idx): - return self.indexrepr(idx) - - def consider_list_setitem(self, idx): - return self.indexrepr(idx) - - def consider_list_delitem(self, idx): - return self.indexrepr(idx) - - def consider_str_join(self, s): - if s.is_constant(): - return repr(s.const) - else: - return "NON-CONSTANT" - - def consider_str_getitem(self, idx): - return self.indexrepr(idx) - - def consider_strformat(self, str, args): - if str.is_constant(): - s = repr(str.const) - else: - s = "?!!!!!!" - if isinstance(args, SomeTuple): - return (s, tuple([self.typerepr(x) for x in args.items])) - else: - return (s, self.typerepr(args)) - - def consider_dict_getitem(self, dic): - return dic - - def consider_dict_setitem(self, dic): - return dic - - def consider_dict_delitem(self, dic): - return dic - class Bookkeeper(object): """The log of choices that have been made while analysing the operations. It ensures that the same 'choice objects' will be returned if we ask @@ -165,13 +59,8 @@ self.needs_generic_instantiate = {} - self.stats = Stats(self) - delayed_imports() - def count(self, category, *args): - self.stats.count(category, *args) - def enter(self, position_key): """Start of an operation. 
The operation is uniquely identified by the given key.""" diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -347,9 +347,6 @@ def test(*args): return s_Bool -def import_func(*args): - return SomeObject() - # collect all functions import __builtin__ BUILTIN_ANALYZERS = {} @@ -397,9 +394,6 @@ else: BUILTIN_ANALYZERS[object.__init__] = object_init -# import -BUILTIN_ANALYZERS[__import__] = import_func - # annotation of low-level types from rpython.annotator.model import SomePtr from rpython.rtyper.lltypesystem import lltype diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -799,21 +799,6 @@ assert 0, "couldn't get to commonbase of %r and %r" % (cls1, cls2) -def missing_operation(cls, name): - def default_op(*args): - if args and isinstance(args[0], tuple): - flattened = tuple(args[0]) + args[1:] - else: - flattened = args - for arg in flattened: - if arg.__class__ is SomeObject and arg.knowntype is not type: - return SomeObject() - bookkeeper = rpython.annotator.bookkeeper.getbookkeeper() - bookkeeper.warning("no precise annotation supplied for %s%r" % (name, args)) - return s_ImpossibleValue - setattr(cls, name, default_op) - - class HarmlesslyBlocked(Exception): """Raised by the unaryop/binaryop to signal a harmless kind of BlockedInference: the current block is blocked, but not in a way diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -14,7 +14,8 @@ from rpython.rlib.rarithmetic import r_uint, base_int, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import objectmodel -from rpython.flowspace.objspace import build_flow, FlowingError +from rpython.flowspace.objspace import build_flow +from rpython.flowspace.flowcontext import FlowingError from rpython.flowspace.operation import op from rpython.translator.test import snippet diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -5,11 +5,12 @@ from __future__ import absolute_import from types import MethodType +from rpython.flowspace.operation import op from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, SomePBC, SomeTypedAddressAccess, SomeAddress, SomeType, s_ImpossibleValue, - s_Bool, s_None, unionof, missing_operation, add_knowntypedata, + s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin @@ -20,17 +21,8 @@ def immutablevalue(x): return getbookkeeper().immutablevalue(x) -UNARY_OPERATIONS = set(['len', 'bool', 'getattr', 'setattr', 'delattr', - 'simple_call', 'call_args', 'str', 'repr', - 'iter', 'next', 'invert', 'type', 'issubtype', - 'pos', 'neg', 'abs', 'hex', 'oct', - 'ord', 'int', 'float', 'long', - 'hash', 'id', # <== not supported any more - 'getslice', 'setslice', 'delslice', - 'neg_ovf', 'abs_ovf', 'hint', 'unicode', 'unichr']) - -for opname in UNARY_OPERATIONS: - missing_operation(SomeObject, opname) +UNARY_OPERATIONS = set([oper.opname for oper in 
op.__dict__.values() + if oper.dispatch == 1]) class __extend__(SomeObject): @@ -84,23 +76,18 @@ raise AnnotatorError("cannot use hash() in RPython") def str(self): - getbookkeeper().count('str', self) return SomeString() def unicode(self): - getbookkeeper().count('unicode', self) return SomeUnicodeString() def repr(self): - getbookkeeper().count('repr', self) return SomeString() def hex(self): - getbookkeeper().count('hex', self) return SomeString() def oct(self): - getbookkeeper().count('oct', self) return SomeString() def id(self): @@ -144,6 +131,9 @@ raise AnnotatorError("Cannot find attribute %r on %r" % (attr, self)) getattr.can_only_throw = [] + def setattr(self, *args): + return s_ImpossibleValue + def bind_callables_under(self, classdef, name): return self # default unbound __get__ implementation @@ -163,6 +153,20 @@ def hint(self, *args_s): return self + def getslice(self, *args): + return s_ImpossibleValue + + def setslice(self, *args): + return s_ImpossibleValue + + def delslice(self, *args): + return s_ImpossibleValue + + def pos(self): + return s_ImpossibleValue + neg = abs = ord = invert = long = iter = next = pos + + class __extend__(SomeFloat): def pos(self): @@ -237,7 +241,6 @@ return immutablevalue(len(self.items)) def iter(self): - getbookkeeper().count("tuple_iter", self) return SomeIterator(self) iter.can_only_throw = [] @@ -281,7 +284,6 @@ method_pop.can_only_throw = [IndexError] def method_index(self, s_value): - getbookkeeper().count("list_index") self.listdef.generalize(s_value) return SomeInteger(nonneg=True) @@ -472,7 +474,6 @@ def method_join(self, s_list): if s_None.contains(s_list): return SomeImpossibleValue() - getbookkeeper().count("str_join", self) s_item = s_list.listdef.read_item() if s_None.contains(s_item): if isinstance(self, SomeUnicodeString): @@ -489,7 +490,6 @@ return self.basecharclass() def method_split(self, patt, max=-1): - getbookkeeper().count("str_split", self, patt) if max == -1 and patt.is_constant() and patt.const == "\0": no_nul = True else: @@ -498,7 +498,6 @@ return getbookkeeper().newlist(s_item) def method_rsplit(self, patt, max=-1): - getbookkeeper().count("str_rsplit", self, patt) s_item = self.basestringclass(no_nul=self.no_nul) return getbookkeeper().newlist(s_item) @@ -709,8 +708,6 @@ if self.s_self is not None: return self.analyser(self.s_self, *args) else: - if self.methodname: - getbookkeeper().count(self.methodname.replace('.', '_'), *args) return self.analyser(*args) simple_call.can_only_throw = _can_only_throw diff --git a/rpython/flowspace/bytecode.py b/rpython/flowspace/bytecode.py --- a/rpython/flowspace/bytecode.py +++ b/rpython/flowspace/bytecode.py @@ -34,8 +34,8 @@ opnames = host_bytecode_spec.method_names def __init__(self, argcount, nlocals, stacksize, flags, - code, consts, names, varnames, filename, - name, firstlineno, lnotab, freevars): + code, consts, names, varnames, filename, + name, firstlineno, lnotab, freevars): """Initialize a new code object""" assert nlocals >= 0 self.co_argcount = argcount @@ -58,18 +58,18 @@ """Initialize the code object from a real (CPython) one. 
""" return cls(code.co_argcount, - code.co_nlocals, - code.co_stacksize, - code.co_flags, - code.co_code, - list(code.co_consts), - list(code.co_names), - list(code.co_varnames), - code.co_filename, - code.co_name, - code.co_firstlineno, - code.co_lnotab, - list(code.co_freevars)) + code.co_nlocals, + code.co_stacksize, + code.co_flags, + code.co_code, + list(code.co_consts), + list(code.co_names), + list(code.co_varnames), + code.co_filename, + code.co_name, + code.co_firstlineno, + code.co_lnotab, + list(code.co_freevars)) @property def formalargcount(self): diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py --- a/rpython/flowspace/flowcontext.py +++ b/rpython/flowspace/flowcontext.py @@ -1,12 +1,14 @@ -"""Implements the core parts of flow graph creation, in tandem -with rpython.flowspace.objspace. +"""Implements the core parts of flow graph creation. """ import sys import collections +import types +import __builtin__ from rpython.tool.error import source_lines from rpython.tool.stdlib_opcode import host_bytecode_spec +from rpython.rlib import rstackovf from rpython.flowspace.argument import CallSpec from rpython.flowspace.model import (Constant, Variable, Block, Link, c_last_exception, const, FSException) @@ -14,17 +16,19 @@ recursively_flatten) from rpython.flowspace.specialcase import (rpython_print_item, rpython_print_newline) +from rpython.flowspace.operation import op +w_None = const(None) class FlowingError(Exception): """ Signals invalid RPython in the function being analysed""" - frame = None + ctx = None def __str__(self): msg = ["\n"] msg += map(str, self.args) msg += [""] - msg += source_lines(self.frame.graph, None, offset=self.frame.last_instr) + msg += source_lines(self.ctx.graph, None, offset=self.ctx.last_instr) return "\n".join(msg) class StopFlowing(Exception): @@ -111,7 +115,7 @@ def append(self, operation): raise NotImplementedError - def guessbool(self, frame, w_condition): + def guessbool(self, ctx, w_condition): raise AssertionError("cannot guessbool(%s)" % (w_condition,)) @@ -127,13 +131,13 @@ def append(self, operation): self.crnt_block.operations.append(operation) - def guessbool(self, frame, w_condition): + def guessbool(self, ctx, w_condition): block = self.crnt_block vars = block.getvariables() links = [] for case in [False, True]: egg = EggBlock(vars, block, case) - frame.pendingblocks.append(egg) + ctx.pendingblocks.append(egg) link = Link(vars, egg, case) links.append(link) @@ -145,7 +149,7 @@ # block.exits[True] = ifLink. 
raise StopFlowing - def guessexception(self, frame, *cases): + def guessexception(self, ctx, *cases): block = self.crnt_block bvars = vars = vars2 = block.getvariables() links = [] @@ -162,7 +166,7 @@ vars.extend([last_exc, last_exc_value]) vars2.extend([Variable(), Variable()]) egg = EggBlock(vars2, block, case) - frame.pendingblocks.append(egg) + ctx.pendingblocks.append(egg) link = Link(vars, egg, case) if case is not None: link.extravars(last_exception=last_exc, last_exc_value=last_exc_value) @@ -193,14 +197,14 @@ [str(s) for s in self.listtoreplay[self.index:]])) self.index += 1 - def guessbool(self, frame, w_condition): + def guessbool(self, ctx, w_condition): assert self.index == len(self.listtoreplay) - frame.recorder = self.nextreplayer + ctx.recorder = self.nextreplayer return self.booloutcome - def guessexception(self, frame, *classes): + def guessexception(self, ctx, *classes): assert self.index == len(self.listtoreplay) - frame.recorder = self.nextreplayer + ctx.recorder = self.nextreplayer outcome = self.booloutcome if outcome is not None: egg = self.nextreplayer.crnt_block @@ -213,60 +217,55 @@ # ____________________________________________________________ _unary_ops = [ - ('UNARY_POSITIVE', "pos"), - ('UNARY_NEGATIVE', "neg"), - ('UNARY_NOT', "not_"), - ('UNARY_CONVERT', "repr"), - ('UNARY_INVERT', "invert"), + ('UNARY_POSITIVE', op.pos), + ('UNARY_NEGATIVE', op.neg), + ('UNARY_CONVERT', op.repr), + ('UNARY_INVERT', op.invert), ] -def unaryoperation(OPCODE, op): +def unaryoperation(OPCODE, operation): def UNARY_OP(self, *ignored): - operation = getattr(self.space, op) w_1 = self.popvalue() - w_result = operation(w_1) + w_result = operation(w_1).eval(self) self.pushvalue(w_result) - UNARY_OP.unaryop = op UNARY_OP.func_name = OPCODE return UNARY_OP _binary_ops = [ - ('BINARY_MULTIPLY', "mul"), - ('BINARY_TRUE_DIVIDE', "truediv"), - ('BINARY_FLOOR_DIVIDE', "floordiv"), - ('BINARY_DIVIDE', "div"), - ('BINARY_MODULO', "mod"), - ('BINARY_ADD', "add"), - ('BINARY_SUBTRACT', "sub"), - ('BINARY_SUBSCR', "getitem"), - ('BINARY_LSHIFT', "lshift"), - ('BINARY_RSHIFT', "rshift"), - ('BINARY_AND', "and_"), - ('BINARY_XOR', "xor"), - ('BINARY_OR', "or_"), - ('INPLACE_MULTIPLY', "inplace_mul"), - ('INPLACE_TRUE_DIVIDE', "inplace_truediv"), - ('INPLACE_FLOOR_DIVIDE', "inplace_floordiv"), - ('INPLACE_DIVIDE', "inplace_div"), - ('INPLACE_MODULO', "inplace_mod"), - ('INPLACE_ADD', "inplace_add"), - ('INPLACE_SUBTRACT', "inplace_sub"), - ('INPLACE_LSHIFT', "inplace_lshift"), - ('INPLACE_RSHIFT', "inplace_rshift"), - ('INPLACE_AND', "inplace_and"), - ('INPLACE_XOR', "inplace_xor"), - ('INPLACE_OR', "inplace_or"), + ('BINARY_MULTIPLY', op.mul), + ('BINARY_TRUE_DIVIDE', op.truediv), + ('BINARY_FLOOR_DIVIDE', op.floordiv), + ('BINARY_DIVIDE', op.div), + ('BINARY_MODULO', op.mod), + ('BINARY_ADD', op.add), + ('BINARY_SUBTRACT', op.sub), + ('BINARY_SUBSCR', op.getitem), + ('BINARY_LSHIFT', op.lshift), + ('BINARY_RSHIFT', op.rshift), + ('BINARY_AND', op.and_), + ('BINARY_XOR', op.xor), + ('BINARY_OR', op.or_), + ('INPLACE_MULTIPLY', op.inplace_mul), + ('INPLACE_TRUE_DIVIDE', op.inplace_truediv), + ('INPLACE_FLOOR_DIVIDE', op.inplace_floordiv), + ('INPLACE_DIVIDE', op.inplace_div), + ('INPLACE_MODULO', op.inplace_mod), + ('INPLACE_ADD', op.inplace_add), + ('INPLACE_SUBTRACT', op.inplace_sub), + ('INPLACE_LSHIFT', op.inplace_lshift), + ('INPLACE_RSHIFT', op.inplace_rshift), + ('INPLACE_AND', op.inplace_and), + ('INPLACE_XOR', op.inplace_xor), + ('INPLACE_OR', op.inplace_or), ] -def 
binaryoperation(OPCODE, op): +def binaryoperation(OPCODE, operation): """NOT_RPYTHON""" - def BINARY_OP(self, *ignored): - operation = getattr(self.space, op) + def BINARY_OP(self, _): w_2 = self.popvalue() w_1 = self.popvalue() - w_result = operation(w_1, w_2) + w_result = operation(w_1, w_2).eval(self) self.pushvalue(w_result) - BINARY_OP.binop = op BINARY_OP.func_name = OPCODE return BINARY_OP @@ -305,14 +304,13 @@ "cmp_exc_match", ] -class FlowSpaceFrame(object): +class FlowContext(object): opcode_method_names = host_bytecode_spec.method_names - def __init__(self, space, graph, code): + def __init__(self, graph, code): self.graph = graph func = graph.func self.pycode = code - self.space = space self.w_globals = Constant(func.func_globals) self.blockstack = [] @@ -321,7 +319,6 @@ self.last_instr = 0 self.init_locals_stack(code) - self.w_locals = None # XXX: only for compatibility with PyFrame self.joinpoints = {} @@ -403,7 +400,7 @@ return FrameState(data, self.blockstack[:], next_pos) def setstate(self, state): - """ Reset the frame to the given state. """ + """ Reset the context to the given frame state. """ data = state.mergeable[:] recursively_unflatten(data) self.restore_locals_stack(data[:-2]) # Nones == undefined locals @@ -439,7 +436,7 @@ if not exceptions: return if not force and not any(isinstance(block, (ExceptBlock, FinallyBlock)) - for block in self.blockstack): + for block in self.blockstack): # The implicit exception wouldn't be caught and would later get # removed, so don't bother creating it. return @@ -476,7 +473,7 @@ except Raise as e: w_exc = e.w_exc - if w_exc.w_type == self.space.w_ImportError: + if w_exc.w_type == const(ImportError): msg = 'import statement always raises %s' % e raise ImportError(msg) link = Link([w_exc.w_type, w_exc.w_value], self.graph.exceptblock) @@ -491,8 +488,8 @@ self.recorder.crnt_block.closeblock(link) except FlowingError as exc: - if exc.frame is None: - exc.frame = self + if exc.ctx is None: + exc.ctx = self raise self.recorder = None @@ -576,6 +573,11 @@ def getname_w(self, index): return Constant(self.pycode.names[index]) + def appcall(self, func, *args_w): + """Call an app-level RPython function directly""" + w_func = const(func) + return self.do_op(op.simple_call(w_func, *args_w)) + def BAD_OPCODE(self, _): raise FlowingError("This operation is not RPython") @@ -585,38 +587,67 @@ def CONTINUE_LOOP(self, startofloop): raise Continue(startofloop) + def not_(self, w_obj): + w_bool = op.bool(w_obj).eval(self) + return const(not self.guessbool(w_bool)) + + def UNARY_NOT(self, _): + w_obj = self.popvalue() + self.pushvalue(self.not_(w_obj)) + def cmp_lt(self, w_1, w_2): - return self.space.lt(w_1, w_2) + return op.lt(w_1, w_2).eval(self) def cmp_le(self, w_1, w_2): - return self.space.le(w_1, w_2) + return op.le(w_1, w_2).eval(self) def cmp_eq(self, w_1, w_2): - return self.space.eq(w_1, w_2) + return op.eq(w_1, w_2).eval(self) def cmp_ne(self, w_1, w_2): - return self.space.ne(w_1, w_2) + return op.ne(w_1, w_2).eval(self) def cmp_gt(self, w_1, w_2): - return self.space.gt(w_1, w_2) + return op.gt(w_1, w_2).eval(self) def cmp_ge(self, w_1, w_2): - return self.space.ge(w_1, w_2) From noreply at buildbot.pypy.org Wed Jan 22 12:20:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Jan 2014 12:20:03 +0100 (CET) Subject: [pypy-commit] pypy default: Move gc_stack_bottom before the increment of stacks_counter. This is needed because debug builds check the value of stack_counter and expect the old one. 
Message-ID: <20140122112003.BCFA81C315E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68840:7cb3f52582ea Date: 2014-01-22 12:18 +0100 http://bitbucket.org/pypy/pypy/changeset/7cb3f52582ea/ Log: Move gc_stack_bottom before the increment of stacks_counter. This is needed because debug builds check the value of stack_counter and expect the old one. diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -600,8 +600,8 @@ from pypy.module.cpyext.pyobject import Reference # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer + llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py rffi.stackcounter.stacks_counter += 1 - llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value boxed_args = () try: From noreply at buildbot.pypy.org Wed Jan 22 12:32:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Jan 2014 12:32:28 +0100 (CET) Subject: [pypy-commit] pypy default: Backed out changeset 7cb3f52582ea Message-ID: <20140122113228.202FB1C315E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68841:07c5a6b6d96a Date: 2014-01-22 12:27 +0100 http://bitbucket.org/pypy/pypy/changeset/07c5a6b6d96a/ Log: Backed out changeset 7cb3f52582ea diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -600,8 +600,8 @@ from pypy.module.cpyext.pyobject import Reference # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer + rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py - rffi.stackcounter.stacks_counter += 1 retval = fatal_value boxed_args = () try: From noreply at buildbot.pypy.org Wed Jan 22 13:11:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Jan 2014 13:11:39 +0100 (CET) Subject: [pypy-commit] pypy default: Fix (I think): when calling C functions that call back some RPython Message-ID: <20140122121139.C321D1C315E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68842:9bfd0a649773 Date: 2014-01-22 13:10 +0100 http://bitbucket.org/pypy/pypy/changeset/9bfd0a649773/ Log: Fix (I think): when calling C functions that call back some RPython functions, never use _nowrapped=True. This breaks e.g. asmgcc's stack tracing and triggers an assertion in debug builds. Instead, I think that just saying "releasegil=False" is what we need here, according to c23cd52909fb. 
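Concretely, the diff below keeps rffi.llexternal's normal call wrapper (whose absence under _nowrapper=True is what reportedly broke asmgcc's stack tracing) and merely skips releasing the GIL around the call. A minimal sketch of that declaration style; the C function name here is made up and the empty ExternalCompilationInfo stands in for the real cpyext one:

    from rpython.rtyper.lltypesystem import lltype, rffi
    from rpython.translator.tool.cbuild import ExternalCompilationInfo

    eci = ExternalCompilationInfo()    # stand-in for the real cpyext eci
    init_foo = rffi.llexternal('_pypy_init_foo', [], lltype.Void,
                               compilation_info=eci,
                               releasegil=False)   # keep the wrapper, but
                                                   # do not release the GIL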
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -692,11 +692,11 @@ else: prefix = 'cpyexttest' init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, - compilation_info=eci, _nowrapper=True) + compilation_info=eci, releasegil=False) init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, - compilation_info=eci, _nowrapper=True) + compilation_info=eci, releasegil=False) init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, - compilation_info=eci, _nowrapper=True) + compilation_info=eci, releasegil=False) INIT_FUNCTIONS.extend([ lambda space: init_buffer(), lambda space: init_pycobject(), From noreply at buildbot.pypy.org Wed Jan 22 14:06:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Jan 2014 14:06:15 +0100 (CET) Subject: [pypy-commit] pypy default: Improve itertools.product() Message-ID: <20140122130615.0F3E91C315D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68843:c051852e3f7d Date: 2014-01-22 14:05 +0100 http://bitbucket.org/pypy/pypy/changeset/c051852e3f7d/ Log: Improve itertools.product() diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1083,58 +1083,64 @@ class W_Product(W_Root): def __init__(self, space, args_w, w_repeat): self.gears = [ - space.fixedview(arg_w) for arg_w in args_w + space.unpackiterable(arg_w) for arg_w in args_w ] * space.int_w(w_repeat) - self.num_gears = len(self.gears) - # initialization of indicies to loop over - self.indicies = [ - (0, len(gear)) - for gear in self.gears - ] - self.cont = True - for _, lim in self.indicies: - if lim <= 0: - self.cont = False + # + for gear in self.gears: + if len(gear) == 0: + self.lst = None break + else: + self.indices = [0] * len(self.gears) + self.lst = [gear[0] for gear in self.gears] - def roll_gears(self): - if self.num_gears == 0: - self.cont = False - return + def _rotate_previous_gears(self): + lst = self.lst + x = len(self.gears) - 1 + lst[x] = self.gears[x][0] + self.indices[x] = 0 + x -= 1 + # the outer loop runs as long as a we have a carry + while x >= 0: + gear = self.gears[x] + index = self.indices[x] + 1 + if index < len(gear): + # no carry: done + lst[x] = gear[index] + self.indices[x] = index + return + lst[x] = gear[0] + self.indices[x] = 0 + x -= 1 + else: + self.lst = None - # Starting from the end of the gear indicies work to the front - # incrementing the gear until the limit is reached. 
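Both the removed roll_gears() and the new fill_next_result() / _rotate_previous_gears() in this changeset implement the same odometer scheme: keep one index per gear, bump the rightmost one, and carry to the left whenever it wraps around. A plain-Python equivalent of that scheme, independent of the interpreter-level classes:

    def toy_product(*gears):
        if any(len(g) == 0 for g in gears):
            return                      # an empty gear yields no results
        indices = [0] * len(gears)
        while True:
            yield tuple(g[i] for g, i in zip(gears, indices))
            x = len(gears) - 1          # advance the last gear first
            while x >= 0:
                indices[x] += 1
                if indices[x] < len(gears[x]):
                    break               # no carry: done
                indices[x] = 0          # wrap around, carry to the left
                x -= 1
            else:
                return                  # the first gear wrapped: finished

    print list(toy_product([1, 2], 'ab'))
    # -> [(1, 'a'), (1, 'b'), (2, 'a'), (2, 'b')]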
When the limit - # is reached carry operation to the next gear - should_carry = True - - for n in range(0, self.num_gears): - nth_gear = self.num_gears - n - 1 - if should_carry: - count, lim = self.indicies[nth_gear] - count += 1 - if count == lim and nth_gear == 0: - self.cont = False - if count == lim: - should_carry = True - count = 0 - else: - should_carry = False - self.indicies[nth_gear] = (count, lim) + def fill_next_result(self): + # the last gear is done here, in a function with no loop, + # to allow the JIT to look inside + lst = self.lst + x = len(self.gears) - 1 + if x >= 0: + gear = self.gears[x] + index = self.indices[x] + 1 + if index < len(gear): + # no carry: done + lst[x] = gear[index] + self.indices[x] = index else: - break + self._rotate_previous_gears() + else: + self.lst = None def iter_w(self, space): return space.wrap(self) def next_w(self, space): - if not self.cont: + if self.lst is None: raise OperationError(space.w_StopIteration, space.w_None) - l = [None] * self.num_gears - for x in range(0, self.num_gears): - index, limit = self.indicies[x] - l[x] = self.gears[x][index] - self.roll_gears() - return space.newtuple(l) + w_result = space.newtuple(self.lst[:]) + self.fill_next_result() + return w_result def W_Product__new__(space, w_subtype, __args__): From noreply at buildbot.pypy.org Wed Jan 22 14:41:30 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 22 Jan 2014 14:41:30 +0100 (CET) Subject: [pypy-commit] stmgc c7: small comment Message-ID: <20140122134130.8ACC11C315E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r663:dde3727c15b6 Date: 2014-01-22 11:07 +0100 http://bitbucket.org/pypy/stmgc/changeset/dde3727c15b6/ Log: small comment diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -944,7 +944,8 @@ /* reset shadowstack */ _STM_TL1->shadow_stack = _STM_TL2->old_shadow_stack; - /* unreserve uncommitted_pages and mark them as SHARED again */ + /* unreserve uncommitted_pages and mark them as SHARED again + IFF they are not in alloc[] */ /* STM_LIST_FOREACH(_STM_TL2->uncommitted_pages, ({ */ /* uintptr_t pagenum = (uintptr_t)item; */ /* flag_page_private[pagenum] = SHARED_PAGE; */ From noreply at buildbot.pypy.org Wed Jan 22 14:41:34 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 22 Jan 2014 14:41:34 +0100 (CET) Subject: [pypy-commit] stmgc c7: move the global lock to an extra file "stmsync" Message-ID: <20140122134134.A65AC1C315E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r667:82077634d84a Date: 2014-01-22 14:37 +0100 http://bitbucket.org/pypy/stmgc/changeset/82077634d84a/ Log: move the global lock to an extra file "stmsync" diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -15,6 +15,7 @@ #include "reader_writer_lock.h" #include "nursery.h" #include "pages.h" +#include "stmsync.h" @@ -24,46 +25,6 @@ - -/* a multi-reader, single-writer lock: transactions normally take a reader - lock, so don't conflict with each other; when we need to do a global GC, - we take a writer lock to "stop the world". 
*/ - -rwticket rw_shared_lock; /* the "GIL" */ - -void stm_start_shared_lock(void) -{ - rwticket_rdlock(&rw_shared_lock); -} - -void stm_stop_shared_lock(void) -{ - rwticket_rdunlock(&rw_shared_lock); -} - -void stm_stop_exclusive_lock(void) -{ - rwticket_wrunlock(&rw_shared_lock); -} - -void stm_start_exclusive_lock(void) -{ - rwticket_wrlock(&rw_shared_lock); -} - -void _stm_start_safe_point(void) -{ - assert(!_STM_TL->need_abort); - stm_stop_shared_lock(); -} - -void _stm_stop_safe_point(void) -{ - stm_start_shared_lock(); - if (_STM_TL->need_abort) - stm_abort_transaction(); -} - bool _stm_was_read_remote(char *base, object_t *obj) { struct read_marker_s *marker = (struct read_marker_s *) @@ -73,7 +34,6 @@ return (marker->rm == other_TL1->transaction_read_version); } - bool _stm_was_read(object_t *obj) { read_marker_t *marker = (read_marker_t *)(((uintptr_t)obj) >> 4); @@ -89,30 +49,6 @@ - - -char *_stm_real_address(object_t *o) -{ - if (o == NULL) - return NULL; - assert(FIRST_OBJECT_PAGE * 4096 <= (uintptr_t)o - && (uintptr_t)o < NB_PAGES * 4096); - return (char*)real_address(o); -} - -object_t *_stm_tl_address(char *ptr) -{ - if (ptr == NULL) - return NULL; - - uintptr_t res = ptr - _STM_TL->thread_base; - assert(FIRST_OBJECT_PAGE * 4096 <= res - && res < NB_PAGES * 4096); - return (object_t*)res; -} - - - static void push_modified_to_other_threads() { /* WE HAVE THE EXCLUSIVE LOCK HERE */ @@ -148,10 +84,6 @@ - - - - void _stm_write_slowpath(object_t *obj) { uintptr_t pagenum = ((uintptr_t)obj) / 4096; @@ -200,7 +132,7 @@ void stm_setup(void) { - memset(&rw_shared_lock, 0, sizeof(rwticket)); + _stm_reset_shared_lock(); /* Check that some values are acceptable */ assert(4096 <= ((uintptr_t)_STM_TL)); @@ -300,8 +232,7 @@ void _stm_teardown_thread(void) { - assert(!rwticket_wrtrylock(&rw_shared_lock)); - assert(!rwticket_wrunlock(&rw_shared_lock)); + _stm_reset_shared_lock(); stm_list_free(_STM_TL->modified_objects); _STM_TL->modified_objects = NULL; diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -4,7 +4,7 @@ #include #include #include - +#include #define NB_PAGES (256*256) // 256MB #define NB_THREADS 2 @@ -133,6 +133,26 @@ return (struct object_s*)REAL_ADDRESS(_STM_TL->thread_base, src); } +static inline char *_stm_real_address(object_t *o) +{ + if (o == NULL) + return NULL; + assert(FIRST_OBJECT_PAGE * 4096 <= (uintptr_t)o + && (uintptr_t)o < NB_PAGES * 4096); + return (char*)real_address(o); +} + +static inline object_t *_stm_tl_address(char *ptr) +{ + if (ptr == NULL) + return NULL; + + uintptr_t res = ptr - _STM_TL->thread_base; + assert(FIRST_OBJECT_PAGE * 4096 <= res + && res < NB_PAGES * 4096); + return (object_t*)res; +} + static inline char *get_thread_base(long thread_num) { return object_pages + thread_num * (NB_PAGES * 4096UL); @@ -195,16 +215,12 @@ void stm_setup_thread(void); void stm_start_transaction(jmpbufptr_t *jmpbufptr); void stm_stop_transaction(void); -char *_stm_real_address(object_t *o); -object_t *_stm_tl_address(char *ptr); + object_t *_stm_allocate_old(size_t size); object_t *stm_allocate_prebuilt(size_t size); -void _stm_start_safe_point(void); -void _stm_stop_safe_point(void); - void stm_abort_transaction(void); void _stm_minor_collect(); diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -14,7 +14,7 @@ #include "list.h" #include "nursery.h" #include "pages.h" - +#include "stmsync.h" void stm_major_collection(void) { diff --git a/c7/stmsync.c b/c7/stmsync.c new file mode 100644 --- /dev/null +++ 
b/c7/stmsync.c @@ -0,0 +1,53 @@ +#include "stmsync.h" +#include "core.h" +#include "reader_writer_lock.h" +#include +#include + + +/* a multi-reader, single-writer lock: transactions normally take a reader + lock, so don't conflict with each other; when we need to do a global GC, + we take a writer lock to "stop the world". */ + +rwticket rw_shared_lock; /* the "GIL" */ + +void _stm_reset_shared_lock() +{ + assert(!rwticket_wrtrylock(&rw_shared_lock)); + assert(!rwticket_wrunlock(&rw_shared_lock)); + + memset(&rw_shared_lock, 0, sizeof(rwticket)); +} + +void stm_start_shared_lock(void) +{ + rwticket_rdlock(&rw_shared_lock); +} + +void stm_stop_shared_lock(void) +{ + rwticket_rdunlock(&rw_shared_lock); +} + +void stm_stop_exclusive_lock(void) +{ + rwticket_wrunlock(&rw_shared_lock); +} + +void stm_start_exclusive_lock(void) +{ + rwticket_wrlock(&rw_shared_lock); +} + +void _stm_start_safe_point(void) +{ + assert(!_STM_TL->need_abort); + stm_stop_shared_lock(); +} + +void _stm_stop_safe_point(void) +{ + stm_start_shared_lock(); + if (_STM_TL->need_abort) + stm_abort_transaction(); +} diff --git a/c7/stmsync.h b/c7/stmsync.h new file mode 100644 --- /dev/null +++ b/c7/stmsync.h @@ -0,0 +1,11 @@ + + +void stm_start_shared_lock(void); +void stm_stop_shared_lock(void); +void stm_stop_exclusive_lock(void); +void stm_start_exclusive_lock(void); +void _stm_start_safe_point(void); +void _stm_stop_safe_point(void); +void _stm_reset_shared_lock(); + + diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -9,11 +9,13 @@ header_files = [os.path.join(parent_dir, _n) for _n in """core.h pagecopy.h list.h reader_writer_lock.h - nursery.h pages.h""".split()] + nursery.h pages.h + stmsync.h""".split()] source_files = [os.path.join(parent_dir, _n) for _n in """core.c pagecopy.c list.c reader_writer_lock.c - nursery.c pages.c""".split()] + nursery.c pages.c + stmsync.c""".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): @@ -103,6 +105,7 @@ #include "core.h" #include "pages.h" #include "nursery.h" +#include "stmsync.h" struct myobj_s { struct object_s hdr; From noreply at buildbot.pypy.org Wed Jan 22 14:41:32 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 22 Jan 2014 14:41:32 +0100 (CET) Subject: [pypy-commit] stmgc c7: move a bit of the page management to its own file Message-ID: <20140122134132.AE8721C315E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r665:1605e7b96e40 Date: 2014-01-22 14:06 +0100 http://bitbucket.org/pypy/stmgc/changeset/1605e7b96e40/ Log: move a bit of the page management to its own file diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -15,7 +15,7 @@ #include "pagecopy.h" #include "reader_writer_lock.h" #include "nursery.h" - +#include "pages.h" @@ -27,19 +27,12 @@ char *object_pages; static int num_threads_started; - -uint8_t flag_page_private[NB_PAGES]; uint8_t write_locks[READMARKER_END - READMARKER_START]; /************************************************************/ -uint8_t _stm_get_page_flag(int pagenum) -{ - return flag_page_private[pagenum]; -} - static void spin_loop(void) { asm("pause" : : : "memory"); @@ -179,11 +172,6 @@ -bool _stm_is_young(object_t *o) -{ - assert((uintptr_t)o >= FIRST_NURSERY_PAGE * 4096); - return (uintptr_t)o < FIRST_AFTER_NURSERY_PAGE * 4096; -} char *_stm_real_address(object_t *o) @@ -292,12 +280,6 @@ - - - - - - void stm_setup(void) { memset(&rw_shared_lock, 0, sizeof(rwticket)); @@ -465,11 
+447,6 @@ _STM_TL->transaction_read_version = 1; } -void stm_major_collection(void) -{ - assert(_STM_TL->running_transaction); - abort(); -} void stm_start_transaction(jmpbufptr_t *jmpbufptr) { diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -37,21 +37,6 @@ GCFLAG_MOVED = (1 << 2), }; -enum { - /* unprivatized page seen by all threads */ - SHARED_PAGE=0, - - /* page being in the process of privatization */ - REMAPPING_PAGE, - - /* page private for each thread */ - PRIVATE_PAGE, - - /* set for SHARED pages that only contain objects belonging - to the current transaction, so the whole page is not - visible yet for other threads */ - UNCOMMITTED_SHARED_PAGE, -}; /* flag_page_private */ @@ -128,7 +113,6 @@ -extern uint8_t flag_page_private[NB_PAGES]; /* xxx_PAGE constants above */ extern char *object_pages; /* start of MMAP region */ extern uint8_t write_locks[READMARKER_END - READMARKER_START]; @@ -198,7 +182,6 @@ char *_stm_real_address(object_t *o); object_t *_stm_tl_address(char *ptr); -bool _stm_is_young(object_t *o); object_t *_stm_allocate_old(size_t size); object_t *stm_allocate_prebuilt(size_t size); @@ -209,7 +192,6 @@ void stm_abort_transaction(void); void _stm_minor_collect(); -uint8_t _stm_get_page_flag(int pagenum); #define stm_become_inevitable(msg) /* XXX implement me! */ diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -13,28 +13,20 @@ #include "core.h" #include "list.h" #include "nursery.h" +#include "pages.h" -uintptr_t index_page_never_used; -uintptr_t _stm_reserve_pages(int num) +void stm_major_collection(void) { - /* Grab a free page, initially shared between the threads. */ + assert(_STM_TL->running_transaction); + abort(); +} - // XXX look in some free list first - /* Return the index'th object page, which is so far never used. */ - uintptr_t index = __sync_fetch_and_add(&index_page_never_used, num); - - int i; - for (i = 0; i < num; i++) { - assert(flag_page_private[index+i] == SHARED_PAGE); - } - assert(flag_page_private[index] == SHARED_PAGE); - if (index + num >= NB_PAGES) { - fprintf(stderr, "Out of mmap'ed memory!\n"); - abort(); - } - return index; +bool _stm_is_young(object_t *o) +{ + assert((uintptr_t)o >= FIRST_NURSERY_PAGE * 4096); + return (uintptr_t)o < FIRST_AFTER_NURSERY_PAGE * 4096; } @@ -44,12 +36,10 @@ LIST_APPEND(_STM_TL->uncommitted_pages, (object_t*)pagenum); } - - object_t *_stm_allocate_old(size_t size) { int pages = (size + 4095) / 4096; - localchar_t* addr = (localchar_t*)(_stm_reserve_pages(pages) * 4096); + localchar_t* addr = (localchar_t*)(stm_pages_reserve(pages) * 4096); object_t* o = (object_t*)addr; o->stm_flags |= GCFLAG_WRITE_BARRIER; @@ -78,7 +68,7 @@ size_t size = size_class * 8; /* reserve a fresh new page */ - page = _stm_reserve_pages(1); + page = stm_pages_reserve(1); /* mark as UNCOMMITTED_... 
*/ mark_page_as_uncommitted(page); diff --git a/c7/nursery.h b/c7/nursery.h --- a/c7/nursery.h +++ b/c7/nursery.h @@ -6,12 +6,14 @@ object_t *stm_allocate(size_t size); void _stm_minor_collect(); +bool _stm_is_young(object_t *o); void nursery_on_abort(); void nursery_on_commit(); void nursery_on_start(); + extern uintptr_t index_page_never_used; diff --git a/c7/pages.c b/c7/pages.c new file mode 100644 --- /dev/null +++ b/c7/pages.c @@ -0,0 +1,50 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include "core.h" +#include "list.h" +#include "pages.h" + +uint8_t flag_page_private[NB_PAGES]; +uintptr_t index_page_never_used; + + +uint8_t _stm_get_page_flag(int pagenum) +{ + return flag_page_private[pagenum]; +} + + +uintptr_t stm_pages_reserve(int num) +{ + /* grab free, possibly uninitialized pages */ + + // XXX look in some free list first + + /* Return the index'th object page, which is so far never used. */ + uintptr_t index = __sync_fetch_and_add(&index_page_never_used, num); + + int i; + for (i = 0; i < num; i++) { + assert(flag_page_private[index+i] == SHARED_PAGE); + } + assert(flag_page_private[index] == SHARED_PAGE); + if (index + num >= NB_PAGES) { + fprintf(stderr, "Out of mmap'ed memory!\n"); + abort(); + } + return index; +} + + + + diff --git a/c7/pages.h b/c7/pages.h new file mode 100644 --- /dev/null +++ b/c7/pages.h @@ -0,0 +1,23 @@ +enum { + /* unprivatized page seen by all threads */ + SHARED_PAGE=0, + + /* page being in the process of privatization */ + REMAPPING_PAGE, + + /* page private for each thread */ + PRIVATE_PAGE, + + /* set for SHARED pages that only contain objects belonging + to the current transaction, so the whole page is not + visible yet for other threads */ + UNCOMMITTED_SHARED_PAGE, +}; /* flag_page_private */ + + +uintptr_t stm_pages_reserve(int num); +uint8_t _stm_get_page_flag(int pagenum); + +extern uint8_t flag_page_private[NB_PAGES]; + + diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -9,11 +9,11 @@ header_files = [os.path.join(parent_dir, _n) for _n in """core.h pagecopy.h list.h reader_writer_lock.h - nursery.h""".split()] + nursery.h pages.h""".split()] source_files = [os.path.join(parent_dir, _n) for _n in """core.c pagecopy.c list.c reader_writer_lock.c - nursery.c""".split()] + nursery.c pages.c""".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): @@ -101,6 +101,8 @@ #include #include "core.h" +#include "pages.h" +#include "nursery.h" struct myobj_s { struct object_s hdr; From noreply at buildbot.pypy.org Wed Jan 22 14:41:33 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 22 Jan 2014 14:41:33 +0100 (CET) Subject: [pypy-commit] stmgc c7: move some more code Message-ID: <20140122134133.A80031C315E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r666:4fb6332ddf82 Date: 2014-01-22 14:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/4fb6332ddf82/ Log: move some more code diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -12,43 +12,17 @@ #include "core.h" #include "list.h" -#include "pagecopy.h" #include "reader_writer_lock.h" #include "nursery.h" #include "pages.h" -#if defined(__i386__) || defined(__x86_64__) -# define HAVE_FULL_EXCHANGE_INSN -#endif - - - char *object_pages; static int num_threads_started; uint8_t write_locks[READMARKER_END - READMARKER_START]; 
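The pages.c/pages.h pair introduced above centralizes the page bookkeeping: stm_pages_reserve() hands out fresh, never-used pages by atomically bumping index_page_never_used and aborts once the mmap'ed region is exhausted. A rough Python model of that reservation step, with a lock standing in for __sync_fetch_and_add and purely illustrative names:

    import threading

    NB_PAGES = 256 * 256                  # mirrors NB_PAGES in core.h

    class PageReserver(object):
        def __init__(self, first_never_used):
            self._lock = threading.Lock() # stand-in for __sync_fetch_and_add
            self._next = first_never_used

        def reserve(self, num):
            with self._lock:              # atomically grab 'num' fresh pages
                index = self._next
                self._next += num
            if index + num >= NB_PAGES:
                raise MemoryError("Out of mmap'ed memory!")
            return index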
-/************************************************************/ - - -static void spin_loop(void) -{ - asm("pause" : : : "memory"); -} - - -static void write_fence(void) -{ -#if defined(__amd64__) || defined(__i386__) - asm("" : : : "memory"); -#else -# error "Define write_fence() for your architecture" -#endif -} - -/************************************************************/ /* a multi-reader, single-writer lock: transactions normally take a reader @@ -116,63 +90,6 @@ -static void _stm_privatize(uintptr_t pagenum) -{ - if (flag_page_private[pagenum] == PRIVATE_PAGE) - return; - -#ifdef HAVE_FULL_EXCHANGE_INSN - /* use __sync_lock_test_and_set() as a cheaper alternative to - __sync_bool_compare_and_swap(). */ - int previous = __sync_lock_test_and_set(&flag_page_private[pagenum], - REMAPPING_PAGE); - if (previous == PRIVATE_PAGE) { - flag_page_private[pagenum] = PRIVATE_PAGE; - return; - } - bool was_shared = (previous == SHARED_PAGE); -#else - bool was_shared = __sync_bool_compare_and_swap(&flag_page_private[pagenum], - SHARED_PAGE, REMAPPING_PAGE); -#endif - if (!was_shared) { - while (1) { - uint8_t state = ((uint8_t volatile *)flag_page_private)[pagenum]; - if (state != REMAPPING_PAGE) { - assert(state == PRIVATE_PAGE); - break; - } - spin_loop(); - } - return; - } - - ssize_t pgoff1 = pagenum; - ssize_t pgoff2 = pagenum + NB_PAGES; - ssize_t localpgoff = pgoff1 + NB_PAGES * _STM_TL->thread_num; - ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - _STM_TL->thread_num); - - void *localpg = object_pages + localpgoff * 4096UL; - void *otherpg = object_pages + otherpgoff * 4096UL; - - // XXX should not use pgoff2, but instead the next unused page in - // thread 2, so that after major GCs the next dirty pages are the - // same as the old ones - int res = remap_file_pages(localpg, 4096, 0, pgoff2, 0); - if (res < 0) { - perror("remap_file_pages"); - abort(); - } - pagecopy(localpg, otherpg); - write_fence(); - assert(flag_page_private[pagenum] == REMAPPING_PAGE); - flag_page_private[pagenum] = PRIVATE_PAGE; -} - - - - - char *_stm_real_address(object_t *o) { @@ -244,17 +161,18 @@ /* for old objects from the same transaction we don't need to privatize the page */ - if ((flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE) + if ((stm_get_page_flag(pagenum) == UNCOMMITTED_SHARED_PAGE) || (obj->stm_flags & GCFLAG_NOT_COMMITTED)) { obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; return; } /* privatize if SHARED_PAGE */ - /* xxx stmcb_size() is probably too slow */ + /* xxx stmcb_size() is probably too slow, maybe add a GCFLAG_LARGE for + objs with more than 1 page */ int pages = stmcb_size(real_address(obj)) / 4096; for (; pages >= 0; pages--) - _stm_privatize(pagenum + pages); + stm_pages_privatize(pagenum + pages); /* claim the write-lock for this object */ uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; @@ -341,7 +259,7 @@ } for (i = FIRST_NURSERY_PAGE; i < FIRST_AFTER_NURSERY_PAGE; i++) - flag_page_private[i] = PRIVATE_PAGE; /* nursery is private. + stm_set_page_flag(i, PRIVATE_PAGE); /* nursery is private. or should it be UNCOMMITTED??? 
*/ num_threads_started = 0; @@ -407,7 +325,7 @@ void _stm_teardown(void) { munmap(object_pages, TOTAL_MEMORY); - memset(flag_page_private, 0, sizeof(flag_page_private)); + _stm_reset_page_flags(); memset(write_locks, 0, sizeof(write_locks)); object_pages = NULL; } diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -138,8 +138,24 @@ return object_pages + thread_num * (NB_PAGES * 4096UL); } +static inline void spin_loop(void) +{ + asm("pause" : : : "memory"); +} + + +static inline void write_fence(void) +{ +#if defined(__amd64__) || defined(__i386__) + asm("" : : : "memory"); +#else +# error "Define write_fence() for your architecture" +#endif +} + /* ==================== API ==================== */ + static inline void stm_read(object_t *obj) { ((read_marker_t *)(((uintptr_t)obj) >> 4))->rm = diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -32,7 +32,7 @@ void mark_page_as_uncommitted(uintptr_t pagenum) { - flag_page_private[pagenum] = UNCOMMITTED_SHARED_PAGE; + stm_set_page_flag(pagenum, UNCOMMITTED_SHARED_PAGE); LIST_APPEND(_STM_TL->uncommitted_pages, (object_t*)pagenum); } @@ -253,7 +253,7 @@ item->stm_flags &= ~GCFLAG_NOT_COMMITTED; uintptr_t pagenum = ((uintptr_t)item) / 4096UL; - if (flag_page_private[pagenum] == PRIVATE_PAGE) { + if (stm_get_page_flag(pagenum) == PRIVATE_PAGE) { /* page was privatized... */ char *src = REAL_ADDRESS(local_base, item); char *dst = REAL_ADDRESS(remote_base, item); @@ -293,7 +293,7 @@ reset in case of an abort */ uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; - if (flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE) { + if (stm_get_page_flag(pagenum) == UNCOMMITTED_SHARED_PAGE) { /* becomes a SHARED (done below) partially used page */ alloc->flag_partial_page = 1; } @@ -303,7 +303,7 @@ _STM_TL->uncommitted_pages, ({ uintptr_t pagenum = (uintptr_t)item; - flag_page_private[pagenum] = SHARED_PAGE; + stm_set_page_flag(pagenum, SHARED_PAGE); })); stm_list_clear(_STM_TL->uncommitted_pages); } diff --git a/c7/pages.c b/c7/pages.c --- a/c7/pages.c +++ b/c7/pages.c @@ -1,3 +1,4 @@ +#define _GNU_SOURCE #include #include #include @@ -13,16 +14,89 @@ #include "core.h" #include "list.h" #include "pages.h" +#include "pagecopy.h" + + +#if defined(__i386__) || defined(__x86_64__) +# define HAVE_FULL_EXCHANGE_INSN +#endif + + uint8_t flag_page_private[NB_PAGES]; uintptr_t index_page_never_used; +void _stm_reset_page_flags() +{ + memset(flag_page_private, 0, sizeof(flag_page_private)); +} -uint8_t _stm_get_page_flag(int pagenum) +uint8_t stm_get_page_flag(int pagenum) { return flag_page_private[pagenum]; } +void stm_set_page_flag(int pagenum, uint8_t flag) +{ + assert(flag_page_private[pagenum] != flag); + flag_page_private[pagenum] = flag; +} + + + +void stm_pages_privatize(uintptr_t pagenum) +{ + if (flag_page_private[pagenum] == PRIVATE_PAGE) + return; + +#ifdef HAVE_FULL_EXCHANGE_INSN + /* use __sync_lock_test_and_set() as a cheaper alternative to + __sync_bool_compare_and_swap(). 
*/ + int previous = __sync_lock_test_and_set(&flag_page_private[pagenum], + REMAPPING_PAGE); + if (previous == PRIVATE_PAGE) { + flag_page_private[pagenum] = PRIVATE_PAGE; + return; + } + bool was_shared = (previous == SHARED_PAGE); +#else + bool was_shared = __sync_bool_compare_and_swap(&flag_page_private[pagenum], + SHARED_PAGE, REMAPPING_PAGE); +#endif + if (!was_shared) { + while (1) { + uint8_t state = ((uint8_t volatile *)flag_page_private)[pagenum]; + if (state != REMAPPING_PAGE) { + assert(state == PRIVATE_PAGE); + break; + } + spin_loop(); + } + return; + } + + ssize_t pgoff1 = pagenum; + ssize_t pgoff2 = pagenum + NB_PAGES; + ssize_t localpgoff = pgoff1 + NB_PAGES * _STM_TL->thread_num; + ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - _STM_TL->thread_num); + + void *localpg = object_pages + localpgoff * 4096UL; + void *otherpg = object_pages + otherpgoff * 4096UL; + + // XXX should not use pgoff2, but instead the next unused page in + // thread 2, so that after major GCs the next dirty pages are the + // same as the old ones + int res = remap_file_pages(localpg, 4096, 0, pgoff2, 0); + if (res < 0) { + perror("remap_file_pages"); + abort(); + } + pagecopy(localpg, otherpg); + write_fence(); + assert(flag_page_private[pagenum] == REMAPPING_PAGE); + flag_page_private[pagenum] = PRIVATE_PAGE; +} + uintptr_t stm_pages_reserve(int num) { diff --git a/c7/pages.h b/c7/pages.h --- a/c7/pages.h +++ b/c7/pages.h @@ -15,9 +15,12 @@ }; /* flag_page_private */ +void stm_pages_privatize(uintptr_t pagenum); uintptr_t stm_pages_reserve(int num); -uint8_t _stm_get_page_flag(int pagenum); +uint8_t stm_get_page_flag(int pagenum); +void stm_set_page_flag(int pagenum, uint8_t flag); +void _stm_reset_page_flags(void); -extern uint8_t flag_page_private[NB_PAGES]; + diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -79,7 +79,7 @@ extern void stmcb_trace(struct object_s *, void (object_t **)); uint8_t _stm_get_flags(object_t *obj); -uint8_t _stm_get_page_flag(int pagenum); +uint8_t stm_get_page_flag(int pagenum); enum { SHARED_PAGE=0, REMAPPING_PAGE, @@ -337,7 +337,7 @@ lib._stm_minor_collect() def stm_get_page_flag(pagenum): - return lib._stm_get_page_flag(pagenum) + return lib.stm_get_page_flag(pagenum) def stm_get_obj_size(o): return lib.stmcb_size(stm_get_real_address(o)) From noreply at buildbot.pypy.org Wed Jan 22 14:41:31 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 22 Jan 2014 14:41:31 +0100 (CET) Subject: [pypy-commit] stmgc c7: WIP separate object allocation from the rest Message-ID: <20140122134131.A71281C315E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r664:0b06ac589879 Date: 2014-01-22 13:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/0b06ac589879/ Log: WIP separate object allocation from the rest diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -14,73 +14,26 @@ #include "list.h" #include "pagecopy.h" #include "reader_writer_lock.h" +#include "nursery.h" -#define NB_PAGES (256*256) // 256MB -#define NB_THREADS 2 -#define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) -#define LARGE_OBJECT_WORDS 36 -#define NB_NURSERY_PAGES 1024 -#define LENGTH_SHADOW_STACK 163840 - - -#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_THREADS) -#define READMARKER_END ((NB_PAGES * 4096UL) >> 4) -#define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) -#define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE -#define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) -#define 
FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) -#define FIRST_AFTER_NURSERY_PAGE (FIRST_OBJECT_PAGE + NB_NURSERY_PAGES) - #if defined(__i386__) || defined(__x86_64__) # define HAVE_FULL_EXCHANGE_INSN #endif -typedef TLPREFIX struct alloc_for_size_s alloc_for_size_t; -typedef TLPREFIX struct _thread_local2_s _thread_local2_t; -struct alloc_for_size_s { - localchar_t *next; - uint16_t start, stop; - bool flag_partial_page; -}; +char *object_pages; +static int num_threads_started; -struct _thread_local2_s { - struct _thread_local1_s _tl1; - int thread_num; - bool running_transaction; - bool need_abort; - char *thread_base; - struct stm_list_s *modified_objects; - struct stm_list_s *uncommitted_objects; - struct stm_list_s *uncommitted_object_ranges; - struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; - localchar_t *nursery_current; - object_t **old_shadow_stack; - struct stm_list_s *old_objects_to_trace; - /* pages newly allocated in the current transaction only containing - uncommitted objects */ - struct stm_list_s *uncommitted_pages; -}; -#define _STM_TL2 ((_thread_local2_t *)_STM_TL1) +uint8_t flag_page_private[NB_PAGES]; +uint8_t write_locks[READMARKER_END - READMARKER_START]; - -static char *object_pages; -static int num_threads_started; -static uintptr_t index_page_never_used; -static struct stm_list_s *volatile pending_updates; -static uint8_t flag_page_private[NB_PAGES]; /* xxx_PAGE constants above */ -static uint8_t write_locks[READMARKER_END - READMARKER_START]; - /************************************************************/ -uintptr_t _stm_reserve_pages(int num); -void stm_abort_transaction(void); -localchar_t *_stm_alloc_next_page(size_t i); -void mark_page_as_uncommitted(uintptr_t pagenum); + uint8_t _stm_get_page_flag(int pagenum) { @@ -92,36 +45,6 @@ asm("pause" : : : "memory"); } -#if 0 -static void acquire_lock(int *lock) -{ - while (__sync_lock_test_and_set(lock, 1) != 0) { - while (*lock != 0) - spin_loop(); - } -} - -#define ACQUIRE_LOCK_IF(lock, condition) \ -({ \ - bool _acquired = false; \ - while (condition) { \ - if (__sync_lock_test_and_set(lock, 1) == 0) { \ - if (condition) \ - _acquired = true; \ - else \ - __sync_lock_release(lock); \ - break; \ - } \ - spin_loop(); \ - } \ - _acquired; \ -}) - -static void release_lock(int *lock) -{ - __sync_lock_release(lock); -} -#endif static void write_fence(void) { @@ -134,15 +57,12 @@ /************************************************************/ -rwticket rw_shared_lock; /* a multi-reader, single-writer lock: transactions normally take a reader lock, so don't conflict with each other; when we need to do a global GC, - we take a writer lock to "stop the world". Note the initializer here, - which should give the correct priority for stm_possible_safe_point(). */ + we take a writer lock to "stop the world". 
*/ - -struct tx_descriptor *in_single_thread = NULL; +rwticket rw_shared_lock; /* the "GIL" */ void stm_start_shared_lock(void) { @@ -166,14 +86,14 @@ void _stm_start_safe_point(void) { - assert(!_STM_TL2->need_abort); + assert(!_STM_TL->need_abort); stm_stop_shared_lock(); } void _stm_stop_safe_point(void) { stm_start_shared_lock(); - if (_STM_TL2->need_abort) + if (_STM_TL->need_abort) stm_abort_transaction(); } @@ -182,7 +102,7 @@ struct read_marker_s *marker = (struct read_marker_s *) (base + (((uintptr_t)obj) >> 4)); struct _thread_local1_s *other_TL1 = (struct _thread_local1_s*) - (base + (uintptr_t)_STM_TL1); + (base + (uintptr_t)_STM_TL); return (marker->rm == other_TL1->transaction_read_version); } @@ -190,7 +110,7 @@ bool _stm_was_read(object_t *obj) { read_marker_t *marker = (read_marker_t *)(((uintptr_t)obj) >> 4); - return (marker->rm == _STM_TL1->transaction_read_version); + return (marker->rm == _STM_TL->transaction_read_version); } bool _stm_was_written(object_t *obj) @@ -201,20 +121,6 @@ } -object_t *_stm_allocate_old(size_t size) -{ - int pages = (size + 4095) / 4096; - localchar_t* addr = (localchar_t*)(_stm_reserve_pages(pages) * 4096); - - object_t* o = (object_t*)addr; - o->stm_flags |= GCFLAG_WRITE_BARRIER; - return o; -} - -object_t *stm_allocate_prebuilt(size_t size) -{ - return _stm_allocate_old(size); /* XXX */ -} static void _stm_privatize(uintptr_t pagenum) @@ -250,8 +156,8 @@ ssize_t pgoff1 = pagenum; ssize_t pgoff2 = pagenum + NB_PAGES; - ssize_t localpgoff = pgoff1 + NB_PAGES * _STM_TL2->thread_num; - ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - _STM_TL2->thread_num); + ssize_t localpgoff = pgoff1 + NB_PAGES * _STM_TL->thread_num; + ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - _STM_TL->thread_num); void *localpg = object_pages + localpgoff * 4096UL; void *otherpg = object_pages + otherpgoff * 4096UL; @@ -271,18 +177,7 @@ } -#define REAL_ADDRESS(object_pages, src) ((object_pages) + (uintptr_t)(src)) -static struct object_s *real_address(object_t *src) -{ - return (struct object_s*)REAL_ADDRESS(_STM_TL2->thread_base, src); -} - - -static char *get_thread_base(long thread_num) -{ - return object_pages + thread_num * (NB_PAGES * 4096UL); -} bool _stm_is_young(object_t *o) { @@ -305,7 +200,7 @@ if (ptr == NULL) return NULL; - uintptr_t res = ptr - _STM_TL2->thread_base; + uintptr_t res = ptr - _STM_TL->thread_base; assert(FIRST_OBJECT_PAGE * 4096 <= res && res < NB_PAGES * 4096); return (object_t*)res; @@ -317,9 +212,9 @@ { /* WE HAVE THE EXCLUSIVE LOCK HERE */ - struct stm_list_s *modified = _STM_TL2->modified_objects; - char *local_base = _STM_TL2->thread_base; - char *remote_base = get_thread_base(1 - _STM_TL2->thread_num); + struct stm_list_s *modified = _STM_TL->modified_objects; + char *local_base = _STM_TL->thread_base; + char *remote_base = get_thread_base(1 - _STM_TL->thread_num); bool conflicted = 0; STM_LIST_FOREACH( @@ -340,41 +235,13 @@ })); if (conflicted) { - struct _thread_local2_s *remote_TL2 = (struct _thread_local2_s *) - REAL_ADDRESS(remote_base, _STM_TL2); - remote_TL2->need_abort = 1; + struct _thread_local1_s *remote_TL = (struct _thread_local1_s *) + REAL_ADDRESS(remote_base, _STM_TL); + remote_TL->need_abort = 1; } } -static void push_uncommitted_to_other_threads() -{ - /* WE HAVE THE EXCLUSIVE LOCK HERE */ - - struct stm_list_s *uncommitted = _STM_TL2->uncommitted_objects; - char *local_base = _STM_TL2->thread_base; - char *remote_base = get_thread_base(1 - _STM_TL2->thread_num); - - STM_LIST_FOREACH( - uncommitted, - ({ - /* 
write-lock always cleared for these objects */ - uintptr_t lock_idx; - assert(lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START); - assert(!write_locks[lock_idx]); - /* remove the flag (they are now committed) */ - item->stm_flags &= ~GCFLAG_NOT_COMMITTED; - - uintptr_t pagenum = ((uintptr_t)item) / 4096UL; - if (flag_page_private[pagenum] == PRIVATE_PAGE) { - /* page was privatized... */ - char *src = REAL_ADDRESS(local_base, item); - char *dst = REAL_ADDRESS(remote_base, item); - size_t size = stmcb_size((struct object_s*)src); - memcpy(dst, src, size); - } - })); -} @@ -385,7 +252,7 @@ uintptr_t pagenum = ((uintptr_t)obj) / 4096; assert(pagenum < NB_PAGES); - LIST_APPEND(_STM_TL2->old_objects_to_trace, obj); + LIST_APPEND(_STM_TL->old_objects_to_trace, obj); /* for old objects from the same transaction we don't need to privatize the page */ @@ -419,222 +286,14 @@ obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; stm_read(obj); - LIST_APPEND(_STM_TL2->modified_objects, obj); + LIST_APPEND(_STM_TL->modified_objects, obj); } -uintptr_t _stm_reserve_pages(int num) -{ - /* Grab a free page, initially shared between the threads. */ - // XXX look in some free list first - /* Return the index'th object page, which is so far never used. */ - uintptr_t index = __sync_fetch_and_add(&index_page_never_used, num); - int i; - for (i = 0; i < num; i++) { - assert(flag_page_private[index+i] == SHARED_PAGE); - } - assert(flag_page_private[index] == SHARED_PAGE); - if (index + num >= NB_PAGES) { - fprintf(stderr, "Out of mmap'ed memory!\n"); - abort(); - } - return index; -} -#define TO_RANGE(range, start, stop) \ - ((range) = (object_t *)((start) | (((uintptr_t)(stop)) << 16))) - -#define FROM_RANGE(start, stop, range) \ - ((start) = (uint16_t)(uintptr_t)(range), \ - (stop) = ((uintptr_t)(range)) >> 16) - -object_t *_stm_alloc_old(size_t size) -{ - /* may return uninitialized objects. except for the - GCFLAG_NOT_COMMITTED, it is set exactly if - we allocated the object in a SHARED and partially - committed page. (XXX: add the flag in some other place) - */ - object_t *result; - size_t size_class = size / 8; - assert(size_class >= 2); - - if (size_class >= LARGE_OBJECT_WORDS) { - result = _stm_allocate_old(size); - result->stm_flags &= ~GCFLAG_NOT_COMMITTED; /* page may be non-zeroed */ - - int page = ((uintptr_t)result) / 4096; - int pages = (size + 4095) / 4096; - int i; - for (i = 0; i < pages; i++) { - mark_page_as_uncommitted(page + i); - } - /* make sure the flag is not set (page is not zeroed!) */ - result->stm_flags &= ~GCFLAG_NOT_COMMITTED; - } else { - alloc_for_size_t *alloc = &_STM_TL2->alloc[size_class]; - - if ((uint16_t)((uintptr_t)alloc->next) == alloc->stop) { - result = (object_t *)_stm_alloc_next_page(size_class); - } else { - result = (object_t *)alloc->next; - alloc->next += size; - if (alloc->flag_partial_page) { - LIST_APPEND(_STM_TL2->uncommitted_objects, result); - result->stm_flags |= GCFLAG_NOT_COMMITTED; - } else { - /* make sure the flag is not set (page is not zeroed!) */ - result->stm_flags &= ~GCFLAG_NOT_COMMITTED; - } - } - } - return result; -} - -localchar_t *_stm_alloc_next_page(size_t size_class) -{ - /* may return uninitialized pages */ - - /* 'alloc->next' points to where the next allocation should go. The - present function is called instead when this next allocation is - equal to 'alloc->stop'. 
As we know that 'start', 'next' and - 'stop' are always nearby pointers, we play tricks and only store - the lower 16 bits of 'start' and 'stop', so that the three - variables plus some flags fit in 16 bytes. - */ - uintptr_t page; - localchar_t *result; - alloc_for_size_t *alloc = &_STM_TL2->alloc[size_class]; - size_t size = size_class * 8; - - /* reserve a fresh new page */ - page = _stm_reserve_pages(1); - - /* mark as UNCOMMITTED_... */ - mark_page_as_uncommitted(page); - - result = (localchar_t *)(page * 4096UL); - alloc->start = (uintptr_t)result; - alloc->stop = alloc->start + (4096 / size) * size; - alloc->next = result + size; - alloc->flag_partial_page = false; - return result; -} - - -void mark_page_as_uncommitted(uintptr_t pagenum) -{ - flag_page_private[pagenum] = UNCOMMITTED_SHARED_PAGE; - LIST_APPEND(_STM_TL2->uncommitted_pages, (object_t*)pagenum); -} - -void trace_if_young(object_t **pobj) -{ - if (*pobj == NULL) - return; - if (!_stm_is_young(*pobj)) - return; - - /* the location the object moved to is at an 8b offset */ - localchar_t *temp = ((localchar_t *)(*pobj)) + 8; - object_t * TLPREFIX *pforwarded = (object_t* TLPREFIX *)temp; - if ((*pobj)->stm_flags & GCFLAG_MOVED) { - *pobj = *pforwarded; - return; - } - - /* move obj to somewhere else */ - size_t size = stmcb_size(real_address(*pobj)); - object_t *moved = (object_t*)_stm_alloc_old(size); - - if (moved->stm_flags & GCFLAG_NOT_COMMITTED) - (*pobj)->stm_flags |= GCFLAG_NOT_COMMITTED; /* XXX: memcpy below overwrites this otherwise. - find better solution.*/ - - memcpy((void*)real_address(moved), - (void*)real_address(*pobj), - size); - - (*pobj)->stm_flags |= GCFLAG_MOVED; - *pforwarded = moved; - *pobj = moved; - - LIST_APPEND(_STM_TL2->old_objects_to_trace, moved); -} - -void minor_collect() -{ - /* visit shadowstack & add to old_obj_to_trace */ - object_t **current = _STM_TL1->shadow_stack; - object_t **base = _STM_TL1->shadow_stack_base; - while (current-- != base) { - trace_if_young(current); - } - - /* visit old_obj_to_trace until empty */ - struct stm_list_s *old_objs = _STM_TL2->old_objects_to_trace; - while (!stm_list_is_empty(old_objs)) { - object_t *item = stm_list_pop_item(old_objs); - - assert(!_stm_is_young(item)); - assert(!(item->stm_flags & GCFLAG_WRITE_BARRIER)); - - /* re-add write-barrier */ - item->stm_flags |= GCFLAG_WRITE_BARRIER; - - stmcb_trace(real_address(item), trace_if_young); - old_objs = _STM_TL2->old_objects_to_trace; - } - - - // also move objects to PRIVATE_PAGE pages, but then - // also add the GCFLAG_NOT_COMMITTED to these objects. 
- - /* clear nursery */ - localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); - memset((void*)real_address((object_t*)nursery_base), 0x0, - _STM_TL2->nursery_current - nursery_base); - _STM_TL2->nursery_current = nursery_base; -} - -void _stm_minor_collect() -{ - minor_collect(); -} - -localchar_t *collect_and_reserve(size_t size) -{ - _stm_start_safe_point(); - minor_collect(); - _stm_stop_safe_point(); - - localchar_t *current = _STM_TL2->nursery_current; - _STM_TL2->nursery_current = current + size; - return current; -} - -object_t *stm_allocate(size_t size) -{ - _stm_start_safe_point(); - _stm_stop_safe_point(); - assert(_STM_TL2->running_transaction); - assert(size % 8 == 0); - assert(16 <= size && size < NB_NURSERY_PAGES * 4096);//XXX - - localchar_t *current = _STM_TL2->nursery_current; - localchar_t *new_current = current + size; - _STM_TL2->nursery_current = new_current; - assert((uintptr_t)new_current < (1L << 32)); - if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { - current = collect_and_reserve(size); - } - - object_t *result = (object_t *)current; - return result; -} @@ -644,9 +303,9 @@ memset(&rw_shared_lock, 0, sizeof(rwticket)); /* Check that some values are acceptable */ - assert(4096 <= ((uintptr_t)_STM_TL1)); - assert(((uintptr_t)_STM_TL1) == ((uintptr_t)_STM_TL2)); - assert(((uintptr_t)_STM_TL2) + sizeof(*_STM_TL2) <= 8192); + assert(4096 <= ((uintptr_t)_STM_TL)); + assert(((uintptr_t)_STM_TL) == ((uintptr_t)_STM_TL)); + assert(((uintptr_t)_STM_TL) + sizeof(*_STM_TL) <= 8192); assert(2 <= FIRST_READMARKER_PAGE); assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); assert(READMARKER_START < READMARKER_END); @@ -671,16 +330,16 @@ /* Fill the TLS page (page 1) with 0xDD */ memset(REAL_ADDRESS(thread_base, 4096), 0xDD, 4096); - /* Make a "hole" at _STM_TL1 / _STM_TL2 */ - memset(REAL_ADDRESS(thread_base, _STM_TL2), 0, sizeof(*_STM_TL2)); + /* Make a "hole" at _STM_TL / _STM_TL */ + memset(REAL_ADDRESS(thread_base, _STM_TL), 0, sizeof(*_STM_TL)); /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ if (FIRST_READMARKER_PAGE > 2) mprotect(thread_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, PROT_NONE); - struct _thread_local2_s *th = - (struct _thread_local2_s *)REAL_ADDRESS(thread_base, _STM_TL2); + struct _thread_local1_s *th = + (struct _thread_local1_s *)REAL_ADDRESS(thread_base, _STM_TL); th->thread_num = i; th->thread_base = thread_base; @@ -705,7 +364,6 @@ num_threads_started = 0; index_page_never_used = FIRST_AFTER_NURSERY_PAGE; - pending_updates = NULL; } #define INVALID_GS_VALUE 0x6D6D6D6D @@ -723,21 +381,21 @@ _stm_restore_local_state(thread_num); - _STM_TL2->nursery_current = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); - _STM_TL1->shadow_stack = (object_t**)malloc(LENGTH_SHADOW_STACK * sizeof(void*)); - _STM_TL1->shadow_stack_base = _STM_TL1->shadow_stack; + _STM_TL->nursery_current = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); + _STM_TL->shadow_stack = (object_t**)malloc(LENGTH_SHADOW_STACK * sizeof(void*)); + _STM_TL->shadow_stack_base = _STM_TL->shadow_stack; - _STM_TL2->old_objects_to_trace = stm_list_create(); - _STM_TL2->uncommitted_pages = stm_list_create(); + _STM_TL->old_objects_to_trace = stm_list_create(); + _STM_TL->uncommitted_pages = stm_list_create(); - _STM_TL2->modified_objects = stm_list_create(); - _STM_TL2->uncommitted_objects = stm_list_create(); - assert(!_STM_TL2->running_transaction); + _STM_TL->modified_objects = stm_list_create(); + _STM_TL->uncommitted_objects = stm_list_create(); + 
assert(!_STM_TL->running_transaction); } bool _stm_is_in_transaction(void) { - return _STM_TL2->running_transaction; + return _STM_TL->running_transaction; } void _stm_teardown_thread(void) @@ -745,21 +403,21 @@ assert(!rwticket_wrtrylock(&rw_shared_lock)); assert(!rwticket_wrunlock(&rw_shared_lock)); - stm_list_free(_STM_TL2->modified_objects); - _STM_TL2->modified_objects = NULL; + stm_list_free(_STM_TL->modified_objects); + _STM_TL->modified_objects = NULL; - assert(stm_list_is_empty(_STM_TL2->uncommitted_objects)); - stm_list_free(_STM_TL2->uncommitted_objects); - _STM_TL2->uncommitted_objects = NULL; + assert(stm_list_is_empty(_STM_TL->uncommitted_objects)); + stm_list_free(_STM_TL->uncommitted_objects); + _STM_TL->uncommitted_objects = NULL; - assert(_STM_TL1->shadow_stack == _STM_TL1->shadow_stack_base); - free(_STM_TL1->shadow_stack); + assert(_STM_TL->shadow_stack == _STM_TL->shadow_stack_base); + free(_STM_TL->shadow_stack); - assert(_STM_TL2->old_objects_to_trace->count == 0); - stm_list_free(_STM_TL2->old_objects_to_trace); + assert(_STM_TL->old_objects_to_trace->count == 0); + stm_list_free(_STM_TL->old_objects_to_trace); - assert(_STM_TL2->uncommitted_pages->count == 0); - stm_list_free(_STM_TL2->uncommitted_pages); + assert(_STM_TL->uncommitted_pages->count == 0); + stm_list_free(_STM_TL->uncommitted_pages); set_gs_register(INVALID_GS_VALUE); } @@ -777,8 +435,8 @@ char *thread_base = get_thread_base(thread_num); set_gs_register((uintptr_t)thread_base); - assert(_STM_TL2->thread_num == thread_num); - assert(_STM_TL2->thread_base == thread_base); + assert(_STM_TL->thread_num == thread_num); + assert(_STM_TL->thread_base == thread_base); } static void reset_transaction_read_version(void) @@ -804,88 +462,58 @@ perror("madvise"); abort(); } - _STM_TL1->transaction_read_version = 1; + _STM_TL->transaction_read_version = 1; } void stm_major_collection(void) { - assert(_STM_TL2->running_transaction); + assert(_STM_TL->running_transaction); abort(); } void stm_start_transaction(jmpbufptr_t *jmpbufptr) { - assert(!_STM_TL2->running_transaction); + assert(!_STM_TL->running_transaction); stm_start_shared_lock(); - uint8_t old_rv = _STM_TL1->transaction_read_version; - _STM_TL1->transaction_read_version = old_rv + 1; + uint8_t old_rv = _STM_TL->transaction_read_version; + _STM_TL->transaction_read_version = old_rv + 1; if (UNLIKELY(old_rv == 0xff)) reset_transaction_read_version(); - assert(stm_list_is_empty(_STM_TL2->modified_objects)); - assert(stm_list_is_empty(_STM_TL2->old_objects_to_trace)); - stm_list_clear(_STM_TL2->uncommitted_pages); - - _STM_TL1->jmpbufptr = jmpbufptr; - _STM_TL2->running_transaction = 1; - _STM_TL2->need_abort = 0; - - _STM_TL2->old_shadow_stack = _STM_TL1->shadow_stack; + assert(stm_list_is_empty(_STM_TL->modified_objects)); + + nursery_on_start(); + + _STM_TL->jmpbufptr = jmpbufptr; + _STM_TL->running_transaction = 1; + _STM_TL->need_abort = 0; } void stm_stop_transaction(void) { - assert(_STM_TL2->running_transaction); + assert(_STM_TL->running_transaction); stm_stop_shared_lock(); stm_start_exclusive_lock(); - _STM_TL1->jmpbufptr = NULL; /* cannot abort any more */ + _STM_TL->jmpbufptr = NULL; /* cannot abort any more */ - minor_collect(); + /* do a minor_collection, + push uncommitted objects to other threads, + make completely uncommitted pages SHARED, + */ + nursery_on_commit(); /* copy modified object versions to other threads */ push_modified_to_other_threads(); - stm_list_clear(_STM_TL2->modified_objects); - - /* uncommitted objects / partially 
COMMITTED pages */ - push_uncommitted_to_other_threads(); - stm_list_clear(_STM_TL2->uncommitted_objects); - - /* uncommitted_pages */ - long j; - for (j = 2; j < LARGE_OBJECT_WORDS; j++) { - alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; - uint16_t start = alloc->start; - uint16_t cur = (uintptr_t)alloc->next; - - if (start == cur) - continue; /* page full -> will be replaced automatically */ - - alloc->start = cur; /* next transaction has different 'start' to - reset in case of an abort */ - - uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; - if (flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE) { - /* becomes a SHARED (done below) partially used page */ - alloc->flag_partial_page = 1; - } - } - - STM_LIST_FOREACH( - _STM_TL2->uncommitted_pages, - ({ - uintptr_t pagenum = (uintptr_t)item; - flag_page_private[pagenum] = SHARED_PAGE; - })); - stm_list_clear(_STM_TL2->uncommitted_pages); + stm_list_clear(_STM_TL->modified_objects); - _STM_TL2->running_transaction = 0; + _STM_TL->running_transaction = 0; stm_stop_exclusive_lock(); - fprintf(stderr, "%c", 'C'+_STM_TL2->thread_num*32); + fprintf(stderr, "%c", 'C'+_STM_TL->thread_num*32); } @@ -894,9 +522,9 @@ /* pull the right versions from other threads in order to reset our pages as part of an abort */ - struct stm_list_s *modified = _STM_TL2->modified_objects; - char *local_base = _STM_TL2->thread_base; - char *remote_base = get_thread_base(1 - _STM_TL2->thread_num); + struct stm_list_s *modified = _STM_TL->modified_objects; + char *local_base = _STM_TL->thread_base; + char *remote_base = get_thread_base(1 - _STM_TL->thread_num); STM_LIST_FOREACH( modified, @@ -928,52 +556,24 @@ void stm_abort_transaction(void) { /* here we hold the shared lock as a reader or writer */ - assert(_STM_TL2->running_transaction); + assert(_STM_TL->running_transaction); - /* clear old_objects_to_trace (they will have the WRITE_BARRIER flag - set because the ones we care about are also in modified_objects) */ - stm_list_clear(_STM_TL2->old_objects_to_trace); + /* reset shadowstack */ + _STM_TL->shadow_stack = _STM_TL->old_shadow_stack; - /* clear the nursery */ - localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); - memset((void*)real_address((object_t*)nursery_base), 0x0, - _STM_TL2->nursery_current - nursery_base); - _STM_TL2->nursery_current = nursery_base; - - /* reset shadowstack */ - _STM_TL1->shadow_stack = _STM_TL2->old_shadow_stack; - - /* unreserve uncommitted_pages and mark them as SHARED again - IFF they are not in alloc[] */ - /* STM_LIST_FOREACH(_STM_TL2->uncommitted_pages, ({ */ - /* uintptr_t pagenum = (uintptr_t)item; */ - /* flag_page_private[pagenum] = SHARED_PAGE; */ - /* })); */ - stm_list_clear(_STM_TL2->uncommitted_pages); - - - /* forget about GCFLAG_NOT_COMMITTED objects by - resetting alloc-pages */ - long j; - for (j = 2; j < LARGE_OBJECT_WORDS; j++) { - alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; - uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; - /* forget about all non-committed objects */ - alloc->next -= num_allocated; - } + nursery_on_abort(); - - assert(_STM_TL1->jmpbufptr != NULL); - assert(_STM_TL1->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ - _STM_TL2->running_transaction = 0; + assert(_STM_TL->jmpbufptr != NULL); + assert(_STM_TL->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ + _STM_TL->running_transaction = 0; stm_stop_shared_lock(); - fprintf(stderr, "%c", 'A'+_STM_TL2->thread_num*32); + fprintf(stderr, "%c", 'A'+_STM_TL->thread_num*32); /* reset 
all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_threads(); - stm_list_clear(_STM_TL2->modified_objects); + stm_list_clear(_STM_TL->modified_objects); - __builtin_longjmp(*_STM_TL1->jmpbufptr, 1); + __builtin_longjmp(*_STM_TL->jmpbufptr, 1); } diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -6,29 +6,22 @@ #include -#define TLPREFIX __attribute__((address_space(256))) +#define NB_PAGES (256*256) // 256MB +#define NB_THREADS 2 +#define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) +#define LARGE_OBJECT_WORDS 36 +#define NB_NURSERY_PAGES 1024 +#define LENGTH_SHADOW_STACK 163840 -typedef TLPREFIX struct _thread_local1_s _thread_local1_t; -typedef TLPREFIX struct object_s object_t; -typedef TLPREFIX struct read_marker_s read_marker_t; -typedef TLPREFIX char localchar_t; -/* Structure of objects - -------------------- +#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_THREADS) +#define READMARKER_END ((NB_PAGES * 4096UL) >> 4) +#define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) +#define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE +#define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) +#define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) +#define FIRST_AFTER_NURSERY_PAGE (FIRST_OBJECT_PAGE + NB_NURSERY_PAGES) - Objects manipulated by the user program, and managed by this library, - must start with a "struct object_s" field. Pointers to any user object - must use the "TLPREFIX struct foo *" type --- don't forget TLPREFIX. - The best is to use typedefs like above. - - The object_s part contains some fields reserved for the STM library, - as well as a 32-bit integer field that can be freely used by the user - program. However, right now this field must be read-only --- i.e. it - must never be modified on any object that may already belong to a - past transaction; you can only set it on just-allocated objects. The - best is to consider it as a field that is written to only once on - newly allocated objects. -*/ enum { /* set if the write-barrier slowpath needs to trigger. set on all @@ -61,6 +54,35 @@ }; /* flag_page_private */ + + +#define TLPREFIX __attribute__((address_space(256))) + +typedef TLPREFIX struct _thread_local1_s _thread_local1_t; +typedef TLPREFIX struct object_s object_t; +typedef TLPREFIX struct read_marker_s read_marker_t; +typedef TLPREFIX char localchar_t; +typedef TLPREFIX struct alloc_for_size_s alloc_for_size_t; +typedef void* jmpbufptr_t[5]; /* for use with __builtin_setjmp() */ + +/* Structure of objects + -------------------- + + Objects manipulated by the user program, and managed by this library, + must start with a "struct object_s" field. Pointers to any user object + must use the "TLPREFIX struct foo *" type --- don't forget TLPREFIX. + The best is to use typedefs like above. + + The object_s part contains some fields reserved for the STM library, + as well as a 32-bit integer field that can be freely used by the user + program. However, right now this field must be read-only --- i.e. it + must never be modified on any object that may already belong to a + past transaction; you can only set it on just-allocated objects. The + best is to consider it as a field that is written to only once on + newly allocated objects. 
+*/ + + struct object_s { uint8_t stm_flags; /* reserved for the STM library */ /* make sure it doesn't get bigger than 4 bytes for performance @@ -71,29 +93,73 @@ uint8_t rm; }; -typedef void* jmpbufptr_t[5]; /* for use with __builtin_setjmp() */ +struct alloc_for_size_s { + localchar_t *next; + uint16_t start, stop; + bool flag_partial_page; +}; + struct _thread_local1_s { jmpbufptr_t *jmpbufptr; uint8_t transaction_read_version; + + int thread_num; + bool running_transaction; + bool need_abort; + char *thread_base; + struct stm_list_s *modified_objects; + + object_t **old_shadow_stack; object_t **shadow_stack; object_t **shadow_stack_base; + + struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; + struct stm_list_s *uncommitted_objects; + /* pages newly allocated in the current transaction only containing + uncommitted objects */ + struct stm_list_s *uncommitted_pages; + + + localchar_t *nursery_current; + struct stm_list_s *old_objects_to_trace; }; -#define _STM_TL1 ((_thread_local1_t *)4352) +#define _STM_TL ((_thread_local1_t *)4352) + +extern uint8_t flag_page_private[NB_PAGES]; /* xxx_PAGE constants above */ +extern char *object_pages; /* start of MMAP region */ +extern uint8_t write_locks[READMARKER_END - READMARKER_START]; + /* this should use llvm's coldcc calling convention, but it's not exposed to C code so far */ void _stm_write_slowpath(object_t *); + +/* ==================== HELPERS ==================== */ + #define LIKELY(x) __builtin_expect(x, true) #define UNLIKELY(x) __builtin_expect(x, false) +#define REAL_ADDRESS(object_pages, src) ((object_pages) + (uintptr_t)(src)) +static inline struct object_s *real_address(object_t *src) +{ + return (struct object_s*)REAL_ADDRESS(_STM_TL->thread_base, src); +} + +static inline char *get_thread_base(long thread_num) +{ + return object_pages + thread_num * (NB_PAGES * 4096UL); +} + + +/* ==================== API ==================== */ static inline void stm_read(object_t *obj) { ((read_marker_t *)(((uintptr_t)obj) >> 4))->rm = - _STM_TL1->transaction_read_version; + _STM_TL->transaction_read_version; } static inline void stm_write(object_t *obj) @@ -104,12 +170,12 @@ static inline void stm_push_root(object_t *obj) { - *(_STM_TL1->shadow_stack++) = obj; + *(_STM_TL->shadow_stack++) = obj; } static inline object_t *stm_pop_root(void) { - return *(--_STM_TL1->shadow_stack); + return *(--_STM_TL->shadow_stack); } /* must be provided by the user of this library */ diff --git a/c7/nursery.c b/c7/nursery.c new file mode 100644 --- /dev/null +++ b/c7/nursery.c @@ -0,0 +1,356 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include "core.h" +#include "list.h" +#include "nursery.h" + +uintptr_t index_page_never_used; + +uintptr_t _stm_reserve_pages(int num) +{ + /* Grab a free page, initially shared between the threads. */ + + // XXX look in some free list first + + /* Return the index'th object page, which is so far never used. 
*/ + uintptr_t index = __sync_fetch_and_add(&index_page_never_used, num); + + int i; + for (i = 0; i < num; i++) { + assert(flag_page_private[index+i] == SHARED_PAGE); + } + assert(flag_page_private[index] == SHARED_PAGE); + if (index + num >= NB_PAGES) { + fprintf(stderr, "Out of mmap'ed memory!\n"); + abort(); + } + return index; +} + + +void mark_page_as_uncommitted(uintptr_t pagenum) +{ + flag_page_private[pagenum] = UNCOMMITTED_SHARED_PAGE; + LIST_APPEND(_STM_TL->uncommitted_pages, (object_t*)pagenum); +} + + + +object_t *_stm_allocate_old(size_t size) +{ + int pages = (size + 4095) / 4096; + localchar_t* addr = (localchar_t*)(_stm_reserve_pages(pages) * 4096); + + object_t* o = (object_t*)addr; + o->stm_flags |= GCFLAG_WRITE_BARRIER; + return o; +} + +object_t *stm_allocate_prebuilt(size_t size) +{ + return _stm_allocate_old(size); /* XXX */ +} + +localchar_t *_stm_alloc_next_page(size_t size_class) +{ + /* may return uninitialized pages */ + + /* 'alloc->next' points to where the next allocation should go. The + present function is called instead when this next allocation is + equal to 'alloc->stop'. As we know that 'start', 'next' and + 'stop' are always nearby pointers, we play tricks and only store + the lower 16 bits of 'start' and 'stop', so that the three + variables plus some flags fit in 16 bytes. + */ + uintptr_t page; + localchar_t *result; + alloc_for_size_t *alloc = &_STM_TL->alloc[size_class]; + size_t size = size_class * 8; + + /* reserve a fresh new page */ + page = _stm_reserve_pages(1); + + /* mark as UNCOMMITTED_... */ + mark_page_as_uncommitted(page); + + result = (localchar_t *)(page * 4096UL); + alloc->start = (uintptr_t)result; + alloc->stop = alloc->start + (4096 / size) * size; + alloc->next = result + size; + alloc->flag_partial_page = false; + return result; +} + + + + +object_t *_stm_alloc_old(size_t size) +{ + /* may return uninitialized objects. except for the + GCFLAG_NOT_COMMITTED, it is set exactly if + we allocated the object in a SHARED and partially + committed page. (XXX: add the flag in some other place) + */ + object_t *result; + size_t size_class = size / 8; + assert(size_class >= 2); + + if (size_class >= LARGE_OBJECT_WORDS) { + result = _stm_allocate_old(size); + result->stm_flags &= ~GCFLAG_NOT_COMMITTED; /* page may be non-zeroed */ + + int page = ((uintptr_t)result) / 4096; + int pages = (size + 4095) / 4096; + int i; + for (i = 0; i < pages; i++) { + mark_page_as_uncommitted(page + i); + } + /* make sure the flag is not set (page is not zeroed!) */ + result->stm_flags &= ~GCFLAG_NOT_COMMITTED; + } else { + alloc_for_size_t *alloc = &_STM_TL->alloc[size_class]; + + if ((uint16_t)((uintptr_t)alloc->next) == alloc->stop) { + result = (object_t *)_stm_alloc_next_page(size_class); + } else { + result = (object_t *)alloc->next; + alloc->next += size; + if (alloc->flag_partial_page) { + LIST_APPEND(_STM_TL->uncommitted_objects, result); + result->stm_flags |= GCFLAG_NOT_COMMITTED; + } else { + /* make sure the flag is not set (page is not zeroed!) 
*/ + result->stm_flags &= ~GCFLAG_NOT_COMMITTED; + } + } + } + return result; +} + + + + +void trace_if_young(object_t **pobj) +{ + if (*pobj == NULL) + return; + if (!_stm_is_young(*pobj)) + return; + + /* the location the object moved to is at an 8b offset */ + localchar_t *temp = ((localchar_t *)(*pobj)) + 8; + object_t * TLPREFIX *pforwarded = (object_t* TLPREFIX *)temp; + if ((*pobj)->stm_flags & GCFLAG_MOVED) { + *pobj = *pforwarded; + return; + } + + /* move obj to somewhere else */ + size_t size = stmcb_size(real_address(*pobj)); + object_t *moved = (object_t*)_stm_alloc_old(size); + + if (moved->stm_flags & GCFLAG_NOT_COMMITTED) + (*pobj)->stm_flags |= GCFLAG_NOT_COMMITTED; /* XXX: memcpy below overwrites this otherwise. + find better solution.*/ + + memcpy((void*)real_address(moved), + (void*)real_address(*pobj), + size); + + (*pobj)->stm_flags |= GCFLAG_MOVED; + *pforwarded = moved; + *pobj = moved; + + LIST_APPEND(_STM_TL->old_objects_to_trace, moved); +} + +void minor_collect() +{ + /* visit shadowstack & add to old_obj_to_trace */ + object_t **current = _STM_TL->shadow_stack; + object_t **base = _STM_TL->shadow_stack_base; + while (current-- != base) { + trace_if_young(current); + } + + /* visit old_obj_to_trace until empty */ + struct stm_list_s *old_objs = _STM_TL->old_objects_to_trace; + while (!stm_list_is_empty(old_objs)) { + object_t *item = stm_list_pop_item(old_objs); + + assert(!_stm_is_young(item)); + assert(!(item->stm_flags & GCFLAG_WRITE_BARRIER)); + + /* re-add write-barrier */ + item->stm_flags |= GCFLAG_WRITE_BARRIER; + + stmcb_trace(real_address(item), trace_if_young); + old_objs = _STM_TL->old_objects_to_trace; + } + + /* clear nursery */ + localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); + memset((void*)real_address((object_t*)nursery_base), 0x0, + _STM_TL->nursery_current - nursery_base); + _STM_TL->nursery_current = nursery_base; +} + +void _stm_minor_collect() +{ + minor_collect(); +} + +localchar_t *collect_and_reserve(size_t size) +{ + _stm_start_safe_point(); + minor_collect(); + _stm_stop_safe_point(); + + localchar_t *current = _STM_TL->nursery_current; + _STM_TL->nursery_current = current + size; + return current; +} + + +object_t *stm_allocate(size_t size) +{ + _stm_start_safe_point(); + _stm_stop_safe_point(); + assert(_STM_TL->running_transaction); + assert(size % 8 == 0); + assert(16 <= size && size < NB_NURSERY_PAGES * 4096);//XXX + + localchar_t *current = _STM_TL->nursery_current; + localchar_t *new_current = current + size; + _STM_TL->nursery_current = new_current; + assert((uintptr_t)new_current < (1L << 32)); + if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { + current = collect_and_reserve(size); + } + + object_t *result = (object_t *)current; + return result; +} + + +void push_uncommitted_to_other_threads() +{ + /* WE HAVE THE EXCLUSIVE LOCK HERE */ + + struct stm_list_s *uncommitted = _STM_TL->uncommitted_objects; + char *local_base = _STM_TL->thread_base; + char *remote_base = get_thread_base(1 - _STM_TL->thread_num); + + STM_LIST_FOREACH( + uncommitted, + ({ + /* write-lock always cleared for these objects */ + uintptr_t lock_idx; + assert(lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START); + assert(!write_locks[lock_idx]); + + /* remove the flag (they are now committed) */ + item->stm_flags &= ~GCFLAG_NOT_COMMITTED; + + uintptr_t pagenum = ((uintptr_t)item) / 4096UL; + if (flag_page_private[pagenum] == PRIVATE_PAGE) { + /* page was privatized... 
*/ + char *src = REAL_ADDRESS(local_base, item); + char *dst = REAL_ADDRESS(remote_base, item); + size_t size = stmcb_size((struct object_s*)src); + memcpy(dst, src, size); + } + })); +} + +void nursery_on_start() +{ + assert(stm_list_is_empty(_STM_TL->old_objects_to_trace)); + stm_list_clear(_STM_TL->uncommitted_pages); + + _STM_TL->old_shadow_stack = _STM_TL->shadow_stack; +} + +void nursery_on_commit() +{ + minor_collect(); + + /* uncommitted objects / partially COMMITTED pages */ + push_uncommitted_to_other_threads(); + stm_list_clear(_STM_TL->uncommitted_objects); + + /* uncommitted_pages */ + long j; + for (j = 2; j < LARGE_OBJECT_WORDS; j++) { + alloc_for_size_t *alloc = &_STM_TL->alloc[j]; + uint16_t start = alloc->start; + uint16_t cur = (uintptr_t)alloc->next; + + if (start == cur) + continue; /* page full -> will be replaced automatically */ + + alloc->start = cur; /* next transaction has different 'start' to + reset in case of an abort */ + + uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; + if (flag_page_private[pagenum] == UNCOMMITTED_SHARED_PAGE) { + /* becomes a SHARED (done below) partially used page */ + alloc->flag_partial_page = 1; + } + } + + STM_LIST_FOREACH( + _STM_TL->uncommitted_pages, + ({ + uintptr_t pagenum = (uintptr_t)item; + flag_page_private[pagenum] = SHARED_PAGE; + })); + stm_list_clear(_STM_TL->uncommitted_pages); +} + +void nursery_on_abort() +{ + + /* clear old_objects_to_trace (they will have the WRITE_BARRIER flag + set because the ones we care about are also in modified_objects) */ + stm_list_clear(_STM_TL->old_objects_to_trace); + + /* clear the nursery */ + localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); + memset((void*)real_address((object_t*)nursery_base), 0x0, + _STM_TL->nursery_current - nursery_base); + _STM_TL->nursery_current = nursery_base; + + + /* unreserve uncommitted_pages and mark them as SHARED again + IFF they are not in alloc[] */ + /* STM_LIST_FOREACH(_STM_TL->uncommitted_pages, ({ */ + /* uintptr_t pagenum = (uintptr_t)item; */ + /* flag_page_private[pagenum] = SHARED_PAGE; */ + /* })); */ + stm_list_clear(_STM_TL->uncommitted_pages); + + + /* forget about GCFLAG_NOT_COMMITTED objects by + resetting alloc-pages */ + long j; + for (j = 2; j < LARGE_OBJECT_WORDS; j++) { + alloc_for_size_t *alloc = &_STM_TL->alloc[j]; + uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; + /* forget about all non-committed objects */ + alloc->next -= num_allocated; + } +} + + + diff --git a/c7/nursery.h b/c7/nursery.h new file mode 100644 --- /dev/null +++ b/c7/nursery.h @@ -0,0 +1,17 @@ + + + +object_t *stm_allocate_prebuilt(size_t size); +object_t *_stm_allocate_old(size_t size); +object_t *stm_allocate(size_t size); + +void _stm_minor_collect(); + +void nursery_on_abort(); +void nursery_on_commit(); +void nursery_on_start(); + + +extern uintptr_t index_page_never_used; + + diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -7,9 +7,13 @@ parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) header_files = [os.path.join(parent_dir, _n) for _n in - "core.h pagecopy.h list.h reader_writer_lock.h".split()] + """core.h pagecopy.h list.h + reader_writer_lock.h + nursery.h""".split()] source_files = [os.path.join(parent_dir, _n) for _n in - "core.c pagecopy.c list.c reader_writer_lock.c".split()] + """core.c pagecopy.c list.c + reader_writer_lock.c + nursery.c""".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if 
os.path.exists(_pycache_): @@ -118,52 +122,52 @@ bool _checked_stm_write(object_t *object) { jmpbufptr_t here; if (__builtin_setjmp(here) == 0) { // returned directly - assert(_STM_TL1->jmpbufptr == (jmpbufptr_t*)-1); - _STM_TL1->jmpbufptr = &here; + assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); + _STM_TL->jmpbufptr = &here; stm_write(object); - _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 0; } - _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 1; } bool _stm_stop_transaction(void) { jmpbufptr_t here; if (__builtin_setjmp(here) == 0) { // returned directly - assert(_STM_TL1->jmpbufptr == (jmpbufptr_t*)-1); - _STM_TL1->jmpbufptr = &here; + assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); + _STM_TL->jmpbufptr = &here; stm_stop_transaction(); - _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 0; } - _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 1; } bool _stm_check_stop_safe_point(void) { jmpbufptr_t here; if (__builtin_setjmp(here) == 0) { // returned directly - assert(_STM_TL1->jmpbufptr == (jmpbufptr_t*)-1); - _STM_TL1->jmpbufptr = &here; + assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); + _STM_TL->jmpbufptr = &here; _stm_stop_safe_point(); - _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 0; } - _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 1; } bool _stm_check_abort_transaction(void) { jmpbufptr_t here; if (__builtin_setjmp(here) == 0) { // returned directly - assert(_STM_TL1->jmpbufptr == (jmpbufptr_t*)-1); - _STM_TL1->jmpbufptr = &here; + assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); + _STM_TL->jmpbufptr = &here; stm_abort_transaction(); - _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 0; } - _STM_TL1->jmpbufptr = (jmpbufptr_t*)-1; + _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 1; } From noreply at buildbot.pypy.org Wed Jan 22 14:41:35 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 22 Jan 2014 14:41:35 +0100 (CET) Subject: [pypy-commit] stmgc c7: update demo makefile Message-ID: <20140122134135.9DCF71C315E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r668:95fad5b3c052 Date: 2014-01-22 14:40 +0100 http://bitbucket.org/pypy/stmgc/changeset/95fad5b3c052/ Log: update demo makefile diff --git a/c7/Makefile b/c7/Makefile --- a/c7/Makefile +++ b/c7/Makefile @@ -14,9 +14,9 @@ rm -f $(BUILD_EXE) $(DEBUG_EXE) $(RELEASE_EXE) -H_FILES = core.h list.h pagecopy.h reader_writer_lock.h +H_FILES = core.h list.h pagecopy.h reader_writer_lock.h stmsync.h pages.h nursery.h -C_FILES = core.c list.c pagecopy.c reader_writer_lock.c +C_FILES = core.c list.c pagecopy.c reader_writer_lock.c stmsync.c pages.c nursery.c DEBUG = -g diff --git a/c7/demo2.c b/c7/demo2.c --- a/c7/demo2.c +++ b/c7/demo2.c @@ -5,7 +5,7 @@ #include #include "core.h" - +#include "stmsync.h" #define LIST_LENGTH 6000 #define BUNCH 400 From noreply at buildbot.pypy.org Wed Jan 22 14:41:36 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 22 Jan 2014 14:41:36 +0100 (CET) Subject: [pypy-commit] stmgc c7: update duhton makefile Message-ID: <20140122134136.AF1C91C315E@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r669:e3f4b5c27027 Date: 2014-01-22 14:41 +0100 http://bitbucket.org/pypy/stmgc/changeset/e3f4b5c27027/ Log: update duhton makefile diff --git a/duhton/Makefile 
b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -2,6 +2,9 @@ C7SOURCES = ../c7/core.c \ ../c7/pagecopy.c \ ../c7/list.c \ + ../c7/pages.c \ + ../c7/nursery.c \ + ../c7/stmsync.c \ ../c7/reader_writer_lock.c C7HEADERS = ../c7/*.h From noreply at buildbot.pypy.org Wed Jan 22 16:02:23 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 22 Jan 2014 16:02:23 +0100 (CET) Subject: [pypy-commit] stmgc c7: simple page-reuse for aborted transactions Message-ID: <20140122150223.C3B861C315D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r670:a867104d94d6 Date: 2014-01-22 16:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/a867104d94d6/ Log: simple page-reuse for aborted transactions diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -133,7 +133,8 @@ void stm_setup(void) { _stm_reset_shared_lock(); - + _stm_reset_pages(); + /* Check that some values are acceptable */ assert(4096 <= ((uintptr_t)_STM_TL)); assert(((uintptr_t)_STM_TL) == ((uintptr_t)_STM_TL)); @@ -195,7 +196,6 @@ or should it be UNCOMMITTED??? */ num_threads_started = 0; - index_page_never_used = FIRST_AFTER_NURSERY_PAGE; } #define INVALID_GS_VALUE 0x6D6D6D6D @@ -256,7 +256,7 @@ void _stm_teardown(void) { munmap(object_pages, TOTAL_MEMORY); - _stm_reset_page_flags(); + _stm_reset_pages(); memset(write_locks, 0, sizeof(write_locks)); object_pages = NULL; } diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -322,24 +322,36 @@ _STM_TL->nursery_current = nursery_base; - /* unreserve uncommitted_pages and mark them as SHARED again - IFF they are not in alloc[] */ - /* STM_LIST_FOREACH(_STM_TL->uncommitted_pages, ({ */ - /* uintptr_t pagenum = (uintptr_t)item; */ - /* flag_page_private[pagenum] = SHARED_PAGE; */ - /* })); */ - stm_list_clear(_STM_TL->uncommitted_pages); - - /* forget about GCFLAG_NOT_COMMITTED objects by resetting alloc-pages */ long j; for (j = 2; j < LARGE_OBJECT_WORDS; j++) { alloc_for_size_t *alloc = &_STM_TL->alloc[j]; uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; - /* forget about all non-committed objects */ - alloc->next -= num_allocated; + uintptr_t next = (uintptr_t)alloc->next; + + if (num_allocated) { + /* forget about all non-committed objects */ + alloc->next -= num_allocated; + + uintptr_t pagenum = ((uintptr_t)(next - 1)) / 4096UL; + if (stm_get_page_flag(pagenum) == UNCOMMITTED_SHARED_PAGE) { + /* the page will be freed below, we need a new one for the + next allocation */ + alloc->next = 0; + alloc->stop = 0; + alloc->start = 0; + } + } } + + /* unreserve uncommitted_pages and mark them as SHARED again + IFF they are not in alloc[] */ + STM_LIST_FOREACH(_STM_TL->uncommitted_pages, ({ + stm_pages_unreserve((uintptr_t)item); + })); + stm_list_clear(_STM_TL->uncommitted_pages); + } diff --git a/c7/pages.c b/c7/pages.c --- a/c7/pages.c +++ b/c7/pages.c @@ -22,12 +22,23 @@ #endif +uintptr_t index_page_never_used; +uint8_t flag_page_private[NB_PAGES]; -uint8_t flag_page_private[NB_PAGES]; -uintptr_t index_page_never_used; +uint8_t list_lock = 0; +struct stm_list_s *single_page_list; -void _stm_reset_page_flags() + +void _stm_reset_pages() { + assert(!list_lock); + if (!single_page_list) + single_page_list = stm_list_create(); + else + stm_list_clear(single_page_list); + + index_page_never_used = FIRST_AFTER_NURSERY_PAGE; + memset(flag_page_private, 0, sizeof(flag_page_private)); } @@ -43,7 +54,6 @@ } - void stm_pages_privatize(uintptr_t pagenum) { if (flag_page_private[pagenum] == PRIVATE_PAGE) 
@@ -98,11 +108,23 @@ } + uintptr_t stm_pages_reserve(int num) { /* grab free, possibly uninitialized pages */ - - // XXX look in some free list first + if (!stm_list_is_empty(single_page_list)) { + uint8_t previous; + while ((previous = __sync_lock_test_and_set(&list_lock, 1))) + spin_loop(); + + if (!stm_list_is_empty(single_page_list)) { + uintptr_t res = (uintptr_t)stm_list_pop_item(single_page_list); + list_lock = 0; + return res; + } + + list_lock = 0; + } /* Return the index'th object page, which is so far never used. */ uintptr_t index = __sync_fetch_and_add(&index_page_never_used, num); @@ -111,7 +133,7 @@ for (i = 0; i < num; i++) { assert(flag_page_private[index+i] == SHARED_PAGE); } - assert(flag_page_private[index] == SHARED_PAGE); + if (index + num >= NB_PAGES) { fprintf(stderr, "Out of mmap'ed memory!\n"); abort(); @@ -119,6 +141,17 @@ return index; } +void stm_pages_unreserve(uintptr_t pagenum) +{ + uint8_t previous; + while ((previous = __sync_lock_test_and_set(&list_lock, 1))) + spin_loop(); + + flag_page_private[pagenum] = SHARED_PAGE; + LIST_APPEND(single_page_list, (object_t*)pagenum); + list_lock = 0; +} + diff --git a/c7/pages.h b/c7/pages.h --- a/c7/pages.h +++ b/c7/pages.h @@ -19,8 +19,8 @@ uintptr_t stm_pages_reserve(int num); uint8_t stm_get_page_flag(int pagenum); void stm_set_page_flag(int pagenum, uint8_t flag); -void _stm_reset_page_flags(void); +void _stm_reset_pages(void); +void stm_pages_unreserve(uintptr_t num); - diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -439,8 +439,24 @@ newer = stm_pop_root() assert stm_get_real_address(new) == stm_get_real_address(newer) assert stm_get_char(newer) == '\0' + + def test_reuse_page(self): + stm_start_transaction() + new = stm_allocate(16) + stm_push_root(new) + stm_minor_collect() + new = stm_pop_root() + assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE + stm_abort_transaction() + + stm_start_transaction() + newer = stm_allocate(16) + stm_push_root(newer) + stm_minor_collect() + newer = stm_pop_root() + assert new == newer + - # def test_resolve_write_write_no_conflict(self): # stm_start_transaction() # p1 = stm_allocate(16) From noreply at buildbot.pypy.org Wed Jan 22 17:50:53 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 22 Jan 2014 17:50:53 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: More tests Message-ID: <20140122165053.8FFCD1C3969@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68844:4685b47e9d49 Date: 2014-01-22 17:15 +0100 http://bitbucket.org/pypy/pypy/changeset/4685b47e9d49/ Log: More tests diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -311,6 +311,11 @@ assert type(x) == with_prepare assert x.called_prepare raises(TypeError, add, a, b, out=c) + a = array(1).view(type=with_prepare) + b = array(1) + x = add(a, b) + assert x == 2 + assert x.called_prepare def test___array_prepare__1arg_scalar(self): from numpypy import ndarray, array, log, ones @@ -368,6 +373,16 @@ assert x.called_prepare raises(TypeError, add, a, b, out=c) + def test_result_is_subtype(self): + from numpypy import ndarray, add, array + class subtype(ndarray): + pass + + a = array(10).view(subtype) + b = 10 + res = add(a, b) + assert isinstance(res, subtype) + def 
test__getitem_modifies_shape(self): import numpypy as N # numpy's matrix class caused an infinite loop From noreply at buildbot.pypy.org Wed Jan 22 17:50:54 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Wed, 22 Jan 2014 17:50:54 +0100 (CET) Subject: [pypy-commit] pypy numpypy-array_prepare_-array_wrap: Call __array_prepare__ in more cases Message-ID: <20140122165054.D54FA1C3969@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: numpypy-array_prepare_-array_wrap Changeset: r68845:07b429e67743 Date: 2014-01-22 17:19 +0100 http://bitbucket.org/pypy/pypy/changeset/07b429e67743/ Log: Call __array_prepare__ in more cases diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -419,15 +419,17 @@ w_rhs.get_scalar_value().convert_to(space, calc_dtype) ) if isinstance(out, W_NDimArray): - # TODO: Call __array_prepare__ + # TODO: Array priority + out = loop.call_prepare(space, w_out, w_out) + if out.is_scalar(): out.set_scalar_value(arr) else: out.fill(space, arr) else: - # TODO: Call __array_prepare__ - out = W_NDimArray(Scalar(res_dtype, res_dtype.box(0))) - out.set_scalar_value(arr) + # TODO: Array priority + out = loop.call_prepare(space, w_lhs, arr) + # XXX: How to set the value on the box since they're immutable ? return out new_shape = shape_agreement(space, w_lhs.get_shape(), w_rhs) From noreply at buildbot.pypy.org Wed Jan 22 18:23:17 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:17 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: improve the test and fix Message-ID: <20140122172317.0D6C91C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68846:95626ec9e6bd Date: 2014-01-17 18:48 +0100 http://bitbucket.org/pypy/pypy/changeset/95626ec9e6bd/ Log: improve the test and fix diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -195,7 +195,8 @@ all = {} for frame in inputframes: for x in frame: - if x is not None and not isinstance(x, Const) and x not in all: + if x is not None and x not in all: + assert not isinstance(x, Const) count += 1 all[x] = None inputargs = [None] * count @@ -203,8 +204,7 @@ all = {} for frame in inputframes: for item in frame: - if (item is not None and not isinstance(item, Const) and - item not in all): + if item is not None and item not in all: inputargs[pos] = item all[item] = None pos += 1 diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -1,7 +1,7 @@ from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, ConstInt,\ - INT, REF + INT, REF, Const from rpython.jit.metainterp import history from rpython.jit.resume.reader import AbstractResumeReader from rpython.jit.resume.rescode import TAGBOX, TAGCONST, TAGSMALLINT, TAGVIRTUAL @@ -219,7 +219,8 @@ if box is None: return miframe.registers_i[i] = box - res[-1][pos] = box + if not isinstance(box, Const): + res[-1][pos] = box def store_ref_box(self, res, pos, miframe, i, jitframe_pos): box = self.get_box_value(jitframe_pos, REF) @@ -260,6 +261,12 @@ self.cache[jitframe_pos] = box res[-1][pos] = box + def get_loc(self, p): + tag, pos = self.decode(p) + if tag == TAGBOX: + return pos + return -1 + def finish(self): res = [] 
self.cache = {} @@ -279,7 +286,8 @@ self.store_float_box(res, pos, miframe, i, frame.registers[pos]) pos += 1 self.cache = None - return res, [f.registers for f in self.framestack] + return res, [[self.get_loc(r) for r in f.registers] + for f in self.framestack] def rebuild_from_resumedata(metainterp, deadframe, faildescr): """ Reconstruct metainterp frames from the resumedata diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -84,7 +84,7 @@ def test_box_resume_reader(self): jitcode = JitCode("jitcode") jitcode.global_index = 0 - jitcode.setup(num_regs_i=13, num_regs_r=0, num_regs_f=0) + jitcode.setup(num_regs_i=4, num_regs_r=0, num_regs_f=0) builder = ResumeBytecodeBuilder() builder.enter_frame(-1, jitcode) builder.resume_put(TAGBOX | (100 << 2), 0, 1) @@ -98,13 +98,15 @@ metainterp = MockMetaInterp() metainterp.staticdata = MockStaticData([jitcode], []) metainterp.cpu = MockCPU() - rebuild_from_resumedata(metainterp, "myframe", descr) + inpframes, inplocs = rebuild_from_resumedata(metainterp, "myframe", descr) assert len(metainterp.framestack) == 1 f = metainterp.framestack[-1] assert f.registers_i[1].getint() == 103 assert isinstance(f.registers_i[2], Const) assert f.registers_i[2].getint() == 15 assert f.registers_i[3].getint() == 13 + assert inpframes == [[None, AnyBox(), None, None]] + assert inplocs == [[-1, 100, -1, -1]] def test_nested_call(self): jitcode1 = JitCode("jitcode") From noreply at buildbot.pypy.org Wed Jan 22 18:23:23 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:23 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: kill some dead code and fix first frontend test Message-ID: <20140122172323.866B41C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68851:3407ed54bd43 Date: 2014-01-18 14:01 +0100 http://bitbucket.org/pypy/pypy/changeset/3407ed54bd43/ Log: kill some dead code and fix first frontend test diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -50,7 +50,7 @@ def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): """Optimize loop.operations to remove internal overheadish operations. 
""" - from rpython.jit.backend.resumebuilder import flatten + from rpython.jit.resume.backend import flatten debug_start("jit-optimize") try: diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -155,6 +155,8 @@ fieldpos = self.get_box_pos(op.getarg(1)) descr = op.getdescr() self.builder.resume_setfield_gc(structpos, fieldpos, descr) + elif op.getopnum() == rop.RESUME_SET_PC: + self.builder.resume_set_pc(op.getarg(0).getint()) else: raise Exception("strange operation") diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -41,28 +41,12 @@ self.framestack[-1].pc = pc self.framestack.append(ResumeFrame(jitcode)) - def encode_box(self, pos): - return rescode.TAGBOX | (pos << rescode.TAGOFFSET) - - def encode_virtual(self, box): - return rescode.TAGVIRTUAL | (self.virtuals[box].pos << rescode.TAGOFFSET) - - def encode_const(self, const): - XXX - if isinstance(const, ConstInt) and const.getint() < (sys.maxint >> 3): - return rescode.TAGSMALLINT | (const.getint() << rescode.TAGOFFSET) - self.consts.append(const) - return rescode.TAGCONST | ((len(self.consts) - 1) << TAGOFFSET) - def decode(self, pos): return pos & 0x3, pos >> rescode.TAGOFFSET def resume_put(self, encoded_pos, frame_no, frontend_position): self.framestack[frame_no].registers[frontend_position] = encoded_pos - def encode(self, box): - xxx - def resume_new(self, v_pos, descr): v = Virtual(v_pos, descr) if v_pos >= len(self.virtuals): diff --git a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -2,7 +2,7 @@ from rpython.jit.metainterp.history import ConstInt (UNUSED, ENTER_FRAME, LEAVE_FRAME, RESUME_PUT, - RESUME_NEW, RESUME_SETFIELD_GC) = range(6) + RESUME_NEW, RESUME_SETFIELD_GC, RESUME_SET_PC) = range(7) TAGCONST = 0x0 TAGVIRTUAL = 0x2 @@ -63,6 +63,10 @@ return TAGSMALLINT | (const.getint() << 2) xxx + def resume_set_pc(self, pc): + self.write(RESUME_SET_PC) + self.write_short(pc) + def resume_put(self, pos, frame_pos, pos_in_frame): self.write(RESUME_PUT) self.write_short(pos) diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -264,6 +264,7 @@ codewriter = CodeWriter() ssarepr = unformat_assembler(assembler, name='one') jitcode = codewriter.assembler.assemble(ssarepr) + jitcode.global_index = 0 jitcode.is_portal = True reds = ['v' + str(i) for i in range(no_reds)] jitdriver_sd = JitDriverStaticData(JitDriver(greens = [], @@ -278,6 +279,7 @@ cpu = LLGraphCPU(None, stats) metainterp_sd = MetaInterpStaticData(cpu, None) metainterp_sd.finish_setup(codewriter) + metainterp_sd.alljitcodes = [jitcode] return MetaInterp(metainterp_sd, jitdriver_sd), stats, jitdriver_sd class TestResumeRecorder(object): @@ -314,6 +316,7 @@ resume_put(i1, 0, 1) resume_put(i2, 0, 0) resume_set_pc(24) + leave_frame() """, namespace={'jitcode': jitcode}) equaloplists(resume_ops, expected.operations, cache=True) From noreply at buildbot.pypy.org Wed Jan 22 18:23:18 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:18 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: finish this test Message-ID: <20140122172318.5E1581C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: 
r68847:4dbef4926427 Date: 2014-01-17 18:55 +0100 http://bitbucket.org/pypy/pypy/changeset/4dbef4926427/ Log: finish this test diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -110,6 +110,9 @@ jitcode = self.staticdata.alljitcodes[self.read_short(pos + 3)] self.enter_frame(pc, jitcode) pos += 5 + elif op == rescode.LEAVE_FRAME: + self.leave_frame() + pos += 1 elif op == rescode.RESUME_PUT: encoded = self.read_short(pos + 1) frame_pos = self.read(pos + 3) diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -111,24 +111,22 @@ def test_nested_call(self): jitcode1 = JitCode("jitcode") jitcode1.setup(num_regs_i=13, num_regs_r=0, num_regs_f=0) + jitcode1.global_index = 0 jitcode2 = JitCode("jitcode2") jitcode2.setup(num_regs_i=9, num_regs_r=0, num_regs_f=0) - resume_loop = parse(""" - [] - enter_frame(-1, descr=jitcode1) - resume_put(11, 0, 2) - enter_frame(12, descr=jitcode2) - resume_put(12, 1, 3) - resume_put(8, 0, 4) - leave_frame() - resume_put(10, 0, 1) - leave_frame() - """, namespace={'jitcode1': jitcode1, 'jitcode2': jitcode2}) + jitcode2.global_index = 1 + builder = ResumeBytecodeBuilder() + builder.enter_frame(-1, jitcode1) + builder.resume_put(TAGBOX | (11 << 2), 0, 2) + builder.enter_frame(12, jitcode2) + builder.resume_put(TAGBOX | (12 << 2), 1, 3) + builder.resume_put(TAGBOX | (8 << 2), 0, 4) metainterp = MockMetaInterp() + metainterp.staticdata = MockStaticData([jitcode1, jitcode2], []) metainterp.cpu = MockCPU() descr = Descr() - descr.rd_resume_bytecode = ResumeBytecode(resume_loop.operations) - descr.rd_bytecode_position = 5 + descr.rd_resume_bytecode = ResumeBytecode(builder.build(), []) + descr.rd_bytecode_position = len(descr.rd_resume_bytecode.opcodes) rebuild_from_resumedata(metainterp, "myframe", descr) assert len(metainterp.framestack) == 2 f = metainterp.framestack[-1] @@ -138,8 +136,13 @@ assert f.registers_i[3].getint() == 12 + 3 assert f2.registers_i[4].getint() == 8 + 3 assert f2.registers_i[2].getint() == 11 + 3 + + builder.leave_frame() + builder.resume_put(TAGBOX | (10 << 2), 0, 1) - descr.rd_bytecode_position = 7 + descr.rd_resume_bytecode = ResumeBytecode(builder.build(), []) + descr.rd_bytecode_position = len(descr.rd_resume_bytecode.opcodes) + metainterp.framestack = [] rebuild_from_resumedata(metainterp, "myframe", descr) assert len(metainterp.framestack) == 1 From noreply at buildbot.pypy.org Wed Jan 22 18:23:24 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:24 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: finish fixing direct tests Message-ID: <20140122172324.C4EF41C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68852:edc8ed962047 Date: 2014-01-18 14:03 +0100 http://bitbucket.org/pypy/pypy/changeset/edc8ed962047/ Log: finish fixing direct tests diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -157,6 +157,9 @@ self.builder.resume_setfield_gc(structpos, fieldpos, descr) elif op.getopnum() == rop.RESUME_SET_PC: self.builder.resume_set_pc(op.getarg(0).getint()) + elif op.getopnum() == rop.RESUME_CLEAR: + self.builder.resume_clear(op.getarg(0).getint(), + op.getarg(1).getint()) else: raise Exception("strange operation") diff --git 
a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -2,7 +2,7 @@ from rpython.jit.metainterp.history import ConstInt (UNUSED, ENTER_FRAME, LEAVE_FRAME, RESUME_PUT, - RESUME_NEW, RESUME_SETFIELD_GC, RESUME_SET_PC) = range(7) + RESUME_NEW, RESUME_SETFIELD_GC, RESUME_SET_PC, RESUME_CLEAR) = range(8) TAGCONST = 0x0 TAGVIRTUAL = 0x2 @@ -83,3 +83,9 @@ self.write_short(structpos) self.write_short(fieldpos) self.write_short(descr.global_descr_index) + + def resume_clear(self, frame_pos, pos_in_frame): + self.write(RESUME_CLEAR) + self.write(frame_pos) + self.write(pos_in_frame) + From noreply at buildbot.pypy.org Wed Jan 22 18:23:19 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:19 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: finish this test Message-ID: <20140122172319.A09D71C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68848:387a4123aa1c Date: 2014-01-17 18:57 +0100 http://bitbucket.org/pypy/pypy/changeset/387a4123aa1c/ Log: finish this test diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -155,23 +155,22 @@ def test_bridge(self): jitcode1 = JitCode("jitcode") jitcode1.setup(num_regs_i=13, num_regs_r=0, num_regs_f=0) - base = parse(""" - [] - enter_frame(-1, descr=jitcode1) - resume_put(42, 0, 0) - # here is the split caused by a guard - resume_put(1, 0, 1) - leave_frame() - """, namespace={'jitcode1': jitcode1}) - bridge = parse(""" - [] - resume_put(2, 0, 1) - """) + jitcode1.global_index = 0 + builder = ResumeBytecodeBuilder() + builder.enter_frame(-1, jitcode1) + builder.resume_put(TAGBOX | (42 << 2), 0, 0) + rd1 = builder.build() + lgt1 = len(rd1.opcodes) + + builder = ResumeBytecodeBuilder() + builder.resume_put(TAGBOX | (2 << 2), 0, 1) + rd2 = builder.build() + lgt2 = len(rd2.opcodes) + descr = Descr() - descr.rd_bytecode_position = 1 - parent = ResumeBytecode(base.operations) - b = ResumeBytecode(bridge.operations, parent=parent, - parent_position=2) + descr.rd_bytecode_position = lgt2 + parent = ResumeBytecode(rd1, []) + b = ResumeBytecode(rd2, [], parent, parent_position=lgt1) descr.rd_resume_bytecode = b metainterp = MockMetaInterp() metainterp.cpu = MockCPU() From noreply at buildbot.pypy.org Wed Jan 22 18:23:26 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:26 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: hack enough to start passing some frontend tests Message-ID: <20140122172326.192DD1C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68853:06109163139d Date: 2014-01-18 14:29 +0100 http://bitbucket.org/pypy/pypy/changeset/06109163139d/ Log: hack enough to start passing some frontend tests diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -13,7 +13,8 @@ from rpython.jit.metainterp.logger import Logger from rpython.jit.metainterp.optimizeopt.util import args_dict_box from rpython.jit.metainterp.resoperation import rop -from rpython.jit.resume.frontend import ResumeRecorder +from rpython.jit.resume.frontend import ResumeRecorder,\ + rebuild_from_resumedata from rpython.rlib import nonconst, rstack from rpython.rlib.debug import debug_start, debug_stop, 
debug_print, make_sure_not_resized from rpython.rlib.jit import Counters @@ -2479,8 +2480,8 @@ vinfo = self.jitdriver_sd.virtualizable_info ginfo = self.jitdriver_sd.greenfield_info self.framestack = [] - inputlocs = resume2.rebuild_from_resumedata(self, deadframe, - resumedescr) + inputlocs = rebuild_from_resumedata(self, deadframe, + resumedescr) virtualizable_boxes = [] virtualref_boxes = [] # diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -4,7 +4,8 @@ INT, REF, Const from rpython.jit.metainterp import history from rpython.jit.resume.reader import AbstractResumeReader -from rpython.jit.resume.rescode import TAGBOX, TAGCONST, TAGSMALLINT, TAGVIRTUAL +from rpython.jit.resume.rescode import TAGBOX, TAGCONST, TAGSMALLINT,\ + TAGVIRTUAL, CLEAR_POSITION # class AbstractResumeReader(object): @@ -150,14 +151,28 @@ pos += 1 return curbh - def store_int_value(self, curbh, i, jitframe_pos): + def store_int_value(self, curbh, i, encoded_pos): + if encoded_pos == CLEAR_POSITION: + return + tag, index = self.decode(encoded_pos) + if tag & TAGBOX: + curbh.registers_i[i] = self.cpu.get_int_value(self.deadframe, index) + return + xxx if jitframe_pos >= 0: curbh.registers_i[i] = self.cpu.get_int_value( self.deadframe, jitframe_pos) elif jitframe_pos < -1: curbh.registers_i[i] = self.consts[-jitframe_pos - 2].getint() - def store_ref_value(self, curbh, i, jitframe_pos): + def store_ref_value(self, curbh, i, encoded_pos): + if encoded_pos == CLEAR_POSITION: + return + tag, index = self.decode(encoded_pos) + if tag & TAGBOX: + curbh.registers_r[i] = self.cpu.get_ref_value(self.deadframe, index) + return + xxxx if jitframe_pos >= 0: curbh.registers_r[i] = self.cpu.get_ref_value( self.deadframe, jitframe_pos) @@ -165,6 +180,7 @@ curbh.registers_r[i] = self.consts[-jitframe_pos - 2].getref_base() def store_float_value(self, curbh, i, jitframe_pos): + xxx if jitframe_pos >= 0: curbh.registers_f[i] = self.cpu.get_float_value( self.deadframe, jitframe_pos) @@ -182,7 +198,7 @@ AbstractResumeReader.__init__(self, metainterp.staticdata) def get_box_value(self, encoded_pos, TP): - if encoded_pos == -1: + if encoded_pos == CLEAR_POSITION: return None if encoded_pos in self.cache: return self.cache[encoded_pos] @@ -314,7 +330,7 @@ #finally: # rstack._stack_criticalcode_stop() cpu = metainterp_sd.cpu - last_bhinterp = DirectResumeReader(interpbuilder, cpu, + last_bhinterp = DirectResumeReader(metainterp_sd, interpbuilder, cpu, deadframe).rebuild(faildescr) return last_bhinterp diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -1,11 +1,9 @@ -import sys -from rpython.jit.metainterp.history import ConstInt from rpython.jit.resume import rescode class ResumeFrame(object): def __init__(self, jitcode): - self.registers = [-1] * jitcode.num_regs() + self.registers = [rescode.CLEAR_POSITION] * jitcode.num_regs() self.jitcode = jitcode self.pc = -1 @@ -113,6 +111,12 @@ descr = self.staticdata.opcode_descrs[self.read_short(pos + 5)] self.resume_setfield_gc(structpos, fieldpos, descr) pos += 7 + elif op == rescode.RESUME_CLEAR: + xxx + elif op == rescode.RESUME_SET_PC: + pc = self.read_short(pos + 1) + self.resume_set_pc(pc) + pos += 3 else: xxx self.bytecode = None diff --git a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -11,6 +11,8 @@ 
TAGOFFSET = 2 +CLEAR_POSITION = 0xffff + class ResumeBytecode(object): def __init__(self, opcodes, consts, parent=None, parent_position=-1, loop=None): From noreply at buildbot.pypy.org Wed Jan 22 18:23:21 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:21 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: finish this test Message-ID: <20140122172321.12BF21C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68849:abb0362b055f Date: 2014-01-18 13:51 +0100 http://bitbucket.org/pypy/pypy/changeset/abb0362b055f/ Log: finish this test diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1873,7 +1873,6 @@ self.attach_debug_info(op) return resbox - def attach_debug_info(self, op): if (not we_are_translated() and op is not None and getattr(self, 'framestack', None)): diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -201,18 +201,20 @@ return self.consts[pos] else: assert tag == TAGVIRTUAL - virtual = self.virtual_list[pos] + virtual = self.virtuals[pos] virtual_box = self.allocate_struct(virtual) for fielddescr, encoded_field_pos in virtual.fields.iteritems(): - self.setfield(virtual, fielddescr, encoded_field_pos) + self.setfield_gc(virtual_box, encoded_field_pos, fielddescr) self.cache[encoded_pos] = virtual_box return virtual_box def allocate_struct(self, virtual): return self.metainterp.execute_and_record(rop.NEW, virtual.descr) - def setfield(self, virtual, fielddescr, encoded_field_pos): - xxx + def setfield_gc(self, box, encoded_field_pos, fielddescr): + field_box = self.get_box_value(encoded_field_pos, fielddescr.kind) + self.metainterp.execute_and_record(rop.SETFIELD_GC, fielddescr, + box, field_box) def store_int_box(self, res, pos, miframe, i, jitframe_pos): box = self.get_box_value(jitframe_pos, INT) @@ -227,7 +229,14 @@ if box is None: return miframe.registers_r[i] = box - res[-1][pos] = box + tag, index = self.decode(jitframe_pos) + if tag == TAGBOX: + res[-1][pos] = box + elif tag == TAGVIRTUAL: + self.metainterp.history.record(rop.RESUME_PUT, + [box, ConstInt(len(res) - 1), + ConstInt(pos)], None, None) + # we can't have virtual ints return xxx if jitframe_pos in self.cache: diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -25,9 +25,7 @@ def __init__(self, staticdata): self.framestack = [] self.staticdata = staticdata - self.consts = [] # XXX cache? 
- self.virtuals = {} - self.virtual_list = [] + self.virtuals = [] def rebuild(self, faildescr): self._rebuild_until(faildescr.rd_resume_bytecode, @@ -65,17 +63,17 @@ def encode(self, box): xxx - def resume_new(self, box, descr): - xxx - # XXX make it a list - v = Virtual(len(self.virtual_list), descr) - self.virtuals[box] = v - self.virtual_list.append(v) + def resume_new(self, v_pos, descr): + v = Virtual(v_pos, descr) + if v_pos >= len(self.virtuals): + self.virtuals += [None] * (len(self.virtuals) - v_pos + 1) + self.virtuals[v_pos] = v - def resume_setfield_gc(self, box, fieldbox, descr): + def resume_setfield_gc(self, pos, fieldpos, descr): # XXX optimize fields - xxx - self.virtuals[box].fields[descr] = self.encode(fieldbox) + tag, index = self.decode(pos) + assert tag == rescode.TAGVIRTUAL # for now + self.virtuals[index].fields[descr] = fieldpos def resume_clear(self, frame_no, frontend_position): xxx diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -5,7 +5,7 @@ ConstInt from rpython.jit.resume.frontend import rebuild_from_resumedata from rpython.jit.resume.rescode import ResumeBytecode, TAGBOX,\ - ResumeBytecodeBuilder, TAGCONST, TAGSMALLINT + ResumeBytecodeBuilder, TAGCONST, TAGSMALLINT, TAGVIRTUAL from rpython.jit.resume.reader import AbstractResumeReader from rpython.jit.resume.test.support import MockStaticData from rpython.jit.metainterp.resoperation import rop @@ -44,12 +44,20 @@ class AnyBox(object): def __eq__(self, other): return True - + +class EqConstInt(ConstInt): + def __eq__(self, other): + return self.same_box(other) + +class mylist(list): + def record(self, opnum, argboxes, resbox, descr): + self.append(tuple([opnum, descr] + argboxes)) + class MockMetaInterp(object): def __init__(self): self.cpu = MockCPU() self.framestack = [] - self.history = [] + self.history = mylist() def execute_and_record(self, *args): self.history.append(args) @@ -160,12 +168,12 @@ builder.enter_frame(-1, jitcode1) builder.resume_put(TAGBOX | (42 << 2), 0, 0) rd1 = builder.build() - lgt1 = len(rd1.opcodes) + lgt1 = len(rd1) builder = ResumeBytecodeBuilder() builder.resume_put(TAGBOX | (2 << 2), 0, 1) rd2 = builder.build() - lgt2 = len(rd2.opcodes) + lgt2 = len(rd2) descr = Descr() descr.rd_bytecode_position = lgt2 @@ -173,6 +181,7 @@ b = ResumeBytecode(rd2, [], parent, parent_position=lgt1) descr.rd_resume_bytecode = b metainterp = MockMetaInterp() + metainterp.staticdata = MockStaticData([jitcode1], []) metainterp.cpu = MockCPU() rebuild_from_resumedata(metainterp, "myframe", descr) f = metainterp.framestack[-1] @@ -182,23 +191,32 @@ def test_new(self): jitcode1 = JitCode("jitcode") + jitcode1.global_index = 0 jitcode1.setup(num_regs_i=0, num_regs_r=1, num_regs_f=0) - base = parse(""" - [] - enter_frame(-1, descr=jitcode) - p0 = resume_new() - resume_setfield_gc(p0, 13) - resume_put(p0, 0, 0) - leave_frame() - """, namespace={'jitcode':jitcode1}) + builder = ResumeBytecodeBuilder() descr = Descr() - descr.rd_resume_bytecode = ResumeBytecode(base.operations) - descr.rd_bytecode_position = 4 + descr.global_descr_index = 0 + builder.enter_frame(-1, jitcode1) + builder.resume_new(0, descr) + d2 = Descr() + d2.kind = INT + d2.global_descr_index = 1 + builder.resume_setfield_gc(TAGVIRTUAL | (0 << 2), + TAGSMALLINT | (1 << 2), d2) + builder.resume_put(TAGVIRTUAL | (0 << 2), 0, 0) + rd = builder.build() + descr = Descr() + descr.rd_resume_bytecode = 
ResumeBytecode(rd, []) + descr.rd_bytecode_position = len(rd) metainterp = MockMetaInterp() + metainterp.staticdata = MockStaticData([jitcode1], [descr, d2]) metainterp.cpu = MockCPU() rebuild_from_resumedata(metainterp, "myframe", descr) - assert metainterp.history == [(rop.NEW, None), - (rop.SETFIELD_GC, None, AnyBox())] + expected = [(rop.NEW, descr), + (rop.SETFIELD_GC, d2, AnyBox(), EqConstInt(1)), + (rop.RESUME_PUT, None, AnyBox(), EqConstInt(0), + EqConstInt(0))] + assert metainterp.history == expected def test_reconstructing_resume_reader(self): jitcode1 = JitCode("jitcode") From noreply at buildbot.pypy.org Wed Jan 22 18:23:27 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:27 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: Finish few bits and bobs to pass test_loop again Message-ID: <20140122172327.6183A1C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68854:2b6d417a7f05 Date: 2014-01-18 15:00 +0100 http://bitbucket.org/pypy/pypy/changeset/2b6d417a7f05/ Log: Finish few bits and bobs to pass test_loop again diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -817,7 +817,7 @@ else: inline_short_preamble = True try: - optimize_trace(metainterp_sd, new_trace, state.enable_opts, inline_short_preamble) + optimize_trace(metainterp_sd, new_trace, state.enable_opts, inline_short_preamble, inpframes=new_trace.inputframes) except InvalidLoop: debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -47,7 +47,8 @@ return optimizations, unroll -def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True): +def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True, + inpframes=None): """Optimize loop.operations to remove internal overheadish operations. 
""" from rpython.jit.resume.backend import flatten @@ -60,7 +61,8 @@ if unroll: optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) else: - optimizer = Optimizer(metainterp_sd, loop, optimizations) + optimizer = Optimizer(metainterp_sd, loop, optimizations, + inpframes=inpframes) optimizer.propagate_all_forward() finally: debug_stop("jit-optimize") diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -344,7 +344,8 @@ class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop, optimizations=None): + def __init__(self, metainterp_sd, loop, optimizations=None, + inpframes=None): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop @@ -367,7 +368,7 @@ self.call_pure_results = loop.call_pure_results self.set_optimizations(optimizations) - self.resumebuilder = OptResumeBuilder(self) + self.resumebuilder = OptResumeBuilder(self, inpframes) self.setup() def set_optimizations(self, optimizations): diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -155,9 +155,13 @@ if encoded_pos == CLEAR_POSITION: return tag, index = self.decode(encoded_pos) - if tag & TAGBOX: + if tag == TAGBOX: curbh.registers_i[i] = self.cpu.get_int_value(self.deadframe, index) - return + elif tag == TAGSMALLINT: + curbh.registers_i[i] = index + else: + xxx + return xxx if jitframe_pos >= 0: curbh.registers_i[i] = self.cpu.get_int_value( @@ -169,9 +173,13 @@ if encoded_pos == CLEAR_POSITION: return tag, index = self.decode(encoded_pos) - if tag & TAGBOX: + if tag == TAGBOX: curbh.registers_r[i] = self.cpu.get_ref_value(self.deadframe, index) - return + elif tag == TAGCONST: + curbh.registers_r[i] = self.consts[index].getref_base() + else: + xxx + return xxxx if jitframe_pos >= 0: curbh.registers_r[i] = self.cpu.get_ref_value( @@ -207,6 +215,9 @@ if TP == INT: val = self.metainterp.cpu.get_int_value(self.deadframe, pos) res = BoxInt(val) + elif TP == REF: + val = self.metainterp.cpu.get_ref_value(self.deadframe, pos) + res = BoxPtr(val) else: xxx self.cache[encoded_pos] = res diff --git a/rpython/jit/resume/optimizer.py b/rpython/jit/resume/optimizer.py --- a/rpython/jit/resume/optimizer.py +++ b/rpython/jit/resume/optimizer.py @@ -4,18 +4,25 @@ from rpython.jit.codewriter.jitcode import JitCode class ResumeFrame(object): - def __init__(self, pc, jitcode): + def __init__(self, pc, jitcode, no=-1): self.pc = pc - assert isinstance(jitcode, JitCode) - self.jitcode = jitcode - self.values = [None] * jitcode.num_regs() + if jitcode is None: + assert no >= 0 + self.values = [None] * no + else: + assert isinstance(jitcode, JitCode) + self.jitcode = jitcode + self.values = [None] * jitcode.num_regs() class OptResumeBuilder(object): - def __init__(self, opt): + def __init__(self, opt, inpframes=None): self.framestack = [] self.last_flushed_pos = 0 self.opt = opt self.virtuals = {} + if inpframes is not None: + for frame in inpframes: + self.framestack.append(ResumeFrame(0, None, len(frame))) def enter_frame(self, pc, jitcode): self.framestack.append(ResumeFrame(pc, jitcode)) diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -58,8 +58,7 @@ self.virtuals[index].fields[descr] = fieldpos def resume_clear(self, 
frame_no, frontend_position): - xxx - self.framestack[frame_no].registers[frontend_position] = -1 + self.framestack[frame_no].registers[frontend_position] = rescode.CLEAR_POSITION def resume_set_pc(self, pc): self.framestack[-1].pc = pc @@ -112,7 +111,10 @@ self.resume_setfield_gc(structpos, fieldpos, descr) pos += 7 elif op == rescode.RESUME_CLEAR: - xxx + frame_pos = self.read(pos + 1) + pos_in_frame = self.read(pos + 2) + self.resume_clear(frame_pos, pos_in_frame) + pos += 3 elif op == rescode.RESUME_SET_PC: pc = self.read_short(pos + 1) self.resume_set_pc(pc) @@ -140,11 +142,17 @@ def resume_new(self, v_pos, descr): self.l.append("%d = resume_new %d" % (v_pos, descr.global_descr_index)) + def leave_frame(self): + self.l.append("leave_frame") + def resume_setfield_gc(self, structpos, fieldpos, descr): stag, sindex = self.decode(structpos) ftag, findex = self.decode(fieldpos) self.l.append("resume_setfield_gc (%d, %d) (%d, %d) %d" % ( stag, sindex, ftag, findex, descr.global_descr_index)) + def resume_set_pc(self, pc): + self.l.append("set_resume_pc %d" % pc) + def finish(self): return "\n".join(self.l) diff --git a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -63,7 +63,8 @@ def encode_const(self, const): if isinstance(const, ConstInt) and 0 <= const.getint() < 0x4000: return TAGSMALLINT | (const.getint() << 2) - xxx + self.consts.append(const) + return TAGCONST | ((len(self.consts) - 1) << 2) def resume_set_pc(self, pc): self.write(RESUME_SET_PC) From noreply at buildbot.pypy.org Wed Jan 22 18:23:22 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:22 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: port one more test Message-ID: <20140122172322.5E12F1C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68850:d14ebb76353f Date: 2014-01-18 13:57 +0100 http://bitbucket.org/pypy/pypy/changeset/d14ebb76353f/ Log: port one more test diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -220,24 +220,23 @@ def test_reconstructing_resume_reader(self): jitcode1 = JitCode("jitcode") + jitcode1.global_index = 0 jitcode1.setup(num_regs_i=2, num_regs_f=0, num_regs_r=0) jitcode2 = JitCode("jitcode2") + jitcode2.global_index = 1 jitcode2.setup(num_regs_i=1, num_regs_f=0, num_regs_r=0) - resume_loop = parse(""" - [] - enter_frame(-1, descr=jitcode1) - resume_put(11, 0, 1) - enter_frame(12, descr=jitcode2) - resume_put(12, 1, 0) - resume_put(8, 0, 0) - leave_frame() - leave_frame() - """, namespace={'jitcode1': jitcode1, - 'jitcode2': jitcode2}) + builder = ResumeBytecodeBuilder() + builder.enter_frame(-1, jitcode1) + builder.resume_put(TAGBOX | (11 << 2), 0, 1) + builder.enter_frame(12, jitcode2) + builder.resume_put(TAGBOX | (12 << 2), 1, 0) + builder.resume_put(TAGBOX | (8 << 2), 0, 0) descr = Descr() - descr.rd_resume_bytecode = ResumeBytecode(resume_loop.operations) - descr.rd_bytecode_position = 5 - locs = rebuild_locs_from_resumedata(descr) + rd = builder.build() + descr.rd_resume_bytecode = ResumeBytecode(rd, []) + descr.rd_bytecode_position = len(rd) + staticdata = MockStaticData([jitcode1, jitcode2], []) + locs = rebuild_locs_from_resumedata(descr, staticdata) assert locs == [[8, 11], [12]] class AssemblerExecuted(Exception): From noreply at buildbot.pypy.org Wed Jan 22 18:23:28 2014 
From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:28 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: Refactor the world again - we don't try hard to store stuff on inputframes, Message-ID: <20140122172328.F3AA61C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68855:8e03695f39ba Date: 2014-01-18 15:41 +0100 http://bitbucket.org/pypy/pypy/changeset/8e03695f39ba/ Log: Refactor the world again - we don't try hard to store stuff on inputframes, instead we reconstruct stuff when making a bridge diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -2,7 +2,7 @@ from rpython.jit.backend import model from rpython.jit.backend.llgraph import support from rpython.jit.resume.backend import ResumeBuilder,\ - LivenessAnalyzer, compute_vars_longevity, flatten + LivenessAnalyzer, compute_vars_longevity from rpython.jit.metainterp.history import AbstractDescr from rpython.jit.metainterp.history import Const, getkind from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID @@ -30,12 +30,13 @@ self.start_pos = start_pos class LLGraphResumeBuilder(ResumeBuilder): - def __init__(self, frontend_liveness, descr, inputframes, inputlocs): + def __init__(self, frontend_liveness, descr, inputargs, inputlocs): self.liveness = LivenessAnalyzer() self.numbering = {} self.framestack = [] locs = None start_pos = 0 + xxx if inputlocs is not None: locs = [] for frame_pos, frame in enumerate(inputframes): @@ -101,7 +102,7 @@ has_been_freed = False invalid = False - def __init__(self, inputframes, operations, descr, locs=None): + def __init__(self, inputargs, operations, descr, locs=None): # We need to clone the list of operations because the # front-end will mutate them under our feet again. We also # need to make sure things get freed. 
@@ -114,13 +115,13 @@ newbox = _cache[box] = box.__class__() return newbox # - self.inputargs = map(mapping, flatten(inputframes)) + self.inputargs = map(mapping, inputargs) self.operations = [] - x = compute_vars_longevity(inputframes, operations, descr) + x = compute_vars_longevity(inputargs, operations, descr) longevity, last_real_usage, frontend_liveness = x resumebuilder = LLGraphResumeBuilder(frontend_liveness, descr, - inputframes, locs) + inputargs, locs) for op in operations: if op.is_resume(): resumebuilder.process(op) @@ -295,11 +296,11 @@ clt._llgraph_alltraces = [lltrace] self._record_labels(lltrace) - def compile_bridge(self, logger, faildescr, inputframes, locs, operations, + def compile_bridge(self, logger, faildescr, inputargs, locs, operations, original_loop_token, log=True): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() - lltrace = LLTrace(inputframes, operations, faildescr, locs) + lltrace = LLTrace(inputargs, operations, faildescr, locs) faildescr._llgraph_bridge = lltrace clt._llgraph_alltraces.append(lltrace) self._record_labels(lltrace) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -110,7 +110,7 @@ self._debug = v return r - def rebuild_faillocs_from_descr(self, descr, inputframes, loc_positions): + def rebuild_faillocs_from_descr(self, descr, inputargs, loc_positions): locs = [] GPR_REGS = len(self.cpu.gen_regs) XMM_REGS = len(self.cpu.float_regs) @@ -118,27 +118,23 @@ coeff = 1 else: coeff = 2 - all = {} - for i, frame in enumerate(inputframes): - inputlocs = loc_positions[i] - assert len(inputlocs) == len(frame) - for j, item in enumerate(frame): - if item is None or isinstance(item, Const) or item in all: - continue - all[item] = None - pos = inputlocs[j] - if pos < GPR_REGS: - locs.append(self.cpu.gen_regs[pos]) - elif pos < (GPR_REGS + XMM_REGS * coeff): - pos = (pos - GPR_REGS) // coeff - locs.append(self.cpu.float_regs[pos]) - else: - stack_pos = pos - self.cpu.JITFRAME_FIXED_SIZE - assert stack_pos >= 0 - tp = item.type - locs.append(self.new_stack_loc(stack_pos, - pos * WORD, tp)) - return locs[:] + assert len(inputargs) == len(loc_positions) + locs = [None] * len(inputargs) + for i in range(len(inputargs)): + pos = loc_positions[i] + item = inputargs[i] + if pos < GPR_REGS: + locs[i] = self.cpu.gen_regs[pos] + elif pos < (GPR_REGS + XMM_REGS * coeff): + pos = (pos - GPR_REGS) // coeff + locs[i] = self.cpu.float_regs[pos] + else: + stack_pos = pos - self.cpu.JITFRAME_FIXED_SIZE + assert stack_pos >= 0 + tp = item.type + locs[i] = self.new_stack_loc(stack_pos, + pos * WORD, tp) + return locs def store_info_on_descr(self, startspos, guardtok, resume_bytecode): withfloats = guardtok.has_floats diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -258,7 +258,7 @@ staticdata = MockStaticData([jitcode], []) locs = rebuild_locs_from_resumedata(faildescr1, staticdata) - self.cpu.compile_bridge(None, faildescr1, [[i1b]], locs, bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i1b], locs, bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) @@ -299,6 +299,10 @@ i3 = BoxInt() staticdata = MockStaticData([jitcode], []) bridge = [ + ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, + 
descr=jitcode), + ResOperation(rop.RESUME_PUT, [i1b, ConstInt(0), ConstInt(1)], + None), ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.RESUME_PUT, [i3, ConstInt(0), ConstInt(2)], None), @@ -307,14 +311,14 @@ ] locs = rebuild_locs_from_resumedata(faildescr1, staticdata) - self.cpu.compile_bridge(None, faildescr1, [[None, i1b, None]], + self.cpu.compile_bridge(None, faildescr1, [i1b], locs, bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 locs = rebuild_locs_from_resumedata(fail, staticdata) - res = self.cpu.get_int_value(deadframe, locs[0][1]) + res = self.cpu.get_int_value(deadframe, locs[0]) assert res == 20 def test_compile_big_bridge_out_of_small_loop(self): @@ -337,6 +341,8 @@ staticdata.alljitcodes.append(jitcode1) i1list = [BoxInt() for i in range(150)] bridge = [ + ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), + ResOperation(rop.RESUME_PUT, [i0, ConstInt(0), ConstInt(0)], None), ResOperation(rop.ENTER_FRAME, [ConstInt(13)], None, descr=jitcode1) ] iprev = i0 @@ -351,14 +357,14 @@ descr=BasicFinalDescr(4))) faillocs = rebuild_locs_from_resumedata(faildescr1, staticdata) - self.cpu.compile_bridge(None, faildescr1, [[i0]], faillocs, bridge, looptoken) + self.cpu.compile_bridge(None, faildescr1, [i0], faillocs, bridge, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail, staticdata) assert fail.identifier == 3 for i in range(len(i1list)): - res = self.cpu.get_int_value(deadframe, locs[1][i]) + res = self.cpu.get_int_value(deadframe, locs[i + 1]) assert res == 2 + i def test_finish(self): @@ -455,8 +461,8 @@ deadframe = self.cpu.execute_token(looptoken, 0, 10) fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail, staticdata) - assert self.cpu.get_int_value(deadframe, locs[0][0]) == 0 - assert self.cpu.get_int_value(deadframe, locs[0][1]) == 55 + assert self.cpu.get_int_value(deadframe, locs[0]) == 0 + assert self.cpu.get_int_value(deadframe, locs[1]) == 55 def test_int_operations(self): from rpython.jit.metainterp.test.test_executor import get_int_tests @@ -526,7 +532,7 @@ fail = self.cpu.get_latest_descr(deadframe) if (z == boom) ^ reversed: locs = rebuild_locs_from_resumedata(fail, staticdata) - pos = locs[0][0] + pos = locs[0] assert fail.identifier == 1 else: pos = 0 @@ -1303,10 +1309,10 @@ assert fail.identifier == 42 # for k in range(intboxes): - got = self.cpu.get_int_value(deadframe, locs[0][k]) + got = self.cpu.get_int_value(deadframe, locs[k]) assert got == expvalues[k] for k in range(floatboxes): - got = self.cpu.get_float_value(deadframe, locs[0][k + intboxes]) + got = self.cpu.get_float_value(deadframe, locs[k + intboxes]) assert got == expvalues[k + intboxes] def test_jump(self): @@ -1433,13 +1439,13 @@ else: refvals.append(val) for i, val in enumerate(intvals): - got = self.cpu.get_int_value(deadframe, locs[0][i]) + got = self.cpu.get_int_value(deadframe, locs[i]) assert got == val for i, val in enumerate(refvals): - got = self.cpu.get_ref_value(deadframe, locs[0][i + len(intvals)]) + got = self.cpu.get_ref_value(deadframe, locs[i + len(intvals)]) assert got == val for i, val in enumerate(floatvals): - got = self.cpu.get_float_value(deadframe, locs[0][ + got = self.cpu.get_float_value(deadframe, locs[ i + len(intvals) + len(refvals)]) assert got == val @@ -1478,7 +1484,7 @@ ResOperation(rop.JUMP, [f3]+fboxes2[1:], 
None, descr=targettoken), ] - self.cpu.compile_bridge(None, faildescr1, [fboxes2], + self.cpu.compile_bridge(None, faildescr1, fboxes2, rebuild_locs_from_resumedata(faildescr1, staticdata), bridge, looptoken) @@ -1490,11 +1496,11 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 locs = rebuild_locs_from_resumedata(fail, staticdata) - res = self.cpu.get_float_value(deadframe, locs[0][0]) + res = self.cpu.get_float_value(deadframe, locs[0]) assert longlong.getrealfloat(res) == 8.5 for i in range(1, len(fboxes)): got = longlong.getrealfloat(self.cpu.get_float_value( - deadframe, locs[0][i])) + deadframe, locs[i])) assert got == 13.5 + 6.73 * i def test_compile_bridge_spilled_float(self): @@ -1526,9 +1532,9 @@ fail = self.cpu.get_latest_descr(deadframe) assert loop.operations[-3].getdescr() is fail is faildescr1 locs = rebuild_locs_from_resumedata(fail, staticdata) - f1 = self.cpu.get_float_value(deadframe, locs[0][0]) - f2 = self.cpu.get_float_value(deadframe, locs[0][1]) - f3 = self.cpu.get_float_value(deadframe, locs[0][2]) + f1 = self.cpu.get_float_value(deadframe, locs[0]) + f2 = self.cpu.get_float_value(deadframe, locs[1]) + f3 = self.cpu.get_float_value(deadframe, locs[2]) assert longlong.getrealfloat(f1) == 132.25 assert longlong.getrealfloat(f2) == 0.75 assert longlong.getrealfloat(f3) == 133.0 @@ -1537,12 +1543,19 @@ faildescr3 = BasicFailDescr(103) zero = BoxInt() bridgeops = [ + ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, descr=jitcode), + ResOperation(rop.RESUME_PUT, [fboxes[0], ConstInt(0), ConstInt(0)], + None), + ResOperation(rop.RESUME_PUT, [fboxes[1], ConstInt(0), ConstInt(1)], + None), + ResOperation(rop.RESUME_PUT, [fboxes[2], ConstInt(0), ConstInt(2)], + None), ResOperation(rop.SAME_AS, [ConstInt(0)], zero), ResOperation(rop.GUARD_TRUE, [zero], None, descr=faildescr3), ResOperation(rop.LEAVE_FRAME, [], None), ResOperation(rop.FINISH, [], None, descr=faildescr2), ] - self.cpu.compile_bridge(None, faildescr1, [fboxes], + self.cpu.compile_bridge(None, faildescr1, fboxes, locs, bridgeops, looptoken) args = [1, longlong.getfloatstorage(132.25), @@ -1551,9 +1564,9 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 103 locs = rebuild_locs_from_resumedata(fail, staticdata) - f1 = self.cpu.get_float_value(deadframe, locs[0][0]) - f2 = self.cpu.get_float_value(deadframe, locs[0][1]) - f3 = self.cpu.get_float_value(deadframe, locs[0][2]) + f1 = self.cpu.get_float_value(deadframe, locs[0]) + f2 = self.cpu.get_float_value(deadframe, locs[1]) + f3 = self.cpu.get_float_value(deadframe, locs[2]) assert longlong.getrealfloat(f1) == 132.25 assert longlong.getrealfloat(f2) == 0.75 assert longlong.getrealfloat(f3) == 133.0 @@ -2187,7 +2200,7 @@ assert not excvalue deadframe = self.cpu.execute_token(looptoken, 0) locs = rebuild_locs_from_resumedata(faildescr, staticdata) - assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 + assert self.cpu.get_int_value(deadframe, locs[0]) == 1 excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue @@ -2206,7 +2219,7 @@ looptoken = JitCellToken() self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 1) - assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 + assert self.cpu.get_int_value(deadframe, locs[0]) == 1 excvalue = self.cpu.grab_exc_value(deadframe) assert excvalue == yptr @@ -2227,11 +2240,11 @@ self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, 
1) locs = rebuild_locs_from_resumedata(faildescr, staticdata) - assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 + assert self.cpu.get_int_value(deadframe, locs[0]) == 1 excvalue = self.cpu.grab_exc_value(deadframe) assert excvalue == xptr deadframe = self.cpu.execute_token(looptoken, 0) - assert self.cpu.get_int_value(deadframe, locs[0][0]) == 0 + assert self.cpu.get_int_value(deadframe, locs[0]) == 0 excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue @@ -2427,15 +2440,15 @@ assert not called locs = rebuild_locs_from_resumedata(faildescr, staticdata) for j in range(5): - assert self.cpu.get_int_value(frame, locs[0][j]) == j - assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[0][6])) == 1.2 - assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[0][7])) == 3.4 + assert self.cpu.get_int_value(frame, locs[j]) == j + assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[6])) == 1.2 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[7])) == 3.4 frame = self.cpu.execute_token(looptoken, 1, 1, 1, 2, 3, 4, 5, f1, f2) assert called == [tuple(range(1, i + 1))] for j in range(4): - assert self.cpu.get_int_value(frame, locs[0][j + 1]) == j + 1 - assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[0][6])) == 1.2 - assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[0][7])) == 3.4 + assert self.cpu.get_int_value(frame, locs[j + 1]) == j + 1 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[6])) == 1.2 + assert longlong.getrealfloat(self.cpu.get_float_value(frame, locs[7])) == 3.4 def test_force_operations_returning_void(self): values = [] @@ -2445,8 +2458,8 @@ fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail, staticdata) values.append(fail) - values.append(self.cpu.get_int_value(deadframe, locs[0][0])) - values.append(self.cpu.get_int_value(deadframe, locs[0][1])) + values.append(self.cpu.get_int_value(deadframe, locs[0])) + values.append(self.cpu.get_int_value(deadframe, locs[1])) self.cpu.set_savedata_ref(deadframe, random_gcref) FUNC = self.FuncType([llmemory.GCREF, lltype.Signed], lltype.Void) @@ -2482,8 +2495,8 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 locs = rebuild_locs_from_resumedata(fail, staticdata) - assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 - assert self.cpu.get_int_value(deadframe, locs[0][1]) == 10 + assert self.cpu.get_int_value(deadframe, locs[0]) == 1 + assert self.cpu.get_int_value(deadframe, locs[1]) == 10 assert values == [faildescr, 1, 10] assert self.cpu.get_savedata_ref(deadframe) # not NULL assert self.cpu.get_savedata_ref(deadframe) == random_gcref @@ -2495,8 +2508,8 @@ deadframe = self.cpu.force(token) fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail, staticdata) - values.append(self.cpu.get_int_value(deadframe, locs[0][0])) - values.append(self.cpu.get_int_value(deadframe, locs[0][2])) + values.append(self.cpu.get_int_value(deadframe, locs[0])) + values.append(self.cpu.get_int_value(deadframe, locs[2])) self.cpu.set_savedata_ref(deadframe, random_gcref) return 42 @@ -2535,9 +2548,9 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 locs = rebuild_locs_from_resumedata(fail, staticdata) - assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 - assert self.cpu.get_int_value(deadframe, locs[0][1]) == 42 - assert self.cpu.get_int_value(deadframe, locs[0][2]) == 10 + assert self.cpu.get_int_value(deadframe, 
locs[0]) == 1 + assert self.cpu.get_int_value(deadframe, locs[1]) == 42 + assert self.cpu.get_int_value(deadframe, locs[2]) == 10 assert values == [1, 10] assert self.cpu.get_savedata_ref(deadframe) == random_gcref @@ -2550,8 +2563,8 @@ deadframe = self.cpu.force(token) fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail, staticdata) - values.append(self.cpu.get_int_value(deadframe, locs[0][0])) - values.append(self.cpu.get_int_value(deadframe, locs[0][1])) + values.append(self.cpu.get_int_value(deadframe, locs[0])) + values.append(self.cpu.get_int_value(deadframe, locs[1])) self.cpu.set_savedata_ref(deadframe, random_gcref) return 42.5 @@ -2591,10 +2604,10 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 locs = rebuild_locs_from_resumedata(fail, staticdata) - assert self.cpu.get_int_value(deadframe, locs[0][0]) == 1 - x = self.cpu.get_float_value(deadframe, locs[0][2]) + assert self.cpu.get_int_value(deadframe, locs[0]) == 1 + x = self.cpu.get_float_value(deadframe, locs[2]) assert longlong.getrealfloat(x) == 42.5 - assert self.cpu.get_int_value(deadframe, locs[0][1]) == 10 + assert self.cpu.get_int_value(deadframe, locs[1]) == 10 assert values == [1, 10] assert self.cpu.get_savedata_ref(deadframe) == random_gcref @@ -2987,7 +3000,7 @@ fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail, staticdata) assert fail is faildescr - assert self.cpu.get_int_value(deadframe, locs[0][0]) == 9 + assert self.cpu.get_int_value(deadframe, locs[0]) == 9 print 'step 2 ok' print '-'*79 @@ -2998,7 +3011,7 @@ ResOperation(rop.GUARD_NOT_INVALIDATED, [],None, descr=faildescr2), ResOperation(rop.FINISH, [i2], None, descr=BasicFinalDescr(3)) ] - self.cpu.compile_bridge(None, faildescr, [[i2]], locs, ops, looptoken) + self.cpu.compile_bridge(None, faildescr, [i2], locs, ops, looptoken) deadframe = self.cpu.execute_token(looptoken, -42, 9) fail = self.cpu.get_latest_descr(deadframe) @@ -3038,7 +3051,7 @@ ops2 = [ ResOperation(rop.JUMP, [ConstInt(333)], None, descr=labeldescr), ] - self.cpu.compile_bridge(None, faildescr, [[]], [[]], ops2, looptoken) + self.cpu.compile_bridge(None, faildescr, [], [], ops2, looptoken) # run: must not be caught in an infinite loop deadframe = self.cpu.execute_token(looptoken, 16) fail = self.cpu.get_latest_descr(deadframe) @@ -3908,7 +3921,7 @@ fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail, staticdata) assert fail.identifier == 2 - res = self.cpu.get_int_value(deadframe, locs[0][0]) + res = self.cpu.get_int_value(deadframe, locs[0]) assert res == 10 inputargs2 = [i0] @@ -3916,13 +3929,13 @@ ResOperation(rop.INT_SUB, [i0, ConstInt(20)], i2), ResOperation(rop.JUMP, [i2], None, descr=targettoken2), ] - self.cpu.compile_bridge(None, faildescr, [inputargs2], locs, operations2, looptoken) + self.cpu.compile_bridge(None, faildescr, inputargs2, locs, operations2, looptoken) deadframe = self.cpu.execute_token(looptoken, 2) fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail, staticdata) assert fail.identifier == 3 - res = self.cpu.get_int_value(deadframe, locs[0][0]) + res = self.cpu.get_int_value(deadframe, locs[0]) assert res == -10 def test_int_force_ge_zero(self): @@ -3968,7 +3981,7 @@ self.cpu.assembler.set_debug(False) info = self.cpu.compile_loop(None, loop.inputargs, loop.operations, looptoken) locs = rebuild_locs_from_resumedata(faildescr, staticdata) - bridge_info = self.cpu.compile_bridge(None, faildescr, 
[bridge.inputargs], + bridge_info = self.cpu.compile_bridge(None, faildescr, bridge.inputargs, locs, bridge.operations, looptoken) self.cpu.assembler.set_debug(True) # always on untranslated @@ -4069,7 +4082,7 @@ ResOperation(rop.JUMP, [i19], None, descr=targettoken1), ] locs = rebuild_locs_from_resumedata(faildescr1, staticdata) - self.cpu.compile_bridge(None, faildescr1, [inputargs], locs, operations2, looptoken1) + self.cpu.compile_bridge(None, faildescr1, inputargs, locs, operations2, looptoken1) looptoken2 = JitCellToken() inputargs = [BoxInt()] @@ -4096,7 +4109,7 @@ operations = [ ResOperation(rop.FINISH, [], None, descr=BasicFinalDescr(99)) ] - self.cpu.compile_bridge(None, faildescr, [[]], [[]], operations, looptoken) + self.cpu.compile_bridge(None, faildescr, [], [], operations, looptoken) deadframe = self.cpu.execute_token(looptoken, null_box.getref_base()) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 99 @@ -4297,7 +4310,7 @@ deadframe = self.cpu.force(token) fail = self.cpu.get_latest_descr(deadframe) locs = rebuild_locs_from_resumedata(fail, staticdata) - values.append(self.cpu.get_int_value(deadframe, locs[0][0])) + values.append(self.cpu.get_int_value(deadframe, locs[0])) return 42 FUNC = self.FuncType([llmemory.GCREF, lltype.Signed], lltype.Signed) @@ -4326,7 +4339,7 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 23 locs = rebuild_locs_from_resumedata(fail, staticdata) - assert self.cpu.get_int_value(deadframe, locs[0][0]) == 42 + assert self.cpu.get_int_value(deadframe, locs[0]) == 42 # make sure that force reads the registers from a zeroed piece of # memory assert values[0] == 0 @@ -4341,6 +4354,10 @@ bridge = parse(""" [i1, i2, px] + enter_frame(-1, descr=jitcode) + resume_put(i1, 0, 0) + resume_put(i2, 0, 1) + resume_put(px, 0, 2) i3 = int_add(i1, i2) i4 = int_add(i1, i3) i5 = int_add(i1, i4) @@ -4372,9 +4389,9 @@ finish(i1, descr=finaldescr) """, namespace={'finaldescr': finaldescr, 'calldescr2': calldescr2, 'guarddescr': guarddescr, 'func2_ptr': func2_ptr, - 'jitcode2': jitcode2}) + 'jitcode2': jitcode2, 'jitcode': jitcode}) locs = rebuild_locs_from_resumedata(faildescr, staticdata) - self.cpu.compile_bridge(None, faildescr, [bridge.inputargs], locs, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, locs, bridge.operations, looptoken) cpu = self.cpu @@ -4424,7 +4441,7 @@ frame = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, frame) assert len(frame.jf_frame) == frame.jf_frame_info.jfi_frame_depth locs = rebuild_locs_from_resumedata(guarddescr, staticdata) - ref = self.cpu.get_ref_value(frame, locs[0][2]) + ref = self.cpu.get_ref_value(frame, locs[2]) token = lltype.cast_opaque_ptr(jitframe.JITFRAMEPTR, ref) assert token != frame token = token.resolve() @@ -4440,6 +4457,9 @@ def raising(): bridge = parse(""" [i1, i2] + enter_frame(-1, descr=jitcode) + resume_put(i1, 0, 0) + resume_put(i2, 0, 1) enter_frame(1, descr=jitcode2) px = guard_exception(ConstClass(xtp), descr=faildescr2) i3 = int_add(i1, i2) @@ -4471,9 +4491,10 @@ """, namespace={'finaldescr': BasicFinalDescr(42), 'faildescr2': BasicFailDescr(1), 'xtp': xtp, 'jitcode2': jitcode2, + 'jitcode': jitcode, }) locs = rebuild_locs_from_resumedata(faildescr, staticdata) - self.cpu.compile_bridge(None, faildescr, [bridge.inputargs], locs, + self.cpu.compile_bridge(None, faildescr, bridge.inputargs, locs, bridge.operations, looptoken) raise LLException(xtp, xptr) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- 
a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -5,7 +5,6 @@ from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, debug_bridge) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper -from rpython.jit.resume.backend import flatten from rpython.jit.metainterp.history import Const, Box, VOID from rpython.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory @@ -475,7 +474,7 @@ self.update_frame_depth(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) # size_excluding_failure_stuff = self.mc.get_relative_pos() - self.resume_bytecode = regalloc.resumebuilder.finish(None, 0, looptoken) + self.resume_bytecode = regalloc.resumebuilder.finish(looptoken) self.write_pending_failure_recoveries() full_size = self.mc.get_relative_pos() # @@ -513,7 +512,7 @@ return AsmInfo(ops_offset, rawstart + looppos, size_excluding_failure_stuff - looppos) - def assemble_bridge(self, logger, faildescr, inputframes, backend_positions, + def assemble_bridge(self, logger, faildescr, inputargs, backend_positions, operations, original_loop_token, log): self.setup(original_loop_token) @@ -522,12 +521,11 @@ operations = self._inject_debugging_code(faildescr, operations, 'b', descr_number) - arglocs = self.rebuild_faillocs_from_descr(faildescr, inputframes, + arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs, backend_positions) - inputargs = flatten(inputframes) regalloc = RegAlloc(self, self.cpu.translate_support_code) startpos = self.mc.get_relative_pos() - operations = regalloc.prepare_bridge(inputframes, arglocs, + operations = regalloc.prepare_bridge(inputargs, arglocs, operations, self.current_clt.allgcrefs, self.current_clt.frame_info, @@ -536,7 +534,7 @@ frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() self.resume_bytecode = regalloc.resumebuilder.finish( - faildescr.rd_resume_bytecode, faildescr.rd_bytecode_position, original_loop_token) + original_loop_token) self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() # diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -8,7 +8,7 @@ unpack_arraydescr, unpack_fielddescr, unpack_interiorfielddescr) from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.jit.resume.backend import ResumeBuilder,\ - compute_vars_longevity, flatten + compute_vars_longevity from rpython.jit.backend.llsupport.regalloc import (FrameManager, BaseRegalloc, RegisterManager, TempBox, is_comparison_or_ovf_op) from rpython.jit.backend.x86 import rx86 @@ -133,17 +133,16 @@ self.jump_target_descr = None self.final_jump_op = None - def _prepare(self, inputframes, operations, allgcrefs, descr=None, + def _prepare(self, inputargs, operations, allgcrefs, descr=None, locs=None): cpu = self.assembler.cpu self.fm = X86FrameManager(cpu.get_baseofs_of_frame_field()) operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, allgcrefs) # compute longevity of variables - x = compute_vars_longevity(inputframes, operations, descr) + x = compute_vars_longevity(inputargs, operations, descr) longevity, last_real_usage, frontend_liveness = x - self.resumebuilder = ResumeBuilder(self, frontend_liveness, descr, - inputframes, locs) + self.resumebuilder = ResumeBuilder(self, frontend_liveness, descr) self.longevity 
= longevity self.last_real_usage = last_real_usage self.rm = gpr_reg_mgr_cls(self.longevity, @@ -154,7 +153,7 @@ return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): - operations = self._prepare([inputargs], operations, allgcrefs) + operations = self._prepare(inputargs, operations, allgcrefs) self._set_initial_bindings(inputargs, looptoken) # note: we need to make a copy of inputargs because possibly_free_vars # is also used on op args, which is a non-resizable list @@ -165,11 +164,11 @@ self.min_bytes_before_label = 13 return operations - def prepare_bridge(self, inputframes, arglocs, operations, allgcrefs, + def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs, frame_info, descr): - operations = self._prepare(inputframes, operations, allgcrefs, descr, + operations = self._prepare(inputargs, operations, allgcrefs, descr, locs=arglocs) - self._update_bindings(arglocs, inputframes) + self._update_bindings(arglocs, inputargs) self.min_bytes_before_label = 0 return operations @@ -233,11 +232,10 @@ else: return self.xrm.make_sure_var_in_reg(var, forbidden_vars) - def _update_bindings(self, locs, inputframes): + def _update_bindings(self, locs, inputargs): # XXX this should probably go to llsupport/regalloc.py used = {} i = 0 - inputargs = flatten(inputframes) assert len(inputargs) == len(locs) for loc in locs: if loc is None: # xxx bit kludgy diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -301,7 +301,7 @@ pos = 0 else: locs = rebuild_locs_from_resumedata(descr, staticdata) - pos = locs[0][0] + pos = locs[0] result = self.cpu.get_int_value(deadframe, pos) if guard == rop.GUARD_FALSE: assert result == execute(self.cpu, None, @@ -359,7 +359,7 @@ pos = 0 else: locs = rebuild_locs_from_resumedata(descr, staticdata) - pos = locs[0][0] + pos = locs[0] result = self.cpu.get_int_value(deadframe, pos) expected = execute(self.cpu, None, op, None, a, b).value if guard == rop.GUARD_FALSE: diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -15,7 +15,7 @@ from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.inliner import Inliner from rpython.jit.codewriter import heaptracker, longlong -from rpython.jit.resume.backend import flatten + def giveup(): from rpython.jit.metainterp.pyjitpl import SwitchToBlackhole @@ -300,13 +300,13 @@ inputargs, operations, looptoken, log=log, name=name) -def do_compile_bridge(metainterp_sd, faildescr, inputframes, +def do_compile_bridge(metainterp_sd, faildescr, inputargs, inputlocs, operations, original_loop_token, log=True): - metainterp_sd.logger_ops.log_bridge(flatten(inputframes), operations, + metainterp_sd.logger_ops.log_bridge(inputargs, operations, "compiling") assert isinstance(faildescr, AbstractFailDescr) return metainterp_sd.cpu.compile_bridge(metainterp_sd.logger_ops, - faildescr, inputframes, + faildescr, inputargs, inputlocs, operations, original_loop_token, log=log) @@ -364,11 +364,11 @@ if metainterp_sd.warmrunnerdesc is not None: # for tests metainterp_sd.warmrunnerdesc.memory_manager.keep_loop_alive(original_jitcell_token) -def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputframes, +def send_bridge_to_backend(jitdriver_sd, metainterp_sd, faildescr, inputargs, inputlocs, operations, 
original_loop_token): if not we_are_translated(): show_procedures(metainterp_sd) - seen = dict.fromkeys(flatten(inputframes)) + seen = dict.fromkeys(inputargs) TreeLoop.check_consistency_of_branch(operations, seen) if metainterp_sd.warmrunnerdesc is not None: hooks = metainterp_sd.warmrunnerdesc.hooks @@ -400,7 +400,7 @@ ops_offset = asminfo.ops_offset else: ops_offset = None - metainterp_sd.logger_ops.log_bridge(flatten(inputframes), operations, + metainterp_sd.logger_ops.log_bridge(inputargs, operations, None, faildescr, ops_offset) # #if metainterp_sd.warmrunnerdesc is not None: # for tests diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -615,7 +615,6 @@ return 'TargetToken(%d)' % compute_unique_id(self) class TreeLoop(object): - inputframes = None inputargs = None inputlocs = None operations = None @@ -657,7 +656,7 @@ return self.operations def get_display_text(self): # for graphpage.py - return self.name + '\n' + repr(self.inputframes) + return self.name + '\n' + repr(self.inputargs) def show(self, errmsg=None): "NOT_RPYTHON" @@ -666,7 +665,7 @@ def check_consistency(self): # for testing "NOT_RPYTHON" - self.check_consistency_of(self.inputframes, self.operations) + self.check_consistency_of(self.inputargs, self.operations) for op in self.operations: descr = op.getdescr() if op.getopnum() == rop.LABEL and isinstance(descr, TargetToken): @@ -719,7 +718,7 @@ def dump(self): # RPython-friendly - print '%r: inputargs =' % self, self._dump_args(self.inputframes) + print '%r: inputargs =' % self, self._dump_args(self.inputargs) for op in self.operations: args = op.getarglist() print '\t', op.getopname(), self._dump_args(args), \ @@ -753,7 +752,7 @@ class History(object): def __init__(self): - self.inputframes = None + self.inputargs = None self.inputlocs = None self.operations = [] diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1978,7 +1978,7 @@ num_green_args = self.jitdriver_sd.num_green_args original_greenkey = original_boxes[:num_green_args] self.resumekey = compile.ResumeFromInterpDescr(original_greenkey) - self.history.inputframes = [original_boxes[num_green_args:]] + self.history.inputargs = original_boxes[num_green_args:][:] self.seen_loop_header_for_jdindex = -1 try: self.interpret() @@ -2121,7 +2121,7 @@ return ints[:], refs[:], floats[:] def raise_continue_running_normally(self, live_arg_boxes, loop_token): - self.history.inputframes = None + self.history.inputargs = None self.history.inputlocs = None self.history.operations = None # For simplicity, we just raise ContinueRunningNormally here and @@ -2359,7 +2359,7 @@ self.portal_call_depth = -1 # always one portal around self.history = history.History() state = self.rebuild_state_after_failure(resumedescr, deadframe) - self.history.inputframes, self.history.inputlocs = state + self.history.inputargs, self.history.inputlocs = state self.resumerecorder = ResumeRecorder(self, True) finally: rstack._stack_criticalcode_stop() diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -7,15 +7,11 @@ class LivenessAnalyzer(object): - def __init__(self, inputframes=None): + def __init__(self): self.liveness = {} self.frame_starts = [0] self.framestack = [] self.deps = {} - if inputframes is not None: - for frame in 
inputframes: - self.frame_starts.append(self.frame_starts[-1] + len(frame)) - self.framestack.append(frame[:]) def enter_frame(self, pc, jitcode): self.frame_starts.append(self.frame_starts[-1] + jitcode.num_regs()) @@ -93,28 +89,13 @@ raise Exception("should not be called") class ResumeBuilder(object): - def __init__(self, regalloc, frontend_liveness, descr, inputframes=None, - inputlocs=None): + def __init__(self, regalloc, frontend_liveness, descr): self.regalloc = regalloc self.current_attachment = {} self.frontend_liveness = frontend_liveness self.frontend_pos = {} self.virtuals = {} self.builder = ResumeBytecodeBuilder() - if inputlocs is not None: - i = 0 - all = {} - for frame_pos, frame in enumerate(inputframes): - for pos_in_frame, box in enumerate(frame): - if box is None or isinstance(box, Const) or box in all: - loc_pos = -1 - else: - loc_pos = inputlocs[i].get_jitframe_position() - i += 1 - self.frontend_pos[box] = (frame_pos, pos_in_frame) - all[box] = None - if box not in self.current_attachment: - self.current_attachment[box] = loc_pos def get_box_pos(self, box): if box in self.virtuals: @@ -189,34 +170,11 @@ self._mark_visited(v, loc) return self.builder.getpos() - def finish(self, parent, parent_position, clt): - return ResumeBytecode(self.builder.build(), self.builder.consts, - parent, parent_position, - clt) + def finish(self, clt): + return ResumeBytecode(self.builder.build(), self.builder.consts, clt) -def flatten(inputframes): - count = 0 - all = {} - for frame in inputframes: - for x in frame: - if x is not None and x not in all: - assert not isinstance(x, Const) - count += 1 - all[x] = None - inputargs = [None] * count - pos = 0 - all = {} - for frame in inputframes: - for item in frame: - if item is not None and item not in all: - inputargs[pos] = item - all[item] = None - pos += 1 - return inputargs - - -def compute_vars_longevity(inputframes, operations, descr=None): +def compute_vars_longevity(inputargs, operations, descr=None): # compute a dictionary that maps variables to index in # operations that is a "last-time-seen" @@ -228,12 +186,7 @@ last_used = {} last_real_usage = {} frontend_alive = {} - if descr is None: - inputargs = inputframes[0] - liveness_analyzer = LivenessAnalyzer() - else: - inputargs = flatten(inputframes) - liveness_analyzer = LivenessAnalyzer(inputframes) + liveness_analyzer = LivenessAnalyzer() start_pos = 0 for position, op in enumerate(operations): if op.is_guard(): diff --git a/rpython/jit/resume/optimizer.py b/rpython/jit/resume/optimizer.py --- a/rpython/jit/resume/optimizer.py +++ b/rpython/jit/resume/optimizer.py @@ -6,23 +6,16 @@ class ResumeFrame(object): def __init__(self, pc, jitcode, no=-1): self.pc = pc - if jitcode is None: - assert no >= 0 - self.values = [None] * no - else: - assert isinstance(jitcode, JitCode) - self.jitcode = jitcode - self.values = [None] * jitcode.num_regs() + assert isinstance(jitcode, JitCode) + self.jitcode = jitcode + self.values = [None] * jitcode.num_regs() class OptResumeBuilder(object): - def __init__(self, opt, inpframes=None): + def __init__(self, opt): self.framestack = [] self.last_flushed_pos = 0 self.opt = opt self.virtuals = {} - if inpframes is not None: - for frame in inpframes: - self.framestack.append(ResumeFrame(0, None, len(frame))) def enter_frame(self, pc, jitcode): self.framestack.append(ResumeFrame(pc, jitcode)) diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -68,8 +68,6 @@ def 
_rebuild_until(self, rb, position): self.consts = rb.consts - if rb.parent is not None: - self._rebuild_until(rb.parent, rb.parent_position) self.interpret_until(rb, position) def read(self, pos): diff --git a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -14,12 +14,9 @@ CLEAR_POSITION = 0xffff class ResumeBytecode(object): - def __init__(self, opcodes, consts, parent=None, parent_position=-1, - loop=None): + def __init__(self, opcodes, consts, loop=None): self.opcodes = opcodes - self.parent = parent self.consts = consts - self.parent_position = parent_position self.loop = loop def dump(self, staticdata, resume_pos): diff --git a/rpython/jit/resume/test/test_backend.py b/rpython/jit/resume/test/test_backend.py --- a/rpython/jit/resume/test/test_backend.py +++ b/rpython/jit/resume/test/test_backend.py @@ -146,15 +146,22 @@ bridge = parse(""" [i0] + enter_frame(-1, descr=jitcode) + resume_put(i0, 0, 0) force_spill(i0) guard_false(i0) - """) + """, namespace={'jitcode': jitcode}) staticdata = MockStaticData([jitcode], []) locs = rebuild_locs_from_resumedata(descr, staticdata) - self.cpu.compile_bridge(None, descr, [bridge.inputargs], locs, + self.cpu.compile_bridge(None, descr, bridge.inputargs, locs, bridge.operations, looptoken) descr = bridge.operations[-1].getdescr() res = descr.rd_resume_bytecode.dump(staticdata, descr.rd_bytecode_position) - assert res == "resume_put (3, 28) 0 0" + exp = preparse(""" + enter_frame -1 name + resume_put (3, 1) 0 0 + resume_put (3, 28) 0 0 + """) + assert res == exp diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -5,7 +5,7 @@ ConstInt from rpython.jit.resume.frontend import rebuild_from_resumedata from rpython.jit.resume.rescode import ResumeBytecode, TAGBOX,\ - ResumeBytecodeBuilder, TAGCONST, TAGSMALLINT, TAGVIRTUAL + ResumeBytecodeBuilder, TAGCONST, TAGSMALLINT, TAGVIRTUAL, CLEAR_POSITION from rpython.jit.resume.reader import AbstractResumeReader from rpython.jit.resume.test.support import MockStaticData from rpython.jit.metainterp.resoperation import rop @@ -77,13 +77,18 @@ return index + 3 class RebuildingResumeReader(AbstractResumeReader): - def unpack(self, r): - tag, index = self.decode(r) - assert tag == TAGBOX - return index - def finish(self): - return [[self.unpack(r) for r in f.registers] for f in self.framestack] + res = [] + all = {} + for f in self.framestack: + for reg in f.registers: + if reg == CLEAR_POSITION: + continue + tag, index = self.decode(reg) + if tag == TAGBOX and index not in all: + all[index] = None # no duplicates + res.append(index) + return res def rebuild_locs_from_resumedata(faildescr, staticdata): return RebuildingResumeReader(staticdata).rebuild(faildescr) From noreply at buildbot.pypy.org Wed Jan 22 18:23:30 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:30 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: finish refactoring the world - now inputframes no longer exists and we Message-ID: <20140122172330.5DCD21C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68856:faf703373bb5 Date: 2014-01-18 15:57 +0100 http://bitbucket.org/pypy/pypy/changeset/faf703373bb5/ Log: finish refactoring the world - now inputframes no longer exists and we replay the necessary resume ops diff --git 
a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -34,23 +34,10 @@ self.liveness = LivenessAnalyzer() self.numbering = {} self.framestack = [] - locs = None - start_pos = 0 - xxx if inputlocs is not None: - locs = [] - for frame_pos, frame in enumerate(inputframes): - self.framestack.append(ResumeFrame(len(frame), start_pos)) - for pos_in_frame, box in enumerate(frame): - if box is None: - continue - pos = inputlocs[frame_pos][pos_in_frame] - self.framestack[-1].registers[pos_in_frame] = box - self.numbering[box] = pos - locs.append(Position(pos)) - start_pos += 1 - ResumeBuilder.__init__(self, self, frontend_liveness, descr, - inputframes, locs) + for arg, loc in zip(inputargs, inputlocs): + self.numbering[arg] = loc + ResumeBuilder.__init__(self, self, frontend_liveness, descr) def loc(self, box, must_exist=True): return Position(self.numbering[box]) @@ -142,13 +129,7 @@ newop.getdescr().rd_bytecode_position = resumebuilder.builder.getpos() self.operations.append(newop) - if descr is None: - parent = None - parent_position = 0 - else: - parent = descr.rd_resume_bytecode - parent_position = descr.rd_bytecode_position - bytecode = resumebuilder.finish(parent, parent_position, self) + bytecode = resumebuilder.finish(self) for op in operations: if op.is_guard(): op.getdescr().rd_resume_bytecode = bytecode @@ -291,7 +272,7 @@ name=''): clt = model.CompiledLoopToken(self, looptoken.number) looptoken.compiled_loop_token = clt - lltrace = LLTrace([inputargs], operations, None) + lltrace = LLTrace(inputargs, operations, None) clt._llgraph_loop = lltrace clt._llgraph_alltraces = [lltrace] self._record_labels(lltrace) diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -250,6 +250,10 @@ i1b = BoxInt() i3 = BoxInt() bridge = [ + ResOperation(rop.ENTER_FRAME, [ConstInt(-1)], None, + descr=jitcode), + ResOperation(rop.RESUME_PUT, [i1b, ConstInt(0), ConstInt(0)], + None), ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3), ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2), ResOperation(rop.LEAVE_FRAME, [], None), diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -124,7 +124,7 @@ jitcell_token = make_jitcell_token(jitdriver_sd) part = create_empty_loop(metainterp) - part.inputframes = [inputargs[:]] + part.inputargs = inputargs[:] h_ops = history.operations part.resume_at_jump_descr = resume_at_jump_descr part.operations = [ResOperation(rop.LABEL, inputargs, None, descr=TargetToken(jitcell_token))] + \ @@ -140,7 +140,7 @@ all_target_tokens = [target_token] loop = create_empty_loop(metainterp) - loop.inputframes = part.inputframes + loop.inputargs = part.inputargs loop.operations = part.operations loop.quasi_immutable_deps = {} if part.quasi_immutable_deps: @@ -198,7 +198,7 @@ assert partial_trace.operations[-1].getopnum() == rop.LABEL part = create_empty_loop(metainterp) - part.inputframes = [inputargs[:]] + part.inputargs = inputargs[:] part.resume_at_jump_descr = resume_at_jump_descr h_ops = history.operations @@ -253,10 +253,10 @@ def patch_new_loop_to_load_virtualizable_fields(loop, jitdriver_sd): vinfo = jitdriver_sd.virtualizable_info extra_ops = [] - inputargs = loop.inputframes[0] + inputargs = loop.inputargs vable_box 
= inputargs[jitdriver_sd.index_of_virtualizable] i = jitdriver_sd.num_red_args - loop.inputframes = [inputargs[:i]] + loop.inputargs = inputargs[:i] for descr in vinfo.static_field_descrs: assert i < len(inputargs) box = inputargs[i] @@ -338,8 +338,7 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - assert len(loop.inputframes) == 1 - asminfo = do_compile_loop(metainterp_sd, loop.inputframes[0], + asminfo = do_compile_loop(metainterp_sd, loop.inputargs, operations, original_jitcell_token, name=loopname) finally: @@ -357,7 +356,7 @@ ops_offset = asminfo.ops_offset else: ops_offset = None - metainterp_sd.logger_ops.log_loop(loop.inputframes[0], loop.operations, n, + metainterp_sd.logger_ops.log_loop(loop.inputargs, loop.operations, n, type, ops_offset, name=loopname) # @@ -383,7 +382,7 @@ metainterp_sd.profiler.start_backend() debug_start("jit-backend") try: - asminfo = do_compile_bridge(metainterp_sd, faildescr, inputframes, + asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs, inputlocs, operations, original_loop_token) finally: @@ -609,13 +608,13 @@ # to the corresponding guard_op and compile from there assert metainterp.resumekey_original_loop_token is not None new_loop.original_jitcell_token = metainterp.resumekey_original_loop_token - inputframes = metainterp.history.inputframes + inputargs = metainterp.history.inputargs inputlocs = metainterp.history.inputlocs if not we_are_translated(): self._debug_suboperations = new_loop.operations propagate_original_jitcell_token(new_loop) send_bridge_to_backend(metainterp.jitdriver_sd, metainterp.staticdata, - self, inputframes, inputlocs, + self, inputargs, inputlocs, new_loop.operations, new_loop.original_jitcell_token) @@ -803,7 +802,7 @@ # Attempt to use optimize_bridge(). This may return None in case # it does not work -- i.e. none of the existing old_loop_tokens match. 
new_trace = create_empty_loop(metainterp) - new_trace.inputframes = metainterp.history.inputframes[:] + new_trace.inputargs = metainterp.history.inputargs[:] if metainterp.history.inputlocs is not None: new_trace.inputlocs = metainterp.history.inputlocs[:] # clone ops, as optimize_bridge can mutate the ops @@ -817,7 +816,8 @@ else: inline_short_preamble = True try: - optimize_trace(metainterp_sd, new_trace, state.enable_opts, inline_short_preamble, inpframes=new_trace.inputframes) + optimize_trace(metainterp_sd, new_trace, state.enable_opts, + inline_short_preamble) except InvalidLoop: debug_print("compile_new_bridge: got an InvalidLoop") # XXX I am fairly convinced that optimize_bridge cannot actually raise diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -675,11 +675,10 @@ def check_consistency_of(inputargs, operations): seen = {} all = 0 - for frame in inputargs: - for box in frame: - assert isinstance(box, Box), "Loop.inputargs contains %r" % (box,) - seen[box] = None - all += 1 + for box in inputargs: + assert isinstance(box, Box), "Loop.inputargs contains %r" % (box,) + seen[box] = None + all += 1 assert len(seen) == all, ( "duplicate Box in the Loop.inputargs") TreeLoop.check_consistency_of_branch(operations, seen) diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -47,22 +47,19 @@ return optimizations, unroll -def optimize_trace(metainterp_sd, loop, enable_opts, inline_short_preamble=True, - inpframes=None): +def optimize_trace(metainterp_sd, loop, enable_opts, + inline_short_preamble=True): """Optimize loop.operations to remove internal overheadish operations. 
""" - from rpython.jit.resume.backend import flatten - debug_start("jit-optimize") try: loop.logops = metainterp_sd.logger_noopt.log_loop( - flatten(loop.inputframes), loop.operations) + loop.inputargs, loop.operations) optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts) if unroll: optimize_unroll(metainterp_sd, loop, optimizations, inline_short_preamble) else: - optimizer = Optimizer(metainterp_sd, loop, optimizations, - inpframes=inpframes) + optimizer = Optimizer(metainterp_sd, loop, optimizations) optimizer.propagate_all_forward() finally: debug_stop("jit-optimize") diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -344,8 +344,7 @@ class Optimizer(Optimization): - def __init__(self, metainterp_sd, loop, optimizations=None, - inpframes=None): + def __init__(self, metainterp_sd, loop, optimizations=None): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu self.loop = loop @@ -368,7 +367,7 @@ self.call_pure_results = loop.call_pure_results self.set_optimizations(optimizations) - self.resumebuilder = OptResumeBuilder(self, inpframes) + self.resumebuilder = OptResumeBuilder(self) self.setup() def set_optimizations(self, optimizations): diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -205,7 +205,7 @@ self.deadframe = deadframe AbstractResumeReader.__init__(self, metainterp.staticdata) - def get_box_value(self, encoded_pos, TP): + def get_box_value(self, pos_in_frame, frame_pos, encoded_pos, TP): if encoded_pos == CLEAR_POSITION: return None if encoded_pos in self.cache: @@ -214,14 +214,21 @@ if tag == TAGBOX: if TP == INT: val = self.metainterp.cpu.get_int_value(self.deadframe, pos) - res = BoxInt(val) + box = BoxInt(val) elif TP == REF: val = self.metainterp.cpu.get_ref_value(self.deadframe, pos) - res = BoxPtr(val) + box = BoxPtr(val) else: xxx - self.cache[encoded_pos] = res - return res + if pos_in_frame != -1: + self.metainterp.history.record(rop.RESUME_PUT, + [box, ConstInt(frame_pos), + ConstInt(pos_in_frame)], + None, None) + self.result.append(box) + self.locs.append(pos) + self.cache[encoded_pos] = box + return box elif tag == TAGSMALLINT: return ConstInt(pos) elif tag == TAGCONST: @@ -233,51 +240,34 @@ for fielddescr, encoded_field_pos in virtual.fields.iteritems(): self.setfield_gc(virtual_box, encoded_field_pos, fielddescr) self.cache[encoded_pos] = virtual_box + if pos_in_frame != -1: + self.metainterp.history.record(rop.RESUME_PUT, + [virtual_box, + ConstInt(frame_pos), + ConstInt(pos_in_frame)], + None, None) return virtual_box def allocate_struct(self, virtual): return self.metainterp.execute_and_record(rop.NEW, virtual.descr) def setfield_gc(self, box, encoded_field_pos, fielddescr): - field_box = self.get_box_value(encoded_field_pos, fielddescr.kind) + field_box = self.get_box_value(-1, -1, encoded_field_pos, + fielddescr.kind) self.metainterp.execute_and_record(rop.SETFIELD_GC, fielddescr, box, field_box) - def store_int_box(self, res, pos, miframe, i, jitframe_pos): - box = self.get_box_value(jitframe_pos, INT) + def store_int_box(self, pos, miframe, i, jitframe_pos): + box = self.get_box_value(pos, i, jitframe_pos, INT) if box is None: return miframe.registers_i[i] = box - if not isinstance(box, Const): - res[-1][pos] = box - def store_ref_box(self, res, pos, miframe, 
i, jitframe_pos): - box = self.get_box_value(jitframe_pos, REF) + def store_ref_box(self, pos, miframe, i, jitframe_pos): + box = self.get_box_value(pos, i, jitframe_pos, REF) if box is None: return miframe.registers_r[i] = box - tag, index = self.decode(jitframe_pos) - if tag == TAGBOX: - res[-1][pos] = box - elif tag == TAGVIRTUAL: - self.metainterp.history.record(rop.RESUME_PUT, - [box, ConstInt(len(res) - 1), - ConstInt(pos)], None, None) - # we can't have virtual ints - return - xxx - if jitframe_pos in self.cache: - box = self.cache[jitframe_pos] - elif jitframe_pos == -1: - return - elif jitframe_pos >= 0: - box = BoxPtr(self.metainterp.cpu.get_ref_value(self.deadframe, - jitframe_pos)) - elif jitframe_pos <= -2: - box = self.consts[-jitframe_pos - 2] - miframe.registers_r[i] = box - self.cache[jitframe_pos] = box - res[-1][pos] = box def store_float_box(self, res, pos, miframe, i, jitframe_pos): box = self.get_box_value(jitframe_pos) @@ -304,26 +294,28 @@ return -1 def finish(self): - res = [] + self.result = [] self.cache = {} + self.locs = [] for frame in self.framestack: jitcode = frame.jitcode - res.append([None] * jitcode.num_regs()) miframe = self.metainterp.newframe(jitcode, record_resume=False) miframe.pc = frame.pc pos = 0 for i in range(jitcode.num_regs_i()): - self.store_int_box(res, pos, miframe, i, frame.registers[pos]) + self.store_int_box(pos, miframe, i, frame.registers[pos]) pos += 1 for i in range(jitcode.num_regs_r()): - self.store_ref_box(res, pos, miframe, i, frame.registers[pos]) + self.store_ref_box(pos, miframe, i, frame.registers[pos]) pos += 1 for i in range(jitcode.num_regs_f()): - self.store_float_box(res, pos, miframe, i, frame.registers[pos]) + self.store_float_box(pos, miframe, i, frame.registers[pos]) pos += 1 self.cache = None - return res, [[self.get_loc(r) for r in f.registers] - for f in self.framestack] + state = self.result, self.locs + self.result = None + self.locs = None + return state def rebuild_from_resumedata(metainterp, deadframe, faildescr): """ Reconstruct metainterp frames from the resumedata diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -111,15 +111,16 @@ metainterp = MockMetaInterp() metainterp.staticdata = MockStaticData([jitcode], []) metainterp.cpu = MockCPU() - inpframes, inplocs = rebuild_from_resumedata(metainterp, "myframe", descr) + inputargs, inplocs = rebuild_from_resumedata(metainterp, "myframe", + descr) assert len(metainterp.framestack) == 1 f = metainterp.framestack[-1] assert f.registers_i[1].getint() == 103 assert isinstance(f.registers_i[2], Const) assert f.registers_i[2].getint() == 15 assert f.registers_i[3].getint() == 13 - assert inpframes == [[None, AnyBox(), None, None]] - assert inplocs == [[-1, 100, -1, -1]] + assert len(inputargs) == 1 + assert inplocs == [100] def test_nested_call(self): jitcode1 = JitCode("jitcode") @@ -165,35 +166,6 @@ assert f.registers_i[2].getint() == 11 + 3 assert f.registers_i[4].getint() == 8 + 3 - def test_bridge(self): - jitcode1 = JitCode("jitcode") - jitcode1.setup(num_regs_i=13, num_regs_r=0, num_regs_f=0) - jitcode1.global_index = 0 - builder = ResumeBytecodeBuilder() - builder.enter_frame(-1, jitcode1) - builder.resume_put(TAGBOX | (42 << 2), 0, 0) - rd1 = builder.build() - lgt1 = len(rd1) - - builder = ResumeBytecodeBuilder() - builder.resume_put(TAGBOX | (2 << 2), 0, 1) - rd2 = builder.build() - lgt2 = len(rd2) - - descr = 
Descr() - descr.rd_bytecode_position = lgt2 - parent = ResumeBytecode(rd1, []) - b = ResumeBytecode(rd2, [], parent, parent_position=lgt1) - descr.rd_resume_bytecode = b - metainterp = MockMetaInterp() - metainterp.staticdata = MockStaticData([jitcode1], []) - metainterp.cpu = MockCPU() - rebuild_from_resumedata(metainterp, "myframe", descr) - f = metainterp.framestack[-1] - assert f.num_nonempty_regs() == 2 - assert f.registers_i[0].getint() == 42 + 3 - assert f.registers_i[1].getint() == 2 + 3 - def test_new(self): jitcode1 = JitCode("jitcode") jitcode1.global_index = 0 @@ -242,7 +214,7 @@ descr.rd_bytecode_position = len(rd) staticdata = MockStaticData([jitcode1, jitcode2], []) locs = rebuild_locs_from_resumedata(descr, staticdata) - assert locs == [[8, 11], [12]] + assert locs == [8, 11, 12] class AssemblerExecuted(Exception): pass From noreply at buildbot.pypy.org Wed Jan 22 18:23:31 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Jan 2014 18:23:31 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: fixes Message-ID: <20140122172331.B78C71C3969@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68857:e0a29fe61a19 Date: 2014-01-18 16:05 +0100 http://bitbucket.org/pypy/pypy/changeset/e0a29fe61a19/ Log: fixes diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1648,13 +1648,12 @@ def is_main_jitcode(self, jitcode): return self.jitdriver_sd is not None and jitcode is self.jitdriver_sd.mainjitcode - def newframe(self, jitcode, greenkey=None, record_resume=True): + def newframe(self, jitcode, greenkey=None): if self.framestack: pc = self.framestack[-1].pc else: pc = -1 - if record_resume: - self.resumerecorder.enter_frame(pc, jitcode) + self.resumerecorder.enter_frame(pc, jitcode) if jitcode.is_portal: self.portal_call_depth += 1 self.call_ids.append(self.current_call_id) @@ -2342,7 +2341,7 @@ # ----- make a new frame ----- self.portal_call_depth = -1 # always one portal around self.framestack = [] - self.resumerecorder = ResumeRecorder(self, False) + self.resumerecorder = ResumeRecorder(self) f = self.newframe(self.jitdriver_sd.mainjitcode) f.setup_call(original_boxes) assert self.portal_call_depth == 0 @@ -2358,9 +2357,9 @@ try: self.portal_call_depth = -1 # always one portal around self.history = history.History() + self.resumerecorder = ResumeRecorder(self) state = self.rebuild_state_after_failure(resumedescr, deadframe) self.history.inputargs, self.history.inputlocs = state - self.resumerecorder = ResumeRecorder(self, True) finally: rstack._stack_criticalcode_stop() diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -205,7 +205,7 @@ self.deadframe = deadframe AbstractResumeReader.__init__(self, metainterp.staticdata) - def get_box_value(self, pos_in_frame, frame_pos, encoded_pos, TP): + def get_box_value(self, frame_pos, pos_in_frame, encoded_pos, TP): if encoded_pos == CLEAR_POSITION: return None if encoded_pos in self.cache: @@ -257,19 +257,19 @@ self.metainterp.execute_and_record(rop.SETFIELD_GC, fielddescr, box, field_box) - def store_int_box(self, pos, miframe, i, jitframe_pos): - box = self.get_box_value(pos, i, jitframe_pos, INT) + def store_int_box(self, frame_pos, pos, miframe, i, jitframe_pos): + box = self.get_box_value(frame_pos, pos, jitframe_pos, INT) if box is None: return miframe.registers_i[i] = 
box - def store_ref_box(self, pos, miframe, i, jitframe_pos): - box = self.get_box_value(pos, i, jitframe_pos, REF) + def store_ref_box(self, frame_pos, pos, miframe, i, jitframe_pos): + box = self.get_box_value(frame_pos, pos, jitframe_pos, REF) if box is None: return miframe.registers_r[i] = box - def store_float_box(self, res, pos, miframe, i, jitframe_pos): + def store_float_box(self, frame_pos, pos, miframe, i, jitframe_pos): box = self.get_box_value(jitframe_pos) if box is None: return @@ -297,19 +297,22 @@ self.result = [] self.cache = {} self.locs = [] - for frame in self.framestack: + for frame_pos, frame in enumerate(self.framestack): jitcode = frame.jitcode - miframe = self.metainterp.newframe(jitcode, record_resume=False) + miframe = self.metainterp.newframe(jitcode) miframe.pc = frame.pc pos = 0 for i in range(jitcode.num_regs_i()): - self.store_int_box(pos, miframe, i, frame.registers[pos]) + self.store_int_box(frame_pos, pos, miframe, i, + frame.registers[pos]) pos += 1 for i in range(jitcode.num_regs_r()): - self.store_ref_box(pos, miframe, i, frame.registers[pos]) + self.store_ref_box(frame_pos, pos, miframe, i, + frame.registers[pos]) pos += 1 for i in range(jitcode.num_regs_f()): - self.store_float_box(pos, miframe, i, frame.registers[pos]) + self.store_float_box(frame_pos, pos, miframe, i, + frame.registers[pos]) pos += 1 self.cache = None state = self.result, self.locs @@ -341,12 +344,9 @@ class ResumeRecorder(object): """ Created by metainterp to record the resume as we record operations """ - def __init__(self, metainterp, is_bridge=False): + def __init__(self, metainterp): self.metainterp = metainterp self.cachestack = [] - if is_bridge: - for frame in metainterp.framestack: - self.cachestack.append([None] * frame.jitcode.num_regs()) def enter_frame(self, pc, jitcode): self.metainterp.history.record(rop.ENTER_FRAME, [ConstInt(pc)], None, From noreply at buildbot.pypy.org Wed Jan 22 18:40:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Jan 2014 18:40:25 +0100 (CET) Subject: [pypy-commit] pypy default: Some extra tests Message-ID: <20140122174025.EA8FD1C315D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68858:0e43b0a21fe8 Date: 2014-01-22 18:39 +0100 http://bitbucket.org/pypy/pypy/changeset/0e43b0a21fe8/ Log: Some extra tests diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -862,6 +862,28 @@ assert prod.next() == () raises (StopIteration, prod.next) + def test_product_powers_of_two(self): + from itertools import product + assert list(product()) == [()] + assert list(product('ab')) == [('a',), ('b',)] + assert list(product('ab', 'cd')) == [ + ('a', 'c'), ('a', 'd'), + ('b', 'c'), ('b', 'd')] + assert list(product('ab', 'cd', 'ef')) == [ + ('a', 'c', 'e'), ('a', 'c', 'f'), + ('a', 'd', 'e'), ('a', 'd', 'f'), + ('b', 'c', 'e'), ('b', 'c', 'f'), + ('b', 'd', 'e'), ('b', 'd', 'f')] + + def test_product_empty_item(self): + from itertools import product + assert list(product('')) == [] + assert list(product('ab', '')) == [] + assert list(product('', 'cd')) == [] + assert list(product('ab', 'cd', '')) == [] + assert list(product('ab', '', 'ef')) == [] + assert list(product('', 'cd', 'ef')) == [] + def test_permutations(self): from itertools import permutations assert list(permutations('AB')) == [('A', 'B'), ('B', 'A')] From noreply at buildbot.pypy.org Wed Jan 22 19:33:59 2014 
From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Jan 2014 19:33:59 +0100 (CET) Subject: [pypy-commit] pypy default: Update the docs to mention `incminimark`. Message-ID: <20140122183359.4241B1C315D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68859:b1648afa21cf Date: 2014-01-22 19:33 +0100 http://bitbucket.org/pypy/pypy/changeset/b1648afa21cf/ Log: Update the docs to mention `incminimark`. diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -210,4 +210,12 @@ are preserved. If the object dies then the pre-reserved location becomes free garbage, to be collected at the next major collection. +The exact name of this GC is either `minimark` or `incminimark`. The +latter is a version that does major collections incrementally (i.e. one +major collection is split along some number of minor collections, rather +than being done all at once after a specific minor collection). The +default is `incminimark`, as it seems to have a very minimal impact on +performance and memory usage at the benefit of avoiding the long pauses +of `minimark`. + .. include:: _ref.txt diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst --- a/pypy/doc/gc_info.rst +++ b/pypy/doc/gc_info.rst @@ -6,7 +6,7 @@ Minimark -------- -PyPy's default ``minimark`` garbage collector is configurable through +PyPy's default ``incminimark`` garbage collector is configurable through several environment variables: ``PYPY_GC_NURSERY`` @@ -14,6 +14,17 @@ Defaults to 1/2 of your cache or ``4M``. Small values (like 1 or 1KB) are useful for debugging. +``PYPY_GC_NURSERY_CLEANUP`` + The interval at which nursery is cleaned up. Must + be smaller than the nursery size and bigger than the + biggest object we can allotate in the nursery. + +``PYPY_GC_INCREMENT_STEP`` + The size of memory marked during the marking step. Default is size of + nursery times 2. If you mark it too high your GC is not incremental at + all. The minimum is set to size that survives minor collection times + 1.5 so we reclaim anything all the time. + ``PYPY_GC_MAJOR_COLLECT`` Major collection memory factor. 
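A minimal sketch of how the variables documented in this hunk can be applied in practice, assuming a ``pypy`` executable on the PATH; the script name and the chosen values are only illustrative placeholders and are not taken from the commit itself::

    import os
    import subprocess

    # Launch a script under PyPy with an explicit GC configuration.
    # The variable names are the ones documented above; "myscript.py"
    # and the values are only examples ("4M" mirrors the notation used
    # for the documented nursery default).
    env = dict(os.environ)
    env["PYPY_GC_NURSERY"] = "4M"
    env["PYPY_GC_MAJOR_COLLECT"] = "1.82"
    subprocess.call(["pypy", "myscript.py"], env=env)

The same variables can equally be exported in the shell before starting ``pypy`` directly, since they are ordinary environment variables read at startup.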
Default is ``1.82``, which means trigger a major collection when the From noreply at buildbot.pypy.org Thu Jan 23 04:12:31 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 23 Jan 2014 04:12:31 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: support access to void* data members and add cppyy.gbl.nullptr (and associated tests) Message-ID: <20140123031231.C94C91C3969@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r68861:1edf9b7c3bdb Date: 2014-01-22 18:39 -0800 http://bitbucket.org/pypy/pypy/changeset/1edf9b7c3bdb/ Log: support access to void* data members and add cppyy.gbl.nullptr (and associated tests) diff --git a/pypy/module/cppyy/__init__.py b/pypy/module/cppyy/__init__.py --- a/pypy/module/cppyy/__init__.py +++ b/pypy/module/cppyy/__init__.py @@ -15,6 +15,7 @@ '_set_function_generator': 'interp_cppyy.set_function_generator', '_register_class' : 'interp_cppyy.register_class', '_is_static' : 'interp_cppyy.is_static', + '_get_nullptr' : 'interp_cppyy.get_nullptr', 'CPPInstance' : 'interp_cppyy.W_CPPInstance', 'addressof' : 'interp_cppyy.addressof', 'bind_object' : 'interp_cppyy.bind_object', diff --git a/pypy/module/cppyy/converter.py b/pypy/module/cppyy/converter.py --- a/pypy/module/cppyy/converter.py +++ b/pypy/module/cppyy/converter.py @@ -7,7 +7,7 @@ from rpython.rlib import jit_libffi, rfloat from pypy.module._rawffi.interp_rawffi import unpack_simple_shape -from pypy.module._rawffi.array import W_Array +from pypy.module._rawffi.array import W_Array, W_ArrayInstance from pypy.module.cppyy import helper, capi, ffitypes @@ -48,20 +48,33 @@ return capi.C_NULL_OBJECT def is_nullpointer_specialcase(space, w_obj): - # special case: allow integer 0 as (void*)0 + # 0, None, and nullptr may serve as "NULL", check for any of them + + # integer 0 try: return space.int_w(w_obj) == 0 except Exception: pass - # special case: allow None as (void*)0 - return space.is_true(space.is_(w_obj, space.w_None)) + # None or nullptr + from pypy.module.cppyy import interp_cppyy + return space.is_true(space.is_(w_obj, space.w_None)) or \ + space.is_true(space.is_(w_obj, interp_cppyy.get_nullptr(space))) def get_rawbuffer(space, w_obj): + # raw buffer try: buf = space.buffer_w(w_obj) return rffi.cast(rffi.VOIDP, buf.get_raw_address()) except Exception: pass + # array type + try: + arr = space.interp_w(W_ArrayInstance, w_obj, can_be_None=True) + if arr: + return rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space))) + except Exception: + pass + # pre-defined NULL if is_nullpointer_specialcase(space, w_obj): return rffi.cast(rffi.VOIDP, 0) raise TypeError("not an addressable buffer") @@ -140,8 +153,6 @@ self.size = array_size def from_memory(self, space, w_obj, w_pycppclass, offset): - if hasattr(space, "fake"): - raise NotImplementedError # read access, so no copy needed address_value = self._get_raw_address(space, w_obj, offset) address = rffi.cast(rffi.ULONG, address_value) @@ -390,6 +401,24 @@ x = rffi.cast(rffi.VOIDPP, address) x[0] = self._unwrap_object(space, w_obj) + def from_memory(self, space, w_obj, w_pycppclass, offset): + # returned as a long value for the address (INTPTR_T is not proper + # per se, but rffi does not come with a PTRDIFF_T) + address = self._get_raw_address(space, w_obj, offset) + ptrval = rffi.cast(rffi.ULONG, rffi.cast(rffi.VOIDPP, address)[0]) + if ptrval == 0: + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.get_nullptr(space) + arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap('P'))) + 
return arr.fromaddress(space, ptrval, sys.maxint) + + def to_memory(self, space, w_obj, w_value, offset): + address = rffi.cast(rffi.VOIDPP, self._get_raw_address(space, w_obj, offset)) + if is_nullpointer_specialcase(space, w_value): + address[0] = rffi.cast(rffi.VOIDP, 0) + else: + address[0] = rffi.cast(rffi.VOIDP, self._unwrap_object(space, w_value)) + class VoidPtrPtrConverter(TypeConverter): _immutable_fields_ = ['uses_local'] diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py --- a/pypy/module/cppyy/executor.py +++ b/pypy/module/cppyy/executor.py @@ -53,17 +53,12 @@ if hasattr(space, "fake"): raise NotImplementedError lresult = capi.c_call_l(space, cppmethod, cppthis, num_args, args) - address = rffi.cast(rffi.ULONG, lresult) + ptrval = rffi.cast(rffi.ULONG, lresult) arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap(self.typecode))) - if address == 0: - # TODO: fix this hack; fromaddress() will allocate memory if address - # is null and there seems to be no way around it (ll_buffer can not - # be touched directly) - nullarr = arr.fromaddress(space, address, 0) - assert isinstance(nullarr, W_ArrayInstance) - nullarr.free(space) - return nullarr - return arr.fromaddress(space, address, sys.maxint) + if ptrval == 0: + from pypy.module.cppyy import interp_cppyy + return interp_cppyy.get_nullptr(space) + return arr.fromaddress(space, ptrval, sys.maxint) class VoidExecutor(FunctionExecutor): diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -40,11 +40,29 @@ def __init__(self, space): self.cppscope_cache = { "void" : W_CPPClass(space, "void", capi.C_NULL_TYPE) } + self.w_nullptr = None self.cpptemplate_cache = {} self.cppclass_registry = {} self.w_clgen_callback = None self.w_fngen_callback = None +def get_nullptr(space): + if hasattr(space, "fake"): + raise NotImplementedError + state = space.fromcache(State) + if state.w_nullptr is None: + from pypy.module._rawffi.interp_rawffi import unpack_simple_shape + from pypy.module._rawffi.array import W_Array, W_ArrayInstance + arr = space.interp_w(W_Array, unpack_simple_shape(space, space.wrap('P'))) + # TODO: fix this hack; fromaddress() will allocate memory if address + # is null and there seems to be no way around it (ll_buffer can not + # be touched directly) + nullarr = arr.fromaddress(space, rffi.cast(rffi.ULONG, 0), 0) + assert isinstance(nullarr, W_ArrayInstance) + nullarr.free(space) + state.w_nullptr = space.wrap(nullarr) + return state.w_nullptr + @unwrap_spec(name=str) def resolve_name(space, name): return space.wrap(capi.c_resolve_name(space, name)) @@ -1184,16 +1202,30 @@ memory_regulator.register(cppinstance) return w_cppinstance - at unwrap_spec(w_cppinstance=W_CPPInstance) -def addressof(space, w_cppinstance): - """Takes a bound C++ instance, returns the raw address.""" - address = rffi.cast(rffi.LONG, w_cppinstance.get_rawobject()) +def _addressof(space, w_obj): + try: + # attempt to extract address from array + return rffi.cast(rffi.INTPTR_T, converter.get_rawbuffer(space, w_obj)) + except TypeError: + pass + # attempt to get address of C++ instance + return rffi.cast(rffi.INTPTR_T, converter.get_rawobject(space, w_obj)) + + at unwrap_spec(w_obj=W_Root) +def addressof(space, w_obj): + """Takes a bound C++ instance or array, returns the raw address.""" + address = _addressof(space, w_obj) return space.wrap(address) - at unwrap_spec(address=int, owns=bool) -def 
bind_object(space, address, w_pycppclass, owns=False): + at unwrap_spec(owns=bool) +def bind_object(space, w_obj, w_pycppclass, owns=False): """Takes an address and a bound C++ class proxy, returns a bound instance.""" - rawobject = rffi.cast(capi.C_OBJECT, address) + try: + # attempt address from array or C++ instance + rawobject = rffi.cast(capi.C_OBJECT, _addressof(space, w_obj)) + except Exception: + # accept integer value as address + rawobject = rffi.cast(capi.C_OBJECT, space.uint_w(w_obj)) w_cppclass = space.findattr(w_pycppclass, space.wrap("_cpp_proxy")) if not w_cppclass: w_cppclass = scope_byname(space, space.str_w(w_pycppclass)) diff --git a/pypy/module/cppyy/pythonify.py b/pypy/module/cppyy/pythonify.py --- a/pypy/module/cppyy/pythonify.py +++ b/pypy/module/cppyy/pythonify.py @@ -447,6 +447,9 @@ # be the same issue for all typedef'd builtin types setattr(gbl, 'unsigned int', int) + # install nullptr as a unique reference + setattr(gbl, 'nullptr', cppyy._get_nullptr()) + # install for user access cppyy.gbl = gbl diff --git a/pypy/module/cppyy/test/datatypes.cxx b/pypy/module/cppyy/test/datatypes.cxx --- a/pypy/module/cppyy/test/datatypes.cxx +++ b/pypy/module/cppyy/test/datatypes.cxx @@ -18,6 +18,7 @@ m_float = -66.f; m_double = -77.; m_enum = kNothing; + m_voidp = (void*)0; m_bool_array2 = new bool[N]; m_short_array2 = new short[N]; @@ -97,6 +98,7 @@ float cppyy_test_data::get_float() { return m_float; } double cppyy_test_data::get_double() { return m_double; } cppyy_test_data::what cppyy_test_data::get_enum() { return m_enum; } +void* cppyy_test_data::get_voidp() { return m_voidp; } bool* cppyy_test_data::get_bool_array() { return m_bool_array; } bool* cppyy_test_data::get_bool_array2() { return m_bool_array2; } @@ -150,6 +152,7 @@ void cppyy_test_data::set_double(double d) { m_double = d; } void cppyy_test_data::set_double_c(const double& d) { m_double = d; } void cppyy_test_data::set_enum(what w) { m_enum = w; } +void cppyy_test_data::set_voidp(void* p) { m_voidp = p; } void cppyy_test_data::set_pod_val(cppyy_test_pod p) { m_pod = p; } void cppyy_test_data::set_pod_ptr_in(cppyy_test_pod* pp) { m_pod = *pp; } @@ -186,6 +189,7 @@ float cppyy_test_data::s_float = -606.f; double cppyy_test_data::s_double = -707.; cppyy_test_data::what cppyy_test_data::s_enum = cppyy_test_data::kNothing; +void* cppyy_test_data::s_voidp = (void*)0; //- strings ----------------------------------------------------------------- const char* cppyy_test_data::get_valid_string(const char* in) { return in; } diff --git a/pypy/module/cppyy/test/datatypes.h b/pypy/module/cppyy/test/datatypes.h --- a/pypy/module/cppyy/test/datatypes.h +++ b/pypy/module/cppyy/test/datatypes.h @@ -68,6 +68,7 @@ float get_float(); double get_double(); what get_enum(); + void* get_voidp(); bool* get_bool_array(); bool* get_bool_array2(); @@ -121,6 +122,7 @@ void set_double(double d); void set_double_c(const double& d); void set_enum(what w); + void set_voidp(void* p); void set_pod_val(cppyy_test_pod); // for m_pod void set_pod_ptr_in(cppyy_test_pod*); @@ -172,6 +174,7 @@ float m_float; double m_double; what m_enum; + void* m_voidp; // array types bool m_bool_array[N]; @@ -212,6 +215,7 @@ static float s_float; static double s_double; static what s_enum; + static void* s_voidp; private: bool m_owns_arrays; diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -232,11 +232,13 @@ for i in 
range(self.N): assert ca[i] == b[i] - # NULL/None passing (will use short*) + # NULL/None/nullptr passing (will use short*) assert not c.pass_array(0) raises(Exception, c.pass_array(0).__getitem__, 0) # raises SegfaultException assert not c.pass_array(None) raises(Exception, c.pass_array(None).__getitem__, 0) # id. + assert not c.pass_array(cppyy.gbl.nullptr) + raises(Exception, c.pass_array(cppyy.gbl.nullptr).__getitem__, 0) # id. id. c.destruct() @@ -694,3 +696,51 @@ l = list(arr) for i in range(self.N): assert arr[i] == l[i] + + def test21_voidp(self): + """Test usage of void* data""" + + import cppyy + cppyy_test_data = cppyy.gbl.cppyy_test_data + + c = cppyy_test_data() + + assert not cppyy.gbl.nullptr + + assert c.s_voidp is cppyy.gbl.nullptr + assert cppyy_test_data.s_voidp is cppyy.gbl.nullptr + + assert c.m_voidp is cppyy.gbl.nullptr + assert c.get_voidp() is cppyy.gbl.nullptr + + c2 = cppyy_test_data() + assert c2.m_voidp is cppyy.gbl.nullptr + c.set_voidp(c2.m_voidp) + assert c.m_voidp is cppyy.gbl.nullptr + c.set_voidp(c2.get_voidp()) + assert c.m_voidp is cppyy.gbl.nullptr + c.set_voidp(cppyy.gbl.nullptr) + assert c.m_voidp is cppyy.gbl.nullptr + + c.set_voidp(c2) + def address_equality_test(a, b): + assert cppyy.addressof(a) == cppyy.addressof(b) + b2 = cppyy.bind_object(a, cppyy_test_data) + assert b is b2 # memory regulator recycles + b3 = cppyy.bind_object(cppyy.addressof(a), cppyy_test_data) + assert b is b3 # likewise + + address_equality_test(c.m_voidp, c2) + address_equality_test(c.get_voidp(), c2) + + def null_test(null): + c.m_voidp = null + assert c.m_voidp is cppyy.gbl.nullptr + map(null_test, [0, None, cppyy.gbl.nullptr]) + + c.m_voidp = c2 + address_equality_test(c.m_voidp, c2) + address_equality_test(c.get_voidp(), c2) + + c.s_voidp = c2 + address_equality_test(c.s_voidp, c2) diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/cppyy/test/test_fragile.py @@ -109,9 +109,11 @@ cppyy.addressof(f) raises(TypeError, cppyy.addressof, o) - raises(TypeError, cppyy.addressof, 0) raises(TypeError, cppyy.addressof, 1) - raises(TypeError, cppyy.addressof, None) + # 0, None, and nullptr allowed + assert cppyy.addressof(0) == 0 + assert cppyy.addressof(None) == 0 + assert cppyy.addressof(cppyy.gbl.nullptr) == 0 def test06_wrong_this(self): """Test that using an incorrect self argument raises""" From noreply at buildbot.pypy.org Thu Jan 23 04:12:30 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 23 Jan 2014 04:12:30 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: merge default into branch Message-ID: <20140123031230.9D0621C3969@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r68860:efd17665296a Date: 2014-01-08 13:21 -0800 http://bitbucket.org/pypy/pypy/changeset/efd17665296a/ Log: merge default into branch diff too long, truncating to 2000 out of 2787 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. 
-PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- /dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,29 @@ + +import struct + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + if size == 1: + return struct.unpack_from("B", buffer(cp)[i:])[0] + elif size == 2: + return struct.unpack_from("H", buffer(cp)[i * 2:])[0] + elif size == 4: + return struct.unpack_from("I", buffer(cp)[i * 4:])[0] diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -38,7 +38,7 @@ # General information about the project. project = u'PyPy' -copyright = u'2013, The PyPy Project' +copyright = u'2014, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -39,3 +39,5 @@ .. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 Fix 3 broken links on PyPy published papers in docs. + +.. 
branch: jit-ordereddict diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -805,8 +805,8 @@ raise TypeError("Varargs and keywords not supported in unwrap_spec") argspec = ', '.join([arg for arg in args.args[1:]]) func_code = py.code.Source(""" - def f(w_obj, %(args)s): - return w_obj.%(func_name)s(%(args)s) + def f(self, %(args)s): + return self.%(func_name)s(%(args)s) """ % {'args': argspec, 'func_name': func.func_name}) d = {} exec func_code.compile() in d @@ -821,7 +821,7 @@ else: assert isinstance(unwrap_spec, dict) unwrap_spec = unwrap_spec.copy() - unwrap_spec['w_obj'] = base_cls + unwrap_spec['self'] = base_cls return interp2app(globals()['unwrap_spec'](**unwrap_spec)(f)) class interp2app(W_Root): diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr -from rpython.rlib import objectmodel, rgc +from rpython.rlib import rgc from rpython.rlib.objectmodel import keepalive_until_here, specialize from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -42,7 +42,7 @@ if self.handles[d]() is None: self.look_distance = d + 1 return d - # full! extend, but don't use '!=' here + # full! extend, but don't use '+=' here self.handles = self.handles + [dead_ref] * (length // 3 + 5) self.look_distance = length + 1 return length diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -415,7 +415,13 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + try: + w_groupnum = space.getitem(self.srepat.w_groupindex, w_arg) + except OperationError, e: + if not e.match(space, space.w_KeyError): + raise + raise OperationError(space.w_IndexError, + space.wrap("no such group")) groupnum = space.int_w(w_groupnum) if groupnum == 0: return self.ctx.match_start, self.ctx.match_end diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -172,6 +172,9 @@ assert ("1", "1", None) == m.group(1, 2, 3) assert ("1", None) == m.group("first", "second") raises(IndexError, m.group, 1, 4) + assert ("1", None) == m.group(1, "second") + raises(IndexError, m.group, 'foobarbaz') + raises(IndexError, m.group, 'first', 'foobarbaz') def test_expand(self): import re diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -131,7 +131,11 @@ if space.isinstance_w(w_idx, space.w_tuple): if space.len_w(w_idx) == 0: return self.get_scalar_value() - if space.is_none(w_idx): + elif space.isinstance_w(w_idx, space.w_str): + if self.dtype.is_record_type(): + w_val = self.value.descr_getitem(space, w_idx) + return convert_to_array(space, w_val) + elif space.is_none(w_idx): new_shape = [1] arr = W_NDimArray.from_shape(space, new_shape, 
self.dtype) arr_iter = arr.create_iter(new_shape) @@ -145,6 +149,12 @@ space.wrap("0-d arrays can't be indexed")) def descr_setitem(self, space, _, w_idx, w_val): + if space.isinstance_w(w_idx, space.w_tuple): + if space.len_w(w_idx) == 0: + return self.set_scalar_value(self.dtype.coerce(space, w_val)) + elif space.isinstance_w(w_idx, space.w_str): + if self.dtype.is_record_type(): + return self.value.descr_setitem(space, w_idx, w_val) raise OperationError(space.w_IndexError, space.wrap("0-d arrays can't be indexed")) @@ -176,7 +186,7 @@ s = self.dtype.itemtype.bool(self.value) w_res = W_NDimArray.from_shape(space, [s], index_type) if s == 1: - w_res.implementation.setitem(0, index_type.itemtype.box(0)) + w_res.implementation.setitem(0, index_type.itemtype.box(0)) return space.newtuple([w_res]) def fill(self, space, w_value): diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -123,7 +123,8 @@ if w_axis is space.w_None: # note that it's fine ot pass None here as we're not going # to pass the result around (None is the link to base in slices) - arr = arr.reshape(space, None, [arr.get_size()]) + if arr.get_size() > 0: + arr = arr.reshape(space, None, [arr.get_size()]) axis = 0 elif w_axis is None: axis = -1 diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -38,7 +38,7 @@ from pypy.module.micronumpy.arrayimpl import concrete, scalar if not shape: - w_val = dtype.base.coerce(space, space.wrap(0)) + w_val = dtype.base.coerce(space, None) impl = scalar.Scalar(dtype.base, w_val) else: strides, backstrides = calc_strides(shape, dtype.base, order) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -11,6 +11,7 @@ from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage +from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype @@ -275,14 +276,25 @@ def descr_view(self, space, w_dtype): from pypy.module.micronumpy.interp_dtype import W_Dtype - dtype = space.interp_w(W_Dtype, - space.call_function(space.gettypefor(W_Dtype), w_dtype)) - if dtype.get_size() == 0: - raise OperationError(space.w_TypeError, space.wrap( - "data-type must not be 0-sized")) - if dtype.get_size() != self.get_dtype(space).get_size(): - raise OperationError(space.w_ValueError, space.wrap( - "new type not compatible with array.")) + try: + subclass = space.is_true(space.issubtype( + w_dtype, space.gettypefor(W_NDimArray))) + except OperationError, e: + if e.match(space, space.w_TypeError): + subclass = False + else: + raise + if subclass: + dtype = self.get_dtype(space) + else: + dtype = space.interp_w(W_Dtype, + space.call_function(space.gettypefor(W_Dtype), w_dtype)) + if dtype.get_size() == 0: + raise OperationError(space.w_TypeError, space.wrap( + "data-type must not be 0-sized")) + if dtype.get_size() != self.get_dtype(space).get_size(): + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) if dtype.is_str_or_unicode(): return 
dtype.coerce(space, space.wrap(self.raw_str())) elif dtype.is_record_type(): @@ -350,28 +362,22 @@ descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint16") class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("i") class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("I") + +class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("q") + +class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("Q") class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("long") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("l") class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("ulong") - -class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int64") - -class W_LongLongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter('longlong') - -class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint64") - -class W_ULongLongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter('ulonglong') + descr__new__, _get_dtype, descr_reduce = new_dtype_getter("L") class W_InexactBox(W_NumberBox): pass @@ -427,7 +433,7 @@ self.dtype = dtype def get_dtype(self, space): - return self.arr.dtype + return self.dtype def raw_str(self): return self.arr.dtype.itemtype.to_str(self) @@ -464,13 +470,17 @@ return space.wrap(dtype.itemtype.to_str(read_val)) return read_val - @unwrap_spec(item=str) - def descr_setitem(self, space, item, w_value): + def descr_setitem(self, space, w_item, w_value): + if space.isinstance_w(w_item, space.w_basestring): + item = space.str_w(w_item) + else: + raise OperationError(space.w_IndexError, space.wrap( + "invalid index")) try: ofs, dtype = self.dtype.fields[item] except KeyError: - raise OperationError(space.w_IndexError, - space.wrap("Field %s does not exist" % item)) + raise OperationError(space.w_ValueError, + space.wrap("field named %s not found" % item)) dtype.itemtype.store(self.arr, self.ofs, ofs, dtype.coerce(space, w_value)) @@ -663,13 +673,6 @@ __reduce__ = interp2app(W_Int64Box.descr_reduce), ) -if LONG_BIT == 32: - W_LongBox = W_Int32Box - W_ULongBox = W_UInt32Box -elif LONG_BIT == 64: - W_LongBox = W_Int64Box - W_ULongBox = W_UInt64Box - W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef, __module__ = "numpy", __new__ = interp2app(W_UInt64Box.descr__new__.im_func), @@ -677,6 +680,21 @@ __reduce__ = interp2app(W_UInt64Box.descr_reduce), ) +W_LongBox.typedef = TypeDef("int%d" % LONG_BIT, + (W_SignedIntegerBox.typedef, int_typedef), + __module__ = "numpy", + __new__ = interp2app(W_LongBox.descr__new__.im_func), + __index__ = interp2app(W_LongBox.descr_index), + __reduce__ = interp2app(W_LongBox.descr_reduce), +) + +W_ULongBox.typedef = TypeDef("uint%d" % LONG_BIT, W_UnsignedIntegerBox.typedef, + __module__ = "numpy", + __new__ = interp2app(W_ULongBox.descr__new__.im_func), + __index__ = interp2app(W_ULongBox.descr_index), + __reduce__ = 
interp2app(W_ULongBox.descr_reduce), +) + W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef, __module__ = "numpy", ) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,4 +1,3 @@ -import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -137,6 +136,8 @@ return space.wrap(self.itemtype.alignment) def descr_get_subdtype(self, space): + if self.subdtype is None: + return space.w_None return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)]) def descr_get_str(self, space): @@ -158,8 +159,20 @@ return space.newlist([space.newtuple([space.wrap(""), self.descr_get_str(space)])]) else: - raise OperationError(space.w_NotImplementedError, space.wrap( - "descr not implemented for record types")) + descr = [] + for name in self.fieldnames: + subdtype = self.fields[name][1] + subdescr = [space.wrap(name)] + if subdtype.is_record_type(): + subdescr.append(subdtype.descr_get_descr(space)) + elif subdtype.subdtype is not None: + subdescr.append(subdtype.subdtype.descr_get_str(space)) + else: + subdescr.append(subdtype.descr_get_str(space)) + if subdtype.shape != []: + subdescr.append(subdtype.descr_get_shape(space)) + descr.append(space.newtuple(subdescr[:])) + return space.newlist(descr) def descr_get_base(self, space): return space.wrap(self.base) @@ -651,6 +664,7 @@ w_box_type = space.gettypefor(interp_boxes.W_Float64Box), alternate_constructors=[space.w_float, space.gettypefor(interp_boxes.W_NumberBox), + space.gettypefor(interp_boxes.W_FloatingBox), ], aliases=["float", "double"], ) @@ -680,7 +694,8 @@ name="complex128", char=NPY_CDOUBLELTR, w_box_type = space.gettypefor(interp_boxes.W_Complex128Box), - alternate_constructors=[space.w_complex], + alternate_constructors=[space.w_complex, + space.gettypefor(interp_boxes.W_ComplexFloatingBox)], aliases=["complex", 'cfloat', 'cdouble'], float_type = self.w_float64dtype, ) @@ -702,7 +717,8 @@ name='string', char=NPY_STRINGLTR, w_box_type = space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], + alternate_constructors=[space.w_str, + space.gettypefor(interp_boxes.W_CharacterBox)], aliases=["str"], ) self.w_unicodedtype = W_Dtype( @@ -736,38 +752,21 @@ char=NPY_HALFLTR, w_box_type=space.gettypefor(interp_boxes.W_Float16Box), ) - ptr_size = rffi.sizeof(rffi.CCHARP) - if ptr_size == 4: - intp_box = interp_boxes.W_Int32Box - intp_type = types.Int32() - intp_num = NPY_INT - uintp_box = interp_boxes.W_UInt32Box - uintp_type = types.UInt32() - uintp_num = NPY_UINT - elif ptr_size == 8: - intp_box = interp_boxes.W_Int64Box - intp_type = types.Int64() - intp_num = NPY_LONG - uintp_box = interp_boxes.W_UInt64Box - uintp_type = types.UInt64() - uintp_num = NPY_ULONG - else: - raise ValueError('unknown point size %d' % ptr_size) self.w_intpdtype = W_Dtype( - intp_type, - num=intp_num, - kind=NPY_INTPLTR, + types.Long(), + num=NPY_LONG, + kind=NPY_SIGNEDLTR, name='intp', char=NPY_INTPLTR, - w_box_type = space.gettypefor(intp_box), + w_box_type = space.gettypefor(interp_boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( - uintp_type, - num=uintp_num, - kind=NPY_UINTPLTR, + types.ULong(), + num=NPY_ULONG, + kind=NPY_UNSIGNEDLTR, name='uintp', char=NPY_UINTPLTR, - w_box_type = 
space.gettypefor(uintp_box), + w_box_type = space.gettypefor(interp_boxes.W_ULongBox), ) float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -731,11 +731,15 @@ def descr_view(self, space, w_dtype=None, w_type=None): if not w_type and w_dtype: try: - if space.is_true(space.issubtype(w_dtype, space.gettypefor(W_NDimArray))): + if space.is_true(space.issubtype( + w_dtype, space.gettypefor(W_NDimArray))): w_type = w_dtype w_dtype = None - except (OperationError, TypeError): - pass + except OperationError, e: + if e.match(space, space.w_TypeError): + pass + else: + raise if w_dtype: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), @@ -1185,12 +1189,15 @@ def take(a, indices, axis, out, mode): assert mode == 'raise' if axis is None: - res = a.ravel()[indices] + from numpy import array + indices = array(indices) + res = a.ravel()[indices.ravel()].reshape(indices.shape) else: + from operator import mul if axis < 0: axis += len(a.shape) s0, s1 = a.shape[:axis], a.shape[axis+1:] - l0 = prod(s0) if s0 else 1 - l1 = prod(s1) if s1 else 1 + l0 = reduce(mul, s0) if s0 else 1 + l1 = reduce(mul, s1) if s1 else 1 res = a.reshape((l0, -1, l1))[:,indices,:].reshape(s0 + (-1,) + s1) if out is not None: out[:] = res @@ -1439,12 +1446,11 @@ arr_iter.next() return w_arr - at unwrap_spec(order=str) -def zeros(space, w_shape, w_dtype=None, order='C'): +def zeros(space, w_shape, w_dtype=None, w_order=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) - return W_NDimArray.from_shape(space, shape, dtype=dtype, order=order) + return W_NDimArray.from_shape(space, shape, dtype=dtype) @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -33,6 +33,9 @@ self.allow_complex = allow_complex self.complex_to_float = complex_to_float + def descr_get_name(self, space): + return space.wrap(self.name) + def descr_repr(self, space): return space.wrap("" % self.name) @@ -373,14 +376,19 @@ w_rdtype = w_ldtype elif w_lhs.is_scalar() and not w_rhs.is_scalar(): w_ldtype = w_rdtype - if (self.int_only and (not w_ldtype.is_int_type() or not w_rdtype.is_int_type()) or - not self.allow_bool and (w_ldtype.is_bool_type() or w_rdtype.is_bool_type()) or - not self.allow_complex and (w_ldtype.is_complex_type() or w_rdtype.is_complex_type())): - raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) calc_dtype = find_binop_result_dtype(space, w_ldtype, w_rdtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) + if (self.int_only and (not w_ldtype.is_int_type() or + not w_rdtype.is_int_type() or + not calc_dtype.is_int_type()) or + not self.allow_bool and (w_ldtype.is_bool_type() or + w_rdtype.is_bool_type()) or + not self.allow_complex and (w_ldtype.is_complex_type() or + w_rdtype.is_complex_type())): + raise OperationError(space.w_TypeError, space.wrap( + "ufunc '%s' not supported for the input types" % self.name)) if space.is_none(w_out): out = None elif not 
isinstance(w_out, W_NDimArray): @@ -417,6 +425,7 @@ __call__ = interp2app(W_Ufunc.descr_call), __repr__ = interp2app(W_Ufunc.descr_repr), + __name__ = GetSetProperty(W_Ufunc.descr_get_name), identity = GetSetProperty(W_Ufunc.descr_get_identity), accumulate = interp2app(W_Ufunc.descr_accumulate), @@ -428,6 +437,8 @@ def find_binop_result_dtype(space, dt1, dt2, promote_to_float=False, promote_bools=False): + if dt2 is None: + return dt1 # dt1.num should be <= dt2.num if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 @@ -498,13 +509,14 @@ promote_bools=False, promote_to_largest=False): if promote_to_largest: if dt.kind == NPY_GENBOOLLTR or dt.kind == NPY_SIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_int64dtype + if dt.get_size() * 8 < LONG_BIT: + return interp_dtype.get_dtype_cache(space).w_longdtype elif dt.kind == NPY_UNSIGNEDLTR: - return interp_dtype.get_dtype_cache(space).w_uint64dtype - elif dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR: - return dt + if dt.get_size() * 8 < LONG_BIT: + return interp_dtype.get_dtype_cache(space).w_ulongdtype else: - assert False + assert dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR + return dt if promote_bools and (dt.kind == NPY_GENBOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: @@ -522,36 +534,32 @@ bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype - complex_type = interp_dtype.get_dtype_cache(space).w_complex128dtype - float_type = interp_dtype.get_dtype_cache(space).w_float64dtype + uint64_dtype = interp_dtype.get_dtype_cache(space).w_uint64dtype + complex_dtype = interp_dtype.get_dtype_cache(space).w_complex128dtype + float_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype if isinstance(w_obj, interp_boxes.W_GenericBox): dtype = w_obj.get_dtype(space) - if current_guess is None: - return dtype return find_binop_result_dtype(space, dtype, current_guess) if space.isinstance_w(w_obj, space.w_bool): - if current_guess is None or current_guess is bool_dtype: - return bool_dtype - return current_guess + return find_binop_result_dtype(space, bool_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_int): - if (current_guess is None or current_guess is bool_dtype or - current_guess is long_dtype): - return long_dtype - return current_guess + return find_binop_result_dtype(space, long_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_long): - if (current_guess is None or current_guess is bool_dtype or - current_guess is long_dtype or current_guess is int64_dtype): - return int64_dtype - return current_guess + try: + space.int_w(w_obj) + except OperationError, e: + if e.match(space, space.w_OverflowError): + return find_binop_result_dtype(space, uint64_dtype, + current_guess) + raise + return find_binop_result_dtype(space, int64_dtype, current_guess) + elif space.isinstance_w(w_obj, space.w_float): + return find_binop_result_dtype(space, float_dtype, current_guess) elif space.isinstance_w(w_obj, space.w_complex): - if (current_guess is None or current_guess is bool_dtype or - current_guess is long_dtype or current_guess is int64_dtype or - current_guess is complex_type or current_guess is float_type): - return complex_type - return current_guess + return complex_dtype elif space.isinstance_w(w_obj, space.w_str): - if (current_guess is None): + if current_guess is None: return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) 
elif current_guess.num == NPY_STRING: @@ -559,12 +567,6 @@ return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess - if current_guess is complex_type: - return complex_type - if space.isinstance_w(w_obj, space.w_float): - return float_type - elif space.isinstance_w(w_obj, space.w_slice): - return long_dtype raise operationerrfmt(space.w_NotImplementedError, 'unable to create dtype from objects, ' '"%T" instance not supported', w_obj) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -69,9 +69,11 @@ return True def find_shape_and_elems(space, w_iterable, dtype): + is_rec_type = dtype is not None and dtype.is_record_type() + if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): + return [], [w_iterable] shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) - is_rec_type = dtype is not None and dtype.is_record_type() while True: if not batch: return shape[:], [] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -33,6 +33,11 @@ assert typeinfo['CFLOAT'] == ('F', 14, 64, 8, np.complex64) assert typeinfo['CDOUBLE'] == ('D', 15, 128, 16, np.complex128) assert typeinfo['HALF'] == ('e', 23, 16, 2, np.float16) + assert typeinfo['INTP'] == ('p', np.dtype('int').num, + self.ptr_size*8, self.ptr_size, + 2**(self.ptr_size*8 - 1) - 1, + -2**(self.ptr_size*8 - 1), + np.dtype('int').type) def test_dtype_basic(self): from numpypy import dtype @@ -49,6 +54,7 @@ assert dtype(int).fields is None assert dtype(int).names is None assert dtype(int).hasobject is False + assert dtype(int).subdtype is None assert dtype(None) is dtype(float) @@ -109,15 +115,11 @@ assert dtype(bool).num == 0 if self.ptr_size == 4: - assert dtype('intp').num == 5 - assert dtype('uintp').num == 6 assert dtype('int32').num == 7 assert dtype('uint32').num == 8 assert dtype('int64').num == 9 assert dtype('uint64').num == 10 else: - assert dtype('intp').num == 7 - assert dtype('uintp').num == 8 assert dtype('int32').num == 5 assert dtype('uint32').num == 6 assert dtype('int64').num == 7 @@ -125,6 +127,8 @@ assert dtype(int).num == 7 assert dtype('int').num == 7 assert dtype('uint').num == 8 + assert dtype('intp').num == 7 + assert dtype('uintp').num == 8 assert dtype(long).num == 9 assert dtype(float).num == 12 assert dtype('float').num == 12 @@ -366,16 +370,22 @@ # numpy allows abstract types in array creation a_n = numpy.array([4,4], numpy.number) + a_f = numpy.array([4,4], numpy.floating) + a_c = numpy.array([4,4], numpy.complexfloating) a_i = numpy.array([4,4], numpy.integer) a_s = numpy.array([4,4], numpy.signedinteger) a_u = numpy.array([4,4], numpy.unsignedinteger) assert a_n.dtype.num == 12 + assert a_f.dtype.num == 12 + assert a_c.dtype.num == 15 assert a_i.dtype.num == 7 assert a_s.dtype.num == 7 assert a_u.dtype.num == 8 assert a_n.dtype is numpy.dtype('float64') + assert a_f.dtype is numpy.dtype('float64') + assert a_c.dtype is numpy.dtype('complex128') if self.ptr_size == 4: assert a_i.dtype is numpy.dtype('int32') assert a_s.dtype is numpy.dtype('int32') @@ -473,8 +483,7 @@ assert numpy.int16('32768') == -32768 def test_uint16(self): - import numpypy as numpy - + import numpy assert numpy.uint16(65535) == 65535 assert numpy.uint16(65536) == 0 assert numpy.uint16('65535') == 65535 @@ -482,8 +491,7 @@ def 
test_int32(self): import sys - import numpypy as numpy - + import numpy x = numpy.int32(23) assert x == 23 assert numpy.int32(2147483647) == 2147483647 @@ -498,10 +506,8 @@ def test_uint32(self): import sys - import numpypy as numpy - + import numpy assert numpy.uint32(10) == 10 - if sys.maxint > 2 ** 31 - 1: assert numpy.uint32(4294967295) == 4294967295 assert numpy.uint32(4294967296) == 0 @@ -518,8 +524,7 @@ def test_int64(self): import sys - import numpypy as numpy - + import numpy if sys.maxint == 2 ** 63 -1: assert numpy.int64.mro() == [numpy.int64, numpy.signedinteger, numpy.integer, numpy.number, @@ -534,30 +539,30 @@ assert numpy.int64(9223372036854775807) == 9223372036854775807 assert numpy.int64(9223372036854775807) == 9223372036854775807 - raises(OverflowError, numpy.int64, 9223372036854775808) raises(OverflowError, numpy.int64, 9223372036854775808L) def test_uint64(self): - import sys - import numpypy as numpy - + import numpy + assert numpy.dtype(numpy.uint64).type is numpy.uint64 assert numpy.uint64.mro() == [numpy.uint64, numpy.unsignedinteger, numpy.integer, numpy.number, numpy.generic, object] - - assert numpy.dtype(numpy.uint64).type is numpy.uint64 - skip("see comment") - # These tests pass "by chance" on numpy, things that are larger than - # platform long (i.e. a python int), don't get put in a normal box, - # instead they become an object array containing a long, we don't have - # yet, so these can't pass. - assert numpy.uint64(9223372036854775808) == 9223372036854775808 - assert numpy.uint64(18446744073709551615) == 18446744073709551615 - raises(OverflowError, numpy.uint64(18446744073709551616)) + import sys + if '__pypy__' not in sys.builtin_module_names: + # These tests pass "by chance" on numpy, things that are larger than + # platform long (i.e. a python int), don't get put in a normal box, + # instead they become an object array containing a long, we don't have + # yet, so these can't pass. 
+ assert numpy.uint64(9223372036854775808) == 9223372036854775808 + assert numpy.uint64(18446744073709551615) == 18446744073709551615 + else: + raises(OverflowError, numpy.int64, 9223372036854775808) + raises(OverflowError, numpy.int64, 18446744073709551615) + raises(OverflowError, numpy.uint64, 18446744073709551616) def test_float16(self): - import numpypy as numpy + import numpy assert numpy.float16.mro() == [numpy.float16, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] @@ -568,8 +573,7 @@ def test_float32(self): - import numpypy as numpy - + import numpy assert numpy.float32.mro() == [numpy.float32, numpy.floating, numpy.inexact, numpy.number, numpy.generic, object] @@ -579,8 +583,7 @@ raises(ValueError, numpy.float32, '23.2df') def test_float64(self): - import numpypy as numpy - + import numpy assert numpy.float64.mro() == [numpy.float64, numpy.floating, numpy.inexact, numpy.number, numpy.generic, float, object] @@ -596,14 +599,14 @@ raises(ValueError, numpy.float64, '23.2df') def test_float_None(self): - import numpypy as numpy + import numpy from math import isnan assert isnan(numpy.float32(None)) assert isnan(numpy.float64(None)) assert isnan(numpy.longdouble(None)) def test_longfloat(self): - import numpypy as numpy + import numpy # it can be float96 or float128 if numpy.longfloat != numpy.float64: assert numpy.longfloat.mro()[1:] == [numpy.floating, @@ -616,8 +619,7 @@ raises(ValueError, numpy.longfloat, '23.2df') def test_complex_floating(self): - import numpypy as numpy - + import numpy assert numpy.complexfloating.__mro__ == (numpy.complexfloating, numpy.inexact, numpy.number, numpy.generic, object) @@ -715,10 +717,14 @@ assert numpy.int16 is numpy.short assert numpy.int8 is numpy.byte assert numpy.bool_ is numpy.bool8 + assert numpy.intp().dtype.num == 7 + assert numpy.intp().dtype.char == 'l' if self.ptr_size == 4: + assert numpy.intp().dtype.name == 'int32' assert numpy.intp is numpy.int32 assert numpy.uintp is numpy.uint32 elif self.ptr_size == 8: + assert numpy.intp().dtype.name == 'int64' assert numpy.intp is numpy.int64 assert numpy.uintp is numpy.uint64 @@ -787,8 +793,22 @@ def test_intp(self): from numpypy import dtype - assert dtype('p') == dtype('intp') - assert dtype('P') == dtype('uintp') + assert dtype('p') is dtype('intp') + assert dtype('P') is dtype('uintp') + #assert dtype('p') is dtype('int') + #assert dtype('P') is dtype('uint') + assert dtype('p').num == 7 + assert dtype('P').num == 8 + #assert dtype('p').char == 'l' + #assert dtype('P').char == 'L' + assert dtype('p').kind == 'i' + assert dtype('P').kind == 'u' + #if self.ptr_size == 4: + # assert dtype('p').name == 'int32' + # assert dtype('P').name == 'uint32' + #else: + # assert dtype('p').name == 'int64' + # assert dtype('P').name == 'uint64' def test_alignment(self): from numpypy import dtype @@ -836,12 +856,12 @@ import numpy as np assert np.dtype('> 2 == [0, 0, 0, 0, 1, 1, 1, 1, 2, 2]).all() - a = array([True, False]) + a = np.array([True, False]) assert (a >> 1 == [0, 0]).all() - a = arange(3, dtype=float) + a = np.arange(3, dtype=float) raises(TypeError, lambda: a >> 1) + a = np.array([123], dtype='uint64') + b = a >> 1 + assert b == 61 + assert b.dtype.type is np.uint64 + a = np.array(123, dtype='uint64') + exc = raises(TypeError, "a >> 1") + assert 'not supported for the input types' in exc.value.message def test_rrshift(self): from numpypy import arange @@ -1400,16 +1412,18 @@ assert (array([[1,2],[3,4]]).prod(1) == [2, 12]).all() def test_prod(self): - from numpypy import 
array, int_, dtype + from numpypy import array, dtype a = array(range(1, 6)) assert a.prod() == 120.0 assert a[:4].prod() == 24.0 - a = array([True, False]) - assert a.prod() == 0 - assert type(a.prod()) is int_ - a = array([True, False], dtype='uint') - assert a.prod() == 0 - assert type(a.prod()) is dtype('uint').type + for dt in ['bool', 'int8', 'uint8', 'int16', 'uint16']: + a = array([True, False], dtype=dt) + assert a.prod() == 0 + assert a.prod().dtype is dtype('uint' if dt[0] == 'u' else 'int') + for dt in ['l', 'L', 'q', 'Q', 'e', 'f', 'd', 'F', 'D']: + a = array([True, False], dtype=dt) + assert a.prod() == 0 + assert a.prod().dtype is dtype(dt) def test_max(self): from numpypy import array, zeros @@ -1492,12 +1506,12 @@ def test_dtype_guessing(self): from numpypy import array, dtype - + import sys assert array([True]).dtype is dtype(bool) assert array([True, False]).dtype is dtype(bool) assert array([True, 1]).dtype is dtype(int) assert array([1, 2, 3]).dtype is dtype(int) - #assert array([1L, 2, 3]).dtype is dtype(long) + assert array([1L, 2, 3]).dtype is dtype('q') assert array([1.2, True]).dtype is dtype(float) assert array([1.2, 5]).dtype is dtype(float) assert array([]).dtype is dtype(float) @@ -1508,6 +1522,12 @@ assert array([int8(3)]).dtype is dtype("int8") assert array([bool_(True)]).dtype is dtype(bool) assert array([bool_(True), 3.0]).dtype is dtype(float) + assert array(sys.maxint + 42).dtype is dtype('Q') + assert array([sys.maxint + 42] * 2).dtype is dtype('Q') + assert array([sys.maxint + 42, 123]).dtype is dtype(float) + assert array([sys.maxint + 42, 123L]).dtype is dtype(float) + assert array([1+2j, 123]).dtype is dtype(complex) + assert array([1+2j, 123L]).dtype is dtype(complex) def test_comparison(self): import operator @@ -2183,12 +2203,6 @@ a[b] = 1. assert (a == [[1., 1., 1.]]).all() - @py.test.mark.xfail - def test_boolean_array(self): - import numpypy as np - a = np.ndarray([1], dtype=bool) - assert a[0] == True - class AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) @@ -2251,7 +2265,6 @@ f.close() - class AppTestMultiDim(BaseNumpyAppTest): def test_init(self): import numpypy @@ -2725,7 +2738,12 @@ assert (arange(10).take([1, 2, 1, 1]) == [1, 2, 1, 1]).all() raises(IndexError, "arange(3).take([15])") a = arange(6).reshape(2, 3) + assert a.take(3) == 3 + assert a.take(3).shape == () assert (a.take([1, 0, 3]) == [1, 0, 3]).all() + assert (a.take([[1, 0], [2, 3]]) == [[1, 0], [2, 3]]).all() + assert (a.take([1], axis=0) == [[3, 4, 5]]).all() + assert (a.take([1], axis=1) == [[1], [4]]).all() assert ((a + a).take([3]) == [6]).all() a = arange(12).reshape(2, 6) assert (a[:,::2].take([3, 2, 1]) == [6, 4, 2]).all() @@ -2822,7 +2840,11 @@ assert b[35] == 200 b[[slice(25, 30)]] = range(5) assert all(a[:5] == range(5)) - raises(TypeError, 'b[[[slice(25, 125)]]]') + import sys + if '__pypy__' not in sys.builtin_module_names: + raises(TypeError, 'b[[[slice(25, 125)]]]') + else: + raises(NotImplementedError, 'b[[[slice(25, 125)]]]') def test_cumsum(self): from numpypy import arange @@ -2976,17 +2998,18 @@ assert j[0] == 12 k = fromstring(self.float16val, dtype='float16') assert k[0] == dtype('float16').type(5.) 
- dt = array([5],dtype='longfloat').dtype - if dt.itemsize == 12: + dt = array([5], dtype='longfloat').dtype + if dt.itemsize == 8: + m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', + dtype='float64') + elif dt.itemsize == 12: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00', dtype='float96') elif dt.itemsize == 16: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00' \ '\x00\x00\x00\x00', dtype='float128') - elif dt.itemsize == 8: - skip('longfloat is float64') else: - skip('unknown itemsize for longfloat') + assert False, 'unknown itemsize for longfloat' assert m[0] == dtype('longfloat').type(5.) def test_fromstring_invalid(self): @@ -3046,7 +3069,13 @@ spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) def test_zeros(self): - from numpypy import zeros + from numpypy import zeros, void + a = zeros((), dtype=[('x', int), ('y', float)]) + assert type(a[()]) is void + assert type(a.item()) is tuple + assert a[()]['x'] == 0 + assert a[()]['y'] == 0 + assert a.shape == () a = zeros(2, dtype=[('x', int), ('y', float)]) raises(IndexError, 'a[0]["xyz"]') assert a[0]['x'] == 0 @@ -3061,7 +3090,12 @@ assert a[1]['y'] == 2 def test_views(self): - from numpypy import array + from numpypy import array, zeros, ndarray + a = zeros((), dtype=[('x', int), ('y', float)]) + raises(IndexError, 'a[0]') + assert type(a['x']) is ndarray + assert a['x'] == 0 + assert a['y'] == 0 a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) raises((IndexError, ValueError), 'array([1])["x"]') raises((IndexError, ValueError), 'a["z"]') @@ -3082,14 +3116,44 @@ def test_creation_and_repr(self): from numpypy import array + a = array((1, 2), dtype=[('x', int), ('y', float)]) + assert a.shape == () + assert repr(a[()]) == '(1, 2.0)' a = array([(1, 2), (3, 4)], dtype=[('x', int), ('y', float)]) assert repr(a[0]) == '(1, 2.0)' + def test_void_copyswap(self): + import numpy as np + dt = np.dtype([('one', ' 0 and x['two'] > 2 + else: + assert x['one'] == 1 and x['two'] == 2 + def test_nested_dtype(self): - from numpypy import zeros + import numpy as np a = [('x', int), ('y', float)] b = [('x', int), ('y', a)] - arr = zeros(3, dtype=b) + arr = np.zeros((), dtype=b) + assert arr['x'] == 0 + arr['x'] = 2 + assert arr['x'] == 2 + exc = raises(IndexError, "arr[3L]") + assert exc.value.message == "0-d arrays can't be indexed" + exc = raises(ValueError, "arr['xx'] = 2") + assert exc.value.message == "field named xx not found" + assert arr['y'].dtype == a + assert arr['y'].shape == () + assert arr['y'][()]['x'] == 0 + assert arr['y'][()]['y'] == 0 + arr['y'][()]['x'] = 2 + arr['y'][()]['y'] = 3 + assert arr['y'][()]['x'] == 2 + assert arr['y'][()]['y'] == 3 + arr = np.zeros(3, dtype=b) arr[1]['x'] = 15 assert arr[1]['x'] == 15 arr[1]['y']['y'] = 3.5 @@ -3214,11 +3278,15 @@ def test_subarrays(self): from numpypy import dtype, array, zeros - d = dtype([("x", "int", 3), ("y", "float", 5)]) + + a = zeros((), dtype=d) + #assert a['x'].dtype == int + #assert a['x'].shape == (3,) + #assert (a['x'] == [0, 0, 0]).all() + a = array([([1, 2, 3], [0.5, 1.5, 2.5, 3.5, 4.5]), ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) - for v in ['x', u'x', 0, -2]: assert (a[0][v] == [1, 2, 3]).all() assert (a[1][v] == [4, 5, 6]).all() @@ -3236,6 +3304,13 @@ a[0]["x"][0] = 200 assert a[0]["x"][0] == 200 + a[1]["x"][2] = 123 + assert (a[1]["x"] == [4, 5, 123]).all() + a[1]['y'][3] = 4 + assert a[1]['y'][3] == 4 + assert a['y'][1][3] == 4 + a['y'][1][4] = 5 + assert a[1]['y'][4] == 5 d = dtype([("x", "int64", (2, 
3))]) a = array([([[1, 2, 3], [4, 5, 6]],)], dtype=d) @@ -3309,14 +3384,16 @@ a = array([('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], dtype=dt) - s = str(a) i = a.item() assert isinstance(i, tuple) assert len(i) == 4 - skip('incorrect formatting via dump_data') - assert s.endswith("[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " - "[[7, 8, 9], [10, 11, 12]]])]") - + import sys + if '__pypy__' not in sys.builtin_module_names: + assert str(a) == "[('aaaa', 1.0, 8.0, [[[1, 2, 3], [4, 5, 6]], " \ + "[[7, 8, 9], [10, 11, 12]]])]" + else: + assert str(a) == "array([('aaaa', 1.0, 8.0, [1, 2, 3, 4, 5, 6, " \ + "7, 8, 9, 10, 11, 12])])" def test_issue_1589(self): import numpypy as numpy @@ -3329,6 +3406,7 @@ a = np.array([1,2,3], dtype='int16') assert (a * 2).dtype == np.dtype('int16') + class AppTestPyPy(BaseNumpyAppTest): def setup_class(cls): if option.runappdirect and '__pypy__' not in sys.builtin_module_names: diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -4,8 +4,9 @@ spaceconfig = dict(usemodules=["micronumpy", "binascii", "struct"]) def test_init(self): - import numpypy as np + import numpy as np import math + import sys assert np.intp() == np.intp(0) assert np.intp('123') == np.intp(123) raises(TypeError, np.intp, None) @@ -17,6 +18,12 @@ assert np.complex_() == np.complex_(0) #raises(TypeError, np.complex_, '1+2j') assert math.isnan(np.complex_(None)) + for c in ['i', 'I', 'l', 'L', 'q', 'Q']: + assert np.dtype(c).type().dtype.char == c + for c in ['l', 'q']: + assert np.dtype(c).type(sys.maxint) == sys.maxint + for c in ['L', 'Q']: + assert np.dtype(c).type(sys.maxint + 42) == sys.maxint + 42 def test_builtin(self): import numpy as np @@ -37,7 +44,7 @@ assert len(np.string_('123')) == 3 def test_pickle(self): - from numpypy import dtype, zeros + from numpy import dtype, zeros try: from numpy.core.multiarray import scalar except ImportError: @@ -111,8 +118,17 @@ assert a.squeeze() is a raises(TypeError, a.squeeze, 2) + def test_bitshift(self): + import numpy as np + assert np.int32(123) >> 1 == 61 + assert type(np.int32(123) >> 1) is np.int_ + assert np.int64(123) << 1 == 246 + assert type(np.int64(123) << 1) is np.int64 + exc = raises(TypeError, "np.uint64(123) >> 1") + assert 'not supported for the input types' in exc.value.message + def test_attributes(self): - import numpypy as np + import numpy as np value = np.dtype('int64').type(12345) assert value.dtype == np.dtype('int64') assert value.size == 1 diff --git a/pypy/module/micronumpy/test/test_sorting.py b/pypy/module/micronumpy/test/test_sorting.py --- a/pypy/module/micronumpy/test/test_sorting.py +++ b/pypy/module/micronumpy/test/test_sorting.py @@ -45,6 +45,9 @@ def test_argsort_axis(self): from numpypy import array + a = array([]) + for axis in [None, -1, 0]: + assert a.argsort(axis=axis).shape == (0,) a = array([[4, 2], [1, 3]]) assert (a.argsort(axis=None) == [2, 1, 3, 0]).all() assert (a.argsort(axis=-1) == [[1, 0], [0, 1]]).all() @@ -306,9 +309,8 @@ # tests from numpy/core/tests/test_regression.py def test_sort_bigendian(self): - skip('not implemented yet') - from numpypy import array, dtype - a = array(range(11),dtype='float64') + from numpy import array, dtype + a = array(range(11), dtype='float64') c = a.astype(dtype('" assert repr(ufunc) == "" + assert add.__name__ == 'add' def test_ufunc_attrs(self): from numpypy import add, multiply, sin 
@@ -390,23 +391,17 @@ assert (a == ref).all() def test_signbit(self): - from numpypy import signbit, add - + from numpy import signbit, add, copysign, nan + assert signbit(add.identity) == False assert (signbit([0, 0.0, 1, 1.0, float('inf')]) == - [False, False, False, False, False]).all() + [False, False, False, False, False]).all() assert (signbit([-0, -0.0, -1, -1.0, float('-inf')]) == - [False, True, True, True, True]).all() - - a = add.identity - assert signbit(a) == False - - skip('sign of nan is non-determinant') - assert (signbit([float('nan'), float('-nan'), -float('nan')]) == - [False, True, True]).all() + [False, True, True, True, True]).all() + assert (signbit([copysign(nan, 1), copysign(nan, -1)]) == + [False, True]).all() def test_reciprocal(self): - from numpypy import array, reciprocal - + from numpy import array, reciprocal inf = float('inf') nan = float('nan') reference = [-0.2, inf, -inf, 2.0, nan] diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -12,7 +12,7 @@ from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, raw_storage_getitem) from rpython.rlib.objectmodel import specialize -from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, most_neg_value_of +from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, most_neg_value_of, LONG_BIT from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rstruct.runpack import runpack from rpython.rlib.rstruct.nativefmttable import native_is_bigendian @@ -568,16 +568,6 @@ BoxType = interp_boxes.W_UInt32Box format_code = "I" -class Long(BaseType, Integer): - T = rffi.LONG - BoxType = interp_boxes.W_LongBox - format_code = "l" - -class ULong(BaseType, Integer): - T = rffi.ULONG - BoxType = interp_boxes.W_ULongBox - format_code = "L" - def _int64_coerce(self, space, w_item): try: return self._base_coerce(space, w_item) @@ -596,7 +586,8 @@ BoxType = interp_boxes.W_Int64Box format_code = "q" - _coerce = func_with_new_name(_int64_coerce, '_coerce') + if LONG_BIT == 32: + _coerce = func_with_new_name(_int64_coerce, '_coerce') def _uint64_coerce(self, space, w_item): try: @@ -618,6 +609,31 @@ _coerce = func_with_new_name(_uint64_coerce, '_coerce') +class Long(BaseType, Integer): + T = rffi.LONG + BoxType = interp_boxes.W_LongBox + format_code = "l" + +def _ulong_coerce(self, space, w_item): + try: + return self._base_coerce(space, w_item) + except OperationError, e: + if not e.match(space, space.w_OverflowError): + raise + bigint = space.bigint_w(w_item) + try: + value = bigint.touint() + except OverflowError: + raise OperationError(space.w_OverflowError, space.w_None) + return self.box(value) + +class ULong(BaseType, Integer): + T = rffi.ULONG + BoxType = interp_boxes.W_ULongBox + format_code = "L" + + _coerce = func_with_new_name(_ulong_coerce, '_coerce') + class Float(Primitive): _mixin_ = True @@ -1620,6 +1636,8 @@ from pypy.module.micronumpy.interp_dtype import new_string_dtype if isinstance(w_item, interp_boxes.W_StringBox): return w_item + if w_item is None: + w_item = space.wrap('') arg = space.str_w(space.str(w_item)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -1733,13 +1751,16 @@ def _coerce(self, space, arr, ofs, dtype, w_items, shape): # TODO: Make sure the shape and the array match from interp_dtype import W_Dtype - items_w = space.fixedview(w_items) + if w_items is not None: + items_w = 
space.fixedview(w_items) + else: + items_w = [None] * shape[0] subdtype = dtype.subdtype assert isinstance(subdtype, W_Dtype) itemtype = subdtype.itemtype if len(shape) <= 1: for i in range(len(items_w)): - w_box = itemtype.coerce(space, dtype.subdtype, items_w[i]) + w_box = itemtype.coerce(space, subdtype, items_w[i]) itemtype.store(arr, 0, ofs, w_box) ofs += itemtype.get_element_size() else: @@ -1758,7 +1779,9 @@ @jit.unroll_safe def store(self, arr, i, ofs, box): + assert i == 0 assert isinstance(box, interp_boxes.W_VoidBox) + assert box.dtype is box.arr.dtype for k in range(box.arr.dtype.get_size()): arr.storage[k + ofs] = box.arr.storage[k + box.ofs] @@ -1819,29 +1842,35 @@ def coerce(self, space, dtype, w_item): if isinstance(w_item, interp_boxes.W_VoidBox): return w_item - # we treat every sequence as sequence, no special support - # for arrays - if not space.issequence_w(w_item): - raise OperationError(space.w_TypeError, space.wrap( - "expected sequence")) - if len(dtype.fields) != space.len_w(w_item): - raise OperationError(space.w_ValueError, space.wrap( - "wrong length")) - items_w = space.fixedview(w_item) + if w_item is not None: + # we treat every sequence as sequence, no special support + # for arrays + if not space.issequence_w(w_item): + raise OperationError(space.w_TypeError, space.wrap( + "expected sequence")) + if len(dtype.fields) != space.len_w(w_item): + raise OperationError(space.w_ValueError, space.wrap( + "wrong length")) + items_w = space.fixedview(w_item) + else: + items_w = [None] * len(dtype.fields) arr = VoidBoxStorage(dtype.get_size(), dtype) for i in range(len(items_w)): ofs, subdtype = dtype.fields[dtype.fieldnames[i]] itemtype = subdtype.itemtype - w_item = items_w[i] - w_box = itemtype.coerce(space, subdtype, w_item) + w_box = itemtype.coerce(space, subdtype, items_w[i]) itemtype.store(arr, 0, ofs, w_box) return interp_boxes.W_VoidBox(arr, 0, dtype) @jit.unroll_safe def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - for k in range(box.arr.dtype.get_size()): - arr.storage[k + i] = box.arr.storage[k + box.ofs] + for k in range(box.dtype.get_size()): + arr.storage[k + i + ofs] = box.arr.storage[k + box.ofs] + + def byteswap(self, w_v): + # XXX implement + return w_v def to_builtin_type(self, space, box): assert isinstance(box, interp_boxes.W_VoidBox) diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -70,11 +70,11 @@ return None copyright_str = """ -Copyright 2003-2013 PyPy development team. +Copyright 2003-2014 PyPy development team. All Rights Reserved. For further information, see -Portions Copyright (c) 2001-2013 Python Software Foundation. +Portions Copyright (c) 2001-2014 Python Software Foundation. All Rights Reserved. Portions Copyright (c) 2000 BeOpen.com. diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -51,6 +51,13 @@ return w_iter list_iter._annspecialcase_ = 'specialize:memo' +def tuple_iter(space): + "Utility that returns the app-level descriptor tuple.__iter__." 
+ w_src, w_iter = space.lookup_in_type_where(space.w_tuple, + '__iter__') + return w_iter +tuple_iter._annspecialcase_ = 'specialize:memo' + def raiseattrerror(space, w_obj, name, w_descr=None): if w_descr is None: raise operationerrfmt(space.w_AttributeError, diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -945,7 +945,8 @@ def _extend_from_iterable(self, w_list, w_iterable): space = self.space - if isinstance(w_iterable, W_AbstractTupleObject): + if (isinstance(w_iterable, W_AbstractTupleObject) + and space._uses_tuple_iter(w_iterable)): w_list.__init__(space, w_iterable.getitems_copy()) return diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -382,7 +382,7 @@ self.wrap("expected length %d, got %d" % (expected, got))) def unpackiterable(self, w_obj, expected_length=-1): - if isinstance(w_obj, W_AbstractTupleObject): + if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.getitems_copy() elif type(w_obj) is W_ListObject: t = w_obj.getitems_copy() @@ -396,7 +396,7 @@ def fixedview(self, w_obj, expected_length=-1, unroll=False): """ Fast paths """ - if isinstance(w_obj, W_AbstractTupleObject): + if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.tolist() elif type(w_obj) is W_ListObject: if unroll: @@ -421,7 +421,7 @@ def listview(self, w_obj, expected_length=-1): if type(w_obj) is W_ListObject: t = w_obj.getitems() - elif isinstance(w_obj, W_AbstractTupleObject): + elif isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): t = w_obj.getitems_copy() elif isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): t = w_obj.getitems() @@ -440,7 +440,7 @@ return w_obj.listview_str() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_str() - if isinstance(w_obj, W_StringObject): + if isinstance(w_obj, W_StringObject) and self._uses_no_iter(w_obj): return w_obj.listview_str() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_str() @@ -455,7 +455,7 @@ return w_obj.listview_unicode() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: return w_obj.listview_unicode() - if isinstance(w_obj, W_UnicodeObject): + if isinstance(w_obj, W_UnicodeObject) and self._uses_no_iter(w_obj): return w_obj.listview_unicode() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): return w_obj.getitems_unicode() @@ -490,6 +490,13 @@ from pypy.objspace.descroperation import list_iter return self.lookup(w_obj, '__iter__') is list_iter(self) + def _uses_tuple_iter(self, w_obj): + from pypy.objspace.descroperation import tuple_iter + return self.lookup(w_obj, '__iter__') is tuple_iter(self) + + def _uses_no_iter(self, w_obj): + return self.lookup(w_obj, '__iter__') is None + def sliceindices(self, w_slice, w_length): if isinstance(w_slice, W_SliceObject): a, b, c = w_slice.indices3(self, self.int_w(w_length)) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -1314,6 +1314,57 @@ non_list = NonList() assert [] != non_list + def test_extend_from_empty_list_with_subclasses(self): + # some of these tests used to fail by ignoring the + # custom __iter__() --- but only if the 
list has so + # far the empty strategy, as opposed to .extend()ing + # a non-empty list. + class T(tuple): + def __iter__(self): + yield "ok" + assert list(T([5, 6])) == ["ok"] + # + class L(list): + def __iter__(self): + yield "ok" + assert list(L([5, 6])) == ["ok"] + assert list(L([5.2, 6.3])) == ["ok"] + # + class S(str): + def __iter__(self): + yield "ok" + assert list(S("don't see me")) == ["ok"] + # + class U(unicode): + def __iter__(self): + yield "ok" + assert list(U(u"don't see me")) == ["ok"] + + def test_extend_from_nonempty_list_with_subclasses(self): + l = ["hi!"] + class T(tuple): + def __iter__(self): + yield "okT" + l.extend(T([5, 6])) + # + class L(list): + def __iter__(self): + yield "okL" + l.extend(L([5, 6])) + l.extend(L([5.2, 6.3])) + # + class S(str): + def __iter__(self): + yield "okS" + l.extend(S("don't see me")) + # + class U(unicode): + def __iter__(self): + yield "okU" + l.extend(U(u"don't see me")) + # + assert l == ["hi!", "okT", "okL", "okL", "okS", "okU"] + class AppTestForRangeLists(AppTestW_ListObject): spaceconfig = {"objspace.std.withrangelist": True} diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -115,10 +115,11 @@ continue print "Picking %s" % p binaries.append((p, p.basename)) - if pypy_c.dirpath().join("libpypy-c.lib").check(): - shutil.copyfile(str(pypy_c.dirpath().join("libpypy-c.lib")), + importlib_name = 'python27.lib' + if pypy_c.dirpath().join(importlib_name).check(): + shutil.copyfile(str(pypy_c.dirpath().join(importlib_name)), str(pypydir.join('include/python27.lib'))) - print "Picking %s as %s" % (pypy_c.dirpath().join("libpypy-c.lib"), + print "Picking %s as %s" % (pypy_c.dirpath().join(importlib_name), pypydir.join('include/python27.lib')) else: pass diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -412,10 +412,7 @@ return SomeByteArray(can_be_None=can_be_None) def add((b1, b2)): - result = SomeByteArray() - if b1.is_immutable_constant() and b2.is_immutable_constant(): - result.const = b1.const + b2.const - return result + return SomeByteArray() class __extend__(pairtype(SomeByteArray, SomeInteger)): def getitem((s_b, s_i)): @@ -429,10 +426,7 @@ pairtype(SomeChar, SomeByteArray), pairtype(SomeByteArray, SomeChar)): def add((b1, b2)): - result = SomeByteArray() - if b1.is_immutable_constant() and b2.is_immutable_constant(): - result.const = b1.const + b2.const - return result + return SomeByteArray() class __extend__(pairtype(SomeChar, SomeChar)): diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -122,7 +122,7 @@ return constpropagate(unicode, [s_unicode], SomeUnicodeString()) def builtin_bytearray(s_str): - return constpropagate(bytearray, [s_str], SomeByteArray()) + return SomeByteArray() def our_issubclass(cls1, cls2): """ we're going to try to be less silly in the face of old-style classes""" diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -215,7 +215,8 @@ class SomeStringOrUnicode(SomeObject): - """Base class for shared implementation of SomeString and SomeUnicodeString. + """Base class for shared implementation of SomeString, + SomeUnicodeString and SomeByteArray. 
Cannot be an annotation.""" @@ -228,6 +229,7 @@ if can_be_None: self.can_be_None = True if no_nul: + assert self.immutable #'no_nul' cannot be used with SomeByteArray self.no_nul = True def can_be_none(self): @@ -263,6 +265,7 @@ class SomeByteArray(SomeStringOrUnicode): + immutable = False knowntype = bytearray diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -3987,7 +3987,9 @@ return bytearray("xyz") a = self.RPythonAnnotator() - assert isinstance(a.build_types(f, []), annmodel.SomeByteArray) + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeByteArray) + assert not s.is_constant() # never a constant! def test_bytearray_add(self): def f(a): diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -125,10 +125,12 @@ class ArrayDescr(AbstractDescr): def __init__(self, A): - self.A = A + self.A = self.OUTERA = A + if isinstance(A, lltype.Struct): + self.A = A._flds[A._arrayfld] def __repr__(self): - return 'ArrayDescr(%r)' % (self.A,) + return 'ArrayDescr(%r)' % (self.OUTERA,) def is_array_of_pointers(self): return getkind(self.A.OF) == 'ref' @@ -424,6 +426,8 @@ def bh_arraylen_gc(self, a, descr): array = a._obj.container + if descr.A is not descr.OUTERA: + array = getattr(array, descr.OUTERA._arrayfld) return array.getlength() def bh_getarrayitem_gc(self, a, index, descr): diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py --- a/rpython/jit/backend/llsupport/test/test_descr.py +++ b/rpython/jit/backend/llsupport/test/test_descr.py @@ -424,3 +424,11 @@ " > >") # caching: assert fielddescr is get_field_arraylen_descr(c0, rstr.STR) + +def test_bytearray_descr(): + c0 = GcCache(False) + descr = get_array_descr(c0, rstr.STR) # for bytearray + assert descr.flag == FLAG_UNSIGNED + assert descr.basesize == struct.calcsize("PP") # hash, length + assert descr.lendescr.offset == struct.calcsize("P") # hash + assert not descr.is_array_of_pointers() diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -13,6 +13,7 @@ from rpython.rlib.jit import _we_are_jitted from rpython.rlib.rgc import lltype_is_gc from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rclass, rffi +from rpython.rtyper.lltypesystem import rbytearray from rpython.rtyper.rclass import IR_QUASIIMMUTABLE, IR_QUASIIMMUTABLE_ARRAY from rpython.translator.unsimplify import varoftype @@ -643,6 +644,12 @@ return SpaceOperation('arraylen_gc', [op.args[0], arraydescr], op.result) + def rewrite_op_getarraysubstruct(self, op): + ARRAY = op.args[0].concretetype.TO + assert ARRAY._gckind == 'raw' + assert ARRAY._hints.get('nolength') is True + return self.rewrite_op_direct_ptradd(op) + def _array_of_voids(self, ARRAY): return ARRAY.OF == lltype.Void @@ -836,9 +843,14 @@ optype = op.args[0].concretetype if optype == lltype.Ptr(rstr.STR): opname = "strlen" + elif optype == lltype.Ptr(rstr.UNICODE): + opname = "unicodelen" + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + return SpaceOperation('arraylen_gc', [op.args[0], bytearraydescr], + op.result) else: - assert optype == lltype.Ptr(rstr.UNICODE) - opname = 
"unicodelen" + assert 0, "supported type %r" % (optype,) return SpaceOperation(opname, [op.args[0]], op.result) def rewrite_op_getinteriorfield(self, op): @@ -850,6 +862,12 @@ elif optype == lltype.Ptr(rstr.UNICODE): opname = "unicodegetitem" return SpaceOperation(opname, [op.args[0], op.args[2]], op.result) + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + v_index = op.args[2] + return SpaceOperation('getarrayitem_gc_i', + [op.args[0], v_index, bytearraydescr], + op.result) else: v_inst, v_index, c_field = op.args if op.result.concretetype is lltype.Void: @@ -876,6 +894,11 @@ opname = "unicodesetitem" return SpaceOperation(opname, [op.args[0], op.args[2], op.args[3]], op.result) + elif optype == lltype.Ptr(rbytearray.BYTEARRAY): + bytearraydescr = self.cpu.arraydescrof(rbytearray.BYTEARRAY) + opname = "setarrayitem_gc_i" + return SpaceOperation(opname, [op.args[0], op.args[2], op.args[3], + bytearraydescr], op.result) else: v_inst, v_index, c_field, v_value = op.args if v_value.concretetype is lltype.Void: @@ -1709,6 +1732,8 @@ "stroruni.copy_string_to_raw": EffectInfo.OS_UNI_COPY_TO_RAW } CHR = lltype.UniChar + elif SoU.TO == rbytearray.BYTEARRAY: + raise NotSupported("bytearray operation") else: assert 0, "args[0].concretetype must be STR or UNICODE" From noreply at buildbot.pypy.org Thu Jan 23 04:12:33 2014 From: noreply at buildbot.pypy.org (wlav) Date: Thu, 23 Jan 2014 04:12:33 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: allow bind_object() to cast Message-ID: <20140123031233.0DECB1C3969@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r68862:8e2e375941a4 Date: 2014-01-22 19:11 -0800 http://bitbucket.org/pypy/pypy/changeset/8e2e375941a4/ Log: allow bind_object() to cast diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -1217,8 +1217,8 @@ address = _addressof(space, w_obj) return space.wrap(address) - at unwrap_spec(owns=bool) -def bind_object(space, w_obj, w_pycppclass, owns=False): + at unwrap_spec(owns=bool, cast=bool) +def bind_object(space, w_obj, w_pycppclass, owns=False, cast=False): """Takes an address and a bound C++ class proxy, returns a bound instance.""" try: # attempt address from array or C++ instance @@ -1233,4 +1233,4 @@ raise OperationError(space.w_TypeError, space.wrap("no such class: %s" % space.str_w(w_pycppclass))) cppclass = space.interp_w(W_CPPClass, w_cppclass, can_be_None=False) - return wrap_cppobject(space, rawobject, cppclass, do_cast=False, python_owns=owns) + return wrap_cppobject(space, rawobject, cppclass, do_cast=cast, python_owns=owns) diff --git a/pypy/module/cppyy/test/advancedcpp.h b/pypy/module/cppyy/test/advancedcpp.h --- a/pypy/module/cppyy/test/advancedcpp.h +++ b/pypy/module/cppyy/test/advancedcpp.h @@ -35,6 +35,8 @@ virtual base_class* cycle(base_class* b) { return b; } virtual base_class* clone() { return new base_class; } + virtual void* mask(void* p) { return p; } + public: int m_b; double m_db; diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/cppyy/test/test_advancedcpp.py @@ -517,6 +517,20 @@ assert isinstance(b.clone(), base_class) # TODO: clone() leaks assert isinstance(d.clone(), derived_class) # TODO: clone() leaks + # special case when round-tripping through a void* ptr + voidp = b.mask(d) 
+ assert not isinstance(voidp, base_class) + assert not isinstance(voidp, derived_class) + + d1 = cppyy.bind_object(voidp, base_class, cast=True) + assert isinstance(d1, derived_class) + assert d1 is d + + b1 = cppyy.bind_object(voidp, base_class) + assert isinstance(b1, base_class) + assert cppyy.addressof(b1) == cppyy.addressof(d) + assert not (b1 is d) + def test13_actual_type_virtual_multi(self): """Test auto-downcast in adverse inheritance situation""" From noreply at buildbot.pypy.org Thu Jan 23 10:07:53 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 23 Jan 2014 10:07:53 +0100 (CET) Subject: [pypy-commit] stmgc c7: do minor collections before acquiring the exclusive lock on commit Message-ID: <20140123090753.582CD1D2424@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r671:b5bde85ec52b Date: 2014-01-23 10:07 +0100 http://bitbucket.org/pypy/stmgc/changeset/b5bde85ec52b/ Log: do minor collections before acquiring the exclusive lock on commit diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -321,13 +321,19 @@ void stm_stop_transaction(void) { assert(_STM_TL->running_transaction); + + /* do the minor_collection here and not in nursery_on_commit, + since here we can still run concurrently with other threads + as we don't hold the exclusive lock yet. */ + _stm_minor_collect(); + + /* Some operations require us to have the EXCLUSIVE lock */ stm_stop_shared_lock(); stm_start_exclusive_lock(); _STM_TL->jmpbufptr = NULL; /* cannot abort any more */ - /* do a minor_collection, - push uncommitted objects to other threads, + /* push uncommitted objects to other threads, make completely uncommitted pages SHARED, */ nursery_on_commit(); diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -273,7 +273,9 @@ void nursery_on_commit() { - minor_collect(); + /* DON'T do a minor_collect. 
This is already done in + the caller (optimization) */ + /* minor_collect(); */ /* uncommitted objects / partially COMMITTED pages */ push_uncommitted_to_other_threads(); From noreply at buildbot.pypy.org Thu Jan 23 10:51:46 2014 From: noreply at buildbot.pypy.org (stefanor) Date: Thu, 23 Jan 2014 10:51:46 +0100 (CET) Subject: [pypy-commit] cffi default: wchar_t is 4 byte, signed on arm64 Message-ID: <20140123095146.0F4F51C06CD@cobra.cs.uni-duesseldorf.de> Author: Stefano Rivera Branch: Changeset: r1459:cee02a6ecfcd Date: 2014-01-22 13:06 +0200 http://bitbucket.org/cffi/cffi/changeset/cee02a6ecfcd/ Log: wchar_t is 4 byte, signed on arm64 diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1,4 +1,5 @@ import py +import platform import sys, ctypes from cffi import FFI, CDefError from testing.support import * @@ -755,6 +756,8 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff + elif platform.machine() == 'aarch64': # 4 bytes, unsigned + assert int(p) == 0xffffffff else: # 4 bytes, signed assert int(p) == -1 p = ffi.cast("int", u+'\u1234') From noreply at buildbot.pypy.org Thu Jan 23 10:51:47 2014 From: noreply at buildbot.pypy.org (stefanor) Date: Thu, 23 Jan 2014 10:51:47 +0100 (CET) Subject: [pypy-commit] cffi default: Skip test that crashes the interpreter on sparc Message-ID: <20140123095147.1EAF61C06CD@cobra.cs.uni-duesseldorf.de> Author: Stefano Rivera Branch: Changeset: r1460:10240375e6e4 Date: 2014-01-22 13:08 +0200 http://bitbucket.org/cffi/cffi/changeset/10240375e6e4/ Log: Skip test that crashes the interpreter on sparc diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1133,6 +1133,9 @@ xxx def test_opaque_integer_as_function_result(): + import platform + if platform.machine().startswith('sparc'): + py.test.skip('Breaks horribly on sparc (SIGILL + corrupted stack)') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. XXX think about something better :-( ffi = FFI() From noreply at buildbot.pypy.org Thu Jan 23 16:47:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 23 Jan 2014 16:47:36 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: First version of my FOSDEM 2014 talk Message-ID: <20140123154736.B39981C0962@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5133:492c0a7dd061 Date: 2014-01-23 16:46 +0100 http://bitbucket.org/pypy/extradoc/changeset/492c0a7dd061/ Log: First version of my FOSDEM 2014 talk diff --git a/talk/fosdem2014/talk.rst b/talk/fosdem2014/talk.rst new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/talk.rst @@ -0,0 +1,207 @@ +========================================================== +Using All These Cores: Transactional Memory under the hood +========================================================== + + +.. summary: + - Intro + - Using multiple threads: C++, Java; Jython, IronPython + - the GIL in CPython + - "bytecode" is uninteresting for the Python programmer + - but larger blocks are + - if we can make these larger blocks atomic, we win + - "with atomic:" + - theoretical only so far! + - best example: event-driven *non-multithreaded* systems + - under the hood: transactional memory + + +Introduction +============ + +* Armin Rigo + +* PyPy dev, CPython dev + + +Problem +======= + +* Most computer's CPUs today have multiple cores + +* How to use them? 
+ + +Multithread programming +======================= + +* C, C++, Java, .NET + +* Jython, IronPython + + +CPython, PyPy +============= + +* No story so far + +* Alternatives for various cases + +* Some fine and some horrible + + +The GIL +======= + +* Global Interpreter Lock + +* "Each bytecode is executed atomically" + + +Transactional Memory +==================== + +* Recent research + +* Optimistically runs multiple threads even if they + are supposed to be waiting on the same lock + +* High overheads, working on it + + +Expected results +================ + +* Runs multiple threads despite having a single GIL + +* Does not remove the GIL, but solves the original problem anyway + + +Kinds of Transactional Memory +============================= + +* STM: Software Transactional Memory + +* HTM: Hardware Transactional Memory + +* Hybrids + + +Status +====== + +* STM is still at least 2x slower + +* HTM in Ruby with Intel Haswell CPUs: not bad but + still disappointing (imo) + + +STM C7 +====== + +* Our group's research + +* Hope: much less than 2x slower for "PyPy-like" usages + +* (description) + + +Atomic sections +=============== + +* GIL = "each bytecode is atomic" + +* One bytecode? Obscure for the regular Python programmer + +* Larger atomic sections: ``with atomic:`` + + +So... +===== + +* New way to synchronize multiple threads: ``with atomic:`` + +* All ``atomic`` blocks appear to run serialized + +* With STM/HTM, they actually run in parallel as far as possible + + +No threads? +=========== + +* Works even if you don't use threads! + +* Modify the Twisted reactor to start a pool of threads, + and to run all events in ``with atomic:`` + +* The end result is the same, for any Twisted program + + +Behind-the-scene threads +======================== + +* The thread pool added behind the scene lets a STM/HTM-enabled + Python run on several cores + +* The ``with atomic:`` means that the semantics of the Twisted + program didn't change + + +Optimistic summary +================== + +* If you are a (Twisted / Eventlet / Stackless / etc.) developer + +* Just wait and your program will run on multiple cores ``:-)`` + + +Conflicts +========= + +* Your program will likely fail to use multiple cores out of + the box + +* Because of "conflicts": events that should be independent but + are not (e.g. incrementing a global counter, etc.) + + +Some work left for you to do +============================ + +* You need to figure out where the conficts are, and trim them + +* Maybe using some debugger-like tool that reports conflicts + + +What did we win? +================ + +* The point is that your program is always *correct* + +* You need to work in order to fix the most obvious conflicts + +* Regular approach to multithreading: your program is always *fast*, + but you need to work in order to fix all the bugs (races, deadlocks...) + + +Scope +===== + +* Twisted / Eventlet / Stackless / etc.: event-driven programming + +* Any program computing something complicated, e.g. over all items in + a dictionary, occasionally updating a shared state, etc. 
+ +* In general, any CPU-bound program with identifiable sections that + have a good chance to be parallelizable: "a good chance" is enough + + +Conclusion +========== + +* Mostly theoretical for now: there is a risk it won't work in + practice + +* I bet it will ``:-)`` + +* Expect progress in the following months: http://morepypy.blogspot.com/ From noreply at buildbot.pypy.org Thu Jan 23 16:47:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 23 Jan 2014 16:47:37 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: merge heads Message-ID: <20140123154737.E0F521C0962@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5134:a158a379906c Date: 2014-01-23 16:47 +0100 http://bitbucket.org/pypy/extradoc/changeset/a158a379906c/ Log: merge heads diff --git a/sprintinfo/leysin-winter-2014/planning.txt b/sprintinfo/leysin-winter-2014/planning.txt --- a/sprintinfo/leysin-winter-2014/planning.txt +++ b/sprintinfo/leysin-winter-2014/planning.txt @@ -6,18 +6,18 @@ Remi Meier Maciej Fijalkowski Romain Guillebert -Armin Rigo (late) +Armin Rigo Manuel Jacob - +Antonio Cuni Topics ------ -* numpy stuff, fix bugs from bug tracker (rguillebert, ?) +* numpy stuff, fix bugs from bug tracker (rguillebert, antocuni around) * look at codespeed2 -* resume-refactor branch (rguillebert, fijal) (PROGRESS) +* resume-refactor branch (fijal, rguillebert) MORE PROGRESS * GC pinning @@ -27,14 +27,20 @@ * CFFI 1.0 -* STM (remi, armin) SOME PROGRESS in transaction breaks +* STM (remi, armin) DONE in transaction breaks, started c7 * discuss about C++ / cppyy, look into importing pyshiboken (johan pessimistic, ?) -* ctypes: https://bugs.pypy.org/issue1671 +* try cppyy to run on windows (johan) IN PROGRESS + +* ctypes: https://bugs.pypy.org/issue1671 DONE * longs multiplication: patch at https://bugs.pypy.org/issue892 -* look into merging refactor-str-types (johan, mjacob) +* look into merging refactor-str-types (mjacob, antocuni) FIX TRANSLATION -* tweaking ast classes: https://bugs.pypy.org/issue1673 +* tweaking ast classes: https://bugs.pypy.org/issue1673 (mjacob) + +* skiing (fijal, DONE) + +* add jit_merge_point to tuple_contains (anybody) From noreply at buildbot.pypy.org Thu Jan 23 16:50:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 23 Jan 2014 16:50:54 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Rename, to make room for rguillebert's other talks :-) Message-ID: <20140123155054.DB9AB1C0962@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5135:0c6e3ee25339 Date: 2014-01-23 16:50 +0100 http://bitbucket.org/pypy/extradoc/changeset/0c6e3ee25339/ Log: Rename, to make room for rguillebert's other talks :-) diff --git a/talk/fosdem2014/talk.rst b/talk/fosdem2014/pypy-stm.rst rename from talk/fosdem2014/talk.rst rename to talk/fosdem2014/pypy-stm.rst From noreply at buildbot.pypy.org Thu Jan 23 17:04:42 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 23 Jan 2014 17:04:42 +0100 (CET) Subject: [pypy-commit] pypy default: Update for renamed class Message-ID: <20140123160442.7C7FF1C0962@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68863:ce72ae86ae37 Date: 2014-01-23 10:03 -0600 http://bitbucket.org/pypy/pypy/changeset/ce72ae86ae37/ Log: Update for renamed class diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1061,14 +1061,14 @@ assert 
(D() >= A()) == 'D:A.ge' -class AppTestOldStyleClassStrDict(object): +class AppTestOldStyleClassBytesDict(object): def setup_class(cls): if cls.runappdirect: py.test.skip("can only be run on py.py") def is_strdict(space, w_class): - from pypy.objspace.std.dictmultiobject import StringDictStrategy + from pypy.objspace.std.dictmultiobject import BytesDictStrategy w_d = w_class.getdict(space) - return space.wrap(isinstance(w_d.strategy, StringDictStrategy)) + return space.wrap(isinstance(w_d.strategy, BytesDictStrategy)) cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict)) From noreply at buildbot.pypy.org Thu Jan 23 17:06:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 23 Jan 2014 17:06:19 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Improvements Message-ID: <20140123160619.B02681D241B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5136:5c1254c5a6b1 Date: 2014-01-23 17:06 +0100 http://bitbucket.org/pypy/extradoc/changeset/5c1254c5a6b1/ Log: Improvements diff --git a/talk/fosdem2014/pypy-stm.rst b/talk/fosdem2014/pypy-stm.rst --- a/talk/fosdem2014/pypy-stm.rst +++ b/talk/fosdem2014/pypy-stm.rst @@ -35,7 +35,7 @@ Multithread programming ======================= -* C, C++, Java, .NET +* C, C++, Java, .NET, ... * Jython, IronPython @@ -66,7 +66,7 @@ * Optimistically runs multiple threads even if they are supposed to be waiting on the same lock -* High overheads, working on it +* High overheads (but working on it) Expected results @@ -90,7 +90,7 @@ Status ====== -* STM is still at least 2x slower +* STM is still at least 2x slower (speed on a single core) * HTM in Ruby with Intel Haswell CPUs: not bad but still disappointing (imo) @@ -103,7 +103,7 @@ * Hope: much less than 2x slower for "PyPy-like" usages -* (description) +* (insert description here) Atomic sections @@ -131,10 +131,10 @@ * Works even if you don't use threads! -* Modify the Twisted reactor to start a pool of threads, +* If the Twisted reactor was modified to start a pool of threads, and to run all events in ``with atomic:`` -* The end result is the same, for any Twisted program +* ...Then the end result is the same, for any Twisted program Behind-the-scene threads @@ -147,10 +147,10 @@ program didn't change -Optimistic summary -================== +Summary (optimistic) +==================== -* If you are a (Twisted / Eventlet / Stackless / etc.) developer +* If you are a Twisted developer... * Just wait and your program will run on multiple cores ``:-)`` @@ -158,30 +158,44 @@ Conflicts ========= -* Your program will likely fail to use multiple cores out of - the box +* Actually, your program will likely fail to use multiple cores + out of the box -* Because of "conflicts": events that should be independent but - are not (e.g. incrementing a global counter, etc.) +* ...Because of "conflicts": each event should be "often" independent, + but may not be (e.g. because they each incrementing a global counter + or similar) Some work left for you to do ============================ -* You need to figure out where the conficts are, and trim them +* You need to figure out where the conficts are * Maybe using some debugger-like tool that reports conflicts +* Then you need small rewrites to avoid them + + +What is the point? 
+================== + +* The point is that with STM/HTM your program is always *correct* + (as much as the single-core version is) + +* You need to work in order to fix the most obvious conflicts + +* If you don't, it won't be faster than the single-core original + What did we win? ================ -* The point is that your program is always *correct* +* Regular approach to multithreading: your program is always *fast* -* You need to work in order to fix the most obvious conflicts +* You need to work in order to fix the bugs (races, deadlocks...) -* Regular approach to multithreading: your program is always *fast*, - but you need to work in order to fix all the bugs (races, deadlocks...) +* You need to find and fix *all* bugs -- as opposed to the STM/HTM + version where you only fix *some* issues until it is fast enough Scope @@ -200,8 +214,6 @@ ========== * Mostly theoretical for now: there is a risk it won't work in - practice - -* I bet it will ``:-)`` + practice (I bet it will ``:-)``) * Expect progress in the following months: http://morepypy.blogspot.com/ From noreply at buildbot.pypy.org Thu Jan 23 17:36:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 23 Jan 2014 17:36:35 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Updates Message-ID: <20140123163635.F292D1C33EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5137:fc3667ad5e70 Date: 2014-01-23 17:36 +0100 http://bitbucket.org/pypy/extradoc/changeset/fc3667ad5e70/ Log: Updates diff --git a/talk/fosdem2014/Makefile b/talk/fosdem2014/Makefile new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/Makefile @@ -0,0 +1,10 @@ +# Note to myself (arigo): run in the 64-bit environment + +pypy-stm.pdf: pypy-stm.tex + pdflatex pypy-stm.tex + +pypy-stm.tex: pypy-stm.rst + rst2beamer.py --stylesheet=stylesheet.latex --documentoptions=14pt --input-encoding=utf8 --output-encoding=utf8 $< | python expand-itemize.py > pypy-stm.tex + +clean: + rm -f pypy-stm.tex pypy-stm.pdf diff --git a/talk/fosdem2014/expand-itemize.py b/talk/fosdem2014/expand-itemize.py new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/expand-itemize.py @@ -0,0 +1,10 @@ +import sys + +def expand(in_file, out_file): + for line in in_file: + line = line.replace(r'\begin{itemize}', + r'\begin{itemize}\setlength{\itemsep}{10pt}') + out_file.write(line) + +if __name__ == '__main__': + expand(sys.stdin, sys.stdout) diff --git a/talk/fosdem2014/pypy-stm.rst b/talk/fosdem2014/pypy-stm.rst --- a/talk/fosdem2014/pypy-stm.rst +++ b/talk/fosdem2014/pypy-stm.rst @@ -23,6 +23,8 @@ * PyPy dev, CPython dev +* This talk applies to Python or any similar language + Problem ======= @@ -61,24 +63,24 @@ Transactional Memory ==================== -* Recent research +* Recent research (past ~10 years) * Optimistically runs multiple threads even if they are supposed to be waiting on the same lock -* High overheads (but working on it) +* Usually, high overheads Expected results ================ -* Runs multiple threads despite having a single GIL +* Runs multiple threads despite a single GIL * Does not remove the GIL, but solves the original problem anyway -Kinds of Transactional Memory -============================= +Transactional Memory +==================== * STM: Software Transactional Memory @@ -90,16 +92,17 @@ Status ====== -* STM is still at least 2x slower (speed on a single core) +* STM is still at least 2x slower (on one core) -* HTM in Ruby with Intel Haswell CPUs: not bad but +* HTM: tested in Ruby with Intel Haswell CPUs, not bad but 
still disappointing (imo) STM C7 ====== -* Our group's research +* c7 is our group's research (there were a lot of previous + research that failed to give good results) * Hope: much less than 2x slower for "PyPy-like" usages @@ -113,13 +116,18 @@ * One bytecode? Obscure for the regular Python programmer -* Larger atomic sections: ``with atomic:`` +* Larger atomic sections: +:: -So... -===== + with atomic: + ... -* New way to synchronize multiple threads: ``with atomic:`` + +Larger atomic sections +====================== + +* New way to synchronize multiple threads * All ``atomic`` blocks appear to run serialized @@ -131,8 +139,8 @@ * Works even if you don't use threads! -* If the Twisted reactor was modified to start a pool of threads, - and to run all events in ``with atomic:`` +* If the Twisted reactor (say) was modified to start a pool of threads, + and to run all events in "``with atomic:``" * ...Then the end result is the same, for any Twisted program @@ -143,7 +151,7 @@ * The thread pool added behind the scene lets a STM/HTM-enabled Python run on several cores -* The ``with atomic:`` means that the semantics of the Twisted +* The "``with atomic:``" means that the semantics of the Twisted program didn't change @@ -171,9 +179,9 @@ * You need to figure out where the conficts are -* Maybe using some debugger-like tool that reports conflicts +* Maybe using some debugger-like tools that report conflicts -* Then you need small rewrites to avoid them +* Then you need (hopefully small) rewrites to avoid them What is the point? @@ -214,6 +222,12 @@ ========== * Mostly theoretical for now: there is a risk it won't work in - practice (I bet it will ``:-)``) + practice [1] * Expect progress in the following months: http://morepypy.blogspot.com/ + +:: + + - + +[1] I bet it will, eventually ``:-)`` diff --git a/talk/fosdem2014/stylesheet.latex b/talk/fosdem2014/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/stylesheet.latex @@ -0,0 +1,10 @@ +\usetheme{Warsaw} +\usecolortheme{whale} +\setbeamercovered{transparent} +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} +\addtobeamertemplate{block begin}{}{\setlength{\parskip}{35pt plus 1pt minus 1pt}} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} From noreply at buildbot.pypy.org Thu Jan 23 17:38:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 23 Jan 2014 17:38:26 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Also add the PDF Message-ID: <20140123163826.CB7E81C33EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5138:7cf5eca746aa Date: 2014-01-23 17:38 +0100 http://bitbucket.org/pypy/extradoc/changeset/7cf5eca746aa/ Log: Also add the PDF diff --git a/talk/fosdem2014/pypy-stm.pdf b/talk/fosdem2014/pypy-stm.pdf new file mode 100644 index 0000000000000000000000000000000000000000..f3bd0385c3b25dad655b28e8e9351c9255abcc72 GIT binary patch [cut] From noreply at buildbot.pypy.org Thu Jan 23 17:56:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 23 Jan 2014 17:56:24 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Updates, thanks fijal Message-ID: <20140123165624.0F2C61C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5139:7cd30b055ba3 Date: 2014-01-23 17:56 +0100 http://bitbucket.org/pypy/extradoc/changeset/7cd30b055ba3/ Log: Updates, thanks fijal diff 
--git a/talk/fosdem2014/pypy-stm.pdf b/talk/fosdem2014/pypy-stm.pdf index f3bd0385c3b25dad655b28e8e9351c9255abcc72..7eeb751506bd76c8928b1bcdf200a385875f286c GIT binary patch [cut] diff --git a/talk/fosdem2014/pypy-stm.rst b/talk/fosdem2014/pypy-stm.rst --- a/talk/fosdem2014/pypy-stm.rst +++ b/talk/fosdem2014/pypy-stm.rst @@ -158,7 +158,7 @@ Summary (optimistic) ==================== -* If you are a Twisted developer... +* If you are using Twisted... * Just wait and your program will run on multiple cores ``:-)`` @@ -170,8 +170,10 @@ out of the box * ...Because of "conflicts": each event should be "often" independent, - but may not be (e.g. because they each incrementing a global counter - or similar) + but may not be + +* Example: incrementing a global counter, or otherwise changing some + global object systematically Some work left for you to do @@ -184,6 +186,17 @@ * Then you need (hopefully small) rewrites to avoid them +Some work left for us to do, first +================================== + +* Additional conflicts come from Twisted itself + +* Example: the logging system, which may need to use queues + +* This means that some of the core Python data structures (dicts, + queues...) may need refactorings too + + What is the point? ================== From noreply at buildbot.pypy.org Thu Jan 23 18:00:50 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 23 Jan 2014 18:00:50 +0100 (CET) Subject: [pypy-commit] pypy default: Fix these tests to actually run the optimizations. Message-ID: <20140123170050.89CB21C00F8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68864:b4f8189e6b0b Date: 2014-01-23 17:59 +0100 http://bitbucket.org/pypy/pypy/changeset/b4f8189e6b0b/ Log: Fix these tests to actually run the optimizations. 
diff --git a/rpython/translator/c/test/test_backendoptimized.py b/rpython/translator/c/test/test_backendoptimized.py --- a/rpython/translator/c/test/test_backendoptimized.py +++ b/rpython/translator/c/test/test_backendoptimized.py @@ -1,6 +1,4 @@ -from rpython.conftest import option from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong -from rpython.translator.backendopt.all import backend_optimizations from rpython.translator.c.test.test_typed import TestTypedTestCase as _TestTypedTestCase from rpython.translator.c.test.test_genc import compile @@ -77,12 +75,8 @@ assert res == 42 class TestTypedOptimizedSwitchTestCase: - - class CodeGenerator(_TestTypedTestCase): - def process(self, t): - _TestTypedTestCase.process(self, t) - self.t = t - backend_optimizations(t, merge_if_blocks=True) + def getcompiled(self, func, argtypes): + return compile(func, argtypes, merge_if_blocks=True) def test_int_switch(self): def f(x): @@ -93,8 +87,7 @@ elif x == 27: return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in (0,1,2,3,9,27,48, -9): assert fn(x) == f(x) @@ -107,8 +100,7 @@ elif x == 3: return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in (0,1,2,3,9,27,48, -9): assert fn(x) == f(x) @@ -121,8 +113,7 @@ elif x == 3: return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in (0,1,2,3,9,27,48, -9): assert fn(x) == f(x) @@ -135,8 +126,7 @@ elif x == r_uint(27): return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [r_uint]) + fn = self.getcompiled(f, [r_uint]) for x in (0,1,2,3,9,27,48): assert fn(r_uint(x)) == f(r_uint(x)) @@ -149,8 +139,7 @@ elif x == r_longlong(27): return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [r_longlong]) + fn = self.getcompiled(f, [r_longlong]) for x in (0,1,2,3,9,27,48, -9): assert fn(r_longlong(x)) == f(r_longlong(x)) @@ -163,8 +152,7 @@ elif x == r_ulonglong(27): return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [r_ulonglong]) + fn = self.getcompiled(f, [r_ulonglong]) for x in (0,1,2,3,9,27,48, r_ulonglong(-9)): assert fn(r_ulonglong(x)) == f(r_ulonglong(x)) @@ -178,8 +166,7 @@ elif x == 'c': return 'd' return '@' - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in 'ABCabc@': y = ord(x) assert fn(y) == f(y) @@ -194,8 +181,7 @@ if case == '\xFB': return 5 if case == '\xFA': return 6 return 7 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for input, expected in [(255, 1), (253, 3), (251, 5), (161, 7)]: res = fn(input) assert res == expected @@ -210,20 +196,15 @@ elif x == u'c': return 'd' return '@' - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in u'ABCabc@': y = ord(x) assert fn(y) == f(y) class TestTypedOptimizedRaisingOps: - - class CodeGenerator(_TestTypedTestCase): - def process(self, t): - _TestTypedTestCase.process(self, t) - self.t = t - backend_optimizations(t, raisingop2direct_call=True) + def getcompiled(self, func, argtypes): + return compile(func, argtypes, raisingop2direct_call=True) def test_int_floordiv_zer(self): def f(x): @@ -232,7 +213,6 @@ except: 
y = 456 return y - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in (0,1,2,3,9,27,48, -9): assert fn(x) == f(x) diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -50,7 +50,7 @@ unsigned_ffffffff) def compile(fn, argtypes, view=False, gcpolicy="none", backendopt=True, - annotatorpolicy=None, thread=False): + annotatorpolicy=None, thread=False, **kwds): argtypes_unroll = unrolling_iterable(enumerate(argtypes)) for argtype in argtypes: @@ -98,7 +98,7 @@ return 0 t = Translation(entry_point, None, gc=gcpolicy, backend="c", - policy=annotatorpolicy, thread=thread) + policy=annotatorpolicy, thread=thread, **kwds) if not backendopt: t.disable(["backendopt_lltype"]) t.driver.config.translation.countmallocs = True From noreply at buildbot.pypy.org Thu Jan 23 18:31:00 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 23 Jan 2014 18:31:00 +0100 (CET) Subject: [pypy-commit] pypy default: Add failing test that checks whether raisingop2direct_call transforms overflow operations using unsigned arithmetic. Message-ID: <20140123173100.4D3CB1C00F8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68865:ae05315ebb9c Date: 2014-01-23 18:30 +0100 http://bitbucket.org/pypy/pypy/changeset/ae05315ebb9c/ Log: Add failing test that checks whether raisingop2direct_call transforms overflow operations using unsigned arithmetic. diff --git a/rpython/translator/c/test/test_backendoptimized.py b/rpython/translator/c/test/test_backendoptimized.py --- a/rpython/translator/c/test/test_backendoptimized.py +++ b/rpython/translator/c/test/test_backendoptimized.py @@ -216,3 +216,22 @@ fn = self.getcompiled(f, [int]) for x in (0,1,2,3,9,27,48, -9): assert fn(x) == f(x) + + def test_ovf_op_in_loop(self): + # This checks whether the raising operations are implemented using + # unsigned arithmetic. The problem with using signed arithmetic is that + # signed overflow is undefined in C and the optimizer is allowed to + # remove the overflow check. + from sys import maxint + from rpython.rlib.rarithmetic import ovfcheck + def f(x, y): + ret = 0 + for i in range(y): + try: + ret = ovfcheck(x + i) + except OverflowError: + break + return ret + fc = self.getcompiled(f, [int, int]) + assert fc(10, 10) == 19 + assert fc(maxint, 10) == maxint From noreply at buildbot.pypy.org Thu Jan 23 18:44:40 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 23 Jan 2014 18:44:40 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the test added in ae05315ebb9c. Message-ID: <20140123174440.772E31C0962@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68866:70df1594c35f Date: 2014-01-23 18:43 +0100 http://bitbucket.org/pypy/pypy/changeset/70df1594c35f/ Log: Fix the test added in ae05315ebb9c. 
diff --git a/rpython/rtyper/raisingops.py b/rpython/rtyper/raisingops.py --- a/rpython/rtyper/raisingops.py +++ b/rpython/rtyper/raisingops.py @@ -87,7 +87,7 @@ if ((r^(x)) >= 0 || (r^(y)) >= 0); \ else FAIL_OVF(err, "integer addition") ''' - r = x + y + r = intmask(r_uint(x) + r_uint(y)) if r^x >= 0 or r^y >= 0: return r else: @@ -99,7 +99,7 @@ if (r >= (x)); \ else FAIL_OVF("integer addition") ''' - r = x + y + r = intmask(r_uint(x) + r_uint(y)) if r >= x: return r else: @@ -111,7 +111,7 @@ if ((r^(x)) >= 0 || (r^~(y)) >= 0); \ else FAIL_OVF(err, "integer subtraction") ''' - r = x - y + r = intmask(r_uint(x) - r_uint(y)) if r^x >= 0 or r^~y >= 0: return r else: From noreply at buildbot.pypy.org Thu Jan 23 20:09:38 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 23 Jan 2014 20:09:38 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default (6cbefcec4ceb) Message-ID: <20140123190938.DDE2E1C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68867:6ebf49c04a51 Date: 2014-01-21 11:42 -0800 http://bitbucket.org/pypy/pypy/changeset/6ebf49c04a51/ Log: merge default (6cbefcec4ceb) diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -74,6 +74,10 @@ The actual details would be rather differen in PyPy, but we would like to have the same optimization implemented. +Or maybe not. We can also play around with the idea of using a single +representation: as a byte string in utf-8. (This idea needs some extra logic +for efficient indexing, like a cache.) + .. _`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/ Translation Toolchain diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -26,7 +26,12 @@ if branch == 'default': branch = 'trunk' -filename = 'pypy-c-jit-latest-%s.tar.bz2' % arch +if '--nojit' in sys.argv: + kind = 'nojit' +else: + kind = 'jit' + +filename = 'pypy-c-%s-latest-%s.tar.bz2' % (kind, arch) url = 'http://buildbot.pypy.org/nightly/%s/%s' % (branch, filename) tmp = py.path.local.mkdtemp() mydir = tmp.chdir() diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1243,6 +1243,8 @@ flags |= consts.CO_NESTED if scope.is_generator: flags |= consts.CO_GENERATOR + if scope.has_yield_inside_try: + flags |= consts.CO_YIELD_INSIDE_TRY if scope.has_variable_arg: flags |= consts.CO_VARARGS if scope.has_keywords_arg: diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -18,6 +18,7 @@ CO_FUTURE_BARRY_AS_BDFL = 0x40000 #pypy specific: CO_KILL_DOCSTRING = 0x100000 +CO_YIELD_INSIDE_TRY = 0x200000 PyCF_MASK = (CO_FUTURE_DIVISION | CO_FUTURE_ABSOLUTE_IMPORT | CO_FUTURE_WITH_STATEMENT | CO_FUTURE_PRINT_FUNCTION | diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -43,6 +43,7 @@ self.child_has_free = False self.nested = False self.doc_removable = False + self._in_try_body_depth = 0 def lookup(self, name): """Find the scope of identifier 'name'.""" @@ -75,6 +76,14 @@ self.varnames.append(mangled) return mangled + def note_try_start(self, try_node): + """Called when a try is 
found, before visiting the body.""" + self._in_try_body_depth += 1 + + def note_try_end(self, try_node): + """Called after visiting a try body.""" + self._in_try_body_depth -= 1 + def note_yield(self, yield_node): """Called when a yield is found.""" raise SyntaxError("'yield' outside function", yield_node.lineno, @@ -223,6 +232,7 @@ self.has_variable_arg = False self.has_keywords_arg = False self.is_generator = False + self.has_yield_inside_try = False self.optimized = True self.return_with_value = False self.import_star = None @@ -238,6 +248,8 @@ raise SyntaxError("'return' with argument inside generator", self.ret.lineno, self.ret.col_offset) self.is_generator = True + if self._in_try_body_depth > 0: + self.has_yield_inside_try = True def note_return(self, ret): if ret.value: @@ -489,7 +501,12 @@ self.scope.new_temporary_name() if wih.optional_vars: self.scope.new_temporary_name() - ast.GenericASTVisitor.visit_With(self, wih) + wih.context_expr.walkabout(self) + if wih.optional_vars: + wih.optional_vars.walkabout(self) + self.scope.note_try_start(wih) + self.visit_sequence(wih.body) + self.scope.note_try_end(wih) def visit_arguments(self, arguments): scope = self.scope @@ -535,3 +552,16 @@ else: role = SYM_ASSIGNED self.note_symbol(name.id, role) + + def visit_TryExcept(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.handlers) + self.visit_sequence(node.orelse) + + def visit_TryFinally(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.finalbody) diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -365,6 +365,25 @@ assert exc.msg == "'return' with argument inside generator" scp = self.func_scope("def f():\n return\n yield x") + def test_yield_inside_try(self): + scp = self.func_scope("def f(): yield x") + assert not scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n except: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n finally: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n with x: yield y") + assert scp.has_yield_inside_try + + def test_yield_outside_try(self): + for input in ("try: pass\n except: pass", + "try: pass\n except: yield y", + "try: pass\n finally: pass", + "try: pass\n finally: yield y", + "with x: pass"): + input = "def f():\n yield y\n %s\n yield y" % (input,) + assert not self.func_scope(input).has_yield_inside_try + def test_return(self): for input in ("class x: return", "return"): exc = py.test.raises(SyntaxError, self.func_scope, input).value diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -160,20 +160,6 @@ code_name = self.pycode.co_name return space.wrap(code_name) - def __del__(self): - # Only bother enqueuing self to raise an exception if the frame is - # still not finished and finally or except blocks are present. 
- self.clear_all_weakrefs() - if self.frame is not None: - block = self.frame.lastblock - while block is not None: - if not isinstance(block, LoopBlock): - self.enqueue_for_destruction(self.space, - GeneratorIterator.descr_close, - "interrupting generator of ") - break - block = block.previous - # Results can be either an RPython list of W_Root, or it can be an # app-level W_ListObject, which also has an append() method, that's why we # generate 2 versions of the function and 2 jit drivers. @@ -216,3 +202,20 @@ return unpack_into unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() + + +class GeneratorIteratorWithDel(GeneratorIterator): + + def __del__(self): + # Only bother enqueuing self to raise an exception if the frame is + # still not finished and finally or except blocks are present. + self.clear_all_weakrefs() + if self.frame is not None: + block = self.frame.lastblock + while block is not None: + if not isinstance(block, LoopBlock): + self.enqueue_for_destruction(self.space, + GeneratorIterator.descr_close, + "interrupting generator of ") + break + block = block.previous diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR, CO_KILL_DOCSTRING) + CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash, we_are_translated diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -170,8 +170,12 @@ def run(self): """Start this frame's execution.""" if self.is_generator(): - from pypy.interpreter.generator import GeneratorIterator - return self.space.wrap(GeneratorIterator(self)) + if pycode.CO_YIELD_INSIDE_TRY: + from pypy.interpreter.generator import GeneratorIteratorWithDel + return self.space.wrap(GeneratorIteratorWithDel(self)) + else: + from pypy.interpreter.generator import GeneratorIterator + return self.space.wrap(GeneratorIterator(self)) else: return self.execute_frame() diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -5,7 +5,7 @@ from pypy.interpreter.module import Module from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pytraceback import PyTraceback -from pypy.interpreter.generator import GeneratorIterator +from pypy.interpreter.generator import GeneratorIteratorWithDel from rpython.rlib.objectmodel import instantiate from pypy.interpreter.gateway import unwrap_spec from pypy.objspace.std.iterobject import W_SeqIterObject, W_ReverseSeqIterObject @@ -60,7 +60,7 @@ return space.wrap(tb) def generator_new(space): - new_generator = instantiate(GeneratorIterator) + new_generator = instantiate(GeneratorIteratorWithDel) return space.wrap(new_generator) def rangeiter_new(space, w_start, w_step, w_len, w_index): diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1316,10 +1316,9 @@ l[index] = self.unwrap(w_item) except IndexError: raise - return - - w_list.switch_to_object_strategy() - 
w_list.setitem(index, w_item) + else: + w_list.switch_to_object_strategy() + w_list.setitem(index, w_item) def setslice(self, w_list, start, step, slicelength, w_other): assert slicelength >= 0 diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -27,6 +27,9 @@ jit.loop_unrolling_heuristic(other, other.length(), UNROLL_CUTOFF)) +contains_jmp = jit.JitDriver(greens = [], reds = 'auto', + name = 'tuple.contains') + class W_AbstractTupleObject(W_Root): __slots__ = () @@ -121,13 +124,26 @@ descr_gt = _make_tuple_comparison('gt') descr_ge = _make_tuple_comparison('ge') - @jit.look_inside_iff(lambda self, _1, _2: _unroll_condition(self)) def descr_contains(self, space, w_obj): + if _unroll_condition(self): + return self._descr_contains_unroll_safe(space, w_obj) + else: + return self._descr_contains_jmp(space, w_obj) + + @jit.unroll_safe + def _descr_contains_unroll_safe(self, space, w_obj): for w_item in self.tolist(): if space.eq_w(w_item, w_obj): return space.w_True return space.w_False + def _descr_contains_jmp(self, space, w_obj): + for w_item in self.tolist(): + contains_jmp.jit_merge_point() + if space.eq_w(w_item, w_obj): + return space.w_True + return space.w_False + def descr_add(self, space, w_other): if not isinstance(w_other, W_AbstractTupleObject): return space.w_NotImplemented diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -177,6 +177,9 @@ BoolOption("lldebug", "If true, makes an lldebug build", default=False, cmdline="--lldebug"), + BoolOption("lldebug0", + "If true, makes an lldebug0 build", default=False, + cmdline="--lldebug0"), OptionDescription("backendopt", "Backend Optimization Options", [ # control inlining diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2102,11 +2102,11 @@ if not box1.same_constant(box2): break else: - # Found! Compile it as a loop. - # raises in case it works -- which is the common case if self.partial_trace: if start != self.retracing_from: raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP) # For now + # Found! Compile it as a loop. + # raises in case it works -- which is the common case self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr) # creation of the loop was cancelled! self.cancel_count += 1 diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -1287,26 +1287,58 @@ # Even if it's not power of two it can still be useful. 
return _muladd1(b, digit) + # a is not b + # use the following identity to reduce the number of operations + # a * b = a_0*b_0 + sum_{i=1}^n(a_0*b_i + a_1*b_{i-1}) + a_1*b_n z = rbigint([NULLDIGIT] * (size_a + size_b), 1) - # gradeschool long mult i = UDIGIT_TYPE(0) - while i < size_a: - carry = 0 - f = a.widedigit(i) + size_a1 = UDIGIT_TYPE(size_a - 1) + size_b1 = UDIGIT_TYPE(size_b - 1) + while i < size_a1: + f0 = a.widedigit(i) + f1 = a.widedigit(i + 1) pz = i + carry = z.widedigit(pz) + b.widedigit(0) * f0 + z.setdigit(pz, carry) + pz += 1 + carry >>= SHIFT + j = UDIGIT_TYPE(0) + while j < size_b1: + # this operation does not overflow using + # SHIFT = (LONG_BIT // 2) - 1 = B - 1; in fact before it + # carry and z.widedigit(pz) are less than 2**(B - 1); + # b.widedigit(j + 1) * f0 < (2**(B-1) - 1)**2; so + # carry + z.widedigit(pz) + b.widedigit(j + 1) * f0 + + # b.widedigit(j) * f1 < 2**(2*B - 1) - 2**B < 2**LONG)BIT - 1 + carry += z.widedigit(pz) + b.widedigit(j + 1) * f0 + \ + b.widedigit(j) * f1 + z.setdigit(pz, carry) + pz += 1 + carry >>= SHIFT + j += 1 + # carry < 2**(B + 1) - 2 + carry += z.widedigit(pz) + b.widedigit(size_b1) * f1 + z.setdigit(pz, carry) + pz += 1 + carry >>= SHIFT + # carry < 4 + if carry: + z.setdigit(pz, carry) + assert (carry >> SHIFT) == 0 + i += 2 + if size_a & 1: + pz = size_a1 + f = a.widedigit(pz) pb = 0 + carry = _widen_digit(0) while pb < size_b: carry += z.widedigit(pz) + b.widedigit(pb) * f pb += 1 z.setdigit(pz, carry) pz += 1 carry >>= SHIFT - assert carry <= MASK if carry: - assert pz >= 0 z.setdigit(pz, z.widedigit(pz) + carry) - assert (carry >> SHIFT) == 0 - i += 1 z._normalize() return z diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -364,6 +364,8 @@ extra_opts += ['-j', str(self.config.translation.make_jobs)] if self.config.translation.lldebug: extra_opts += ["lldebug"] + elif self.config.translation.lldebug0: + extra_opts += ["lldebug0"] self.translator.platform.execute_makefile(self.targetdir, extra_opts) if shared: @@ -398,6 +400,7 @@ ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPPY_USE_LINUXMEMCHK" debug_target'), ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'), ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), + ('lldebug0','', '$(MAKE) CFLAGS="-O0 $(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), ('profile', '', '$(MAKE) CFLAGS="-g -O1 -pg $(CFLAGS) -fno-omit-frame-pointer" LDFLAGS="-pg $(LDFLAGS)" $(TARGET)'), ] if self.has_profopt(): diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -591,3 +591,12 @@ if sys.platform == 'win32': name = name.new(ext='exe') return name + +if os.name == 'posix': + def shutil_copy(src, dst): + # this version handles the case where 'dst' is an executable + # currently being executed + shutil.copy(src, dst + '~') + os.rename(dst + '~', dst) +else: + shutil_copy = shutil.copy diff --git a/rpython/translator/test/test_driver.py b/rpython/translator/test/test_driver.py --- a/rpython/translator/test/test_driver.py +++ b/rpython/translator/test/test_driver.py @@ -1,6 +1,6 @@ import py import os -from rpython.translator.driver import TranslationDriver +from rpython.translator.driver import TranslationDriver, shutil_copy from rpython.tool.udir import udir def test_ctr(): @@ -74,4 +74,9 @@ assert dst_name.new(ext='dll').read() == 
'dll' assert dst_name.new(purebasename='python27',ext='lib').read() == 'lib' - +def test_shutil_copy(): + a = udir.join('file_a') + b = udir.join('file_a') + a.write('hello') + shutil_copy(str(a), str(b)) + assert b.read() == 'hello' From noreply at buildbot.pypy.org Thu Jan 23 20:09:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 23 Jan 2014 20:09:41 +0100 (CET) Subject: [pypy-commit] pypy py3k-refactor-str-types: merge default (7de9113f7079) Message-ID: <20140123190941.112A11C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-refactor-str-types Changeset: r68868:7c3e094a773b Date: 2014-01-21 23:12 -0800 http://bitbucket.org/pypy/pypy/changeset/7c3e094a773b/ Log: merge default (7de9113f7079) diff too long, truncating to 2000 out of 10083 lines diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -2,6 +2,8 @@ import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject +from _ctypes.array import Array +from _ctypes.pointer import _Pointer import inspect def names_and_fields(self, _fields_, superclass, anonymous_fields=None): @@ -104,8 +106,11 @@ def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = keepalive_key(self.num) + key = keepalive_key(self.num) + if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array): + # if our value is an Array we need the whole thing alive + store_reference(obj, key, cobj) + elif ensure_objects(cobj) is not None: store_reference(obj, key, cobj._objects) arg = cobj._get_buffer_value() if fieldtype._fficompositesize is not None: diff --git a/pypy/doc/config/translation.lldebug0.txt b/pypy/doc/config/translation.lldebug0.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/translation.lldebug0.txt @@ -0,0 +1,1 @@ +Like lldebug, but in addition compile C files with -O0 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -41,3 +41,10 @@ Fix 3 broken links on PyPy published papers in docs. .. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. 
branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -232,6 +232,11 @@ msg = "__int__ returned non-int (type '%T')" raise operationerrfmt(space.w_TypeError, msg, w_result) + def ord(self, space): + typename = space.type(self).getname(space) + msg = "ord() expected string of length 1, but %s found" + raise operationerrfmt(space.w_TypeError, msg, typename) + def __spacebind__(self, space): return self @@ -1450,6 +1455,9 @@ # This is here mostly just for gateway.int_unwrapping_space_method(). return bool(self.int_w(w_obj)) + def ord(self, w_obj): + return w_obj.ord(self) + # This is all interface for gateway.py. def gateway_int_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -538,12 +538,13 @@ # When a BuiltinCode is stored in a Function object, # you get the functionality of CPython's built-in function type. - def __init__(self, func, unwrap_spec=None, self_type=None, descrmismatch=None): + def __init__(self, func, unwrap_spec=None, self_type=None, + descrmismatch=None, doc=None): "NOT_RPYTHON" # 'implfunc' is the interpreter-level function. # Note that this uses a lot of (construction-time) introspection. Code.__init__(self, func.__name__) - self.docstring = func.__doc__ + self.docstring = doc or func.__doc__ self.identifier = "%s-%s-%s" % (func.__module__, func.__name__, getattr(self_type, '__name__', '*')) @@ -850,7 +851,7 @@ instancecache = {} def __new__(cls, f, app_name=None, unwrap_spec=None, descrmismatch=None, - as_classmethod=False): + as_classmethod=False, doc=None): "NOT_RPYTHON" # f must be a function whose name does NOT start with 'app_' @@ -879,7 +880,8 @@ cls.instancecache[key] = self self._code = BuiltinCode(f, unwrap_spec=unwrap_spec, self_type=self_type, - descrmismatch=descrmismatch) + descrmismatch=descrmismatch, + doc=doc) self.__name__ = f.func_name self.name = app_name self.as_classmethod = as_classmethod diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -170,7 +170,7 @@ def run(self): """Start this frame's execution.""" if self.is_generator(): - if pycode.CO_YIELD_INSIDE_TRY: + if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: from pypy.interpreter.generator import GeneratorIteratorWithDel return self.space.wrap(GeneratorIteratorWithDel(self)) else: diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -720,6 +720,18 @@ never_called py.test.raises(AssertionError, space.wrap, gateway.interp2app_temp(g)) + def test_interp2app_doc(self): + space = self.space + def f(space, w_x): + """foo""" + w_f = space.wrap(gateway.interp2app_temp(f)) + assert space.unwrap(space.getattr(w_f, space.wrap('__doc__'))) == 'foo' + # + def g(space, w_x): + never_called + w_g = space.wrap(gateway.interp2app_temp(g, doc='bar')) + assert space.unwrap(space.getattr(w_g, space.wrap('__doc__'))) == 'bar' + def test_unwrap_spec_default_bytes(self): space = self.space @gateway.unwrap_spec(s='bufferstr') diff --git a/pypy/interpreter/unicodehelper.py 
b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -94,7 +94,7 @@ return space.wrapbytes(bytes) def encode(space, w_data, encoding=None, errors='strict'): - from pypy.objspace.std.unicodetype import encode_object + from pypy.objspace.std.unicodeobject import encode_object return encode_object(space, w_data, encoding, errors) # These functions take and return unwrapped rpython strings and unicodes diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -63,10 +63,14 @@ return W_MemoryView(buf) def descr_buffer(self, space): - """Note that memoryview() objects in PyPy support buffer(), whereas - not in CPython; but CPython supports passing memoryview() to most - built-in functions that accept buffers, with the notable exception - of the buffer() built-in.""" + """ + Note that memoryview() is very inconsistent in CPython: it does not + support the buffer interface but does support the new buffer + interface: as a result, it is possible to pass memoryview to + e.g. socket.send() but not to file.write(). For simplicity and + consistency, in PyPy memoryview DOES support buffer(), which means + that it is accepted in more places than CPython. + """ self._check_released(space) return space.wrap(self.buf) diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -118,6 +118,7 @@ SF_MSVC_BITFIELDS = 1 SF_GCC_ARM_BITFIELDS = 2 SF_GCC_BIG_ENDIAN = 4 +SF_PACKED = 8 if sys.platform == 'win32': DEFAULT_SFLAGS = SF_MSVC_BITFIELDS @@ -190,8 +191,8 @@ boffset = 0 # reset each field at offset 0 # # update the total alignment requirement, but skip it if the - # field is an anonymous bitfield - falign = ftype.alignof() + # field is an anonymous bitfield or if SF_PACKED + falign = 1 if sflags & SF_PACKED else ftype.alignof() do_align = True if (sflags & SF_GCC_ARM_BITFIELDS) == 0 and fbitsize >= 0: if (sflags & SF_MSVC_BITFIELDS) == 0: @@ -305,6 +306,12 @@ if bits_already_occupied + fbitsize > 8 * ftype.size: # it would not fit, we need to start at the next # allowed position + if ((sflags & SF_PACKED) != 0 and + (bits_already_occupied & 7) != 0): + raise operationerrfmt(space.w_NotImplementedError, + "with 'packed', gcc would compile field " + "'%s.%s' to reuse some bits in the previous " + "field", w_ctype.name, fname) field_offset_bytes += falign assert boffset < field_offset_bytes * 8 boffset = field_offset_bytes * 8 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3137,6 +3137,44 @@ p = newp(BArray, None) assert sizeof(p[2:9]) == 7 * sizeof(BInt) +def test_packed(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BShort = new_primitive_type("short") + BStruct = new_struct_type("struct foo") + complete_struct_or_union(BStruct, [('a1', BLong, -1), + ('a2', BChar, -1), + ('a3', BShort, -1)], + None, -1, -1, 8) # SF_PACKED==8 + d = BStruct.fields + assert len(d) == 3 + assert d[0][0] == 'a1' + assert d[0][1].type is BLong + assert d[0][1].offset == 0 + assert d[0][1].bitshift == -1 + assert d[0][1].bitsize == -1 + assert d[1][0] == 'a2' + assert d[1][1].type is BChar + 
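# Illustrative cross-check (not part of the changeset above; PackedFoo is an
# invented analogue of 'struct foo'): the layout that test_packed expects from
# SF_PACKED is the same layout a packed C struct has, and ctypes' _pack_ = 1
# reproduces it, so the expected offsets and the alignment of 1 can be
# sanity-checked on stock CPython:
import ctypes

class PackedFoo(ctypes.Structure):
    _pack_ = 1                               # plays the role of SF_PACKED (8)
    _fields_ = [("a1", ctypes.c_long),
                ("a2", ctypes.c_char),
                ("a3", ctypes.c_short)]

assert PackedFoo.a1.offset == 0
assert PackedFoo.a2.offset == ctypes.sizeof(ctypes.c_long)
assert PackedFoo.a3.offset == ctypes.sizeof(ctypes.c_long) + 1
assert ctypes.sizeof(PackedFoo) == ctypes.sizeof(ctypes.c_long) + 1 + 2
assert ctypes.alignment(PackedFoo) == 1      # no field needs more than 1-byte alignment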
assert d[1][1].offset == sizeof(BLong) + assert d[1][1].bitshift == -1 + assert d[1][1].bitsize == -1 + assert d[2][0] == 'a3' + assert d[2][1].type is BShort + assert d[2][1].offset == sizeof(BLong) + sizeof(BChar) + assert d[2][1].bitshift == -1 + assert d[2][1].bitsize == -1 + assert sizeof(BStruct) == sizeof(BLong) + sizeof(BChar) + sizeof(BShort) + assert alignof(BStruct) == 1 + +def test_packed_with_bitfields(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BStruct = new_struct_type("struct foo") + py.test.raises(NotImplementedError, + complete_struct_or_union, + BStruct, [('a1', BLong, 30), + ('a2', BChar, 5)], + None, -1, -1, 8) # SF_PACKED==8 def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -589,7 +589,7 @@ s = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, address), maxlength) return space.wrap(s) - at unwrap_spec(address=r_uint, newcontent=str) + at unwrap_spec(address=r_uint, newcontent='bufferstr') def rawstring2charp(space, address, newcontent): from rpython.rtyper.annlowlevel import llstr from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1,5 +1,6 @@ from rpython.translator.platform import platform from rpython.translator.tool.cbuild import ExternalCompilationInfo +from pypy.conftest import option from pypy.module._rawffi.interp_rawffi import TYPEMAP, TYPEMAP_FLOAT_LETTERS from pypy.module._rawffi.tracker import Tracker @@ -330,6 +331,8 @@ a = A(10, b'x'*10) _rawffi.rawstring2charp(a.buffer, b"foobar") assert b''.join([a[i] for i in range(10)]) == b"foobarxxxx" + _rawffi.rawstring2charp(a.buffer, memoryview(b"baz")) + assert b''.join([a[i] for i in range(10)]) == b"bazbarxxxx" a.free() def test_raw_callable(self): @@ -1134,6 +1137,15 @@ def setup_class(cls): cls.w_sizes_and_alignments = cls.space.wrap(dict( [(k, (v.c_size, v.c_alignment)) for k,v in TYPEMAP.iteritems()])) + # + # detect if we're running on PyPy with DO_TRACING not compiled in + if option.runappdirect: + try: + import _rawffi + _rawffi._num_of_allocated_objects() + except (ImportError, RuntimeError), e: + py.test.skip(str(e)) + # Tracker.DO_TRACING = True def test_structure_autofree(self): @@ -1141,24 +1153,32 @@ gc.collect() gc.collect() S = _rawffi.Structure([('x', 'i')]) - oldnum = _rawffi._num_of_allocated_objects() + try: + oldnum = _rawffi._num_of_allocated_objects() + except RuntimeError: + oldnum = '?' s = S(autofree=True) s.x = 3 s = None gc.collect() - assert oldnum == _rawffi._num_of_allocated_objects() + if oldnum != '?': + assert oldnum == _rawffi._num_of_allocated_objects() def test_array_autofree(self): import gc, _rawffi gc.collect() - oldnum = _rawffi._num_of_allocated_objects() + try: + oldnum = _rawffi._num_of_allocated_objects() + except RuntimeError: + oldnum = '?' 
A = _rawffi.Array('c') a = A(6, b'xxyxx\x00', autofree=True) assert _rawffi.charp2string(a.buffer) == b'xxyxx' a = None gc.collect() - assert oldnum == _rawffi._num_of_allocated_objects() + if oldnum != '?': + assert oldnum == _rawffi._num_of_allocated_objects() def teardown_class(cls): Tracker.DO_TRACING = False diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -11,7 +11,7 @@ make_typedescr, get_typedescr) from pypy.module.cpyext.bytesobject import PyBytes_Check, PyBytes_FromObject from pypy.module._codecs.interp_codecs import CodecState -from pypy.objspace.std import unicodeobject, unicodetype +from pypy.objspace.std import unicodeobject from rpython.rlib import rstring, runicode from rpython.tool.sourcetools import func_renamer import sys @@ -275,7 +275,7 @@ def PyUnicode_GetDefaultEncoding(space): """Returns the currently active default encoding.""" if default_encoding[0] == '\x00': - encoding = unicodetype.getdefaultencoding(space) + encoding = unicodeobject.getdefaultencoding(space) i = 0 while i < len(encoding) and i < DEFAULT_ENCODING_SIZE: default_encoding[i] = encoding[i] @@ -297,7 +297,7 @@ encoding = rffi.charp2str(llencoding) if llerrors: errors = rffi.charp2str(llerrors) - return unicodetype.encode_object(space, w_unicode, encoding, errors) + return unicodeobject.encode_object(space, w_unicode, encoding, errors) @cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_AsEncodedString(space, w_unicode, llencoding, llerrors): @@ -320,7 +320,7 @@ if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) - return unicodetype.encode_object(space, w_unicode, 'unicode-escape', 'strict') + return unicodeobject.encode_object(space, w_unicode, 'unicode-escape', 'strict') @cpython_api([CONST_WSTRING, Py_ssize_t], PyObject) def PyUnicode_FromUnicode(space, wchar_p, length): @@ -563,7 +563,7 @@ exception was raised by the codec.""" if not PyUnicode_Check(space, w_unicode): PyErr_BadArgument(space) - return unicodetype.encode_object(space, w_unicode, encoding, "strict") + return unicodeobject.encode_object(space, w_unicode, encoding, "strict") @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING], PyObject) @func_renamer('PyUnicode_Decode%s' % suffix) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -852,9 +852,9 @@ # Depending on which opcodes are enabled, eg. CALL_METHOD we bump the version # number by some constant # -# default_magic - 6 -- used by CPython without the -U option -# default_magic - 5 -- used by CPython with the -U option -# default_magic -- used by PyPy [because of CALL_METHOD] +# CPython + 0 -- used by CPython without the -U option +# CPython + 1 -- used by CPython with the -U option +# CPython + 7 = default_magic -- used by PyPy (incompatible!) 
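# Small illustration of the magic-number renumbering commented on above
# (written_by_this_interpreter is a hypothetical helper, not code from the
# diff): the magic sits in the first four bytes of a .pyc, so an interpreter
# with a different magic rejects the file before unmarshalling anything.
import imp

def written_by_this_interpreter(pyc_path):
    # compare the .pyc header against the running interpreter's magic bytes
    with open(pyc_path, 'rb') as f:
        return f.read(4) == imp.get_magic()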
# from pypy.interpreter.pycode import default_magic MARSHAL_VERSION_FOR_PYC = 2 diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -2,10 +2,10 @@ from pypy.interpreter.error import operationerrfmt, OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.floattype import float_typedef -from pypy.objspace.std.stringtype import str_typedef -from pypy.objspace.std.unicodetype import unicode_typedef, unicode_from_object from pypy.objspace.std.longtype import long_typedef +from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.complextype import complex_typedef from rpython.rlib.rarithmetic import LONG_BIT from rpython.rtyper.lltypesystem import rffi @@ -507,7 +507,7 @@ from pypy.module.micronumpy.interp_dtype import new_unicode_dtype - arg = space.unicode_w(unicode_from_object(space, w_arg)) + arg = space.unicode_w(space.unicode_from_object(w_arg)) # XXX size computations, we need tests anyway arr = VoidBoxStorage(len(arg), new_unicode_dtype(space, len(arg))) # XXX not this way, we need store @@ -769,13 +769,13 @@ __module__ = "numpy", ) -W_StringBox.typedef = TypeDef("bytes_", (W_CharacterBox.typedef, str_typedef), +W_StringBox.typedef = TypeDef("bytes_", (W_CharacterBox.typedef, W_BytesObject.typedef), __module__ = "numpy", __new__ = interp2app(W_StringBox.descr__new__string_box.im_func), __len__ = interp2app(W_StringBox.descr_len), ) -W_UnicodeBox.typedef = TypeDef("str_", (W_CharacterBox.typedef, unicode_typedef), +W_UnicodeBox.typedef = TypeDef("str_", (W_CharacterBox.typedef, W_UnicodeObject.typedef), __module__ = "numpy", __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), __len__ = interp2app(W_UnicodeBox.descr_len), diff --git a/pypy/module/test_lib_pypy/ctypes_tests/support.py b/pypy/module/test_lib_pypy/ctypes_tests/support.py --- a/pypy/module/test_lib_pypy/ctypes_tests/support.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/support.py @@ -37,6 +37,8 @@ pass def teardown_class(cls): + if not hasattr(sys, 'pypy_translation_info'): + return if sys.pypy_translation_info['translation.gc'] == 'boehm': return # it seems that boehm has problems with __del__, so not # everything is freed diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py b/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_keepalive.py @@ -31,7 +31,17 @@ assert p._objects == {} assert len(x._objects) == 1 assert x._objects['0'] is p._objects - + + def test_simple_structure_and_pointer_with_array(self): + class X(Structure): + _fields_ = [('array', POINTER(c_int))] + + x = X() + a = (c_int * 3)(1, 2, 3) + assert x._objects is None + x.array = a + assert x._objects['0'] is a + def test_structure_with_pointers(self): class X(Structure): _fields_ = [('x', POINTER(c_int)), diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py b/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_strings.py @@ -38,6 +38,16 @@ buf.raw = "Hello, World" assert buf.value == "Hello, World" + def test_c_buffer_raw_from_buffer(self): + buf = c_buffer(32) + 
buf.raw = buffer("Hello, World") + assert buf.value == "Hello, World" + + def test_c_buffer_raw_from_memoryview(self): + buf = c_buffer(32) + buf.raw = memoryview("Hello, World") + assert buf.value == "Hello, World" + def test_param_1(self): BUF = c_char * 4 buf = BUF() diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -300,6 +300,9 @@ ec._py_repr = None return ec + def unicode_from_object(self, w_obj): + return w_some_obj() + # ---------- def translates(self, func=None, argtypes=None, seeobj_w=[], **kwds): diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1,23 +1,25 @@ """The builtin bytearray implementation""" +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.objspace.std import stringobject from pypy.objspace.std.bytearraytype import new_bytearray -from pypy.objspace.std.longobject import W_LongObject -from pypy.objspace.std.model import W_Object, registerimplementation -from pypy.objspace.std.multimethod import FailedToImplement -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.register_all import register_all +from pypy.objspace.std.stringtype import getbytevalue, makebytesdata_w +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.signature import Signature from pypy.objspace.std.sliceobject import W_SliceObject -from pypy.objspace.std.stringobject import W_StringObject -from pypy.objspace.std.stringtype import getbytevalue, makebytesdata_w +from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.util import get_positive_index +from rpython.rlib.objectmodel import newlist_hint, resizelist_hint, import_from_mixin from rpython.rlib.rstring import StringBuilder -class W_BytearrayObject(W_Object): - from pypy.objspace.std.bytearraytype import bytearray_typedef as typedef +def _make_data(s): + return [s[i] for i in range(len(s))] + +class W_BytearrayObject(W_Root): + import_from_mixin(StringMethods) def __init__(w_self, data): w_self.data = data @@ -26,264 +28,984 @@ """ representation for debugging purposes """ return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) -registerimplementation(W_BytearrayObject) + def _new(self, value): + return W_BytearrayObject(_make_data(value)) -def len__Bytearray(space, w_bytearray): - result = len(w_bytearray.data) - return space.newint(result) + def _new_from_list(self, value): + return W_BytearrayObject(value) -def ord__Bytearray(space, w_bytearray): - if len(w_bytearray.data) != 1: - raise OperationError(space.w_TypeError, - space.wrap("expected a character, but string" - "of length %s found" % len(w_bytearray.data))) - return space.wrap(ord(w_bytearray.data[0])) + def _empty(self): + return W_BytearrayObject([]) -def getitem__Bytearray_ANY(space, w_bytearray, w_index): - # getindex_w should get a second argument space.w_IndexError, - # but that doesn't exist the first time this is called. 
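# Schematic sketch of the pattern the new W_BytearrayObject follows (the
# _StringMethodsSketch/_BytearraySketch names are invented for illustration,
# not code from this changeset): StringMethods implements each algorithm once
# against a few small hooks such as _val/_new/_upper, and every string-like
# type only has to supply those hooks.
class _StringMethodsSketch(object):
    def upper(self):
        value = self._val()
        return self._new("".join([self._upper(ch) for ch in value]))

class _BytearraySketch(_StringMethodsSketch):
    def __init__(self, data):
        self.data = list(data)               # mutable storage, as in self.data above
    def _val(self):
        return "".join(self.data)
    def _new(self, value):
        return _BytearraySketch(value)
    def _upper(self, ch):
        return ch.upper()

assert _BytearraySketch("abc").upper().data == ["A", "B", "C"]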
+ def _len(self): + return len(self.data) + + def _val(self, space): + return space.bufferstr_w(self) + + def _op_val(self, space, w_other): + return space.bufferstr_new_w(w_other) + + def _chr(self, char): + assert len(char) == 1 + return str(char)[0] + + _builder = StringBuilder + + def _newlist_unwrapped(self, space, res): + return space.newlist([W_BytearrayObject(_make_data(i)) for i in res]) + + def _isupper(self, ch): + return ch.isupper() + + def _islower(self, ch): + return ch.islower() + + def _istitle(self, ch): + return ch.isupper() + + def _isspace(self, ch): + return ch.isspace() + + def _isalpha(self, ch): + return ch.isalpha() + + def _isalnum(self, ch): + return ch.isalnum() + + def _isdigit(self, ch): + return ch.isdigit() + + _iscased = _isalpha + + def _islinebreak(self, ch): + return (ch == '\n') or (ch == '\r') + + def _upper(self, ch): + if ch.islower(): + o = ord(ch) - 32 + return chr(o) + else: + return ch + + def _lower(self, ch): + if ch.isupper(): + o = ord(ch) + 32 + return chr(o) + else: + return ch + + _title = _upper + + def _join_return_one(self, space, w_obj): + return False + + def _join_check_item(self, space, w_obj): + if (space.isinstance_w(w_obj, space.w_str) or + space.isinstance_w(w_obj, space.w_bytearray)): + return 0 + return 1 + + def ord(self, space): + if len(self.data) != 1: + msg = "ord() expected a character, but string of length %d found" + raise operationerrfmt(space.w_TypeError, msg, len(self.data)) + return space.wrap(ord(self.data[0])) + + @staticmethod + def descr_new(space, w_bytearraytype, __args__): + return new_bytearray(space, w_bytearraytype, []) + + def descr_reduce(self, space): + assert isinstance(self, W_BytearrayObject) + w_dict = self.getdict(space) + if w_dict is None: + w_dict = space.w_None + return space.newtuple([ + space.type(self), space.newtuple([ + space.wrap(''.join(self.data).decode('latin-1')), + space.wrap('latin-1')]), + w_dict]) + + @staticmethod + def descr_fromhex(space, w_bytearraytype, w_hexstring): + "bytearray.fromhex(string) -> bytearray\n" + "\n" + "Create a bytearray object from a string of hexadecimal numbers.\n" + "Spaces between two numbers are accepted.\n" + "Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\\xb9\\x01\\xef')." 
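# Behavioural check for the fromhex() implementation whose body follows; it
# uses only the stock builtin, so it can be tried on any Python 2.7: spaces
# are accepted between byte pairs, while a dangling single digit is rejected.
assert bytearray.fromhex(u"B9 01EF") == bytearray(b"\xb9\x01\xef")
try:
    bytearray.fromhex(u"b9 0")               # odd trailing digit
except ValueError:
    pass                                     # "non-hexadecimal number found ..."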
+ hexstring = space.str_w(w_hexstring) + hexstring = hexstring.lower() + data = [] + length = len(hexstring) + i = -2 + while True: + i += 2 + while i < length and hexstring[i] == ' ': + i += 1 + if i >= length: + break + if i+1 == length: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % i)) + + top = _hex_digit_to_int(hexstring[i]) + if top == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % i)) + bot = _hex_digit_to_int(hexstring[i+1]) + if bot == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,))) + data.append(chr(top*16 + bot)) + + # in CPython bytearray.fromhex is a staticmethod, so + # we ignore w_type and always return a bytearray + return new_bytearray(space, space.w_bytearray, data) + + def descr_init(self, space, __args__): + # this is on the silly side + w_source, w_encoding, w_errors = __args__.parse_obj( + None, 'bytearray', init_signature, init_defaults) + + if w_source is None: + w_source = space.wrap('') + if w_encoding is None: + w_encoding = space.w_None + if w_errors is None: + w_errors = space.w_None + + # Unicode argument + if not space.is_w(w_encoding, space.w_None): + from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, encode_object + ) + encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + + # if w_source is an integer this correctly raises a TypeError + # the CPython error message is: "encoding or errors without a string argument" + # ours is: "expected unicode, got int object" + w_source = encode_object(space, w_source, encoding, errors) + + # Is it an int? + try: + count = space.int_w(w_source) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + if count < 0: + raise OperationError(space.w_ValueError, + space.wrap("bytearray negative count")) + self.data = ['\0'] * count + return + + data = makebytearraydata_w(space, w_source) + self.data = data + + def descr_repr(self, space): + s = self.data + + # Good default if there are no replacements. 
+ buf = StringBuilder(len("bytearray(b'')") + len(s)) + + buf.append("bytearray(b'") + + for i in range(len(s)): + c = s[i] + + if c == '\\' or c == "'": + buf.append('\\') + buf.append(c) + elif c == '\t': + buf.append('\\t') + elif c == '\r': + buf.append('\\r') + elif c == '\n': + buf.append('\\n') + elif not '\x20' <= c < '\x7f': + n = ord(c) + buf.append('\\x') + buf.append("0123456789abcdef"[n>>4]) + buf.append("0123456789abcdef"[n&0xF]) + else: + buf.append(c) + + buf.append("')") + + return space.wrap(buf.build()) + + def descr_str(self, space): + return space.wrap(''.join(self.data)) + + def descr_eq(self, space, w_other): + try: + return space.newbool(self._val(space) == self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_ne(self, space, w_other): + try: + return space.newbool(self._val(space) != self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_lt(self, space, w_other): + try: + return space.newbool(self._val(space) < self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_le(self, space, w_other): + try: + return space.newbool(self._val(space) <= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_gt(self, space, w_other): + try: + return space.newbool(self._val(space) > self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_ge(self, space, w_other): + try: + return space.newbool(self._val(space) >= self._op_val(space, w_other)) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + + def descr_buffer(self, space): + return BytearrayBuffer(self.data) + + def descr_inplace_add(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + self.data += w_other.data + else: + self.data += self._op_val(space, w_other) + return self + + def descr_inplace_mul(self, space, w_times): + try: + times = space.getindex_w(w_times, space.w_OverflowError) + except OperationError, e: + if e.match(space, space.w_TypeError): + return space.w_NotImplemented + raise + self.data *= times + return self + + def descr_setitem(self, space, w_index, w_other): + if isinstance(w_index, W_SliceObject): + oldsize = len(self.data) + start, stop, step, slicelength = w_index.indices4(space, oldsize) + sequence2 = makebytearraydata_w(space, w_other) + _setitem_slice_helper(space, self.data, start, step, + slicelength, sequence2, empty_elem='\x00') + else: + idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") + try: + self.data[idx] = getbytevalue(space, w_other) + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("bytearray index out of range")) + + def descr_delitem(self, space, w_idx): + if isinstance(w_idx, W_SliceObject): + start, stop, step, slicelength = w_idx.indices4(space, + len(self.data)) + _delitem_slice_helper(space, self.data, start, step, slicelength) + else: + idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") + try: + del self.data[idx] + except IndexError: + raise OperationError(space.w_IndexError, + space.wrap("bytearray deletion index out of range")) + + def descr_append(self, space, 
w_item): + self.data.append(getbytevalue(space, w_item)) + + def descr_extend(self, space, w_other): + if isinstance(w_other, W_BytearrayObject): + self.data += w_other.data + else: + self.data += makebytearraydata_w(space, w_other) + return self + + def descr_insert(self, space, w_idx, w_other): + where = space.int_w(w_idx) + length = len(self.data) + index = get_positive_index(where, length) + val = getbytevalue(space, w_other) + self.data.insert(index, val) + return space.w_None + + @unwrap_spec(w_idx=WrappedDefault(-1)) + def descr_pop(self, space, w_idx): + index = space.int_w(w_idx) + try: + result = self.data.pop(index) + except IndexError: + if not self.data: + raise OperationError(space.w_IndexError, space.wrap( + "pop from empty bytearray")) + raise OperationError(space.w_IndexError, space.wrap( + "pop index out of range")) + return space.wrap(ord(result)) + + def descr_remove(self, space, w_char): + char = space.int_w(space.index(w_char)) + try: + self.data.remove(chr(char)) + except ValueError: + raise OperationError(space.w_ValueError, space.wrap( + "value not found in bytearray")) + + def descr_reverse(self, space): + self.data.reverse() + +def getbytevalue(space, w_value): + if space.isinstance_w(w_value, space.w_str): + string = space.str_w(w_value) + if len(string) != 1: + raise OperationError(space.w_ValueError, space.wrap( + "string must be of size 1")) + return string[0] + + value = space.getindex_w(w_value, None) + if not 0 <= value < 256: + # this includes the OverflowError in case the long is too large + raise OperationError(space.w_ValueError, space.wrap( + "byte must be in range(0, 256)")) + return chr(value) + +def new_bytearray(space, w_bytearraytype, data): + w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) + W_BytearrayObject.__init__(w_obj, data) + return w_obj + + +def makebytearraydata_w(space, w_source): + # String-like argument try: - w_IndexError = space.w_IndexError - except AttributeError: - w_IndexError = None - index = space.getindex_w(w_index, w_IndexError, "bytearray index") - try: - return space.newint(ord(w_bytearray.data[index])) - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray index out of range")) + string = space.bufferstr_new_w(w_source) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + return [c for c in string] -def getitem__Bytearray_Slice(space, w_bytearray, w_slice): - data = w_bytearray.data - length = len(data) - start, stop, step, slicelength = w_slice.indices4(space, length) - assert slicelength >= 0 - if step == 1 and 0 <= start <= stop: - newdata = data[start:stop] - else: - newdata = _getitem_slice_multistep(data, start, step, slicelength) - return W_BytearrayObject(newdata) + # sequence of bytes + w_iter = space.iter(w_source) + length_hint = space.length_hint(w_source, 0) + data = newlist_hint(length_hint) + extended = 0 + while True: + try: + w_item = space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + value = getbytevalue(space, w_item) + data.append(value) + extended += 1 + if extended < length_hint: + resizelist_hint(data, extended) + return data -def _getitem_slice_multistep(data, start, step, slicelength): - return [data[start + i*step] for i in range(slicelength)] +def _hex_digit_to_int(d): + val = ord(d) + if 47 < val < 58: + return val - 48 + if 96 < val < 103: + return val - 87 + return -1 -def contains__Bytearray_Long(space, w_bytearray, w_char): - char = 
space.int_w(w_char) - if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) - for c in w_bytearray.data: - if ord(c) == char: - return space.w_True - return space.w_False -def contains__Bytearray_String(space, w_bytearray, w_str): - # XXX slow - copies, needs rewriting - w_str2 = _to_bytes(space, w_bytearray) - return stringobject.contains__String_String(space, w_str2, w_str) +class BytearrayDocstrings: + """bytearray(iterable_of_ints) -> bytearray + bytearray(string, encoding[, errors]) -> bytearray + bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray + bytearray(memory_view) -> bytearray -def contains__Bytearray_ANY(space, w_bytearray, w_sub): - # XXX slow - copies, needs rewriting - w_str = space.wrapbytes(space.bufferstr_new_w(w_sub)) - w_str2 = _to_bytes(space, w_bytearray) - return stringobject.contains__String_String(space, w_str2, w_str) + Construct an mutable bytearray object from: + - an iterable yielding integers in range(256) + - a text string encoded using the specified encoding + - a bytes or a bytearray object + - any object implementing the buffer API. -def add__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - return W_BytearrayObject(data1 + data2) + bytearray(int) -> bytearray. -def add__Bytearray_ANY(space, w_bytearray1, w_other): - data1 = w_bytearray1.data - data2 = [c for c in space.bufferstr_new_w(w_other)] - return W_BytearrayObject(data1 + data2) + Construct a zero-initialized bytearray of the given length. -def add__String_Bytearray(space, w_str, w_bytearray): - data2 = w_bytearray.data - data1 = [c for c in space.bytes_w(w_str)] - return W_BytearrayObject(data1 + data2) + """ -def mul_bytearray_times(space, w_bytearray, w_times): - try: - times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: - if e.match(space, space.w_TypeError): - raise FailedToImplement - raise - data = w_bytearray.data - return W_BytearrayObject(data * times) + def __add__(): + """x.__add__(y) <==> x+y""" -def mul__Bytearray_ANY(space, w_bytearray, w_times): - return mul_bytearray_times(space, w_bytearray, w_times) + def __alloc__(): + """B.__alloc__() -> int -def mul__ANY_Bytearray(space, w_times, w_bytearray): - return mul_bytearray_times(space, w_bytearray, w_times) + Return the number of bytes actually allocated. 
+ """ -def inplace_mul__Bytearray_ANY(space, w_bytearray, w_times): - try: - times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: - if e.match(space, space.w_TypeError): - raise FailedToImplement - raise - w_bytearray.data *= times - return w_bytearray + def __contains__(): + """x.__contains__(y) <==> y in x""" -def eq__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - if len(data1) != len(data2): - return space.w_False - for i in range(len(data1)): - if data1[i] != data2[i]: - return space.w_False - return space.w_True + def __delitem__(): + """x.__delitem__(y) <==> del x[y]""" -def String2Bytearray(space, w_str): - data = [c for c in space.bytes_w(w_str)] - return W_BytearrayObject(data) + def __eq__(): + """x.__eq__(y) <==> x==y""" -def eq__Bytearray_String(space, w_bytearray, w_other): - return space.eq(_to_bytes(space, w_bytearray), w_other) + def __ge__(): + """x.__ge__(y) <==> x>=y""" -def ne__Bytearray_String(space, w_bytearray, w_other): - return space.ne(_to_bytes(space, w_bytearray), w_other) + def __getattribute__(): + """x.__getattribute__('name') <==> x.name""" -def _min(a, b): - if a < b: - return a - return b + def __getitem__(): + """x.__getitem__(y) <==> x[y]""" -def lt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - ncmp = _min(len(data1), len(data2)) - # Search for the first index where items are different - for p in range(ncmp): - if data1[p] != data2[p]: - return space.newbool(data1[p] < data2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(data1) < len(data2)) + def __gt__(): + """x.__gt__(y) <==> x>y""" -def gt__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - data1 = w_bytearray1.data - data2 = w_bytearray2.data - ncmp = _min(len(data1), len(data2)) - # Search for the first index where items are different - for p in range(ncmp): - if data1[p] != data2[p]: - return space.newbool(data1[p] > data2[p]) - # No more items to compare -- compare sizes - return space.newbool(len(data1) > len(data2)) + def __iadd__(): + """x.__iadd__(y) <==> x+=y""" -def str_translate__Bytearray_ANY_ANY(space, w_bytearray1, w_table, w_deletechars): - # XXX slow, copies *twice* needs proper implementation - w_str_copy = _to_bytes(space, w_bytearray1) - w_res = stringobject.str_translate__String_ANY_ANY(space, w_str_copy, - w_table, w_deletechars) - return String2Bytearray(space, w_res) + def __imul__(): + """x.__imul__(y) <==> x*=y""" -# Mostly copied from repr__String, but without the "smart quote" -# functionality. -def repr__Bytearray(space, w_bytearray): - s = w_bytearray.data + def __init__(): + """x.__init__(...) initializes x; see help(type(x)) for signature""" - # Good default if there are no replacements. 
- buf = StringBuilder(len("bytearray(b'')") + len(s)) + def __iter__(): + """x.__iter__() <==> iter(x)""" - buf.append("bytearray(b'") + def __le__(): + """x.__le__(y) <==> x<=y""" - for i in range(len(s)): - c = s[i] + def __len__(): + """x.__len__() <==> len(x)""" - if c == '\\' or c == "'": - buf.append('\\') - buf.append(c) - elif c == '\t': - buf.append('\\t') - elif c == '\r': - buf.append('\\r') - elif c == '\n': - buf.append('\\n') - elif not '\x20' <= c < '\x7f': - n = ord(c) - buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) - else: - buf.append(c) + def __lt__(): + """x.__lt__(y) <==> x x*n""" - return space.wrap(buf.build()) + def __ne__(): + """x.__ne__(y) <==> x!=y""" -def _to_bytes(space, w_bytearray): - return space.wrapbytes(''.join(w_bytearray.data)) + def __reduce__(): + """Return state information for pickling.""" -def str_count__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrapbytes(space.bufferstr_new_w(w_char)) - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_count__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) + def __repr__(): + """x.__repr__() <==> repr(x)""" -def str_index__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrapbytes(space.bufferstr_new_w(w_char)) - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_index__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) + def __rmul__(): + """x.__rmul__(n) <==> n*x""" -def str_rindex__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrapbytes(space.bufferstr_new_w(w_char)) - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_rindex__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) + def __setitem__(): + """x.__setitem__(i, y) <==> x[i]=y""" -def str_find__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrapbytes(space.bufferstr_new_w(w_char)) - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_find__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) + def __sizeof__(): + """B.__sizeof__() -> int -def str_rfind__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_char, w_start, w_stop): - w_char = space.wrapbytes(space.bufferstr_new_w(w_char)) - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_rfind__String_String_ANY_ANY(space, w_str, w_char, - w_start, w_stop) + Returns the size of B in memory, in bytes + """ -def _suffix_to_str(space, w_suffix, funcname): - try: - return space.bufferstr_new_w(w_suffix) - except OperationError as e: - if e.match(space, space.w_TypeError): - msg = ("%s first arg must be bytes or a tuple of bytes, " - "not %T") - raise operationerrfmt(space.w_TypeError, msg, funcname, w_suffix) + def __str__(): + """x.__str__() <==> str(x)""" -def str_startswith__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_prefix, w_start, w_stop): - if space.isinstance_w(w_prefix, space.w_tuple): - w_str = _to_bytes(space, w_bytearray) - w_prefix = space.newtuple([space.wrapbytes(space.bufferstr_new_w(w_entry)) for w_entry in - space.fixedview(w_prefix)]) - return stringobject.str_startswith__String_ANY_ANY_ANY(space, w_str, w_prefix, - w_start, w_stop) + def append(): + """B.append(int) -> None - w_prefix = space.wrapbytes(_suffix_to_str(space, w_prefix, 'startswith')) - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_startswith__String_String_ANY_ANY(space, w_str, w_prefix, - w_start, 
w_stop) + Append a single item to the end of B. + """ -def str_endswith__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_suffix, w_start, w_stop): - if space.isinstance_w(w_suffix, space.w_tuple): - w_str = _to_bytes(space, w_bytearray) - w_suffix = space.newtuple([space.wrapbytes(space.bufferstr_new_w(w_entry)) for w_entry in - space.fixedview(w_suffix)]) - return stringobject.str_endswith__String_ANY_ANY_ANY(space, w_str, w_suffix, - w_start, w_stop) - w_suffix = space.wrapbytes(_suffix_to_str(space, w_suffix, 'endswith')) - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_endswith__String_String_ANY_ANY(space, w_str, w_suffix, - w_start, w_stop) + def capitalize(): + """B.capitalize() -> copy of B + Return a copy of B with only its first character capitalized (ASCII) + and the rest lower-cased. + """ + + def center(): + """B.center(width[, fillchar]) -> copy of B + + Return B centered in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def count(): + """B.count(sub[, start[, end]]) -> int + + Return the number of non-overlapping occurrences of subsection sub in + bytes B[start:end]. Optional arguments start and end are interpreted + as in slice notation. + """ + + def decode(): + """B.decode(encoding=None, errors='strict') -> unicode + + Decode B using the codec registered for encoding. encoding defaults + to the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors raise + a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' + as well as any other name registered with codecs.register_error that is + able to handle UnicodeDecodeErrors. + """ + + def endswith(): + """B.endswith(suffix[, start[, end]]) -> bool + + Return True if B ends with the specified suffix, False otherwise. + With optional start, test B beginning at that position. + With optional end, stop comparing B at that position. + suffix can also be a tuple of strings to try. + """ + + def expandtabs(): + """B.expandtabs([tabsize]) -> copy of B + + Return a copy of B where all tab characters are expanded using spaces. + If tabsize is not given, a tab size of 8 characters is assumed. + """ + + def extend(): + """B.extend(iterable_of_ints) -> None + + Append all the elements from the iterator or sequence to the + end of B. + """ + + def find(): + """B.find(sub[, start[, end]]) -> int + + Return the lowest index in B where subsection sub is found, + such that sub is contained within B[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def fromhex(): + """bytearray.fromhex(string) -> bytearray (static method) + + Create a bytearray object from a string of hexadecimal numbers. + Spaces between two numbers are accepted. + Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\xb9\x01\xef'). + """ + + def index(): + """B.index(sub[, start[, end]]) -> int + + Like B.find() but raise ValueError when the subsection is not found. + """ + + def insert(): + """B.insert(index, int) -> None + + Insert a single item into the bytearray before the given index. + """ + + def isalnum(): + """B.isalnum() -> bool + + Return True if all characters in B are alphanumeric + and there is at least one character in B, False otherwise. + """ + + def isalpha(): + """B.isalpha() -> bool + + Return True if all characters in B are alphabetic + and there is at least one character in B, False otherwise. 
+ """ + + def isdigit(): + """B.isdigit() -> bool + + Return True if all characters in B are digits + and there is at least one character in B, False otherwise. + """ + + def islower(): + """B.islower() -> bool + + Return True if all cased characters in B are lowercase and there is + at least one cased character in B, False otherwise. + """ + + def isspace(): + """B.isspace() -> bool + + Return True if all characters in B are whitespace + and there is at least one character in B, False otherwise. + """ + + def istitle(): + """B.istitle() -> bool + + Return True if B is a titlecased string and there is at least one + character in B, i.e. uppercase characters may only follow uncased + characters and lowercase characters only cased ones. Return False + otherwise. + """ + + def isupper(): + """B.isupper() -> bool + + Return True if all cased characters in B are uppercase and there is + at least one cased character in B, False otherwise. + """ + + def join(): + """B.join(iterable_of_bytes) -> bytearray + + Concatenate any number of str/bytearray objects, with B + in between each pair, and return the result as a new bytearray. + """ + + def ljust(): + """B.ljust(width[, fillchar]) -> copy of B + + Return B left justified in a string of length width. Padding is + done using the specified fill character (default is a space). + """ + + def lower(): + """B.lower() -> copy of B + + Return a copy of B with all ASCII characters converted to lowercase. + """ + + def lstrip(): + """B.lstrip([bytes]) -> bytearray + + Strip leading bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip leading ASCII whitespace. + """ + + def partition(): + """B.partition(sep) -> (head, sep, tail) + + Search for the separator sep in B, and return the part before it, + the separator itself, and the part after it. If the separator is not + found, returns B and two empty bytearray objects. + """ + + def pop(): + """B.pop([index]) -> int + + Remove and return a single item from B. If no index + argument is given, will pop the last value. + """ + + def remove(): + """B.remove(int) -> None + + Remove the first occurrence of a value in B. + """ + + def replace(): + """B.replace(old, new[, count]) -> bytearray + + Return a copy of B with all occurrences of subsection + old replaced by new. If the optional argument count is + given, only the first count occurrences are replaced. + """ + + def reverse(): + """B.reverse() -> None + + Reverse the order of the values in B in place. + """ + + def rfind(): + """B.rfind(sub[, start[, end]]) -> int + + Return the highest index in B where subsection sub is found, + such that sub is contained within B[start,end]. Optional + arguments start and end are interpreted as in slice notation. + + Return -1 on failure. + """ + + def rindex(): + """B.rindex(sub[, start[, end]]) -> int + + Like B.rfind() but raise ValueError when the subsection is not found. + """ + + def rjust(): + """B.rjust(width[, fillchar]) -> copy of B + + Return B right justified in a string of length width. Padding is + done using the specified fill character (default is a space) + """ + + def rpartition(): + """B.rpartition(sep) -> (head, sep, tail) + + Search for the separator sep in B, starting at the end of B, + and return the part before it, the separator itself, and the + part after it. If the separator is not found, returns two empty + bytearray objects and B. 
+ """ + + def rsplit(): + """B.rsplit(sep=None, maxsplit=-1) -> list of bytearrays + + Return a list of the sections in B, using sep as the delimiter, + starting at the end of B and working to the front. + If sep is not given, B is split on ASCII whitespace characters + (space, tab, return, newline, formfeed, vertical tab). + If maxsplit is given, at most maxsplit splits are done. + """ + + def rstrip(): + """B.rstrip([bytes]) -> bytearray + + Strip trailing bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip trailing ASCII whitespace. + """ + + def split(): + """B.split(sep=None, maxsplit=-1) -> list of bytearrays + + Return a list of the sections in B, using sep as the delimiter. + If sep is not given, B is split on ASCII whitespace characters + (space, tab, return, newline, formfeed, vertical tab). + If maxsplit is given, at most maxsplit splits are done. + """ + + def splitlines(): + """B.splitlines(keepends=False) -> list of lines + + Return a list of the lines in B, breaking at line boundaries. + Line breaks are not included in the resulting list unless keepends + is given and true. + """ + + def startswith(): + """B.startswith(prefix[, start[, end]]) -> bool + + Return True if B starts with the specified prefix, False otherwise. + With optional start, test B beginning at that position. + With optional end, stop comparing B at that position. + prefix can also be a tuple of strings to try. + """ + + def strip(): + """B.strip([bytes]) -> bytearray + + Strip leading and trailing bytes contained in the argument + and return the result as a new bytearray. + If the argument is omitted, strip ASCII whitespace. + """ + + def swapcase(): + """B.swapcase() -> copy of B + + Return a copy of B with uppercase ASCII characters converted + to lowercase ASCII and vice versa. + """ + + def title(): + """B.title() -> copy of B + + Return a titlecased version of B, i.e. ASCII words start with uppercase + characters, all remaining cased characters have lowercase. + """ + + def translate(): + """B.translate(table[, deletechars]) -> bytearray + + Return a copy of B, where all characters occurring in the + optional argument deletechars are removed, and the remaining + characters have been mapped through the given translation + table, which must be a bytes object of length 256. + """ + + def upper(): + """B.upper() -> copy of B + + Return a copy of B with all ASCII characters converted to uppercase. + """ + + def zfill(): + """B.zfill(width) -> copy of B + + Pad a numeric string B with zeros on the left, to fill a field + of the specified width. B is never truncated. 
+ """ + + +W_BytearrayObject.typedef = StdTypeDef( + "bytearray", + __doc__ = BytearrayDocstrings.__doc__, + __new__ = interp2app(W_BytearrayObject.descr_new), + __hash__ = None, + __reduce__ = interp2app(W_BytearrayObject.descr_reduce, + doc=BytearrayDocstrings.__reduce__.__doc__), + fromhex = interp2app(W_BytearrayObject.descr_fromhex, as_classmethod=True, + doc=BytearrayDocstrings.fromhex.__doc__), + + __repr__ = interp2app(W_BytearrayObject.descr_repr, + doc=BytearrayDocstrings.__repr__.__doc__), + __str__ = interp2app(W_BytearrayObject.descr_str, + doc=BytearrayDocstrings.__str__.__doc__), + + __eq__ = interp2app(W_BytearrayObject.descr_eq, + doc=BytearrayDocstrings.__eq__.__doc__), + __ne__ = interp2app(W_BytearrayObject.descr_ne, + doc=BytearrayDocstrings.__ne__.__doc__), + __lt__ = interp2app(W_BytearrayObject.descr_lt, + doc=BytearrayDocstrings.__lt__.__doc__), + __le__ = interp2app(W_BytearrayObject.descr_le, + doc=BytearrayDocstrings.__le__.__doc__), + __gt__ = interp2app(W_BytearrayObject.descr_gt, + doc=BytearrayDocstrings.__gt__.__doc__), + __ge__ = interp2app(W_BytearrayObject.descr_ge, + doc=BytearrayDocstrings.__ge__.__doc__), + + __len__ = interp2app(W_BytearrayObject.descr_len, + doc=BytearrayDocstrings.__len__.__doc__), + __contains__ = interp2app(W_BytearrayObject.descr_contains, + doc=BytearrayDocstrings.__contains__.__doc__), + + __add__ = interp2app(W_BytearrayObject.descr_add, + doc=BytearrayDocstrings.__add__.__doc__), + __mul__ = interp2app(W_BytearrayObject.descr_mul, + doc=BytearrayDocstrings.__mul__.__doc__), + __rmul__ = interp2app(W_BytearrayObject.descr_mul, + doc=BytearrayDocstrings.__rmul__.__doc__), + + __getitem__ = interp2app(W_BytearrayObject.descr_getitem, + doc=BytearrayDocstrings.__getitem__.__doc__), + + capitalize = interp2app(W_BytearrayObject.descr_capitalize, + doc=BytearrayDocstrings.capitalize.__doc__), + center = interp2app(W_BytearrayObject.descr_center, + doc=BytearrayDocstrings.center.__doc__), + count = interp2app(W_BytearrayObject.descr_count, + doc=BytearrayDocstrings.count.__doc__), + decode = interp2app(W_BytearrayObject.descr_decode, + doc=BytearrayDocstrings.decode.__doc__), + expandtabs = interp2app(W_BytearrayObject.descr_expandtabs, + doc=BytearrayDocstrings.expandtabs.__doc__), + find = interp2app(W_BytearrayObject.descr_find, + doc=BytearrayDocstrings.find.__doc__), + rfind = interp2app(W_BytearrayObject.descr_rfind, + doc=BytearrayDocstrings.rfind.__doc__), + index = interp2app(W_BytearrayObject.descr_index, + doc=BytearrayDocstrings.index.__doc__), + rindex = interp2app(W_BytearrayObject.descr_rindex, + doc=BytearrayDocstrings.rindex.__doc__), + isalnum = interp2app(W_BytearrayObject.descr_isalnum, + doc=BytearrayDocstrings.isalnum.__doc__), + isalpha = interp2app(W_BytearrayObject.descr_isalpha, + doc=BytearrayDocstrings.isalpha.__doc__), + isdigit = interp2app(W_BytearrayObject.descr_isdigit, + doc=BytearrayDocstrings.isdigit.__doc__), + islower = interp2app(W_BytearrayObject.descr_islower, + doc=BytearrayDocstrings.islower.__doc__), + isspace = interp2app(W_BytearrayObject.descr_isspace, + doc=BytearrayDocstrings.isspace.__doc__), + istitle = interp2app(W_BytearrayObject.descr_istitle, + doc=BytearrayDocstrings.istitle.__doc__), + isupper = interp2app(W_BytearrayObject.descr_isupper, + doc=BytearrayDocstrings.isupper.__doc__), + join = interp2app(W_BytearrayObject.descr_join, + doc=BytearrayDocstrings.join.__doc__), + ljust = interp2app(W_BytearrayObject.descr_ljust, + doc=BytearrayDocstrings.ljust.__doc__), + rjust = 
interp2app(W_BytearrayObject.descr_rjust, + doc=BytearrayDocstrings.rjust.__doc__), + lower = interp2app(W_BytearrayObject.descr_lower, + doc=BytearrayDocstrings.lower.__doc__), + partition = interp2app(W_BytearrayObject.descr_partition, + doc=BytearrayDocstrings.partition.__doc__), + rpartition = interp2app(W_BytearrayObject.descr_rpartition, + doc=BytearrayDocstrings.rpartition.__doc__), + replace = interp2app(W_BytearrayObject.descr_replace, + doc=BytearrayDocstrings.replace.__doc__), + split = interp2app(W_BytearrayObject.descr_split, + doc=BytearrayDocstrings.split.__doc__), + rsplit = interp2app(W_BytearrayObject.descr_rsplit, + doc=BytearrayDocstrings.rsplit.__doc__), + splitlines = interp2app(W_BytearrayObject.descr_splitlines, + doc=BytearrayDocstrings.splitlines.__doc__), + startswith = interp2app(W_BytearrayObject.descr_startswith, + doc=BytearrayDocstrings.startswith.__doc__), + endswith = interp2app(W_BytearrayObject.descr_endswith, + doc=BytearrayDocstrings.endswith.__doc__), + strip = interp2app(W_BytearrayObject.descr_strip, + doc=BytearrayDocstrings.strip.__doc__), + lstrip = interp2app(W_BytearrayObject.descr_lstrip, + doc=BytearrayDocstrings.lstrip.__doc__), + rstrip = interp2app(W_BytearrayObject.descr_rstrip, + doc=BytearrayDocstrings.rstrip.__doc__), + swapcase = interp2app(W_BytearrayObject.descr_swapcase, + doc=BytearrayDocstrings.swapcase.__doc__), + title = interp2app(W_BytearrayObject.descr_title, + doc=BytearrayDocstrings.title.__doc__), + translate = interp2app(W_BytearrayObject.descr_translate, + doc=BytearrayDocstrings.translate.__doc__), + upper = interp2app(W_BytearrayObject.descr_upper, + doc=BytearrayDocstrings.upper.__doc__), + zfill = interp2app(W_BytearrayObject.descr_zfill, + doc=BytearrayDocstrings.zfill.__doc__), + + __init__ = interp2app(W_BytearrayObject.descr_init, + doc=BytearrayDocstrings.__init__.__doc__), + __buffer__ = interp2app(W_BytearrayObject.descr_buffer), + + __iadd__ = interp2app(W_BytearrayObject.descr_inplace_add, + doc=BytearrayDocstrings.__iadd__.__doc__), + __imul__ = interp2app(W_BytearrayObject.descr_inplace_mul, + doc=BytearrayDocstrings.__imul__.__doc__), + __setitem__ = interp2app(W_BytearrayObject.descr_setitem, + doc=BytearrayDocstrings.__setitem__.__doc__), + __delitem__ = interp2app(W_BytearrayObject.descr_delitem, + doc=BytearrayDocstrings.__delitem__.__doc__), + + append = interp2app(W_BytearrayObject.descr_append, + doc=BytearrayDocstrings.append.__doc__), + extend = interp2app(W_BytearrayObject.descr_extend, + doc=BytearrayDocstrings.extend.__doc__), + insert = interp2app(W_BytearrayObject.descr_insert, + doc=BytearrayDocstrings.insert.__doc__), + pop = interp2app(W_BytearrayObject.descr_pop, + doc=BytearrayDocstrings.pop.__doc__), + remove = interp2app(W_BytearrayObject.descr_remove, + doc=BytearrayDocstrings.remove.__doc__), + reverse = interp2app(W_BytearrayObject.descr_reverse, + doc=BytearrayDocstrings.reverse.__doc__), +) + +init_signature = Signature(['source', 'encoding', 'errors'], None, None) +init_defaults = [None, None, None] + + +# XXX consider moving to W_BytearrayObject or remove def str_join__Bytearray_ANY(space, w_self, w_list): list_w = space.listview(w_list) if not list_w: @@ -302,249 +1024,8 @@ newdata.extend([c for c in space.bufferstr_new_w(w_s)]) return W_BytearrayObject(newdata) -def str_decode__Bytearray_ANY_ANY(space, w_bytearray, w_encoding, w_errors): - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_decode__String_ANY_ANY(space, w_str, w_encoding, w_errors) - -def 
str_islower__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_islower__String(space, w_str) - -def str_isupper__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_isupper__String(space, w_str) - -def str_isalpha__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_isalpha__String(space, w_str) - -def str_isalnum__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_isalnum__String(space, w_str) - -def str_isdigit__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_isdigit__String(space, w_str) - -def str_istitle__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_istitle__String(space, w_str) - -def str_isspace__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - return stringobject.str_isspace__String(space, w_str) - -def bytearray_insert__Bytearray_Long_ANY(space, w_bytearray, w_idx, w_other): - where = space.int_w(w_idx) - length = len(w_bytearray.data) - index = get_positive_index(where, length) - val = getbytevalue(space, w_other) - w_bytearray.data.insert(index, val) - return space.w_None - -def bytearray_pop__Bytearray_Long(space, w_bytearray, w_idx): - index = space.int_w(w_idx) - try: - result = w_bytearray.data.pop(index) - except IndexError: - if not w_bytearray.data: - raise OperationError(space.w_IndexError, space.wrap( - "pop from empty bytearray")) - raise OperationError(space.w_IndexError, space.wrap( - "pop index out of range")) - return space.wrap(ord(result)) - -def bytearray_remove__Bytearray_ANY(space, w_bytearray, w_char): - char = space.int_w(space.index(w_char)) - try: - result = w_bytearray.data.remove(chr(char)) - except ValueError: - raise OperationError(space.w_ValueError, space.wrap( - "value not found in bytearray")) - -def bytearray_reverse__Bytearray(space, w_bytearray): - w_bytearray.data.reverse() - return space.w_None - _space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) -def bytearray_strip__Bytearray_None(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, _space_chars, 1, 1) - -def bytearray_strip__Bytearray_ANY(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, space.bufferstr_new_w(w_chars), 1, 1) - -def bytearray_lstrip__Bytearray_None(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, _space_chars, 1, 0) - -def bytearray_lstrip__Bytearray_ANY(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, space.bufferstr_new_w(w_chars), 1, 0) - -def bytearray_rstrip__Bytearray_None(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, _space_chars, 0, 1) - -def bytearray_rstrip__Bytearray_ANY(space, w_bytearray, w_chars): - return _strip(space, w_bytearray, space.bufferstr_new_w(w_chars), 0, 1) - -# These methods could just delegate to the string implementation, -# but they have to return a bytearray. 
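# Plain-Python illustration of the invariant the removed comment just above
# insists on (independent of how it is implemented): the case methods must
# hand back bytearray objects, not str.
b = bytearray(b"hello World")
assert b.upper() == bytearray(b"HELLO WORLD")
assert b.title() == bytearray(b"Hello World")
assert isinstance(b.swapcase(), bytearray)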
-def str_replace__Bytearray_ANY_ANY_ANY(space, w_bytearray, w_str1, w_str2, w_max): - w_str = _to_bytes(space, w_bytearray) - w_res = stringobject.str_replace__String_ANY_ANY_ANY(space, w_str, w_str1, - w_str2, w_max) - return String2Bytearray(space, w_res) - -def str_upper__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - w_res = stringobject.str_upper__String(space, w_str) - return String2Bytearray(space, w_res) - -def str_lower__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - w_res = stringobject.str_lower__String(space, w_str) - return String2Bytearray(space, w_res) - -def str_title__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - w_res = stringobject.str_title__String(space, w_str) - return String2Bytearray(space, w_res) - -def str_swapcase__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - w_res = stringobject.str_swapcase__String(space, w_str) - return String2Bytearray(space, w_res) - -def str_capitalize__Bytearray(space, w_bytearray): - w_str = _to_bytes(space, w_bytearray) - w_res = stringobject.str_capitalize__String(space, w_str) - return String2Bytearray(space, w_res) - -def str_ljust__Bytearray_ANY_ANY(space, w_bytearray, w_width, w_fillchar): - w_str = _to_bytes(space, w_bytearray) - w_res = stringobject.str_ljust__String_ANY_ANY(space, w_str, w_width, - w_fillchar) - return String2Bytearray(space, w_res) - -def str_rjust__Bytearray_ANY_ANY(space, w_bytearray, w_width, w_fillchar): - w_str = _to_bytes(space, w_bytearray) - w_res = stringobject.str_rjust__String_ANY_ANY(space, w_str, w_width, - w_fillchar) - return String2Bytearray(space, w_res) - -def str_center__Bytearray_ANY_ANY(space, w_bytearray, w_width, w_fillchar): - w_str = _to_bytes(space, w_bytearray) - w_res = stringobject.str_center__String_ANY_ANY(space, w_str, w_width, - w_fillchar) - return String2Bytearray(space, w_res) - -def str_zfill__Bytearray_ANY(space, w_bytearray, w_width): - w_str = _to_bytes(space, w_bytearray) - w_res = stringobject.str_zfill__String_ANY(space, w_str, w_width) - return String2Bytearray(space, w_res) - -def str_expandtabs__Bytearray_ANY(space, w_bytearray, w_tabsize): - w_str = _to_bytes(space, w_bytearray) - w_res = stringobject.str_expandtabs__String_ANY(space, w_str, w_tabsize) - return String2Bytearray(space, w_res) - -def str_splitlines__Bytearray_ANY(space, w_bytearray, w_keepends): - w_str = _to_bytes(space, w_bytearray) - w_result = stringobject.str_splitlines__String_ANY(space, w_str, w_keepends) - return space.newlist([ - new_bytearray(space, space.w_bytearray, makebytesdata_w(space, w_entry)) - for w_entry in space.unpackiterable(w_result) - ]) - -def str_split__Bytearray_ANY_ANY(space, w_bytearray, w_by, w_maxsplit=-1): - w_str = _to_bytes(space, w_bytearray) - if not space.is_w(w_by, space.w_None): - w_by = space.wrapbytes(space.bufferstr_new_w(w_by)) - w_list = space.call_method(w_str, "split", w_by, w_maxsplit) - length = space.int_w(space.len(w_list)) - for i in range(length): - w_i = space.wrap(i) - space.setitem(w_list, w_i, String2Bytearray(space, space.getitem(w_list, w_i))) - return w_list - -def str_rsplit__Bytearray_ANY_ANY(space, w_bytearray, w_by, w_maxsplit=-1): - w_str = _to_bytes(space, w_bytearray) - if not space.is_w(w_by, space.w_None): - w_by = space.wrapbytes(space.bufferstr_new_w(w_by)) - w_list = space.call_method(w_str, "rsplit", w_by, w_maxsplit) - length = space.int_w(space.len(w_list)) - for i in range(length): - w_i = space.wrap(i) - 
space.setitem(w_list, w_i, String2Bytearray(space, space.getitem(w_list, w_i))) - return w_list - -def str_partition__Bytearray_ANY(space, w_bytearray, w_sub): - w_str = _to_bytes(space, w_bytearray) - w_sub = space.wrapbytes(space.bufferstr_new_w(w_sub)) - w_tuple = stringobject.str_partition__String_String(space, w_str, w_sub) - w_a, w_b, w_c = space.fixedview(w_tuple, 3) - return space.newtuple([ - String2Bytearray(space, w_a), - String2Bytearray(space, w_b), - String2Bytearray(space, w_c)]) - -def str_rpartition__Bytearray_ANY(space, w_bytearray, w_sub): - w_str = _to_bytes(space, w_bytearray) - w_sub = space.wrapbytes(space.bufferstr_new_w(w_sub)) - w_tuple = stringobject.str_rpartition__String_String(space, w_str, w_sub) - w_a, w_b, w_c = space.fixedview(w_tuple, 3) - return space.newtuple([ - String2Bytearray(space, w_a), - String2Bytearray(space, w_b), - String2Bytearray(space, w_c)]) - -# __________________________________________________________ -# Mutability methods - -def bytearray_append__Bytearray_ANY(space, w_bytearray, w_item): - w_bytearray.data.append(getbytevalue(space, w_item)) - -def bytearray_extend__Bytearray_Bytearray(space, w_bytearray, w_other): - w_bytearray.data += w_other.data - -def bytearray_extend__Bytearray_ANY(space, w_bytearray, w_other): - w_bytearray.data += makebytesdata_w(space, w_other) - -def inplace_add__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2): - bytearray_extend__Bytearray_Bytearray(space, w_bytearray1, w_bytearray2) - return w_bytearray1 - -def inplace_add__Bytearray_ANY(space, w_bytearray1, w_iterable2): - w_bytearray1.data += space.bufferstr_new_w(w_iterable2) From noreply at buildbot.pypy.org Thu Jan 23 20:09:42 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 23 Jan 2014 20:09:42 +0100 (CET) Subject: [pypy-commit] pypy py3k-refactor-str-types: reintegrate our unicode changes Message-ID: <20140123190942.5F1AD1C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-refactor-str-types Changeset: r68869:9d4908e6605a Date: 2014-01-23 11:07 -0800 http://bitbucket.org/pypy/pypy/changeset/9d4908e6605a/ Log: reintegrate our unicode changes diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -8,7 +8,7 @@ from pypy.objspace.std.formatting import mod_format from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods -from pypy.objspace.std.unicodeobject import (unicode_from_string, +from pypy.objspace.std.unicodeobject import ( decode_object, unicode_from_encoded_object, _get_encoding_and_errors) from rpython.rlib.jit import we_are_jitted from rpython.rlib.objectmodel import compute_hash, compute_unique_id, import_from_mixin diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -513,7 +513,14 @@ if self._startswith(space, value, w_prefix, start, end): return space.w_True return space.w_False - return space.newbool(self._startswith(space, value, w_prefix, start, end)) + try: + return space.newbool(self._startswith(space, value, w_prefix, start, end)) + except OperationError as e: + if e.match(space, space.w_TypeError): + msg = ("startswith first arg must be str or a tuple of str, " + "not %T") + raise operationerrfmt(space.w_TypeError, msg, w_prefix) + raise def _startswith(self, space, value, w_prefix, start, end): return startswith(value, 
self._op_val(space, w_prefix), start, end) @@ -527,7 +534,15 @@ if self._endswith(space, value, w_suffix, start, end): return space.w_True return space.w_False - return space.newbool(self._endswith(space, value, w_suffix, start, end)) + try: + return space.newbool(self._endswith(space, value, w_suffix, start, + end)) + except OperationError as e: + if e.match(space, space.w_TypeError): + msg = ("endswith first arg must be str or a tuple of str, not " + "%T") + raise operationerrfmt(space.w_TypeError, msg, w_suffix) + raise def _endswith(self, space, value, w_prefix, start, end): return endswith(value, self._op_val(space, w_prefix), start, end) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -16,7 +16,7 @@ __all__ = ['W_UnicodeObject', 'wrapunicode', 'plain_str2unicode', 'encode_object', 'decode_object', 'unicode_from_object', - 'unicode_from_string', 'unicode_to_decimal_w'] + 'unicode_to_decimal_w'] class W_UnicodeObject(W_Root): @@ -99,9 +99,9 @@ def _op_val(self, space, w_other): if isinstance(w_other, W_UnicodeObject): return w_other._value - if space.isinstance_w(w_other, space.w_str): - return unicode_from_string(space, w_other)._value - return unicode_from_encoded_object(space, w_other, None, "strict")._value + raise operationerrfmt(space.w_TypeError, + "Can't convert '%T' object to str implicitly", + w_other) def _chr(self, char): assert len(char) == 1 @@ -155,41 +155,101 @@ return space.newlist_unicode(lst) @staticmethod - @unwrap_spec(w_string = WrappedDefault("")) - def descr_new(space, w_unicodetype, w_string, w_encoding=None, + @unwrap_spec(w_object = WrappedDefault(u'')) + def descr_new(space, w_unicodetype, w_object=None, w_encoding=None, w_errors=None): # NB. 
the default value of w_obj is really a *wrapped* empty string: # there is gateway magic at work - w_obj = w_string + w_obj = w_object encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) - # convoluted logic for the case when unicode subclass has a __unicode__ - # method, we need to call this method - is_precisely_unicode = space.is_w(space.type(w_obj), space.w_unicode) - if (is_precisely_unicode or - (space.isinstance_w(w_obj, space.w_unicode) and - space.findattr(w_obj, space.wrap('__unicode__')) is None)): - if encoding is not None or errors is not None: - raise OperationError(space.w_TypeError, space.wrap( - 'decoding Unicode is not supported')) - if (is_precisely_unicode and - space.is_w(w_unicodetype, space.w_unicode)): - return w_obj - w_value = w_obj + if encoding is None and errors is None: + w_value = unicode_from_object(space, w_obj) else: - if encoding is None and errors is None: - w_value = unicode_from_object(space, w_obj) - else: - w_value = unicode_from_encoded_object(space, w_obj, - encoding, errors) - if space.is_w(w_unicodetype, space.w_unicode): - return w_value + w_value = unicode_from_encoded_object(space, w_obj, + encoding, errors) + if space.is_w(w_unicodetype, space.w_unicode): + return w_value assert isinstance(w_value, W_UnicodeObject) w_newobj = space.allocate_instance(W_UnicodeObject, w_unicodetype) W_UnicodeObject.__init__(w_newobj, w_value._value) return w_newobj + @staticmethod + def descr_maketrans(space, w_type, w_x, w_y=None, w_z=None): + if space.is_none(w_y): + y = None + else: + y = space.unicode_w(w_y) + if space.is_none(w_z): + z = None + else: + z = space.unicode_w(w_z) + + w_new = space.newdict() + if y is not None: + # x must be a string too, of equal length + ylen = len(y) + try: + x = space.unicode_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_TypeError, space.wrap( + "first maketrans argument must " + "be a string if there is a second argument")) + if len(x) != ylen: + raise OperationError(space.w_ValueError, space.wrap( + "the first two maketrans " + "arguments must have equal length")) + # create entries for translating chars in x to those in y + for i in range(len(x)): + w_key = space.newint(ord(x[i])) + w_value = space.newint(ord(y[i])) + space.setitem(w_new, w_key, w_value) + # create entries for deleting chars in z + if z is not None: + for i in range(len(z)): + w_key = space.newint(ord(z[i])) + space.setitem(w_new, w_key, space.w_None) + else: + # x must be a dict + if not space.is_w(space.type(w_x), space.w_dict): + raise OperationError(space.w_TypeError, space.wrap( + "if you give only one argument " + "to maketrans it must be a dict")) + # copy entries into the new dict, converting string keys to int keys + w_iter = space.iter(space.call_method(w_x, "items")) + while True: + try: + w_item = space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + w_key, w_value = space.unpackiterable(w_item, 2) + if space.isinstance_w(w_key, space.w_unicode): + # convert string keys to integer keys + key = space.unicode_w(w_key) + if len(key) != 1: + raise OperationError(space.w_ValueError, space.wrap( + "string keys in translate " + "table must be of length 1")) + w_key = space.newint(ord(key[0])) + else: + # just keep integer keys + try: + space.int_w(w_key) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + raise OperationError(space.w_TypeError, space.wrap( + "keys in 
translate table must " + "be strings or integers")) + space.setitem(w_new, w_key, w_value) + return w_new + def descr_repr(self, space): chars = self._value size = len(chars) @@ -197,7 +257,10 @@ return space.wrap(s) def descr_str(self, space): - return encode_object(space, self, None, None) + if space.is_w(space.type(self), space.w_unicode): + return self + # Subtype -- return genuine unicode string with the same value. + return space.wrap(space.unicode_w(self)) def descr_hash(self, space): x = compute_hash(self._value) @@ -209,13 +272,6 @@ except OperationError, e: if e.match(space, space.w_TypeError): return space.w_NotImplemented - if (e.match(space, space.w_UnicodeDecodeError) or - e.match(space, space.w_UnicodeEncodeError)): - msg = ("Unicode equal comparison failed to convert both " - "arguments to Unicode - interpreting them as being " - "unequal") - space.warn(space.wrap(msg), space.w_UnicodeWarning) - return space.w_False raise def descr_ne(self, space, w_other): @@ -224,13 +280,6 @@ except OperationError, e: if e.match(space, space.w_TypeError): return space.w_NotImplemented - if (e.match(space, space.w_UnicodeDecodeError) or - e.match(space, space.w_UnicodeEncodeError)): - msg = ("Unicode unequal comparison failed to convert both " - "arguments to Unicode - interpreting them as being " - "unequal") - space.warn(space.wrap(msg), space.w_UnicodeWarning) - return space.w_True raise def descr_lt(self, space, w_other): @@ -274,19 +323,16 @@ return newformat.format_method(space, self, __args__.arguments_w, w_kwds, True) + def descr_format_map(self, space, w_mapping): + return newformat.format_method(space, self, None, w_mapping, True) + def descr__format__(self, space, w_format_spec): - """ - if not space.isinstance_w(w_format_spec, space.w_unicode): - w_format_spec = space.call_function(space.w_unicode, w_format_spec) - spec = space.unicode_w(w_format_spec) - formatter = newformat.unicode_formatter(space, spec) - self2 = unicode_from_object(space, self) - assert isinstance(self2, W_UnicodeObject) - return formatter.format_string(self2._value) - """ return newformat.run_formatter(space, w_format_spec, "format_string", self) + def descr_iter(self, space): + return space.newseqiter(self) + def descr_mod(self, space, w_values): return mod_format(space, self, w_values, do_unicode=True) @@ -334,16 +380,6 @@ return 0 return 1 - def descr_formatter_parser(self, space): - from pypy.objspace.std.newformat import unicode_template_formatter - tformat = unicode_template_formatter(space, space.unicode_w(self)) - return tformat.formatter_parser() - - def descr_formatter_field_name_split(self, space): - from pypy.objspace.std.newformat import unicode_template_formatter - tformat = unicode_template_formatter(space, space.unicode_w(self)) - return tformat.formatter_field_name_split() - def descr_isdecimal(self, space): return self._is_generic(space, '_isdecimal') @@ -370,6 +406,15 @@ cased = True return space.newbool(cased) + def descr_isidentifier(self, space): + return space.newbool(_isidentifier(self._value)) + + def descr_isprintable(self, space): + for uchar in self._value: + if not unicodedb.isprintable(ord(uchar)): + return space.w_False + return space.w_True + def wrapunicode(space, uni): return W_UnicodeObject(uni) @@ -390,6 +435,25 @@ space.wrap("ordinal not in range(128)")])) assert False, "unreachable" +def _isidentifier(u): + if not u: + return False + + # PEP 3131 says that the first character must be in XID_Start and + # subsequent characters in XID_Continue, and for the ASCII range, + 
# the 2.x rules apply (i.e start with letters and underscore, + # continue with letters, digits, underscore). However, given the + # current definition of XID_Start and XID_Continue, it is sufficient + # to check just for these, except that _ must be allowed as starting + # an identifier. + first = u[0] + if not (unicodedb.isxidstart(ord(first)) or first == u'_'): + return False + + for i in range(1, len(u)): + if not unicodedb.isxidcontinue(ord(u[i])): + return False + return True # stuff imported from bytesobject for interoperability @@ -420,14 +484,13 @@ if encoding == 'ascii': u = space.unicode_w(w_object) eh = unicodehelper.encode_error_handler(space) - return space.wrap(unicode_encode_ascii( + return space.wrapbytes(unicode_encode_ascii( u, len(u), None, errorhandler=eh)) if encoding == 'utf-8': u = space.unicode_w(w_object) eh = unicodehelper.encode_error_handler(space) - return space.wrap(unicode_encode_utf_8( - u, len(u), None, errorhandler=eh, - allow_surrogates=True)) + return space.wrapbytes(unicode_encode_utf_8( + u, len(u), None, errorhandler=eh)) from pypy.module._codecs.interp_codecs import lookup_codec w_encoder = space.getitem(lookup_codec(space, encoding), space.wrap(0)) if errors is None: @@ -436,10 +499,9 @@ w_errors = space.wrap(errors) w_restuple = space.call_function(w_encoder, w_object, w_errors) w_retval = space.getitem(w_restuple, space.wrap(0)) - if not space.isinstance_w(w_retval, space.w_str): - raise operationerrfmt(space.w_TypeError, - "encoder did not return an string object (type '%s')", - space.type(w_retval).getname(space)) + if not space.isinstance_w(w_retval, space.w_bytes): + msg = "encoder did not return a bytes string (type '%T')" + raise operationerrfmt(space.w_TypeError, msg, w_retval) return w_retval def decode_object(space, w_obj, encoding, errors): @@ -456,8 +518,7 @@ s = space.bufferstr_w(w_obj) eh = unicodehelper.decode_error_handler(space) return space.wrap(str_decode_utf_8( - s, len(s), None, final=True, errorhandler=eh, - allow_surrogates=True)[0]) + s, len(s), None, final=True, errorhandler=eh)[0]) w_codecs = space.getbuiltinmodule("_codecs") w_decode = space.getattr(w_codecs, space.wrap("decode")) if errors is None: @@ -486,44 +547,29 @@ def unicode_from_object(space, w_obj): if space.is_w(space.type(w_obj), space.w_unicode): return w_obj - elif space.is_w(space.type(w_obj), space.w_str): - w_res = w_obj - else: - w_unicode_method = space.lookup(w_obj, "__unicode__") - # obscure workaround: for the next two lines see - # test_unicode_conversion_with__str__ - if w_unicode_method is None: - if space.isinstance_w(w_obj, space.w_unicode): - return space.wrap(space.unicode_w(w_obj)) - w_unicode_method = space.lookup(w_obj, "__str__") - if w_unicode_method is not None: - w_res = space.get_and_call_function(w_unicode_method, w_obj) - else: - w_res = space.str(w_obj) - if space.isinstance_w(w_res, space.w_unicode): - return w_res - return unicode_from_encoded_object(space, w_res, None, "strict") + if space.lookup(w_obj, "__str__") is not None: + return space.str(w_obj) + return space.repr(w_obj) -def unicode_from_string(space, w_str): - # this is a performance and bootstrapping hack - encoding = getdefaultencoding(space) - if encoding != 'ascii': - return unicode_from_encoded_object(space, w_str, encoding, "strict") - s = space.str_w(w_str) - try: - return W_UnicodeObject(s.decode("ascii")) - except UnicodeDecodeError: - # raising UnicodeDecodeError is messy, "please crash for me" - return unicode_from_encoded_object(space, w_str, "ascii", 
"strict") +def ascii_from_object(space, w_obj): + """Implements builtins.ascii()""" + # repr is guaranteed to be unicode + w_repr = space.repr(w_obj) + w_encoded = encode_object(space, w_repr, 'ascii', 'backslashreplace') + return decode_object(space, w_encoded, 'ascii', None) class UnicodeDocstrings: - """unicode(object='') -> unicode object - unicode(string[, encoding[, errors]]) -> unicode object + """str(object='') -> str + str(bytes_or_buffer[, encoding[, errors]]) -> str - Create a new Unicode object from the given encoded string. - encoding defaults to the current default string encoding. - errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'. + Create a new string object from the given object. If encoding or + errors is specified, then the object must expose a data buffer + that will be decoded using the given encoding and error handler. + Otherwise, returns the result of object.__str__() (if defined) + or repr(object). + encoding defaults to sys.getdefaultencoding(). + errors defaults to 'strict'. """ @@ -554,18 +600,15 @@ def __getnewargs__(): """""" - def __getslice__(): - """x.__getslice__(i, j) <==> x[i:j] - - Use of negative indices is not supported. - """ - def __gt__(): """x.__gt__(y) <==> x>y""" def __hash__(): """x.__hash__() <==> hash(x)""" + def __iter__(): + """x.__iter__() <==> iter(x)""" + def __le__(): """x.__le__(y) <==> x<=y""" @@ -676,6 +719,14 @@ The substitutions are identified by braces ('{' and '}'). """ + def format_map(): + """S.format_map(mapping) -> str + + Return a formatted version of S, using substitutions from + mapping. The substitutions are identified by braces ('{' and + '}'). + """ + def index(): """S.index(sub[, start[, end]]) -> int @@ -710,6 +761,13 @@ and there is at least one character in S, False otherwise. """ + def isidentifier(): + """S.isidentifier() -> bool + + Return True if S is a valid identifier according to the language + definition. + """ + def islower(): """S.islower() -> bool @@ -724,6 +782,13 @@ False otherwise. """ + def isprintable(): + """S.isprintable() -> bool + + Return True if all characters in S are considered printable in + repr() or S is empty, False otherwise. + """ + def isspace(): """S.isspace() -> bool @@ -775,6 +840,19 @@ If chars is a str, it will be converted to unicode before stripping """ + def maketrans(): + """str.maketrans(x[, y[, z]]) -> dict (static method) + + Return a translation table usable for str.translate(). + If there is only one argument, it must be a dictionary mapping Unicode + ordinals (integers) or characters to Unicode ordinals, strings or None. + Character keys will be then converted to ordinals. + If there are two arguments, they must be strings of equal length, and + in the resulting dictionary, each character in x will be mapped to the + character at the same position in y. If there is a third argument, it + must be a string, whose characters will be mapped to None in the result. 
+ """ + def partition(): """S.partition(sep) -> (head, sep, tail) @@ -939,6 +1017,8 @@ __ge__ = interp2app(W_UnicodeObject.descr_ge, doc=UnicodeDocstrings.__ge__.__doc__), + __iter__ = interp2app(W_UnicodeObject.descr_iter, + doc=UnicodeDocstrings.__iter__.__doc__), __len__ = interp2app(W_UnicodeObject.descr_len, doc=UnicodeDocstrings.__len__.__doc__), __contains__ = interp2app(W_UnicodeObject.descr_contains, @@ -953,8 +1033,6 @@ __getitem__ = interp2app(W_UnicodeObject.descr_getitem, doc=UnicodeDocstrings.__getitem__.__doc__), - __getslice__ = interp2app(W_UnicodeObject.descr_getslice, - doc=UnicodeDocstrings.__getslice__.__doc__), capitalize = interp2app(W_UnicodeObject.descr_capitalize, doc=UnicodeDocstrings.capitalize.__doc__), @@ -962,8 +1040,6 @@ doc=UnicodeDocstrings.center.__doc__), count = interp2app(W_UnicodeObject.descr_count, doc=UnicodeDocstrings.count.__doc__), - decode = interp2app(W_UnicodeObject.descr_decode, - doc=UnicodeDocstrings.decode.__doc__), encode = interp2app(W_UnicodeObject.descr_encode, doc=UnicodeDocstrings.encode.__doc__), expandtabs = interp2app(W_UnicodeObject.descr_expandtabs, @@ -984,10 +1060,14 @@ doc=UnicodeDocstrings.isdecimal.__doc__), isdigit = interp2app(W_UnicodeObject.descr_isdigit, doc=UnicodeDocstrings.isdigit.__doc__), + isidentifier = interp2app(W_UnicodeObject.descr_isidentifier, + doc=UnicodeDocstrings.isidentifier.__doc__), islower = interp2app(W_UnicodeObject.descr_islower, doc=UnicodeDocstrings.islower.__doc__), isnumeric = interp2app(W_UnicodeObject.descr_isnumeric, doc=UnicodeDocstrings.isnumeric.__doc__), + isprintable = interp2app(W_UnicodeObject.descr_isprintable, + doc=UnicodeDocstrings.isprintable.__doc__), isspace = interp2app(W_UnicodeObject.descr_isspace, doc=UnicodeDocstrings.isspace.__doc__), istitle = interp2app(W_UnicodeObject.descr_istitle, @@ -1037,15 +1117,17 @@ format = interp2app(W_UnicodeObject.descr_format, doc=UnicodeDocstrings.format.__doc__), + format_map = interp2app(W_UnicodeObject.descr_format_map, + doc=UnicodeDocstrings.format_map.__doc__), __format__ = interp2app(W_UnicodeObject.descr__format__, doc=UnicodeDocstrings.__format__.__doc__), __mod__ = interp2app(W_UnicodeObject.descr_mod, doc=UnicodeDocstrings.__mod__.__doc__), __getnewargs__ = interp2app(W_UnicodeObject.descr_getnewargs, doc=UnicodeDocstrings.__getnewargs__.__doc__), - _formatter_parser = interp2app(W_UnicodeObject.descr_formatter_parser), - _formatter_field_name_split = - interp2app(W_UnicodeObject.descr_formatter_field_name_split), + maketrans = interp2app(W_UnicodeObject.descr_maketrans, + as_classmethod=True, + doc=UnicodeDocstrings.maketrans.__doc__) ) @@ -1057,7 +1139,15 @@ W_UnicodeObject.EMPTY = W_UnicodeObject(u'') -# Helper for converting int/long +# Helper for converting int/long this is called only from +# {int,long,float}type.descr__new__: in the default branch this is implemented +# using the same logic as PyUnicode_EncodeDecimal, as CPython 2.7 does. +# +# In CPython3 the call to PyUnicode_EncodeDecimal has been replaced to a call +# to PyUnicode_TransformDecimalToASCII, which is much simpler. Here, we do the +# equivalent. 
+# +# Note that, differently than default, we return an *unicode* RPython string def unicode_to_decimal_w(space, w_unistr): if not isinstance(w_unistr, W_UnicodeObject): raise operationerrfmt(space.w_TypeError, "expected unicode, got '%T'", @@ -1079,4 +1169,4 @@ _repr_function, _ = make_unicode_escape_function( - pass_printable=False, unicode_output=False, quotes=True, prefix='u') + pass_printable=True, unicode_output=True, quotes=True, prefix='') From noreply at buildbot.pypy.org Thu Jan 23 20:33:02 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 23 Jan 2014 20:33:02 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: Initial re-organization to allow detecting immutable fields in mapdicts Message-ID: <20140123193302.231551C06CD@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68870:7244e2ea0430 Date: 2014-01-23 13:32 -0600 http://bitbucket.org/pypy/pypy/changeset/7244e2ea0430/ Log: Initial re-organization to allow detecting immutable fields in mapdicts diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -19,7 +19,7 @@ # we want to propagate knowledge that the result cannot be negative class AbstractAttribute(object): - _immutable_fields_ = ['terminator'] + _immutable_fields_ = ['terminator', 'ever_mutated?'] cache_attrs = None _size_estimate = 0 @@ -27,18 +27,21 @@ self.space = space assert isinstance(terminator, Terminator) self.terminator = terminator + self.ever_mutated = False def read(self, obj, selector): index = self.index(selector) - if index < 0: + if index is None: return self.terminator._read_terminator(obj, selector) - return obj._mapdict_read_storage(index) + return obj._mapdict_read_storage(index.position, pure=not self.ever_mutated) def write(self, obj, selector, w_value): index = self.index(selector) - if index < 0: + if index is None: return self.terminator._write_terminator(obj, selector, w_value) - obj._mapdict_write_storage(index, w_value) + obj._mapdict_write_storage(index.position, w_value) + if not index.ever_mutated: + index.ever_mutated = True return True def delete(self, obj, selector): @@ -97,9 +100,9 @@ def _index(self, selector): while isinstance(self, PlainAttribute): if selector == self.selector: - return self.position + return self self = self.back - return -1 + return None def copy(self, obj): raise NotImplementedError("abstract base class") @@ -330,7 +333,7 @@ self.attrs = [None] * SIZE self._empty_selector = (None, INVALID) self.selectors = [self._empty_selector] * SIZE - self.indices = [0] * SIZE + self.indices = [None] * SIZE if space.config.objspace.std.withmethodcachecounter: self.hits = {} self.misses = {} @@ -460,9 +463,10 @@ self.map = map self.storage = make_sure_not_resized([None] * map.size_estimate()) - def _mapdict_read_storage(self, index): + def _mapdict_read_storage(self, index, pure=False): assert index >= 0 return self.storage[index] + def _mapdict_write_storage(self, index, value): self.storage[index] = value def _mapdict_storage_length(self): @@ -519,7 +523,6 @@ rangenmin1 = unroll.unrolling_iterable(range(nmin1)) class subcls(BaseMapdictObject, supercls): def _init_empty(self, map): - from rpython.rlib.debug import make_sure_not_resized for i in rangen: setattr(self, "_value%s" % i, erase_item(None)) self.map = map @@ -531,7 +534,7 @@ erased = getattr(self, "_value%s" % nmin1) return unerase_list(erased) - def _mapdict_read_storage(self, index): + def 
_mapdict_read_storage(self, index, pure=False): assert index >= 0 if index < nmin1: for i in rangenmin1: @@ -879,11 +882,11 @@ # if selector[1] != INVALID: index = map.index(selector) - if index >= 0: + if index is not None: # Note that if map.terminator is a DevolvedDictTerminator, # map.index() will always return -1 if selector[1]==DICT. - _fill_cache(pycode, nameindex, map, version_tag, index) - return w_obj._mapdict_read_storage(index) + _fill_cache(pycode, nameindex, map, version_tag, index.position) + return w_obj._mapdict_read_storage(index.position) if space.config.objspace.std.withmethodcachecounter: INVALID_CACHE_ENTRY.failure_counter += 1 return space.getattr(w_obj, w_name) diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -64,7 +64,7 @@ current = Terminator(space, "cls") for i in range(20000): current = PlainAttribute((str(i), DICT), current) - assert current.index(("0", DICT)) == 0 + assert current.index(("0", DICT)).position == 0 def test_search(): @@ -231,7 +231,6 @@ obj = cls.instantiate() a = 0 b = 1 - c = 2 obj.setslotvalue(a, 50) obj.setslotvalue(b, 60) assert obj.getslotvalue(a) == 50 @@ -648,7 +647,7 @@ def test_delete_slot(self): class A(object): __slots__ = ['x'] - + a = A() a.x = 42 del a.x From noreply at buildbot.pypy.org Thu Jan 23 20:36:38 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 23 Jan 2014 20:36:38 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: A failing test Message-ID: <20140123193638.1309E1C06CD@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68871:b93c074c8d0b Date: 2014-01-23 13:35 -0600 http://bitbucket.org/pypy/pypy/changeset/b93c074c8d0b/ Log: A failing test diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -35,7 +35,7 @@ class A(object): pass a = A() - a.x = 2 + a.x = 1 def main(n): i = 0 while i < n: @@ -49,7 +49,7 @@ i9 = int_lt(i5, i6) guard_true(i9, descr=...) guard_not_invalidated(descr=...) - i10 = int_add_ovf(i5, i7) + i10 = int_add(i5, 1) guard_no_overflow(descr=...) --TICK-- jump(..., descr=...) From noreply at buildbot.pypy.org Thu Jan 23 21:00:50 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 23 Jan 2014 21:00:50 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: Appease the great annotator in the sky Message-ID: <20140123200050.A80061C06CD@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68873:9d227f7cb3c5 Date: 2014-01-23 13:59 -0600 http://bitbucket.org/pypy/pypy/changeset/9d227f7cb3c5/ Log: Appease the great annotator in the sky diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -179,7 +179,7 @@ return None def _set_mapdict_map(self, map): raise NotImplementedError - def _mapdict_read_storage(self, index): + def _mapdict_read_storage(self, index, pure=False): raise NotImplementedError def _mapdict_write_storage(self, index, value): raise NotImplementedError From noreply at buildbot.pypy.org Thu Jan 23 21:08:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 23 Jan 2014 21:08:30 +0100 (CET) Subject: [pypy-commit] pypy default: Oups, fix this test. 
Message-ID: <20140123200830.E896E1C06CD@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: 
Changeset: r68874:2fbd921a57d5
Date: 2014-01-23 21:07 +0100
http://bitbucket.org/pypy/pypy/changeset/2fbd921a57d5/

Log: Oups, fix this test.

diff --git a/pypy/module/cpyext/test/test_ztranslation.py b/pypy/module/cpyext/test/test_ztranslation.py
--- a/pypy/module/cpyext/test/test_ztranslation.py
+++ b/pypy/module/cpyext/test/test_ztranslation.py
@@ -1,4 +1,4 @@
 from pypy.objspace.fake.checkmodule import checkmodule

 def test_cpyext_translates():
-    checkmodule('cpyext')
+    checkmodule('cpyext', '_rawffi')

From noreply at buildbot.pypy.org Thu Jan 23 21:08:33 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 23 Jan 2014 21:08:33 +0100 (CET)
Subject: [pypy-commit] pypy default: merge heads
Message-ID: <20140123200833.696431C06CD@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: 
Changeset: r68875:40f0c1c12bf1
Date: 2014-01-23 21:07 +0100
http://bitbucket.org/pypy/pypy/changeset/40f0c1c12bf1/

Log: merge heads

diff too long, truncating to 2000 out of 5548 lines

diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt
--- a/pypy/doc/_ref.txt
+++ b/pypy/doc/_ref.txt
@@ -109,6 +109,4 @@
 .. _`rpython/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/
 .. _`rpython/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/
 .. _`rpython/translator/c/src/stacklet/stacklet.h`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/stacklet.h
-.. _`rpython/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/cli/
-.. _`rpython/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/jvm/
 .. _`rpython/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/
diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst
--- a/pypy/doc/garbage_collection.rst
+++ b/pypy/doc/garbage_collection.rst
@@ -210,4 +210,12 @@
 are preserved. If the object dies then the pre-reserved location becomes
 free garbage, to be collected at the next major collection.

+The exact name of this GC is either `minimark` or `incminimark`. The
+latter is a version that does major collections incrementally (i.e. one
+major collection is split along some number of minor collections, rather
+than being done all at once after a specific minor collection). The
+default is `incminimark`, as it seems to have a very minimal impact on
+performance and memory usage at the benefit of avoiding the long pauses
+of `minimark`.
+
 .. include:: _ref.txt
diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst
--- a/pypy/doc/gc_info.rst
+++ b/pypy/doc/gc_info.rst
@@ -6,7 +6,7 @@
 Minimark
 --------

-PyPy's default ``minimark`` garbage collector is configurable through
+PyPy's default ``incminimark`` garbage collector is configurable through
 several environment variables:

 ``PYPY_GC_NURSERY``
@@ -14,6 +14,17 @@
     Defaults to 1/2 of your cache or ``4M``.
     Small values (like 1 or 1KB) are useful for debugging.
The minimum is set to size that survives minor collection times + 1.5 so we reclaim anything all the time. + ``PYPY_GC_MAJOR_COLLECT`` Major collection memory factor. Default is ``1.82``, which means trigger a major collection when the diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -48,3 +48,7 @@ .. branch: remove-del-from-generatoriterator Speed up generators that don't yield inside try or wait blocks by skipping unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.interpreter.astcompiler import codegen, astbuilder, symtable, optimize from pypy.interpreter.pyparser import pyparse from pypy.interpreter.pyparser.test import expressions @@ -867,6 +867,9 @@ class AppTestCompiler: + def setup_class(cls): + cls.w_maxunicode = cls.space.wrap(sys.maxunicode) + def test_docstring_not_loaded(self): import StringIO, dis, sys ns = {} @@ -911,7 +914,17 @@ l = [a for a in Foo()] assert hint_called[0] assert l == list(range(5)) - + + def test_unicode_in_source(self): + import sys + d = {} + exec '# -*- coding: utf-8 -*-\n\nu = u"\xf0\x9f\x92\x8b"' in d + if sys.maxunicode > 65535 and self.maxunicode > 65535: + expected_length = 1 + else: + expected_length = 2 + assert len(d['u']) == expected_length + class TestOptimizations: def count_instructions(self, source): diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -910,7 +910,7 @@ """ return self.unpackiterable(w_iterable, expected_length) - def listview_str(self, w_list): + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. May return None anyway. @@ -944,7 +944,7 @@ """ return (None, None) - def newlist_str(self, list_s): + def newlist_bytes(self, list_s): return self.newlist([self.wrap(s) for s in list_s]) def newlist_unicode(self, list_u): diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -15,7 +15,6 @@ Yes, it's very inefficient. Yes, CPython has very similar code. """ - # we use ps as "pointer to s" # q is the virtual last char index of the string ps = 0 @@ -54,42 +53,10 @@ if unicode_literal: # XXX Py_UnicodeFlag is ignored for now if encoding is None or encoding == "iso-8859-1": # 'unicode_escape' expects latin-1 bytes, string is ready. - buf = s - bufp = ps - bufq = q - u = None + assert 0 <= ps <= q + substr = s[ps:q] else: - # String is utf8-encoded, but 'unicode_escape' expects - # latin-1; So multibyte sequences must be escaped. - lis = [] # using a list to assemble the value - end = q - # Worst case: "\XX" may become "\u005c\uHHLL" (12 bytes) - while ps < end: - if s[ps] == '\\': - lis.append(s[ps]) - ps += 1 - if ord(s[ps]) & 0x80: - # A multibyte sequence will follow, it will be - # escaped like \u1234. To avoid confusion with - # the backslash we just wrote, we emit "\u005c" - # instead. 
- lis.append("u005c") - if ord(s[ps]) & 0x80: # XXX inefficient - w, ps = decode_utf8(space, s, ps, end, "utf-16-be") - rn = len(w) - assert rn % 2 == 0 - for i in range(0, rn, 2): - lis.append('\\u') - lis.append(hexbyte(ord(w[i]))) - lis.append(hexbyte(ord(w[i+1]))) - else: - lis.append(s[ps]) - ps += 1 - buf = ''.join(lis) - bufp = 0 - bufq = len(buf) - assert 0 <= bufp <= bufq - substr = buf[bufp:bufq] + substr = decode_unicode_utf8(space, s, ps, q) if rawmode: v = unicodehelper.decode_raw_unicode_escape(space, substr) else: @@ -121,6 +88,39 @@ result = "0" + result return result +def decode_unicode_utf8(space, s, ps, q): + # ****The Python 2.7 version, producing UTF-32 escapes**** + # String is utf8-encoded, but 'unicode_escape' expects + # latin-1; So multibyte sequences must be escaped. + lis = [] # using a list to assemble the value + end = q + # Worst case: + # "<92><195><164>" may become "\u005c\U000000E4" (16 bytes) + while ps < end: + if s[ps] == '\\': + lis.append(s[ps]) + ps += 1 + if ord(s[ps]) & 0x80: + # A multibyte sequence will follow, it will be + # escaped like \u1234. To avoid confusion with + # the backslash we just wrote, we emit "\u005c" + # instead. + lis.append("u005c") + if ord(s[ps]) & 0x80: # XXX inefficient + w, ps = decode_utf8(space, s, ps, end, "utf-32-be") + rn = len(w) + assert rn % 4 == 0 + for i in range(0, rn, 4): + lis.append('\\U') + lis.append(hexbyte(ord(w[i]))) + lis.append(hexbyte(ord(w[i+1]))) + lis.append(hexbyte(ord(w[i+2]))) + lis.append(hexbyte(ord(w[i+3]))) + else: + lis.append(s[ps]) + ps += 1 + return ''.join(lis) + def PyString_DecodeEscape(space, s, recode_encoding): """ Unescape a backslash-escaped string. If recode_encoding is non-zero, diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -1,5 +1,5 @@ from pypy.interpreter.pyparser import parsestring -import py +import py, sys class TestParsetring: def parse_and_compare(self, literal, value): @@ -91,3 +91,18 @@ input = ["'", 'x', ' ', chr(0xc3), chr(0xa9), ' ', chr(92), 'n', "'"] w_ret = parsestring.parsestr(space, 'utf8', ''.join(input)) assert space.str_w(w_ret) == ''.join(expected) + + def test_wide_unicode_in_source(self): + if sys.maxunicode == 65535: + py.test.skip("requires a wide-unicode host") + self.parse_and_compare('u"\xf0\x9f\x92\x8b"', + unichr(0x1f48b), + encoding='utf-8') + + def test_decode_unicode_utf8(self): + buf = parsestring.decode_unicode_utf8(self.space, + 'u"\xf0\x9f\x92\x8b"', 2, 6) + if sys.maxunicode == 65535: + assert buf == r"\U0000d83d\U0000dc8b" + else: + assert buf == r"\U0001f48b" diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1061,14 +1061,14 @@ assert (D() >= A()) == 'D:A.ge' -class AppTestOldStyleClassStrDict(object): +class AppTestOldStyleClassBytesDict(object): def setup_class(cls): if cls.runappdirect: py.test.skip("can only be run on py.py") def is_strdict(space, w_class): - from pypy.objspace.std.dictmultiobject import StringDictStrategy + from pypy.objspace.std.dictmultiobject import BytesDictStrategy w_d = w_class.getdict(space) - return space.wrap(isinstance(w_d.strategy, StringDictStrategy)) + return space.wrap(isinstance(w_d.strategy, BytesDictStrategy)) cls.w_is_strdict = 
cls.space.wrap(gateway.interp2app(is_strdict)) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -51,7 +51,9 @@ l = [1, 2, 3] assert list_strategy(l) == "int" l = ["a", "b", "c"] - assert list_strategy(l) == "str" + assert list_strategy(l) == "bytes" + l = [u"a", u"b", u"c"] + assert list_strategy(l) == "unicode" l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -33,7 +33,7 @@ if self.fields_dict is None: space = self.space raise operationerrfmt(w_errorcls or space.w_TypeError, - "'%s' is not completed yet", self.name) + "'%s' is opaque or not completed yet", self.name) def _alignof(self): self.check_complete(w_errorcls=self.space.w_ValueError) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -1,6 +1,5 @@ from rpython.translator.platform import platform from rpython.translator.tool.cbuild import ExternalCompilationInfo -from pypy.conftest import option from pypy.module._rawffi.interp_rawffi import TYPEMAP, TYPEMAP_FLOAT_LETTERS from pypy.module._rawffi.tracker import Tracker @@ -1133,15 +1132,6 @@ def setup_class(cls): cls.w_sizes_and_alignments = cls.space.wrap(dict( [(k, (v.c_size, v.c_alignment)) for k,v in TYPEMAP.iteritems()])) - # - # detect if we're running on PyPy with DO_TRACING not compiled in - if option.runappdirect: - try: - import _rawffi - _rawffi._num_of_allocated_objects() - except (ImportError, RuntimeError), e: - py.test.skip(str(e)) - # Tracker.DO_TRACING = True def test_structure_autofree(self): diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -692,11 +692,11 @@ else: prefix = 'cpyexttest' init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, - compilation_info=eci, _nowrapper=True) + compilation_info=eci, releasegil=False) init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, - compilation_info=eci, _nowrapper=True) + compilation_info=eci, releasegil=False) init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, - compilation_info=eci, _nowrapper=True) + compilation_info=eci, releasegil=False) INIT_FUNCTIONS.extend([ lambda space: init_buffer(), lambda space: init_pycobject(), diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -213,12 +213,14 @@ Build an extension module linked against the cpyext api library. 
""" if not space.is_none(w_separate_module_files): - separate_module_files = space.listview_str(w_separate_module_files) + separate_module_files = space.listview_bytes( + w_separate_module_files) assert separate_module_files is not None else: separate_module_files = [] if not space.is_none(w_separate_module_sources): - separate_module_sources = space.listview_str(w_separate_module_sources) + separate_module_sources = space.listview_bytes( + w_separate_module_sources) assert separate_module_sources is not None else: separate_module_sources = [] diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1083,58 +1083,64 @@ class W_Product(W_Root): def __init__(self, space, args_w, w_repeat): self.gears = [ - space.fixedview(arg_w) for arg_w in args_w + space.unpackiterable(arg_w) for arg_w in args_w ] * space.int_w(w_repeat) - self.num_gears = len(self.gears) - # initialization of indicies to loop over - self.indicies = [ - (0, len(gear)) - for gear in self.gears - ] - self.cont = True - for _, lim in self.indicies: - if lim <= 0: - self.cont = False + # + for gear in self.gears: + if len(gear) == 0: + self.lst = None break + else: + self.indices = [0] * len(self.gears) + self.lst = [gear[0] for gear in self.gears] - def roll_gears(self): - if self.num_gears == 0: - self.cont = False - return + def _rotate_previous_gears(self): + lst = self.lst + x = len(self.gears) - 1 + lst[x] = self.gears[x][0] + self.indices[x] = 0 + x -= 1 + # the outer loop runs as long as a we have a carry + while x >= 0: + gear = self.gears[x] + index = self.indices[x] + 1 + if index < len(gear): + # no carry: done + lst[x] = gear[index] + self.indices[x] = index + return + lst[x] = gear[0] + self.indices[x] = 0 + x -= 1 + else: + self.lst = None - # Starting from the end of the gear indicies work to the front - # incrementing the gear until the limit is reached. 
When the limit - # is reached carry operation to the next gear - should_carry = True - - for n in range(0, self.num_gears): - nth_gear = self.num_gears - n - 1 - if should_carry: - count, lim = self.indicies[nth_gear] - count += 1 - if count == lim and nth_gear == 0: - self.cont = False - if count == lim: - should_carry = True - count = 0 - else: - should_carry = False - self.indicies[nth_gear] = (count, lim) + def fill_next_result(self): + # the last gear is done here, in a function with no loop, + # to allow the JIT to look inside + lst = self.lst + x = len(self.gears) - 1 + if x >= 0: + gear = self.gears[x] + index = self.indices[x] + 1 + if index < len(gear): + # no carry: done + lst[x] = gear[index] + self.indices[x] = index else: - break + self._rotate_previous_gears() + else: + self.lst = None def iter_w(self, space): return space.wrap(self) def next_w(self, space): - if not self.cont: + if self.lst is None: raise OperationError(space.w_StopIteration, space.w_None) - l = [None] * self.num_gears - for x in range(0, self.num_gears): - index, limit = self.indicies[x] - l[x] = self.gears[x][index] - self.roll_gears() - return space.newtuple(l) + w_result = space.newtuple(self.lst[:]) + self.fill_next_result() + return w_result def W_Product__new__(space, w_subtype, __args__): diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -862,6 +862,28 @@ assert prod.next() == () raises (StopIteration, prod.next) + def test_product_powers_of_two(self): + from itertools import product + assert list(product()) == [()] + assert list(product('ab')) == [('a',), ('b',)] + assert list(product('ab', 'cd')) == [ + ('a', 'c'), ('a', 'd'), + ('b', 'c'), ('b', 'd')] + assert list(product('ab', 'cd', 'ef')) == [ + ('a', 'c', 'e'), ('a', 'c', 'f'), + ('a', 'd', 'e'), ('a', 'd', 'f'), + ('b', 'c', 'e'), ('b', 'c', 'f'), + ('b', 'd', 'e'), ('b', 'd', 'f')] + + def test_product_empty_item(self): + from itertools import product + assert list(product('')) == [] + assert list(product('ab', '')) == [] + assert list(product('', 'cd')) == [] + assert list(product('ab', 'cd', '')) == [] + assert list(product('ab', '', 'ef')) == [] + assert list(product('', 'cd', 'ef')) == [] + def test_permutations(self): from itertools import permutations assert list(permutations('AB')) == [('A', 'B'), ('B', 'A')] diff --git a/pypy/module/struct/__init__.py b/pypy/module/struct/__init__.py --- a/pypy/module/struct/__init__.py +++ b/pypy/module/struct/__init__.py @@ -49,11 +49,12 @@ 'calcsize': 'interp_struct.calcsize', 'pack': 'interp_struct.pack', 'unpack': 'interp_struct.unpack', - } + + 'Struct': 'interp_struct.W_Struct', + } appleveldefs = { 'error': 'app_struct.error', 'pack_into': 'app_struct.pack_into', 'unpack_from': 'app_struct.unpack_from', - 'Struct': 'app_struct.Struct', - } + } diff --git a/pypy/module/struct/app_struct.py b/pypy/module/struct/app_struct.py --- a/pypy/module/struct/app_struct.py +++ b/pypy/module/struct/app_struct.py @@ -4,6 +4,7 @@ """ import struct + class error(Exception): """Exception raised on various occasions; argument is a string describing what is wrong.""" @@ -21,21 +22,3 @@ raise error("unpack_from requires a buffer of at least %d bytes" % (size,)) return struct.unpack(fmt, data) - -# XXX inefficient -class Struct(object): - def __init__(self, format): - self.format = format - self.size = struct.calcsize(format) - - def pack(self, *args): - 
return struct.pack(self.format, *args) - - def unpack(self, s): - return struct.unpack(self.format, s) - - def pack_into(self, buffer, offset, *args): - return pack_into(self.format, buffer, offset, *args) - - def unpack_from(self, buffer, offset=0): - return unpack_from(self.format, buffer, offset) diff --git a/pypy/module/struct/interp_struct.py b/pypy/module/struct/interp_struct.py --- a/pypy/module/struct/interp_struct.py +++ b/pypy/module/struct/interp_struct.py @@ -1,15 +1,22 @@ -from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import OperationError -from pypy.module.struct.formatiterator import PackFormatIterator, UnpackFormatIterator from rpython.rlib import jit from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.rstruct.formatiterator import CalcSizeFormatIterator +from rpython.tool.sourcetools import func_with_new_name + +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.error import OperationError +from pypy.interpreter.typedef import TypeDef, interp_attrproperty +from pypy.module.struct.formatiterator import ( + PackFormatIterator, UnpackFormatIterator +) @unwrap_spec(format=str) def calcsize(space, format): return space.wrap(_calcsize(space, format)) + def _calcsize(space, format): fmtiter = CalcSizeFormatIterator() try: @@ -52,3 +59,44 @@ w_error = space.getattr(w_module, space.wrap('error')) raise OperationError(w_error, space.wrap(e.msg)) return space.newtuple(fmtiter.result_w[:]) + + +class W_Struct(W_Root): + _immutable_fields_ = ["format", "size"] + + def __init__(self, space, format): + self.format = format + self.size = _calcsize(space, format) + + @unwrap_spec(format=str) + def descr__new__(space, w_subtype, format): + self = space.allocate_instance(W_Struct, w_subtype) + W_Struct.__init__(self, space, format) + return self + + def wrap_struct_method(name): + def impl(self, space, __args__): + w_module = space.getbuiltinmodule('struct') + w_method = space.getattr(w_module, space.wrap(name)) + return space.call_obj_args( + w_method, space.wrap(self.format), __args__ + ) + + return func_with_new_name(impl, 'descr_' + name) + + descr_pack = wrap_struct_method("pack") + descr_unpack = wrap_struct_method("unpack") + descr_pack_into = wrap_struct_method("pack_into") + descr_unpack_from = wrap_struct_method("unpack_from") + + +W_Struct.typedef = TypeDef("Struct", + __new__=interp2app(W_Struct.descr__new__.im_func), + format=interp_attrproperty("format", cls=W_Struct), + size=interp_attrproperty("size", cls=W_Struct), + + pack=interp2app(W_Struct.descr_pack), + unpack=interp2app(W_Struct.descr_unpack), + pack_into=interp2app(W_Struct.descr_pack_into), + unpack_from=interp2app(W_Struct.descr_unpack_from), +) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -441,8 +441,8 @@ def str_w(self, space): return self._value - def listview_str(self): - return _create_list_from_string(self._value) + def listview_bytes(self): + return _create_list_from_bytes(self._value) def ord(self, space): if len(self._value) != 1: @@ -518,7 +518,7 @@ _title = _upper def _newlist_unwrapped(self, space, lst): - return space.newlist_str(lst) + return space.newlist_bytes(lst) @staticmethod @unwrap_spec(w_object = WrappedDefault("")) @@ -725,9 +725,9 @@ return tformat.formatter_field_name_split() -def _create_list_from_string(value): +def 
_create_list_from_bytes(value): # need this helper function to allow the jit to look inside and inline - # listview_str + # listview_bytes return [s for s in value] W_BytesObject.EMPTY = W_BytesObject('') diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -127,7 +127,7 @@ def w_keys(self, w_dict): space = self.space l = self.unerase(w_dict.dstorage).keys() - return space.newlist_str(l) + return space.newlist_bytes(l) def values(self, w_dict): iterator = self.unerase(w_dict.dstorage).itervalues diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -58,7 +58,7 @@ strategy = space.fromcache(MapDictStrategy) elif instance or strdict or module: assert w_type is None - strategy = space.fromcache(StringDictStrategy) + strategy = space.fromcache(BytesDictStrategy) elif kwargs: assert w_type is None from pypy.objspace.std.kwargsdict import EmptyKwargsDictStrategy @@ -117,9 +117,9 @@ if space.is_w(w_type, space.w_dict): w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) - strlist = space.listview_str(w_keys) - if strlist is not None: - for key in strlist: + byteslist = space.listview_bytes(w_keys) + if byteslist is not None: + for key in byteslist: w_dict.setitem_str(key, w_fill) else: for w_key in space.listview(w_keys): @@ -333,7 +333,7 @@ popitem delitem clear \ length w_keys values items \ iterkeys itervalues iteritems \ - listview_str listview_unicode listview_int \ + listview_bytes listview_unicode listview_int \ view_as_kwargs".split() def make_method(method): @@ -482,7 +482,7 @@ w_dict.strategy = strategy w_dict.dstorage = storage - def listview_str(self, w_dict): + def listview_bytes(self, w_dict): return None def listview_unicode(self, w_dict): @@ -506,7 +506,7 @@ def switch_to_correct_strategy(self, w_dict, w_key): withidentitydict = self.space.config.objspace.std.withidentitydict if type(w_key) is self.space.StringObjectCls: - self.switch_to_string_strategy(w_dict) + self.switch_to_bytes_strategy(w_dict) return elif type(w_key) is self.space.UnicodeObjectCls: self.switch_to_unicode_strategy(w_dict) @@ -519,8 +519,8 @@ else: self.switch_to_object_strategy(w_dict) - def switch_to_string_strategy(self, w_dict): - strategy = self.space.fromcache(StringDictStrategy) + def switch_to_bytes_strategy(self, w_dict): + strategy = self.space.fromcache(BytesDictStrategy) storage = strategy.get_empty_storage() w_dict.strategy = strategy w_dict.dstorage = storage @@ -572,7 +572,7 @@ w_dict.setitem(w_key, w_value) def setitem_str(self, w_dict, key, w_value): - self.switch_to_string_strategy(w_dict) + self.switch_to_bytes_strategy(w_dict) w_dict.setitem_str(key, w_value) def delitem(self, w_dict, w_key): @@ -874,8 +874,8 @@ create_iterator_classes(ObjectDictStrategy) -class StringDictStrategy(AbstractTypedStrategy, DictStrategy): - erase, unerase = rerased.new_erasing_pair("string") +class BytesDictStrategy(AbstractTypedStrategy, DictStrategy): + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -913,11 +913,11 @@ assert key is not None return self.unerase(w_dict.dstorage).get(key, None) - def listview_str(self, w_dict): + def listview_bytes(self, w_dict): return self.unerase(w_dict.dstorage).keys() def w_keys(self, w_dict): - return self.space.newlist_str(self.listview_str(w_dict)) + return 
self.space.newlist_bytes(self.listview_bytes(w_dict)) def wrapkey(space, key): return space.wrap(key) @@ -935,7 +935,7 @@ i += 1 return keys, values -create_iterator_classes(StringDictStrategy) +create_iterator_classes(BytesDictStrategy) class UnicodeDictStrategy(AbstractTypedStrategy, DictStrategy): @@ -961,7 +961,7 @@ def _never_equal_to(self, w_lookup_type): return _never_equal_to_string(self.space, w_lookup_type) - # we should implement the same shortcuts as we do for StringDictStrategy + # we should implement the same shortcuts as we do for BytesDictStrategy ## def setitem_str(self, w_dict, key, w_value): ## assert key is not None @@ -983,7 +983,7 @@ return self.unerase(w_dict.dstorage).keys() ## def w_keys(self, w_dict): - ## return self.space.newlist_str(self.listview_str(w_dict)) + ## return self.space.newlist_bytes(self.listview_bytes(w_dict)) def wrapkey(space, key): return space.wrap(key) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -84,7 +84,7 @@ def w_keys(self, w_dict): space = self.space - return space.newlist_str(self.unerase(w_dict.dstorage).dict_w.keys()) + return space.newlist_bytes(self.unerase(w_dict.dstorage).dict_w.keys()) def values(self, w_dict): return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -2,15 +2,13 @@ ## dict strategy (see dictmultiobject.py) from rpython.rlib import rerased, jit -from pypy.objspace.std.dictmultiobject import (DictStrategy, - create_iterator_classes, - EmptyDictStrategy, - ObjectDictStrategy, - StringDictStrategy) +from pypy.objspace.std.dictmultiobject import ( + BytesDictStrategy, DictStrategy, EmptyDictStrategy, ObjectDictStrategy, + create_iterator_classes) class EmptyKwargsDictStrategy(EmptyDictStrategy): - def switch_to_string_strategy(self, w_dict): + def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() w_dict.strategy = strategy @@ -61,7 +59,7 @@ else: # limit the size so that the linear searches don't become too long if len(keys) >= 16: - self.switch_to_string_strategy(w_dict) + self.switch_to_bytes_strategy(w_dict) w_dict.setitem_str(key, w_value) else: keys.append(key) @@ -111,7 +109,7 @@ def w_keys(self, w_dict): l = self.unerase(w_dict.dstorage)[0] - return self.space.newlist_str(l[:]) + return self.space.newlist_bytes(l[:]) def values(self, w_dict): return self.unerase(w_dict.dstorage)[1][:] # to make non-resizable @@ -142,8 +140,8 @@ w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) - def switch_to_string_strategy(self, w_dict): - strategy = self.space.fromcache(StringDictStrategy) + def switch_to_bytes_strategy(self, w_dict): + strategy = self.space.fromcache(BytesDictStrategy) keys, values_w = self.unerase(w_dict.dstorage) storage = strategy.get_empty_storage() d_new = strategy.unerase(storage) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -81,7 +81,7 @@ if not type(w_obj) is W_BytesObject: break else: - return space.fromcache(StringListStrategy) + return space.fromcache(BytesListStrategy) # check for unicode for w_obj in list_w: @@ -162,8 +162,8 @@ return self @staticmethod 
- def newlist_str(space, list_s): - strategy = space.fromcache(StringListStrategy) + def newlist_bytes(space, list_s): + strategy = space.fromcache(BytesListStrategy) storage = strategy.erase(list_s) return W_ListObject.from_storage_and_strategy(space, storage, strategy) @@ -278,10 +278,10 @@ ObjectListStrategy.""" return self.strategy.getitems_copy(self) - def getitems_str(self): + def getitems_bytes(self): """Return the items in the list as unwrapped strings. If the list does not use the list strategy, return None.""" - return self.strategy.getitems_str(self) + return self.strategy.getitems_bytes(self) def getitems_unicode(self): """Return the items in the list as unwrapped unicodes. If the list does @@ -753,7 +753,7 @@ def getitems_copy(self, w_list): raise NotImplementedError - def getitems_str(self, w_list): + def getitems_bytes(self, w_list): return None def getitems_unicode(self, w_list): @@ -897,7 +897,7 @@ if type(w_item) is W_IntObject: strategy = self.space.fromcache(IntegerListStrategy) elif type(w_item) is W_BytesObject: - strategy = self.space.fromcache(StringListStrategy) + strategy = self.space.fromcache(BytesListStrategy) elif type(w_item) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeListStrategy) elif type(w_item) is W_FloatObject: @@ -962,11 +962,11 @@ w_list.lstorage = strategy.erase(floatlist) return - strlist = space.listview_str(w_iterable) - if strlist is not None: - w_list.strategy = strategy = space.fromcache(StringListStrategy) + byteslist = space.listview_bytes(w_iterable) + if byteslist is not None: + w_list.strategy = strategy = space.fromcache(BytesListStrategy) # need to copy because intlist can share with w_iterable - w_list.lstorage = strategy.erase(strlist[:]) + w_list.lstorage = strategy.erase(byteslist[:]) return unilist = space.listview_unicode(w_iterable) @@ -1592,11 +1592,11 @@ return self.unerase(w_list.lstorage) -class StringListStrategy(ListStrategy): +class BytesListStrategy(ListStrategy): import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "str" + _applevel_repr = "bytes" def wrap(self, stringval): return self.space.wrap(stringval) @@ -1604,7 +1604,7 @@ def unwrap(self, w_string): return self.space.str_w(w_string) - erase, unerase = rerased.new_erasing_pair("string") + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1612,7 +1612,7 @@ return type(w_obj) is W_BytesObject def list_is_correct_type(self, w_list): - return w_list.strategy is self.space.fromcache(StringListStrategy) + return w_list.strategy is self.space.fromcache(BytesListStrategy) def sort(self, w_list, reverse): l = self.unerase(w_list.lstorage) @@ -1621,7 +1621,7 @@ if reverse: l.reverse() - def getitems_str(self, w_list): + def getitems_bytes(self, w_list): return self.unerase(w_list.lstorage) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -700,7 +700,7 @@ self.delitem(w_dict, w_key) return (w_key, w_value) - # XXX could implement a more efficient w_keys based on space.newlist_str + # XXX could implement a more efficient w_keys based on space.newlist_bytes def iterkeys(self, w_dict): return MapDictIteratorKeys(self.space, self, w_dict) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -292,8 +292,8 @@ assert not list_w or sizehint == -1 return W_ListObject(self, list_w, 
sizehint) - def newlist_str(self, list_s): - return W_ListObject.newlist_str(self, list_s) + def newlist_bytes(self, list_s): + return W_ListObject.newlist_bytes(self, list_s) def newlist_unicode(self, list_u): return W_ListObject.newlist_unicode(self, list_u) @@ -431,19 +431,19 @@ raise self._wrap_expected_length(expected_length, len(t)) return t - def listview_str(self, w_obj): + def listview_bytes(self, w_obj): # note: uses exact type checking for objects with strategies, # and isinstance() for others. See test_listobject.test_uses_custom... if type(w_obj) is W_ListObject: - return w_obj.getitems_str() + return w_obj.getitems_bytes() if type(w_obj) is W_DictMultiObject: - return w_obj.listview_str() + return w_obj.listview_bytes() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: - return w_obj.listview_str() + return w_obj.listview_bytes() if isinstance(w_obj, W_BytesObject) and self._uses_no_iter(w_obj): - return w_obj.listview_str() + return w_obj.listview_bytes() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): - return w_obj.getitems_str() + return w_obj.getitems_bytes() return None def listview_unicode(self, w_obj): diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -79,9 +79,9 @@ """ Returns a dict with all elements of the set. Needed only for switching to ObjectSetStrategy. """ return self.strategy.getdict_w(self) - def listview_str(self): + def listview_bytes(self): """ If this is a string set return its contents as a list of uwnrapped strings. Otherwise return None. """ - return self.strategy.listview_str(self) + return self.strategy.listview_bytes(self) def listview_unicode(self): """ If this is a unicode set return its contents as a list of uwnrapped unicodes. Otherwise return None. """ @@ -669,7 +669,7 @@ """ Returns an empty storage (erased) object. 
Used to initialize an empty set.""" raise NotImplementedError - def listview_str(self, w_set): + def listview_bytes(self, w_set): return None def listview_unicode(self, w_set): @@ -776,7 +776,7 @@ if type(w_key) is W_IntObject: strategy = self.space.fromcache(IntegerSetStrategy) elif type(w_key) is W_BytesObject: - strategy = self.space.fromcache(StringSetStrategy) + strategy = self.space.fromcache(BytesSetStrategy) elif type(w_key) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeSetStrategy) elif self.space.type(w_key).compares_by_identity(): @@ -1196,8 +1196,8 @@ return self.wrap(result[0]) -class StringSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): - erase, unerase = rerased.new_erasing_pair("string") +class BytesSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1207,7 +1207,7 @@ def get_empty_dict(self): return {} - def listview_str(self, w_set): + def listview_bytes(self, w_set): return self.unerase(w_set.sstorage).keys() def is_correct_type(self, w_key): @@ -1229,7 +1229,7 @@ return self.space.wrap(item) def iter(self, w_set): - return StringIteratorImplementation(self.space, self, w_set) + return BytesIteratorImplementation(self.space, self, w_set) class UnicodeSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): @@ -1286,7 +1286,7 @@ return type(w_key) is W_IntObject def may_contain_equal_elements(self, strategy): - if strategy is self.space.fromcache(StringSetStrategy): + if strategy is self.space.fromcache(BytesSetStrategy): return False elif strategy is self.space.fromcache(UnicodeSetStrategy): return False @@ -1371,7 +1371,7 @@ return False if strategy is self.space.fromcache(IntegerSetStrategy): return False - if strategy is self.space.fromcache(StringSetStrategy): + if strategy is self.space.fromcache(BytesSetStrategy): return False if strategy is self.space.fromcache(UnicodeSetStrategy): return False @@ -1436,7 +1436,7 @@ return None -class StringIteratorImplementation(IteratorImplementation): +class BytesIteratorImplementation(IteratorImplementation): def __init__(self, space, strategy, w_set): IteratorImplementation.__init__(self, space, strategy, w_set) d = strategy.unerase(w_set.sstorage) @@ -1546,11 +1546,11 @@ w_set.sstorage = w_iterable.get_storage_copy() return - stringlist = space.listview_str(w_iterable) - if stringlist is not None: - strategy = space.fromcache(StringSetStrategy) + byteslist = space.listview_bytes(w_iterable) + if byteslist is not None: + strategy = space.fromcache(BytesSetStrategy) w_set.strategy = strategy - w_set.sstorage = strategy.get_storage_from_unwrapped_list(stringlist) + w_set.sstorage = strategy.get_storage_from_unwrapped_list(byteslist) return unicodelist = space.listview_unicode(w_iterable) @@ -1593,7 +1593,7 @@ if type(w_item) is not W_BytesObject: break else: - w_set.strategy = space.fromcache(StringSetStrategy) + w_set.strategy = space.fromcache(BytesSetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) return diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -35,13 +35,7 @@ if (isinstance(self, W_BytearrayObject) and space.isinstance_w(w_sub, space.w_int)): char = space.int_w(w_sub) - if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) - for c in self.data: - if ord(c) == char: - 
return space.w_True - return space.w_False + return _descr_contains_bytearray(self.data, space, char) return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) def descr_add(self, space, w_other): @@ -79,7 +73,7 @@ assert start >= 0 and stop >= 0 return self._sliced(space, selfvalue, start, stop, self) else: - ret = [selfvalue[start + i*step] for i in range(sl)] + ret = _descr_getslice_slowpath(selfvalue, start, step, sl) return self._new_from_list(ret) index = space.getindex_w(w_index, space.w_IndexError, "string index") @@ -253,17 +247,21 @@ return self._is_generic(space, '_isdigit') # this is only for bytes and bytesarray: unicodeobject overrides it + def _descr_islower_slowpath(self, space, v): + cased = False + for idx in range(len(v)): + if self._isupper(v[idx]): + return False + elif not cased and self._islower(v[idx]): + cased = True + return cased + def descr_islower(self, space): v = self._val(space) if len(v) == 1: c = v[0] return space.newbool(self._islower(c)) - cased = False - for idx in range(len(v)): - if self._isupper(v[idx]): - return space.w_False - elif not cased and self._islower(v[idx]): - cased = True + cased = self._descr_islower_slowpath(space, v) return space.newbool(cased) def descr_isspace(self, space): @@ -291,17 +289,21 @@ return space.newbool(cased) # this is only for bytes and bytesarray: unicodeobject overrides it + def _descr_isupper_slowpath(self, space, v): + cased = False + for idx in range(len(v)): + if self._islower(v[idx]): + return False + elif not cased and self._isupper(v[idx]): + cased = True + return cased + def descr_isupper(self, space): v = self._val(space) if len(v) == 1: c = v[0] return space.newbool(self._isupper(c)) - cased = False - for idx in range(len(v)): - if self._islower(v[idx]): - return space.w_False - elif not cased and self._isupper(v[idx]): - cased = True + cased = self._descr_isupper_slowpath(space, v) return space.newbool(cased) def descr_join(self, space, w_list): @@ -309,7 +311,7 @@ from pypy.objspace.std.unicodeobject import W_UnicodeObject if isinstance(self, W_BytesObject): - l = space.listview_str(w_list) + l = space.listview_bytes(w_list) if l is not None: if len(l) == 1: return space.wrap(l[0]) @@ -677,3 +679,19 @@ def descr_getnewargs(self, space): return space.newtuple([self._new(self._val(space))]) + +# ____________________________________________________________ +# helpers for slow paths, moved out because they contain loops + +def _descr_contains_bytearray(data, space, char): + if not 0 <= char < 256: + raise OperationError(space.w_ValueError, + space.wrap("byte must be in range(0, 256)")) + for c in data: + if ord(c) == char: + return space.w_True + return space.w_False + + at specialize.argtype(0) +def _descr_getslice_slowpath(selfvalue, start, step, sl): + return [selfvalue[start + i*step] for i in range(sl)] diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -80,9 +80,9 @@ w_slice = space.newslice(w(1), w_None, w(2)) assert self.space.eq_w(space.getitem(w_str, w_slice), w('el')) - def test_listview_str(self): + def test_listview_bytes(self): w_str = self.space.wrap('abcd') - assert self.space.listview_str(w_str) == list("abcd") + assert self.space.listview_bytes(w_str) == list("abcd") class AppTestBytesObject: diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- 
a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -2,7 +2,7 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - StringDictStrategy, ObjectDictStrategy) + BytesDictStrategy, ObjectDictStrategy) class TestW_DictObject(object): @@ -134,11 +134,11 @@ assert space.eq_w(w_d.getitem_str("a"), space.w_None) assert space.eq_w(w_d.getitem_str("b"), space.w_None) - def test_listview_str_dict(self): + def test_listview_bytes_dict(self): w = self.space.wrap w_d = self.space.newdict() w_d.initialize_content([(w("a"), w(1)), (w("b"), w(2))]) - assert self.space.listview_str(w_d) == ["a", "b"] + assert self.space.listview_bytes(w_d) == ["a", "b"] def test_listview_unicode_dict(self): w = self.space.wrap @@ -160,7 +160,7 @@ w_l = self.space.call_method(w_d, "keys") assert sorted(self.space.listview_int(w_l)) == [1,2] - # make sure that .keys() calls newlist_str for string dicts + # make sure that .keys() calls newlist_bytes for string dicts def not_allowed(*args): assert False, 'should not be called' monkeypatch.setattr(self.space, 'newlist', not_allowed) @@ -168,7 +168,7 @@ w_d = self.space.newdict() w_d.initialize_content([(w("a"), w(1)), (w("b"), w(6))]) w_l = self.space.call_method(w_d, "keys") - assert sorted(self.space.listview_str(w_l)) == ["a", "b"] + assert sorted(self.space.listview_bytes(w_l)) == ["a", "b"] # XXX: it would be nice if the test passed without monkeypatch.undo(), # but we need space.newlist_unicode for it @@ -944,7 +944,7 @@ d = {} assert "EmptyDictStrategy" in self.get_strategy(d) d["a"] = 1 - assert "StringDictStrategy" in self.get_strategy(d) + assert "BytesDictStrategy" in self.get_strategy(d) class O(object): pass @@ -952,7 +952,7 @@ d = o.__dict__ = {} assert "EmptyDictStrategy" in self.get_strategy(d) o.a = 1 - assert "StringDictStrategy" in self.get_strategy(d) + assert "BytesDictStrategy" in self.get_strategy(d) def test_empty_to_unicode(self): d = {} @@ -1033,7 +1033,7 @@ eq_w = eq def newlist(self, l): return l - def newlist_str(self, l): + def newlist_bytes(self, l): return l DictObjectCls = W_DictMultiObject def type(self, w_obj): @@ -1275,9 +1275,9 @@ assert "s" not in d.w_keys() assert F() not in d.w_keys() -class TestStrDictImplementation(BaseTestRDictImplementation): - StrategyClass = StringDictStrategy - #ImplementionClass = StrDictImplementation +class TestBytesDictImplementation(BaseTestRDictImplementation): + StrategyClass = BytesDictStrategy + #ImplementionClass = BytesDictImplementation def test_str_shortcut(self): self.fill_impl() @@ -1301,12 +1301,12 @@ def check_not_devolved(self): pass -class TestDevolvedStrDictImplementation(BaseTestDevolvedDictImplementation): - StrategyClass = StringDictStrategy +class TestDevolvedBytesDictImplementation(BaseTestDevolvedDictImplementation): + StrategyClass = BytesDictStrategy def test_module_uses_strdict(): fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is StringDictStrategy + assert type(d.strategy) is BytesDictStrategy diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -73,7 +73,7 @@ for i in range(100): assert d.setitem_str("d%s" % i, 4) is None assert d.strategy is not strategy - assert "StringDictStrategy" == d.strategy.__class__.__name__ + assert "BytesDictStrategy" == d.strategy.__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() 
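The kwargs-dict test above exercises the promotion path touched in kwargsdict.py earlier in this patch: keyword-argument dicts keep their keys and values in two short parallel lists searched linearly, and once they grow past 16 entries they are re-packed into the general bytes-keyed strategy (switch_to_bytes_strategy). The following is a minimal, self-contained sketch of that pattern; the class and constant names are invented for illustration and are not the PyPy classes:

    PROMOTE_AT = 16

    class SmallKwargsDict(object):
        # Toy model: parallel key/value lists while small, a real dict after
        # promotion.  Mirrors the idea only, not the actual strategy machinery.
        def __init__(self):
            self.keys = []
            self.values = []
            self.promoted = None          # becomes a plain dict once we give up

        def setitem(self, key, value):
            if self.promoted is not None:
                self.promoted[key] = value
                return
            for i in range(len(self.keys)):
                if self.keys[i] == key:       # linear search is cheap while tiny
                    self.values[i] = value
                    return
            if len(self.keys) >= PROMOTE_AT:  # same bound as in the diff above
                self.promoted = dict(zip(self.keys, self.values))
                self.promoted[key] = value
            else:
                self.keys.append(key)
                self.values.append(value)

        def getitem(self, key):
            if self.promoted is not None:
                return self.promoted[key]
            for i in range(len(self.keys)):
                if self.keys[i] == key:
                    return self.values[i]
            raise KeyError(key)

    d = SmallKwargsDict()
    for i in range(100):
        d.setitem("d%s" % i, i)
    assert d.promoted is not None          # switched over, just like the test above
    assert d.getitem("d42") == 42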
diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,5 +1,5 @@ import sys -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, StringListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, BytesListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -13,7 +13,7 @@ assert isinstance(W_ListObject(space, [w(1),w(2),w(3)]).strategy, IntegerListStrategy) assert isinstance(W_ListObject(space, [w('a'), w('b')]).strategy, - StringListStrategy) + BytesListStrategy) assert isinstance(W_ListObject(space, [w(u'a'), w(u'b')]).strategy, UnicodeListStrategy) assert isinstance(W_ListObject(space, [w(u'a'), w('b')]).strategy, @@ -35,7 +35,7 @@ l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.append(w('a')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -59,9 +59,9 @@ def test_string_to_any(self): l = W_ListObject(self.space, [self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.append(self.space.wrap('d')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.append(self.space.wrap(3)) assert isinstance(l.strategy, ObjectListStrategy) @@ -92,7 +92,7 @@ l.setitem(0, w('d')) assert space.eq_w(l.getitem(0), w('d')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) # IntStrategy to ObjectStrategy l = W_ListObject(space, [w(1),w(2),w(3)]) @@ -100,9 +100,9 @@ l.setitem(0, w('d')) assert isinstance(l.strategy, ObjectListStrategy) - # StringStrategy to ObjectStrategy + # BytesStrategy to ObjectStrategy l = W_ListObject(space, [w('a'),w('b'),w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.setitem(0, w(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -127,9 +127,9 @@ l.insert(3, w(4)) assert isinstance(l.strategy, IntegerListStrategy) - # StringStrategy + # BytesStrategy l = W_ListObject(space, [w('a'),w('b'),w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.insert(3, w(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -155,7 +155,7 @@ l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.insert(0, w('a')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -207,9 +207,9 @@ l.setslice(0, 1, 2, W_ListObject(space, [w('a'), w('b'), w('c')])) assert isinstance(l.strategy, ObjectListStrategy) - # StringStrategy to ObjectStrategy + # BytesStrategy to ObjectStrategy l = W_ListObject(space, [w('a'), w('b'), w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.setslice(0, 1, 2, 
W_ListObject(space, [w(1), w(2), w(3)])) assert isinstance(l.strategy, ObjectListStrategy) @@ -261,7 +261,7 @@ l = W_ListObject(space, wrapitems(["a","b","c","d","e"])) other = W_ListObject(space, wrapitems(["a", "b", "c"])) keep_other_strategy(l, 0, 2, other.length(), other) - assert l.strategy is space.fromcache(StringListStrategy) + assert l.strategy is space.fromcache(BytesListStrategy) l = W_ListObject(space, wrapitems([u"a",u"b",u"c",u"d",u"e"])) other = W_ListObject(space, wrapitems([u"a", u"b", u"c"])) @@ -330,7 +330,7 @@ empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(space, [w("a"), w("b"), w("c")])) - assert isinstance(empty.strategy, StringListStrategy) + assert isinstance(empty.strategy, BytesListStrategy) empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) @@ -514,17 +514,17 @@ def test_unicode(self): l1 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap("zwei")]) - assert isinstance(l1.strategy, StringListStrategy) + assert isinstance(l1.strategy, BytesListStrategy) l2 = W_ListObject(self.space, [self.space.wrap(u"eins"), self.space.wrap(u"zwei")]) assert isinstance(l2.strategy, UnicodeListStrategy) l3 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap(u"zwei")]) assert isinstance(l3.strategy, ObjectListStrategy) - def test_listview_str(self): + def test_listview_bytes(self): space = self.space - assert space.listview_str(space.wrap(1)) == None + assert space.listview_bytes(space.wrap(1)) == None w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) - assert space.listview_str(w_l) == ["a", "b"] + assert space.listview_bytes(w_l) == ["a", "b"] def test_listview_unicode(self): space = self.space @@ -532,7 +532,7 @@ w_l = self.space.newlist([self.space.wrap(u'a'), self.space.wrap(u'b')]) assert space.listview_unicode(w_l) == [u"a", u"b"] - def test_string_join_uses_listview_str(self): + def test_string_join_uses_listview_bytes(self): space = self.space w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) w_l.getitems = None @@ -556,14 +556,14 @@ w_l.getitems = None assert space.is_w(space.call_method(space.wrap(u" -- "), "join", w_l), w_text) - def test_newlist_str(self): + def test_newlist_bytes(self): space = self.space l = ['a', 'b'] - w_l = self.space.newlist_str(l) - assert isinstance(w_l.strategy, StringListStrategy) - assert space.listview_str(w_l) is l + w_l = self.space.newlist_bytes(l) + assert isinstance(w_l.strategy, BytesListStrategy) + assert space.listview_bytes(w_l) is l - def test_string_uses_newlist_str(self): + def test_string_uses_newlist_bytes(self): space = self.space w_s = space.wrap("a b c") space.newlist = None @@ -574,10 +574,10 @@ w_l4 = space.call_method(w_s, "rsplit", space.wrap(" ")) finally: del space.newlist - assert space.listview_str(w_l) == ["a", "b", "c"] - assert space.listview_str(w_l2) == ["a", "b", "c"] - assert space.listview_str(w_l3) == ["a", "b", "c"] - assert space.listview_str(w_l4) == ["a", "b", "c"] + assert space.listview_bytes(w_l) == ["a", "b", "c"] + assert space.listview_bytes(w_l2) == ["a", "b", "c"] + assert space.listview_bytes(w_l3) == ["a", "b", "c"] + assert space.listview_bytes(w_l4) == ["a", "b", "c"] def test_unicode_uses_newlist_unicode(self): space = self.space @@ -630,10 +630,10 @@ assert space.eq_w(w_l, w_l2) - def test_listview_str_list(self): + def test_listview_bytes_list(self): space = self.space w_l = W_ListObject(space, [space.wrap("a"), 
space.wrap("b")]) - assert self.space.listview_str(w_l) == ["a", "b"] + assert self.space.listview_bytes(w_l) == ["a", "b"] def test_listview_unicode_list(self): space = self.space diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -82,7 +82,7 @@ def test_create_set_from_list(self): from pypy.interpreter.baseobjspace import W_Root - from pypy.objspace.std.setobject import ObjectSetStrategy, StringSetStrategy, UnicodeSetStrategy + from pypy.objspace.std.setobject import BytesSetStrategy, ObjectSetStrategy, UnicodeSetStrategy from pypy.objspace.std.floatobject import W_FloatObject w = self.space.wrap @@ -100,7 +100,7 @@ w_list = W_ListObject(self.space, [w("1"), w("2"), w("3")]) w_set = W_SetObject(self.space) _initialize_set(self.space, w_set, w_list) - assert w_set.strategy is self.space.fromcache(StringSetStrategy) + assert w_set.strategy is self.space.fromcache(BytesSetStrategy) assert w_set.strategy.unerase(w_set.sstorage) == {"1":None, "2":None, "3":None} w_list = self.space.iter(W_ListObject(self.space, [w(u"1"), w(u"2"), w(u"3")])) @@ -126,18 +126,18 @@ # changed cached object, need to change it back for other tests to pass intstr.get_storage_from_list = tmp_func - def test_listview_str_int_on_set(self): + def test_listview_bytes_int_on_set(self): w = self.space.wrap w_a = W_SetObject(self.space) _initialize_set(self.space, w_a, w("abcdefg")) - assert sorted(self.space.listview_str(w_a)) == list("abcdefg") + assert sorted(self.space.listview_bytes(w_a)) == list("abcdefg") assert self.space.listview_int(w_a) is None w_b = W_SetObject(self.space) _initialize_set(self.space, w_b, self.space.newlist([w(1),w(2),w(3),w(4),w(5)])) assert sorted(self.space.listview_int(w_b)) == [1,2,3,4,5] - assert self.space.listview_str(w_b) is None + assert self.space.listview_bytes(w_b) is None class AppTestAppSetTest: diff --git a/pypy/objspace/std/test/test_setstrategies.py b/pypy/objspace/std/test/test_setstrategies.py --- a/pypy/objspace/std/test/test_setstrategies.py +++ b/pypy/objspace/std/test/test_setstrategies.py @@ -1,10 +1,8 @@ from pypy.objspace.std.setobject import W_SetObject -from pypy.objspace.std.setobject import (IntegerSetStrategy, ObjectSetStrategy, - EmptySetStrategy, StringSetStrategy, - UnicodeSetStrategy, - IntegerIteratorImplementation, - StringIteratorImplementation, - UnicodeIteratorImplementation) +from pypy.objspace.std.setobject import ( + BytesIteratorImplementation, BytesSetStrategy, EmptySetStrategy, + IntegerIteratorImplementation, IntegerSetStrategy, ObjectSetStrategy, + UnicodeIteratorImplementation, UnicodeSetStrategy) from pypy.objspace.std.listobject import W_ListObject class TestW_SetStrategies: @@ -26,7 +24,7 @@ assert s.strategy is self.space.fromcache(EmptySetStrategy) s = W_SetObject(self.space, self.wrapped(["a", "b"])) - assert s.strategy is self.space.fromcache(StringSetStrategy) + assert s.strategy is self.space.fromcache(BytesSetStrategy) s = W_SetObject(self.space, self.wrapped([u"a", u"b"])) assert s.strategy is self.space.fromcache(UnicodeSetStrategy) @@ -126,7 +124,7 @@ # s = W_SetObject(space, self.wrapped(["a", "b"])) it = s.iter() - assert isinstance(it, StringIteratorImplementation) + assert isinstance(it, BytesIteratorImplementation) assert space.unwrap(it.next()) == "a" assert space.unwrap(it.next()) == "b" # @@ -142,7 +140,7 @@ assert sorted(space.listview_int(s)) == [1, 2] # s = W_SetObject(space, 
self.wrapped(["a", "b"])) - assert sorted(space.listview_str(s)) == ["a", "b"] + assert sorted(space.listview_bytes(s)) == ["a", "b"] # s = W_SetObject(space, self.wrapped([u"a", u"b"])) assert sorted(space.listview_unicode(s)) == [u"a", u"b"] diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -531,7 +531,7 @@ """x.__getitem__(y) <==> x[y]""" def __getnewargs__(): - """""" + "" def __getslice__(): """x.__getslice__(i, j) <==> x[i:j] diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -9,7 +9,7 @@ from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform -from rpython.annotator import model as annmodel, signature, unaryop, binaryop +from rpython.annotator import model as annmodel, signature from rpython.annotator.bookkeeper import Bookkeeper import py @@ -455,12 +455,12 @@ # occour for this specific, typed operation. if block.exitswitch == c_last_exception: op = block.operations[-1] - if op.opname in binaryop.BINARY_OPERATIONS: + if op.dispatch == 2: arg1 = self.binding(op.args[0]) arg2 = self.binding(op.args[1]) binop = getattr(pair(arg1, arg2), op.opname, None) can_only_throw = annmodel.read_can_only_throw(binop, arg1, arg2) - elif op.opname in unaryop.UNARY_OPERATIONS: + elif op.dispatch == 1: arg1 = self.binding(op.args[0]) opname = op.opname if opname == 'contains': opname = 'op_contains' @@ -611,44 +611,6 @@ def noreturnvalue(self, op): return annmodel.s_ImpossibleValue # no return value (hook method) - # XXX "contains" clash with SomeObject method - def consider_op_contains(self, seq, elem): - self.bookkeeper.count("contains", seq) - return seq.op_contains(elem) - - def consider_op_newtuple(self, *args): - return annmodel.SomeTuple(items = args) - - def consider_op_newlist(self, *args): - return self.bookkeeper.newlist(*args) - - def consider_op_newdict(self): - return self.bookkeeper.newdict() - - - def _registeroperations(cls, unary_ops, binary_ops): - # All unary operations - d = {} - for opname in unary_ops: - fnname = 'consider_op_' + opname - exec py.code.Source(""" -def consider_op_%s(self, arg, *args): - return arg.%s(*args) -""" % (opname, opname)).compile() in globals(), d - setattr(cls, fnname, d[fnname]) - # All binary operations - for opname in binary_ops: - fnname = 'consider_op_' + opname - exec py.code.Source(""" -def consider_op_%s(self, arg1, arg2, *args): - return pair(arg1,arg2).%s(*args) -""" % (opname, opname)).compile() in globals(), d - setattr(cls, fnname, d[fnname]) - _registeroperations = classmethod(_registeroperations) - -# register simple operations handling -RPythonAnnotator._registeroperations(unaryop.UNARY_OPERATIONS, binaryop.BINARY_OPERATIONS) - class BlockedInference(Exception): """This exception signals the type inference engine that the situation diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -12,10 +12,11 @@ SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, - missing_operation, read_can_only_throw, add_knowntypedata, + read_can_only_throw, add_knowntypedata, merge_knowntypedata,) 
from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant +from rpython.flowspace.operation import op from rpython.rlib import rarithmetic from rpython.annotator.model import AnnotatorError @@ -23,28 +24,9 @@ def immutablevalue(x): return getbookkeeper().immutablevalue(x) -# XXX unify this with ObjSpace.MethodTable -BINARY_OPERATIONS = set(['add', 'sub', 'mul', 'div', 'mod', - 'truediv', 'floordiv', 'divmod', - 'and_', 'or_', 'xor', - 'lshift', 'rshift', - 'getitem', 'setitem', 'delitem', - 'getitem_idx', 'getitem_key', 'getitem_idx_key', - 'inplace_add', 'inplace_sub', 'inplace_mul', - 'inplace_truediv', 'inplace_floordiv', 'inplace_div', - 'inplace_mod', - 'inplace_lshift', 'inplace_rshift', - 'inplace_and', 'inplace_or', 'inplace_xor', - 'lt', 'le', 'eq', 'ne', 'gt', 'ge', 'is_', 'cmp', - 'coerce', - ] - +[opname+'_ovf' for opname in - """add sub mul floordiv div mod lshift - """.split() - ]) +BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() + if oper.dispatch == 2]) -for opname in BINARY_OPERATIONS: - missing_operation(pairtype(SomeObject, SomeObject), opname) class __extend__(pairtype(SomeObject, SomeObject)): @@ -78,46 +60,39 @@ if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const < obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def le((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const <= obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def eq((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const == obj2.const) else: - getbookkeeper().count("non_int_eq", obj1, obj2) return s_Bool def ne((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const != obj2.const) else: - getbookkeeper().count("non_int_eq", obj1, obj2) return s_Bool def gt((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const > obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def ge((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const >= obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def cmp((obj1, obj2)): - getbookkeeper().count("cmp", obj1, obj2) if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(cmp(obj1.const, obj2.const)) else: @@ -163,13 +138,19 @@ return r def divmod((obj1, obj2)): - getbookkeeper().count("divmod", obj1, obj2) return SomeTuple([pair(obj1, obj2).div(), pair(obj1, obj2).mod()]) def coerce((obj1, obj2)): - getbookkeeper().count("coerce", obj1, obj2) return pair(obj1, obj2).union() # reasonable enough + def getitem((obj1, obj2)): + return s_ImpossibleValue + add = sub = mul = truediv = floordiv = div = mod = getitem + lshift = rshift = and_ = or_ = xor = delitem = getitem + + def setitem((obj1, obj2), _): + return s_ImpossibleValue + # approximation of an annotation intersection, the result should be the annotation obj or # the intersection of obj and improvement def improve((obj, improvement)): @@ -466,7 +447,6 @@ SomeUnicodeString))): raise AnnotatorError( "string formatting mixing strings and unicode not supported") - getbookkeeper().count('strformat', s_string, s_tuple) no_nul = s_string.no_nul for s_item in 
s_tuple.items: if isinstance(s_item, SomeFloat): @@ -484,7 +464,6 @@ pairtype(SomeUnicodeString, SomeObject)): def mod((s_string, args)): - getbookkeeper().count('strformat', s_string, args) return s_string.__class__() class __extend__(pairtype(SomeFloat, SomeFloat)): @@ -586,19 +565,16 @@ return [KeyError] def getitem((dic1, obj2)): - getbookkeeper().count("dict_getitem", dic1) dic1.dictdef.generalize_key(obj2) return dic1.dictdef.read_value() getitem.can_only_throw = _can_only_throw def setitem((dic1, obj2), s_value): - getbookkeeper().count("dict_setitem", dic1) dic1.dictdef.generalize_key(obj2) dic1.dictdef.generalize_value(s_value) setitem.can_only_throw = _can_only_throw def delitem((dic1, obj2)): - getbookkeeper().count("dict_delitem", dic1) dic1.dictdef.generalize_key(obj2) delitem.can_only_throw = _can_only_throw @@ -612,7 +588,6 @@ except IndexError: return s_ImpossibleValue else: - getbookkeeper().count("tuple_random_getitem", tup1) return unionof(*tup1.items) getitem.can_only_throw = [IndexError] @@ -623,74 +598,63 @@ return lst1.listdef.offspring() def getitem((lst1, int2)): - getbookkeeper().count("list_getitem", int2) return lst1.listdef.read_item() getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((lst1, int2)): - getbookkeeper().count("list_getitem", int2) return lst1.listdef.read_item() getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def setitem((lst1, int2), s_value): - getbookkeeper().count("list_setitem", int2) lst1.listdef.mutate() lst1.listdef.generalize(s_value) setitem.can_only_throw = [IndexError] def delitem((lst1, int2)): - getbookkeeper().count("list_delitem", int2) lst1.listdef.resize() delitem.can_only_throw = [IndexError] class __extend__(pairtype(SomeString, SomeInteger)): def getitem((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeChar(no_nul=str1.no_nul) getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeChar(no_nul=str1.no_nul) getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def mul((str1, int2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str1, int2) return SomeString(no_nul=str1.no_nul) class __extend__(pairtype(SomeUnicodeString, SomeInteger)): def getitem((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeUnicodeCodePoint() getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeUnicodeCodePoint() getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def mul((str1, int2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str1, int2) return SomeUnicodeString() class __extend__(pairtype(SomeInteger, SomeString), pairtype(SomeInteger, SomeUnicodeString)): def mul((int1, str2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str2, int1) return str2.basestringclass() class __extend__(pairtype(SomeUnicodeCodePoint, SomeUnicodeString), diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -25,112 +25,6 @@ from rpython.rtyper import extregistry -class Stats(object): - - def __init__(self, bookkeeper): - self.bookkeeper = bookkeeper - self.classify = {} - - def count(self, category, *args): - for_category = self.classify.setdefault(category, {}) - classifier = getattr(self, 'consider_%s' % category, 
self.consider_generic)
- outcome = classifier(*args)
- for_category[self.bookkeeper.position_key] = outcome
-
- def indexrepr(self, idx):
- if idx.is_constant():

From noreply at buildbot.pypy.org Thu Jan 23 21:41:38 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 23 Jan 2014 21:41:38 +0100 (CET)
Subject: [pypy-commit] pypy default: Argh, another fix of 3e844dad3e26
Message-ID: <20140123204138.F09F91C3360@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch:
Changeset: r68876:c1b431309ce4
Date: 2014-01-23 21:40 +0100
http://bitbucket.org/pypy/pypy/changeset/c1b431309ce4/

Log: Argh, another fix of 3e844dad3e26

diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py
--- a/pypy/module/pypyjit/policy.py
+++ b/pypy/module/pypyjit/policy.py
@@ -98,7 +98,8 @@
 modname == '__builtin__.functional' or
 modname == '__builtin__.descriptor' or
 modname == 'thread.os_local' or
- modname == 'thread.os_thread'):
+ modname == 'thread.os_thread' or
+ modname.startswith('_rawffi.alt'):
 return True
 if '.' in modname:
 modname, rest = modname.split('.', 1)

From noreply at buildbot.pypy.org Thu Jan 23 21:42:16 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 23 Jan 2014 21:42:16 +0100 (CET)
Subject: [pypy-commit] pypy default: Fix
Message-ID: <20140123204216.126E01C3360@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch:
Changeset: r68877:b9d43659244a
Date: 2014-01-23 21:41 +0100
http://bitbucket.org/pypy/pypy/changeset/b9d43659244a/

Log: Fix

diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py
--- a/pypy/module/pypyjit/policy.py
+++ b/pypy/module/pypyjit/policy.py
@@ -99,7 +99,7 @@
 modname == '__builtin__.descriptor' or
 modname == 'thread.os_local' or
 modname == 'thread.os_thread' or
- modname.startswith('_rawffi.alt'):
+ modname.startswith('_rawffi.alt')):
 return True
 if '.' in modname:
 modname, rest = modname.split('.', 1)

From noreply at buildbot.pypy.org Thu Jan 23 21:48:13 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 23 Jan 2014 21:48:13 +0100 (CET)
Subject: [pypy-commit] pypy default: Fix for 32-bit
Message-ID: <20140123204813.899781C3360@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch:
Changeset: r68878:78369eb1c93b
Date: 2014-01-23 21:47 +0100
http://bitbucket.org/pypy/pypy/changeset/78369eb1c93b/

Log: Fix for 32-bit

diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py
--- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py
@@ -30,13 +30,13 @@
 guard_not_invalidated(descr=...)
 p64 = getfield_gc(ConstPtr(ptr40), descr=)
 guard_value(p64, ConstPtr(ptr42), descr=...)
- p65 = getfield_gc(p14, descr=)
+ p65 = getfield_gc(p14, descr=)
 guard_value(p65, ConstPtr(ptr45), descr=...)
 p66 = getfield_gc(p14, descr=)
 guard_nonnull_class(p66, ..., descr=...)
 p67 = force_token()
 setfield_gc(p0, p67, descr=)
- p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=)
+ p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=)
 guard_not_forced(descr=...)
 guard_no_exception(descr=...)
 guard_nonnull_class(p68, ..., descr=...)
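The residual call_may_force in the trace above names WeakrefLifelineWithCallbacks.make_weakref_with_callback, which suggests it is the slow path taken when application-level code attaches a callback to a weak reference (plain weakrefs appear to stay on a cheaper path). A rough, hypothetical sketch of the kind of application code that produces such a trace -- not the actual test source:

    import gc
    import weakref

    class Node(object):
        pass

    def on_collect(wr):
        pass                              # invoked when the referent is collected

    def make_refs(n):
        refs = []
        for i in range(n):
            obj = Node()
            # attaching a callback is what takes the
            # WeakrefLifelineWithCallbacks path seen in the trace
            refs.append(weakref.ref(obj, on_collect))
        return refs

    refs = make_refs(1000)
    gc.collect()                          # make sure the Nodes are gone on any GC
    assert all(r() is None for r in refs)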
From noreply at buildbot.pypy.org Thu Jan 23 21:50:48 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Thu, 23 Jan 2014 21:50:48 +0100 (CET)
Subject: [pypy-commit] pypy default: Fix for test_wide_unicode_in_source on wide-unicode hosts
Message-ID: <20140123205048.571421C3360@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch:
Changeset: r68879:a8d569db033c
Date: 2014-01-23 21:49 +0100
http://bitbucket.org/pypy/pypy/changeset/a8d569db033c/

Log: Fix for test_wide_unicode_in_source on wide-unicode hosts

diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py
--- a/pypy/interpreter/pyparser/test/test_parsestring.py
+++ b/pypy/interpreter/pyparser/test/test_parsestring.py
@@ -2,9 +2,9 @@
 import py, sys
 class TestParsetring:
- def parse_and_compare(self, literal, value):
+ def parse_and_compare(self, literal, value, encoding=None):
 space = self.space
- w_ret = parsestring.parsestr(space, None, literal)
+ w_ret = parsestring.parsestr(space, encoding, literal)
 if isinstance(value, str):
 assert space.type(w_ret) == space.w_str
 assert space.str_w(w_ret) == value

From noreply at buildbot.pypy.org Thu Jan 23 22:01:14 2014
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Thu, 23 Jan 2014 22:01:14 +0100 (CET)
Subject: [pypy-commit] pypy detect-immutable-fields: Remove this... it works?
Message-ID: <20140123210114.6A4861D241D@cobra.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: detect-immutable-fields
Changeset: r68880:9664230f6d92
Date: 2014-01-23 15:00 -0600
http://bitbucket.org/pypy/pypy/changeset/9664230f6d92/

Log: Remove this... it works?

diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py
--- a/pypy/module/pypyjit/test_pypy_c/test_instance.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py
@@ -50,7 +50,6 @@
 guard_true(i9, descr=...)
 guard_not_invalidated(descr=...)
 i10 = int_add(i5, 1)
- guard_no_overflow(descr=...)
 --TICK--
 jump(..., descr=...)
""") From noreply at buildbot.pypy.org Thu Jan 23 22:15:49 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 23 Jan 2014 22:15:49 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: merged default in Message-ID: <20140123211549.1584E1C00F8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68881:215ae189f2ae Date: 2014-01-23 15:14 -0600 http://bitbucket.org/pypy/pypy/changeset/215ae189f2ae/ Log: merged default in diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -2,9 +2,9 @@ import py, sys class TestParsetring: - def parse_and_compare(self, literal, value): + def parse_and_compare(self, literal, value, encoding=None): space = self.space - w_ret = parsestring.parsestr(space, None, literal) + w_ret = parsestring.parsestr(space, encoding, literal) if isinstance(value, str): assert space.type(w_ret) == space.w_str assert space.str_w(w_ret) == value diff --git a/pypy/module/cpyext/test/test_ztranslation.py b/pypy/module/cpyext/test/test_ztranslation.py --- a/pypy/module/cpyext/test/test_ztranslation.py +++ b/pypy/module/cpyext/test/test_ztranslation.py @@ -1,4 +1,4 @@ from pypy.objspace.fake.checkmodule import checkmodule def test_cpyext_translates(): - checkmodule('cpyext') + checkmodule('cpyext', '_rawffi') diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -98,7 +98,8 @@ modname == '__builtin__.functional' or modname == '__builtin__.descriptor' or modname == 'thread.os_local' or - modname == 'thread.os_thread'): + modname == 'thread.os_thread' or + modname.startswith('_rawffi.alt')): return True if '.' in modname: modname, rest = modname.split('.', 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -30,13 +30,13 @@ guard_not_invalidated(descr=...) p64 = getfield_gc(ConstPtr(ptr40), descr=) guard_value(p64, ConstPtr(ptr42), descr=...) - p65 = getfield_gc(p14, descr=) + p65 = getfield_gc(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) p66 = getfield_gc(p14, descr=) guard_nonnull_class(p66, ..., descr=...) p67 = force_token() setfield_gc(p0, p67, descr=) - p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) + p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) guard_nonnull_class(p68, ..., descr=...) 
diff --git a/rpython/rtyper/raisingops.py b/rpython/rtyper/raisingops.py --- a/rpython/rtyper/raisingops.py +++ b/rpython/rtyper/raisingops.py @@ -87,7 +87,7 @@ if ((r^(x)) >= 0 || (r^(y)) >= 0); \ else FAIL_OVF(err, "integer addition") ''' - r = x + y + r = intmask(r_uint(x) + r_uint(y)) if r^x >= 0 or r^y >= 0: return r else: @@ -99,7 +99,7 @@ if (r >= (x)); \ else FAIL_OVF("integer addition") ''' - r = x + y + r = intmask(r_uint(x) + r_uint(y)) if r >= x: return r else: @@ -111,7 +111,7 @@ if ((r^(x)) >= 0 || (r^~(y)) >= 0); \ else FAIL_OVF(err, "integer subtraction") ''' - r = x - y + r = intmask(r_uint(x) - r_uint(y)) if r^x >= 0 or r^~y >= 0: return r else: diff --git a/rpython/translator/c/test/test_backendoptimized.py b/rpython/translator/c/test/test_backendoptimized.py --- a/rpython/translator/c/test/test_backendoptimized.py +++ b/rpython/translator/c/test/test_backendoptimized.py @@ -1,6 +1,4 @@ -from rpython.conftest import option from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong -from rpython.translator.backendopt.all import backend_optimizations from rpython.translator.c.test.test_typed import TestTypedTestCase as _TestTypedTestCase from rpython.translator.c.test.test_genc import compile @@ -77,12 +75,8 @@ assert res == 42 class TestTypedOptimizedSwitchTestCase: - - class CodeGenerator(_TestTypedTestCase): - def process(self, t): - _TestTypedTestCase.process(self, t) - self.t = t - backend_optimizations(t, merge_if_blocks=True) + def getcompiled(self, func, argtypes): + return compile(func, argtypes, merge_if_blocks=True) def test_int_switch(self): def f(x): @@ -93,8 +87,7 @@ elif x == 27: return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in (0,1,2,3,9,27,48, -9): assert fn(x) == f(x) @@ -107,8 +100,7 @@ elif x == 3: return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in (0,1,2,3,9,27,48, -9): assert fn(x) == f(x) @@ -121,8 +113,7 @@ elif x == 3: return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in (0,1,2,3,9,27,48, -9): assert fn(x) == f(x) @@ -135,8 +126,7 @@ elif x == r_uint(27): return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [r_uint]) + fn = self.getcompiled(f, [r_uint]) for x in (0,1,2,3,9,27,48): assert fn(r_uint(x)) == f(r_uint(x)) @@ -149,8 +139,7 @@ elif x == r_longlong(27): return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [r_longlong]) + fn = self.getcompiled(f, [r_longlong]) for x in (0,1,2,3,9,27,48, -9): assert fn(r_longlong(x)) == f(r_longlong(x)) @@ -163,8 +152,7 @@ elif x == r_ulonglong(27): return 3 return 0 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [r_ulonglong]) + fn = self.getcompiled(f, [r_ulonglong]) for x in (0,1,2,3,9,27,48, r_ulonglong(-9)): assert fn(r_ulonglong(x)) == f(r_ulonglong(x)) @@ -178,8 +166,7 @@ elif x == 'c': return 'd' return '@' - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in 'ABCabc@': y = ord(x) assert fn(y) == f(y) @@ -194,8 +181,7 @@ if case == '\xFB': return 5 if case == '\xFA': return 6 return 7 - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for input, expected in [(255, 1), (253, 3), 
(251, 5), (161, 7)]: res = fn(input) assert res == expected @@ -210,20 +196,15 @@ elif x == u'c': return 'd' return '@' - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in u'ABCabc@': y = ord(x) assert fn(y) == f(y) class TestTypedOptimizedRaisingOps: - - class CodeGenerator(_TestTypedTestCase): - def process(self, t): - _TestTypedTestCase.process(self, t) - self.t = t - backend_optimizations(t, raisingop2direct_call=True) + def getcompiled(self, func, argtypes): + return compile(func, argtypes, raisingop2direct_call=True) def test_int_floordiv_zer(self): def f(x): @@ -232,7 +213,25 @@ except: y = 456 return y - codegenerator = self.CodeGenerator() - fn = codegenerator.getcompiled(f, [int]) + fn = self.getcompiled(f, [int]) for x in (0,1,2,3,9,27,48, -9): assert fn(x) == f(x) + + def test_ovf_op_in_loop(self): + # This checks whether the raising operations are implemented using + # unsigned arithmetic. The problem with using signed arithmetic is that + # signed overflow is undefined in C and the optimizer is allowed to + # remove the overflow check. + from sys import maxint + from rpython.rlib.rarithmetic import ovfcheck + def f(x, y): + ret = 0 + for i in range(y): + try: + ret = ovfcheck(x + i) + except OverflowError: + break + return ret + fc = self.getcompiled(f, [int, int]) + assert fc(10, 10) == 19 + assert fc(maxint, 10) == maxint diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -50,7 +50,7 @@ unsigned_ffffffff) def compile(fn, argtypes, view=False, gcpolicy="none", backendopt=True, - annotatorpolicy=None, thread=False): + annotatorpolicy=None, thread=False, **kwds): argtypes_unroll = unrolling_iterable(enumerate(argtypes)) for argtype in argtypes: @@ -98,7 +98,7 @@ return 0 t = Translation(entry_point, None, gc=gcpolicy, backend="c", - policy=annotatorpolicy, thread=thread) + policy=annotatorpolicy, thread=thread, **kwds) if not backendopt: t.disable(["backendopt_lltype"]) t.driver.config.translation.countmallocs = True From noreply at buildbot.pypy.org Thu Jan 23 22:16:36 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Thu, 23 Jan 2014 22:16:36 +0100 (CET) Subject: [pypy-commit] pypy link-old-glibc-abi: close link old abi branch - the approach is discarded Message-ID: <20140123211636.5806B1C00F8@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: link-old-glibc-abi Changeset: r68882:505a7c83ec97 Date: 2014-01-23 22:09 +0100 http://bitbucket.org/pypy/pypy/changeset/505a7c83ec97/ Log: close link old abi branch - the approach is discarded From noreply at buildbot.pypy.org Thu Jan 23 22:28:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 23 Jan 2014 22:28:54 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add Remi Message-ID: <20140123212854.AE5B61D2425@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5140:51c84b949539 Date: 2014-01-23 22:28 +0100 http://bitbucket.org/pypy/extradoc/changeset/51c84b949539/ Log: Add Remi diff --git a/talk/fosdem2014/pypy-stm.pdf b/talk/fosdem2014/pypy-stm.pdf index 7eeb751506bd76c8928b1bcdf200a385875f286c..68969cdc285bd7404a257fee458f30e30793003e GIT binary patch [cut] diff --git a/talk/fosdem2014/pypy-stm.rst b/talk/fosdem2014/pypy-stm.rst --- a/talk/fosdem2014/pypy-stm.rst +++ b/talk/fosdem2014/pypy-stm.rst @@ -19,9 +19,9 @@ Introduction ============ -* Armin Rigo +* 
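The new test_ovf_op_in_loop above spells out why raisingops.py now computes the sum as intmask(r_uint(x) + r_uint(y)): signed overflow is undefined behaviour in C, so an optimizing C compiler may delete an overflow check written in terms of the signed result, while unsigned wraparound is well defined and the sign of r^x / r^y can be inspected afterwards. Below is a small stand-alone sketch of the same trick, emulating a 64-bit machine word with ordinary Python integers; it is for illustration only and is not the RPython implementation:

    MASK = (1 << 64) - 1
    SIGN = 1 << 63

    def as_signed(u):
        # reinterpret an unsigned 64-bit pattern as a signed value (like intmask)
        u &= MASK
        return u - (1 << 64) if u & SIGN else u

    def add_ovf(x, y):
        r = as_signed((x + y) & MASK)      # wraparound add, like r_uint(x) + r_uint(y)
        if (r ^ x) >= 0 or (r ^ y) >= 0:   # no overflow if r keeps the sign of x or y
            return r
        raise OverflowError("integer addition")

    assert add_ovf(1, 2) == 3
    assert add_ovf(-5, -7) == -12
    try:
        add_ovf(2**63 - 1, 1)              # maxint + 1 on a 64-bit word must raise
    except OverflowError:
        pass
    else:
        raise AssertionError("overflow went undetected")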
Armin Rigo, PyPy dev, CPython dev -* PyPy dev, CPython dev +* Co-author: Remi Meier, ETHZ * This talk applies to Python or any similar language From noreply at buildbot.pypy.org Thu Jan 23 22:30:44 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 23 Jan 2014 22:30:44 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: hg merge default Message-ID: <20140123213044.4C1D21D2425@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68883:9aa591418092 Date: 2014-01-23 22:05 +0100 http://bitbucket.org/pypy/pypy/changeset/9aa591418092/ Log: hg merge default diff too long, truncating to 2000 out of 4628 lines diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -210,4 +210,12 @@ are preserved. If the object dies then the pre-reserved location becomes free garbage, to be collected at the next major collection. +The exact name of this GC is either `minimark` or `incminimark`. The +latter is a version that does major collections incrementally (i.e. one +major collection is split along some number of minor collections, rather +than being done all at once after a specific minor collection). The +default is `incminimark`, as it seems to have a very minimal impact on +performance and memory usage at the benefit of avoiding the long pauses +of `minimark`. + .. include:: _ref.txt diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst --- a/pypy/doc/gc_info.rst +++ b/pypy/doc/gc_info.rst @@ -6,7 +6,7 @@ Minimark -------- -PyPy's default ``minimark`` garbage collector is configurable through +PyPy's default ``incminimark`` garbage collector is configurable through several environment variables: ``PYPY_GC_NURSERY`` @@ -14,6 +14,17 @@ Defaults to 1/2 of your cache or ``4M``. Small values (like 1 or 1KB) are useful for debugging. +``PYPY_GC_NURSERY_CLEANUP`` + The interval at which nursery is cleaned up. Must + be smaller than the nursery size and bigger than the + biggest object we can allotate in the nursery. + +``PYPY_GC_INCREMENT_STEP`` + The size of memory marked during the marking step. Default is size of + nursery times 2. If you mark it too high your GC is not incremental at + all. The minimum is set to size that survives minor collection times + 1.5 so we reclaim anything all the time. + ``PYPY_GC_MAJOR_COLLECT`` Major collection memory factor. Default is ``1.82``, which means trigger a major collection when the diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -48,3 +48,7 @@ .. branch: remove-del-from-generatoriterator Speed up generators that don't yield inside try or wait blocks by skipping unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. 
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.interpreter.astcompiler import codegen, astbuilder, symtable, optimize from pypy.interpreter.pyparser import pyparse from pypy.interpreter.pyparser.test import expressions @@ -867,6 +867,9 @@ class AppTestCompiler: + def setup_class(cls): + cls.w_maxunicode = cls.space.wrap(sys.maxunicode) + def test_docstring_not_loaded(self): import StringIO, dis, sys ns = {} @@ -911,7 +914,17 @@ l = [a for a in Foo()] assert hint_called[0] assert l == list(range(5)) - + + def test_unicode_in_source(self): + import sys + d = {} + exec '# -*- coding: utf-8 -*-\n\nu = u"\xf0\x9f\x92\x8b"' in d + if sys.maxunicode > 65535 and self.maxunicode > 65535: + expected_length = 1 + else: + expected_length = 2 + assert len(d['u']) == expected_length + class TestOptimizations: def count_instructions(self, source): diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -910,7 +910,7 @@ """ return self.unpackiterable(w_iterable, expected_length) - def listview_str(self, w_list): + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. May return None anyway. @@ -944,7 +944,7 @@ """ return (None, None) - def newlist_str(self, list_s): + def newlist_bytes(self, list_s): return self.newlist([self.wrap(s) for s in list_s]) def newlist_unicode(self, list_u): diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -167,7 +167,7 @@ def run(self): """Start this frame's execution.""" if self.getcode().co_flags & pycode.CO_GENERATOR: - if 1:# self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: + if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: from pypy.interpreter.generator import GeneratorIteratorWithDel return self.space.wrap(GeneratorIteratorWithDel(self)) else: diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -15,7 +15,6 @@ Yes, it's very inefficient. Yes, CPython has very similar code. """ - # we use ps as "pointer to s" # q is the virtual last char index of the string ps = 0 @@ -54,42 +53,10 @@ if unicode_literal: # XXX Py_UnicodeFlag is ignored for now if encoding is None or encoding == "iso-8859-1": # 'unicode_escape' expects latin-1 bytes, string is ready. - buf = s - bufp = ps - bufq = q - u = None + assert 0 <= ps <= q + substr = s[ps:q] else: - # String is utf8-encoded, but 'unicode_escape' expects - # latin-1; So multibyte sequences must be escaped. - lis = [] # using a list to assemble the value - end = q - # Worst case: "\XX" may become "\u005c\uHHLL" (12 bytes) - while ps < end: - if s[ps] == '\\': - lis.append(s[ps]) - ps += 1 - if ord(s[ps]) & 0x80: - # A multibyte sequence will follow, it will be - # escaped like \u1234. To avoid confusion with - # the backslash we just wrote, we emit "\u005c" - # instead. 
- lis.append("u005c") - if ord(s[ps]) & 0x80: # XXX inefficient - w, ps = decode_utf8(space, s, ps, end, "utf-16-be") - rn = len(w) - assert rn % 2 == 0 - for i in range(0, rn, 2): - lis.append('\\u') - lis.append(hexbyte(ord(w[i]))) - lis.append(hexbyte(ord(w[i+1]))) - else: - lis.append(s[ps]) - ps += 1 - buf = ''.join(lis) - bufp = 0 - bufq = len(buf) - assert 0 <= bufp <= bufq - substr = buf[bufp:bufq] + substr = decode_unicode_utf8(space, s, ps, q) if rawmode: v = unicodehelper.decode_raw_unicode_escape(space, substr) else: @@ -121,6 +88,39 @@ result = "0" + result return result +def decode_unicode_utf8(space, s, ps, q): + # ****The Python 2.7 version, producing UTF-32 escapes**** + # String is utf8-encoded, but 'unicode_escape' expects + # latin-1; So multibyte sequences must be escaped. + lis = [] # using a list to assemble the value + end = q + # Worst case: + # "<92><195><164>" may become "\u005c\U000000E4" (16 bytes) + while ps < end: + if s[ps] == '\\': + lis.append(s[ps]) + ps += 1 + if ord(s[ps]) & 0x80: + # A multibyte sequence will follow, it will be + # escaped like \u1234. To avoid confusion with + # the backslash we just wrote, we emit "\u005c" + # instead. + lis.append("u005c") + if ord(s[ps]) & 0x80: # XXX inefficient + w, ps = decode_utf8(space, s, ps, end, "utf-32-be") + rn = len(w) + assert rn % 4 == 0 + for i in range(0, rn, 4): + lis.append('\\U') + lis.append(hexbyte(ord(w[i]))) + lis.append(hexbyte(ord(w[i+1]))) + lis.append(hexbyte(ord(w[i+2]))) + lis.append(hexbyte(ord(w[i+3]))) + else: + lis.append(s[ps]) + ps += 1 + return ''.join(lis) + def PyString_DecodeEscape(space, s, recode_encoding): """ Unescape a backslash-escaped string. If recode_encoding is non-zero, diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -1,10 +1,10 @@ from pypy.interpreter.pyparser import parsestring -import py +import py, sys class TestParsetring: - def parse_and_compare(self, literal, value): + def parse_and_compare(self, literal, value, encoding=None): space = self.space - w_ret = parsestring.parsestr(space, None, literal) + w_ret = parsestring.parsestr(space, encoding, literal) if isinstance(value, str): assert space.type(w_ret) == space.w_str assert space.str_w(w_ret) == value @@ -91,3 +91,18 @@ input = ["'", 'x', ' ', chr(0xc3), chr(0xa9), ' ', chr(92), 'n', "'"] w_ret = parsestring.parsestr(space, 'utf8', ''.join(input)) assert space.str_w(w_ret) == ''.join(expected) + + def test_wide_unicode_in_source(self): + if sys.maxunicode == 65535: + py.test.skip("requires a wide-unicode host") + self.parse_and_compare('u"\xf0\x9f\x92\x8b"', + unichr(0x1f48b), + encoding='utf-8') + + def test_decode_unicode_utf8(self): + buf = parsestring.decode_unicode_utf8(self.space, + 'u"\xf0\x9f\x92\x8b"', 2, 6) + if sys.maxunicode == 65535: + assert buf == r"\U0000d83d\U0000dc8b" + else: + assert buf == r"\U0001f48b" diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1061,14 +1061,14 @@ assert (D() >= A()) == 'D:A.ge' -class AppTestOldStyleClassStrDict(object): +class AppTestOldStyleClassBytesDict(object): def setup_class(cls): if cls.runappdirect: py.test.skip("can only be run on py.py") def is_strdict(space, w_class): - from 
pypy.objspace.std.dictmultiobject import StringDictStrategy + from pypy.objspace.std.dictmultiobject import BytesDictStrategy w_d = w_class.getdict(space) - return space.wrap(isinstance(w_d.strategy, StringDictStrategy)) + return space.wrap(isinstance(w_d.strategy, BytesDictStrategy)) cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict)) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -51,7 +51,9 @@ l = [1, 2, 3] assert list_strategy(l) == "int" l = ["a", "b", "c"] - assert list_strategy(l) == "str" + assert list_strategy(l) == "bytes" + l = [u"a", u"b", u"c"] + assert list_strategy(l) == "unicode" l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -692,11 +692,11 @@ else: prefix = 'cpyexttest' init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, - compilation_info=eci, _nowrapper=True) + compilation_info=eci, releasegil=False) init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, - compilation_info=eci, _nowrapper=True) + compilation_info=eci, releasegil=False) init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, - compilation_info=eci, _nowrapper=True) + compilation_info=eci, releasegil=False) INIT_FUNCTIONS.extend([ lambda space: init_buffer(), lambda space: init_pycobject(), diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -213,12 +213,14 @@ Build an extension module linked against the cpyext api library. 
""" if not space.is_none(w_separate_module_files): - separate_module_files = space.listview_str(w_separate_module_files) + separate_module_files = space.listview_bytes( + w_separate_module_files) assert separate_module_files is not None else: separate_module_files = [] if not space.is_none(w_separate_module_sources): - separate_module_sources = space.listview_str(w_separate_module_sources) + separate_module_sources = space.listview_bytes( + w_separate_module_sources) assert separate_module_sources is not None else: separate_module_sources = [] diff --git a/pypy/module/cpyext/test/test_ztranslation.py b/pypy/module/cpyext/test/test_ztranslation.py --- a/pypy/module/cpyext/test/test_ztranslation.py +++ b/pypy/module/cpyext/test/test_ztranslation.py @@ -1,4 +1,4 @@ from pypy.objspace.fake.checkmodule import checkmodule def test_cpyext_translates(): - checkmodule('cpyext') + checkmodule('cpyext', '_rawffi') diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -1083,58 +1083,64 @@ class W_Product(W_Root): def __init__(self, space, args_w, w_repeat): self.gears = [ - space.fixedview(arg_w) for arg_w in args_w + space.unpackiterable(arg_w) for arg_w in args_w ] * space.int_w(w_repeat) - self.num_gears = len(self.gears) - # initialization of indicies to loop over - self.indicies = [ - (0, len(gear)) - for gear in self.gears - ] - self.cont = True - for _, lim in self.indicies: - if lim <= 0: - self.cont = False + # + for gear in self.gears: + if len(gear) == 0: + self.lst = None break + else: + self.indices = [0] * len(self.gears) + self.lst = [gear[0] for gear in self.gears] - def roll_gears(self): - if self.num_gears == 0: - self.cont = False - return + def _rotate_previous_gears(self): + lst = self.lst + x = len(self.gears) - 1 + lst[x] = self.gears[x][0] + self.indices[x] = 0 + x -= 1 + # the outer loop runs as long as a we have a carry + while x >= 0: + gear = self.gears[x] + index = self.indices[x] + 1 + if index < len(gear): + # no carry: done + lst[x] = gear[index] + self.indices[x] = index + return + lst[x] = gear[0] + self.indices[x] = 0 + x -= 1 + else: + self.lst = None - # Starting from the end of the gear indicies work to the front - # incrementing the gear until the limit is reached. 
When the limit - # is reached carry operation to the next gear - should_carry = True - - for n in range(0, self.num_gears): - nth_gear = self.num_gears - n - 1 - if should_carry: - count, lim = self.indicies[nth_gear] - count += 1 - if count == lim and nth_gear == 0: - self.cont = False - if count == lim: - should_carry = True - count = 0 - else: - should_carry = False - self.indicies[nth_gear] = (count, lim) + def fill_next_result(self): + # the last gear is done here, in a function with no loop, + # to allow the JIT to look inside + lst = self.lst + x = len(self.gears) - 1 + if x >= 0: + gear = self.gears[x] + index = self.indices[x] + 1 + if index < len(gear): + # no carry: done + lst[x] = gear[index] + self.indices[x] = index else: - break + self._rotate_previous_gears() + else: + self.lst = None def iter_w(self, space): return space.wrap(self) def next_w(self, space): - if not self.cont: + if self.lst is None: raise OperationError(space.w_StopIteration, space.w_None) - l = [None] * self.num_gears - for x in range(0, self.num_gears): - index, limit = self.indicies[x] - l[x] = self.gears[x][index] - self.roll_gears() - return space.newtuple(l) + w_result = space.newtuple(self.lst[:]) + self.fill_next_result() + return w_result def W_Product__new__(space, w_subtype, __args__): diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -862,6 +862,28 @@ assert prod.next() == () raises (StopIteration, prod.next) + def test_product_powers_of_two(self): + from itertools import product + assert list(product()) == [()] + assert list(product('ab')) == [('a',), ('b',)] + assert list(product('ab', 'cd')) == [ + ('a', 'c'), ('a', 'd'), + ('b', 'c'), ('b', 'd')] + assert list(product('ab', 'cd', 'ef')) == [ + ('a', 'c', 'e'), ('a', 'c', 'f'), + ('a', 'd', 'e'), ('a', 'd', 'f'), + ('b', 'c', 'e'), ('b', 'c', 'f'), + ('b', 'd', 'e'), ('b', 'd', 'f')] + + def test_product_empty_item(self): + from itertools import product + assert list(product('')) == [] + assert list(product('ab', '')) == [] + assert list(product('', 'cd')) == [] + assert list(product('ab', 'cd', '')) == [] + assert list(product('ab', '', 'ef')) == [] + assert list(product('', 'cd', 'ef')) == [] + def test_permutations(self): from itertools import permutations assert list(permutations('AB')) == [('A', 'B'), ('B', 'A')] diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -98,7 +98,8 @@ modname == '__builtin__.functional' or modname == '__builtin__.descriptor' or modname == 'thread.os_local' or - modname == 'thread.os_thread'): + modname == 'thread.os_thread' or + modname.startswith('_rawffi.alt')): return True if '.' in modname: modname, rest = modname.split('.', 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -30,13 +30,13 @@ guard_not_invalidated(descr=...) p64 = getfield_gc(ConstPtr(ptr40), descr=) guard_value(p64, ConstPtr(ptr42), descr=...) - p65 = getfield_gc(p14, descr=) + p65 = getfield_gc(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) p66 = getfield_gc(p14, descr=) guard_nonnull_class(p66, ..., descr=...) 
p67 = force_token() setfield_gc(p0, p67, descr=) - p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) + p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) guard_nonnull_class(p68, ..., descr=...) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -441,8 +441,8 @@ def str_w(self, space): return self._value - def listview_str(self): - return _create_list_from_string(self._value) + def listview_bytes(self): + return _create_list_from_bytes(self._value) def ord(self, space): if len(self._value) != 1: @@ -518,7 +518,7 @@ _title = _upper def _newlist_unwrapped(self, space, lst): - return space.newlist_str(lst) + return space.newlist_bytes(lst) @staticmethod @unwrap_spec(w_object = WrappedDefault("")) @@ -725,9 +725,9 @@ return tformat.formatter_field_name_split() -def _create_list_from_string(value): +def _create_list_from_bytes(value): # need this helper function to allow the jit to look inside and inline - # listview_str + # listview_bytes return [s for s in value] W_BytesObject.EMPTY = W_BytesObject('') diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -127,7 +127,7 @@ def w_keys(self, w_dict): space = self.space l = self.unerase(w_dict.dstorage).keys() - return space.newlist_str(l) + return space.newlist_bytes(l) def values(self, w_dict): iterator = self.unerase(w_dict.dstorage).itervalues diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -58,7 +58,7 @@ strategy = space.fromcache(MapDictStrategy) elif instance or strdict or module: assert w_type is None - strategy = space.fromcache(StringDictStrategy) + strategy = space.fromcache(BytesDictStrategy) elif kwargs: assert w_type is None from pypy.objspace.std.kwargsdict import EmptyKwargsDictStrategy @@ -117,9 +117,9 @@ if space.is_w(w_type, space.w_dict): w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) - strlist = space.listview_str(w_keys) - if strlist is not None: - for key in strlist: + byteslist = space.listview_bytes(w_keys) + if byteslist is not None: + for key in byteslist: w_dict.setitem_str(key, w_fill) else: for w_key in space.listview(w_keys): @@ -333,7 +333,7 @@ popitem delitem clear \ length w_keys values items \ iterkeys itervalues iteritems \ - listview_str listview_unicode listview_int \ + listview_bytes listview_unicode listview_int \ view_as_kwargs".split() def make_method(method): @@ -482,7 +482,7 @@ w_dict.strategy = strategy w_dict.dstorage = storage - def listview_str(self, w_dict): + def listview_bytes(self, w_dict): return None def listview_unicode(self, w_dict): @@ -506,7 +506,7 @@ def switch_to_correct_strategy(self, w_dict, w_key): withidentitydict = self.space.config.objspace.std.withidentitydict if type(w_key) is self.space.StringObjectCls: - self.switch_to_string_strategy(w_dict) + self.switch_to_bytes_strategy(w_dict) return elif type(w_key) is self.space.UnicodeObjectCls: self.switch_to_unicode_strategy(w_dict) @@ -519,8 +519,8 @@ else: self.switch_to_object_strategy(w_dict) - def switch_to_string_strategy(self, w_dict): - 
strategy = self.space.fromcache(StringDictStrategy) + def switch_to_bytes_strategy(self, w_dict): + strategy = self.space.fromcache(BytesDictStrategy) storage = strategy.get_empty_storage() w_dict.strategy = strategy w_dict.dstorage = storage @@ -572,7 +572,7 @@ w_dict.setitem(w_key, w_value) def setitem_str(self, w_dict, key, w_value): - self.switch_to_string_strategy(w_dict) + self.switch_to_bytes_strategy(w_dict) w_dict.setitem_str(key, w_value) def delitem(self, w_dict, w_key): @@ -874,8 +874,8 @@ create_iterator_classes(ObjectDictStrategy) -class StringDictStrategy(AbstractTypedStrategy, DictStrategy): - erase, unerase = rerased.new_erasing_pair("string") +class BytesDictStrategy(AbstractTypedStrategy, DictStrategy): + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -913,11 +913,11 @@ assert key is not None return self.unerase(w_dict.dstorage).get(key, None) - def listview_str(self, w_dict): + def listview_bytes(self, w_dict): return self.unerase(w_dict.dstorage).keys() def w_keys(self, w_dict): - return self.space.newlist_str(self.listview_str(w_dict)) + return self.space.newlist_bytes(self.listview_bytes(w_dict)) def wrapkey(space, key): return space.wrap(key) @@ -935,7 +935,7 @@ i += 1 return keys, values -create_iterator_classes(StringDictStrategy) +create_iterator_classes(BytesDictStrategy) class UnicodeDictStrategy(AbstractTypedStrategy, DictStrategy): @@ -961,7 +961,7 @@ def _never_equal_to(self, w_lookup_type): return _never_equal_to_string(self.space, w_lookup_type) - # we should implement the same shortcuts as we do for StringDictStrategy + # we should implement the same shortcuts as we do for BytesDictStrategy ## def setitem_str(self, w_dict, key, w_value): ## assert key is not None @@ -983,7 +983,7 @@ return self.unerase(w_dict.dstorage).keys() ## def w_keys(self, w_dict): - ## return self.space.newlist_str(self.listview_str(w_dict)) + ## return self.space.newlist_bytes(self.listview_bytes(w_dict)) def wrapkey(space, key): return space.wrap(key) diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -84,7 +84,7 @@ def w_keys(self, w_dict): space = self.space - return space.newlist_str(self.unerase(w_dict.dstorage).dict_w.keys()) + return space.newlist_bytes(self.unerase(w_dict.dstorage).dict_w.keys()) def values(self, w_dict): return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()] diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py --- a/pypy/objspace/std/kwargsdict.py +++ b/pypy/objspace/std/kwargsdict.py @@ -2,15 +2,13 @@ ## dict strategy (see dictmultiobject.py) from rpython.rlib import rerased, jit -from pypy.objspace.std.dictmultiobject import (DictStrategy, - create_iterator_classes, - EmptyDictStrategy, - ObjectDictStrategy, - StringDictStrategy) +from pypy.objspace.std.dictmultiobject import ( + BytesDictStrategy, DictStrategy, EmptyDictStrategy, ObjectDictStrategy, + create_iterator_classes) class EmptyKwargsDictStrategy(EmptyDictStrategy): - def switch_to_string_strategy(self, w_dict): + def switch_to_bytes_strategy(self, w_dict): strategy = self.space.fromcache(KwargsDictStrategy) storage = strategy.get_empty_storage() w_dict.strategy = strategy @@ -61,7 +59,7 @@ else: # limit the size so that the linear searches don't become too long if len(keys) >= 16: - self.switch_to_string_strategy(w_dict) + 
self.switch_to_bytes_strategy(w_dict) w_dict.setitem_str(key, w_value) else: keys.append(key) @@ -111,7 +109,7 @@ def w_keys(self, w_dict): l = self.unerase(w_dict.dstorage)[0] - return self.space.newlist_str(l[:]) + return self.space.newlist_bytes(l[:]) def values(self, w_dict): return self.unerase(w_dict.dstorage)[1][:] # to make non-resizable @@ -142,8 +140,8 @@ w_dict.strategy = strategy w_dict.dstorage = strategy.erase(d_new) - def switch_to_string_strategy(self, w_dict): - strategy = self.space.fromcache(StringDictStrategy) + def switch_to_bytes_strategy(self, w_dict): + strategy = self.space.fromcache(BytesDictStrategy) keys, values_w = self.unerase(w_dict.dstorage) storage = strategy.get_empty_storage() d_new = strategy.unerase(storage) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -81,7 +81,7 @@ if not type(w_obj) is W_BytesObject: break else: - return space.fromcache(StringListStrategy) + return space.fromcache(BytesListStrategy) # check for unicode for w_obj in list_w: @@ -162,8 +162,8 @@ return self @staticmethod - def newlist_str(space, list_s): - strategy = space.fromcache(StringListStrategy) + def newlist_bytes(space, list_s): + strategy = space.fromcache(BytesListStrategy) storage = strategy.erase(list_s) return W_ListObject.from_storage_and_strategy(space, storage, strategy) @@ -278,10 +278,10 @@ ObjectListStrategy.""" return self.strategy.getitems_copy(self) - def getitems_str(self): + def getitems_bytes(self): """Return the items in the list as unwrapped strings. If the list does not use the list strategy, return None.""" - return self.strategy.getitems_str(self) + return self.strategy.getitems_bytes(self) def getitems_unicode(self): """Return the items in the list as unwrapped unicodes. 
If the list does @@ -753,7 +753,7 @@ def getitems_copy(self, w_list): raise NotImplementedError - def getitems_str(self, w_list): + def getitems_bytes(self, w_list): return None def getitems_unicode(self, w_list): @@ -897,7 +897,7 @@ if type(w_item) is W_IntObject: strategy = self.space.fromcache(IntegerListStrategy) elif type(w_item) is W_BytesObject: - strategy = self.space.fromcache(StringListStrategy) + strategy = self.space.fromcache(BytesListStrategy) elif type(w_item) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeListStrategy) elif type(w_item) is W_FloatObject: @@ -962,11 +962,11 @@ w_list.lstorage = strategy.erase(floatlist) return - strlist = space.listview_str(w_iterable) - if strlist is not None: - w_list.strategy = strategy = space.fromcache(StringListStrategy) + byteslist = space.listview_bytes(w_iterable) + if byteslist is not None: + w_list.strategy = strategy = space.fromcache(BytesListStrategy) # need to copy because intlist can share with w_iterable - w_list.lstorage = strategy.erase(strlist[:]) + w_list.lstorage = strategy.erase(byteslist[:]) return unilist = space.listview_unicode(w_iterable) @@ -1592,11 +1592,11 @@ return self.unerase(w_list.lstorage) -class StringListStrategy(ListStrategy): +class BytesListStrategy(ListStrategy): import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "str" + _applevel_repr = "bytes" def wrap(self, stringval): return self.space.wrap(stringval) @@ -1604,7 +1604,7 @@ def unwrap(self, w_string): return self.space.str_w(w_string) - erase, unerase = rerased.new_erasing_pair("string") + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1612,7 +1612,7 @@ return type(w_obj) is W_BytesObject def list_is_correct_type(self, w_list): - return w_list.strategy is self.space.fromcache(StringListStrategy) + return w_list.strategy is self.space.fromcache(BytesListStrategy) def sort(self, w_list, reverse): l = self.unerase(w_list.lstorage) @@ -1621,7 +1621,7 @@ if reverse: l.reverse() - def getitems_str(self, w_list): + def getitems_bytes(self, w_list): return self.unerase(w_list.lstorage) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -700,7 +700,7 @@ self.delitem(w_dict, w_key) return (w_key, w_value) - # XXX could implement a more efficient w_keys based on space.newlist_str + # XXX could implement a more efficient w_keys based on space.newlist_bytes def iterkeys(self, w_dict): return MapDictIteratorKeys(self.space, self, w_dict) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -292,8 +292,8 @@ assert not list_w or sizehint == -1 return W_ListObject(self, list_w, sizehint) - def newlist_str(self, list_s): - return W_ListObject.newlist_str(self, list_s) + def newlist_bytes(self, list_s): + return W_ListObject.newlist_bytes(self, list_s) def newlist_unicode(self, list_u): return W_ListObject.newlist_unicode(self, list_u) @@ -431,19 +431,19 @@ raise self._wrap_expected_length(expected_length, len(t)) return t - def listview_str(self, w_obj): + def listview_bytes(self, w_obj): # note: uses exact type checking for objects with strategies, # and isinstance() for others. See test_listobject.test_uses_custom... 
if type(w_obj) is W_ListObject: - return w_obj.getitems_str() + return w_obj.getitems_bytes() if type(w_obj) is W_DictMultiObject: - return w_obj.listview_str() + return w_obj.listview_bytes() if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject: - return w_obj.listview_str() + return w_obj.listview_bytes() if isinstance(w_obj, W_BytesObject) and self._uses_no_iter(w_obj): - return w_obj.listview_str() + return w_obj.listview_bytes() if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj): - return w_obj.getitems_str() + return w_obj.getitems_bytes() return None def listview_unicode(self, w_obj): diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -79,9 +79,9 @@ """ Returns a dict with all elements of the set. Needed only for switching to ObjectSetStrategy. """ return self.strategy.getdict_w(self) - def listview_str(self): + def listview_bytes(self): """ If this is a string set return its contents as a list of uwnrapped strings. Otherwise return None. """ - return self.strategy.listview_str(self) + return self.strategy.listview_bytes(self) def listview_unicode(self): """ If this is a unicode set return its contents as a list of uwnrapped unicodes. Otherwise return None. """ @@ -669,7 +669,7 @@ """ Returns an empty storage (erased) object. Used to initialize an empty set.""" raise NotImplementedError - def listview_str(self, w_set): + def listview_bytes(self, w_set): return None def listview_unicode(self, w_set): @@ -776,7 +776,7 @@ if type(w_key) is W_IntObject: strategy = self.space.fromcache(IntegerSetStrategy) elif type(w_key) is W_BytesObject: - strategy = self.space.fromcache(StringSetStrategy) + strategy = self.space.fromcache(BytesSetStrategy) elif type(w_key) is W_UnicodeObject: strategy = self.space.fromcache(UnicodeSetStrategy) elif self.space.type(w_key).compares_by_identity(): @@ -1196,8 +1196,8 @@ return self.wrap(result[0]) -class StringSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): - erase, unerase = rerased.new_erasing_pair("string") +class BytesSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): + erase, unerase = rerased.new_erasing_pair("bytes") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1207,7 +1207,7 @@ def get_empty_dict(self): return {} - def listview_str(self, w_set): + def listview_bytes(self, w_set): return self.unerase(w_set.sstorage).keys() def is_correct_type(self, w_key): @@ -1229,7 +1229,7 @@ return self.space.wrap(item) def iter(self, w_set): - return StringIteratorImplementation(self.space, self, w_set) + return BytesIteratorImplementation(self.space, self, w_set) class UnicodeSetStrategy(AbstractUnwrappedSetStrategy, SetStrategy): @@ -1286,7 +1286,7 @@ return type(w_key) is W_IntObject def may_contain_equal_elements(self, strategy): - if strategy is self.space.fromcache(StringSetStrategy): + if strategy is self.space.fromcache(BytesSetStrategy): return False elif strategy is self.space.fromcache(UnicodeSetStrategy): return False @@ -1371,7 +1371,7 @@ return False if strategy is self.space.fromcache(IntegerSetStrategy): return False - if strategy is self.space.fromcache(StringSetStrategy): + if strategy is self.space.fromcache(BytesSetStrategy): return False if strategy is self.space.fromcache(UnicodeSetStrategy): return False @@ -1436,7 +1436,7 @@ return None -class StringIteratorImplementation(IteratorImplementation): +class BytesIteratorImplementation(IteratorImplementation): def __init__(self, 
space, strategy, w_set): IteratorImplementation.__init__(self, space, strategy, w_set) d = strategy.unerase(w_set.sstorage) @@ -1546,11 +1546,11 @@ w_set.sstorage = w_iterable.get_storage_copy() return - stringlist = space.listview_str(w_iterable) - if stringlist is not None: - strategy = space.fromcache(StringSetStrategy) + byteslist = space.listview_bytes(w_iterable) + if byteslist is not None: + strategy = space.fromcache(BytesSetStrategy) w_set.strategy = strategy - w_set.sstorage = strategy.get_storage_from_unwrapped_list(stringlist) + w_set.sstorage = strategy.get_storage_from_unwrapped_list(byteslist) return unicodelist = space.listview_unicode(w_iterable) @@ -1593,7 +1593,7 @@ if type(w_item) is not W_BytesObject: break else: - w_set.strategy = space.fromcache(StringSetStrategy) + w_set.strategy = space.fromcache(BytesSetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) return diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -311,7 +311,7 @@ from pypy.objspace.std.unicodeobject import W_UnicodeObject if isinstance(self, W_BytesObject): - l = space.listview_str(w_list) + l = space.listview_bytes(w_list) if l is not None: if len(l) == 1: return space.wrap(l[0]) diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -80,9 +80,9 @@ w_slice = space.newslice(w(1), w_None, w(2)) assert self.space.eq_w(space.getitem(w_str, w_slice), w('el')) - def test_listview_str(self): + def test_listview_bytes(self): w_str = self.space.wrap('abcd') - assert self.space.listview_str(w_str) == list("abcd") + assert self.space.listview_bytes(w_str) == list("abcd") class AppTestBytesObject: diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -2,7 +2,7 @@ import py from pypy.objspace.std.dictmultiobject import (W_DictMultiObject, - StringDictStrategy, ObjectDictStrategy) + BytesDictStrategy, ObjectDictStrategy) class TestW_DictObject(object): @@ -134,11 +134,11 @@ assert space.eq_w(w_d.getitem_str("a"), space.w_None) assert space.eq_w(w_d.getitem_str("b"), space.w_None) - def test_listview_str_dict(self): + def test_listview_bytes_dict(self): w = self.space.wrap w_d = self.space.newdict() w_d.initialize_content([(w("a"), w(1)), (w("b"), w(2))]) - assert self.space.listview_str(w_d) == ["a", "b"] + assert self.space.listview_bytes(w_d) == ["a", "b"] def test_listview_unicode_dict(self): w = self.space.wrap @@ -160,7 +160,7 @@ w_l = self.space.call_method(w_d, "keys") assert sorted(self.space.listview_int(w_l)) == [1,2] - # make sure that .keys() calls newlist_str for string dicts + # make sure that .keys() calls newlist_bytes for string dicts def not_allowed(*args): assert False, 'should not be called' monkeypatch.setattr(self.space, 'newlist', not_allowed) @@ -168,7 +168,7 @@ w_d = self.space.newdict() w_d.initialize_content([(w("a"), w(1)), (w("b"), w(6))]) w_l = self.space.call_method(w_d, "keys") - assert sorted(self.space.listview_str(w_l)) == ["a", "b"] + assert sorted(self.space.listview_bytes(w_l)) == ["a", "b"] # XXX: it would be nice if the test passed without monkeypatch.undo(), # but we need space.newlist_unicode for it @@ -944,7 +944,7 @@ d = {} 
assert "EmptyDictStrategy" in self.get_strategy(d) d["a"] = 1 - assert "StringDictStrategy" in self.get_strategy(d) + assert "BytesDictStrategy" in self.get_strategy(d) class O(object): pass @@ -952,7 +952,7 @@ d = o.__dict__ = {} assert "EmptyDictStrategy" in self.get_strategy(d) o.a = 1 - assert "StringDictStrategy" in self.get_strategy(d) + assert "BytesDictStrategy" in self.get_strategy(d) def test_empty_to_unicode(self): d = {} @@ -1033,7 +1033,7 @@ eq_w = eq def newlist(self, l): return l - def newlist_str(self, l): + def newlist_bytes(self, l): return l DictObjectCls = W_DictMultiObject def type(self, w_obj): @@ -1275,9 +1275,9 @@ assert "s" not in d.w_keys() assert F() not in d.w_keys() -class TestStrDictImplementation(BaseTestRDictImplementation): - StrategyClass = StringDictStrategy - #ImplementionClass = StrDictImplementation +class TestBytesDictImplementation(BaseTestRDictImplementation): + StrategyClass = BytesDictStrategy + #ImplementionClass = BytesDictImplementation def test_str_shortcut(self): self.fill_impl() @@ -1301,12 +1301,12 @@ def check_not_devolved(self): pass -class TestDevolvedStrDictImplementation(BaseTestDevolvedDictImplementation): - StrategyClass = StringDictStrategy +class TestDevolvedBytesDictImplementation(BaseTestDevolvedDictImplementation): + StrategyClass = BytesDictStrategy def test_module_uses_strdict(): fakespace = FakeSpace() d = fakespace.newdict(module=True) - assert type(d.strategy) is StringDictStrategy + assert type(d.strategy) is BytesDictStrategy diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -73,7 +73,7 @@ for i in range(100): assert d.setitem_str("d%s" % i, 4) is None assert d.strategy is not strategy - assert "StringDictStrategy" == d.strategy.__class__.__name__ + assert "BytesDictStrategy" == d.strategy.__class__.__name__ def test_keys_doesnt_wrap(): space = FakeSpace() diff --git a/pypy/objspace/std/test/test_liststrategies.py b/pypy/objspace/std/test/test_liststrategies.py --- a/pypy/objspace/std/test/test_liststrategies.py +++ b/pypy/objspace/std/test/test_liststrategies.py @@ -1,5 +1,5 @@ import sys -from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, StringListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy +from pypy.objspace.std.listobject import W_ListObject, EmptyListStrategy, ObjectListStrategy, IntegerListStrategy, FloatListStrategy, BytesListStrategy, RangeListStrategy, make_range_list, UnicodeListStrategy from pypy.objspace.std import listobject from pypy.objspace.std.test.test_listobject import TestW_ListObject @@ -13,7 +13,7 @@ assert isinstance(W_ListObject(space, [w(1),w(2),w(3)]).strategy, IntegerListStrategy) assert isinstance(W_ListObject(space, [w('a'), w('b')]).strategy, - StringListStrategy) + BytesListStrategy) assert isinstance(W_ListObject(space, [w(u'a'), w(u'b')]).strategy, UnicodeListStrategy) assert isinstance(W_ListObject(space, [w(u'a'), w('b')]).strategy, @@ -35,7 +35,7 @@ l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.append(w('a')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -59,9 +59,9 @@ def test_string_to_any(self): l = W_ListObject(self.space, 
[self.space.wrap('a'),self.space.wrap('b'),self.space.wrap('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.append(self.space.wrap('d')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.append(self.space.wrap(3)) assert isinstance(l.strategy, ObjectListStrategy) @@ -92,7 +92,7 @@ l.setitem(0, w('d')) assert space.eq_w(l.getitem(0), w('d')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) # IntStrategy to ObjectStrategy l = W_ListObject(space, [w(1),w(2),w(3)]) @@ -100,9 +100,9 @@ l.setitem(0, w('d')) assert isinstance(l.strategy, ObjectListStrategy) - # StringStrategy to ObjectStrategy + # BytesStrategy to ObjectStrategy l = W_ListObject(space, [w('a'),w('b'),w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.setitem(0, w(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -127,9 +127,9 @@ l.insert(3, w(4)) assert isinstance(l.strategy, IntegerListStrategy) - # StringStrategy + # BytesStrategy l = W_ListObject(space, [w('a'),w('b'),w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.insert(3, w(2)) assert isinstance(l.strategy, ObjectListStrategy) @@ -155,7 +155,7 @@ l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) l.insert(0, w('a')) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l = W_ListObject(space, []) assert isinstance(l.strategy, EmptyListStrategy) @@ -207,9 +207,9 @@ l.setslice(0, 1, 2, W_ListObject(space, [w('a'), w('b'), w('c')])) assert isinstance(l.strategy, ObjectListStrategy) - # StringStrategy to ObjectStrategy + # BytesStrategy to ObjectStrategy l = W_ListObject(space, [w('a'), w('b'), w('c')]) - assert isinstance(l.strategy, StringListStrategy) + assert isinstance(l.strategy, BytesListStrategy) l.setslice(0, 1, 2, W_ListObject(space, [w(1), w(2), w(3)])) assert isinstance(l.strategy, ObjectListStrategy) @@ -261,7 +261,7 @@ l = W_ListObject(space, wrapitems(["a","b","c","d","e"])) other = W_ListObject(space, wrapitems(["a", "b", "c"])) keep_other_strategy(l, 0, 2, other.length(), other) - assert l.strategy is space.fromcache(StringListStrategy) + assert l.strategy is space.fromcache(BytesListStrategy) l = W_ListObject(space, wrapitems([u"a",u"b",u"c",u"d",u"e"])) other = W_ListObject(space, wrapitems([u"a", u"b", u"c"])) @@ -330,7 +330,7 @@ empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) empty.extend(W_ListObject(space, [w("a"), w("b"), w("c")])) - assert isinstance(empty.strategy, StringListStrategy) + assert isinstance(empty.strategy, BytesListStrategy) empty = W_ListObject(space, []) assert isinstance(empty.strategy, EmptyListStrategy) @@ -514,17 +514,17 @@ def test_unicode(self): l1 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap("zwei")]) - assert isinstance(l1.strategy, StringListStrategy) + assert isinstance(l1.strategy, BytesListStrategy) l2 = W_ListObject(self.space, [self.space.wrap(u"eins"), self.space.wrap(u"zwei")]) assert isinstance(l2.strategy, UnicodeListStrategy) l3 = W_ListObject(self.space, [self.space.wrap("eins"), self.space.wrap(u"zwei")]) assert isinstance(l3.strategy, ObjectListStrategy) - def test_listview_str(self): + def test_listview_bytes(self): space = self.space - assert space.listview_str(space.wrap(1)) 
== None + assert space.listview_bytes(space.wrap(1)) == None w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) - assert space.listview_str(w_l) == ["a", "b"] + assert space.listview_bytes(w_l) == ["a", "b"] def test_listview_unicode(self): space = self.space @@ -532,7 +532,7 @@ w_l = self.space.newlist([self.space.wrap(u'a'), self.space.wrap(u'b')]) assert space.listview_unicode(w_l) == [u"a", u"b"] - def test_string_join_uses_listview_str(self): + def test_string_join_uses_listview_bytes(self): space = self.space w_l = self.space.newlist([self.space.wrap('a'), self.space.wrap('b')]) w_l.getitems = None @@ -556,14 +556,14 @@ w_l.getitems = None assert space.is_w(space.call_method(space.wrap(u" -- "), "join", w_l), w_text) - def test_newlist_str(self): + def test_newlist_bytes(self): space = self.space l = ['a', 'b'] - w_l = self.space.newlist_str(l) - assert isinstance(w_l.strategy, StringListStrategy) - assert space.listview_str(w_l) is l + w_l = self.space.newlist_bytes(l) + assert isinstance(w_l.strategy, BytesListStrategy) + assert space.listview_bytes(w_l) is l - def test_string_uses_newlist_str(self): + def test_string_uses_newlist_bytes(self): space = self.space w_s = space.wrap("a b c") space.newlist = None @@ -574,10 +574,10 @@ w_l4 = space.call_method(w_s, "rsplit", space.wrap(" ")) finally: del space.newlist - assert space.listview_str(w_l) == ["a", "b", "c"] - assert space.listview_str(w_l2) == ["a", "b", "c"] - assert space.listview_str(w_l3) == ["a", "b", "c"] - assert space.listview_str(w_l4) == ["a", "b", "c"] + assert space.listview_bytes(w_l) == ["a", "b", "c"] + assert space.listview_bytes(w_l2) == ["a", "b", "c"] + assert space.listview_bytes(w_l3) == ["a", "b", "c"] + assert space.listview_bytes(w_l4) == ["a", "b", "c"] def test_unicode_uses_newlist_unicode(self): space = self.space @@ -630,10 +630,10 @@ assert space.eq_w(w_l, w_l2) - def test_listview_str_list(self): + def test_listview_bytes_list(self): space = self.space w_l = W_ListObject(space, [space.wrap("a"), space.wrap("b")]) - assert self.space.listview_str(w_l) == ["a", "b"] + assert self.space.listview_bytes(w_l) == ["a", "b"] def test_listview_unicode_list(self): space = self.space diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -82,7 +82,7 @@ def test_create_set_from_list(self): from pypy.interpreter.baseobjspace import W_Root - from pypy.objspace.std.setobject import ObjectSetStrategy, StringSetStrategy, UnicodeSetStrategy + from pypy.objspace.std.setobject import BytesSetStrategy, ObjectSetStrategy, UnicodeSetStrategy from pypy.objspace.std.floatobject import W_FloatObject w = self.space.wrap @@ -100,7 +100,7 @@ w_list = W_ListObject(self.space, [w("1"), w("2"), w("3")]) w_set = W_SetObject(self.space) _initialize_set(self.space, w_set, w_list) - assert w_set.strategy is self.space.fromcache(StringSetStrategy) + assert w_set.strategy is self.space.fromcache(BytesSetStrategy) assert w_set.strategy.unerase(w_set.sstorage) == {"1":None, "2":None, "3":None} w_list = self.space.iter(W_ListObject(self.space, [w(u"1"), w(u"2"), w(u"3")])) @@ -126,18 +126,18 @@ # changed cached object, need to change it back for other tests to pass intstr.get_storage_from_list = tmp_func - def test_listview_str_int_on_set(self): + def test_listview_bytes_int_on_set(self): w = self.space.wrap w_a = W_SetObject(self.space) _initialize_set(self.space, w_a, 
w("abcdefg")) - assert sorted(self.space.listview_str(w_a)) == list("abcdefg") + assert sorted(self.space.listview_bytes(w_a)) == list("abcdefg") assert self.space.listview_int(w_a) is None w_b = W_SetObject(self.space) _initialize_set(self.space, w_b, self.space.newlist([w(1),w(2),w(3),w(4),w(5)])) assert sorted(self.space.listview_int(w_b)) == [1,2,3,4,5] - assert self.space.listview_str(w_b) is None + assert self.space.listview_bytes(w_b) is None class AppTestAppSetTest: diff --git a/pypy/objspace/std/test/test_setstrategies.py b/pypy/objspace/std/test/test_setstrategies.py --- a/pypy/objspace/std/test/test_setstrategies.py +++ b/pypy/objspace/std/test/test_setstrategies.py @@ -1,10 +1,8 @@ from pypy.objspace.std.setobject import W_SetObject -from pypy.objspace.std.setobject import (IntegerSetStrategy, ObjectSetStrategy, - EmptySetStrategy, StringSetStrategy, - UnicodeSetStrategy, - IntegerIteratorImplementation, - StringIteratorImplementation, - UnicodeIteratorImplementation) +from pypy.objspace.std.setobject import ( + BytesIteratorImplementation, BytesSetStrategy, EmptySetStrategy, + IntegerIteratorImplementation, IntegerSetStrategy, ObjectSetStrategy, + UnicodeIteratorImplementation, UnicodeSetStrategy) from pypy.objspace.std.listobject import W_ListObject class TestW_SetStrategies: @@ -26,7 +24,7 @@ assert s.strategy is self.space.fromcache(EmptySetStrategy) s = W_SetObject(self.space, self.wrapped(["a", "b"])) - assert s.strategy is self.space.fromcache(StringSetStrategy) + assert s.strategy is self.space.fromcache(BytesSetStrategy) s = W_SetObject(self.space, self.wrapped([u"a", u"b"])) assert s.strategy is self.space.fromcache(UnicodeSetStrategy) @@ -126,7 +124,7 @@ # s = W_SetObject(space, self.wrapped(["a", "b"])) it = s.iter() - assert isinstance(it, StringIteratorImplementation) + assert isinstance(it, BytesIteratorImplementation) assert space.unwrap(it.next()) == "a" assert space.unwrap(it.next()) == "b" # @@ -142,7 +140,7 @@ assert sorted(space.listview_int(s)) == [1, 2] # s = W_SetObject(space, self.wrapped(["a", "b"])) - assert sorted(space.listview_str(s)) == ["a", "b"] + assert sorted(space.listview_bytes(s)) == ["a", "b"] # s = W_SetObject(space, self.wrapped([u"a", u"b"])) assert sorted(space.listview_unicode(s)) == [u"a", u"b"] diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -531,7 +531,7 @@ """x.__getitem__(y) <==> x[y]""" def __getnewargs__(): - """""" + "" def __getslice__(): """x.__getslice__(i, j) <==> x[i:j] diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -9,7 +9,7 @@ from rpython.flowspace.model import (Variable, Constant, FunctionGraph, c_last_exception, checkgraph) from rpython.translator import simplify, transform -from rpython.annotator import model as annmodel, signature, unaryop, binaryop +from rpython.annotator import model as annmodel, signature from rpython.annotator.bookkeeper import Bookkeeper import py @@ -455,12 +455,12 @@ # occour for this specific, typed operation. 
if block.exitswitch == c_last_exception: op = block.operations[-1] - if op.opname in binaryop.BINARY_OPERATIONS: + if op.dispatch == 2: arg1 = self.binding(op.args[0]) arg2 = self.binding(op.args[1]) binop = getattr(pair(arg1, arg2), op.opname, None) can_only_throw = annmodel.read_can_only_throw(binop, arg1, arg2) - elif op.opname in unaryop.UNARY_OPERATIONS: + elif op.dispatch == 1: arg1 = self.binding(op.args[0]) opname = op.opname if opname == 'contains': opname = 'op_contains' @@ -611,44 +611,6 @@ def noreturnvalue(self, op): return annmodel.s_ImpossibleValue # no return value (hook method) - # XXX "contains" clash with SomeObject method - def consider_op_contains(self, seq, elem): - self.bookkeeper.count("contains", seq) - return seq.op_contains(elem) - - def consider_op_newtuple(self, *args): - return annmodel.SomeTuple(items = args) - - def consider_op_newlist(self, *args): - return self.bookkeeper.newlist(*args) - - def consider_op_newdict(self): - return self.bookkeeper.newdict() - - - def _registeroperations(cls, unary_ops, binary_ops): - # All unary operations - d = {} - for opname in unary_ops: - fnname = 'consider_op_' + opname - exec py.code.Source(""" -def consider_op_%s(self, arg, *args): - return arg.%s(*args) -""" % (opname, opname)).compile() in globals(), d - setattr(cls, fnname, d[fnname]) - # All binary operations - for opname in binary_ops: - fnname = 'consider_op_' + opname - exec py.code.Source(""" -def consider_op_%s(self, arg1, arg2, *args): - return pair(arg1,arg2).%s(*args) -""" % (opname, opname)).compile() in globals(), d - setattr(cls, fnname, d[fnname]) - _registeroperations = classmethod(_registeroperations) - -# register simple operations handling -RPythonAnnotator._registeroperations(unaryop.UNARY_OPERATIONS, binaryop.BINARY_OPERATIONS) - class BlockedInference(Exception): """This exception signals the type inference engine that the situation diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -12,10 +12,11 @@ SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, - missing_operation, read_can_only_throw, add_knowntypedata, + read_can_only_throw, add_knowntypedata, merge_knowntypedata,) from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant +from rpython.flowspace.operation import op from rpython.rlib import rarithmetic from rpython.annotator.model import AnnotatorError @@ -23,28 +24,9 @@ def immutablevalue(x): return getbookkeeper().immutablevalue(x) -# XXX unify this with ObjSpace.MethodTable -BINARY_OPERATIONS = set(['add', 'sub', 'mul', 'div', 'mod', - 'truediv', 'floordiv', 'divmod', - 'and_', 'or_', 'xor', - 'lshift', 'rshift', - 'getitem', 'setitem', 'delitem', - 'getitem_idx', 'getitem_key', 'getitem_idx_key', - 'inplace_add', 'inplace_sub', 'inplace_mul', - 'inplace_truediv', 'inplace_floordiv', 'inplace_div', - 'inplace_mod', - 'inplace_lshift', 'inplace_rshift', - 'inplace_and', 'inplace_or', 'inplace_xor', - 'lt', 'le', 'eq', 'ne', 'gt', 'ge', 'is_', 'cmp', - 'coerce', - ] - +[opname+'_ovf' for opname in - """add sub mul floordiv div mod lshift - """.split() - ]) +BINARY_OPERATIONS = set([oper.opname for oper in op.__dict__.values() + if oper.dispatch == 2]) -for opname in BINARY_OPERATIONS: - missing_operation(pairtype(SomeObject, SomeObject), 
opname) class __extend__(pairtype(SomeObject, SomeObject)): @@ -78,46 +60,39 @@ if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const < obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def le((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const <= obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def eq((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const == obj2.const) else: - getbookkeeper().count("non_int_eq", obj1, obj2) return s_Bool def ne((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const != obj2.const) else: - getbookkeeper().count("non_int_eq", obj1, obj2) return s_Bool def gt((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const > obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def ge((obj1, obj2)): if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(obj1.const >= obj2.const) else: - getbookkeeper().count("non_int_comp", obj1, obj2) return s_Bool def cmp((obj1, obj2)): - getbookkeeper().count("cmp", obj1, obj2) if obj1.is_immutable_constant() and obj2.is_immutable_constant(): return immutablevalue(cmp(obj1.const, obj2.const)) else: @@ -163,13 +138,19 @@ return r def divmod((obj1, obj2)): - getbookkeeper().count("divmod", obj1, obj2) return SomeTuple([pair(obj1, obj2).div(), pair(obj1, obj2).mod()]) def coerce((obj1, obj2)): - getbookkeeper().count("coerce", obj1, obj2) return pair(obj1, obj2).union() # reasonable enough + def getitem((obj1, obj2)): + return s_ImpossibleValue + add = sub = mul = truediv = floordiv = div = mod = getitem + lshift = rshift = and_ = or_ = xor = delitem = getitem + + def setitem((obj1, obj2), _): + return s_ImpossibleValue + # approximation of an annotation intersection, the result should be the annotation obj or # the intersection of obj and improvement def improve((obj, improvement)): @@ -466,7 +447,6 @@ SomeUnicodeString))): raise AnnotatorError( "string formatting mixing strings and unicode not supported") - getbookkeeper().count('strformat', s_string, s_tuple) no_nul = s_string.no_nul for s_item in s_tuple.items: if isinstance(s_item, SomeFloat): @@ -484,7 +464,6 @@ pairtype(SomeUnicodeString, SomeObject)): def mod((s_string, args)): - getbookkeeper().count('strformat', s_string, args) return s_string.__class__() class __extend__(pairtype(SomeFloat, SomeFloat)): @@ -586,19 +565,16 @@ return [KeyError] def getitem((dic1, obj2)): - getbookkeeper().count("dict_getitem", dic1) dic1.dictdef.generalize_key(obj2) return dic1.dictdef.read_value() getitem.can_only_throw = _can_only_throw def setitem((dic1, obj2), s_value): - getbookkeeper().count("dict_setitem", dic1) dic1.dictdef.generalize_key(obj2) dic1.dictdef.generalize_value(s_value) setitem.can_only_throw = _can_only_throw def delitem((dic1, obj2)): - getbookkeeper().count("dict_delitem", dic1) dic1.dictdef.generalize_key(obj2) delitem.can_only_throw = _can_only_throw @@ -612,7 +588,6 @@ except IndexError: return s_ImpossibleValue else: - getbookkeeper().count("tuple_random_getitem", tup1) return unionof(*tup1.items) getitem.can_only_throw = [IndexError] @@ -623,74 +598,63 @@ return lst1.listdef.offspring() def getitem((lst1, int2)): - getbookkeeper().count("list_getitem", 
int2) return lst1.listdef.read_item() getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((lst1, int2)): - getbookkeeper().count("list_getitem", int2) return lst1.listdef.read_item() getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def setitem((lst1, int2), s_value): - getbookkeeper().count("list_setitem", int2) lst1.listdef.mutate() lst1.listdef.generalize(s_value) setitem.can_only_throw = [IndexError] def delitem((lst1, int2)): - getbookkeeper().count("list_delitem", int2) lst1.listdef.resize() delitem.can_only_throw = [IndexError] class __extend__(pairtype(SomeString, SomeInteger)): def getitem((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeChar(no_nul=str1.no_nul) getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeChar(no_nul=str1.no_nul) getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def mul((str1, int2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str1, int2) return SomeString(no_nul=str1.no_nul) class __extend__(pairtype(SomeUnicodeString, SomeInteger)): def getitem((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeUnicodeCodePoint() getitem.can_only_throw = [] getitem_key = getitem def getitem_idx((str1, int2)): - getbookkeeper().count("str_getitem", int2) return SomeUnicodeCodePoint() getitem_idx.can_only_throw = [IndexError] getitem_idx_key = getitem_idx def mul((str1, int2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str1, int2) return SomeUnicodeString() class __extend__(pairtype(SomeInteger, SomeString), pairtype(SomeInteger, SomeUnicodeString)): def mul((int1, str2)): # xxx do we want to support this - getbookkeeper().count("str_mul", str2, int1) return str2.basestringclass() class __extend__(pairtype(SomeUnicodeCodePoint, SomeUnicodeString), diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -25,112 +25,6 @@ from rpython.rtyper import extregistry -class Stats(object): - - def __init__(self, bookkeeper): - self.bookkeeper = bookkeeper - self.classify = {} - - def count(self, category, *args): - for_category = self.classify.setdefault(category, {}) - classifier = getattr(self, 'consider_%s' % category, self.consider_generic) - outcome = classifier(*args) - for_category[self.bookkeeper.position_key] = outcome - - def indexrepr(self, idx): - if idx.is_constant(): - if idx.const is None: - return '' - if isinstance(idx, SomeInteger): - if idx.const >=0: - return 'pos-constant' - else: - return 'Neg-constant' - return idx.const - else: - if isinstance(idx, SomeInteger): - if idx.nonneg: - return "non-neg" - else: - return "MAYBE-NEG" - else: - return self.typerepr(idx) - - def steprepr(self, stp): - if stp.is_constant(): - if stp.const in (1, None): - return 'step=1' - else: - return 'step=%s?' 
% stp.const - else: - return 'non-const-step %s' % self.typerepr(stp) - - def consider_generic(self, *args): - return tuple([self.typerepr(x) for x in args]) - - def consider_list_list_eq(self, obj1, obj2): - return obj1, obj2 - - def consider_contains(self, seq): - return seq - - def consider_non_int_eq(self, obj1, obj2): - if obj1.knowntype == obj2.knowntype == list: - self.count("list_list_eq", obj1, obj2) - return self.typerepr(obj1), self.typerepr(obj2) - - def consider_non_int_comp(self, obj1, obj2): - return self.typerepr(obj1), self.typerepr(obj2) - - def typerepr(self, obj): - if isinstance(obj, SomeInstance): - return obj.classdef.name - else: - return obj.knowntype.__name__ - - def consider_tuple_random_getitem(self, tup): - return tuple([self.typerepr(x) for x in tup.items]) - - def consider_list_index(self): - return '!' - - def consider_list_getitem(self, idx): - return self.indexrepr(idx) - - def consider_list_setitem(self, idx): - return self.indexrepr(idx) - - def consider_list_delitem(self, idx): - return self.indexrepr(idx) - - def consider_str_join(self, s): - if s.is_constant(): - return repr(s.const) - else: - return "NON-CONSTANT" - - def consider_str_getitem(self, idx): - return self.indexrepr(idx) - - def consider_strformat(self, str, args): - if str.is_constant(): - s = repr(str.const) - else: - s = "?!!!!!!" - if isinstance(args, SomeTuple): - return (s, tuple([self.typerepr(x) for x in args.items])) - else: - return (s, self.typerepr(args)) - - def consider_dict_getitem(self, dic): - return dic - - def consider_dict_setitem(self, dic): - return dic - - def consider_dict_delitem(self, dic): - return dic - class Bookkeeper(object): """The log of choices that have been made while analysing the operations. It ensures that the same 'choice objects' will be returned if we ask @@ -165,13 +59,8 @@ self.needs_generic_instantiate = {} - self.stats = Stats(self) - delayed_imports() - def count(self, category, *args): - self.stats.count(category, *args) - def enter(self, position_key): """Start of an operation. 
The operation is uniquely identified by the given key.""" diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -347,9 +347,6 @@ def test(*args): return s_Bool -def import_func(*args): - return SomeObject() - # collect all functions import __builtin__ BUILTIN_ANALYZERS = {} @@ -397,9 +394,6 @@ else: BUILTIN_ANALYZERS[object.__init__] = object_init -# import -BUILTIN_ANALYZERS[__import__] = import_func - # annotation of low-level types from rpython.annotator.model import SomePtr from rpython.rtyper.lltypesystem import lltype diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -799,21 +799,6 @@ assert 0, "couldn't get to commonbase of %r and %r" % (cls1, cls2) -def missing_operation(cls, name): - def default_op(*args): - if args and isinstance(args[0], tuple): - flattened = tuple(args[0]) + args[1:] - else: - flattened = args - for arg in flattened: - if arg.__class__ is SomeObject and arg.knowntype is not type: - return SomeObject() - bookkeeper = rpython.annotator.bookkeeper.getbookkeeper() - bookkeeper.warning("no precise annotation supplied for %s%r" % (name, args)) - return s_ImpossibleValue - setattr(cls, name, default_op) - - class HarmlesslyBlocked(Exception): """Raised by the unaryop/binaryop to signal a harmless kind of BlockedInference: the current block is blocked, but not in a way diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -14,7 +14,8 @@ from rpython.rlib.rarithmetic import r_uint, base_int, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import r_singlefloat from rpython.rlib import objectmodel -from rpython.flowspace.objspace import build_flow, FlowingError +from rpython.flowspace.objspace import build_flow +from rpython.flowspace.flowcontext import FlowingError from rpython.flowspace.operation import op from rpython.translator.test import snippet diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -5,11 +5,12 @@ from __future__ import absolute_import from types import MethodType +from rpython.flowspace.operation import op from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, SomePBC, SomeTypedAddressAccess, SomeAddress, SomeType, s_ImpossibleValue, - s_Bool, s_None, unionof, missing_operation, add_knowntypedata, + s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin @@ -20,17 +21,8 @@ def immutablevalue(x): return getbookkeeper().immutablevalue(x) -UNARY_OPERATIONS = set(['len', 'bool', 'getattr', 'setattr', 'delattr', - 'simple_call', 'call_args', 'str', 'repr', - 'iter', 'next', 'invert', 'type', 'issubtype', - 'pos', 'neg', 'abs', 'hex', 'oct', - 'ord', 'int', 'float', 'long', - 'hash', 'id', # <== not supported any more - 'getslice', 'setslice', 'delslice', - 'neg_ovf', 'abs_ovf', 'hint', 'unicode', 'unichr']) - -for opname in UNARY_OPERATIONS: - missing_operation(SomeObject, opname) +UNARY_OPERATIONS = set([oper.opname for oper in 
op.__dict__.values() + if oper.dispatch == 1]) class __extend__(SomeObject): @@ -84,23 +76,18 @@ raise AnnotatorError("cannot use hash() in RPython") def str(self): - getbookkeeper().count('str', self) return SomeString() From noreply at buildbot.pypy.org Thu Jan 23 22:30:45 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 23 Jan 2014 22:30:45 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Kill dead import. Message-ID: <20140123213045.7D0001D2425@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68884:bb9cbf3752cc Date: 2014-01-23 22:08 +0100 http://bitbucket.org/pypy/pypy/changeset/bb9cbf3752cc/ Log: Kill dead import. diff --git a/rpython/translator/llvm/test/test_genllvm.py b/rpython/translator/llvm/test/test_genllvm.py --- a/rpython/translator/llvm/test/test_genllvm.py +++ b/rpython/translator/llvm/test/test_genllvm.py @@ -7,7 +7,7 @@ from rpython.rlib.rarithmetic import (LONG_BIT, r_uint, r_singlefloat, r_longfloat) from rpython.rlib.test.test_longlong2float import enum_floats, fn, fnsingle -from rpython.rtyper.lltypesystem import lltype, rffi, llmemory, llgroup +from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.rtyper.lltypesystem.ll2ctypes import (force_cast, get_ctypes_type, lltype2ctypes, ctypes2lltype) from rpython.rtyper.rtuple import TupleRepr From noreply at buildbot.pypy.org Thu Jan 23 22:30:46 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 23 Jan 2014 22:30:46 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Adapt TestTypedOptimizedSwitchTestCaseLLVM. Message-ID: <20140123213046.945091D2425@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68885:76bb3d3dff4f Date: 2014-01-23 22:10 +0100 http://bitbucket.org/pypy/pypy/changeset/76bb3d3dff4f/ Log: Adapt TestTypedOptimizedSwitchTestCaseLLVM. diff --git a/rpython/translator/llvm/test/test_genllvm.py b/rpython/translator/llvm/test/test_genllvm.py --- a/rpython/translator/llvm/test/test_genllvm.py +++ b/rpython/translator/llvm/test/test_genllvm.py @@ -701,12 +701,10 @@ .TestTypedOptimizedTestCase): pass -class TestTypedOptimizedSwitchTestCaseLLVM(test_backendoptimized +class TestTypedOptimizedSwitchTestCaseLLVM(_LLVMMixin, + test_backendoptimized .TestTypedOptimizedSwitchTestCase): - class CodeGenerator(_LLVMMixin, test_backendoptimized - .TestTypedOptimizedSwitchTestCase - .CodeGenerator): - pass + pass class TestLLVMRffi(BaseTestRffi, _LLVMMixin): From noreply at buildbot.pypy.org Thu Jan 23 22:30:47 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 23 Jan 2014 22:30:47 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Don't run RTyper tests on top of GenLLVM. GenC doesn't, neither. They are too high-level and required some hacks to be runnable on GenLLVM. These hacks will be removed in the next commit. Message-ID: <20140123213047.BDC4F1D2425@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68886:7ef5897257d5 Date: 2014-01-23 22:22 +0100 http://bitbucket.org/pypy/pypy/changeset/7ef5897257d5/ Log: Don't run RTyper tests on top of GenLLVM. GenC doesn't, neither. They are too high-level and required some hacks to be runnable on GenLLVM. These hacks will be removed in the next commit. 
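The llvm-translation-backend changesets above all lean on one pattern: a generic test suite is written against a single hook (getcompiled()), and a backend-specific mixin such as _LLVMMixin supplies that hook, so an existing test class is reused by simply adding the mixin to its bases (as in the TestTypedOptimizedSwitchTestCaseLLVM change above). A self-contained sketch of that pattern follows; all names apart from the _LLVMMixin-style base are hypothetical, and the "compilation" step is stubbed out so the sketch runs anywhere, unlike the real mixin in rpython/translator/llvm/test/test_genllvm.py.

    # Hypothetical sketch of the mixin-based test reuse shown in the
    # diffs above; not the actual PyPy test code.

    class BaseBackendTests(object):
        # Generic tests, written only against self.getcompiled().
        def test_add(self):
            def f(x, y):
                return x + y
            fc = self.getcompiled(f, [int, int])
            assert fc(2, 3) == 5

    class _FakeLLVMMixin(object):
        # Stand-in for _LLVMMixin: the real mixin would translate
        # `func` through the LLVM backend and return the compiled
        # wrapper; here the function is returned unchanged so the
        # sketch stays runnable without a translation toolchain.
        def getcompiled(self, func, argtypes, **kwds):
            return func

    # Reusing the whole suite for another backend is then one class
    # statement, mirroring TestTypedOptimizedSwitchTestCaseLLVM above.
    class TestBackendLLVM(_FakeLLVMMixin, BaseBackendTests):
        pass

    if __name__ == '__main__':
        TestBackendLLVM().test_add()
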
diff --git a/rpython/translator/llvm/test/test_genllvm.py b/rpython/translator/llvm/test/test_genllvm.py --- a/rpython/translator/llvm/test/test_genllvm.py +++ b/rpython/translator/llvm/test/test_genllvm.py @@ -8,17 +8,12 @@ r_longfloat) from rpython.rlib.test.test_longlong2float import enum_floats, fn, fnsingle from rpython.rtyper.lltypesystem import lltype, rffi, llmemory -from rpython.rtyper.lltypesystem.ll2ctypes import (force_cast, get_ctypes_type, +from rpython.rtyper.lltypesystem.ll2ctypes import (get_ctypes_type, lltype2ctypes, ctypes2lltype) from rpython.rtyper.rtuple import TupleRepr from rpython.rtyper.lltypesystem.rstr import StringRepr, UnicodeRepr from rpython.rtyper.lltypesystem.test.test_rffi import BaseTestRffi from rpython.rtyper.module.support import LLSupport -from rpython.rtyper.test import (test_annlowlevel, test_exception, - test_generator, test_rbool, test_rbuilder, test_rbuiltin, test_rclass, - test_rconstantdict, test_rdict, test_remptydict, test_rfloat, - test_rint, test_rlist, test_rpbc, test_rrange, test_rstr, - test_rtuple, test_runicode, test_rvirtualizable, test_rweakref) from rpython.rtyper.typesystem import getfunctionptr from rpython.translator.backendopt.all import backend_optimizations from rpython.translator.backendopt.raisingop2direct_call import ( @@ -369,15 +364,6 @@ self._types = types return self._compiled - def interpret(self, func, args, **kwds): - fc = self._compile(func, args, **kwds) - return fc(*args) - - def interpret_raises(self, exception, func, args, **kwds): - fc = self._compile(func, args, **kwds) - with py.test.raises(exception): - fc(*args) - @property def translator(self): return self._translator @@ -741,107 +727,3 @@ if t is not None: py.test.skip('not supported yet') return self.getcompiled(fn, inputtypes, gcpolicy='ref') - - -class TestRtypingLLVM(_LLVMMixin, test_annlowlevel.TestLLType): - pass - -class TestExceptionLLVM(_LLVMMixin, test_exception.TestException): - def test_raise_and_catch_other(self): - py.test.skip('Impossible to pass if not running on LLInterpreter.') - - def test_raise_prebuilt_and_catch_other(self): - py.test.skip('Impossible to pass if not running on LLInterpreter.') - -class TestGeneratorLLVM(_LLVMMixin, test_generator.TestGenerator): - pass - -class TestRboolLLVM(_LLVMMixin, test_rbool.TestRbool): - pass - -class TestStringBuilderLLVM(_LLVMMixin, test_rbuilder.TestStringBuilder): - pass - -class TestRbuiltinLLVM(_LLVMMixin, test_rbuiltin.TestRbuiltin): - def test_debug_llinterpcall(self): - py.test.skip('Impossible to pass if not running on LLInterpreter.') - -class TestRclassLLVM(_LLVMMixin, test_rclass.TestRclass): - pass - -class TestRconstantdictLLVM(_LLVMMixin, test_rconstantdict.TestRconstantdict): - pass - -class TestRdictLLVM(_LLVMMixin, test_rdict.TestRDict): - def test_memoryerror_should_not_insert(self): - py.test.skip('Impossible to pass if not running on LLInterpreter.') - -class TestRemptydictLLVM(_LLVMMixin, test_remptydict.TestRemptydict): - pass - -class TestRfloatLLVM(_LLVMMixin, test_rfloat.TestRfloat): - pass - -class TestRintLLVM(_LLVMMixin, test_rint.TestRint): - pass - -class TestRlistLLVM(_LLVMMixin, test_rlist.TestRlist): - def test_iterate_over_immutable_list(self): - py.test.skip('Impossible to pass if not running on LLInterpreter.') - - def test_iterate_over_immutable_list_quasiimmut_attr(self): - py.test.skip('Impossible to pass if not running on LLInterpreter.') - - def test_getitem_exc_1(self): - py.test.skip('Impossible to pass if not running on LLInterpreter.') - - def 
test_getitem_exc_2(self): - py.test.skip('Impossible to pass if not running on LLInterpreter.') - - def list_is_clear(self, lis, idx): - items = lis._obj.items - for i in range(idx, lis._obj.length): - if items[i]._obj is not None: - return False - return True - -class TestRPBCLLVM(_LLVMMixin, test_rpbc.TestRPBC): - def read_attr(self, value, attr_name): - class_name = 'pypy.rpython.test.test_rpbc.' + self.class_name(value) - for (cd, _), ir in self._translator.rtyper.instance_reprs.items(): - if cd is not None and cd.name == class_name: - value = force_cast(ir.lowleveltype, value) - - value = value._obj - while value is not None: - attr = getattr(value, "inst_" + attr_name, None) - if attr is None: - value = value.super - else: - return attr - raise AttributeError() - -class TestRPBCExtraLLVM(_LLVMMixin, test_rpbc.TestRPBCExtra): - pass - -class TestRrangeLLVM(_LLVMMixin, test_rrange.TestRrange): - pass - -class TestRstrLLVM(_LLVMMixin, test_rstr.TestRstr): - def test_getitem_exc(self): - py.test.skip('Impossible to pass if not running on LLInterpreter.') - -class TestRtupleLLVM(_LLVMMixin, test_rtuple.TestRtuple): - pass - -class TestRUnicodeLLVM(_LLVMMixin, test_runicode.TestRUnicode): - def test_getitem_exc(self): - py.test.skip('Impossible to pass if not running on LLInterpreter.') - -class TestRvirtualizableLLVM(_LLVMMixin, test_rvirtualizable.TestVirtualizable): - pass - -class TestRweakrefLLVM(_LLVMMixin, test_rweakref.TestRweakref): - def _compile(self, *args, **kwds): - kwds['gcpolicy'] = 'minimark' - return _LLVMMixin._compile(self, *args, **kwds) From noreply at buildbot.pypy.org Thu Jan 23 22:30:48 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 23 Jan 2014 22:30:48 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Remove hacks formerly required to run RTyper tests on top of GenLLVM. Message-ID: <20140123213048.D4FF71D2425@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68887:120355abd906 Date: 2014-01-23 22:29 +0100 http://bitbucket.org/pypy/pypy/changeset/120355abd906/ Log: Remove hacks formerly required to run RTyper tests on top of GenLLVM. 
diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py --- a/rpython/rtyper/test/test_rbuiltin.py +++ b/rpython/rtyper/test/test_rbuiltin.py @@ -267,7 +267,7 @@ except OSError: pass count = 0 - for dir_call in enum_direct_calls(self.translator, fn): + for dir_call in enum_direct_calls(test_llinterp.typer.annotator.translator, fn): cfptr = dir_call.args[0] assert self.get_callable(cfptr.value).__name__.startswith('dup') count += 1 @@ -282,7 +282,7 @@ res = self.interpret(f, []) os.close(res) count = 0 - for dir_call in enum_direct_calls(self.translator, wr_open): + for dir_call in enum_direct_calls(test_llinterp.typer.annotator.translator, wr_open): cfptr = dir_call.args[0] assert self.get_callable(cfptr.value).__name__.startswith('os_open') count += 1 diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -82,6 +82,15 @@ self.check_list(l1, expected) +# helper used by some tests below +def list_is_clear(lis, idx): + items = lis._obj.items._obj.items + for i in range(idx, len(items)): + if items[i]._obj is not None: + return False + return True + + class TestListImpl(BaseTestListImpl): def sample_list(self): # [42, 43, 44, 45] rlist = ListRepr(None, signed_repr) @@ -1475,13 +1484,6 @@ assert r_A_list.lowleveltype == r_B_list.lowleveltype - def list_is_clear(self, lis, idx): - items = lis._obj.items._obj.items - for i in range(idx, len(items)): - if items[i]._obj is not None: - return False - return True - def test_no_unneeded_refs(self): def fndel(p, q): lis = ["5", "3", "99"] @@ -1497,10 +1499,10 @@ return lis for i in range(2, 3+1): lis = self.interpret(fndel, [0, i]) - assert self.list_is_clear(lis, 3-i) + assert list_is_clear(lis, 3-i) for i in range(3): lis = self.interpret(fnpop, [i]) - assert self.list_is_clear(lis, 3-i) + assert list_is_clear(lis, 3-i) def test_oopspec(self): lst1 = [123, 456] # non-mutated list diff --git a/rpython/rtyper/test/test_rvirtualizable.py b/rpython/rtyper/test/test_rvirtualizable.py --- a/rpython/rtyper/test/test_rvirtualizable.py +++ b/rpython/rtyper/test/test_rvirtualizable.py @@ -367,7 +367,9 @@ def test_simple(self): def f(v): vinst = V(v) - return vinst + return vinst, vinst.v res = self.interpret(f, [42]) + assert res.item1 == 42 + res = lltype.normalizeptr(res.item0) assert res.inst_v == 42 assert res.vable_token == lltype.nullptr(llmemory.GCREF.TO) diff --git a/rpython/rtyper/test/test_rweakref.py b/rpython/rtyper/test/test_rweakref.py --- a/rpython/rtyper/test/test_rweakref.py +++ b/rpython/rtyper/test/test_rweakref.py @@ -43,10 +43,7 @@ r = w1 else: r = w2 - obj = r() - if obj is None: - return -1 - return obj.hello + return r().hello res = self.interpret(f, [1]) assert res == 5 res = self.interpret(f, [0]) diff --git a/rpython/rtyper/test/tool.py b/rpython/rtyper/test/tool.py --- a/rpython/rtyper/test/tool.py +++ b/rpython/rtyper/test/tool.py @@ -1,6 +1,5 @@ import py from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.test import test_llinterp from rpython.rtyper.test.test_llinterp import gengraph, interpret, interpret_raises class BaseRtypingTest(object): @@ -36,10 +35,6 @@ def is_of_type(self, x, type_): return type(x) is type_ - @property - def translator(self): - return test_llinterp.typer.annotator.translator - def _skip_llinterpreter(self, reason): py.test.skip("lltypesystem doesn't support %s, yet" % reason) From noreply at buildbot.pypy.org Thu Jan 23 22:48:30 2014 From: 
noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 23 Jan 2014 22:48:30 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: Clear this too Message-ID: <20140123214830.763C91C3360@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68888:0f2c24d693c5 Date: 2014-01-23 15:47 -0600 http://bitbucket.org/pypy/pypy/changeset/0f2c24d693c5/ Log: Clear this too diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -343,6 +343,8 @@ self.attrs[i] = None for i in range(len(self.selectors)): self.selectors[i] = self._empty_selector + for i in range(len(self.indices)): + self.indices[i] = None # ____________________________________________________________ # object implementation From noreply at buildbot.pypy.org Thu Jan 23 22:51:57 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 23 Jan 2014 22:51:57 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Remove dead code. Message-ID: <20140123215157.BA5F41C3360@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68889:f114c24cec5f Date: 2014-01-23 22:51 +0100 http://bitbucket.org/pypy/pypy/changeset/f114c24cec5f/ Log: Remove dead code. diff --git a/rpython/translator/llvm/test/test_genllvm.py b/rpython/translator/llvm/test/test_genllvm.py --- a/rpython/translator/llvm/test/test_genllvm.py +++ b/rpython/translator/llvm/test/test_genllvm.py @@ -353,17 +353,6 @@ so_file = genllvm._compile(True) return CTypesFuncWrapper(genllvm, graph, str(so_file)) - def _compile(self, func, args, policy=None, gcpolicy=None): - types = [lltype.typeOf(arg) for arg in args] - if not (func == self._func and types == self._types): - self._compiled = self.getcompiled(func, types, gcpolicy=gcpolicy, - annotator_policy=policy, - no_gcremovetypeptr=True) - self._compiled.convert = False - self._func = func - self._types = types - return self._compiled - @property def translator(self): return self._translator From noreply at buildbot.pypy.org Thu Jan 23 22:56:48 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 23 Jan 2014 22:56:48 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: Import cleanups Message-ID: <20140123215648.8C99B1C33EC@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68890:f7c6652f73fc Date: 2014-01-23 15:55 -0600 http://bitbucket.org/pypy/pypy/changeset/f7c6652f73fc/ Log: Import cleanups diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -1,15 +1,16 @@ import weakref -from rpython.rlib import jit, objectmodel, debug + +from rpython.rlib import jit, objectmodel, debug, rerased from rpython.rlib.rarithmetic import intmask, r_uint -from rpython.rlib import rerased from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.dictmultiobject import W_DictMultiObject, DictStrategy, ObjectDictStrategy -from pypy.objspace.std.dictmultiobject import BaseKeyIterator, BaseValueIterator, BaseItemIterator -from pypy.objspace.std.dictmultiobject import _never_equal_to_string -from pypy.objspace.std.objectobject import W_ObjectObject +from pypy.objspace.std.dictmultiobject import ( + W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, + BaseValueIterator, BaseItemIterator, _never_equal_to_string +) from pypy.objspace.std.typeobject import TypeCell + # 
____________________________________________________________ # attribute shapes From noreply at buildbot.pypy.org Thu Jan 23 23:08:41 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Thu, 23 Jan 2014 23:08:41 +0100 (CET) Subject: [pypy-commit] pypy refine-testrunner: merge Message-ID: <20140123220841.3D7D91C33EC@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: refine-testrunner Changeset: r68891:0ccacf17c26b Date: 2014-01-23 22:48 +0100 http://bitbucket.org/pypy/pypy/changeset/0ccacf17c26b/ Log: merge diff too long, truncating to 2000 out of 75349 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -3,6 +3,7 @@ *.sw[po] *~ .*.swp +.env .idea .project .pydevproject diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -1,7 +1,38 @@ all: pypy-c +PYPY_EXECUTABLE := $(shell which pypy) +URAM := $(shell python -c "import sys; print 4.5 if sys.maxint>1<<32 else 2.5") + +ifeq ($(PYPY_EXECUTABLE),) +RUNINTERP = python +else +RUNINTERP = $(PYPY_EXECUTABLE) +endif + pypy-c: - @echo "Building PyPy with JIT, it'll take about 40 minutes and 4G of RAM" - @sleep 3 - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + @echo + @echo "====================================================================" +ifeq ($(PYPY_EXECUTABLE),) + @echo "Building a regular (jitting) version of PyPy, using CPython." + @echo "This takes around 2 hours and $(URAM) GB of RAM." + @echo "Note that pre-installing a PyPy binary would reduce this time" + @echo "and produce basically the same result." +else + @echo "Building a regular (jitting) version of PyPy, using" + @echo "$(PYPY_EXECUTABLE) to run the translation itself." + @echo "This takes up to 1 hour and $(URAM) GB of RAM." +endif + @echo + @echo "For more control (e.g. to use multiple CPU cores during part of" + @echo "the process) you need to run \`\`rpython/bin/rpython'' directly." + @echo "For more information see \`\`http://pypy.org/download.html''." + @echo "====================================================================" + @echo + @sleep 5 + $(RUNINTERP) rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py + +# Note: the -jN option, or MAKEFLAGS=-jN, are not usable. They are +# replaced with an opaque --jobserver option by the time this Makefile +# runs. We cannot get their original value either: +# http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html diff --git a/README.rst b/README.rst --- a/README.rst +++ b/README.rst @@ -1,5 +1,5 @@ ===================================== -PyPy: Python in Python Implementation +PyPy: Python in Python Implementation ===================================== Welcome to PyPy! @@ -26,12 +26,14 @@ Building ======== -build with:: +build with: - rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py +.. code-block:: console + + $ rpython/bin/rpython -Ojit pypy/goal/targetpypystandalone.py This ends up with ``pypy-c`` binary in the main pypy directory. 
We suggest -to use virtualenv with the resulting pypy-c as the interpreter, you can +to use virtualenv with the resulting pypy-c as the interpreter; you can find more details about various installation schemes here: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py --- a/dotviewer/graphparse.py +++ b/dotviewer/graphparse.py @@ -152,7 +152,8 @@ try: plaincontent = dot2plain_graphviz(content, contenttype) except PlainParseError, e: - print e - # failed, retry via codespeak - plaincontent = dot2plain_codespeak(content, contenttype) + raise + ##print e + ### failed, retry via codespeak + ##plaincontent = dot2plain_codespeak(content, contenttype) return list(parse_plain(graph_id, plaincontent, links, fixedfont)) diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -1780,7 +1780,19 @@ # error if this argument is not allowed with other previously # seen arguments, assuming that actions that use the default # value don't really count as "present" - if argument_values is not action.default: + + # XXX PyPy bug-to-bug compatibility: "is" on primitive types + # is not consistent in CPython. We'll assume it is close + # enough for ints (which is true only for "small ints"), but + # for floats and longs and complexes we'll go for the option + # of forcing "is" to say False, like it usually does on + # CPython. A fix is pending on CPython trunk + # (http://bugs.python.org/issue18943) but that might change + # the details of the semantics and so not be applied to 2.7. + # See the line AA below. + + if (argument_values is not action.default or + type(argument_values) in (float, long, complex)): # AA seen_non_default_actions.add(action) for conflict_action in action_conflicts.get(action, []): if conflict_action in seen_non_default_actions: diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -4,6 +4,21 @@ from __pypy__.builders import StringBuilder, UnicodeBuilder +class StringOrUnicodeBuilder(object): + def __init__(self): + self._builder = StringBuilder() + def append(self, string): + try: + self._builder.append(string) + except UnicodeEncodeError: + ub = UnicodeBuilder() + ub.append(self._builder.build()) + self._builder = ub + ub.append(string) + def build(self): + return self._builder.build() + + ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') HAS_UTF8 = re.compile(r'[\x80-\xff]') 
@@ -192,7 +207,7 @@ if self.ensure_ascii: builder = StringBuilder() else: - builder = UnicodeBuilder() + builder = StringOrUnicodeBuilder() self.__encode(o, markers, builder, 0) return builder.build() diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/string.py b/lib-python/2.7/string.py --- a/lib-python/2.7/string.py +++ b/lib-python/2.7/string.py @@ -66,16 +66,17 @@ must be of the same length. """ - if len(fromstr) != len(tostr): + n = len(fromstr) + if n != len(tostr): raise ValueError, "maketrans arguments must have same length" - global _idmapL - if not _idmapL: - _idmapL = list(_idmap) - L = _idmapL[:] - fromstr = map(ord, fromstr) - for i in range(len(fromstr)): - L[fromstr[i]] = tostr[i] - return ''.join(L) + # this function has been rewritten to suit PyPy better; it is + # almost 10x faster than the original. + buf = bytearray(256) + for i in range(256): + buf[i] = i + for i in range(n): + buf[ord(fromstr[i])] = tostr[i] + return str(buf) diff --git a/lib-python/2.7/test/keycert.pem b/lib-python/2.7/test/keycert.pem --- a/lib-python/2.7/test/keycert.pem +++ b/lib-python/2.7/test/keycert.pem @@ -1,32 +1,31 @@ ------BEGIN RSA PRIVATE KEY----- -MIICXwIBAAKBgQC8ddrhm+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9L -opdJhTvbGfEj0DQs1IE8M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVH -fhi/VwovESJlaBOp+WMnfhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQAB -AoGBAK0FZpaKj6WnJZN0RqhhK+ggtBWwBnc0U/ozgKz2j1s3fsShYeiGtW6CK5nU -D1dZ5wzhbGThI7LiOXDvRucc9n7vUgi0alqPQ/PFodPxAN/eEYkmXQ7W2k7zwsDA -IUK0KUhktQbLu8qF/m8qM86ba9y9/9YkXuQbZ3COl5ahTZrhAkEA301P08RKv3KM -oXnGU2UHTuJ1MAD2hOrPxjD4/wxA/39EWG9bZczbJyggB4RHu0I3NOSFjAm3HQm0 -ANOu5QK9owJBANgOeLfNNcF4pp+UikRFqxk5hULqRAWzVxVrWe85FlPm0VVmHbb/ -loif7mqjU8o1jTd/LM7RD9f2usZyE2psaw8CQQCNLhkpX3KO5kKJmS9N7JMZSc4j -oog58yeYO8BBqKKzpug0LXuQultYv2K4veaIO04iL9VLe5z9S/Q1jaCHBBuXAkEA -z8gjGoi1AOp6PBBLZNsncCvcV/0aC+1se4HxTNo2+duKSDnbq+ljqOM+E7odU+Nq -ewvIWOG//e8fssd0mq3HywJBAJ8l/c8GVmrpFTx8r/nZ2Pyyjt3dH1widooDXYSV -q6Gbf41Llo5sYAtmxdndTLASuHKecacTgZVhy0FryZpLKrU= ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm +LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0 +ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP +USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt +CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq +SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK +UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y +BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ +ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5 +oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik +eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F +0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS +x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/ +SPIXQuT8RMPDVNQ= +-----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIICpzCCAhCgAwIBAgIJAP+qStv1cIGNMA0GCSqGSIb3DQEBBQUAMIGJMQswCQYD -VQQGEwJVUzERMA8GA1UECBMIRGVsYXdhcmUxEzARBgNVBAcTCldpbG1pbmd0b24x -IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMQwwCgYDVQQLEwNT -U0wxHzAdBgNVBAMTFnNvbWVtYWNoaW5lLnB5dGhvbi5vcmcwHhcNMDcwODI3MTY1 
-NDUwWhcNMTMwMjE2MTY1NDUwWjCBiTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCERl -bGF3YXJlMRMwEQYDVQQHEwpXaWxtaW5ndG9uMSMwIQYDVQQKExpQeXRob24gU29m -dHdhcmUgRm91bmRhdGlvbjEMMAoGA1UECxMDU1NMMR8wHQYDVQQDExZzb21lbWFj -aGluZS5weXRob24ub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC8ddrh -m+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9LopdJhTvbGfEj0DQs1IE8 -M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVHfhi/VwovESJlaBOp+WMn -fhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQABoxUwEzARBglghkgBhvhC -AQEEBAMCBkAwDQYJKoZIhvcNAQEFBQADgYEAF4Q5BVqmCOLv1n8je/Jw9K669VXb -08hyGzQhkemEBYQd6fzQ9A/1ZzHkJKb1P6yreOLSEh4KcxYPyrLRC1ll8nr5OlCx -CMhKkTnR6qBsdNV0XtdU2+N25hqW+Ma4ZeqsN/iiJVCGNOZGnvQuvCAGWF8+J/f/ -iHkC6gGdBJhogs4= +MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV +BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw +MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH +Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k +YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7 +6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt +pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw +FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd +BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G +lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1 +CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/sha256.pem b/lib-python/2.7/test/sha256.pem --- a/lib-python/2.7/test/sha256.pem +++ b/lib-python/2.7/test/sha256.pem @@ -1,129 +1,128 @@ # Certificate chain for https://sha256.tbs-internet.com - 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com - i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC + 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=Certificats TBS X509/CN=ecom.tbs-x509.com + i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business -----BEGIN CERTIFICATE----- -MIIGXTCCBUWgAwIBAgIRAMmag+ygSAdxZsbyzYjhuW0wDQYJKoZIhvcNAQELBQAw -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl +MIIGTjCCBTagAwIBAgIQOh3d9dNDPq1cSdJmEiMpqDANBgkqhkiG9w0BAQUFADCB +yTELMAkGA1UEBhMCRlIxETAPBgNVBAgTCENhbHZhZG9zMQ0wCwYDVQQHEwRDYWVu +MRUwEwYDVQQKEwxUQlMgSU5URVJORVQxSDBGBgNVBAsTP1Rlcm1zIGFuZCBDb25k +aXRpb25zOiBodHRwOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0EvcmVwb3NpdG9y +eTEYMBYGA1UECxMPVEJTIElOVEVSTkVUIENBMR0wGwYDVQQDExRUQlMgWDUwOSBD +QSBidXNpbmVzczAeFw0xMTAxMjUwMDAwMDBaFw0xMzAyMDUyMzU5NTlaMIHHMQsw +CQYDVQQGEwJGUjEOMAwGA1UEERMFMTQwMDAxETAPBgNVBAgTCENhbHZhZG9zMQ0w +CwYDVQQHEwRDQUVOMRswGQYDVQQJExIyMiBydWUgZGUgQnJldGFnbmUxFTATBgNV +BAoTDFRCUyBJTlRFUk5FVDEXMBUGA1UECxMOMDAwMiA0NDA0NDM4MTAxHTAbBgNV +BAsTFENlcnRpZmljYXRzIFRCUyBYNTA5MRowGAYDVQQDExFlY29tLnRicy14NTA5 +LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKRrlHUnJ++1lpcg +jtYco7cdmRe+EEfTmwPfCdfV3G1QfsTSvY6FfMpm/83pqHfT+4ANwr18wD9ZrAEN +G16mf9VdCGK12+TP7DmqeZyGIqlFFoahQnmb8EarvE43/1UeQ2CV9XmzwZvpqeli +LfXsFonawrY3H6ZnMwS64St61Z+9gdyuZ/RbsoZBbT5KUjDEG844QRU4OT1IGeEI +eY5NM5RNIh6ZNhVtqeeCxMS7afONkHQrOco73RdSTRck/Hj96Ofl3MHNHryr+AMK 
+DGFk1kLCZGpPdXtkxXvaDeQoiYDlil26CWc+YK6xyDPMdsWvoG14ZLyCpzMXA7/7 +4YAQRH0CAwEAAaOCAjAwggIsMB8GA1UdIwQYMBaAFBoJBMz5CY+7HqDO1KQUf0vV +I1jNMB0GA1UdDgQWBBQgOU8HsWzbmD4WZP5Wtdw7jca2WDAOBgNVHQ8BAf8EBAMC +BaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw +TAYDVR0gBEUwQzBBBgsrBgEEAYDlNwIBATAyMDAGCCsGAQUFBwIBFiRodHRwczov +L3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL0NQUzEwdwYDVR0fBHAwbjA3oDWgM4Yx +aHR0cDovL2NybC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNy +bDAzoDGgL4YtaHR0cDovL2NybC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5l +c3MuY3JsMIGwBggrBgEFBQcBAQSBozCBoDA9BggrBgEFBQcwAoYxaHR0cDovL2Ny +dC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNydDA5BggrBgEF +BQcwAoYtaHR0cDovL2NydC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5lc3Mu +Y3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wMwYDVR0R +BCwwKoIRZWNvbS50YnMteDUwOS5jb22CFXd3dy5lY29tLnRicy14NTA5LmNvbTAN +BgkqhkiG9w0BAQUFAAOCAQEArT4NHfbY87bGAw8lPV4DmHlmuDuVp/y7ltO3Ynse +3Rz8RxW2AzuO0Oy2F0Cu4yWKtMyEyMXyHqWtae7ElRbdTu5w5GwVBLJHClCzC8S9 +SpgMMQTx3Rgn8vjkHuU9VZQlulZyiPK7yunjc7c310S9FRZ7XxOwf8Nnx4WnB+No +WrfApzhhQl31w+RyrNxZe58hCfDDHmevRvwLjQ785ZoQXJDj2j3qAD4aI2yB8lB5 +oaE1jlCJzC7Kmz/Y9jzfmv/zAs1LQTm9ktevv4BTUFaGjv9jxnQ1xnS862ZiouLW +zZYIlYPf4F6JjXGiIQgQRglILUfq3ftJd9/ok9W9ZF8h8w== +-----END CERTIFICATE----- + 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business + i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root +-----BEGIN CERTIFICATE----- +MIIFPzCCBCegAwIBAgIQDlBz/++iRSmLDeVRHT/hADANBgkqhkiG9w0BAQUFADBv +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk +ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF +eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDcwOTE4MTkyMlow +gckxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv -cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMB4XDTEwMDIxODAwMDAwMFoXDTEyMDIxOTIzNTk1OVowgcsxCzAJBgNV -BAYTAkZSMQ4wDAYDVQQREwUxNDAwMDERMA8GA1UECBMIQ2FsdmFkb3MxDTALBgNV -BAcTBENBRU4xGzAZBgNVBAkTEjIyIHJ1ZSBkZSBCcmV0YWduZTEVMBMGA1UEChMM -VEJTIElOVEVSTkVUMRcwFQYDVQQLEw4wMDAyIDQ0MDQ0MzgxMDEbMBkGA1UECxMS -c2hhLTI1NiBwcm9kdWN0aW9uMSAwHgYDVQQDExdzaGEyNTYudGJzLWludGVybmV0 -LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbuM8VT7f0nntwu -N3F7v9KIBlhKNAxqCrziOXU5iqUt8HrQB3DtHbdmII+CpVUlwlmepsx6G+srEZ9a -MIGAy0nxi5aLb7watkyIdPjJTMvTUBQ/+RPWzt5JtYbbY9BlJ+yci0dctP74f4NU -ISLtlrEjUbf2gTohLrcE01TfmOF6PDEbB5PKDi38cB3NzKfizWfrOaJW6Q1C1qOJ -y4/4jkUREX1UFUIxzx7v62VfjXSGlcjGpBX1fvtABQOSLeE0a6gciDZs1REqroFf -5eXtqYphpTa14Z83ITXMfgg5Nze1VtMnzI9Qx4blYBw4dgQVEuIsYr7FDBOITDzc -VEVXZx0CAwEAAaOCAj8wggI7MB8GA1UdIwQYMBaAFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMB0GA1UdDgQWBBSJKI/AYVI9RQNY0QPIqc8ej2QivTAOBgNVHQ8BAf8EBAMC -BaAwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAQYIKwYBBQUHAwIG -CisGAQQBgjcKAwMGCWCGSAGG+EIEATBMBgNVHSAERTBDMEEGCysGAQQBgOU3AgQB -MDIwMAYIKwYBBQUHAgEWJGh0dHBzOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0Ev -Q1BTNDBtBgNVHR8EZjBkMDKgMKAuhixodHRwOi8vY3JsLnRicy1pbnRlcm5ldC5j -b20vVEJTWDUwOUNBU0dDLmNybDAuoCygKoYoaHR0cDovL2NybC50YnMteDUwOS5j -b20vVEJTWDUwOUNBU0dDLmNybDCBpgYIKwYBBQUHAQEEgZkwgZYwOAYIKwYBBQUH -MAKGLGh0dHA6Ly9jcnQudGJzLWludGVybmV0LmNvbS9UQlNYNTA5Q0FTR0MuY3J0 -MDQGCCsGAQUFBzAChihodHRwOi8vY3J0LnRicy14NTA5LmNvbS9UQlNYNTA5Q0FT -R0MuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wPwYD 
-VR0RBDgwNoIXc2hhMjU2LnRicy1pbnRlcm5ldC5jb22CG3d3dy5zaGEyNTYudGJz -LWludGVybmV0LmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAA5NL0D4QSqhErhlkdPmz -XtiMvdGL+ZehM4coTRIpasM/Agt36Rc0NzCvnQwKE+wkngg1Gy2qe7Q0E/ziqBtB -fZYzdVgu1zdiL4kTaf+wFKYAFGsFbyeEmXysy+CMwaNoF2vpSjCU1UD56bEnTX/W -fxVZYxtBQUpnu2wOsm8cDZuZRv9XrYgAhGj9Tt6F0aVHSDGn59uwShG1+BVF/uju -SCyPTTjL1oc7YElJUzR/x4mQJYvtQI8gDIDAGEOs7v3R/gKa5EMfbUQUI4C84UbI -Yz09Jdnws/MkC/Hm1BZEqk89u7Hvfv+oHqEb0XaUo0TDfsxE0M1sMdnLb91QNQBm -UQ== ------END CERTIFICATE----- - 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC - i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root ------BEGIN CERTIFICATE----- -MIIFVjCCBD6gAwIBAgIQXpDZ0ETJMV02WTx3GTnhhTANBgkqhkiG9w0BAQUFADBv -MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk -ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF -eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDYyNDE5MDYzMFow -gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl -bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u -ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv -cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg -Q0EgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgOkO3f7wzN6 -rOjg45tR5vjBfzK7qmV9IBxb/QW9EEXxG+E7FNhZqQLtwGBKoSsHTnQqV75wWMk0 -9tinWvftBkSpj5sTi/8cbzJfUvTSVYh3Qxv6AVVjMMH/ruLjE6y+4PoaPs8WoYAQ -ts5R4Z1g8c/WnTepLst2x0/Wv7GmuoQi+gXvHU6YrBiu7XkeYhzc95QdviWSJRDk -owhb5K43qhcvjRmBfO/paGlCliDGZp8mHwrI21mwobWpVjTxZRwYO3bd4+TGcI4G -Ie5wmHwE8F7SK1tgSqbBacKjDa93j7txKkfz/Yd2n7TGqOXiHPsJpG655vrKtnXk -9vs1zoDeJQIDAQABo4IBljCCAZIwHQYDVR0OBBYEFAdEdoWTKLx/bXjSCuv6TEvf -2YIfMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMCAGA1UdJQQZ -MBcGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAYBgNVHSAEETAPMA0GCysGAQQBgOU3 -AgQBMHsGA1UdHwR0MHIwOKA2oDSGMmh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL0Fk -ZFRydXN0RXh0ZXJuYWxDQVJvb3QuY3JsMDagNKAyhjBodHRwOi8vY3JsLmNvbW9k -by5uZXQvQWRkVHJ1c3RFeHRlcm5hbENBUm9vdC5jcmwwgYAGCCsGAQUFBwEBBHQw -cjA4BggrBgEFBQcwAoYsaHR0cDovL2NydC5jb21vZG9jYS5jb20vQWRkVHJ1c3RV -VE5TR0NDQS5jcnQwNgYIKwYBBQUHMAKGKmh0dHA6Ly9jcnQuY29tb2RvLm5ldC9B -ZGRUcnVzdFVUTlNHQ0NBLmNydDARBglghkgBhvhCAQEEBAMCAgQwDQYJKoZIhvcN -AQEFBQADggEBAK2zEzs+jcIrVK9oDkdDZNvhuBYTdCfpxfFs+OAujW0bIfJAy232 -euVsnJm6u/+OrqKudD2tad2BbejLLXhMZViaCmK7D9nrXHx4te5EP8rL19SUVqLY -1pTnv5dhNgEgvA7n5lIzDSYs7yRLsr7HJsYPr6SeYSuZizyX1SNz7ooJ32/F3X98 -RB0Mlc/E0OyOrkQ9/y5IrnpnaSora8CnUrV5XNOg+kyCz9edCyx4D5wXYcwZPVWz -8aDqquESrezPyjtfi4WRO4s/VD3HLZvOxzMrWAVYCDG9FxaOhF0QGuuG1F7F3GKV -v6prNyCl016kRl2j1UT+a7gLd8fA25A4C9E= +cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEdMBsGA1UEAxMUVEJTIFg1MDkg +Q0EgYnVzaW5lc3MwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB1PAU +qudCcz3tmyGcf+u6EkZqonKKHrV4gZYbvVkIRojmmlhfi/jwvpHvo8bqSt/9Rj5S +jhCDW0pcbI+IPPtD1Jy+CHNSfnMqVDy6CKQ3p5maTzCMG6ZT+XjnvcND5v+FtaiB +xk1iCX6uvt0jeUtdZvYbyytsSDE6c3Y5//wRxOF8tM1JxibwO3pyER26jbbN2gQz +m/EkdGjLdJ4svPk23WDAvQ6G0/z2LcAaJB+XLfqRwfQpHQvfKa1uTi8PivC8qtip +rmNQMMPMjxSK2azX8cKjjTDJiUKaCb4VHlJDWKEsCFRpgJAoAuX8f7Yfs1M4esGo +sWb3PGspK3O22uIlAgMBAAGjggF6MIIBdjAdBgNVHQ4EFgQUGgkEzPkJj7seoM7U +pBR/S9UjWM0wDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwGAYD +VR0gBBEwDzANBgsrBgEEAYDlNwIBATB7BgNVHR8EdDByMDigNqA0hjJodHRwOi8v +Y3JsLmNvbW9kb2NhLmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LmNybDA2oDSg +MoYwaHR0cDovL2NybC5jb21vZG8ubmV0L0FkZFRydXN0RXh0ZXJuYWxDQVJvb3Qu +Y3JsMIGGBggrBgEFBQcBAQR6MHgwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29t 
+b2RvY2EuY29tL0FkZFRydXN0VVROU2VydmVyQ0EuY3J0MDkGCCsGAQUFBzAChi1o +dHRwOi8vY3J0LmNvbW9kby5uZXQvQWRkVHJ1c3RVVE5TZXJ2ZXJDQS5jcnQwEQYJ +YIZIAYb4QgEBBAQDAgIEMA0GCSqGSIb3DQEBBQUAA4IBAQA7mqrMgk/MrE6QnbNA +h4nRCn2ti4bg4w2C3lB6bSvRPnYwuNw9Jb8vuKkNFzRDxNJXqVDZdfFW5CVQJuyd +nfAx83+wk+spzvFaE1KhFYfN9G9pQfXUfvDRoIcJgPEKUXL1wRiOG+IjU3VVI8pg +IgqHkr7ylln5i5zCiFAPuIJmYUSFg/gxH5xkCNcjJqqrHrHatJr6Qrrke93joupw +oU1njfAcZtYp6fbiK6u2b1pJqwkVBE8RsfLnPhRj+SFbpvjv8Od7o/ieJhFIYQNU +k2jX2u8qZnAiNw93LZW9lpYjtuvMXq8QQppENNja5b53q7UwI+lU7ZGjZ7quuESp +J6/5 -----END CERTIFICATE----- 2 s:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEZjCCA06gAwIBAgIQUSYKkxzif5zDpV954HKugjANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIETzCCAzegAwIBAgIQHM5EYpUZep1jUvnyI6m2mDANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw0wNTA2MDcwODA5MTBaFw0xOTA2MjQxOTA2MzBaMG8xCzAJBgNVBAYT -AlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0 -ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB -IFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39xoz5vIABC05 -4E5b7R+8bA/Ntfojts7emxEzl6QpTH2Tn71KvJPtAxrjj8/lbVBa1pcplFqAsEl6 -2y6V/bjKvzc4LR4+kUGtcFbH8E8/6DKedMrIkFTpxl8PeJ2aQDwOrGGqXhSPnoeh -alDc15pOrwWzpnGUnHGzUGAKxxOdOAeGAqjpqGkmGJCrTLBPI6s6T4TY386f4Wlv -u9dC12tE5Met7m1BX3JacQg3s3llpFmglDf3AC8NwpJy2tA4ctsUqEXEXSp9t7TW -xO6szRNEt8kr3UMAJfphuWlqWCMRt6czj1Z1WfXNKddGtworZbbTQm8Vsrh7++/p -XVPVNFonAgMBAAGjgdgwgdUwHwYDVR0jBBgwFoAUUzLRs89/+uDxoF2FTpLSnkUd -tE8wHQYDVR0OBBYEFK29mHo0tCb3+sQmVO8DveAky1QaMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MBEGCWCGSAGG+EIBAQQEAwIBAjAgBgNVHSUEGTAX -BgorBgEEAYI3CgMDBglghkgBhvhCBAEwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDov -L2NybC51c2VydHJ1c3QuY29tL1VUTi1EQVRBQ29ycFNHQy5jcmwwDQYJKoZIhvcN -AQEFBQADggEBAMbuUxdoFLJRIh6QWA2U/b3xcOWGLcM2MY9USEbnLQg3vGwKYOEO -rVE04BKT6b64q7gmtOmWPSiPrmQH/uAB7MXjkesYoPF1ftsK5p+R26+udd8jkWjd -FwBaS/9kbHDrARrQkNnHptZt9hPk/7XJ0h4qy7ElQyZ42TCbTg0evmnv3+r+LbPM -+bDdtRTKkdSytaX7ARmjR3mfnYyVhzT4HziS2jamEfpr62vp3EV4FTkG101B5CHI -3C+H0be/SGB1pWLLJN47YaApIKa+xWycxOkKaSLvkTr6Jq/RW0GnOuL4OAdCq8Fb -+M5tug8EPzI0rNwEKNdwMBQmBsTkm5jVz3g= +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNMDUwNjA3MDgwOTEwWhcNMTkwNzA5MTgxOTIyWjBvMQswCQYD +VQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0 +IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5h +bCBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt/caM+by +AAQtOeBOW+0fvGwPzbX6I7bO3psRM5ekKUx9k5+9SryT7QMa44/P5W1QWtaXKZRa +gLBJetsulf24yr83OC0ePpFBrXBWx/BPP+gynnTKyJBU6cZfD3idmkA8Dqxhql4U +j56HoWpQ3NeaTq8Fs6ZxlJxxs1BgCscTnTgHhgKo6ahpJhiQq0ywTyOrOk+E2N/O +n+Fpb7vXQtdrROTHre5tQV9yWnEIN7N5ZaRZoJQ39wAvDcKSctrQOHLbFKhFxF0q +fbe01sTurM0TRLfJK91DACX6YblpalgjEbenM49WdVn1zSnXRrcKK2W200JvFbK4 +e/vv6V1T1TRaJwIDAQABo4G9MIG6MB8GA1UdIwQYMBaAFKFyXyYbKJhDlV0HN9WF +lp1L0sNFMB0GA1UdDgQWBBStvZh6NLQm9/rEJlTvA73gJMtUGjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zARBglghkgBhvhCAQEEBAMCAQIwRAYDVR0f +BD0wOzA5oDegNYYzaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly 
+c3QtSGFyZHdhcmUuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQByQhANOs4kClrwF8BW +onvUOGCSjRK52zYZgDXYNjDtmr5rJ6NyPFDNn+JxkLpjYetIFMTbSRe679Bt8m7a +gIAoQYFQtxMuyLnJegB2aEbQiIxh/tC21UcFF7ktdnDoTlA6w3pLuvunaI84Of3o +2YBrhzkTbCfaYk5JRlTpudW9DkUkHBsyx3nknPKnplkIGaK0jgn8E0n+SFabYaHk +I9LroYT/+JtLefh9lgBdAgVv0UPbzoGfuDsrk/Zh+UrgbLFpHoVnElhzbkh64Z0X +OGaJunQc68cCZu5HTn/aK7fBGMcVflRCXLVEQpU9PIAdGA8Ynvg684t8GMaKsRl1 +jIGZ -----END CERTIFICATE----- - 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC - i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC + 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware + i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware -----BEGIN CERTIFICATE----- -MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG -EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a +MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy +oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy 
+bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py --- a/lib-python/2.7/test/test_mailbox.py +++ b/lib-python/2.7/test/test_mailbox.py @@ -38,14 +38,9 @@ def _delete_recursively(self, target): # Delete a file or delete a directory recursively if os.path.isdir(target): - for path, dirs, files in os.walk(target, topdown=False): - for name in files: - os.remove(os.path.join(path, name)) - for name in dirs: - os.rmdir(os.path.join(path, name)) - os.rmdir(target) + test_support.rmtree(target) elif os.path.exists(target): - os.remove(target) + test_support.unlink(target) class TestMailbox(TestBase): @@ -137,6 +132,7 @@ msg = self._box.get(key1) self.assertEqual(msg['from'], 'foo') self.assertEqual(msg.fp.read(), '1') + msg.fp.close() def test_getitem(self): # Retrieve message using __getitem__() @@ -169,10 +165,14 @@ # Get file representations of messages key0 = self._box.add(self._template % 0) key1 = self._box.add(_sample_message) - self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'), + msg0 = self._box.get_file(key0) + self.assertEqual(msg0.read().replace(os.linesep, '\n'), self._template % 0) - self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'), + msg1 = self._box.get_file(key1) + self.assertEqual(msg1.read().replace(os.linesep, '\n'), _sample_message) + msg0.close() + msg1.close() def test_get_file_can_be_closed_twice(self): # Issue 11700 @@ -407,6 +407,7 @@ self._box.add(contents[0]) self._box.add(contents[1]) self._box.add(contents[2]) + oldbox = self._box method() if should_call_close: self._box.close() @@ -415,6 +416,7 @@ self.assertEqual(len(keys), 3) for key in keys: self.assertIn(self._box.get_string(key), contents) + oldbox.close() def test_dump_message(self): # Write message representations to disk @@ -1835,6 +1837,10 @@ def setUp(self): # create a new maildir mailbox to work with: self._dir = test_support.TESTFN + if os.path.isdir(self._dir): + test_support.rmtree(self._dir) + if os.path.isfile(self._dir): + test_support.unlink(self._dir) os.mkdir(self._dir) os.mkdir(os.path.join(self._dir, "cur")) os.mkdir(os.path.join(self._dir, "tmp")) @@ -1844,10 +1850,10 @@ def tearDown(self): map(os.unlink, self._msgfiles) - os.rmdir(os.path.join(self._dir, "cur")) - os.rmdir(os.path.join(self._dir, "tmp")) - os.rmdir(os.path.join(self._dir, "new")) - os.rmdir(self._dir) + test_support.rmdir(os.path.join(self._dir, "cur")) + test_support.rmdir(os.path.join(self._dir, "tmp")) + test_support.rmdir(os.path.join(self._dir, "new")) + test_support.rmdir(self._dir) def createMessage(self, dir, mbox=False): t = int(time.time() % 1000000) @@ -1883,7 +1889,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1891,7 +1899,9 @@ self.createMessage("new") self.mbox = 
mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 1) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1900,8 +1910,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) #self.assertTrue(len(self.mbox.boxes) == 2) - self.assertIsNot(self.mbox.next(), None) - self.assertIsNot(self.mbox.next(), None) + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() + msg = self.mbox.next() + self.assertIsNot(msg, None) + msg.fp.close() self.assertIs(self.mbox.next(), None) self.assertIs(self.mbox.next(), None) @@ -1910,11 +1924,13 @@ import email.parser fname = self.createMessage("cur", True) n = 0 - for msg in mailbox.PortableUnixMailbox(open(fname), + fid = open(fname) + for msg in mailbox.PortableUnixMailbox(fid, email.parser.Parser().parse): n += 1 self.assertEqual(msg["subject"], "Simple Test") self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE)) + fid.close() self.assertEqual(n, 1) ## End: classes from the original module (for backward compatibility). diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py --- a/lib-python/2.7/test/test_mmap.py +++ b/lib-python/2.7/test/test_mmap.py @@ -11,7 +11,7 @@ def setUp(self): if os.path.exists(TESTFN): - os.unlink(TESTFN) + unlink(TESTFN) def tearDown(self): try: diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py --- a/lib-python/2.7/test/test_multiprocessing.py +++ b/lib-python/2.7/test/test_multiprocessing.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +## FIXME: remove when https://bugs.pypy.org/issue1644 is resolved +import sys +if sys.platform.startswith('freebsd'): + raise Exception("This test hangs on FreeBSD. 
Test deactivated for now until https://bugs.pypy.org/issue1644 get resolved") + # # Unit tests for the multiprocessing package # diff --git a/lib-python/2.7/test/test_old_mailbox.py b/lib-python/2.7/test/test_old_mailbox.py --- a/lib-python/2.7/test/test_old_mailbox.py +++ b/lib-python/2.7/test/test_old_mailbox.py @@ -73,7 +73,9 @@ self.createMessage("cur") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -81,7 +83,9 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 1) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) @@ -90,8 +94,12 @@ self.createMessage("new") self.mbox = mailbox.Maildir(test_support.TESTFN) self.assertTrue(len(self.mbox) == 2) - self.assertTrue(self.mbox.next() is not None) - self.assertTrue(self.mbox.next() is not None) + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() + msg = self.mbox.next() + self.assertTrue(msg is not None) + msg.fp.close() self.assertTrue(self.mbox.next() is None) self.assertTrue(self.mbox.next() is None) diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -75,7 +75,7 @@ self.assertFalse(os.path.exists(name), "file already exists for temporary file") # make sure we can create the file - open(name, "w") + open(name, "w").close() self.files.append(name) def test_tempnam(self): diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -111,13 +111,12 @@ if test_support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") self.assertEqual(p['subject'], - ((('countryName', u'US'),), - (('stateOrProvinceName', u'Delaware'),), - (('localityName', u'Wilmington'),), - (('organizationName', u'Python Software Foundation'),), - (('organizationalUnitName', u'SSL'),), - (('commonName', u'somemachine.python.org'),)), + ((('countryName', 'XY'),), + (('localityName', 'Castle Anthrax'),), + (('organizationName', 'Python Software Foundation'),), + (('commonName', 'localhost'),)) ) + self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),)) # Issue #13034: the subjectAltName in some certificates # (notably projects.developer.nokia.com:443) wasn't parsed p = ssl._ssl._test_decode_cert(NOKIACERT) @@ -994,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py --- a/lib-python/2.7/test/test_support.py +++ b/lib-python/2.7/test/test_support.py @@ -179,15 +179,79 @@ except KeyError: pass +if sys.platform.startswith("win"): + def 
_waitfor(func, pathname, waitall=False): + # Peform the operation + func(pathname) + # Now setup the wait loop + if waitall: + dirname = pathname + else: + dirname, name = os.path.split(pathname) + dirname = dirname or '.' + # Check for `pathname` to be removed from the filesystem. + # The exponential backoff of the timeout amounts to a total + # of ~1 second after which the deletion is probably an error + # anyway. + # Testing on a i7 at 4.3GHz shows that usually only 1 iteration is + # required when contention occurs. + timeout = 0.001 + while timeout < 1.0: + # Note we are only testing for the existance of the file(s) in + # the contents of the directory regardless of any security or + # access rights. If we have made it this far, we have sufficient + # permissions to do that much using Python's equivalent of the + # Windows API FindFirstFile. + # Other Windows APIs can fail or give incorrect results when + # dealing with files that are pending deletion. + L = os.listdir(dirname) + if not (L if waitall else name in L): + return + # Increase the timeout and try again + time.sleep(timeout) + timeout *= 2 + warnings.warn('tests may fail, delete still pending for ' + pathname, + RuntimeWarning, stacklevel=4) + + def _unlink(filename): + _waitfor(os.unlink, filename) + + def _rmdir(dirname): + _waitfor(os.rmdir, dirname) + + def _rmtree(path): + def _rmtree_inner(path): + for name in os.listdir(path): + fullname = os.path.join(path, name) + if os.path.isdir(fullname): + _waitfor(_rmtree_inner, fullname, waitall=True) + os.rmdir(fullname) + else: + os.unlink(fullname) + _waitfor(_rmtree_inner, path, waitall=True) + _waitfor(os.rmdir, path) +else: + _unlink = os.unlink + _rmdir = os.rmdir + _rmtree = shutil.rmtree + def unlink(filename): try: - os.unlink(filename) + _unlink(filename) except OSError: pass +def rmdir(dirname): + try: + _rmdir(dirname) + except OSError as error: + # The directory need not exist. + if error.errno != errno.ENOENT: + raise + def rmtree(path): try: - shutil.rmtree(path) + _rmtree(path) except OSError, e: # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -300,26 +300,21 @@ def test_extract_hardlink(self): # Test hardlink extraction (e.g. bug #857297). 
- tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") + with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: + tar.extract("ustar/regtype", TEMPDIR) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) - tar.extract("ustar/regtype", TEMPDIR) - try: tar.extract("ustar/lnktype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("hardlink not extracted properly") + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) - data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) - - try: tar.extract("ustar/symtype", TEMPDIR) - except EnvironmentError, e: - if e.errno == errno.ENOENT: - self.fail("symlink not extracted properly") - - data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read() - self.assertEqual(md5sum(data), md5_regtype) + self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: + data = f.read() + self.assertEqual(md5sum(data), md5_regtype) def test_extractall(self): # Test if extractall() correctly restores directory permissions @@ -340,7 +335,7 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").write("") + open(empty, "wb").close() try: tar = object.__new__(tarfile.TarFile) @@ -351,7 +346,7 @@ else: self.fail("ReadError not raised") finally: - os.remove(empty) + test_support.unlink(empty) class StreamReadTest(CommonReadTest): @@ -1327,7 +1322,7 @@ def setUp(self): self.tarname = tmpname if os.path.exists(self.tarname): - os.remove(self.tarname) + test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): tar = tarfile.open(self.tarname, "a", fileobj=fileobj) diff --git a/lib-python/2.7/traceback.py b/lib-python/2.7/traceback.py --- a/lib-python/2.7/traceback.py +++ b/lib-python/2.7/traceback.py @@ -107,7 +107,7 @@ return list -def print_exception(etype, value, tb, limit=None, file=None): +def print_exception(etype, value, tb, limit=None, file=None, _encoding=None): """Print exception up to 'limit' stack trace entries from 'tb' to 'file'. This differs from print_tb() in the following ways: (1) if @@ -123,7 +123,7 @@ if tb: _print(file, 'Traceback (most recent call last):') print_tb(tb, limit, file) - lines = format_exception_only(etype, value) + lines = format_exception_only(etype, value, _encoding) for line in lines: _print(file, line, '') @@ -144,7 +144,7 @@ list = list + format_exception_only(etype, value) return list -def format_exception_only(etype, value): +def format_exception_only(etype, value, _encoding=None): """Format the exception part of a traceback. The arguments are the exception type and value such as given by @@ -170,12 +170,12 @@ if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or etype is None or type(etype) is str): - return [_format_final_exc_line(etype, value)] + return [_format_final_exc_line(etype, value, _encoding)] stype = etype.__name__ if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] + return [_format_final_exc_line(stype, value, _encoding)] # It was a syntax error; show exactly where the problem was found. 
lines = [] @@ -196,26 +196,26 @@ lines.append(' %s^\n' % ''.join(caretspace)) value = msg - lines.append(_format_final_exc_line(stype, value)) + lines.append(_format_final_exc_line(stype, value, _encoding)) return lines -def _format_final_exc_line(etype, value): +def _format_final_exc_line(etype, value, _encoding=None): """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) + valuestr = _some_str(value, _encoding) if value is None or not valuestr: line = "%s\n" % etype else: line = "%s: %s\n" % (etype, valuestr) return line -def _some_str(value): +def _some_str(value, _encoding=None): try: return str(value) except Exception: pass try: value = unicode(value) - return value.encode("ascii", "backslashreplace") + return value.encode(_encoding or "ascii", "backslashreplace") except Exception: pass return '' % type(value).__name__ diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py --- a/lib-python/2.7/uuid.py +++ b/lib-python/2.7/uuid.py @@ -44,6 +44,8 @@ UUID('00010203-0405-0607-0809-0a0b0c0d0e0f') """ +import struct + __author__ = 'Ka-Ping Yee ' RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [ @@ -125,25 +127,39 @@ overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. """ - if [hex, bytes, bytes_le, fields, int].count(None) != 4: - raise TypeError('need one of hex, bytes, bytes_le, fields, or int') if hex is not None: + if (bytes is not None or bytes_le is not None or + fields is not None or int is not None): + raise TypeError('if the hex argument is given, bytes,' + ' bytes_le, fields, and int need to be None') hex = hex.replace('urn:', '').replace('uuid:', '') hex = hex.strip('{}').replace('-', '') if len(hex) != 32: raise ValueError('badly formed hexadecimal UUID string') int = long(hex, 16) - if bytes_le is not None: + elif bytes_le is not None: + if bytes is not None or fields is not None or int is not None: + raise TypeError('if the bytes_le argument is given, bytes,' + ' fields, and int need to be None') if len(bytes_le) != 16: raise ValueError('bytes_le is not a 16-char string') bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] + bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] + bytes_le[8:]) - if bytes is not None: + int = (struct.unpack('>Q', bytes[:8])[0] << 64 | + struct.unpack('>Q', bytes[8:])[0]) + elif bytes is not None: + if fields is not None or int is not None: + raise TypeError('if the bytes argument is given, fields ' + 'and int need to be None') if len(bytes) != 16: raise ValueError('bytes is not a 16-char string') - int = long(('%02x'*16) % tuple(map(ord, bytes)), 16) - if fields is not None: + int = (struct.unpack('>Q', bytes[:8])[0] << 64 | + struct.unpack('>Q', bytes[8:])[0]) + elif fields is not None: + if int is not None: + raise TypeError('if the fields argument is given, int needs' + ' to be None') if len(fields) != 6: raise ValueError('fields is not a 6-tuple') (time_low, time_mid, time_hi_version, @@ -163,9 +179,12 @@ clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low int = ((time_low << 96L) | (time_mid << 80L) | (time_hi_version << 64L) | (clock_seq << 48L) | node) - if int is not None: + elif int is not None: if not 0 <= int < 1<<128L: raise ValueError('int is out of range (need a 128-bit value)') + else: + raise TypeError('one of hex, bytes, bytes_le, fields,' + ' or int need to be not None') if version is not None: if not 1 <= version <= 5: raise ValueError('illegal version number') @@ -175,7 +194,7 @@ # Set the version number. 
int &= ~(0xf000 << 64L) int |= version << 76L - self.__dict__['int'] = int + object.__setattr__(self, 'int', int) def __cmp__(self, other): if isinstance(other, UUID): diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py --- a/lib-python/2.7/weakref.py +++ b/lib-python/2.7/weakref.py @@ -48,7 +48,14 @@ def remove(wr, selfref=ref(self)): self = selfref() if self is not None: - del self.data[wr.key] + # Changed this for PyPy: made more resistent. The + # issue is that in some corner cases, self.data + # might already be changed or removed by the time + # this weakref's callback is called. If that is + # the case, we don't want to randomly kill an + # unrelated entry. + if self.data.get(wr.key) is wr: + del self.data[wr.key] self._remove = remove UserDict.UserDict.__init__(self, *args, **kw) @@ -160,22 +167,26 @@ try: o = self.data.pop(key)() except KeyError: + o = None + if o is None: if args: return args[0] - raise - if o is None: raise KeyError, key else: return o + # The logic above was fixed in PyPy def setdefault(self, key, default=None): try: - wr = self.data[key] + o = self.data[key]() except KeyError: + o = None + if o is None: self.data[key] = KeyedRef(default, self._remove, key) return default else: - return wr() + return o + # The logic above was fixed in PyPy def update(self, dict=None, **kwargs): d = self.data diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi 
import alt as _ffi import sys try: from __pypy__ import builtinify diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -5,7 +5,7 @@ from _ctypes.basics import is_struct_shape from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys import traceback @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. 
if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). 
It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import sizeof, byref, as_ffi_pointer diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi import weakref import sys diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -2,6 +2,8 @@ import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject +from _ctypes.array import Array +from _ctypes.pointer import _Pointer import inspect def names_and_fields(self, _fields_, superclass, anonymous_fields=None): @@ -104,8 +106,11 @@ def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = keepalive_key(self.num) + key = keepalive_key(self.num) + if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array): + # if our value is an Array we need the whole thing alive + store_reference(obj, key, cobj) + elif ensure_objects(cobj) is not None: store_reference(obj, key, cobj._objects) arg = cobj._get_buffer_value() if fieldtype._fficompositesize is not None: diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -1,6 +1,9 @@ """Reimplementation of the standard extension module '_curses' using cffi.""" import sys +if sys.platform == 'win32': + #This module does not exist in windows + raise ImportError('No module named _curses') from functools import wraps from cffi import FFI diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,2 @@ +# Backward compatibility hack +from _rawffi.alt import * diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = 
os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. @@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -268,10 +268,18 @@ if _has_load_extension(): _ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);") -_lib = _ffi.verify(""" -#include -""", libraries=['sqlite3'] -) +if sys.platform.startswith('freebsd'): + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'], + include_dirs=['/usr/local/include'], + library_dirs=['/usr/local/lib'] + ) +else: + _lib = _ffi.verify(""" + #include + """, libraries=['sqlite3'] + ) exported_sqlite_symbols = [ 'SQLITE_ALTER_TABLE', @@ -322,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -363,9 +379,11 @@ pass -def connect(database, **kwargs): - factory = kwargs.get("factory", Connection) - return factory(database, **kwargs) +def connect(database, timeout=5.0, detect_types=0, isolation_level="", + check_same_thread=True, factory=None, cached_statements=100): + factory = Connection if not factory else factory + return factory(database, timeout, detect_types, isolation_level, + check_same_thread, factory, cached_statements) def _unicode_text_factory(x): @@ -982,13 +1000,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1011,12 +1034,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if 
self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1166,11 +1194,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1183,7 +1219,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1229,7 +1265,10 @@ if cvt is not None: param = cvt(param) - param = adapt(param) + try: + param = adapt(param) + except: + pass # And use previous value if param is None: rc = _lib.sqlite3_bind_null(self._statement, idx) @@ -1299,7 +1338,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -22,6 +22,7 @@ READABLE = tklib.TCL_READABLE WRITABLE = tklib.TCL_WRITABLE EXCEPTION = tklib.TCL_EXCEPTION +DONT_WAIT = tklib.TCL_DONT_WAIT def create(screenName=None, baseName=None, className=None, interactive=False, wantobjects=False, wantTk=True, diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -4,7 +4,23 @@ from . 
import TclError from .tclobj import TclObject, FromObj, AsObj, TypeCache +import contextlib import sys +import threading +import time + + +class _DummyLock(object): + "A lock-like object that does not do anything" + def acquire(self): + pass + def release(self): + pass + def __enter__(self): + pass + def __exit__(self, *exc): + pass + def varname_converter(input): if isinstance(input, TclObject): @@ -37,17 +53,18 @@ def PythonCmd(clientData, interp, argc, argv): self = tkffi.from_handle(clientData) assert self.app.interp == interp - try: - args = [tkffi.string(arg) for arg in argv[1:argc]] - result = self.func(*args) - obj = AsObj(result) - tklib.Tcl_SetObjResult(interp, obj) - except: - self.app.errorInCmd = True - self.app.exc_info = sys.exc_info() - return tklib.TCL_ERROR - else: - return tklib.TCL_OK + with self.app._tcl_lock_released(): + try: + args = [tkffi.string(arg) for arg in argv[1:argc]] + result = self.func(*args) + obj = AsObj(result) + tklib.Tcl_SetObjResult(interp, obj) + except: + self.app.errorInCmd = True + self.app.exc_info = sys.exc_info() + return tklib.TCL_ERROR + else: + return tklib.TCL_OK @tkffi.callback("Tcl_CmdDeleteProc") def PythonCmdDelete(clientData): @@ -58,6 +75,8 @@ class TkApp(object): + _busywaitinterval = 0.02 # 20ms. + def __new__(cls, screenName, baseName, className, interactive, wantobjects, wantTk, sync, use): if not wantobjects: @@ -73,6 +92,12 @@ self.quitMainLoop = False self.errorInCmd = False + if not self.threaded: + # TCL is not thread-safe, calls needs to be serialized. + self._tcl_lock = threading.Lock() + else: + self._tcl_lock = _DummyLock() + self._typeCache = TypeCache() self._commands = {} @@ -133,6 +158,13 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise RuntimeError("Calling Tcl from different appartment") + @contextlib.contextmanager + def _tcl_lock_released(self): + "Context manager to temporarily release the tcl lock." + self._tcl_lock.release() + yield + self._tcl_lock.acquire() + def loadtk(self): # We want to guard against calling Tk_Init() multiple times err = tklib.Tcl_Eval(self.interp, "info exists tk_version") @@ -159,22 +191,25 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) - if not res: - self.raiseTclError() - assert self._wantobjects - return FromObj(self, res) + with self._tcl_lock: + res = tklib.Tcl_GetVar2Ex(self.interp, name1, name2, flags) + if not res: + self.raiseTclError() + assert self._wantobjects + return FromObj(self, res) def _setvar(self, name1, value, global_only=False): name1 = varname_converter(name1) + # XXX Acquire tcl lock??? 
newval = AsObj(value) flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, - newval, flags) - if not res: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_SetVar2Ex(self.interp, name1, tkffi.NULL, + newval, flags) + if not res: + self.raiseTclError() def _unsetvar(self, name1, name2=None, global_only=False): name1 = varname_converter(name1) @@ -183,9 +218,10 @@ flags=tklib.TCL_LEAVE_ERR_MSG if global_only: flags |= tklib.TCL_GLOBAL_ONLY - res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() + with self._tcl_lock: + res = tklib.Tcl_UnsetVar2(self.interp, name1, name2, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() def getvar(self, name1, name2=None): return self._var_invoke(self._getvar, name1, name2) @@ -219,9 +255,10 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_CreateCommand( - self.interp, cmdName, _CommandData.PythonCmd, - clientData, _CommandData.PythonCmdDelete) + with self._tcl_lock: + res = tklib.Tcl_CreateCommand( + self.interp, cmdName, _CommandData.PythonCmd, + clientData, _CommandData.PythonCmdDelete) if not res: raise TclError("can't create Tcl command") @@ -229,7 +266,8 @@ if self.threaded and self.thread_id != tklib.Tcl_GetCurrentThread(): raise NotImplementedError("Call from another thread") - res = tklib.Tcl_DeleteCommand(self.interp, cmdName) + with self._tcl_lock: + res = tklib.Tcl_DeleteCommand(self.interp, cmdName) if res == -1: raise TclError("can't delete Tcl command") @@ -256,11 +294,12 @@ tklib.Tcl_IncrRefCount(obj) objects[i] = obj - res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) - if res == tklib.TCL_ERROR: - self.raiseTclError() - else: - result = self._callResult() + with self._tcl_lock: + res = tklib.Tcl_EvalObjv(self.interp, argc, objects, flags) + if res == tklib.TCL_ERROR: + self.raiseTclError() + else: + result = self._callResult() finally: for obj in objects: if obj: @@ -280,17 +319,19 @@ def eval(self, script): self._check_tcl_appartment() - res = tklib.Tcl_Eval(self.interp, script) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_Eval(self.interp, script) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def evalfile(self, filename): self._check_tcl_appartment() - res = tklib.Tcl_EvalFile(self.interp, filename) - if res == tklib.TCL_ERROR: - self.raiseTclError() - return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + with self._tcl_lock: + res = tklib.Tcl_EvalFile(self.interp, filename) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) def split(self, arg): if isinstance(arg, tuple): @@ -375,7 +416,10 @@ if self.threaded: result = tklib.Tcl_DoOneEvent(0) else: - raise NotImplementedError("TCL configured without threads") + with self._tcl_lock: + result = tklib.Tcl_DoOneEvent(tklib.TCL_DONT_WAIT) + if result == 0: + time.sleep(self._busywaitinterval) if result < 0: break diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -28,9 +28,11 @@ return result elif value.typePtr == typeCache.BooleanType: - return result + return bool(value.internalRep.longValue) 
elif value.typePtr == typeCache.ByteArrayType: - return result + size = tkffi.new('int*') + data = tklib.Tcl_GetByteArrayFromObj(value, size) + return tkffi.buffer(data, size[0])[:] elif value.typePtr == typeCache.DoubleType: return value.internalRep.doubleValue elif value.typePtr == typeCache.IntType: @@ -50,7 +52,7 @@ From noreply at buildbot.pypy.org Thu Jan 23 23:08:42 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Thu, 23 Jan 2014 23:08:42 +0100 (CET) Subject: [pypy-commit] pypy refine-testrunner: testrunner: simplify and fix interpret_exitcode tests Message-ID: <20140123220842.7803C1C33EC@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: refine-testrunner Changeset: r68892:a58339b0503c Date: 2014-01-23 23:02 +0100 http://bitbucket.org/pypy/pypy/changeset/a58339b0503c/ Log: testrunner: simplify and fix interpret_exitcode tests diff --git a/testrunner/test/test_util.py b/testrunner/test/test_util.py --- a/testrunner/test/test_util.py +++ b/testrunner/test/test_util.py @@ -105,32 +105,28 @@ out = out.read() assert out == "42\n" +def make_test(id, input, expected): + def test_interpret_exitcode(): + print(input) + print(expected) + failure, extralog = util.interpret_exitcode( + input[0], 'test_foo', input[1]) + assert (failure, extralog) == expected + test_interpret_exitcode.__name__ += str(id) + globals()[test_interpret_exitcode.__name__] = test_interpret_exitcode -def test_interpret_exitcode(): - failure, extralog = util.interpret_exitcode(0, "test_foo", '') - assert not failure - assert extralog == "" +cases = [ + # input expected output + # exit, logdata, failure, extralog + (0, '', False, ''), + (1, '', True, "! test_foo\n Exit code 1.\n"), + (1, 'F foo\n', True, ' (somefailed=True in test_foo)\n'), + (2, '', True, "! test_foo\n Exit code 2.\n"), + (-signal.SIGSEGV, '', True, "! test_foo\n Killed by SIGSEGV.\n"), - failure, extralog = util.interpret_exitcode(1, "test_foo", "") - assert failure - assert extralog == """! test_foo - Exit code 1. -""" - assert extralog == " (somefailed=True in test_foo)\n" #xXX find location +] - failure, extralog = util.interpret_exitcode(1, "test_foo", "F Foo\n") - assert failure - assert extralog == "" +for n, i in enumerate(cases): + make_test(n, i[:2], i[2:]) - failure, extralog = util.interpret_exitcode(2, "test_foo") - assert failure - assert extralog == """! test_foo - Exit code 2. -""" - failure, extralog = util.interpret_exitcode(-signal.SIGSEGV, - "test_foo") - assert failure - assert extralog == """! test_foo - Killed by SIGSEGV. -""" From noreply at buildbot.pypy.org Thu Jan 23 23:15:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 23 Jan 2014 23:15:06 +0100 (CET) Subject: [pypy-commit] pypy default: Bah, another try at 9bfd0a649773. See comment. Message-ID: <20140123221506.7100A1C00F8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68893:49c649807414 Date: 2014-01-23 23:14 +0100 http://bitbucket.org/pypy/pypy/changeset/49c649807414/ Log: Bah, another try at 9bfd0a649773. See comment. 
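
The changeset below replaces the C-side _Py_init_bufferobject / _Py_init_pycobject /
_Py_init_capsule functions, which called PyType_Ready() from C, with plain
_Py_get_*_type getters that only hand back a pointer to the statically defined
PyTypeObject; the readying step is then driven from RPython by the new
py_type_ready() helper, so the C calls can be declared with _nowrapper=True and
nothing releases the GIL while the cpyext module initializes.  As a rough
illustration of that pattern only -- a simplified plain-Python sketch, not the
actual cpyext code; the class and flag value are made up for the example:

    Py_TPFLAGS_READY = 1 << 12            # illustrative value only

    class FakeTypeObject(object):         # stand-in for a C PyTypeObject
        def __init__(self, name):
            self.name = name
            self.c_tp_flags = 0

    _buffer_type = FakeTypeObject("buffer")

    def get_buffer_type():
        # stand-in for the C getter _Py_get_buffer_type(): it only returns a
        # pointer to a static structure and never calls back into the
        # interpreter, so no GIL handling is needed around the call
        return _buffer_type

    def type_realize(pto):
        print "realizing", pto.name       # cpyext builds the W_TypeObject here

    def py_type_ready(pto):
        # idempotent, as in the diff; here the flag is set explicitly, while
        # in cpyext it is type_realize() that ends up setting it
        if pto.c_tp_flags & Py_TPFLAGS_READY:
            return
        type_realize(pto)
        pto.c_tp_flags |= Py_TPFLAGS_READY

    py_type_ready(get_buffer_type())      # realizes the type
    py_type_ready(get_buffer_type())      # second call is a no-op
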
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_get_buffer_type', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', '_Py_init_pycobject', + 'PyCObject_Type', '_Py_get_cobject_type', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_get_capsule_type', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -691,17 +691,25 @@ prefix = 'PyPy' else: prefix = 'cpyexttest' - init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - INIT_FUNCTIONS.extend([ - lambda space: init_buffer(), - lambda space: init_pycobject(), - lambda space: init_capsule(), - ]) + # jump through hoops to avoid releasing the GIL during initialization + # of the cpyext module. The C functions are called with no wrapper, + # but must not do anything like calling back PyType_Ready(). We + # use them just to get a pointer to the PyTypeObjects defined in C. 
+ get_buffer_type = rffi.llexternal('_%s_get_buffer_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + get_cobject_type = rffi.llexternal('_%s_get_cobject_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + get_capsule_type = rffi.llexternal('_%s_get_capsule_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + def init_types(space): + from pypy.module.cpyext.typeobject import py_type_ready + py_type_ready(space, get_buffer_type()) + py_type_ready(space, get_cobject_type()) + py_type_ready(space, get_capsule_type()) + INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, compilation_info=eci) diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,9 +783,9 @@ return size; } -void _Py_init_bufferobject(void) +PyTypeObject *_Py_get_buffer_type(void) { - PyType_Ready(&PyBuffer_Type); + return &PyBuffer_Type; } static PySequenceMethods buffer_as_sequence = { diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,8 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void _Py_init_capsule() +PyTypeObject *_Py_get_capsule_type(void) { - PyType_Ready(&PyCapsule_Type); + return &PyCapsule_Type; } - diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void _Py_init_pycobject() +PyTypeObject *_Py_get_cobject_type(void) { - PyType_Ready(&PyCObject_Type); + return &PyCObject_Type; } diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -549,11 +549,14 @@ pto.c_tp_flags |= Py_TPFLAGS_READY return pto +def py_type_ready(space, pto): + if pto.c_tp_flags & Py_TPFLAGS_READY: + return + type_realize(space, rffi.cast(PyObject, pto)) + @cpython_api([PyTypeObjectPtr], rffi.INT_real, error=-1) def PyType_Ready(space, pto): - if pto.c_tp_flags & Py_TPFLAGS_READY: - return 0 - type_realize(space, rffi.cast(PyObject, pto)) + py_type_ready(space, pto) return 0 def type_realize(space, py_obj): From noreply at buildbot.pypy.org Thu Jan 23 23:45:31 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 23 Jan 2014 23:45:31 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: merged default in Message-ID: <20140123224531.F399D1C0962@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68894:2c48224485a6 Date: 2014-01-23 16:44 -0600 http://bitbucket.org/pypy/pypy/changeset/2c48224485a6/ Log: merged default in diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_get_buffer_type', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 
'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', '_Py_init_pycobject', + 'PyCObject_Type', '_Py_get_cobject_type', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_get_capsule_type', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -691,17 +691,25 @@ prefix = 'PyPy' else: prefix = 'cpyexttest' - init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - INIT_FUNCTIONS.extend([ - lambda space: init_buffer(), - lambda space: init_pycobject(), - lambda space: init_capsule(), - ]) + # jump through hoops to avoid releasing the GIL during initialization + # of the cpyext module. The C functions are called with no wrapper, + # but must not do anything like calling back PyType_Ready(). We + # use them just to get a pointer to the PyTypeObjects defined in C. + get_buffer_type = rffi.llexternal('_%s_get_buffer_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + get_cobject_type = rffi.llexternal('_%s_get_cobject_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + get_capsule_type = rffi.llexternal('_%s_get_capsule_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + def init_types(space): + from pypy.module.cpyext.typeobject import py_type_ready + py_type_ready(space, get_buffer_type()) + py_type_ready(space, get_cobject_type()) + py_type_ready(space, get_capsule_type()) + INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, compilation_info=eci) diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,9 +783,9 @@ return size; } -void _Py_init_bufferobject(void) +PyTypeObject *_Py_get_buffer_type(void) { - PyType_Ready(&PyBuffer_Type); + return &PyBuffer_Type; } static PySequenceMethods buffer_as_sequence = { diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,8 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void _Py_init_capsule() +PyTypeObject *_Py_get_capsule_type(void) { - PyType_Ready(&PyCapsule_Type); + return &PyCapsule_Type; } - diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void _Py_init_pycobject() +PyTypeObject *_Py_get_cobject_type(void) { - PyType_Ready(&PyCObject_Type); + return &PyCObject_Type; } diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -549,11 +549,14 @@ pto.c_tp_flags |= 
Py_TPFLAGS_READY
     return pto
 
+def py_type_ready(space, pto):
+    if pto.c_tp_flags & Py_TPFLAGS_READY:
+        return
+    type_realize(space, rffi.cast(PyObject, pto))
+
 @cpython_api([PyTypeObjectPtr], rffi.INT_real, error=-1)
 def PyType_Ready(space, pto):
-    if pto.c_tp_flags & Py_TPFLAGS_READY:
-        return 0
-    type_realize(space, rffi.cast(PyObject, pto))
+    py_type_ready(space, pto)
     return 0
 
 def type_realize(space, py_obj):

From noreply at buildbot.pypy.org Thu Jan 23 23:47:37 2014
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Thu, 23 Jan 2014 23:47:37 +0100 (CET)
Subject: [pypy-commit] pypy detect-immutable-fields: Removed some nonsense isvirtual calls
Message-ID: <20140123224737.A28301C0962@cobra.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: detect-immutable-fields
Changeset: r68895:a24d7051b582
Date: 2014-01-23 16:47 -0600
http://bitbucket.org/pypy/pypy/changeset/a24d7051b582/

Log:	Removed some nonsense isvirtual calls

diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -468,7 +468,7 @@
 
     def _mapdict_read_storage(self, index, pure=False):
         assert index >= 0
-        if pure and jit.isconstant(index) and (jit.isconstant(self) or jit.isvirtual(self)):
+        if pure and jit.isconstant(index) and jit.isconstant(self):
             return self._pure_mapdict_read_storage(index)
         return self.storage[index]
 
@@ -545,7 +545,7 @@
 
     def _mapdict_read_storage(self, index, pure=False):
         assert index >= 0
-        if pure and jit.isconstant(index) and (jit.isconstant(self) or jit.isvirtual(self)):
+        if pure and jit.isconstant(index) and jit.isconstant(self):
             return self._pure_mapdict_read_storage(index)
         return self._indirection_mapdict_read_storage(index)

From noreply at buildbot.pypy.org Fri Jan 24 01:30:06 2014
From: noreply at buildbot.pypy.org (alex_gaynor)
Date: Fri, 24 Jan 2014 01:30:06 +0100 (CET)
Subject: [pypy-commit] pypy detect-immutable-fields: Extra guard_not_invalidated
Message-ID: <20140124003006.47AD91C00F8@cobra.cs.uni-duesseldorf.de>

Author: Alex Gaynor
Branch: detect-immutable-fields
Changeset: r68896:fa101e09da9d
Date: 2014-01-23 18:29 -0600
http://bitbucket.org/pypy/pypy/changeset/fa101e09da9d/

Log:	Extra guard_not_invalidated

diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py
--- a/pypy/module/pypyjit/test_pypy_c/test_thread.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py
@@ -42,6 +42,7 @@
         assert loop.match("""
             i53 = int_lt(i48, i27)
             guard_true(i53, descr=...)
+            guard_not_invalidated(descr=...)
             i54 = int_add_ovf(i48, i47)
             guard_no_overflow(descr=...)
             --TICK--

From noreply at buildbot.pypy.org Fri Jan 24 09:41:56 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Fri, 24 Jan 2014 09:41:56 +0100 (CET)
Subject: [pypy-commit] pypy stdlib-2.7.5: Finish fixing rsre in this branch according to 7b0cafed5689.
Message-ID: <20140124084156.589701C00F8@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: stdlib-2.7.5
Changeset: r68897:bfa0306f27f6
Date: 2014-01-24 09:40 +0100
http://bitbucket.org/pypy/pypy/changeset/bfa0306f27f6/

Log:	Finish fixing rsre in this branch according to 7b0cafed5689.
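
The functional core of the patch below is that MAXREPEAT stops being the
hard-coded 65535: rsre_char now computes a word-size-dependent limit, and the
rpy/ copies of the sre modules import it from there, matching the CPython 2.7.5
stdlib that this branch tracks.  A minimal sketch of the same computation in
plain Python 2 -- intmask() from rpython.rlib.rarithmetic truncates to a signed
machine word, so a plain int() stands in for it here:

    import sys

    def _compute_maxrepeat():
        # mirrors the new rsre_char logic: 2**32 - 1 on 64-bit builds,
        # 2**31 - 1 where the value must still fit a 32-bit signed word
        if sys.maxint > 2**32:
            return int(2**32 - 1)
        return int(2**31 - 1)

    MAXREPEAT = _compute_maxrepeat()
    # echoes the new sanity check added to rsre_core in this patch: the old
    # value 65535 must no longer show up in compiled patterns
    assert MAXREPEAT != 65535
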
diff --git a/rpython/rlib/rsre/rpy/__init__.py b/rpython/rlib/rsre/rpy/__init__.py
--- a/rpython/rlib/rsre/rpy/__init__.py
+++ b/rpython/rlib/rsre/rpy/__init__.py
@@ -1,1 +1,1 @@
-from ._sre import get_code
+from ._sre import get_code, VERSION
diff --git a/rpython/rlib/rsre/rpy/_sre.py b/rpython/rlib/rsre/rpy/_sre.py
--- a/rpython/rlib/rsre/rpy/_sre.py
+++ b/rpython/rlib/rsre/rpy/_sre.py
@@ -1,9 +1,10 @@
-
+import sys
 from rpython.rlib.rsre import rsre_char
 from rpython.rlib.rarithmetic import intmask
 
-
+VERSION = "2.7.5"
 MAGIC = 20031017
+MAXREPEAT = rsre_char.MAXREPEAT
 CODESIZE = rsre_char.CODESIZE
 getlower = rsre_char.getlower
 
diff --git a/rpython/rlib/rsre/rpy/sre_compile.py b/rpython/rlib/rsre/rpy/sre_compile.py
--- a/rpython/rlib/rsre/rpy/sre_compile.py
+++ b/rpython/rlib/rsre/rpy/sre_compile.py
@@ -8,12 +8,12 @@
 # See the sre.py file for information on usage and redistribution.
 #
 
-"""Internal support module for sre (copied from CPython 2.7.3)"""
+"""Internal support module for sre (copied from CPython 2.7.5)"""
 
 import sys
 from . import _sre, sre_parse
 from .sre_constants import *
-from _sre import MAXREPEAT
+from ._sre import MAXREPEAT
 
 assert _sre.MAGIC == MAGIC, "SRE module mismatch"
 
diff --git a/rpython/rlib/rsre/rpy/sre_constants.py b/rpython/rlib/rsre/rpy/sre_constants.py
--- a/rpython/rlib/rsre/rpy/sre_constants.py
+++ b/rpython/rlib/rsre/rpy/sre_constants.py
@@ -9,13 +9,13 @@
 # See the sre.py file for information on usage and redistribution.
 #
 
-"""Internal support module for sre (copied from CPython 2.7.3)"""
+"""Internal support module for sre (copied from CPython 2.7.5)"""
 
 # update when constants are added or removed
 
 MAGIC = 20031017
 
-from _sre import MAXREPEAT
+from ._sre import MAXREPEAT
 
 # SRE standard exception (access as sre.error)
 # (use the real re.error exception class)
diff --git a/rpython/rlib/rsre/rpy/sre_parse.py b/rpython/rlib/rsre/rpy/sre_parse.py
--- a/rpython/rlib/rsre/rpy/sre_parse.py
+++ b/rpython/rlib/rsre/rpy/sre_parse.py
@@ -8,14 +8,14 @@
 # See the sre.py file for information on usage and redistribution.
 #
 
-"""Internal support module for sre (copied from CPython 2.7.3)"""
+"""Internal support module for sre (copied from CPython 2.7.5)"""
 
 # XXX: show string offset and offending character for all errors
 
 import sys
 
 from .sre_constants import *
-from _sre import MAXREPEAT
+from ._sre import MAXREPEAT
 
 SPECIAL_CHARS = ".\\[{()*+?^$|"
 REPEAT_CHARS = "*+?{"
diff --git a/rpython/rlib/rsre/rsre_char.py b/rpython/rlib/rsre/rsre_char.py
--- a/rpython/rlib/rsre/rsre_char.py
+++ b/rpython/rlib/rsre/rsre_char.py
@@ -5,7 +5,7 @@
 from rpython.rlib.rlocale import tolower, isalnum
 from rpython.rlib.unroll import unrolling_iterable
 from rpython.rlib import jit
-from rpython.rlib.rarithmetic import int_between
+from rpython.rlib.rarithmetic import int_between, intmask
 
 # Note: the unicode parts of this module require you to call
 # rsre_char.set_unicode_db() first, to select one of the modules
@@ -26,7 +26,10 @@
 
 # Identifying as _sre from Python 2.3 and onwards (at least up to 2.7)
 MAGIC = 20031017
-MAXREPEAT = 65535
+if sys.maxint > 2**32:
+    MAXREPEAT = intmask(2**32 - 1)
+else:
+    MAXREPEAT = intmask(2**31 - 1)
 
 # In _sre.c this is bytesize of the code word type of the C implementation.
# There it's 2 for normal Python builds and more for wide unicode builds (large diff --git a/rpython/rlib/rsre/rsre_core.py b/rpython/rlib/rsre/rsre_core.py --- a/rpython/rlib/rsre/rsre_core.py +++ b/rpython/rlib/rsre/rsre_core.py @@ -95,6 +95,10 @@ self.match_start = match_start self.end = end self.flags = flags + # check we don't get the old value of MAXREPEAT + # during the untranslated tests + if not we_are_translated(): + assert 65535 not in pattern def reset(self, start): self.match_start = start diff --git a/rpython/rlib/rsre/test/targetrsre.py b/rpython/rlib/rsre/test/targetrsre.py --- a/rpython/rlib/rsre/test/targetrsre.py +++ b/rpython/rlib/rsre/test/targetrsre.py @@ -1,14 +1,15 @@ #!/usr/bin/env python from rpython.rlib.rarithmetic import intmask from rpython.rlib.rsre import rsre_core +from rpython.rlib.rsre.rsre_char import MAXREPEAT import os, time # \s*(.*?) r_code1 = [17, 18, 1, 21, 131091, 6, 6, 60, 105, 116, 101, 109, 62, 0, 0, 0, 0, 0, 0, 19, 60, 19, 105, 19, 116, 19, 101, 19, 109, 19, 62, 29, -9, 0, 65535, 15, 4, 9, 2, 0, 1, 19, 60, 19, 116, 19, 105, 19, 116, 19, -108, 19, 101, 19, 62, 21, 0, 31, 5, 0, 65535, 2, 1, 21, 1, 19, 60, 19, +9, 0, MAXREPEAT, 15, 4, 9, 2, 0, 1, 19, 60, 19, 116, 19, 105, 19, 116, 19, +108, 19, 101, 19, 62, 21, 0, 31, 5, 0, MAXREPEAT, 2, 1, 21, 1, 19, 60, 19, 47, 19, 116, 19, 105, 19, 116, 19, 108, 19, 101, 19, 62, 1] diff --git a/rpython/rlib/rsre/test/test_match.py b/rpython/rlib/rsre/test/test_match.py --- a/rpython/rlib/rsre/test/test_match.py +++ b/rpython/rlib/rsre/test/test_match.py @@ -1,6 +1,6 @@ -import re, random +import re, random, py from rpython.rlib.rsre import rsre_core -from rpython.rlib.rsre.rpy import get_code +from rpython.rlib.rsre.rpy import get_code, VERSION def get_code_and_re(regexp): @@ -239,6 +239,9 @@ assert rsre_core.match(r, "x") def test_match_bug3(self): + if VERSION == "2.7.5": + py.test.skip("pattern fails to compile with exactly 2.7.5 " + "(works on 2.7.3 and on 2.7.trunk though)") r = get_code(r'([ax]*?x*)?$') assert rsre_core.match(r, "aaxaa") @@ -257,3 +260,10 @@ assert res is not None else: assert res is None + + def test_simple_match_1(self): + r = get_code(r"ab*bbbbbbbc") + print r + match = rsre_core.match(r, "abbbbbbbbbcdef") + assert match + assert match.match_end == 11 From noreply at buildbot.pypy.org Fri Jan 24 10:18:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Jan 2014 10:18:03 +0100 (CET) Subject: [pypy-commit] pypy default: Import directly "_rawffi.alt" from here Message-ID: <20140124091803.1A12B1C06CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68898:2f6abb55cbfd Date: 2014-01-24 10:17 +0100 http://bitbucket.org/pypy/pypy/changeset/2f6abb55cbfd/ Log: Import directly "_rawffi.alt" from here diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -7,9 +7,9 @@ from rpython.rlib.test.test_clibffi import get_libm_name def main(libm_name): try: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types except ImportError: - sys.stderr.write('SKIP: cannot import _ffi\n') + sys.stderr.write('SKIP: cannot import _rawffi.alt\n') return 0 libm = CDLL(libm_name) @@ -45,9 +45,9 @@ from rpython.rlib.test.test_clibffi import get_libm_name def main(libm_name): try: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types except ImportError: - sys.stderr.write('SKIP: cannot import _ffi\n') + 
sys.stderr.write('SKIP: cannot import _rawffi.alt\n') return 0 libm = CDLL(libm_name) @@ -82,12 +82,12 @@ from threading import Thread # if os.name == 'nt': - from _ffi import WinDLL, types + from _rawffi.alt import WinDLL, types libc = WinDLL('Kernel32.dll') sleep = libc.getfunc('Sleep', [types.uint], types.uint) delays = [0]*n + [1000] else: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libc = CDLL(libc_name) sleep = libc.getfunc('sleep', [types.uint], types.uint) delays = [0]*n + [1] @@ -144,7 +144,7 @@ def test__ffi_struct(self): def main(): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types fields = [ Field('x', types.slong), ] From noreply at buildbot.pypy.org Fri Jan 24 11:54:25 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 24 Jan 2014 11:54:25 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: disambiguate the 6 different uses of "index" Message-ID: <20140124105425.328CE1C0962@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: detect-immutable-fields Changeset: r68899:18f97d79777a Date: 2014-01-24 11:43 +0100 http://bitbucket.org/pypy/pypy/changeset/18f97d79777a/ Log: disambiguate the 6 different uses of "index" many of them were not an index at all any more after Alex' change diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -3,7 +3,7 @@ from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache -from pypy.objspace.std.mapdict import IndexCache +from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -35,7 +35,7 @@ cache.misses = {} cache.hits = {} if space.config.objspace.std.withmapdict: - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) cache.misses = {} cache.hits = {} @@ -45,7 +45,7 @@ in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter assert space.config.objspace.std.withmapdict - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -12,8 +12,8 @@ cache = space.fromcache(MethodCache) cache.clear() if space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import IndexCache - cache = space.fromcache(IndexCache) + from pypy.objspace.std.mapdict import MapAttrCache + cache = space.fromcache(MapAttrCache) cache.clear() rgc.collect() return space.wrap(0) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -31,46 +31,46 @@ self.ever_mutated = False def read(self, obj, selector): - index = self.index(selector) - if index is None: + attr = self.find_map_attr(selector) + if attr is None: return self.terminator._read_terminator(obj, selector) - return obj._mapdict_read_storage(index.position, pure=not self.ever_mutated) + return obj._mapdict_read_storage(attr.storageindex, pure=not self.ever_mutated) def write(self, obj, selector, w_value): - index = self.index(selector) - if index is None: + attr = self.find_map_attr(selector) + if attr is None: return 
self.terminator._write_terminator(obj, selector, w_value) - obj._mapdict_write_storage(index.position, w_value) - if not index.ever_mutated: - index.ever_mutated = True + obj._mapdict_write_storage(attr.storageindex, w_value) + if not attr.ever_mutated: + attr.ever_mutated = True return True def delete(self, obj, selector): return None - def index(self, selector): + def find_map_attr(self, selector): if jit.we_are_jitted(): # hack for the jit: - # the _index method is pure too, but its argument is never + # the _find_map_attr method is pure too, but its argument is never # constant, because it is always a new tuple - return self._index_jit_pure(selector[0], selector[1]) + return self._find_map_attr_jit_pure(selector[0], selector[1]) else: - return self._index_indirection(selector) + return self._find_map_attr_indirection(selector) @jit.elidable - def _index_jit_pure(self, name, index): - return self._index_indirection((name, index)) + def _find_map_attr_jit_pure(self, name, index): + return self._find_map_attr_indirection((name, index)) @jit.dont_look_inside - def _index_indirection(self, selector): + def _find_map_attr_indirection(self, selector): if (self.space.config.objspace.std.withmethodcache): - return self._index_cache(selector) - return self._index(selector) + return self._find_map_attr_cache(selector) + return self._find_map_attr(selector) @jit.dont_look_inside - def _index_cache(self, selector): + def _find_map_attr_cache(self, selector): space = self.space - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp SHIFT1 = SHIFT2 - 5 attrs_as_int = objectmodel.current_object_addr_as_int(self) @@ -78,27 +78,27 @@ # _pure_lookup_where_with_method_cache() hash_selector = objectmodel.compute_hash(selector) product = intmask(attrs_as_int * hash_selector) - index_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 + attr_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 # ^^^Note2: same comment too - cached_attr = cache.attrs[index_hash] + cached_attr = cache.attrs[attr_hash] if cached_attr is self: - cached_selector = cache.selectors[index_hash] + cached_selector = cache.selectors[attr_hash] if cached_selector == selector: - index = cache.indices[index_hash] + attr = cache.cached_attrs[attr_hash] if space.config.objspace.std.withmethodcachecounter: name = selector[0] cache.hits[name] = cache.hits.get(name, 0) + 1 - return index - index = self._index(selector) - cache.attrs[index_hash] = self - cache.selectors[index_hash] = selector - cache.indices[index_hash] = index + return attr + attr = self._find_map_attr(selector) + cache.attrs[attr_hash] = self + cache.selectors[attr_hash] = selector + cache.cached_attrs[attr_hash] = attr if space.config.objspace.std.withmethodcachecounter: name = selector[0] cache.misses[name] = cache.misses.get(name, 0) + 1 - return index + return attr - def _index(self, selector): + def _find_map_attr(self, selector): while isinstance(self, PlainAttribute): if selector == self.selector: return self @@ -159,7 +159,7 @@ # the order is important here: first change the map, then the storage, # for the benefit of the special subclasses obj._set_mapdict_map(attr) - obj._mapdict_write_storage(attr.position, w_value) + obj._mapdict_write_storage(attr.storageindex, w_value) def materialize_r_dict(self, space, obj, dict_w): raise NotImplementedError("abstract base class") @@ -265,11 +265,11 @@ return Terminator.set_terminator(self, obj, terminator) class 
PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'position', 'back'] + _immutable_fields_ = ['selector', 'storageindex', 'back'] def __init__(self, selector, back): AbstractAttribute.__init__(self, back.space, back.terminator) self.selector = selector - self.position = back.length() + self.storageindex = back.length() self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 @@ -292,7 +292,7 @@ return new_obj def length(self): - return self.position + 1 + return self.storageindex + 1 def set_terminator(self, obj, terminator): new_obj = self.back.set_terminator(obj, terminator) @@ -308,7 +308,7 @@ new_obj = self.back.materialize_r_dict(space, obj, dict_w) if self.selector[1] == DICT: w_attr = space.wrap(self.selector[0]) - dict_w[w_attr] = obj._mapdict_read_storage(self.position) + dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex) else: self._copy_attr(obj, new_obj) return new_obj @@ -320,21 +320,21 @@ return new_obj def __repr__(self): - return "" % (self.selector, self.position, self.back) + return "" % (self.selector, self.storageindex, self.back) def _become(w_obj, new_obj): # this is like the _become method, really, but we cannot use that due to # RPython reasons w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) -class IndexCache(object): +class MapAttrCache(object): def __init__(self, space): assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE self._empty_selector = (None, INVALID) self.selectors = [self._empty_selector] * SIZE - self.indices = [None] * SIZE + self.cached_attrs = [None] * SIZE if space.config.objspace.std.withmethodcachecounter: self.hits = {} self.misses = {} @@ -344,8 +344,8 @@ self.attrs[i] = None for i in range(len(self.selectors)): self.selectors[i] = self._empty_selector - for i in range(len(self.indices)): - self.indices[i] = None + for i in range(len(self.cached_attrs)): + self.cached_attrs[i] = None # ____________________________________________________________ # object implementation @@ -422,16 +422,16 @@ self.typedef is W_InstanceObject.typedef) self._init_empty(w_subtype.terminator) - def getslotvalue(self, index): - key = ("slot", SLOTS_STARTING_FROM + index) + def getslotvalue(self, slotindex): + key = ("slot", SLOTS_STARTING_FROM + slotindex) return self._get_mapdict_map().read(self, key) - def setslotvalue(self, index, w_value): - key = ("slot", SLOTS_STARTING_FROM + index) + def setslotvalue(self, slotindex, w_value): + key = ("slot", SLOTS_STARTING_FROM + slotindex) self._get_mapdict_map().write(self, key, w_value) - def delslotvalue(self, index): - key = ("slot", SLOTS_STARTING_FROM + index) + def delslotvalue(self, slotindex): + key = ("slot", SLOTS_STARTING_FROM + slotindex) new_obj = self._get_mapdict_map().delete(self, key) if new_obj is None: return False @@ -466,18 +466,18 @@ self.map = map self.storage = make_sure_not_resized([None] * map.size_estimate()) - def _mapdict_read_storage(self, index, pure=False): - assert index >= 0 - if pure and jit.isconstant(index) and jit.isconstant(self): - return self._pure_mapdict_read_storage(index) - return self.storage[index] + def _mapdict_read_storage(self, storageindex, pure=False): + assert storageindex >= 0 + if pure and jit.isconstant(storageindex) and jit.isconstant(self): + return self._pure_mapdict_read_storage(storageindex) + return self.storage[storageindex] @jit.elidable - def _pure_mapdict_read_storage(self, index): - return self.storage[index] + def 
_pure_mapdict_read_storage(self, storageindex): + return self.storage[storageindex] - def _mapdict_write_storage(self, index, value): - self.storage[index] = value + def _mapdict_write_storage(self, storageindex, value): + self.storage[storageindex] = value def _mapdict_storage_length(self): return len(self.storage) def _set_mapdict_storage_and_map(self, storage, map): @@ -543,35 +543,35 @@ erased = getattr(self, "_value%s" % nmin1) return unerase_list(erased) - def _mapdict_read_storage(self, index, pure=False): - assert index >= 0 - if pure and jit.isconstant(index) and jit.isconstant(self): - return self._pure_mapdict_read_storage(index) - return self._indirection_mapdict_read_storage(index) + def _mapdict_read_storage(self, storageindex, pure=False): + assert storageindex >= 0 + if pure and jit.isconstant(storageindex) and jit.isconstant(self): + return self._pure_mapdict_read_storage(storageindex) + return self._indirection_mapdict_read_storage(storageindex) @jit.elidable - def _pure_mapdict_read_storage(self, index): - return self._indirection_mapdict_read_storage(index) + def _pure_mapdict_read_storage(self, storageindex): + return self._indirection_mapdict_read_storage(storageindex) - def _indirection_mapdict_read_storage(self, index): - if index < nmin1: + def _indirection_mapdict_read_storage(self, storageindex): + if storageindex < nmin1: for i in rangenmin1: - if index == i: + if storageindex == i: erased = getattr(self, "_value%s" % i) return unerase_item(erased) if self._has_storage_list(): - return self._mapdict_get_storage_list()[index - nmin1] + return self._mapdict_get_storage_list()[storageindex - nmin1] erased = getattr(self, "_value%s" % nmin1) return unerase_item(erased) - def _mapdict_write_storage(self, index, value): + def _mapdict_write_storage(self, storageindex, value): erased = erase_item(value) for i in rangenmin1: - if index == i: + if storageindex == i: setattr(self, "_value%s" % i, erased) return if self._has_storage_list(): - self._mapdict_get_storage_list()[index - nmin1] = value + self._mapdict_get_storage_list()[storageindex - nmin1] = value return setattr(self, "_value%s" % nmin1, erased) @@ -806,7 +806,7 @@ class CacheEntry(object): version_tag = None - index = 0 + storageindex = 0 w_method = None # for callmethod success_counter = 0 failure_counter = 0 @@ -839,14 +839,14 @@ pycode._mapdict_caches = [INVALID_CACHE_ENTRY] * num_entries @jit.dont_look_inside -def _fill_cache(pycode, nameindex, map, version_tag, index, w_method=None): +def _fill_cache(pycode, nameindex, map, version_tag, storageindex, w_method=None): entry = pycode._mapdict_caches[nameindex] if entry is INVALID_CACHE_ENTRY: entry = CacheEntry() pycode._mapdict_caches[nameindex] = entry entry.map_wref = weakref.ref(map) entry.version_tag = version_tag - entry.index = index + entry.storageindex = storageindex entry.w_method = w_method if pycode.space.config.objspace.std.withmethodcachecounter: entry.failure_counter += 1 @@ -858,7 +858,7 @@ map = w_obj._get_mapdict_map() if entry.is_valid_for_map(map) and entry.w_method is None: # everything matches, it's incredibly fast - return w_obj._mapdict_read_storage(entry.index) + return w_obj._mapdict_read_storage(entry.storageindex) return LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map) LOAD_ATTR_caching._always_inline_ = True @@ -892,19 +892,19 @@ selector = ("slot", SLOTS_STARTING_FROM + w_descr.index) else: # There is a non-data descriptor in the class. If there is - # also a dict attribute, use the latter, caching its position. 
+            # also a dict attribute, use the latter, caching its storageindex.
             # If not, we loose. We could do better in this case too,
             # but we don't care too much; the common case of a method
             # invocation is handled by LOOKUP_METHOD_xxx below.
             selector = (name, DICT)
     #
     if selector[1] != INVALID:
-        index = map.index(selector)
-        if index is not None:
+        attr = map.find_map_attr(selector)
+        if attr is not None:
             # Note that if map.terminator is a DevolvedDictTerminator,
-            # map.index() will always return -1 if selector[1]==DICT.
-            _fill_cache(pycode, nameindex, map, version_tag, index.position)
-            return w_obj._mapdict_read_storage(index.position)
+            # map.find_map_attr will always return None if selector[1]==DICT.
+            _fill_cache(pycode, nameindex, map, version_tag, attr.storageindex)
+            return w_obj._mapdict_read_storage(attr.storageindex)
     if space.config.objspace.std.withmethodcachecounter:
         INVALID_CACHE_ENTRY.failure_counter += 1
     return space.getattr(w_obj, w_name)
diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py
--- a/pypy/objspace/std/test/test_mapdict.py
+++ b/pypy/objspace/std/test/test_mapdict.py
@@ -64,7 +64,7 @@
     current = Terminator(space, "cls")
     for i in range(20000):
         current = PlainAttribute((str(i), DICT), current)
-    assert current.index(("0", DICT)).position == 0
+    assert current.find_map_attr(("0", DICT)).storageindex == 0


 def test_search():

From noreply at buildbot.pypy.org Fri Jan 24 12:19:09 2014
From: noreply at buildbot.pypy.org (cfbolz)
Date: Fri, 24 Jan 2014 12:19:09 +0100 (CET)
Subject: [pypy-commit] pypy detect-immutable-fields: The check for immutability was done on the wrong attribute, leading to
Message-ID: <20140124111909.53F451C1190@cobra.cs.uni-duesseldorf.de>

Author: Carl Friedrich Bolz
Branch: detect-immutable-fields
Changeset: r68900:2d7e3e3b5a02
Date: 2014-01-24 12:11 +0100
http://bitbucket.org/pypy/pypy/changeset/2d7e3e3b5a02/

Log:	The check for immutability was done on the wrong attribute, leading
	to completely bogus immutability assumptions. tests are important!
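[Editorial sketch, not part of the changeset: a minimal, self-contained illustration of the bug this log message describes. Attr and find are invented stand-ins for mapdict's PlainAttribute and find_map_attr; the point is that the purity of a read depends on the ever_mutated flag of the attribute the lookup finds, not on the map the lookup starts from, which is what the replaced self.ever_mutated check consulted.]

    class Attr(object):
        def __init__(self, name, index, back=None):
            self.name = name
            self.index = index
            self.back = back              # next-older attribute in the chain
            self.ever_mutated = False     # flipped once this slot is overwritten

    def find(map, name):
        # walk the attribute chain, like find_map_attr() does
        while map is not None:
            if map.name == name:
                return map
            map = map.back
        return None

    a = Attr("a", 0)
    a.ever_mutated = True                 # "a" has been overwritten at some point
    b = Attr("b", 1, back=a)              # "b" was only ever written once

    # The object's map is b, the newest attribute.  A check on the map the
    # read starts from ("not b.ever_mutated", the check this changeset
    # replaces) would claim that reading "a" is pure even though "a" does
    # change.  Checking the attribute returned by the lookup is correct
    # for both names:
    assert find(b, "a").ever_mutated      # reads of "a" must not be constant-folded
    assert not find(b, "b").ever_mutated  # reads of "b" may be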
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -34,7 +34,7 @@ attr = self.find_map_attr(selector) if attr is None: return self.terminator._read_terminator(obj, selector) - return obj._mapdict_read_storage(attr.storageindex, pure=not self.ever_mutated) + return obj._mapdict_read_storage(attr.storageindex, pure=not attr.ever_mutated) def write(self, obj, selector, w_value): attr = self.find_map_attr(selector) diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -107,6 +107,43 @@ assert obj2.getdictvalue(space, "b") == 60 assert obj2.map is obj.map +def test_attr_immutability(): + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", 10) + obj.setdictvalue(space, "b", 20) + obj.setdictvalue(space, "b", 30) + assert obj.storage == [10, 30] + assert obj.map.ever_mutated == True + assert obj.map.back.ever_mutated == False + + def _mapdict_read_storage(index, pure=False): + assert index in (0, 1) + if index == 0: + assert pure == True + else: + assert pure == False + return Object._mapdict_read_storage(obj, index, pure) + + obj._mapdict_read_storage = _mapdict_read_storage + + assert obj.getdictvalue(space, "a") == 10 + assert obj.getdictvalue(space, "b") == 30 + + obj2 = cls.instantiate() + obj2.setdictvalue(space, "a", 15) + obj2.setdictvalue(space, "b", 25) + assert obj2.map is obj.map + assert obj2.map.ever_mutated == True + assert obj2.map.back.ever_mutated == False + + # mutating obj2 changes the map + obj2.setdictvalue(space, "a", 50) + assert obj2.map.back.ever_mutated == True + assert obj2.map is obj.map + + + def test_delete(): for i, dattr in enumerate(["a", "b", "c"]): c = Class() From noreply at buildbot.pypy.org Fri Jan 24 12:19:10 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 24 Jan 2014 12:19:10 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: be consistent with adding an attribute: first change the map, then write to the Message-ID: <20140124111910.A3C0B1C1190@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: detect-immutable-fields Changeset: r68901:78b5d4b49b56 Date: 2014-01-24 12:11 +0100 http://bitbucket.org/pypy/pypy/changeset/78b5d4b49b56/ Log: be consistent with adding an attribute: first change the map, then write to the object. 
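[Editorial sketch, not part of the changeset: the ordering asked for in this log message, condensed into plain Python. write, attr and storage are invented stand-ins for the mapdict machinery, and the rationale in the comments (metadata should never lag behind the data it describes) is the editor's reading of "first change the map, then write to the object", not a statement from the author.]

    def write(attr, storage, index, value):
        # Mirror the order used when an attribute is added: update the
        # map-side metadata first, then the object's storage.  With this
        # order there is never a state where the new value is already in
        # storage while the attribute still looks never-mutated.
        if not attr.ever_mutated:
            attr.ever_mutated = True      # 1. map/attribute bookkeeping
        storage[index] = value            # 2. the object's storage

    class FakeAttr(object):
        def __init__(self):
            self.ever_mutated = False

    attr = FakeAttr()
    storage = [10]
    write(attr, storage, 0, 42)           # overwrite the existing value
    assert attr.ever_mutated and storage == [42]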
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -40,9 +40,9 @@ attr = self.find_map_attr(selector) if attr is None: return self.terminator._write_terminator(obj, selector, w_value) - obj._mapdict_write_storage(attr.storageindex, w_value) if not attr.ever_mutated: attr.ever_mutated = True + obj._mapdict_write_storage(attr.storageindex, w_value) return True def delete(self, obj, selector): From noreply at buildbot.pypy.org Fri Jan 24 14:22:59 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 24 Jan 2014 14:22:59 +0100 (CET) Subject: [pypy-commit] stmgc c7: add largemalloc and fix its tests Message-ID: <20140124132259.6D9B01C0166@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r672:5a8511bf2e9f Date: 2014-01-24 14:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/5a8511bf2e9f/ Log: add largemalloc and fix its tests diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -16,7 +16,7 @@ #include "nursery.h" #include "pages.h" #include "stmsync.h" - +#include "largemalloc.h" char *object_pages; @@ -196,6 +196,14 @@ or should it be UNCOMMITTED??? */ num_threads_started = 0; + + assert(HEAP_PAGES < NB_PAGES - FIRST_AFTER_NURSERY_PAGE); + assert(HEAP_PAGES > 10); + + uintptr_t first_heap = stm_pages_reserve(HEAP_PAGES); + char *heap = REAL_ADDRESS(get_thread_base(0), first_heap * 4096UL); + assert(memset(heap, 0xcd, HEAP_PAGES * 4096)); // testing + stm_largemalloc_init(heap, HEAP_PAGES * 4096UL); } #define INVALID_GS_VALUE 0x6D6D6D6D diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -21,6 +21,8 @@ #define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) #define FIRST_AFTER_NURSERY_PAGE (FIRST_OBJECT_PAGE + NB_NURSERY_PAGES) +#define HEAP_PAGES (((NB_PAGES - FIRST_AFTER_NURSERY_PAGE) * 2) / 3) + enum { diff --git a/c7/largemalloc.c b/c7/largemalloc.c new file mode 100644 --- /dev/null +++ b/c7/largemalloc.c @@ -0,0 +1,438 @@ +/* This contains a lot of inspiration from malloc() in the GNU C Library. + More precisely, this is (a subset of) the part that handles large + blocks, which in our case means at least 288 bytes. It is actually + a general allocator, although it doesn't contain any of the small- + or medium-block support that are also present in the GNU C Library. +*/ + +#include +#include +#include +#include "largemalloc.h" + + +#define MMAP_LIMIT (1280*1024) + +#define largebin_index(sz) \ + (((sz) < (48 << 6)) ? ((sz) >> 6): /* 0 - 47 */ \ + ((sz) < (24 << 9)) ? 42 + ((sz) >> 9): /* 48 - 65 */ \ + ((sz) < (12 << 12)) ? 63 + ((sz) >> 12): /* 66 - 74 */ \ + ((sz) < (6 << 15)) ? 74 + ((sz) >> 15): /* 75 - 79 */ \ + ((sz) < (3 << 18)) ? 80 + ((sz) >> 18): /* 80 - 82 */ \ + 83) +#define N_BINS 84 +#define LAST_BIN_INDEX(sz) ((sz) >= (3 << 18)) + +typedef struct dlist_s { + struct dlist_s *next; /* a doubly-linked list */ + struct dlist_s *prev; +} dlist_t; + +typedef struct malloc_chunk { + size_t prev_size; /* - if the previous chunk is free: size of its data + - otherwise, if this chunk is free: 1 + - otherwise, 0. */ + size_t size; /* size of the data in this chunk, + plus optionally the FLAG_SORTED */ + + dlist_t d; /* if free: a doubly-linked list */ + /* if not free: the user data starts here */ + + /* The chunk has a total size of 'size'. It is immediately followed + in memory by another chunk. 
This list ends with the last "chunk" + being actually only one word long, 'size_t prev_size'. Both this + last chunk and the theoretical chunk before the first one are + considered "not free". */ +} mchunk_t; + +#define FLAG_SORTED 1 +#define THIS_CHUNK_FREE 1 +#define BOTH_CHUNKS_USED 0 +#define CHUNK_HEADER_SIZE offsetof(struct malloc_chunk, d) +#define END_MARKER 0xDEADBEEF + +#define chunk_at_offset(p, ofs) ((mchunk_t *)(((char *)(p)) + (ofs))) +#define data2chunk(p) chunk_at_offset(p, -CHUNK_HEADER_SIZE) + +static mchunk_t *next_chunk_s(mchunk_t *p) +{ + assert(p->size & FLAG_SORTED); + return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size - FLAG_SORTED); +} +static mchunk_t *next_chunk_u(mchunk_t *p) +{ + assert(!(p->size & FLAG_SORTED)); + return chunk_at_offset(p, CHUNK_HEADER_SIZE + p->size); +} + + +/* The free chunks are stored in "bins". Each bin is a doubly-linked + list of chunks. There are 84 bins, with largebin_index() giving the + correspondence between sizes are bin indices. + + Each free chunk is preceeded in memory by a non-free chunk (or no + chunk at all). Each free chunk is followed in memory by a non-free + chunk (or no chunk at all). Chunks are consolidated with their + neighbors to ensure this. + + In each bin's doubly-linked list, chunks are sorted by their size in + decreasing order (if you start from 'd.next'). At the end of this + list are some unsorted chunks. All unsorted chunks are after all + sorted chunks. The flag 'FLAG_SORTED' distinguishes them. + + Note that if the user always calls stm_large_malloc() with a large + enough argument, then the few bins corresponding to smaller values + will never be sorted at all. They are still populated with the + fragments of space between bigger allocations. +*/ + +static dlist_t largebins[N_BINS]; +static mchunk_t *first_chunk, *last_chunk; + +void _stm_chunk_pages(object_t *tldata, intptr_t *start, intptr_t *num) +{ + char *data = _stm_real_address(tldata); + mchunk_t *chunk = data2chunk(data); + *start = (((char*)chunk) - get_thread_base(0)) / 4096UL; + size_t offset_into_page = ((uintptr_t)chunk) & 4095UL; // % 4096 + *num = ((chunk->size & ~CHUNK_HEADER_SIZE) + CHUNK_HEADER_SIZE + offset_into_page + 4095) / 4096UL; +} + +size_t _stm_data_size(object_t *tldata) +{ + char *data = _stm_real_address(tldata); + mchunk_t *chunk = data2chunk(data); + return chunk->size & ~CHUNK_HEADER_SIZE; +} + +static void insert_unsorted(mchunk_t *new) +{ + size_t index = LAST_BIN_INDEX(new->size) ? 
N_BINS - 1 + : largebin_index(new->size); + new->d.next = &largebins[index]; + new->d.prev = largebins[index].prev; + new->d.prev->next = &new->d; + largebins[index].prev = &new->d; + assert(!(new->size & FLAG_SORTED)); +} + +static int compare_chunks(const void *vchunk1, const void *vchunk2) +{ + /* sort by size */ + const mchunk_t *chunk1 = (const mchunk_t *)vchunk1; + const mchunk_t *chunk2 = (const mchunk_t *)vchunk2; + if (chunk1->size < chunk2->size) + return -1; + if (chunk1->size == chunk2->size) + return 0; + else + return +1; +} + +static void really_sort_bin(size_t index) +{ + dlist_t *unsorted = largebins[index].prev; + dlist_t *end = &largebins[index]; + dlist_t *scan = unsorted->prev; + size_t count = 1; + while (scan != end && !(data2chunk(scan)->size & FLAG_SORTED)) { + scan = scan->prev; + ++count; + } + end->prev = scan; + scan->next = end; + + mchunk_t *chunk1; + mchunk_t *chunks[count]; /* dynamically-sized */ + if (count == 1) { + chunk1 = data2chunk(unsorted); /* common case */ + count = 0; + } + else { + size_t i; + for (i = 0; i < count; i++) { + chunks[i] = data2chunk(unsorted); + unsorted = unsorted->prev; + } + assert(unsorted == scan); + qsort(chunks, count, sizeof(mchunk_t *), compare_chunks); + + chunk1 = chunks[--count]; + } + chunk1->size |= FLAG_SORTED; + size_t search_size = chunk1->size; + dlist_t *head = largebins[index].next; + + while (1) { + if (head == end || search_size >= data2chunk(head)->size) { + /* insert 'chunk1' here, before the current head */ + head->prev->next = &chunk1->d; + chunk1->d.prev = head->prev; + head->prev = &chunk1->d; + chunk1->d.next = head; + if (count == 0) + break; /* all done */ + chunk1 = chunks[--count]; + chunk1->size |= FLAG_SORTED; + search_size = chunk1->size; + } + else { + head = head->next; + } + } +} + +static void sort_bin(size_t index) +{ + dlist_t *last = largebins[index].prev; + if (last != &largebins[index] && !(data2chunk(last)->size & FLAG_SORTED)) + really_sort_bin(index); +} + +object_t *stm_large_malloc(size_t request_size) +{ + /* 'request_size' should already be a multiple of the word size here */ + assert((request_size & (sizeof(char *)-1)) == 0); + + size_t index = largebin_index(request_size); + sort_bin(index); + + /* scan through the chunks of current bin in reverse order + to find the smallest that fits. */ + dlist_t *scan = largebins[index].prev; + dlist_t *end = &largebins[index]; + mchunk_t *mscan; + while (scan != end) { + mscan = data2chunk(scan); + assert(mscan->prev_size == THIS_CHUNK_FREE); + assert(next_chunk_s(mscan)->prev_size == mscan->size - FLAG_SORTED); + + if (mscan->size > request_size) + goto found; + scan = mscan->d.prev; + } + + /* search now through all higher bins. We only need to take the + smallest item of the first non-empty bin, as it will be large + enough. */ + while (++index < N_BINS) { + if (largebins[index].prev != &largebins[index]) { + /* non-empty bin. */ + sort_bin(index); + scan = largebins[index].prev; + end = &largebins[index]; + mscan = data2chunk(scan); + goto found; + } + } + + /* not enough memory. 
*/ + return NULL; + + found: + assert(mscan->size & FLAG_SORTED); + assert(mscan->size > request_size); + + /* unlink mscan from the doubly-linked list */ + mscan->d.next->prev = mscan->d.prev; + mscan->d.prev->next = mscan->d.next; + + size_t remaining_size_plus_1 = mscan->size - request_size; + if (remaining_size_plus_1 <= sizeof(struct malloc_chunk)) { + next_chunk_s(mscan)->prev_size = BOTH_CHUNKS_USED; + request_size = mscan->size & ~FLAG_SORTED; + } + else { + /* only part of the chunk is being used; reduce the size + of 'mscan' down to 'request_size', and create a new + chunk of the 'remaining_size' afterwards */ + mchunk_t *new = chunk_at_offset(mscan, CHUNK_HEADER_SIZE + + request_size); + new->prev_size = THIS_CHUNK_FREE; + size_t remaining_size = remaining_size_plus_1 - 1 - CHUNK_HEADER_SIZE; + new->size = remaining_size; + next_chunk_u(new)->prev_size = remaining_size; + insert_unsorted(new); + } + mscan->size = request_size; + mscan->prev_size = BOTH_CHUNKS_USED; + return (object_t *)(((char *)&mscan->d) - get_thread_base(0)); +} + +void stm_large_free(object_t *tldata) +{ + char *data = _stm_real_address(tldata); + mchunk_t *chunk = data2chunk(data); + assert((chunk->size & (sizeof(char *) - 1)) == 0); + assert(chunk->prev_size != THIS_CHUNK_FREE); + + /* try to merge with the following chunk in memory */ + size_t msize = chunk->size + CHUNK_HEADER_SIZE; + mchunk_t *mscan = chunk_at_offset(chunk, msize); + + if (mscan->prev_size == BOTH_CHUNKS_USED) { + assert((mscan->size & ((sizeof(char *) - 1) & ~FLAG_SORTED)) == 0); + mscan->prev_size = chunk->size; + } + else { + mscan->size &= ~FLAG_SORTED; + size_t fsize = mscan->size; + mchunk_t *fscan = chunk_at_offset(mscan, fsize + CHUNK_HEADER_SIZE); + + /* unlink the following chunk */ + mscan->d.next->prev = mscan->d.prev; + mscan->d.prev->next = mscan->d.next; + assert(mscan->prev_size = (size_t)-1); + assert(mscan->size = (size_t)-1); + + /* merge the two chunks */ + assert(fsize == fscan->prev_size); + fsize += msize; + fscan->prev_size = fsize; + chunk->size = fsize; + } + + /* try to merge with the previous chunk in memory */ + if (chunk->prev_size == BOTH_CHUNKS_USED) { + chunk->prev_size = THIS_CHUNK_FREE; + } + else { + assert((chunk->prev_size & (sizeof(char *) - 1)) == 0); + + /* get at the previous chunk */ + msize = chunk->prev_size + CHUNK_HEADER_SIZE; + mscan = chunk_at_offset(chunk, -msize); + assert(mscan->prev_size == THIS_CHUNK_FREE); + assert((mscan->size & ~FLAG_SORTED) == chunk->prev_size); + + /* unlink the previous chunk */ + mscan->d.next->prev = mscan->d.prev; + mscan->d.prev->next = mscan->d.next; + + /* merge the two chunks */ + mscan->size = msize + chunk->size; + next_chunk_u(mscan)->prev_size = mscan->size; + + assert(chunk->prev_size = (size_t)-1); + assert(chunk->size = (size_t)-1); + chunk = mscan; + } + + insert_unsorted(chunk); +} + + +void _stm_large_dump(void) +{ + char *data = ((char *)first_chunk) + 16; + size_t prev_size_if_free = 0; + while (1) { + fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16)); + if (prev_size_if_free == 0) { + assert(*(size_t*)(data - 16) == THIS_CHUNK_FREE || + *(size_t*)(data - 16) == BOTH_CHUNKS_USED); + if (*(size_t*)(data - 16) == THIS_CHUNK_FREE) + prev_size_if_free = (*(size_t*)(data - 8)) & ~FLAG_SORTED; + } + else { + assert(*(size_t*)(data - 16) == prev_size_if_free); + prev_size_if_free = 0; + } + if (*(size_t*)(data - 8) == END_MARKER) + break; + fprintf(stderr, " %p: %zu ]", data - 8, *(size_t*)(data - 8)); + if (prev_size_if_free) { + 
fprintf(stderr, " (free %p / %p)\n", + *(void **)data, *(void **)(data + 8)); + } + else { + fprintf(stderr, "\n"); + } + if (!prev_size_if_free) + assert(!((*(size_t*)(data - 8)) & FLAG_SORTED)); + assert(*(ssize_t*)(data - 8) >= 16); + data += (*(size_t*)(data - 8)) & ~FLAG_SORTED; + data += 16; + } + fprintf(stderr, " %p: end. ]\n\n", data - 8); + assert(data - 16 == (char *)last_chunk); +} + +char *_stm_largemalloc_data_start() +{ + return (char*)first_chunk; +} + +void stm_largemalloc_init(char *data_start, size_t data_size) +{ + int i; + for (i = 0; i < N_BINS; i++) + largebins[i].prev = largebins[i].next = &largebins[i]; + + assert(data_size >= 2 * sizeof(struct malloc_chunk)); + assert((data_size & 31) == 0); + first_chunk = (mchunk_t *)data_start; + first_chunk->prev_size = THIS_CHUNK_FREE; + first_chunk->size = data_size - 2 * CHUNK_HEADER_SIZE; + last_chunk = chunk_at_offset(first_chunk, data_size - CHUNK_HEADER_SIZE); + last_chunk->prev_size = first_chunk->size; + last_chunk->size = END_MARKER; + assert(last_chunk == next_chunk_u(first_chunk)); + + insert_unsorted(first_chunk); +} + +int stm_largemalloc_resize_arena(size_t new_size) +{ + assert(new_size >= 2 * sizeof(struct malloc_chunk)); + assert((new_size & 31) == 0); + + new_size -= CHUNK_HEADER_SIZE; + mchunk_t *new_last_chunk = chunk_at_offset(first_chunk, new_size); + mchunk_t *old_last_chunk = last_chunk; + size_t old_size = ((char *)old_last_chunk) - (char *)first_chunk; + + if (new_size < old_size) { + /* check if there is enough free space at the end to allow + such a reduction */ + size_t lsize = last_chunk->prev_size; + assert(lsize != THIS_CHUNK_FREE); + if (lsize == BOTH_CHUNKS_USED) + return 0; + lsize += CHUNK_HEADER_SIZE; + mchunk_t *prev_chunk = chunk_at_offset(last_chunk, -lsize); + if (((char *)new_last_chunk) < ((char *)prev_chunk) + + sizeof(struct malloc_chunk)) + return 0; + + /* unlink the prev_chunk from the doubly-linked list */ + prev_chunk->d.next->prev = prev_chunk->d.prev; + prev_chunk->d.prev->next = prev_chunk->d.next; + + /* reduce the prev_chunk */ + assert((prev_chunk->size & ~FLAG_SORTED) == last_chunk->prev_size); + prev_chunk->size = ((char*)new_last_chunk) - (char *)prev_chunk + - CHUNK_HEADER_SIZE; + + /* make a fresh-new last chunk */ + new_last_chunk->prev_size = prev_chunk->size; + new_last_chunk->size = END_MARKER; + last_chunk = new_last_chunk; + assert(last_chunk == next_chunk_u(prev_chunk)); + + insert_unsorted(prev_chunk); + } + else if (new_size > old_size) { + /* make the new last chunk first, with only the extra size */ + mchunk_t *old_last_chunk = last_chunk; + old_last_chunk->size = (new_size - old_size) - CHUNK_HEADER_SIZE; + new_last_chunk->prev_size = BOTH_CHUNKS_USED; + new_last_chunk->size = END_MARKER; + last_chunk = new_last_chunk; + assert(last_chunk == next_chunk_u(old_last_chunk)); + + /* then free the last_chunk (turn it from "used" to "free) */ + stm_large_free((object_t *)(((char *)&old_last_chunk->d) - get_thread_base(0))); + } + return 1; +} diff --git a/c7/largemalloc.h b/c7/largemalloc.h new file mode 100644 --- /dev/null +++ b/c7/largemalloc.h @@ -0,0 +1,13 @@ +#include +#include "core.h" + +void stm_largemalloc_init(char *data_start, size_t data_size); +int stm_largemalloc_resize_arena(size_t new_size); + +object_t *stm_large_malloc(size_t request_size); +void stm_large_free(object_t *data); + +void _stm_large_dump(void); +void _stm_chunk_pages(object_t *tldata, intptr_t *start, intptr_t *num); +size_t _stm_data_size(object_t *tldata); +char 
*_stm_largemalloc_data_start(void); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -10,12 +10,12 @@ """core.h pagecopy.h list.h reader_writer_lock.h nursery.h pages.h - stmsync.h""".split()] + stmsync.h largemalloc.h""".split()] source_files = [os.path.join(parent_dir, _n) for _n in """core.c pagecopy.c list.c reader_writer_lock.c nursery.c pages.c - stmsync.c""".split()] + stmsync.c largemalloc.c""".split()] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): @@ -96,6 +96,17 @@ }; +void stm_largemalloc_init(char *data_start, size_t data_size); +int stm_largemalloc_resize_arena(size_t new_size); + +object_t *stm_large_malloc(size_t request_size); +void stm_large_free(object_t *data); + +void _stm_large_dump(void); +void _stm_chunk_pages(object_t *tldata, intptr_t *start, intptr_t *num); +size_t _stm_data_size(object_t *tldata); +char *_stm_largemalloc_data_start(void); + """) lib = ffi.verify(''' @@ -106,6 +117,7 @@ #include "pages.h" #include "nursery.h" #include "stmsync.h" +#include "largemalloc.h" struct myobj_s { struct object_s hdr; diff --git a/c7/test/test_largemalloc.py b/c7/test/test_largemalloc.py --- a/c7/test/test_largemalloc.py +++ b/c7/test/test_largemalloc.py @@ -1,22 +1,28 @@ from support import * import sys, random +ra = stm_get_real_address -class TestLargeMalloc(object): +class TestLargeMalloc(BaseTest): + def setup_method(self, meth): + # initialize some big heap in stm_setup() + BaseTest.setup_method(self, meth) - def setup_method(self, meth): - size = 1024 * 1024 # 1MB - self.rawmem = ffi.new("char[]", size) - self.size = size - lib.memset(self.rawmem, 0xcd, size) - lib.stm_largemalloc_init(self.rawmem, size) + # now re-initialize the heap to 1MB with 0xcd in it + self.size = 1024 * 1024 # 1MB + self.rawmem = lib._stm_largemalloc_data_start() + + lib.memset(self.rawmem, 0xcd, self.size) + lib.stm_largemalloc_init(self.rawmem, self.size) def test_simple(self): d1 = lib.stm_large_malloc(7000) d2 = lib.stm_large_malloc(8000) - assert d2 - d1 == 7016 + print d1 + print d2 + assert ra(d2) - ra(d1) == 7016 d3 = lib.stm_large_malloc(9000) - assert d3 - d2 == 8016 + assert ra(d3) - ra(d2) == 8016 # lib.stm_large_free(d1) lib.stm_large_free(d2) @@ -24,7 +30,7 @@ d4 = lib.stm_large_malloc(600) assert d4 == d1 d5 = lib.stm_large_malloc(600) - assert d5 == d4 + 616 + assert ra(d5) == ra(d4) + 616 # lib.stm_large_free(d5) # @@ -34,7 +40,7 @@ lib.stm_large_free(d4) # d7 = lib.stm_large_malloc(608) - assert d7 == d6 + 616 + assert ra(d7) == ra(d6) + 616 d8 = lib.stm_large_malloc(600) assert d8 == d4 # @@ -42,7 +48,7 @@ def test_overflow_1(self): d = lib.stm_large_malloc(self.size - 32) - assert d == self.rawmem + 16 + assert ra(d) == self.rawmem + 16 lib._stm_large_dump() def test_overflow_2(self): @@ -73,8 +79,8 @@ r = lib.stm_largemalloc_resize_arena(self.size // 2) assert r == 1 d2 = lib.stm_large_malloc(128) - assert d1 == self.rawmem + 16 - assert d2 == d1 + 128 + 16 + assert ra(d1) == self.rawmem + 16 + assert ra(d2) == ra(d1) + 128 + 16 lib._stm_large_dump() def test_resize_arena_cannot_reduce_1(self): @@ -97,18 +103,18 @@ index = r.randrange(0, len(p)) d, length, content1, content2 = p.pop(index) print ' free %5d (%s)' % (length, d) - assert d[0] == content1 - assert d[length - 1] == content2 + assert ra(d)[0] == content1 + assert ra(d)[length - 1] == content2 lib.stm_large_free(d) else: sz = r.randrange(8, 160) * 8 d = lib.stm_large_malloc(sz) print 'alloc %5d (%s)' % (sz, d) 
assert d != ffi.NULL - lib.memset(d, 0xdd, sz) + lib.memset(ra(d), 0xdd, sz) content1 = chr(r.randrange(0, 256)) content2 = chr(r.randrange(0, 256)) - d[0] = content1 - d[sz - 1] = content2 + ra(d)[0] = content1 + ra(d)[sz - 1] = content2 p.append((d, sz, content1, content2)) lib._stm_large_dump() From noreply at buildbot.pypy.org Fri Jan 24 16:00:14 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 24 Jan 2014 16:00:14 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: merged default in Message-ID: <20140124150014.74C261C00F8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68902:5a45c7f4d108 Date: 2014-01-24 07:43 -0600 http://bitbucket.org/pypy/pypy/changeset/5a45c7f4d108/ Log: merged default in diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -7,9 +7,9 @@ from rpython.rlib.test.test_clibffi import get_libm_name def main(libm_name): try: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types except ImportError: - sys.stderr.write('SKIP: cannot import _ffi\n') + sys.stderr.write('SKIP: cannot import _rawffi.alt\n') return 0 libm = CDLL(libm_name) @@ -45,9 +45,9 @@ from rpython.rlib.test.test_clibffi import get_libm_name def main(libm_name): try: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types except ImportError: - sys.stderr.write('SKIP: cannot import _ffi\n') + sys.stderr.write('SKIP: cannot import _rawffi.alt\n') return 0 libm = CDLL(libm_name) @@ -82,12 +82,12 @@ from threading import Thread # if os.name == 'nt': - from _ffi import WinDLL, types + from _rawffi.alt import WinDLL, types libc = WinDLL('Kernel32.dll') sleep = libc.getfunc('Sleep', [types.uint], types.uint) delays = [0]*n + [1000] else: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libc = CDLL(libc_name) sleep = libc.getfunc('sleep', [types.uint], types.uint) delays = [0]*n + [1] @@ -144,7 +144,7 @@ def test__ffi_struct(self): def main(): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types fields = [ Field('x', types.slong), ] From noreply at buildbot.pypy.org Fri Jan 24 16:00:15 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 24 Jan 2014 16:00:15 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: Failing test, unsure how to make it pass Message-ID: <20140124150015.B5D8B1C00F8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68903:9b7b906379c9 Date: 2014-01-24 08:59 -0600 http://bitbucket.org/pypy/pypy/changeset/9b7b906379c9/ Log: Failing test, unsure how to make it pass diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -179,7 +179,7 @@ return None def _set_mapdict_map(self, map): raise NotImplementedError - def _mapdict_read_storage(self, index, pure=False): + def _mapdict_read_storage(self, index, pure): raise NotImplementedError def _mapdict_write_storage(self, index, value): raise NotImplementedError diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -34,7 +34,18 @@ attr = self.find_map_attr(selector) if attr is None: return self.terminator._read_terminator(obj, selector) - return 
obj._mapdict_read_storage(attr.storageindex, pure=not attr.ever_mutated) + if ( + not attr.ever_mutated and + jit.isconstant(attr.storageindex) and + jit.isconstant(obj) + ): + return self._pure_mapdict_read_storage(obj, attr.storageindex) + else: + return obj._mapdict_read_storage(attr.storageindex) + + @jit.elidable + def _pure_mapdict_read_storage(self, obj, storageindex): + return obj._mapdict_read_storage(storageindex) def write(self, obj, selector, w_value): attr = self.find_map_attr(selector) @@ -466,18 +477,13 @@ self.map = map self.storage = make_sure_not_resized([None] * map.size_estimate()) - def _mapdict_read_storage(self, storageindex, pure=False): + def _mapdict_read_storage(self, storageindex): assert storageindex >= 0 - if pure and jit.isconstant(storageindex) and jit.isconstant(self): - return self._pure_mapdict_read_storage(storageindex) - return self.storage[storageindex] - - @jit.elidable - def _pure_mapdict_read_storage(self, storageindex): return self.storage[storageindex] def _mapdict_write_storage(self, storageindex, value): self.storage[storageindex] = value + def _mapdict_storage_length(self): return len(self.storage) def _set_mapdict_storage_and_map(self, storage, map): @@ -543,17 +549,8 @@ erased = getattr(self, "_value%s" % nmin1) return unerase_list(erased) - def _mapdict_read_storage(self, storageindex, pure=False): + def _mapdict_read_storage(self, storageindex): assert storageindex >= 0 - if pure and jit.isconstant(storageindex) and jit.isconstant(self): - return self._pure_mapdict_read_storage(storageindex) - return self._indirection_mapdict_read_storage(storageindex) - - @jit.elidable - def _pure_mapdict_read_storage(self, storageindex): - return self._indirection_mapdict_read_storage(storageindex) - - def _indirection_mapdict_read_storage(self, storageindex): if storageindex < nmin1: for i in rangenmin1: if storageindex == i: diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -117,18 +117,19 @@ assert obj.map.ever_mutated == True assert obj.map.back.ever_mutated == False - def _mapdict_read_storage(index, pure=False): - assert index in (0, 1) - if index == 0: - assert pure == True - else: - assert pure == False - return Object._mapdict_read_storage(obj, index, pure) + indices = [] - obj._mapdict_read_storage = _mapdict_read_storage + def _pure_mapdict_read_storage(obj, index): + assert index == 0 + indices.append(index) + return obj._mapdict_read_storage(obj, index) + + obj.map._pure_mapdict_read_storage = _pure_mapdict_read_storage assert obj.getdictvalue(space, "a") == 10 assert obj.getdictvalue(space, "b") == 30 + assert obj.getdictvalue(space, "a") == 10 + assert indices == [0, 0] obj2 = cls.instantiate() obj2.setdictvalue(space, "a", 15) From noreply at buildbot.pypy.org Fri Jan 24 16:07:40 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 24 Jan 2014 16:07:40 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: Re-arrange to only read the field if the things are constants Message-ID: <20140124150740.56E441C06CD@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68904:cadc0d2ebe59 Date: 2014-01-24 09:07 -0600 http://bitbucket.org/pypy/pypy/changeset/cadc0d2ebe59/ Log: Re-arrange to only read the field if the things are constants diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ 
b/pypy/objspace/std/mapdict.py @@ -35,9 +35,9 @@ if attr is None: return self.terminator._read_terminator(obj, selector) if ( - not attr.ever_mutated and jit.isconstant(attr.storageindex) and - jit.isconstant(obj) + jit.isconstant(obj) and + not attr.ever_mutated ): return self._pure_mapdict_read_storage(obj, attr.storageindex) else: From noreply at buildbot.pypy.org Fri Jan 24 16:09:33 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 24 Jan 2014 16:09:33 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: Make teh test pass Message-ID: <20140124150933.8962F1C0166@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68905:87ce7e7c00c5 Date: 2014-01-24 09:08 -0600 http://bitbucket.org/pypy/pypy/changeset/87ce7e7c00c5/ Log: Make teh test pass diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -107,7 +107,7 @@ assert obj2.getdictvalue(space, "b") == 60 assert obj2.map is obj.map -def test_attr_immutability(): +def test_attr_immutability(monkeypatch): cls = Class() obj = cls.instantiate() obj.setdictvalue(space, "a", 10) @@ -119,12 +119,13 @@ indices = [] - def _pure_mapdict_read_storage(obj, index): - assert index == 0 - indices.append(index) - return obj._mapdict_read_storage(obj, index) + def _pure_mapdict_read_storage(obj, storageindex): + assert storageindex == 0 + indices.append(storageindex) + return obj._mapdict_read_storage(storageindex) obj.map._pure_mapdict_read_storage = _pure_mapdict_read_storage + monkeypatch.setattr(jit, "isconstant", lambda c: True) assert obj.getdictvalue(space, "a") == 10 assert obj.getdictvalue(space, "b") == 30 From noreply at buildbot.pypy.org Fri Jan 24 16:16:43 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 24 Jan 2014 16:16:43 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: We pray for your forgiveness, o wise annotator Message-ID: <20140124151644.009C91C00F8@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68906:f7d99d77b8a2 Date: 2014-01-24 09:16 -0600 http://bitbucket.org/pypy/pypy/changeset/f7d99d77b8a2/ Log: We pray for your forgiveness, o wise annotator diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -179,7 +179,7 @@ return None def _set_mapdict_map(self, map): raise NotImplementedError - def _mapdict_read_storage(self, index, pure): + def _mapdict_read_storage(self, index): raise NotImplementedError def _mapdict_write_storage(self, index, value): raise NotImplementedError From noreply at buildbot.pypy.org Fri Jan 24 16:19:39 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 24 Jan 2014 16:19:39 +0100 (CET) Subject: [pypy-commit] stmgc c7: use only largemalloc to allocate old objects Message-ID: <20140124151939.E5AD21C00F8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r673:31b7b6a45064 Date: 2014-01-24 15:46 +0100 http://bitbucket.org/pypy/stmgc/changeset/31b7b6a45064/ Log: use only largemalloc to allocate old objects diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -68,11 +68,9 @@ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; assert(write_locks[lock_idx]); write_locks[lock_idx] = 0; - - char *src = REAL_ADDRESS(local_base, item); - char *dst = REAL_ADDRESS(remote_base, item); - 
size_t size = stmcb_size((struct object_s*)src); - memcpy(dst, src, size); + + _stm_move_object(REAL_ADDRESS(local_base, item), + REAL_ADDRESS(remote_base, item)); })); if (conflicted) { @@ -88,23 +86,24 @@ { uintptr_t pagenum = ((uintptr_t)obj) / 4096; assert(pagenum < NB_PAGES); + assert(!_stm_is_young(obj)); LIST_APPEND(_STM_TL->old_objects_to_trace, obj); /* for old objects from the same transaction we don't need - to privatize the page */ - if ((stm_get_page_flag(pagenum) == UNCOMMITTED_SHARED_PAGE) - || (obj->stm_flags & GCFLAG_NOT_COMMITTED)) { + to privatize the pages */ + if (obj->stm_flags & GCFLAG_NOT_COMMITTED) { obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; return; } /* privatize if SHARED_PAGE */ - /* xxx stmcb_size() is probably too slow, maybe add a GCFLAG_LARGE for - objs with more than 1 page */ - int pages = stmcb_size(real_address(obj)) / 4096; - for (; pages >= 0; pages--) - stm_pages_privatize(pagenum + pages); + uintptr_t pagenum2, pages; + _stm_chunk_pages((struct object_s*)REAL_ADDRESS(get_thread_base(0), obj), + &pagenum2, &pages); + assert(pagenum == pagenum2); + for (pagenum2 += pages - 1; pagenum2 >= pagenum; pagenum2--) + stm_pages_privatize(pagenum2); /* claim the write-lock for this object */ uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; @@ -226,7 +225,6 @@ _STM_TL->shadow_stack_base = _STM_TL->shadow_stack; _STM_TL->old_objects_to_trace = stm_list_create(); - _STM_TL->uncommitted_pages = stm_list_create(); _STM_TL->modified_objects = stm_list_create(); _STM_TL->uncommitted_objects = stm_list_create(); @@ -247,7 +245,6 @@ assert(stm_list_is_empty(_STM_TL->uncommitted_objects)); stm_list_free(_STM_TL->uncommitted_objects); - _STM_TL->uncommitted_objects = NULL; assert(_STM_TL->shadow_stack == _STM_TL->shadow_stack_base); free(_STM_TL->shadow_stack); @@ -255,9 +252,6 @@ assert(_STM_TL->old_objects_to_trace->count == 0); stm_list_free(_STM_TL->old_objects_to_trace); - assert(_STM_TL->uncommitted_pages->count == 0); - stm_list_free(_STM_TL->uncommitted_pages); - set_gs_register(INVALID_GS_VALUE); } @@ -341,9 +335,7 @@ _STM_TL->jmpbufptr = NULL; /* cannot abort any more */ - /* push uncommitted objects to other threads, - make completely uncommitted pages SHARED, - */ + /* push uncommitted objects to other threads */ nursery_on_commit(); /* copy modified object versions to other threads */ diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -49,7 +49,6 @@ typedef TLPREFIX struct object_s object_t; typedef TLPREFIX struct read_marker_s read_marker_t; typedef TLPREFIX char localchar_t; -typedef TLPREFIX struct alloc_for_size_s alloc_for_size_t; typedef void* jmpbufptr_t[5]; /* for use with __builtin_setjmp() */ /* Structure of objects @@ -80,12 +79,6 @@ uint8_t rm; }; -struct alloc_for_size_s { - localchar_t *next; - uint16_t start, stop; - bool flag_partial_page; -}; - struct _thread_local1_s { jmpbufptr_t *jmpbufptr; @@ -101,13 +94,8 @@ object_t **shadow_stack; object_t **shadow_stack_base; - struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; struct stm_list_s *uncommitted_objects; - /* pages newly allocated in the current transaction only containing - uncommitted objects */ - struct stm_list_s *uncommitted_pages; - localchar_t *nursery_current; struct stm_list_s *old_objects_to_trace; }; diff --git a/c7/largemalloc.c b/c7/largemalloc.c --- a/c7/largemalloc.c +++ b/c7/largemalloc.c @@ -5,11 +5,13 @@ or medium-block support that are also present in the GNU C Library. 
*/ +#include #include #include #include #include "largemalloc.h" - +#include "pages.h" +#include "pagecopy.h" #define MMAP_LIMIT (1280*1024) @@ -89,20 +91,45 @@ static dlist_t largebins[N_BINS]; static mchunk_t *first_chunk, *last_chunk; -void _stm_chunk_pages(object_t *tldata, intptr_t *start, intptr_t *num) +void _stm_chunk_pages(struct object_s *data, uintptr_t *start, uintptr_t *num) { - char *data = _stm_real_address(tldata); - mchunk_t *chunk = data2chunk(data); - *start = (((char*)chunk) - get_thread_base(0)) / 4096UL; - size_t offset_into_page = ((uintptr_t)chunk) & 4095UL; // % 4096 - *num = ((chunk->size & ~CHUNK_HEADER_SIZE) + CHUNK_HEADER_SIZE + offset_into_page + 4095) / 4096UL; + /* returns the start page and number of pages that the *payload* + spans over. the CHUNK_HEADER is not included in the calculations */ + mchunk_t *chunk = data2chunk((char*)data); + *start = (((char*)data) - get_thread_base(0)) / 4096UL; + size_t offset_into_page = ((uintptr_t)data) & 4095UL; // % 4096 + *num = ((chunk->size & ~FLAG_SORTED) + offset_into_page + 4095) / 4096UL; } -size_t _stm_data_size(object_t *tldata) +size_t _stm_data_size(struct object_s *data) { - char *data = _stm_real_address(tldata); - mchunk_t *chunk = data2chunk(data); - return chunk->size & ~CHUNK_HEADER_SIZE; + mchunk_t *chunk = data2chunk((char*)data); + return chunk->size & ~FLAG_SORTED; +} + +void _stm_move_object(char *src, char *dst) +{ + /* only copies if page is PRIVATE + XXX: various optimizations for objects with + multiple pages. E.g. using pagecopy or + memcpy over multiple PRIVATE pages. */ + char *end = src + _stm_data_size((struct object_s*)src); + uintptr_t pagenum, num; + struct object_s *t0_obj = (struct object_s*)REAL_ADDRESS(get_thread_base(0), _stm_tl_address(src)); + _stm_chunk_pages(t0_obj, &pagenum, &num); + + while (src < end) { + size_t to_copy = 4096UL - ((uintptr_t)src & 4095UL); + if (to_copy > end - src) + to_copy = end - src; + if (stm_get_page_flag(pagenum) == PRIVATE_PAGE) { + memcpy(dst, src, to_copy); + } + + pagenum++; + src += to_copy; + dst += to_copy; + } } static void insert_unsorted(mchunk_t *new) diff --git a/c7/largemalloc.h b/c7/largemalloc.h --- a/c7/largemalloc.h +++ b/c7/largemalloc.h @@ -8,6 +8,10 @@ void stm_large_free(object_t *data); void _stm_large_dump(void); -void _stm_chunk_pages(object_t *tldata, intptr_t *start, intptr_t *num); -size_t _stm_data_size(object_t *tldata); char *_stm_largemalloc_data_start(void); + +void _stm_move_object(char *src, char *dst); +size_t _stm_data_size(struct object_s *data); +void _stm_chunk_pages(struct object_s *data, uintptr_t *start, uintptr_t *num); + + diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -15,6 +15,7 @@ #include "nursery.h" #include "pages.h" #include "stmsync.h" +#include "largemalloc.h" void stm_major_collection(void) { @@ -30,18 +31,9 @@ } -void mark_page_as_uncommitted(uintptr_t pagenum) -{ - stm_set_page_flag(pagenum, UNCOMMITTED_SHARED_PAGE); - LIST_APPEND(_STM_TL->uncommitted_pages, (object_t*)pagenum); -} - object_t *_stm_allocate_old(size_t size) { - int pages = (size + 4095) / 4096; - localchar_t* addr = (localchar_t*)(stm_pages_reserve(pages) * 4096); - - object_t* o = (object_t*)addr; + object_t* o = stm_large_malloc(size); o->stm_flags |= GCFLAG_WRITE_BARRIER; return o; } @@ -51,84 +43,6 @@ return _stm_allocate_old(size); /* XXX */ } -localchar_t *_stm_alloc_next_page(size_t size_class) -{ - /* may return uninitialized pages */ - - /* 'alloc->next' points to where the next 
allocation should go. The - present function is called instead when this next allocation is - equal to 'alloc->stop'. As we know that 'start', 'next' and - 'stop' are always nearby pointers, we play tricks and only store - the lower 16 bits of 'start' and 'stop', so that the three - variables plus some flags fit in 16 bytes. - */ - uintptr_t page; - localchar_t *result; - alloc_for_size_t *alloc = &_STM_TL->alloc[size_class]; - size_t size = size_class * 8; - - /* reserve a fresh new page */ - page = stm_pages_reserve(1); - - /* mark as UNCOMMITTED_... */ - mark_page_as_uncommitted(page); - - result = (localchar_t *)(page * 4096UL); - alloc->start = (uintptr_t)result; - alloc->stop = alloc->start + (4096 / size) * size; - alloc->next = result + size; - alloc->flag_partial_page = false; - return result; -} - - - - -object_t *_stm_alloc_old(size_t size) -{ - /* may return uninitialized objects. except for the - GCFLAG_NOT_COMMITTED, it is set exactly if - we allocated the object in a SHARED and partially - committed page. (XXX: add the flag in some other place) - */ - object_t *result; - size_t size_class = size / 8; - assert(size_class >= 2); - - if (size_class >= LARGE_OBJECT_WORDS) { - result = _stm_allocate_old(size); - result->stm_flags &= ~GCFLAG_NOT_COMMITTED; /* page may be non-zeroed */ - - int page = ((uintptr_t)result) / 4096; - int pages = (size + 4095) / 4096; - int i; - for (i = 0; i < pages; i++) { - mark_page_as_uncommitted(page + i); - } - /* make sure the flag is not set (page is not zeroed!) */ - result->stm_flags &= ~GCFLAG_NOT_COMMITTED; - } else { - alloc_for_size_t *alloc = &_STM_TL->alloc[size_class]; - - if ((uint16_t)((uintptr_t)alloc->next) == alloc->stop) { - result = (object_t *)_stm_alloc_next_page(size_class); - } else { - result = (object_t *)alloc->next; - alloc->next += size; - if (alloc->flag_partial_page) { - LIST_APPEND(_STM_TL->uncommitted_objects, result); - result->stm_flags |= GCFLAG_NOT_COMMITTED; - } else { - /* make sure the flag is not set (page is not zeroed!) */ - result->stm_flags &= ~GCFLAG_NOT_COMMITTED; - } - } - } - return result; -} - - - void trace_if_young(object_t **pobj) { @@ -147,16 +61,16 @@ /* move obj to somewhere else */ size_t size = stmcb_size(real_address(*pobj)); - object_t *moved = (object_t*)_stm_alloc_old(size); + object_t *moved = stm_large_malloc(size); - if (moved->stm_flags & GCFLAG_NOT_COMMITTED) - (*pobj)->stm_flags |= GCFLAG_NOT_COMMITTED; /* XXX: memcpy below overwrites this otherwise. - find better solution.*/ - memcpy((void*)real_address(moved), (void*)real_address(*pobj), size); + /* object is not committed yet */ + moved->stm_flags |= GCFLAG_NOT_COMMITTED; + LIST_APPEND(_STM_TL->uncommitted_objects, moved); + (*pobj)->stm_flags |= GCFLAG_MOVED; *pforwarded = moved; *pobj = moved; @@ -251,22 +165,15 @@ /* remove the flag (they are now committed) */ item->stm_flags &= ~GCFLAG_NOT_COMMITTED; - - uintptr_t pagenum = ((uintptr_t)item) / 4096UL; - if (stm_get_page_flag(pagenum) == PRIVATE_PAGE) { - /* page was privatized... 
*/ - char *src = REAL_ADDRESS(local_base, item); - char *dst = REAL_ADDRESS(remote_base, item); - size_t size = stmcb_size((struct object_s*)src); - memcpy(dst, src, size); - } + + _stm_move_object(REAL_ADDRESS(local_base, item), + REAL_ADDRESS(remote_base, item)); })); } void nursery_on_start() { assert(stm_list_is_empty(_STM_TL->old_objects_to_trace)); - stm_list_clear(_STM_TL->uncommitted_pages); _STM_TL->old_shadow_stack = _STM_TL->shadow_stack; } @@ -277,37 +184,9 @@ the caller (optimization) */ /* minor_collect(); */ - /* uncommitted objects / partially COMMITTED pages */ + /* uncommitted objects */ push_uncommitted_to_other_threads(); stm_list_clear(_STM_TL->uncommitted_objects); - - /* uncommitted_pages */ - long j; - for (j = 2; j < LARGE_OBJECT_WORDS; j++) { - alloc_for_size_t *alloc = &_STM_TL->alloc[j]; - uint16_t start = alloc->start; - uint16_t cur = (uintptr_t)alloc->next; - - if (start == cur) - continue; /* page full -> will be replaced automatically */ - - alloc->start = cur; /* next transaction has different 'start' to - reset in case of an abort */ - - uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; - if (stm_get_page_flag(pagenum) == UNCOMMITTED_SHARED_PAGE) { - /* becomes a SHARED (done below) partially used page */ - alloc->flag_partial_page = 1; - } - } - - STM_LIST_FOREACH( - _STM_TL->uncommitted_pages, - ({ - uintptr_t pagenum = (uintptr_t)item; - stm_set_page_flag(pagenum, SHARED_PAGE); - })); - stm_list_clear(_STM_TL->uncommitted_pages); } void nursery_on_abort() @@ -324,36 +203,16 @@ _STM_TL->nursery_current = nursery_base; - /* forget about GCFLAG_NOT_COMMITTED objects by - resetting alloc-pages */ - long j; - for (j = 2; j < LARGE_OBJECT_WORDS; j++) { - alloc_for_size_t *alloc = &_STM_TL->alloc[j]; - uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; - uintptr_t next = (uintptr_t)alloc->next; - - if (num_allocated) { - /* forget about all non-committed objects */ - alloc->next -= num_allocated; - - uintptr_t pagenum = ((uintptr_t)(next - 1)) / 4096UL; - if (stm_get_page_flag(pagenum) == UNCOMMITTED_SHARED_PAGE) { - /* the page will be freed below, we need a new one for the - next allocation */ - alloc->next = 0; - alloc->stop = 0; - alloc->start = 0; - } - } - } + /* free uncommitted objects */ + struct stm_list_s *uncommitted = _STM_TL->uncommitted_objects; - /* unreserve uncommitted_pages and mark them as SHARED again - IFF they are not in alloc[] */ - STM_LIST_FOREACH(_STM_TL->uncommitted_pages, ({ - stm_pages_unreserve((uintptr_t)item); - })); - stm_list_clear(_STM_TL->uncommitted_pages); - + STM_LIST_FOREACH( + uncommitted, + ({ + stm_large_free(item); + })); + + stm_list_clear(uncommitted); } diff --git a/c7/pages.h b/c7/pages.h --- a/c7/pages.h +++ b/c7/pages.h @@ -7,11 +7,6 @@ /* page private for each thread */ PRIVATE_PAGE, - - /* set for SHARED pages that only contain objects belonging - to the current transaction, so the whole page is not - visible yet for other threads */ - UNCOMMITTED_SHARED_PAGE, }; /* flag_page_private */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -86,7 +86,6 @@ SHARED_PAGE=0, REMAPPING_PAGE, PRIVATE_PAGE, - UNCOMMITTED_SHARED_PAGE, }; /* flag_page_private */ enum { @@ -103,10 +102,12 @@ void stm_large_free(object_t *data); void _stm_large_dump(void); -void _stm_chunk_pages(object_t *tldata, intptr_t *start, intptr_t *num); -size_t _stm_data_size(object_t *tldata); char *_stm_largemalloc_data_start(void); +void _stm_move_object(char *src, 
char *dst); +size_t _stm_data_size(struct object_s *data); +void _stm_chunk_pages(struct object_s *data, uintptr_t *start, uintptr_t *num); + """) lib = ffi.verify(''' diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -353,8 +353,8 @@ new = stm_pop_root() assert len(stm_get_obj_pages(new)) == 2 - assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [lib.UNCOMMITTED_SHARED_PAGE]*2) + # assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] + # == [lib.UNCOMMITTED_SHARED_PAGE]*2) assert not is_in_nursery(new) @@ -397,8 +397,8 @@ stm_push_root(new) stm_minor_collect() new = stm_pop_root() - assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE - assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) + # assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE + # assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) stm_stop_transaction() assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.SHARED_PAGE @@ -446,7 +446,7 @@ stm_push_root(new) stm_minor_collect() new = stm_pop_root() - assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE + # assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE stm_abort_transaction() stm_start_transaction() From noreply at buildbot.pypy.org Fri Jan 24 16:19:41 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 24 Jan 2014 16:19:41 +0100 (CET) Subject: [pypy-commit] stmgc c7: fixes, all tests seem to pass Message-ID: <20140124151941.1D4011C00F8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r674:50820e18ee9b Date: 2014-01-24 16:19 +0100 http://bitbucket.org/pypy/stmgc/changeset/50820e18ee9b/ Log: fixes, all tests seem to pass diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -69,7 +69,8 @@ assert(write_locks[lock_idx]); write_locks[lock_idx] = 0; - _stm_move_object(REAL_ADDRESS(local_base, item), + _stm_move_object(item, + REAL_ADDRESS(local_base, item), REAL_ADDRESS(remote_base, item)); })); @@ -102,6 +103,7 @@ _stm_chunk_pages((struct object_s*)REAL_ADDRESS(get_thread_base(0), obj), &pagenum2, &pages); assert(pagenum == pagenum2); + assert(pages == (stmcb_size(real_address(obj)) +4095) / 4096); for (pagenum2 += pages - 1; pagenum2 >= pagenum; pagenum2--) stm_pages_privatize(pagenum2); diff --git a/c7/largemalloc.c b/c7/largemalloc.c --- a/c7/largemalloc.c +++ b/c7/largemalloc.c @@ -90,9 +90,11 @@ static dlist_t largebins[N_BINS]; static mchunk_t *first_chunk, *last_chunk; +uint8_t alloc_lock = 0; void _stm_chunk_pages(struct object_s *data, uintptr_t *start, uintptr_t *num) { + /* expects object_s in thread0-space */ /* returns the start page and number of pages that the *payload* spans over. the CHUNK_HEADER is not included in the calculations */ mchunk_t *chunk = data2chunk((char*)data); @@ -107,13 +109,15 @@ return chunk->size & ~FLAG_SORTED; } -void _stm_move_object(char *src, char *dst) +void _stm_move_object(object_t* obj, char *src, char *dst) { + /* XXX: should be thread-safe... */ + /* only copies if page is PRIVATE XXX: various optimizations for objects with multiple pages. E.g. using pagecopy or memcpy over multiple PRIVATE pages. 
*/ - char *end = src + _stm_data_size((struct object_s*)src); + char *end = src + _stm_data_size((struct object_s*)REAL_ADDRESS(get_thread_base(0), obj)); uintptr_t pagenum, num; struct object_s *t0_obj = (struct object_s*)REAL_ADDRESS(get_thread_base(0), _stm_tl_address(src)); _stm_chunk_pages(t0_obj, &pagenum, &num); @@ -218,6 +222,9 @@ object_t *stm_large_malloc(size_t request_size) { + while (__sync_lock_test_and_set(&alloc_lock, 1)) + spin_loop(); + /* 'request_size' should already be a multiple of the word size here */ assert((request_size & (sizeof(char *)-1)) == 0); @@ -254,6 +261,8 @@ } /* not enough memory. */ + alloc_lock = 0; + abort(); return NULL; found: @@ -283,11 +292,16 @@ } mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; + + alloc_lock = 0; return (object_t *)(((char *)&mscan->d) - get_thread_base(0)); } void stm_large_free(object_t *tldata) { + while (__sync_lock_test_and_set(&alloc_lock, 1)) + spin_loop(); + char *data = _stm_real_address(tldata); mchunk_t *chunk = data2chunk(data); assert((chunk->size & (sizeof(char *) - 1)) == 0); @@ -346,6 +360,8 @@ } insert_unsorted(chunk); + + alloc_lock = 0; } @@ -411,6 +427,8 @@ int stm_largemalloc_resize_arena(size_t new_size) { + /* XXX not thread-safe regarding all functions here... */ + assert(new_size >= 2 * sizeof(struct malloc_chunk)); assert((new_size & 31) == 0); diff --git a/c7/largemalloc.h b/c7/largemalloc.h --- a/c7/largemalloc.h +++ b/c7/largemalloc.h @@ -10,7 +10,7 @@ void _stm_large_dump(void); char *_stm_largemalloc_data_start(void); -void _stm_move_object(char *src, char *dst); +void _stm_move_object(object_t *obj, char *src, char *dst); size_t _stm_data_size(struct object_s *data); void _stm_chunk_pages(struct object_s *data, uintptr_t *start, uintptr_t *num); diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -34,6 +34,7 @@ object_t *_stm_allocate_old(size_t size) { object_t* o = stm_large_malloc(size); + memset(real_address(o), 0, size); o->stm_flags |= GCFLAG_WRITE_BARRIER; return o; } @@ -166,8 +167,9 @@ /* remove the flag (they are now committed) */ item->stm_flags &= ~GCFLAG_NOT_COMMITTED; - _stm_move_object(REAL_ADDRESS(local_base, item), - REAL_ADDRESS(remote_base, item)); + _stm_move_object(item, + REAL_ADDRESS(local_base, item), + REAL_ADDRESS(remote_base, item)); })); } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -104,7 +104,7 @@ void _stm_large_dump(void); char *_stm_largemalloc_data_start(void); -void _stm_move_object(char *src, char *dst); +void _stm_move_object(object_t* obj, char *src, char *dst); size_t _stm_data_size(struct object_s *data); void _stm_chunk_pages(struct object_s *data, uintptr_t *start, uintptr_t *num); diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -5,6 +5,7 @@ ../c7/pages.c \ ../c7/nursery.c \ ../c7/stmsync.c \ + ../c7/largemalloc.c \ ../c7/reader_writer_lock.c C7HEADERS = ../c7/*.h From noreply at buildbot.pypy.org Fri Jan 24 17:27:57 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 24 Jan 2014 17:27:57 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: This no longer appears Message-ID: <20140124162757.C845A1C1190@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68907:65fb858e4798 Date: 2014-01-24 10:27 -0600 http://bitbucket.org/pypy/pypy/changeset/65fb858e4798/ Log: This no longer appears diff --git 
a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -42,7 +42,6 @@ assert loop.match(""" i53 = int_lt(i48, i27) guard_true(i53, descr=...) - guard_not_invalidated(descr=...) i54 = int_add_ovf(i48, i47) guard_no_overflow(descr=...) --TICK-- From noreply at buildbot.pypy.org Fri Jan 24 17:48:45 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 24 Jan 2014 17:48:45 +0100 (CET) Subject: [pypy-commit] pypy detect-immutable-fields: Close branch before merge Message-ID: <20140124164845.3FAD81C2FFE@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: detect-immutable-fields Changeset: r68908:ed156187a38e Date: 2014-01-24 10:47 -0600 http://bitbucket.org/pypy/pypy/changeset/ed156187a38e/ Log: Close branch before merge From noreply at buildbot.pypy.org Fri Jan 24 17:48:46 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 24 Jan 2014 17:48:46 +0100 (CET) Subject: [pypy-commit] pypy default: Merged detect-immutable-fields Message-ID: <20140124164846.724971C2FFE@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68909:9d3715134dcf Date: 2014-01-24 10:48 -0600 http://bitbucket.org/pypy/pypy/changeset/9d3715134dcf/ Log: Merged detect-immutable-fields diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -3,7 +3,7 @@ from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache -from pypy.objspace.std.mapdict import IndexCache +from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -35,7 +35,7 @@ cache.misses = {} cache.hits = {} if space.config.objspace.std.withmapdict: - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) cache.misses = {} cache.hits = {} @@ -45,7 +45,7 @@ in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter assert space.config.objspace.std.withmapdict - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -12,8 +12,8 @@ cache = space.fromcache(MethodCache) cache.clear() if space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import IndexCache - cache = space.fromcache(IndexCache) + from pypy.objspace.std.mapdict import MapAttrCache + cache = space.fromcache(MapAttrCache) cache.clear() rgc.collect() return space.wrap(0) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -35,7 +35,7 @@ class A(object): pass a = A() - a.x = 2 + a.x = 1 def main(n): i = 0 while i < n: @@ -49,8 +49,7 @@ i9 = int_lt(i5, i6) guard_true(i9, descr=...) guard_not_invalidated(descr=...) - i10 = int_add_ovf(i5, i7) - guard_no_overflow(descr=...) + i10 = int_add(i5, 1) --TICK-- jump(..., descr=...) 
""") diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -1,15 +1,16 @@ import weakref -from rpython.rlib import jit, objectmodel, debug + +from rpython.rlib import jit, objectmodel, debug, rerased from rpython.rlib.rarithmetic import intmask, r_uint -from rpython.rlib import rerased from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.dictmultiobject import W_DictMultiObject, DictStrategy, ObjectDictStrategy -from pypy.objspace.std.dictmultiobject import BaseKeyIterator, BaseValueIterator, BaseItemIterator -from pypy.objspace.std.dictmultiobject import _never_equal_to_string -from pypy.objspace.std.objectobject import W_ObjectObject +from pypy.objspace.std.dictmultiobject import ( + W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, + BaseValueIterator, BaseItemIterator, _never_equal_to_string +) from pypy.objspace.std.typeobject import TypeCell + # ____________________________________________________________ # attribute shapes @@ -19,7 +20,7 @@ # we want to propagate knowledge that the result cannot be negative class AbstractAttribute(object): - _immutable_fields_ = ['terminator'] + _immutable_fields_ = ['terminator', 'ever_mutated?'] cache_attrs = None _size_estimate = 0 @@ -27,46 +28,60 @@ self.space = space assert isinstance(terminator, Terminator) self.terminator = terminator + self.ever_mutated = False def read(self, obj, selector): - index = self.index(selector) - if index < 0: + attr = self.find_map_attr(selector) + if attr is None: return self.terminator._read_terminator(obj, selector) - return obj._mapdict_read_storage(index) + if ( + jit.isconstant(attr.storageindex) and + jit.isconstant(obj) and + not attr.ever_mutated + ): + return self._pure_mapdict_read_storage(obj, attr.storageindex) + else: + return obj._mapdict_read_storage(attr.storageindex) + + @jit.elidable + def _pure_mapdict_read_storage(self, obj, storageindex): + return obj._mapdict_read_storage(storageindex) def write(self, obj, selector, w_value): - index = self.index(selector) - if index < 0: + attr = self.find_map_attr(selector) + if attr is None: return self.terminator._write_terminator(obj, selector, w_value) - obj._mapdict_write_storage(index, w_value) + if not attr.ever_mutated: + attr.ever_mutated = True + obj._mapdict_write_storage(attr.storageindex, w_value) return True def delete(self, obj, selector): return None - def index(self, selector): + def find_map_attr(self, selector): if jit.we_are_jitted(): # hack for the jit: - # the _index method is pure too, but its argument is never + # the _find_map_attr method is pure too, but its argument is never # constant, because it is always a new tuple - return self._index_jit_pure(selector[0], selector[1]) + return self._find_map_attr_jit_pure(selector[0], selector[1]) else: - return self._index_indirection(selector) + return self._find_map_attr_indirection(selector) @jit.elidable - def _index_jit_pure(self, name, index): - return self._index_indirection((name, index)) + def _find_map_attr_jit_pure(self, name, index): + return self._find_map_attr_indirection((name, index)) @jit.dont_look_inside - def _index_indirection(self, selector): + def _find_map_attr_indirection(self, selector): if (self.space.config.objspace.std.withmethodcache): - return self._index_cache(selector) - return self._index(selector) + return self._find_map_attr_cache(selector) + return self._find_map_attr(selector) @jit.dont_look_inside - def 
_index_cache(self, selector): + def _find_map_attr_cache(self, selector): space = self.space - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp SHIFT1 = SHIFT2 - 5 attrs_as_int = objectmodel.current_object_addr_as_int(self) @@ -74,32 +89,32 @@ # _pure_lookup_where_with_method_cache() hash_selector = objectmodel.compute_hash(selector) product = intmask(attrs_as_int * hash_selector) - index_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 + attr_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 # ^^^Note2: same comment too - cached_attr = cache.attrs[index_hash] + cached_attr = cache.attrs[attr_hash] if cached_attr is self: - cached_selector = cache.selectors[index_hash] + cached_selector = cache.selectors[attr_hash] if cached_selector == selector: - index = cache.indices[index_hash] + attr = cache.cached_attrs[attr_hash] if space.config.objspace.std.withmethodcachecounter: name = selector[0] cache.hits[name] = cache.hits.get(name, 0) + 1 - return index - index = self._index(selector) - cache.attrs[index_hash] = self - cache.selectors[index_hash] = selector - cache.indices[index_hash] = index + return attr + attr = self._find_map_attr(selector) + cache.attrs[attr_hash] = self + cache.selectors[attr_hash] = selector + cache.cached_attrs[attr_hash] = attr if space.config.objspace.std.withmethodcachecounter: name = selector[0] cache.misses[name] = cache.misses.get(name, 0) + 1 - return index + return attr - def _index(self, selector): + def _find_map_attr(self, selector): while isinstance(self, PlainAttribute): if selector == self.selector: - return self.position + return self self = self.back - return -1 + return None def copy(self, obj): raise NotImplementedError("abstract base class") @@ -155,7 +170,7 @@ # the order is important here: first change the map, then the storage, # for the benefit of the special subclasses obj._set_mapdict_map(attr) - obj._mapdict_write_storage(attr.position, w_value) + obj._mapdict_write_storage(attr.storageindex, w_value) def materialize_r_dict(self, space, obj, dict_w): raise NotImplementedError("abstract base class") @@ -261,11 +276,11 @@ return Terminator.set_terminator(self, obj, terminator) class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'position', 'back'] + _immutable_fields_ = ['selector', 'storageindex', 'back'] def __init__(self, selector, back): AbstractAttribute.__init__(self, back.space, back.terminator) self.selector = selector - self.position = back.length() + self.storageindex = back.length() self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 @@ -288,7 +303,7 @@ return new_obj def length(self): - return self.position + 1 + return self.storageindex + 1 def set_terminator(self, obj, terminator): new_obj = self.back.set_terminator(obj, terminator) @@ -304,7 +319,7 @@ new_obj = self.back.materialize_r_dict(space, obj, dict_w) if self.selector[1] == DICT: w_attr = space.wrap(self.selector[0]) - dict_w[w_attr] = obj._mapdict_read_storage(self.position) + dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex) else: self._copy_attr(obj, new_obj) return new_obj @@ -316,21 +331,21 @@ return new_obj def __repr__(self): - return "" % (self.selector, self.position, self.back) + return "" % (self.selector, self.storageindex, self.back) def _become(w_obj, new_obj): # this is like the _become method, really, but we cannot use that due to # RPython reasons 
w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) -class IndexCache(object): +class MapAttrCache(object): def __init__(self, space): assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE self._empty_selector = (None, INVALID) self.selectors = [self._empty_selector] * SIZE - self.indices = [0] * SIZE + self.cached_attrs = [None] * SIZE if space.config.objspace.std.withmethodcachecounter: self.hits = {} self.misses = {} @@ -340,6 +355,8 @@ self.attrs[i] = None for i in range(len(self.selectors)): self.selectors[i] = self._empty_selector + for i in range(len(self.cached_attrs)): + self.cached_attrs[i] = None # ____________________________________________________________ # object implementation @@ -416,16 +433,16 @@ self.typedef is W_InstanceObject.typedef) self._init_empty(w_subtype.terminator) - def getslotvalue(self, index): - key = ("slot", SLOTS_STARTING_FROM + index) + def getslotvalue(self, slotindex): + key = ("slot", SLOTS_STARTING_FROM + slotindex) return self._get_mapdict_map().read(self, key) - def setslotvalue(self, index, w_value): - key = ("slot", SLOTS_STARTING_FROM + index) + def setslotvalue(self, slotindex, w_value): + key = ("slot", SLOTS_STARTING_FROM + slotindex) self._get_mapdict_map().write(self, key, w_value) - def delslotvalue(self, index): - key = ("slot", SLOTS_STARTING_FROM + index) + def delslotvalue(self, slotindex): + key = ("slot", SLOTS_STARTING_FROM + slotindex) new_obj = self._get_mapdict_map().delete(self, key) if new_obj is None: return False @@ -460,11 +477,13 @@ self.map = map self.storage = make_sure_not_resized([None] * map.size_estimate()) - def _mapdict_read_storage(self, index): - assert index >= 0 - return self.storage[index] - def _mapdict_write_storage(self, index, value): - self.storage[index] = value + def _mapdict_read_storage(self, storageindex): + assert storageindex >= 0 + return self.storage[storageindex] + + def _mapdict_write_storage(self, storageindex, value): + self.storage[storageindex] = value + def _mapdict_storage_length(self): return len(self.storage) def _set_mapdict_storage_and_map(self, storage, map): @@ -519,7 +538,6 @@ rangenmin1 = unroll.unrolling_iterable(range(nmin1)) class subcls(BaseMapdictObject, supercls): def _init_empty(self, map): - from rpython.rlib.debug import make_sure_not_resized for i in rangen: setattr(self, "_value%s" % i, erase_item(None)) self.map = map @@ -531,26 +549,26 @@ erased = getattr(self, "_value%s" % nmin1) return unerase_list(erased) - def _mapdict_read_storage(self, index): - assert index >= 0 - if index < nmin1: + def _mapdict_read_storage(self, storageindex): + assert storageindex >= 0 + if storageindex < nmin1: for i in rangenmin1: - if index == i: + if storageindex == i: erased = getattr(self, "_value%s" % i) return unerase_item(erased) if self._has_storage_list(): - return self._mapdict_get_storage_list()[index - nmin1] + return self._mapdict_get_storage_list()[storageindex - nmin1] erased = getattr(self, "_value%s" % nmin1) return unerase_item(erased) - def _mapdict_write_storage(self, index, value): + def _mapdict_write_storage(self, storageindex, value): erased = erase_item(value) for i in rangenmin1: - if index == i: + if storageindex == i: setattr(self, "_value%s" % i, erased) return if self._has_storage_list(): - self._mapdict_get_storage_list()[index - nmin1] = value + self._mapdict_get_storage_list()[storageindex - nmin1] = value return setattr(self, "_value%s" % nmin1, erased) @@ -785,7 
+803,7 @@ class CacheEntry(object): version_tag = None - index = 0 + storageindex = 0 w_method = None # for callmethod success_counter = 0 failure_counter = 0 @@ -818,14 +836,14 @@ pycode._mapdict_caches = [INVALID_CACHE_ENTRY] * num_entries @jit.dont_look_inside -def _fill_cache(pycode, nameindex, map, version_tag, index, w_method=None): +def _fill_cache(pycode, nameindex, map, version_tag, storageindex, w_method=None): entry = pycode._mapdict_caches[nameindex] if entry is INVALID_CACHE_ENTRY: entry = CacheEntry() pycode._mapdict_caches[nameindex] = entry entry.map_wref = weakref.ref(map) entry.version_tag = version_tag - entry.index = index + entry.storageindex = storageindex entry.w_method = w_method if pycode.space.config.objspace.std.withmethodcachecounter: entry.failure_counter += 1 @@ -837,7 +855,7 @@ map = w_obj._get_mapdict_map() if entry.is_valid_for_map(map) and entry.w_method is None: # everything matches, it's incredibly fast - return w_obj._mapdict_read_storage(entry.index) + return w_obj._mapdict_read_storage(entry.storageindex) return LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map) LOAD_ATTR_caching._always_inline_ = True @@ -871,19 +889,19 @@ selector = ("slot", SLOTS_STARTING_FROM + w_descr.index) else: # There is a non-data descriptor in the class. If there is - # also a dict attribute, use the latter, caching its position. + # also a dict attribute, use the latter, caching its storageindex. # If not, we loose. We could do better in this case too, # but we don't care too much; the common case of a method # invocation is handled by LOOKUP_METHOD_xxx below. selector = (name, DICT) # if selector[1] != INVALID: - index = map.index(selector) - if index >= 0: + attr = map.find_map_attr(selector) + if attr is not None: # Note that if map.terminator is a DevolvedDictTerminator, - # map.index() will always return -1 if selector[1]==DICT. - _fill_cache(pycode, nameindex, map, version_tag, index) - return w_obj._mapdict_read_storage(index) + # map.find_map_attr will always return None if selector[1]==DICT. 
+ _fill_cache(pycode, nameindex, map, version_tag, attr.storageindex) + return w_obj._mapdict_read_storage(attr.storageindex) if space.config.objspace.std.withmethodcachecounter: INVALID_CACHE_ENTRY.failure_counter += 1 return space.getattr(w_obj, w_name) diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -64,7 +64,7 @@ current = Terminator(space, "cls") for i in range(20000): current = PlainAttribute((str(i), DICT), current) - assert current.index(("0", DICT)) == 0 + assert current.find_map_attr(("0", DICT)).storageindex == 0 def test_search(): @@ -107,6 +107,45 @@ assert obj2.getdictvalue(space, "b") == 60 assert obj2.map is obj.map +def test_attr_immutability(monkeypatch): + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", 10) + obj.setdictvalue(space, "b", 20) + obj.setdictvalue(space, "b", 30) + assert obj.storage == [10, 30] + assert obj.map.ever_mutated == True + assert obj.map.back.ever_mutated == False + + indices = [] + + def _pure_mapdict_read_storage(obj, storageindex): + assert storageindex == 0 + indices.append(storageindex) + return obj._mapdict_read_storage(storageindex) + + obj.map._pure_mapdict_read_storage = _pure_mapdict_read_storage + monkeypatch.setattr(jit, "isconstant", lambda c: True) + + assert obj.getdictvalue(space, "a") == 10 + assert obj.getdictvalue(space, "b") == 30 + assert obj.getdictvalue(space, "a") == 10 + assert indices == [0, 0] + + obj2 = cls.instantiate() + obj2.setdictvalue(space, "a", 15) + obj2.setdictvalue(space, "b", 25) + assert obj2.map is obj.map + assert obj2.map.ever_mutated == True + assert obj2.map.back.ever_mutated == False + + # mutating obj2 changes the map + obj2.setdictvalue(space, "a", 50) + assert obj2.map.back.ever_mutated == True + assert obj2.map is obj.map + + + def test_delete(): for i, dattr in enumerate(["a", "b", "c"]): c = Class() @@ -231,7 +270,6 @@ obj = cls.instantiate() a = 0 b = 1 - c = 2 obj.setslotvalue(a, 50) obj.setslotvalue(b, 60) assert obj.getslotvalue(a) == 50 @@ -648,7 +686,7 @@ def test_delete_slot(self): class A(object): __slots__ = ['x'] - + a = A() a.x = 42 del a.x From noreply at buildbot.pypy.org Fri Jan 24 17:51:03 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 24 Jan 2014 17:51:03 +0100 (CET) Subject: [pypy-commit] pypy default: Document this branch Message-ID: <20140124165103.20EF11C2FFE@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r68910:73e7be19afcd Date: 2014-01-24 10:50 -0600 http://bitbucket.org/pypy/pypy/changeset/73e7be19afcd/ Log: Document this branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -52,3 +52,8 @@ .. branch: annotator Remove FlowObjSpace. Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. 
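
The detect-immutable-fields entry above boils down to one extra flag on each map attribute. Below is a minimal sketch of the idea, not the actual pypy/objspace/std/mapdict.py code: it reuses the ever_mutated, storageindex, jit.isconstant and jit.elidable names from the diff, and assumes obj provides the _mapdict_read_storage / _mapdict_write_storage accessors shown there; the real implementation additionally deals with selectors, terminators and the method cache.

from rpython.rlib import jit

class PlainAttributeSketch(object):
    # the trailing '?' marks ever_mutated as quasi-immutable for the JIT
    _immutable_fields_ = ['storageindex', 'ever_mutated?']

    def __init__(self, storageindex):
        self.storageindex = storageindex
        self.ever_mutated = False

    def read(self, obj):
        if (jit.isconstant(self.storageindex) and jit.isconstant(obj)
                and not self.ever_mutated):
            # attribute assigned at most once: the lookup is elidable,
            # so the JIT may constant-fold the read
            return self._pure_read(obj)
        return obj._mapdict_read_storage(self.storageindex)

    @jit.elidable
    def _pure_read(self, obj):
        return obj._mapdict_read_storage(self.storageindex)

    def write(self, obj, w_value):
        # in the real mapdict the initial assignment that creates the
        # attribute bypasses this path, so the flag only flips when an
        # attribute is re-assigned
        if not self.ever_mutated:
            self.ever_mutated = True
        obj._mapdict_write_storage(self.storageindex, w_value)

The net effect is the one visible in the test_instance.py change further up: with a.x assigned only once, the read of a.x is folded to a constant in the trace, which is why the int_add_ovf plus overflow guard collapses to a plain int_add in the expected loop.
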
From noreply at buildbot.pypy.org Fri Jan 24 20:57:50 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 24 Jan 2014 20:57:50 +0100 (CET) Subject: [pypy-commit] pypy default: try to avoid at least some cases of interp2app identifer name clashes Message-ID: <20140124195750.6B9C81C0166@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68911:2f88f3eea121 Date: 2014-01-24 11:53 -0800 http://bitbucket.org/pypy/pypy/changeset/2f88f3eea121/ Log: try to avoid at least some cases of interp2app identifer name clashes diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -750,6 +750,8 @@ argument. """ flatten = {} + caller = sys._getframe(1) + caller_name = caller.f_globals.get('__name__') for base in inspect.getmro(M): if base is object: continue @@ -764,13 +766,17 @@ elif isinstance(value, staticmethod): func = value.__get__(42) func = func_with_new_name(func, func.__name__) + if caller_name: + # staticmethods lack a unique im_class so further + # distinguish them from themselves + func.__module__ = caller_name value = staticmethod(func) elif isinstance(value, classmethod): raise AssertionError("classmethods not supported " "in 'import_from_mixin'") flatten[key] = value # - target = sys._getframe(1).f_locals + target = caller.f_locals for key, value in flatten.items(): if key in target: raise Exception("import_from_mixin: would overwrite the value " diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -621,3 +621,14 @@ class B(A): import_from_mixin(M) assert B().foo == 42 + + d = dict(__name__='foo') + exec """class M(object): + @staticmethod + def f(): pass + """ in d + M = d['M'] + class A(object): + import_from_mixin(M) + assert A.f is not M.f + assert A.f.__module__ != M.f.__module__ From noreply at buildbot.pypy.org Fri Jan 24 21:20:06 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 24 Jan 2014 21:20:06 +0100 (CET) Subject: [pypy-commit] pypy py3k-refactor-str-types: reintegrate our bytes/bytearray Message-ID: <20140124202006.2ECD51C06CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-refactor-str-types Changeset: r68912:eeae6a72a1be Date: 2014-01-24 11:49 -0800 http://bitbucket.org/pypy/pypy/changeset/eeae6a72a1be/ Log: reintegrate our bytes/bytearray diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -233,9 +233,8 @@ raise operationerrfmt(space.w_TypeError, msg, w_result) def ord(self, space): - typename = space.type(self).getname(space) - msg = "ord() expected string of length 1, but %s found" - raise operationerrfmt(space.w_TypeError, msg, typename) + msg = "ord() expected string of length 1, but %T found" + raise operationerrfmt(space.w_TypeError, msg, self) def __spacebind__(self, space): return self diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -3,15 +3,14 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.objspace.std.bytearraytype import new_bytearray -from pypy.objspace.std.stringtype import getbytevalue, makebytesdata_w +from 
pypy.objspace.std.bytesobject import ( + getbytevalue, makebytesdata_w, newbytesdata_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.signature import Signature from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.util import get_positive_index -from rpython.rlib.objectmodel import newlist_hint, resizelist_hint, import_from_mixin +from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib.rstring import StringBuilder @@ -101,10 +100,8 @@ return False def _join_check_item(self, space, w_obj): - if (space.isinstance_w(w_obj, space.w_str) or - space.isinstance_w(w_obj, space.w_bytearray)): - return 0 - return 1 + return not (space.isinstance_w(w_obj, space.w_bytes) or + space.isinstance_w(w_obj, space.w_bytearray)) def ord(self, space): if len(self.data) != 1: @@ -134,74 +131,19 @@ "Create a bytearray object from a string of hexadecimal numbers.\n" "Spaces between two numbers are accepted.\n" "Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\\xb9\\x01\\xef')." - hexstring = space.str_w(w_hexstring) - hexstring = hexstring.lower() - data = [] - length = len(hexstring) - i = -2 - while True: - i += 2 - while i < length and hexstring[i] == ' ': - i += 1 - if i >= length: - break - if i+1 == length: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % i)) - - top = _hex_digit_to_int(hexstring[i]) - if top == -1: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % i)) - bot = _hex_digit_to_int(hexstring[i+1]) - if bot == -1: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,))) - data.append(chr(top*16 + bot)) - + if not space.is_w(space.type(w_hexstring), space.w_unicode): + raise operationerrfmt(space.w_TypeError, "must be str, not %T", + w_hexstring) + hexstring = space.unicode_w(w_hexstring) + data = _hexstring_to_array(space, hexstring) # in CPython bytearray.fromhex is a staticmethod, so # we ignore w_type and always return a bytearray return new_bytearray(space, space.w_bytearray, data) - def descr_init(self, space, __args__): - # this is on the silly side - w_source, w_encoding, w_errors = __args__.parse_obj( - None, 'bytearray', init_signature, init_defaults) - - if w_source is None: - w_source = space.wrap('') - if w_encoding is None: - w_encoding = space.w_None - if w_errors is None: - w_errors = space.w_None - - # Unicode argument - if not space.is_w(w_encoding, space.w_None): - from pypy.objspace.std.unicodeobject import ( - _get_encoding_and_errors, encode_object - ) - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) - - # if w_source is an integer this correctly raises a TypeError - # the CPython error message is: "encoding or errors without a string argument" - # ours is: "expected unicode, got int object" - w_source = encode_object(space, w_source, encoding, errors) - - # Is it an int? 
- try: - count = space.int_w(w_source) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - if count < 0: - raise OperationError(space.w_ValueError, - space.wrap("bytearray negative count")) - self.data = ['\0'] * count - return - - data = makebytearraydata_w(space, w_source) - self.data = data + @unwrap_spec(encoding='str_or_None', errors='str_or_None') + def descr_init(self, space, w_source=None, encoding=None, errors=None): + assert isinstance(self, W_BytearrayObject) + self.data = newbytesdata_w(space, w_source, encoding, errors) def descr_repr(self, space): s = self.data @@ -236,7 +178,10 @@ return space.wrap(buf.build()) def descr_str(self, space): - return space.wrap(''.join(self.data)) + if space.sys.get_flag('bytes_warning'): + space.warn(space.wrap("str() on a bytearray instance"), + space.w_BytesWarning) + return self.descr_repr(space) def descr_eq(self, space, w_other): try: @@ -310,7 +255,7 @@ if isinstance(w_index, W_SliceObject): oldsize = len(self.data) start, stop, step, slicelength = w_index.indices4(space, oldsize) - sequence2 = makebytearraydata_w(space, w_other) + sequence2 = makebytesdata_w(space, w_other) _setitem_slice_helper(space, self.data, start, step, slicelength, sequence2, empty_elem='\x00') else: @@ -341,7 +286,7 @@ if isinstance(w_other, W_BytearrayObject): self.data += w_other.data else: - self.data += makebytearraydata_w(space, w_other) + self.data += makebytesdata_w(space, w_other) return self def descr_insert(self, space, w_idx, w_other): @@ -376,64 +321,47 @@ def descr_reverse(self, space): self.data.reverse() -def getbytevalue(space, w_value): - if space.isinstance_w(w_value, space.w_str): - string = space.str_w(w_value) - if len(string) != 1: - raise OperationError(space.w_ValueError, space.wrap( - "string must be of size 1")) - return string[0] - - value = space.getindex_w(w_value, None) - if not 0 <= value < 256: - # this includes the OverflowError in case the long is too large - raise OperationError(space.w_ValueError, space.wrap( - "byte must be in range(0, 256)")) - return chr(value) - def new_bytearray(space, w_bytearraytype, data): w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) W_BytearrayObject.__init__(w_obj, data) return w_obj -def makebytearraydata_w(space, w_source): - # String-like argument - try: - string = space.bufferstr_new_w(w_source) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - return [c for c in string] - - # sequence of bytes - w_iter = space.iter(w_source) - length_hint = space.length_hint(w_source, 0) - data = newlist_hint(length_hint) - extended = 0 - while True: - try: - w_item = space.next(w_iter) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break - value = getbytevalue(space, w_item) - data.append(value) - extended += 1 - if extended < length_hint: - resizelist_hint(data, extended) - return data - def _hex_digit_to_int(d): val = ord(d) if 47 < val < 58: return val - 48 + if 64 < val < 71: + return val - 55 if 96 < val < 103: return val - 87 return -1 +def _hexstring_to_array(space, s): + data = [] + length = len(s) + i = -2 + while True: + i += 2 + while i < length and s[i] == ' ': + i += 1 + if i >= length: + break + if i + 1 == length: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % i)) + + top = _hex_digit_to_int(s[i]) + if top == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal 
number found in fromhex() arg at position %d" % i)) + bot = _hex_digit_to_int(s[i+1]) + if bot == -1: + raise OperationError(space.w_ValueError, space.wrap( + "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,))) + data.append(chr(top*16 + bot)) + return data + class BytearrayDocstrings: """bytearray(iterable_of_ints) -> bytearray @@ -867,6 +795,8 @@ doc=BytearrayDocstrings.__reduce__.__doc__), fromhex = interp2app(W_BytearrayObject.descr_fromhex, as_classmethod=True, doc=BytearrayDocstrings.fromhex.__doc__), + maketrans = interp2app(W_BytearrayObject.descr_maketrans, + as_classmethod=True), __repr__ = interp2app(W_BytearrayObject.descr_repr, doc=BytearrayDocstrings.__repr__.__doc__), @@ -1001,9 +931,6 @@ doc=BytearrayDocstrings.reverse.__doc__), ) -init_signature = Signature(['source', 'encoding', 'errors'], None, None) -init_defaults = [None, None, None] - # XXX consider moving to W_BytearrayObject or remove def str_join__Bytearray_ANY(space, w_self, w_list): @@ -1014,7 +941,7 @@ newdata = [] for i in range(len(list_w)): w_s = list_w[i] - if not (space.isinstance_w(w_s, space.w_str) or + if not (space.isinstance_w(w_s, space.w_bytes) or space.isinstance_w(w_s, space.w_bytearray)): msg = "sequence item %d: expected string, %T found" raise operationerrfmt(space.w_TypeError, msg, i, w_s) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -4,15 +4,13 @@ from pypy.interpreter.buffer import StringBuffer from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault, interpindirect2app -from pypy.objspace.std import newformat -from pypy.objspace.std.formatting import mod_format from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods -from pypy.objspace.std.unicodeobject import ( - decode_object, unicode_from_encoded_object, _get_encoding_and_errors) from rpython.rlib.jit import we_are_jitted -from rpython.rlib.objectmodel import compute_hash, compute_unique_id, import_from_mixin -from rpython.rlib.rstring import StringBuilder, replace +from rpython.rlib.objectmodel import ( + compute_hash, compute_unique_id, import_from_mixin, newlist_hint, + resizelist_hint) +from rpython.rlib.rstring import StringBuilder class W_AbstractBytesObject(W_Root): @@ -41,12 +39,6 @@ def descr_eq(self, space, w_other): """x.__eq__(y) <==> x==y""" - def descr__format__(self, space, w_format_spec): - """S.__format__(format_spec) -> string - - Return a formatted version of S as described by format_spec. - """ - def descr_ge(self, space, w_other): """x.__ge__(y) <==> x>=y""" @@ -56,12 +48,6 @@ def descr_getnewargs(self, space): "" - def descr_getslice(self, space, w_start, w_stop): - """x.__getslice__(i, j) <==> x[i:j] - - Use of negative indices is not supported. - """ - def descr_gt(self, space, w_other): """x.__gt__(y) <==> x>y""" @@ -77,9 +63,6 @@ def descr_lt(self, space, w_other): """x.__lt__(y) <==> x x%y""" - def descr_mul(self, space, w_times): """x.__mul__(n) <==> x*n""" @@ -132,17 +115,6 @@ able to handle UnicodeDecodeErrors. """ - def descr_encode(self, space, w_encoding=None, w_errors=None): - """S.encode(encoding=None, errors='strict') -> object - - Encode S using the codec registered for encoding. encoding defaults - to the default encoding. errors may be given to set a different error - handling scheme. 
Default is 'strict' meaning that encoding errors raise - a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and - 'xmlcharrefreplace' as well as any other name registered with - codecs.register_error that is able to handle UnicodeEncodeErrors. - """ - def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): """S.endswith(suffix[, start[, end]]) -> bool @@ -170,13 +142,6 @@ Return -1 on failure. """ - def descr_format(self, space, __args__): - """S.format(*args, **kwargs) -> string - - Return a formatted version of S, using substitutions from args and kwargs. - The substitutions are identified by braces ('{' and '}'). - """ - def descr_index(self, space, w_sub, w_start=None, w_end=None): """S.index(sub[, start[, end]]) -> int @@ -511,47 +476,46 @@ return space.newlist_str(lst) @staticmethod - @unwrap_spec(w_object = WrappedDefault("")) - def descr_new(space, w_stringtype, w_object): - # NB. the default value of w_object is really a *wrapped* empty string: - # there is gateway magic at work - w_obj = space.str(w_object) - if space.is_w(w_stringtype, space.w_str): - return w_obj # XXX might be reworked when space.str() typechecks - value = space.str_w(w_obj) + @unwrap_spec(encoding='str_or_None', errors='str_or_None') + def descr_new(space, w_stringtype, w_source=None, encoding=None, + errors=None): + if (w_source and space.is_w(space.type(w_source), space.w_bytes) and + space.is_w(w_stringtype, space.w_bytes)): + return w_source + value = ''.join(newbytesdata_w(space, w_source, encoding, errors)) w_obj = space.allocate_instance(W_BytesObject, w_stringtype) W_BytesObject.__init__(w_obj, value) return w_obj + @staticmethod + def descr_fromhex(space, w_type, w_hexstring): + r"""bytes.fromhex(string) -> bytes + + Create a bytes object from a string of hexadecimal numbers. + Spaces between two numbers are accepted. + Example: bytes.fromhex('B9 01EF') -> b'\xb9\x01\xef'. 
+ """ + if not space.is_w(space.type(w_hexstring), space.w_unicode): + raise operationerrfmt(space.w_TypeError, "must be str, not %T", + w_hexstring) + from pypy.objspace.std.bytearrayobject import _hexstring_to_array + hexstring = space.unicode_w(w_hexstring) + bytes = ''.join(_hexstring_to_array(space, hexstring)) + return W_BytesObject(bytes) + def descr_repr(self, space): - s = self._value - quote = "'" - if quote in s and '"' not in s: - quote = '"' - return space.wrap(string_escape_encode(s, quote)) + return space.wrap(string_escape_encode(self._value, True)) def descr_str(self, space): - if type(self) is W_BytesObject: - return self - return wrapstr(space, self._value) + if space.sys.get_flag('bytes_warning'): + space.warn(space.wrap("str() on a bytes instance"), + space.w_BytesWarning) + return self.descr_repr(space) def descr_hash(self, space): x = compute_hash(self._value) return space.wrap(x) - def descr_format(self, space, __args__): - return newformat.format_method(space, self, __args__, is_unicode=False) - - def descr__format__(self, space, w_format_spec): - if not space.isinstance_w(w_format_spec, space.w_str): - w_format_spec = space.str(w_format_spec) - spec = space.str_w(w_format_spec) - formatter = newformat.str_formatter(space, spec) - return formatter.format_string(self._value) - - def descr_mod(self, space, w_values): - return mod_format(space, self, w_values, do_unicode=False) - def descr_buffer(self, space): return space.wrap(StringBuffer(self._value)) @@ -613,10 +577,7 @@ _StringMethods_descr_add = descr_add def descr_add(self, space, w_other): - if space.isinstance_w(w_other, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return space.add(self_as_unicode, w_other) - elif space.isinstance_w(w_other, space.w_bytearray): + if space.isinstance_w(w_other, space.w_bytearray): # XXX: eliminate double-copy from .bytearrayobject import W_BytearrayObject, _make_data self_as_bytearray = W_BytearrayObject(_make_data(self._value)) @@ -635,51 +596,23 @@ return W_StringBufferObject(builder) return self._StringMethods_descr_add(space, w_other) - _StringMethods__startswith = _startswith - def _startswith(self, space, value, w_prefix, start, end): - if space.isinstance_w(w_prefix, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return self_as_unicode._startswith(space, self_as_unicode._value, w_prefix, start, end) - return self._StringMethods__startswith(space, value, w_prefix, start, end) - - _StringMethods__endswith = _endswith - def _endswith(self, space, value, w_suffix, start, end): - if space.isinstance_w(w_suffix, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return self_as_unicode._endswith(space, self_as_unicode._value, w_suffix, start, end) - return self._StringMethods__endswith(space, value, w_suffix, start, end) - _StringMethods_descr_contains = descr_contains def descr_contains(self, space, w_sub): - if space.isinstance_w(w_sub, space.w_unicode): - from pypy.objspace.std.unicodeobject import W_UnicodeObject - assert isinstance(w_sub, W_UnicodeObject) - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return space.newbool(self_as_unicode._value.find(w_sub._value) >= 0) + if space.isinstance_w(w_sub, space.w_int): + try: + char = space.int_w(w_sub) + except OperationError as e: + if e.match(space, space.w_OverflowError): + char = 256 # arbitrary value which will trigger the ValueError + # condition below + else: 
+ raise + if not 0 <= char < 256: + raise operationerrfmt(space.w_ValueError, + "character must be in range(256)") + return space.newbool(self._value.find(chr(char)) >= 0) return self._StringMethods_descr_contains(space, w_sub) - _StringMethods_descr_replace = descr_replace - @unwrap_spec(count=int) - def descr_replace(self, space, w_old, w_new, count=-1): - old_is_unicode = space.isinstance_w(w_old, space.w_unicode) - new_is_unicode = space.isinstance_w(w_new, space.w_unicode) - if old_is_unicode or new_is_unicode: - self_as_uni = unicode_from_encoded_object(space, self, None, None) - if not old_is_unicode: - w_old = unicode_from_encoded_object(space, w_old, None, None) - if not new_is_unicode: - w_new = unicode_from_encoded_object(space, w_new, None, None) - input = self_as_uni._val(space) - sub = self_as_uni._op_val(space, w_old) - by = self_as_uni._op_val(space, w_new) - try: - res = replace(input, sub, by, count) - except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string is too long")) - return self_as_uni._new(res) - return self._StringMethods_descr_replace(space, w_old, w_new, count) - def descr_lower(self, space): return W_BytesObject(self._value.lower()) @@ -687,32 +620,16 @@ return W_BytesObject(self._value.upper()) def _join_return_one(self, space, w_obj): - return (space.is_w(space.type(w_obj), space.w_str) or - space.is_w(space.type(w_obj), space.w_unicode)) + return space.is_w(space.type(w_obj), space.w_str) def _join_check_item(self, space, w_obj): - if space.isinstance_w(w_obj, space.w_str): - return 0 - if space.isinstance_w(w_obj, space.w_unicode): - return 2 - return 1 - - def _join_autoconvert(self, space, list_w): - # we need to rebuild w_list here, because the original - # w_list might be an iterable which we already consumed - w_list = space.newlist(list_w) - w_u = space.call_function(space.w_unicode, self) - return space.call_method(w_u, "join", w_list) - - def descr_formatter_parser(self, space): - from pypy.objspace.std.newformat import str_template_formatter - tformat = str_template_formatter(space, space.str_w(self)) - return tformat.formatter_parser() - - def descr_formatter_field_name_split(self, space): - from pypy.objspace.std.newformat import str_template_formatter - tformat = str_template_formatter(space, space.str_w(self)) - return tformat.formatter_field_name_split() + try: + self._op_val(space, w_obj) + except OperationError as e: + if not e.match(space, space.w_TypeError): + raise + return True + return False def _create_list_from_string(value): @@ -748,13 +665,103 @@ return W_BytesObject(c) +def getbytevalue(space, w_value): + value = space.getindex_w(w_value, None) + if not 0 <= value < 256: + # this includes the OverflowError in case the long is too large + raise OperationError(space.w_ValueError, space.wrap( + "byte must be in range(0, 256)")) + return chr(value) + +def newbytesdata_w(space, w_source, encoding, errors): + # None value + if w_source is None: + if encoding is not None or errors is not None: + raise OperationError(space.w_TypeError, space.wrap( + "encoding or errors without string argument")) + return [] + # Is it an int? 
+ try: + count = space.int_w(w_source) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + if count < 0: + raise OperationError(space.w_ValueError, + space.wrap("negative count")) + if encoding is not None or errors is not None: + raise OperationError(space.w_TypeError, space.wrap( + "encoding or errors without string argument")) + return ['\0'] * count + # Unicode with encoding + if space.isinstance_w(w_source, space.w_unicode): + if encoding is None: + raise OperationError(space.w_TypeError, space.wrap( + "string argument without an encoding")) + from pypy.objspace.std.unicodeobject import encode_object + w_source = encode_object(space, w_source, encoding, errors) + # and continue with the encoded string + + return makebytesdata_w(space, w_source) + +def makebytesdata_w(space, w_source): + w_bytes_method = space.lookup(w_source, "__bytes__") + if w_bytes_method is not None: + w_bytes = space.get_and_call_function(w_bytes_method, w_source) + if not space.isinstance_w(w_bytes, space.w_bytes): + msg = "__bytes__ returned non-bytes (type '%T')" + raise operationerrfmt(space.w_TypeError, msg, w_bytes) + return [c for c in space.bytes_w(w_bytes)] + + # String-like argument + try: + string = space.bufferstr_new_w(w_source) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + return [c for c in string] + + if space.isinstance_w(w_source, space.w_unicode): + raise OperationError( + space.w_TypeError, + space.wrap("cannot convert unicode object to bytes")) + + # sequence of bytes + w_iter = space.iter(w_source) + length_hint = space.length_hint(w_source, 0) + data = newlist_hint(length_hint) + extended = 0 + while True: + try: + w_item = space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + break + value = getbytevalue(space, w_item) + data.append(value) + extended += 1 + if extended < length_hint: + resizelist_hint(data, extended) + return data + + W_BytesObject.typedef = StdTypeDef( "bytes", __new__ = interp2app(W_BytesObject.descr_new), - __doc__ = """str(object='') -> string + __doc__ = """bytes(iterable_of_ints) -> bytes + bytes(string, encoding[, errors]) -> bytes + bytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer + bytes(int) -> bytes object of size given by the parameter initialized with null bytes + bytes() -> empty bytes object - Return a nice string representation of the object. - If the argument is a string, the return value is the same object. + Construct an immutable array of bytes from: + - an iterable yielding integers in range(256) + - a text string encoded using the specified encoding + - any object implementing the buffer API. 
+ - an integer """, __repr__ = interpindirect2app(W_AbstractBytesObject.descr_repr), @@ -776,13 +783,11 @@ __rmul__ = interpindirect2app(W_AbstractBytesObject.descr_rmul), __getitem__ = interpindirect2app(W_AbstractBytesObject.descr_getitem), - __getslice__ = interpindirect2app(W_AbstractBytesObject.descr_getslice), capitalize = interpindirect2app(W_AbstractBytesObject.descr_capitalize), center = interpindirect2app(W_AbstractBytesObject.descr_center), count = interpindirect2app(W_AbstractBytesObject.descr_count), decode = interpindirect2app(W_AbstractBytesObject.descr_decode), - encode = interpindirect2app(W_AbstractBytesObject.descr_encode), expandtabs = interpindirect2app(W_AbstractBytesObject.descr_expandtabs), find = interpindirect2app(W_AbstractBytesObject.descr_find), rfind = interpindirect2app(W_AbstractBytesObject.descr_rfind), @@ -816,14 +821,11 @@ upper = interpindirect2app(W_AbstractBytesObject.descr_upper), zfill = interpindirect2app(W_AbstractBytesObject.descr_zfill), - format = interpindirect2app(W_BytesObject.descr_format), - __format__ = interpindirect2app(W_BytesObject.descr__format__), - __mod__ = interpindirect2app(W_BytesObject.descr_mod), __buffer__ = interpindirect2app(W_AbstractBytesObject.descr_buffer), __getnewargs__ = interpindirect2app(W_AbstractBytesObject.descr_getnewargs), - _formatter_parser = interp2app(W_BytesObject.descr_formatter_parser), - _formatter_field_name_split = - interp2app(W_BytesObject.descr_formatter_field_name_split), + + fromhex = interp2app(W_BytesObject.descr_fromhex, as_classmethod=True), + maketrans = interp2app(W_BytesObject.descr_maketrans, as_classmethod=True), ) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -1,7 +1,7 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from pypy.objspace.std import slicetype -from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +from pypy.objspace.std.sliceobject import W_SliceObject from rpython.rlib import jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import ovfcheck @@ -24,6 +24,32 @@ space, lenself, w_start, w_end, upper_bound=upper_bound) return (value, start, end) + @staticmethod + def descr_maketrans(space, w_type, w_from, w_to): + """B.maketrans(frm, to) -> translation table + + Return a translation table (a bytes object of length 256) suitable + for use in the bytes or bytearray translate method where each byte + in frm is mapped to the byte at the same position in to. + The bytes objects frm and to must be of the same length. 
+ """ + from pypy.objspace.std.bytesobject import makebytesdata_w, wrapstr + + base_table = [chr(i) for i in range(256)] + list_from = makebytesdata_w(space, w_from) + list_to = makebytesdata_w(space, w_to) + + if len(list_from) != len(list_to): + raise operationerrfmt(space.w_ValueError, + "maketrans arguments must have same length") + + for i in range(len(list_from)): + pos_from = ord(list_from[i]) + char_to = list_to[i] + base_table[pos_from] = char_to + + return wrapstr(space, ''.join(base_table)) + def descr_len(self, space): return space.wrap(self._len()) @@ -90,21 +116,13 @@ if index < 0 or index >= selflen: raise OperationError(space.w_IndexError, space.wrap("string index out of range")) + from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.bytearrayobject import W_BytearrayObject - if isinstance(self, W_BytearrayObject): + if isinstance(self, W_BytesObject) or isinstance(self, W_BytearrayObject): return space.wrap(ord(selfvalue[index])) #return wrapchar(space, selfvalue[index]) return self._new(selfvalue[index]) - def descr_getslice(self, space, w_start, w_stop): - selfvalue = self._val(space) - start, stop = normalize_simple_slice(space, len(selfvalue), w_start, - w_stop) - if start == stop: - return self._empty() - else: - return self._sliced(space, selfvalue, start, stop, self) - def descr_capitalize(self, space): value = self._val(space) if len(value) == 0: @@ -139,19 +157,11 @@ return space.newint(value.count(self._op_val(space, w_sub), start, end)) def descr_decode(self, space, w_encoding=None, w_errors=None): - from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ - unicode_from_string, decode_object + from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, decode_object) encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) - if encoding is None and errors is None: - return unicode_from_string(space, self) return decode_object(space, self, encoding, errors) - def descr_encode(self, space, w_encoding=None, w_errors=None): - from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ - encode_object - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) - return encode_object(space, self, encoding, errors) - @unwrap_spec(tabsize=int) def descr_expandtabs(self, space, tabsize=8): value = self._val(space) @@ -175,6 +185,9 @@ def _tabindent(self, token, tabsize): "calculates distance behind the token to the next tabstop" + if tabsize <= 0: + return tabsize + distance = tabsize if token: distance = 0 @@ -305,16 +318,9 @@ return space.newbool(cased) def descr_join(self, space, w_list): - from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.unicodeobject import W_UnicodeObject - if isinstance(self, W_BytesObject): - l = space.listview_str(w_list) - if l is not None: - if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) - elif isinstance(self, W_UnicodeObject): + if isinstance(self, W_UnicodeObject): l = space.listview_unicode(w_list) if l is not None: if len(l) == 1: @@ -343,14 +349,11 @@ prealloc_size = len(value) * (size - 1) for i in range(size): w_s = list_w[i] - check_item = self._join_check_item(space, w_s) - if check_item == 1: + if self._join_check_item(space, w_s): raise operationerrfmt( space.w_TypeError, - "sequence item %d: expected string, %s " - "found", i, space.type(w_s).getname(space)) - elif check_item == 2: - return self._join_autoconvert(space, list_w) + "sequence item %d: expected %s, %T 
found", + i, self._generic_name(), w_s) prealloc_size += len(self._op_val(space, w_s)) sb = self._builder(prealloc_size) @@ -360,9 +363,6 @@ sb.append(self._op_val(space, list_w[i])) return self._new(sb.build()) - def _join_autoconvert(self, space, list_w): - assert False, 'unreachable' - @unwrap_spec(width=int, w_fillchar=WrappedDefault(' ')) def descr_ljust(self, space, width, w_fillchar): value = self._val(space) @@ -505,6 +505,9 @@ strs.append(value[pos:length]) return self._newlist_unwrapped(space, strs) + def _generic_name(self): + return "bytes" + def descr_startswith(self, space, w_prefix, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end, True) @@ -514,13 +517,15 @@ return space.w_True return space.w_False try: - return space.newbool(self._startswith(space, value, w_prefix, start, end)) + res = self._startswith(space, value, w_prefix, start, end) except OperationError as e: - if e.match(space, space.w_TypeError): - msg = ("startswith first arg must be str or a tuple of str, " - "not %T") - raise operationerrfmt(space.w_TypeError, msg, w_prefix) - raise + if not e.match(space, space.w_TypeError): + raise + wanted = self._generic_name() + raise operationerrfmt(space.w_TypeError, + "startswith first arg must be %s or a tuple " + "of %s, not %T", wanted, wanted, w_prefix) + return space.newbool(res) def _startswith(self, space, value, w_prefix, start, end): return startswith(value, self._op_val(space, w_prefix), start, end) @@ -535,14 +540,15 @@ return space.w_True return space.w_False try: - return space.newbool(self._endswith(space, value, w_suffix, start, - end)) + res = self._endswith(space, value, w_suffix, start, end) except OperationError as e: - if e.match(space, space.w_TypeError): - msg = ("endswith first arg must be str or a tuple of str, not " - "%T") - raise operationerrfmt(space.w_TypeError, msg, w_suffix) - raise + if not e.match(space, space.w_TypeError): + raise + wanted = self._generic_name() + raise operationerrfmt(space.w_TypeError, + "endswith first arg must be %s or a tuple " + "of %s, not %T", wanted, wanted, w_suffix) + return space.newbool(res) def _endswith(self, space, value, w_prefix, start, end): return endswith(value, self._op_val(space, w_prefix), start, end) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -109,6 +109,9 @@ _builder = UnicodeBuilder + def _generic_name(self): + return "str" + def _isupper(self, ch): return unicodedb.isupper(ord(ch)) @@ -178,16 +181,10 @@ @staticmethod def descr_maketrans(space, w_type, w_x, w_y=None, w_z=None): - if space.is_none(w_y): - y = None - else: - y = space.unicode_w(w_y) - if space.is_none(w_z): - z = None - else: - z = space.unicode_w(w_z) + y = None if space.is_none(w_y) else space.unicode_w(w_y) + z = None if space.is_none(w_z) else space.unicode_w(w_z) + w_new = space.newdict() - w_new = space.newdict() if y is not None: # x must be a string too, of equal length ylen = len(y) @@ -362,9 +359,9 @@ elif space.isinstance_w(w_newval, space.w_unicode): result.append(space.unicode_w(w_newval)) else: - raise OperationError( + raise operationerrfmt( space.w_TypeError, - space.wrap("character mapping must return integer, None or unicode")) + "character mapping must return integer, None or str") return W_UnicodeObject(u''.join(result)) def descr_encode(self, space, w_encoding=None, w_errors=None): @@ -375,10 +372,7 @@ return space.is_w(space.type(w_obj), 
space.w_unicode) def _join_check_item(self, space, w_obj): - if (space.isinstance_w(w_obj, space.w_str) or - space.isinstance_w(w_obj, space.w_unicode)): - return 0 - return 1 + return not space.isinstance_w(w_obj, space.w_unicode) def descr_isdecimal(self, space): return self._is_generic(space, '_isdecimal') @@ -415,6 +409,17 @@ return space.w_False return space.w_True + def _fix_fillchar(func): + # XXX: hack + from rpython.tool.sourcetools import func_with_new_name + func = func_with_new_name(func, func.__name__) + func.unwrap_spec = func.unwrap_spec.copy() + func.unwrap_spec['w_fillchar'] = WrappedDefault(u' ') + return func + + descr_center = _fix_fillchar(StringMethods.descr_center) + descr_ljust = _fix_fillchar(StringMethods.descr_ljust) + descr_rjust = _fix_fillchar(StringMethods.descr_rjust) def wrapunicode(space, uni): return W_UnicodeObject(uni) @@ -530,17 +535,11 @@ def unicode_from_encoded_object(space, w_obj, encoding, errors): - # explicitly block bytearray on 2.7 - from .bytearrayobject import W_BytearrayObject - if isinstance(w_obj, W_BytearrayObject): - raise OperationError(space.w_TypeError, - space.wrap("decoding bytearray is not supported")) - w_retval = decode_object(space, w_obj, encoding, errors) if not space.isinstance_w(w_retval, space.w_unicode): raise operationerrfmt(space.w_TypeError, - "decoder did not return an unicode object (type '%s')", - space.type(w_retval).getname(space)) + "decoder did not return a str object (type '%T')", + w_retval) assert isinstance(w_retval, W_UnicodeObject) return w_retval @@ -840,19 +839,6 @@ If chars is a str, it will be converted to unicode before stripping """ - def maketrans(): - """str.maketrans(x[, y[, z]]) -> dict (static method) - - Return a translation table usable for str.translate(). - If there is only one argument, it must be a dictionary mapping Unicode - ordinals (integers) or characters to Unicode ordinals, strings or None. - Character keys will be then converted to ordinals. - If there are two arguments, they must be strings of equal length, and - in the resulting dictionary, each character in x will be mapped to the - character at the same position in y. If there is a third argument, it - must be a string, whose characters will be mapped to None in the result. - """ - def partition(): """S.partition(sep) -> (head, sep, tail) @@ -1126,8 +1112,7 @@ __getnewargs__ = interp2app(W_UnicodeObject.descr_getnewargs, doc=UnicodeDocstrings.__getnewargs__.__doc__), maketrans = interp2app(W_UnicodeObject.descr_maketrans, - as_classmethod=True, - doc=UnicodeDocstrings.maketrans.__doc__) + as_classmethod=True), ) From noreply at buildbot.pypy.org Fri Jan 24 21:20:07 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 24 Jan 2014 21:20:07 +0100 (CET) Subject: [pypy-commit] pypy py3k-refactor-str-types: fix imports Message-ID: <20140124202007.641131C06CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-refactor-str-types Changeset: r68913:2c5e0c0b4e71 Date: 2014-01-24 11:52 -0800 http://bitbucket.org/pypy/pypy/changeset/2c5e0c0b4e71/ Log: fix imports diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py --- a/pypy/module/__builtin__/operation.py +++ b/pypy/module/__builtin__/operation.py @@ -19,7 +19,7 @@ object, but escape the non-ASCII characters in the string returned by repr() using \\x, \\u or \\U escapes. 
This generates a string similar to that returned by repr() in Python 2.""" - from pypy.objspace.std.unicodetype import ascii_from_object + from pypy.objspace.std.unicodeobject import ascii_from_object return ascii_from_object(space, w_obj) @unwrap_spec(code=int) diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -849,7 +849,7 @@ @unwrap_spec(data="bufferstr", errors='str_or_None') def escape_encode(space, data, errors='strict'): - from pypy.objspace.std.stringobject import string_escape_encode + from pypy.objspace.std.bytesobject import string_escape_encode result = string_escape_encode(data, False) return space.newtuple([space.wrapbytes(result), space.wrap(len(data))]) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, wrap_oserror, operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.objspace.std.stringtype import getbytevalue +from pypy.objspace.std.bytesobject import getbytevalue from rpython.rlib.clibffi import * from rpython.rlib.objectmodel import we_are_translated diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -230,7 +230,8 @@ if not ref_unicode.c_utf8buffer: # Copy unicode buffer w_unicode = from_ref(space, ref) - w_encoded = unicodetype.encode_object(space, w_unicode, "utf-8", "strict") + w_encoded = unicodeobject.encode_object(space, w_unicode, "utf-8", + "strict") s = space.bytes_w(w_encoded) ref_unicode.c_utf8buffer = rffi.str2charp(s) return ref_unicode.c_utf8buffer diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -5,7 +5,7 @@ from pypy.interpreter.module import Module from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pyparser import pyparse -from pypy.objspace.std import unicodetype +from pypy.objspace.std import unicodeobject from pypy.module._io.interp_iobase import W_IOBase from pypy.module._io import interp_io from pypy.interpreter.streamutil import wrap_streamerror @@ -81,7 +81,7 @@ stream.flush() encoding = pyparse._check_for_encoding(top) if encoding is None: - encoding = unicodetype.getdefaultencoding(space) + encoding = unicodeobject.getdefaultencoding(space) # # in python2, both CPython and PyPy pass the filename to # open(). However, CPython 3 just passes the fd, so the returned file diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -2,8 +2,6 @@ String formatting routines. 
""" from pypy.interpreter.error import OperationError -from pypy.objspace.std.unicodetype import ( - unicode_from_object, ascii_from_object) from rpython.rlib import jit from rpython.rlib.rarithmetic import ovfcheck from rpython.rlib.rfloat import formatd, DTSF_ALT, isnan, isinf @@ -445,6 +443,7 @@ self.std_wp(self.space.unicode_w(self.space.repr(w_value))) def fmt_a(self, w_value): + from pypy.objspace.std.unicodeobject import ascii_from_object w_value = ascii_from_object(self.space, w_value) self.std_wp(self.space.unicode_w(w_value)) diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -134,7 +134,7 @@ @unwrap_spec(byteorder=str, signed=bool) def descr_from_bytes(space, w_cls, w_obj, byteorder, signed=False): - from pypy.objspace.std.stringtype import makebytesdata_w + from pypy.objspace.std.bytesobject import makebytesdata_w bytes = ''.join(makebytesdata_w(space, w_obj)) try: bigint = rbigint.frombytes(bytes, byteorder=byteorder, signed=signed) diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -7,7 +7,6 @@ from rpython.rlib.objectmodel import specialize from rpython.rlib.rfloat import copysign, formatd from rpython.tool import sourcetools -from pypy.objspace.std.unicodetype import ascii_from_object @specialize.argtype(1) @@ -316,6 +315,7 @@ return space.call_function(space.w_unicode, w_obj) return space.str(w_obj) elif conv == "a": + from pypy.objspace.std.unicodeobject import ascii_from_object return ascii_from_object(space, w_obj) else: raise OperationError(self.space.w_ValueError, From noreply at buildbot.pypy.org Fri Jan 24 21:20:08 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 24 Jan 2014 21:20:08 +0100 (CET) Subject: [pypy-commit] pypy py3k-refactor-str-types: we follow the more consistent py33 behavior now Message-ID: <20140124202008.7B5491C06CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-refactor-str-types Changeset: r68914:20ec3039a327 Date: 2014-01-24 11:52 -0800 http://bitbucket.org/pypy/pypy/changeset/20ec3039a327/ Log: we follow the more consistent py33 behavior now diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -203,10 +203,10 @@ assert b'abc'.rjust(5, b'*') == b'**abc' # Python 2.4 assert b'abc'.rjust(0) == b'abc' assert b'abc'.rjust(-1) == b'abc' + assert b'abc'.rjust(5, bytearray(b' ')) == b' abc' raises(TypeError, b'abc'.rjust, 5.0) raises(TypeError, b'abc'.rjust, 5, '*') raises(TypeError, b'abc'.rjust, 5, b'xx') - raises(TypeError, b'abc'.rjust, 5, bytearray(b' ')) raises(TypeError, b'abc'.rjust, 5, 32) def test_ljust(self): @@ -290,8 +290,8 @@ assert b'abc'.center(5, b'*') == b'*abc*' # Python 2.4 assert b'abc'.center(0) == b'abc' assert b'abc'.center(-1) == b'abc' + assert b'abc'.center(5, bytearray(b' ')) == b' abc ' raises(TypeError, b'abc'.center, 4, b'cba') - raises(TypeError, b'abc'.center, 5, bytearray(b' ')) assert b' abc'.center(7) == b' abc ' def test_count(self): From noreply at buildbot.pypy.org Fri Jan 24 21:20:09 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 24 Jan 2014 21:20:09 +0100 (CET) Subject: [pypy-commit] pypy py3k: try to avoid at least some cases of interp2app identifer name clashes Message-ID: 
<20140124202009.9D5C81C06CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68915:879dea160a95 Date: 2014-01-24 11:53 -0800 http://bitbucket.org/pypy/pypy/changeset/879dea160a95/ Log: try to avoid at least some cases of interp2app identifer name clashes (grafted from 2f88f3eea121783eea13bf2d0d053eafc96e01a0) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -750,6 +750,8 @@ argument. """ flatten = {} + caller = sys._getframe(1) + caller_name = caller.f_globals.get('__name__') for base in inspect.getmro(M): if base is object: continue @@ -764,13 +766,17 @@ elif isinstance(value, staticmethod): func = value.__get__(42) func = func_with_new_name(func, func.__name__) + if caller_name: + # staticmethods lack a unique im_class so further + # distinguish them from themselves + func.__module__ = caller_name value = staticmethod(func) elif isinstance(value, classmethod): raise AssertionError("classmethods not supported " "in 'import_from_mixin'") flatten[key] = value # - target = sys._getframe(1).f_locals + target = caller.f_locals for key, value in flatten.items(): if key in target: raise Exception("import_from_mixin: would overwrite the value " diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -621,3 +621,14 @@ class B(A): import_from_mixin(M) assert B().foo == 42 + + d = dict(__name__='foo') + exec """class M(object): + @staticmethod + def f(): pass + """ in d + M = d['M'] + class A(object): + import_from_mixin(M) + assert A.f is not M.f + assert A.f.__module__ != M.f.__module__ From noreply at buildbot.pypy.org Fri Jan 24 21:21:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 24 Jan 2014 21:21:41 +0100 (CET) Subject: [pypy-commit] pypy py3k-refactor-str-types: try to avoid at least some cases of interp2app identifer name clashes Message-ID: <20140124202141.7396D1C06CD@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-refactor-str-types Changeset: r68916:3e72e557a27b Date: 2014-01-24 11:53 -0800 http://bitbucket.org/pypy/pypy/changeset/3e72e557a27b/ Log: try to avoid at least some cases of interp2app identifer name clashes (grafted from 2f88f3eea121783eea13bf2d0d053eafc96e01a0) diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -750,6 +750,8 @@ argument. 
""" flatten = {} + caller = sys._getframe(1) + caller_name = caller.f_globals.get('__name__') for base in inspect.getmro(M): if base is object: continue @@ -764,13 +766,17 @@ elif isinstance(value, staticmethod): func = value.__get__(42) func = func_with_new_name(func, func.__name__) + if caller_name: + # staticmethods lack a unique im_class so further + # distinguish them from themselves + func.__module__ = caller_name value = staticmethod(func) elif isinstance(value, classmethod): raise AssertionError("classmethods not supported " "in 'import_from_mixin'") flatten[key] = value # - target = sys._getframe(1).f_locals + target = caller.f_locals for key, value in flatten.items(): if key in target: raise Exception("import_from_mixin: would overwrite the value " diff --git a/rpython/rlib/test/test_objectmodel.py b/rpython/rlib/test/test_objectmodel.py --- a/rpython/rlib/test/test_objectmodel.py +++ b/rpython/rlib/test/test_objectmodel.py @@ -621,3 +621,14 @@ class B(A): import_from_mixin(M) assert B().foo == 42 + + d = dict(__name__='foo') + exec """class M(object): + @staticmethod + def f(): pass + """ in d + M = d['M'] + class A(object): + import_from_mixin(M) + assert A.f is not M.f + assert A.f.__module__ != M.f.__module__ From noreply at buildbot.pypy.org Fri Jan 24 21:28:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 24 Jan 2014 21:28:30 +0100 (CET) Subject: [pypy-commit] pypy default: fix numpy.float64.as_integer_ratio Message-ID: <20140124202830.22B8F1C00F8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68917:455237237e4c Date: 2014-01-24 15:24 -0500 http://bitbucket.org/pypy/pypy/changeset/455237237e4c/ Log: fix numpy.float64.as_integer_ratio diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -394,6 +394,9 @@ class W_Float64Box(W_FloatingBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") + def descr_as_integer_ratio(self, space): + return space.call_method(self.item(space), 'as_integer_ratio') + class W_ComplexFloatingBox(W_InexactBox): def descr_get_real(self, space): dtype = self._COMPONENTS_BOX._get_dtype(space) @@ -719,6 +722,7 @@ __module__ = "numpy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), __reduce__ = interp2app(W_Float64Box.descr_reduce), + as_integer_ratio = interp2app(W_Float64Box.descr_as_integer_ratio), ) W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -181,6 +181,11 @@ s = np.dtype([('a', 'int64'), ('b', 'int64')]).type('a' * 16) assert s.view('S16') == 'a' * 16 + def test_as_integer_ratio(self): + import numpy as np + raises(AttributeError, 'np.float32(1.5).as_integer_ratio()') + assert np.float64(1.5).as_integer_ratio() == (3, 2) + def test_complex_scalar_complex_cast(self): import numpy as np for tp in [np.csingle, np.cdouble, np.clongdouble]: From noreply at buildbot.pypy.org Fri Jan 24 21:57:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Jan 2014 21:57:24 +0100 (CET) Subject: [pypy-commit] pypy default: Comment out (hopefully temporarily) this check again. (Note that it was never enabled before a few days ago.) 
Message-ID: <20140124205724.D5BF71C06CD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68918:b58a2c01fd59 Date: 2014-01-24 21:56 +0100 http://bitbucket.org/pypy/pypy/changeset/b58a2c01fd59/ Log: Comment out (hopefully temporarily) this check again. (Note that it was never enabled before a few days ago.) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -594,9 +594,11 @@ if tobox is not None: # sanity check: see whether the current struct value # corresponds to what the cache thinks the value is - resbox = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC, fielddescr, box) - assert resbox.constbox().same_constant(tobox.constbox()) + # XXX pypy with the following check fails on micronumpy, + # XXX investigate + #resbox = executor.execute(self.metainterp.cpu, self.metainterp, + # rop.GETFIELD_GC, fielddescr, box) + #assert resbox.constbox().same_constant(tobox.constbox()) return tobox resbox = self.execute_with_descr(opnum, fielddescr, box) self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox) From noreply at buildbot.pypy.org Fri Jan 24 22:59:36 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 24 Jan 2014 22:59:36 +0100 (CET) Subject: [pypy-commit] pypy py3k-refactor-str-types: merge default Message-ID: <20140124215936.7EA611C1190@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-refactor-str-types Changeset: r68919:0ffad77c46f4 Date: 2014-01-24 13:49 -0800 http://bitbucket.org/pypy/pypy/changeset/0ffad77c46f4/ Log: merge default diff too long, truncating to 2000 out of 9701 lines diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys try: from __pypy__ import builtinify diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -5,7 +5,7 @@ from _ctypes.basics import is_struct_shape from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys import traceback diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import sizeof, byref, as_ffi_pointer diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi import weakref import sys diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,2 @@ +# Backward compatibility hack +from _rawffi.alt import * diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- 
a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -34,7 +34,7 @@ "struct", "_hashlib", "_md5", "_minimal_curses", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "_ffi", + "_collections", "_multibytecodec", "_continuation", "_csv", "_cffi_backend", "_posixsubprocess", "_pypyjson", # "cppyy", "micronumpy", ] @@ -43,7 +43,7 @@ translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "array", "_ffi", + "struct", "array", "binascii", # the following are needed for pyrepl (and hence for the # interactive prompt/pdb) @@ -99,7 +99,6 @@ # no _rawffi if importing rpython.rlib.clibffi raises ImportError # or CompilationError or py.test.skip.Exception "_rawffi" : ["rpython.rlib.clibffi"], - "_ffi" : ["rpython.rlib.clibffi"], "zlib" : ["rpython.rlib.rzlib"], "bz2" : ["pypy.module.bz2.interp_bz2"], diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -109,6 +109,4 @@ .. _`rpython/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ .. _`rpython/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/ .. _`rpython/translator/c/src/stacklet/stacklet.h`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/stacklet.h -.. _`rpython/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/cli/ -.. _`rpython/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/jvm/ .. _`rpython/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/ diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -210,4 +210,12 @@ are preserved. If the object dies then the pre-reserved location becomes free garbage, to be collected at the next major collection. +The exact name of this GC is either `minimark` or `incminimark`. The +latter is a version that does major collections incrementally (i.e. one +major collection is split along some number of minor collections, rather +than being done all at once after a specific minor collection). The +default is `incminimark`, as it seems to have a very minimal impact on +performance and memory usage at the benefit of avoiding the long pauses +of `minimark`. + .. include:: _ref.txt diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst --- a/pypy/doc/gc_info.rst +++ b/pypy/doc/gc_info.rst @@ -6,7 +6,7 @@ Minimark -------- -PyPy's default ``minimark`` garbage collector is configurable through +PyPy's default ``incminimark`` garbage collector is configurable through several environment variables: ``PYPY_GC_NURSERY`` @@ -14,6 +14,17 @@ Defaults to 1/2 of your cache or ``4M``. Small values (like 1 or 1KB) are useful for debugging. +``PYPY_GC_NURSERY_CLEANUP`` + The interval at which nursery is cleaned up. Must + be smaller than the nursery size and bigger than the + biggest object we can allotate in the nursery. + +``PYPY_GC_INCREMENT_STEP`` + The size of memory marked during the marking step. Default is size of + nursery times 2. If you mark it too high your GC is not incremental at + all. The minimum is set to size that survives minor collection times + 1.5 so we reclaim anything all the time. + ``PYPY_GC_MAJOR_COLLECT`` Major collection memory factor. 
Default is ``1.82``, which means trigger a major collection when the diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -48,3 +48,12 @@ .. branch: remove-del-from-generatoriterator Speed up generators that don't yield inside try or wait blocks by skipping unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1,5 +1,5 @@ from __future__ import division -import py +import py, sys from pypy.interpreter.astcompiler import codegen, astbuilder, symtable, optimize from pypy.interpreter.pyparser import pyparse from pypy.interpreter.pyparser.test import expressions @@ -968,6 +968,9 @@ class AppTestCompiler: + def setup_class(cls): + cls.w_maxunicode = cls.space.wrap(sys.maxunicode) + def test_docstring_not_loaded(self): import io, dis, sys ns = {} @@ -1002,7 +1005,17 @@ l = [a for a in Foo()] assert hint_called[0] assert l == list(range(5)) - + + def test_unicode_in_source(self): + import sys + d = {} + exec('# -*- coding: utf-8 -*-\n\nu = "\xf0\x9f\x92\x8b"', d) + if sys.maxunicode > 65535 and self.maxunicode > 65535: + expected_length = 1 + else: + expected_length = 2 + assert len(d['u']) == expected_length + class TestOptimizations: def count_instructions(self, source): diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -918,7 +918,7 @@ """ return self.unpackiterable(w_iterable, expected_length) - def listview_str(self, w_list): + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. May return None anyway. @@ -952,7 +952,7 @@ """ return (None, None) - def newlist_str(self, list_s): + def newlist_bytes(self, list_s): return self.newlist([self.wrapbytes(s) for s in list_s]) def newlist_unicode(self, list_u): diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -15,7 +15,6 @@ Yes, it's very inefficient. Yes, CPython has very similar code. """ - # we use ps as "pointer to s" # q is the virtual last char index of the string ps = 0 @@ -50,47 +49,10 @@ if unicode_literal and not rawmode: # XXX Py_UnicodeFlag is ignored for now if encoding is None: - buf = s - bufp = ps - bufq = q - u = None + assert 0 <= ps <= q + substr = s[ps:q] else: - # String is utf8-encoded, but 'unicode_escape' expects - # latin-1; So multibyte sequences must be escaped. - lis = [] # using a list to assemble the value - end = q - # Worst case: - # "ä" (2 bytes) may become "\U000000E4" (10 bytes), or 1:5 - # "\ä" (3 bytes) may become "\u005c\U000000E4" (16 bytes), - # or ~1:6 - while ps < end: - if s[ps] == '\\': - lis.append(s[ps]) - ps += 1 - if ord(s[ps]) & 0x80: - # A multibyte sequence will follow, it will be - # escaped like \u1234. 
To avoid confusion with - # the backslash we just wrote, we emit "\u005c" - # instead. - lis.append("u005c") - if ord(s[ps]) & 0x80: # XXX inefficient - w, ps = decode_utf8(space, s, ps, end, "utf-32-be") - rn = len(w) - assert rn % 4 == 0 - for i in range(0, rn, 4): - lis.append('\\U') - lis.append(hexbyte(ord(w[i]))) - lis.append(hexbyte(ord(w[i+1]))) - lis.append(hexbyte(ord(w[i+2]))) - lis.append(hexbyte(ord(w[i+3]))) - else: - lis.append(s[ps]) - ps += 1 - buf = ''.join(lis) - bufp = 0 - bufq = len(buf) - assert 0 <= bufp <= bufq - substr = buf[bufp:bufq] + substr = decode_unicode_utf8(space, s, ps, q) v = unicodehelper.decode_unicode_escape(space, substr) return space.wrap(v) @@ -120,6 +82,39 @@ result = "0" + result return result +def decode_unicode_utf8(space, s, ps, q): + # ****The Python 2.7 version, producing UTF-32 escapes**** + # String is utf8-encoded, but 'unicode_escape' expects + # latin-1; So multibyte sequences must be escaped. + lis = [] # using a list to assemble the value + end = q + # Worst case: + # "<92><195><164>" may become "\u005c\U000000E4" (16 bytes) + while ps < end: + if s[ps] == '\\': + lis.append(s[ps]) + ps += 1 + if ord(s[ps]) & 0x80: + # A multibyte sequence will follow, it will be + # escaped like \u1234. To avoid confusion with + # the backslash we just wrote, we emit "\u005c" + # instead. + lis.append("u005c") + if ord(s[ps]) & 0x80: # XXX inefficient + w, ps = decode_utf8(space, s, ps, end, "utf-32-be") + rn = len(w) + assert rn % 4 == 0 + for i in range(0, rn, 4): + lis.append('\\U') + lis.append(hexbyte(ord(w[i]))) + lis.append(hexbyte(ord(w[i+1]))) + lis.append(hexbyte(ord(w[i+2]))) + lis.append(hexbyte(ord(w[i+3]))) + else: + lis.append(s[ps]) + ps += 1 + return ''.join(lis) + def PyString_DecodeEscape(space, s, recode_encoding): """ Unescape a backslash-escaped string. 
If recode_encoding is non-zero, diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -1,10 +1,10 @@ from pypy.interpreter.pyparser import parsestring -import py +import py, sys class TestParsetring: - def parse_and_compare(self, literal, value): + def parse_and_compare(self, literal, value, encoding=None): space = self.space - w_ret = parsestring.parsestr(space, None, literal) + w_ret = parsestring.parsestr(space, encoding, literal) if isinstance(value, str): assert space.type(w_ret) == space.w_bytes assert space.bytes_w(w_ret) == value @@ -105,3 +105,18 @@ input = ["'", 'x', ' ', chr(0xc3), chr(0xa9), ' ', chr(92), 'n', "'"] w_ret = parsestring.parsestr(space, 'utf8', ''.join(input)) assert space.str_w(w_ret) == ''.join(expected) + + def test_wide_unicode_in_source(self): + if sys.maxunicode == 65535: + py.test.skip("requires a wide-unicode host") + self.parse_and_compare('u"\xf0\x9f\x92\x8b"', + unichr(0x1f48b), + encoding='utf-8') + + def test_decode_unicode_utf8(self): + buf = parsestring.decode_unicode_utf8(self.space, + 'u"\xf0\x9f\x92\x8b"', 2, 6) + if sys.maxunicode == 65535: + assert buf == r"\U0000d83d\U0000dc8b" + else: + assert buf == r"\U0001f48b" diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -3,7 +3,7 @@ from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache -from pypy.objspace.std.mapdict import IndexCache +from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -35,7 +35,7 @@ cache.misses = {} cache.hits = {} if space.config.objspace.std.withmapdict: - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) cache.misses = {} cache.hits = {} @@ -45,7 +45,7 @@ in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter assert space.config.objspace.std.withmapdict - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -81,7 +81,9 @@ l = [1, 2, 3] assert list_strategy(l) == "int" l = ["a", "b", "c"] - assert list_strategy(l) == "str" + assert list_strategy(l) == "bytes" + l = [u"a", u"b", u"c"] + assert list_strategy(l) == "unicode" l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -33,7 +33,7 @@ if self.fields_dict is None: space = self.space raise operationerrfmt(w_errorcls or space.w_TypeError, - "'%s' is not completed yet", self.name) + "'%s' is opaque or not completed yet", self.name) def _alignof(self): self.check_complete(w_errorcls=self.space.w_ValueError) diff --git a/pypy/module/_ffi/app_struct.py b/pypy/module/_ffi/app_struct.py deleted file mode 100644 --- a/pypy/module/_ffi/app_struct.py +++ /dev/null @@ -1,21 +0,0 @@ 
-import _ffi - -class MetaStructure(type): - - def __new__(cls, name, bases, dic): - cls._compute_shape(name, dic) - return type.__new__(cls, name, bases, dic) - - @classmethod - def _compute_shape(cls, name, dic): - fields = dic.get('_fields_') - if fields is None: - return - struct_descr = _ffi._StructDescr(name, fields) - for field in fields: - dic[field.name] = field - dic['_struct_'] = struct_descr - - -class Structure(metaclass=MetaStructure): - pass diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_ffi/interp_funcptr.py deleted file mode 100644 --- a/pypy/module/_ffi/interp_funcptr.py +++ /dev/null @@ -1,380 +0,0 @@ -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, wrap_oserror, \ - operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef -from pypy.module._ffi.interp_ffitype import W_FFIType -# -from rpython.rtyper.lltypesystem import lltype, rffi -# -from rpython.rlib import jit -from rpython.rlib import libffi -from rpython.rlib.clibffi import get_libc_name, StackCheckError, LibFFIError -from rpython.rlib.rdynload import DLOpenError -from rpython.rlib.rarithmetic import r_uint -from rpython.rlib.objectmodel import we_are_translated -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter -from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror - -import os -if os.name == 'nt': - def _getfunc(space, CDLL, w_name, w_argtypes, w_restype): - argtypes_w, argtypes, w_restype, restype = unpack_argtypes( - space, w_argtypes, w_restype) - if space.isinstance_w(w_name, space.w_unicode): - # XXX: support LoadLibraryW - name = space.str_w(w_name) - try: - func = CDLL.cdll.getpointer(name, argtypes, restype, - flags = CDLL.flags) - except KeyError: - raise operationerrfmt( - space.w_AttributeError, - "No symbol %s found in library %s", name, CDLL.name) - except LibFFIError: - raise got_libffi_error(space) - - return W_FuncPtr(func, argtypes_w, w_restype) - elif space.isinstance_w(w_name, space.w_int): - ordinal = space.int_w(w_name) - try: - func = CDLL.cdll.getpointer_by_ordinal( - ordinal, argtypes, restype, - flags = CDLL.flags) - except KeyError: - raise operationerrfmt( - space.w_AttributeError, - "No ordinal %d found in library %s", ordinal, CDLL.name) - except LibFFIError: - raise got_libffi_error(space) - - return W_FuncPtr(func, argtypes_w, w_restype) - else: - raise OperationError(space.w_TypeError, space.wrap( - 'function name must be a string or integer')) -else: - @unwrap_spec(name=str) - def _getfunc(space, CDLL, w_name, w_argtypes, w_restype): - name = space.str_w(w_name) - argtypes_w, argtypes, w_restype, restype = unpack_argtypes( - space, w_argtypes, w_restype) - try: - func = CDLL.cdll.getpointer(name, argtypes, restype, - flags = CDLL.flags) - except KeyError: - raise operationerrfmt( - space.w_AttributeError, - "No symbol %s found in library %s", name, CDLL.name) - except LibFFIError: - raise got_libffi_error(space) - - return W_FuncPtr(func, argtypes_w, w_restype) - -def unwrap_ffitype(space, w_argtype, allow_void=False): - res = w_argtype.get_ffitype() - if res is libffi.types.void and not allow_void: - msg = 'void is not a valid argument type' - raise OperationError(space.w_TypeError, space.wrap(msg)) - return res - - -# ======================================================================== - -class W_FuncPtr(W_Root): - - _immutable_fields_ = ['func', 'argtypes_w[*]', 'w_restype'] - 
- def __init__(self, func, argtypes_w, w_restype): - self.func = func - self.argtypes_w = argtypes_w - self.w_restype = w_restype - self.to_free = [] - - @jit.unroll_safe - def build_argchain(self, space, args_w): - expected = len(self.argtypes_w) - given = len(args_w) - if given != expected: - arg = 'arguments' - if len(self.argtypes_w) == 1: - arg = 'argument' - raise operationerrfmt(space.w_TypeError, - '%s() takes exactly %d %s (%d given)', - self.func.name, expected, arg, given) - # - argchain = libffi.ArgChain() - argpusher = PushArgumentConverter(space, argchain, self) - for i in range(expected): - w_argtype = self.argtypes_w[i] - w_arg = args_w[i] - argpusher.unwrap_and_do(w_argtype, w_arg) - return argchain - - def call(self, space, args_w): - self = jit.promote(self) - argchain = self.build_argchain(space, args_w) - func_caller = CallFunctionConverter(space, self.func, argchain) - try: - return func_caller.do_and_wrap(self.w_restype) - except StackCheckError, e: - raise OperationError(space.w_ValueError, space.wrap(e.message)) - #return self._do_call(space, argchain) - - def free_temp_buffers(self, space): - for buf in self.to_free: - if not we_are_translated(): - buf[0] = '\00' # invalidate the buffer, so that - # test_keepalive_temp_buffer can fail - lltype.free(buf, flavor='raw') - self.to_free = [] - - def getaddr(self, space): - """ - Return the physical address in memory of the function - """ - return space.wrap(rffi.cast(rffi.LONG, self.func.funcsym)) - - -class PushArgumentConverter(FromAppLevelConverter): - """ - A converter used by W_FuncPtr to unwrap the app-level objects into - low-level types and push them to the argchain. - """ - - def __init__(self, space, argchain, w_func): - FromAppLevelConverter.__init__(self, space) - self.argchain = argchain - self.w_func = w_func - - def handle_signed(self, w_ffitype, w_obj, intval): - self.argchain.arg(intval) - - def handle_unsigned(self, w_ffitype, w_obj, uintval): - self.argchain.arg(uintval) - - def handle_pointer(self, w_ffitype, w_obj, intval): - self.argchain.arg(intval) - - def handle_char(self, w_ffitype, w_obj, intval): - self.argchain.arg(intval) - - def handle_unichar(self, w_ffitype, w_obj, intval): - self.argchain.arg(intval) - - def handle_longlong(self, w_ffitype, w_obj, longlongval): - self.argchain.arg(longlongval) - - def handle_char_p(self, w_ffitype, w_obj, strval): - buf = rffi.str2charp(strval) - self.w_func.to_free.append(rffi.cast(rffi.VOIDP, buf)) - addr = rffi.cast(rffi.ULONG, buf) - self.argchain.arg(addr) - - def handle_unichar_p(self, w_ffitype, w_obj, unicodeval): - buf = rffi.unicode2wcharp(unicodeval) - self.w_func.to_free.append(rffi.cast(rffi.VOIDP, buf)) - addr = rffi.cast(rffi.ULONG, buf) - self.argchain.arg(addr) - - def handle_float(self, w_ffitype, w_obj, floatval): - self.argchain.arg(floatval) - - def handle_singlefloat(self, w_ffitype, w_obj, singlefloatval): - self.argchain.arg(singlefloatval) - - def handle_struct(self, w_ffitype, w_structinstance): - # arg_raw directly takes value to put inside ll_args - ptrval = w_structinstance.rawmem - self.argchain.arg_raw(ptrval) - - def handle_struct_rawffi(self, w_ffitype, w_structinstance): - # arg_raw directly takes value to put inside ll_args - ptrval = w_structinstance.ll_buffer - self.argchain.arg_raw(ptrval) - - -class CallFunctionConverter(ToAppLevelConverter): - """ - A converter used by W_FuncPtr to call the function, expect the result of - a correct low-level type and wrap it to the corresponding app-level type - """ - - def 
__init__(self, space, func, argchain): - ToAppLevelConverter.__init__(self, space) - self.func = func - self.argchain = argchain - - def get_longlong(self, w_ffitype): - return self.func.call(self.argchain, rffi.LONGLONG) - - def get_ulonglong(self, w_ffitype): - return self.func.call(self.argchain, rffi.ULONGLONG) - - def get_signed(self, w_ffitype): - # if the declared return type of the function is smaller than LONG, - # the result buffer may contains garbage in its higher bits. To get - # the correct value, and to be sure to handle the signed/unsigned case - # correctly, we need to cast the result to the correct type. After - # that, we cast it back to LONG, because this is what we want to pass - # to space.wrap in order to get a nice applevel . - # - restype = w_ffitype.get_ffitype() - call = self.func.call - if restype is libffi.types.slong: - return call(self.argchain, rffi.LONG) - elif restype is libffi.types.sint: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.INT)) - elif restype is libffi.types.sshort: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.SHORT)) - elif restype is libffi.types.schar: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.SIGNEDCHAR)) - else: - self.error(w_ffitype) - - def get_unsigned(self, w_ffitype): - return self.func.call(self.argchain, rffi.ULONG) - - def get_unsigned_which_fits_into_a_signed(self, w_ffitype): - # the same comment as get_signed apply - restype = w_ffitype.get_ffitype() - call = self.func.call - if restype is libffi.types.uint: - assert not libffi.IS_32_BIT - # on 32bit machines, we should never get here, because it's a case - # which has already been handled by get_unsigned above. - return rffi.cast(rffi.LONG, call(self.argchain, rffi.UINT)) - elif restype is libffi.types.ushort: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.USHORT)) - elif restype is libffi.types.uchar: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.UCHAR)) - else: - self.error(w_ffitype) - - - def get_pointer(self, w_ffitype): - ptrres = self.func.call(self.argchain, rffi.VOIDP) - return rffi.cast(rffi.ULONG, ptrres) - - def get_char(self, w_ffitype): - return self.func.call(self.argchain, rffi.UCHAR) - - def get_unichar(self, w_ffitype): - return self.func.call(self.argchain, rffi.WCHAR_T) - - def get_float(self, w_ffitype): - return self.func.call(self.argchain, rffi.DOUBLE) - - def get_singlefloat(self, w_ffitype): - return self.func.call(self.argchain, rffi.FLOAT) - - def get_struct(self, w_ffitype, w_structdescr): - addr = self.func.call(self.argchain, rffi.LONG, is_struct=True) - return w_structdescr.fromaddress(self.space, addr) - - def get_struct_rawffi(self, w_ffitype, w_structdescr): - uintval = self.func.call(self.argchain, rffi.ULONG, is_struct=True) - return w_structdescr.fromaddress(self.space, uintval) - - def get_void(self, w_ffitype): - return self.func.call(self.argchain, lltype.Void) - - -def unpack_argtypes(space, w_argtypes, w_restype): - argtypes_w = [space.interp_w(W_FFIType, w_argtype) - for w_argtype in space.listview(w_argtypes)] - argtypes = [unwrap_ffitype(space, w_argtype) for w_argtype in - argtypes_w] - w_restype = space.interp_w(W_FFIType, w_restype) - restype = unwrap_ffitype(space, w_restype, allow_void=True) - return argtypes_w, argtypes, w_restype, restype - - at unwrap_spec(addr=r_uint, name=str, flags=int) -def descr_fromaddr(space, w_cls, addr, name, w_argtypes, - w_restype, flags=libffi.FUNCFLAG_CDECL): - argtypes_w, argtypes, w_restype, restype = unpack_argtypes(space, - w_argtypes, - 
w_restype) - addr = rffi.cast(rffi.VOIDP, addr) - try: - func = libffi.Func(name, argtypes, restype, addr, flags) - return W_FuncPtr(func, argtypes_w, w_restype) - except LibFFIError: - raise got_libffi_error(space) - - -W_FuncPtr.typedef = TypeDef( - '_ffi.FuncPtr', - __call__ = interp2app(W_FuncPtr.call), - getaddr = interp2app(W_FuncPtr.getaddr), - free_temp_buffers = interp2app(W_FuncPtr.free_temp_buffers), - fromaddr = interp2app(descr_fromaddr, as_classmethod=True) - ) - - - -# ======================================================================== - -class W_CDLL(W_Root): - def __init__(self, space, name, mode): - self.flags = libffi.FUNCFLAG_CDECL - self.space = space - if name is None: - self.name = "" - else: - self.name = name - try: - self.cdll = libffi.CDLL(name, mode) - except DLOpenError, e: - raise wrap_dlopenerror(space, e, self.name) - - def getfunc(self, space, w_name, w_argtypes, w_restype): - return _getfunc(space, self, w_name, w_argtypes, w_restype) - - @unwrap_spec(name=str) - def getaddressindll(self, space, name): - try: - address_as_uint = rffi.cast(lltype.Unsigned, - self.cdll.getaddressindll(name)) - except KeyError: - raise operationerrfmt( - space.w_ValueError, - "No symbol %s found in library %s", name, self.name) - return space.wrap(address_as_uint) - - at unwrap_spec(name='str_or_None', mode=int) -def descr_new_cdll(space, w_type, name, mode=-1): - return space.wrap(W_CDLL(space, name, mode)) - - -W_CDLL.typedef = TypeDef( - '_ffi.CDLL', - __new__ = interp2app(descr_new_cdll), - getfunc = interp2app(W_CDLL.getfunc), - getaddressindll = interp2app(W_CDLL.getaddressindll), - ) - -class W_WinDLL(W_CDLL): - def __init__(self, space, name, mode): - W_CDLL.__init__(self, space, name, mode) - self.flags = libffi.FUNCFLAG_STDCALL - - at unwrap_spec(name='str_or_None', mode=int) -def descr_new_windll(space, w_type, name, mode=-1): - return space.wrap(W_WinDLL(space, name, mode)) - - -W_WinDLL.typedef = TypeDef( - '_ffi.WinDLL', - __new__ = interp2app(descr_new_windll), - getfunc = interp2app(W_WinDLL.getfunc), - getaddressindll = interp2app(W_WinDLL.getaddressindll), - ) - -# ======================================================================== - -def get_libc(space): - try: - return space.wrap(W_CDLL(space, get_libc_name(), -1)) - except OSError, e: - raise wrap_oserror(space, e) - diff --git a/pypy/module/_ffi/test/test_funcptr.py b/pypy/module/_ffi/test/test_funcptr.py deleted file mode 100644 --- a/pypy/module/_ffi/test/test_funcptr.py +++ /dev/null @@ -1,643 +0,0 @@ -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib.clibffi import get_libc_name -from rpython.rlib.libffi import types -from rpython.rlib.libffi import CDLL -from rpython.rlib.test.test_clibffi import get_libm_name - -import sys, py - -class BaseAppTestFFI(object): - spaceconfig = dict(usemodules=('_ffi', '_rawffi')) - - @classmethod - def prepare_c_example(cls): - from rpython.tool.udir import udir - from rpython.translator.tool.cbuild import ExternalCompilationInfo - from rpython.translator.platform import platform - - c_file = udir.ensure("test__ffi", dir=1).join("foolib.c") - # automatically collect the C source from the docstrings of the tests - snippets = [""" - #ifdef _WIN32 - #define DLLEXPORT __declspec(dllexport) - #else - #define DLLEXPORT - #endif - """] - for name in dir(cls): - if name.startswith('test_'): - meth = getattr(cls, name) - # the heuristic to determine it it's really C code could be - # improved: so far we just check that there is a '{' :-) - if 
meth.__doc__ is not None and '{' in meth.__doc__: - snippets.append(meth.__doc__) - # - c_file.write(py.code.Source('\n'.join(snippets))) - eci = ExternalCompilationInfo(export_symbols=[]) - return str(platform.compile([c_file], eci, 'x', standalone=False)) - - def setup_class(cls): - space = cls.space - cls.w_iswin32 = space.wrap(sys.platform == 'win32') - cls.w_libfoo_name = space.wrap(cls.prepare_c_example()) - cls.w_libc_name = space.wrap(get_libc_name()) - libm_name = get_libm_name(sys.platform) - cls.w_libm_name = space.wrap(libm_name) - libm = CDLL(libm_name) - pow = libm.getpointer('pow', [], types.void) - pow_addr = rffi.cast(rffi.LONG, pow.funcsym) - cls._libm = libm # otherwise it gets unloaded - argh! - cls.w_pow_addr = space.wrap(pow_addr) - -class AppTestFFI(BaseAppTestFFI): - - def setup_class(cls): - BaseAppTestFFI.setup_class.im_func(cls) - space = cls.space - # these are needed for test_single_float_args - from ctypes import c_float - f_12_34 = c_float(12.34).value - f_56_78 = c_float(56.78).value - f_result = c_float(f_12_34 + f_56_78).value - cls.w_f_12_34_plus_56_78 = space.wrap(f_result) - - def test_libload(self): - import _ffi - _ffi.CDLL(self.libc_name) - - def test_libload_fail(self): - import _ffi - raises(OSError, _ffi.CDLL, "xxxxx_this_name_does_not_exist_xxxxx") - - def test_libload_None(self): - if self.iswin32: - skip("unix specific") - from _ffi import CDLL, types - # this should return *all* loaded libs, dlopen(NULL) - dll = CDLL(None) - # libm should be loaded - res = dll.getfunc('sqrt', [types.double], types.double)(1.0) - assert res == 1.0 - - def test_callfunc(self): - from _ffi import CDLL, types - libm = CDLL(self.libm_name) - pow = libm.getfunc('pow', [types.double, types.double], types.double) - assert pow(2, 3) == 8 - - @py.test.mark.skipif("py.test.config.option.runappdirect") - def test_getaddr(self): - from _ffi import CDLL, types - libm = CDLL(self.libm_name) - pow = libm.getfunc('pow', [types.double, types.double], types.double) - assert pow.getaddr() == self.pow_addr - - @py.test.mark.skipif("py.test.config.option.runappdirect") - def test_getaddressindll(self): - import sys - from _ffi import CDLL - libm = CDLL(self.libm_name) - pow_addr = libm.getaddressindll('pow') - fff = sys.maxsize*2-1 - assert pow_addr == self.pow_addr & fff - - def test_func_fromaddr(self): - from _ffi import CDLL, types, FuncPtr - libm = CDLL(self.libm_name) - pow_addr = libm.getaddressindll('pow') - pow = FuncPtr.fromaddr(pow_addr, 'pow', [types.double, types.double], - types.double) - assert pow(2, 3) == 8 - - def test_int_args(self): - """ - DLLEXPORT int sum_xy(int x, int y) - { - return x+y; - } - """ - import sys - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy', [types.sint, types.sint], types.sint) - assert sum_xy(30, 12) == 42 - assert sum_xy(sys.maxsize*2, 0) == -2 - - def test_void_result(self): - """ - int dummy = 0; - DLLEXPORT void set_dummy(int val) { dummy = val; } - DLLEXPORT int get_dummy() { return dummy; } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - set_dummy = libfoo.getfunc('set_dummy', [types.sint], types.void) - get_dummy = libfoo.getfunc('get_dummy', [], types.sint) - assert get_dummy() == 0 - assert set_dummy(42) is None - assert get_dummy() == 42 - set_dummy(0) - - def test_pointer_args(self): - """ - extern int dummy; // defined in test_void_result - DLLEXPORT int* get_dummy_ptr() { return &dummy; } - DLLEXPORT void set_val_to_ptr(int* ptr, int val) { *ptr = 
val; } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - get_dummy = libfoo.getfunc('get_dummy', [], types.sint) - get_dummy_ptr = libfoo.getfunc('get_dummy_ptr', [], types.void_p) - set_val_to_ptr = libfoo.getfunc('set_val_to_ptr', - [types.void_p, types.sint], - types.void) - assert get_dummy() == 0 - ptr = get_dummy_ptr() - set_val_to_ptr(ptr, 123) - assert get_dummy() == 123 - set_val_to_ptr(ptr, 0) - - def test_convert_pointer_args(self): - """ - extern int dummy; // defined in test_void_result - DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args - DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto - """ - from _ffi import CDLL, types - - class MyPointerWrapper(object): - def __init__(self, value): - self.value = value - def _as_ffi_pointer_(self, ffitype): - assert ffitype is types.void_p - return self.value - - libfoo = CDLL(self.libfoo_name) - get_dummy = libfoo.getfunc('get_dummy', [], types.sint) - get_dummy_ptr = libfoo.getfunc('get_dummy_ptr', [], types.void_p) - set_val_to_ptr = libfoo.getfunc('set_val_to_ptr', - [types.void_p, types.sint], - types.void) - assert get_dummy() == 0 - ptr = get_dummy_ptr() - assert type(ptr) is int - ptr2 = MyPointerWrapper(ptr) - set_val_to_ptr(ptr2, 123) - assert get_dummy() == 123 - set_val_to_ptr(ptr2, 0) - - def test_convert_strings_to_char_p(self): - """ - DLLEXPORT - long mystrlen(char* s) - { - long len = 0; - while(*s++) - len++; - return len; - } - """ - from _ffi import CDLL, types - import _rawffi - libfoo = CDLL(self.libfoo_name) - mystrlen = libfoo.getfunc('mystrlen', [types.char_p], types.slong) - # - # first, try automatic conversion from a string - assert mystrlen(b'foobar') == 6 - # then, try to pass an explicit pointer - CharArray = _rawffi.Array('c') - mystr = CharArray(7, b'foobar') - assert mystrlen(mystr.buffer) == 6 - mystr.free() - mystrlen.free_temp_buffers() - - def test_convert_unicode_to_unichar_p(self): - """ - #include - DLLEXPORT - long mystrlen_u(wchar_t* s) - { - long len = 0; - while(*s++) - len++; - return len; - } - """ - from _ffi import CDLL, types - import _rawffi - libfoo = CDLL(self.libfoo_name) - mystrlen = libfoo.getfunc('mystrlen_u', [types.unichar_p], types.slong) - # - # first, try automatic conversion from strings and unicode - assert mystrlen('foobar') == 6 - assert mystrlen('foobar') == 6 - assert mystrlen('ab\u2070') == 3 - # then, try to pass an explicit pointer - UniCharArray = _rawffi.Array('u') - mystr = UniCharArray(7, 'foobar') - assert mystrlen(mystr.buffer) == 6 - mystr.free() - mystrlen.free_temp_buffers() - - def test_keepalive_temp_buffer(self): - """ - DLLEXPORT - char* do_nothing(char* s) - { - return s; - } - """ - from _ffi import CDLL, types - import _rawffi - libfoo = CDLL(self.libfoo_name) - do_nothing = libfoo.getfunc('do_nothing', [types.char_p], types.char_p) - CharArray = _rawffi.Array('c') - # - ptr = do_nothing(b'foobar') - array = CharArray.fromaddress(ptr, 7) - assert bytes(array) == b'foobar\00' - do_nothing.free_temp_buffers() - - def test_typed_pointer_args(self): - """ - extern int dummy; // defined in test_void_result - DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args - DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto - """ - from _ffi import CDLL, types - - libfoo = CDLL(self.libfoo_name) - intptr = types.Pointer(types.sint) - get_dummy = libfoo.getfunc('get_dummy', [], types.sint) - get_dummy_ptr = libfoo.getfunc('get_dummy_ptr', [], intptr) - set_val_to_ptr = libfoo.getfunc('set_val_to_ptr', 
[intptr, types.sint], types.void) - assert get_dummy() == 0 - ptr = get_dummy_ptr() - set_val_to_ptr(ptr, 123) - assert get_dummy() == 123 - set_val_to_ptr(ptr, 0) - - def test_huge_pointer_args(self): - """ - #include - DLLEXPORT long is_null_ptr(void* ptr) { return ptr == NULL; } - """ - import sys - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - is_null_ptr = libfoo.getfunc('is_null_ptr', [types.void_p], types.ulong) - assert not is_null_ptr(sys.maxsize+1) - - def test_unsigned_long_args(self): - """ - DLLEXPORT unsigned long sum_xy_ul(unsigned long x, unsigned long y) - { - return x+y; - } - """ - import sys - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_ul', [types.ulong, types.ulong], - types.ulong) - assert sum_xy(sys.maxsize, 12) == sys.maxsize+12 - assert sum_xy(sys.maxsize+1, 12) == sys.maxsize+13 - # - res = sum_xy(sys.maxsize*2+3, 0) - assert res == 1 - - def test_unsigned_short_args(self): - """ - DLLEXPORT unsigned short sum_xy_us(unsigned short x, unsigned short y) - { - return x+y; - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_us', [types.ushort, types.ushort], - types.ushort) - assert sum_xy(32000, 8000) == 40000 - assert sum_xy(60000, 30000) == 90000 % 65536 - - def test_unsigned_byte_args(self): - """ - DLLEXPORT unsigned char sum_xy_ub(unsigned char x, unsigned char y) - { - return x+y; - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_us', [types.ubyte, types.ubyte], - types.ubyte) - assert sum_xy(100, 40) == 140 - assert sum_xy(200, 60) == 260 % 256 - - def test_unsigned_int_args(self): - r""" - DLLEXPORT unsigned int sum_xy_ui(unsigned int x, unsigned int y) - { - return x+y; - } - """ - import sys - from _ffi import CDLL, types - maxint32 = 2147483647 - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_ui', [types.uint, types.uint], - types.uint) - assert sum_xy(maxint32, 1) == maxint32+1 - assert sum_xy(maxint32, maxint32+2) == 0 - - def test_signed_byte_args(self): - """ - DLLEXPORT signed char sum_xy_sb(signed char x, signed char y) - { - return x+y; - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_sb', [types.sbyte, types.sbyte], - types.sbyte) - assert sum_xy(10, 20) == 30 - assert sum_xy(100, 28) == -128 - - def test_char_args(self): - """ - DLLEXPORT char my_toupper(char x) - { - return x - ('a'-'A'); - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - my_toupper = libfoo.getfunc('my_toupper', [types.char], - types.char) - assert my_toupper('c') == 'C' - - def test_unichar_args(self): - """ - #include - DLLEXPORT wchar_t sum_xy_wc(wchar_t x, wchar_t y) - { - return x + y; - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_wc', [types.unichar, types.unichar], - types.unichar) - res = sum_xy(chr(1000), chr(2000)) - assert type(res) is str - assert ord(res) == 3000 - - def test_single_float_args(self): - """ - DLLEXPORT float sum_xy_float(float x, float y) - { - return x+y; - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_float', [types.float, types.float], - types.float) - res = sum_xy(12.34, 56.78) - assert res == self.f_12_34_plus_56_78 - - - def test_slonglong_args(self): - """ - DLLEXPORT long long sum_xy_longlong(long long x, long long y) - { - return 
x+y; - } - """ - from _ffi import CDLL, types - maxint32 = 2147483647 # we cannot really go above maxint on 64 bits - # (and we would not test anything, as there long - # is the same as long long) - - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_longlong', [types.slonglong, types.slonglong], - types.slonglong) - x = maxint32+1 - y = maxint32+2 - res = sum_xy(x, y) - expected = maxint32*2 + 3 - assert res == expected - - def test_ulonglong_args(self): - """ - DLLEXPORT unsigned long long sum_xy_ulonglong(unsigned long long x, - unsigned long long y) - { - return x+y; - } - """ - from _ffi import CDLL, types - maxint64 = 9223372036854775807 # maxint64+1 does not fit into a - # longlong, but it does into a - # ulonglong - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_ulonglong', [types.ulonglong, types.ulonglong], - types.ulonglong) - x = maxint64+1 - y = 2 - res = sum_xy(x, y) - expected = maxint64 + 3 - assert res == expected - # - res = sum_xy(maxint64*2+3, 0) - assert res == 1 - - def test_byval_argument(self): - """ - struct Point { - long x; - long y; - }; - - DLLEXPORT long sum_point(struct Point p) { - return p.x + p.y; - } - """ - from _ffi import CDLL, types, _StructDescr, Field - Point = _StructDescr('Point', [ - Field('x', types.slong), - Field('y', types.slong), - ]) - libfoo = CDLL(self.libfoo_name) - sum_point = libfoo.getfunc('sum_point', [Point.ffitype], types.slong) - # - p = Point.allocate() - p.setfield('x', 30) - p.setfield('y', 12) - res = sum_point(p) - assert res == 42 - - def test_byval_result(self): - """ - DLLEXPORT struct Point make_point(long x, long y) { - struct Point p; - p.x = x; - p.y = y; - return p; - } - """ - from _ffi import CDLL, types, _StructDescr, Field - Point = _StructDescr('Point', [ - Field('x', types.slong), - Field('y', types.slong), - ]) - libfoo = CDLL(self.libfoo_name) - make_point = libfoo.getfunc('make_point', [types.slong, types.slong], - Point.ffitype) - # - p = make_point(12, 34) - assert p.getfield('x') == 12 - assert p.getfield('y') == 34 - - # XXX: support for _rawffi structures should be killed as soon as we - # implement ctypes.Structure on top of _ffi. 
In the meantime, we support - # both - def test_byval_argument__rawffi(self): - """ - // defined above - struct Point; - DLLEXPORT long sum_point(struct Point p); - """ - import _rawffi - from _ffi import CDLL, types - POINT = _rawffi.Structure([('x', 'l'), ('y', 'l')]) - ffi_point = POINT.get_ffi_type() - libfoo = CDLL(self.libfoo_name) - sum_point = libfoo.getfunc('sum_point', [ffi_point], types.slong) - # - p = POINT() - p.x = 30 - p.y = 12 - res = sum_point(p) - assert res == 42 - p.free() - - def test_byval_result__rawffi(self): - """ - // defined above - DLLEXPORT struct Point make_point(long x, long y); - """ - import _rawffi - from _ffi import CDLL, types - POINT = _rawffi.Structure([('x', 'l'), ('y', 'l')]) - ffi_point = POINT.get_ffi_type() - libfoo = CDLL(self.libfoo_name) - make_point = libfoo.getfunc('make_point', [types.slong, types.slong], ffi_point) - # - p = make_point(12, 34) - assert p.x == 12 - assert p.y == 34 - p.free() - - - def test_TypeError_numargs(self): - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy', [types.sint, types.sint], types.sint) - raises(TypeError, "sum_xy(1, 2, 3)") - raises(TypeError, "sum_xy(1)") - - def test_TypeError_voidarg(self): - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - raises(TypeError, "libfoo.getfunc('sum_xy', [types.void], types.sint)") - - def test_OSError_loading(self): - from _ffi import CDLL, types - raises(OSError, "CDLL('I do not exist')") - - def test_AttributeError_missing_function(self): - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - raises(AttributeError, "libfoo.getfunc('I_do_not_exist', [], types.void)") - if self.iswin32: - skip("unix specific") - libnone = CDLL(None) - raises(AttributeError, "libnone.getfunc('I_do_not_exist', [], types.void)") - - def test_calling_convention1(self): - if not self.iswin32: - skip("windows specific") - from _ffi import WinDLL, types - libm = WinDLL(self.libm_name) - pow = libm.getfunc('pow', [types.double, types.double], types.double) - try: - pow(2, 3) - except ValueError as e: - assert str(e).startswith('Procedure called with') - else: - assert 0, 'test must assert, wrong calling convention' - - def test_calling_convention2(self): - if not self.iswin32: - skip("windows specific") - from _ffi import WinDLL, types - kernel = WinDLL('Kernel32.dll') - sleep = kernel.getfunc('Sleep', [types.uint], types.void) - sleep(10) - - def test_calling_convention3(self): - if not self.iswin32: - skip("windows specific") - from _ffi import CDLL, types - wrong_kernel = CDLL('Kernel32.dll') - wrong_sleep = wrong_kernel.getfunc('Sleep', [types.uint], types.void) - try: - wrong_sleep(10) - except ValueError as e: - assert str(e).startswith('Procedure called with') - else: - assert 0, 'test must assert, wrong calling convention' - - def test_func_fromaddr2(self): - if not self.iswin32: - skip("windows specific") - from _ffi import CDLL, types, FuncPtr - from _rawffi import FUNCFLAG_STDCALL - libm = CDLL(self.libm_name) - pow_addr = libm.getaddressindll('pow') - wrong_pow = FuncPtr.fromaddr(pow_addr, 'pow', - [types.double, types.double], types.double, FUNCFLAG_STDCALL) - try: - wrong_pow(2, 3) == 8 - except ValueError as e: - assert str(e).startswith('Procedure called with') - else: - assert 0, 'test must assert, wrong calling convention' - - def test_func_fromaddr3(self): - if not self.iswin32: - skip("windows specific") - from _ffi import WinDLL, types, FuncPtr - from _rawffi import FUNCFLAG_STDCALL - kernel = 
WinDLL('Kernel32.dll') - sleep_addr = kernel.getaddressindll('Sleep') - sleep = FuncPtr.fromaddr(sleep_addr, 'sleep', [types.uint], - types.void, FUNCFLAG_STDCALL) - sleep(10) - - def test_by_ordinal(self): - """ - int DLLEXPORT AAA_first_ordinal_function() - { - return 42; - } - """ - if not self.iswin32: - skip("windows specific") - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - f_name = libfoo.getfunc('AAA_first_ordinal_function', [], types.sint) - f_ordinal = libfoo.getfunc(1, [], types.sint) - assert f_name.getaddr() == f_ordinal.getaddr() diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_ffi/test/test_struct.py deleted file mode 100644 --- a/pypy/module/_ffi/test/test_struct.py +++ /dev/null @@ -1,331 +0,0 @@ -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.module._ffi.interp_ffitype import app_types, W_FFIType -from pypy.module._ffi.interp_struct import compute_size_and_alignement, W_Field -from pypy.module._ffi.test.test_funcptr import BaseAppTestFFI - - -class TestStruct(object): - - class FakeSpace(object): - def interp_w(self, cls, obj): - return obj - - def compute(self, ffitypes_w): - fields_w = [W_Field('', w_ffitype) for - w_ffitype in ffitypes_w] - return compute_size_and_alignement(self.FakeSpace(), fields_w) - - def sizeof(self, ffitypes_w): - size, aligned, fields_w = self.compute(ffitypes_w) - return size - - def test_compute_size(self): - T = app_types - byte_size = app_types.sbyte.sizeof() - long_size = app_types.slong.sizeof() - llong_size = app_types.slonglong.sizeof() - llong_align = app_types.slonglong.get_alignment() - # - assert llong_align >= 4 - assert self.sizeof([T.sbyte, T.slong]) == 2*long_size - assert self.sizeof([T.sbyte, T.slonglong]) == llong_align + llong_size - assert self.sizeof([T.sbyte, T.sbyte, T.slonglong]) == llong_align + llong_size - assert self.sizeof([T.sbyte, T.sbyte, T.sbyte, T.slonglong]) == llong_align + llong_size - assert self.sizeof([T.sbyte, T.sbyte, T.sbyte, T.sbyte, T.slonglong]) == llong_align + llong_size - assert self.sizeof([T.slonglong, T.sbyte]) == llong_size + llong_align - assert self.sizeof([T.slonglong, T.sbyte, T.sbyte]) == llong_size + llong_align - assert self.sizeof([T.slonglong, T.sbyte, T.sbyte, T.sbyte]) == llong_size + llong_align - assert self.sizeof([T.slonglong, T.sbyte, T.sbyte, T.sbyte, T.sbyte]) == llong_size + llong_align - - -class AppTestStruct(BaseAppTestFFI): - - @classmethod - def read_raw_mem(cls, addr, typename, length): - import ctypes - addr = ctypes.cast(addr, ctypes.c_void_p) - c_type = getattr(ctypes, typename) - array_type = ctypes.POINTER(c_type * length) - ptr_array = ctypes.cast(addr, array_type) - array = ptr_array[0] - lst = [array[i] for i in range(length)] - return lst - - def setup_class(cls): - BaseAppTestFFI.setup_class.im_func(cls) - - if cls.runappdirect: - cls.w_read_raw_mem = cls.read_raw_mem - else: - @unwrap_spec(addr=int, typename=str, length=int) - def read_raw_mem_w(space, addr, typename, length): - return space.wrap(cls.read_raw_mem(addr, typename, length)) - cls.w_read_raw_mem = cls.space.wrap(interp2app(read_raw_mem_w)) - # - from rpython.rlib import clibffi - from rpython.rlib.rarithmetic import r_uint - from rpython.rtyper.lltypesystem import lltype, rffi - dummy_type = lltype.malloc(clibffi.FFI_TYPE_P.TO, flavor='raw') - dummy_type.c_size = r_uint(123) - dummy_type.c_alignment = rffi.cast(rffi.USHORT, 0) - dummy_type.c_type = rffi.cast(rffi.USHORT, 0) - cls.w_dummy_type = W_FFIType('dummy', dummy_type) - 
cls.w_runappdirect = cls.space.wrap(cls.runappdirect) - - def test__StructDescr(self): - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.slong), - Field('y', types.slong), - ] - descr = _StructDescr('foo', fields) - assert descr.ffitype.sizeof() == longsize*2 - assert descr.ffitype.name == 'struct foo' - - def test_alignment(self): - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.sbyte), - Field('y', types.slong), - ] - descr = _StructDescr('foo', fields) - assert descr.ffitype.sizeof() == longsize*2 - assert fields[0].offset == 0 - assert fields[1].offset == longsize # aligned to WORD - - def test_missing_field(self): - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.slong), - Field('y', types.slong), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - raises(AttributeError, "struct.getfield('missing')") - raises(AttributeError, "struct.setfield('missing', 42)") - - def test_unknown_type(self): - if self.runappdirect: - skip('cannot use self.dummy_type with -A') - from _ffi import _StructDescr, Field - fields = [ - Field('x', self.dummy_type), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - raises(TypeError, "struct.getfield('x')") - raises(TypeError, "struct.setfield('x', 42)") - - def test_getfield_setfield(self): - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.slong), - Field('y', types.slong), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - struct.setfield('x', 42) - struct.setfield('y', 43) - assert struct.getfield('x') == 42 - assert struct.getfield('y') == 43 - mem = self.read_raw_mem(struct.getaddr(), 'c_long', 2) - assert mem == [42, 43] - - def test_getfield_setfield_signed_types(self): - import sys - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('sbyte', types.sbyte), - Field('sshort', types.sshort), - Field('sint', types.sint), - Field('slong', types.slong), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - struct.setfield('sbyte', 128) - assert struct.getfield('sbyte') == -128 - struct.setfield('sshort', 32768) - assert struct.getfield('sshort') == -32768 - struct.setfield('sint', 43) - assert struct.getfield('sint') == 43 - struct.setfield('slong', sys.maxsize+1) - assert struct.getfield('slong') == -sys.maxsize-1 - struct.setfield('slong', sys.maxsize*3) - assert struct.getfield('slong') == sys.maxsize-2 - - def test_getfield_setfield_unsigned_types(self): - import sys - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('ubyte', types.ubyte), - Field('ushort', types.ushort), - Field('uint', types.uint), - Field('ulong', types.ulong), - Field('char', types.char), - Field('unichar', types.unichar), - Field('ptr', types.void_p), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - struct.setfield('ubyte', -1) - assert struct.getfield('ubyte') == 255 - struct.setfield('ushort', -1) - assert struct.getfield('ushort') == 65535 - struct.setfield('uint', 43) - assert struct.getfield('uint') == 43 - struct.setfield('ulong', -1) - assert struct.getfield('ulong') == sys.maxsize*2 + 1 - struct.setfield('ulong', sys.maxsize*2 + 2) - assert struct.getfield('ulong') == 0 - struct.setfield('char', 'a') - assert struct.getfield('char') 
== 'a' - struct.setfield('unichar', '\u1234') - assert struct.getfield('unichar') == '\u1234' - struct.setfield('ptr', -1) - assert struct.getfield('ptr') == sys.maxsize*2 + 1 - - def test_getfield_setfield_longlong(self): - import sys - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('slonglong', types.slonglong), - Field('ulonglong', types.ulonglong), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - struct.setfield('slonglong', 9223372036854775808) - assert struct.getfield('slonglong') == -9223372036854775808 - struct.setfield('ulonglong', -1) - assert struct.getfield('ulonglong') == 18446744073709551615 - mem = self.read_raw_mem(struct.getaddr(), 'c_longlong', 2) - assert mem == [-9223372036854775808, -1] - - def test_getfield_setfield_float(self): - import sys - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.double), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - struct.setfield('x', 123.4) - assert struct.getfield('x') == 123.4 - mem = self.read_raw_mem(struct.getaddr(), 'c_double', 1) - assert mem == [123.4] - - def test_getfield_setfield_singlefloat(self): - import sys - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.float), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - struct.setfield('x', 123.4) # this is a value which DOES loose - # precision in a single float - assert 0 < abs(struct.getfield('x') - 123.4) < 0.0001 - # - struct.setfield('x', 123.5) # this is a value which does not loose - # precision in a single float - assert struct.getfield('x') == 123.5 - mem = self.read_raw_mem(struct.getaddr(), 'c_float', 1) - assert mem == [123.5] - - def test_define_fields(self): - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.slong), - Field('y', types.slong), - ] - descr = _StructDescr('foo') - assert descr.ffitype.name == 'struct foo' - assert repr(descr.ffitype) == '' - raises(ValueError, "descr.ffitype.sizeof()") - raises(ValueError, "descr.allocate()") - # - descr.define_fields(fields) - assert repr(descr.ffitype) == '' - assert descr.ffitype.sizeof() == longsize*2 - raises(ValueError, "descr.define_fields(fields)") - - def test_pointer_to_incomplete_struct(self): - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.slong), - Field('y', types.slong), - ] - descr = _StructDescr('foo') - foo_ffitype = descr.ffitype - foo_p = types.Pointer(descr.ffitype) - assert foo_p.deref_pointer() is foo_ffitype - descr.define_fields(fields) - assert descr.ffitype is foo_ffitype - assert foo_p.deref_pointer() is foo_ffitype - assert types.Pointer(descr.ffitype) is foo_p - - def test_nested_structure(self): - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - foo_fields = [ - Field('x', types.slong), - Field('y', types.slong), - ] - foo_descr = _StructDescr('foo', foo_fields) - # - bar_fields = [ - Field('x', types.slong), - Field('foo', foo_descr.ffitype), - ] - bar_descr = _StructDescr('bar', bar_fields) - assert bar_descr.ffitype.sizeof() == longsize*3 - # - struct = bar_descr.allocate() - struct.setfield('x', 40) - # reading a nested structure yields a reference to it - struct_foo = struct.getfield('foo') - struct_foo.setfield('x', 41) - struct_foo.setfield('y', 42) - mem = 
self.read_raw_mem(struct.getaddr(), 'c_long', 3) - assert mem == [40, 41, 42] - # - struct_foo2 = foo_descr.allocate() - struct_foo2.setfield('x', 141) - struct_foo2.setfield('y', 142) - # writing a nested structure copies its memory into the target - struct.setfield('foo', struct_foo2) - struct_foo2.setfield('x', 241) - struct_foo2.setfield('y', 242) - mem = self.read_raw_mem(struct.getaddr(), 'c_long', 3) - assert mem == [40, 141, 142] - mem = self.read_raw_mem(struct_foo2.getaddr(), 'c_long', 2) - assert mem == [241, 242] - - - - def test_compute_shape(self): - from _ffi import Structure, Field, types - class Point(Structure): - _fields_ = [ - Field('x', types.slong), - Field('y', types.slong), - ] - - longsize = types.slong.sizeof() - assert isinstance(Point.x, Field) - assert isinstance(Point.y, Field) - assert Point.x.offset == 0 - assert Point.y.offset == longsize - assert Point._struct_.ffitype.sizeof() == longsize*2 - assert Point._struct_.ffitype.name == 'struct Point' - diff --git a/pypy/module/_ffi/test/test_type_converter.py b/pypy/module/_ffi/test/test_type_converter.py deleted file mode 100644 --- a/pypy/module/_ffi/test/test_type_converter.py +++ /dev/null @@ -1,170 +0,0 @@ -import sys -from rpython.rlib.rarithmetic import r_uint, r_singlefloat, r_longlong, r_ulonglong -from rpython.rlib.libffi import IS_32_BIT -from pypy.module._ffi.interp_ffitype import app_types, descr_new_pointer -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter - -class DummyFromAppLevelConverter(FromAppLevelConverter): - - def handle_all(self, w_ffitype, w_obj, val): - self.lastval = val - - handle_signed = handle_all - handle_unsigned = handle_all - handle_pointer = handle_all - handle_char = handle_all - handle_unichar = handle_all - handle_longlong = handle_all - handle_char_p = handle_all - handle_unichar_p = handle_all - handle_float = handle_all - handle_singlefloat = handle_all - - def handle_struct(self, w_ffitype, w_structinstance): - self.lastval = w_structinstance - - def convert(self, w_ffitype, w_obj): - self.unwrap_and_do(w_ffitype, w_obj) - return self.lastval - - -class TestFromAppLevel(object): - spaceconfig = dict(usemodules=('_ffi',)) - - def setup_class(cls): - converter = DummyFromAppLevelConverter(cls.space) - cls.from_app_level = staticmethod(converter.convert) - - def check(self, w_ffitype, w_obj, expected): - v = self.from_app_level(w_ffitype, w_obj) - assert v == expected - assert type(v) is type(expected) - - def test_int(self): - self.check(app_types.sint, self.space.wrap(42), 42) - self.check(app_types.sint, self.space.wrap(sys.maxint+1), -sys.maxint-1) - self.check(app_types.sint, self.space.wrap(sys.maxint*2), -2) - - def test_unsigned(self): - space = self.space - self.check(app_types.uint, space.wrap(42), r_uint(42)) - self.check(app_types.uint, space.wrap(-1), r_uint(sys.maxint*2 +1)) - self.check(app_types.uint, space.wrap(sys.maxint*3), - r_uint(sys.maxint - 2)) - self.check(app_types.ulong, space.wrap(sys.maxint+12), - r_uint(sys.maxint+12)) - self.check(app_types.ulong, space.wrap(sys.maxint*2+3), r_uint(1)) - - def test_char(self): - space = self.space - self.check(app_types.char, space.wrap('a'), ord('a')) - self.check(app_types.unichar, space.wrap(u'\u1234'), 0x1234) - - def test_signed_longlong(self): - space = self.space - maxint32 = 2147483647 # we cannot really go above maxint on 64 bits - # (and we would not test anything, as there long - # is the same as long long) - expected = maxint32+1 - if IS_32_BIT: - expected = 
r_longlong(expected) - self.check(app_types.slonglong, space.wrap(maxint32+1), expected) - - def test_unsigned_longlong(self): - space = self.space - maxint64 = 9223372036854775807 # maxint64+1 does not fit into a - # longlong, but it does into a - # ulonglong - if IS_32_BIT: - # internally, the type converter always casts to signed longlongs - expected = r_longlong(-maxint64-1) - else: - # on 64 bit, ulonglong == uint (i.e., unsigned long in C terms) - expected = r_uint(maxint64+1) - self.check(app_types.ulonglong, space.wrap(maxint64+1), expected) - - def test_float_and_double(self): - space = self.space - self.check(app_types.float, space.wrap(12.34), r_singlefloat(12.34)) - self.check(app_types.double, space.wrap(12.34), 12.34) - - def test_pointer(self): - # pointers are "unsigned" at applevel, but signed at interp-level (for - # no good reason, at interp-level Signed or Unsigned makes no - # difference for passing bits around) - space = self.space - self.check(app_types.void_p, space.wrap(42), 42) - self.check(app_types.void_p, space.wrap(sys.maxint+1), -sys.maxint-1) - # - # typed pointers - w_ptr_sint = descr_new_pointer(space, None, app_types.sint) - self.check(w_ptr_sint, space.wrap(sys.maxint+1), -sys.maxint-1) - - - def test__as_ffi_pointer_(self): - space = self.space - w_MyPointerWrapper = space.appexec([], """(): - import _ffi - class MyPointerWrapper(object): - def __init__(self, value): - self.value = value - def _as_ffi_pointer_(self, ffitype): - assert ffitype is _ffi.types.void_p - return self.value - - return MyPointerWrapper - """) - w_obj = space.call_function(w_MyPointerWrapper, space.wrap(42)) - self.check(app_types.void_p, w_obj, 42) - - def test_strings(self): - # first, try automatic conversion from applevel - self.check(app_types.char_p, self.space.wrapbytes('foo'), 'foo') - self.check(app_types.unichar_p, self.space.wrap(u'foo\u1234'), u'foo\u1234') - self.check(app_types.unichar_p, self.space.wrap('foo'), u'foo') - # then, try to pass explicit pointers - self.check(app_types.char_p, self.space.wrap(42), 42) - self.check(app_types.unichar_p, self.space.wrap(42), 42) - - - -class DummyToAppLevelConverter(ToAppLevelConverter): - - def get_all(self, w_ffitype): - return self.val - - get_signed = get_all - get_unsigned = get_all - get_pointer = get_all - get_char = get_all - get_unichar = get_all - get_longlong = get_all - get_char_p = get_all - get_unichar_p = get_all - get_float = get_all - get_singlefloat = get_all - get_unsigned_which_fits_into_a_signed = get_all - - def convert(self, w_ffitype, val): - self.val = val - return self.do_and_wrap(w_ffitype) - - -class TestToAppLevel(object): - spaceconfig = dict(usemodules=('_ffi',)) - - def setup_class(cls): - converter = DummyToAppLevelConverter(cls.space) - cls.from_app_level = staticmethod(converter.convert) - - def check(self, w_ffitype, val, w_expected): - w_v = self.from_app_level(w_ffitype, val) - assert self.space.eq_w(w_v, w_expected) - - def test_int(self): - self.check(app_types.sint, 42, self.space.wrap(42)) - self.check(app_types.sint, -sys.maxint-1, self.space.wrap(-sys.maxint-1)) - - def test_uint(self): - self.check(app_types.uint, 42, self.space.wrap(42)) - self.check(app_types.uint, r_uint(sys.maxint+1), self.space.wrap(sys.maxint+1)) diff --git a/pypy/module/_multiprocessing/test/test_memory.py b/pypy/module/_multiprocessing/test/test_memory.py --- a/pypy/module/_multiprocessing/test/test_memory.py +++ b/pypy/module/_multiprocessing/test/test_memory.py @@ -1,6 +1,6 @@ class AppTestMemory: 
spaceconfig = dict(usemodules=('_multiprocessing', 'mmap', - '_rawffi', '_ffi')) + '_rawffi')) def test_address_of(self): import _multiprocessing diff --git a/pypy/module/_rawffi/__init__.py b/pypy/module/_rawffi/__init__.py --- a/pypy/module/_rawffi/__init__.py +++ b/pypy/module/_rawffi/__init__.py @@ -2,6 +2,7 @@ """ From noreply at buildbot.pypy.org Fri Jan 24 22:59:37 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 24 Jan 2014 22:59:37 +0100 (CET) Subject: [pypy-commit] pypy py3k-refactor-str-types: close before merging Message-ID: <20140124215937.9BC421C1190@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-refactor-str-types Changeset: r68920:74d93872d6bd Date: 2014-01-24 13:50 -0800 http://bitbucket.org/pypy/pypy/changeset/74d93872d6bd/ Log: close before merging From noreply at buildbot.pypy.org Fri Jan 24 22:59:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 24 Jan 2014 22:59:40 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge py3k-refactor-str-types Message-ID: <20140124215940.67B0E1C1190@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68921:e6e6e777920d Date: 2014-01-24 13:51 -0800 http://bitbucket.org/pypy/pypy/changeset/e6e6e777920d/ Log: merge py3k-refactor-str-types diff too long, truncating to 2000 out of 19625 lines diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. 
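
(A quick illustration of the inconsistency this comment describes, under the Python 2 semantics of the lib-python/2.7 test above. The per-interpreter behaviour is taken from the comment and the memoryview docstring in this changeset; b"abcdef" is just the test's data.)

    m = memoryview(b"abcdef")
    m == u"abcdef"      # False on CPython, True on PyPy, where memoryview supports buffer()
    try:
        buffer(m)       # accepted on PyPy; raises TypeError on CPython 2.7
    except TypeError:
        pass
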
See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys try: from __pypy__ import builtinify diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -5,7 +5,7 @@ from _ctypes.basics import is_struct_shape from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys import traceback diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import sizeof, byref, as_ffi_pointer diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi import weakref import sys diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -2,6 +2,8 @@ import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject +from _ctypes.array import Array +from _ctypes.pointer import _Pointer import inspect def names_and_fields(self, _fields_, superclass, anonymous_fields=None): @@ -104,8 +106,11 @@ def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = keepalive_key(self.num) + key = keepalive_key(self.num) + if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array): + # if our value is an Array we need the whole thing alive + store_reference(obj, key, cobj) + elif ensure_objects(cobj) is not None: store_reference(obj, key, cobj._objects) arg = cobj._get_buffer_value() if fieldtype._fficompositesize is not None: diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,2 @@ +# Backward compatibility hack +from _rawffi.alt import * diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -34,7 +34,7 @@ "struct", "_hashlib", "_md5", "_minimal_curses", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "_ffi", + "_collections", 
"_multibytecodec", "_continuation", "_csv", "_cffi_backend", "_posixsubprocess", "_pypyjson", # "cppyy", "micronumpy", ] @@ -43,7 +43,7 @@ translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "array", "_ffi", + "struct", "array", "binascii", # the following are needed for pyrepl (and hence for the # interactive prompt/pdb) @@ -99,7 +99,6 @@ # no _rawffi if importing rpython.rlib.clibffi raises ImportError # or CompilationError or py.test.skip.Exception "_rawffi" : ["rpython.rlib.clibffi"], - "_ffi" : ["rpython.rlib.clibffi"], "zlib" : ["rpython.rlib.rzlib"], "bz2" : ["pypy.module.bz2.interp_bz2"], diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -109,6 +109,4 @@ .. _`rpython/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ .. _`rpython/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/ .. _`rpython/translator/c/src/stacklet/stacklet.h`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/stacklet.h -.. _`rpython/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/cli/ -.. _`rpython/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/jvm/ .. _`rpython/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/ diff --git a/pypy/doc/config/translation.lldebug0.txt b/pypy/doc/config/translation.lldebug0.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/translation.lldebug0.txt @@ -0,0 +1,1 @@ +Like lldebug, but in addition compile C files with -O0 diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -210,4 +210,12 @@ are preserved. If the object dies then the pre-reserved location becomes free garbage, to be collected at the next major collection. +The exact name of this GC is either `minimark` or `incminimark`. The +latter is a version that does major collections incrementally (i.e. one +major collection is split along some number of minor collections, rather +than being done all at once after a specific minor collection). The +default is `incminimark`, as it seems to have a very minimal impact on +performance and memory usage at the benefit of avoiding the long pauses +of `minimark`. + .. include:: _ref.txt diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst --- a/pypy/doc/gc_info.rst +++ b/pypy/doc/gc_info.rst @@ -6,7 +6,7 @@ Minimark -------- -PyPy's default ``minimark`` garbage collector is configurable through +PyPy's default ``incminimark`` garbage collector is configurable through several environment variables: ``PYPY_GC_NURSERY`` @@ -14,6 +14,17 @@ Defaults to 1/2 of your cache or ``4M``. Small values (like 1 or 1KB) are useful for debugging. +``PYPY_GC_NURSERY_CLEANUP`` + The interval at which nursery is cleaned up. Must + be smaller than the nursery size and bigger than the + biggest object we can allotate in the nursery. + +``PYPY_GC_INCREMENT_STEP`` + The size of memory marked during the marking step. Default is size of + nursery times 2. If you mark it too high your GC is not incremental at + all. The minimum is set to size that survives minor collection times + 1.5 so we reclaim anything all the time. + ``PYPY_GC_MAJOR_COLLECT`` Major collection memory factor. 
Default is ``1.82``, which means trigger a major collection when the diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -41,3 +41,19 @@ Fix 3 broken links on PyPy published papers in docs. .. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1,5 +1,5 @@ from __future__ import division -import py +import py, sys from pypy.interpreter.astcompiler import codegen, astbuilder, symtable, optimize from pypy.interpreter.pyparser import pyparse from pypy.interpreter.pyparser.test import expressions @@ -968,6 +968,9 @@ class AppTestCompiler: + def setup_class(cls): + cls.w_maxunicode = cls.space.wrap(sys.maxunicode) + def test_docstring_not_loaded(self): import io, dis, sys ns = {} @@ -1002,7 +1005,17 @@ l = [a for a in Foo()] assert hint_called[0] assert l == list(range(5)) - + + def test_unicode_in_source(self): + import sys + d = {} + exec('# -*- coding: utf-8 -*-\n\nu = "\xf0\x9f\x92\x8b"', d) + if sys.maxunicode > 65535 and self.maxunicode > 65535: + expected_length = 1 + else: + expected_length = 2 + assert len(d['u']) == expected_length + class TestOptimizations: def count_instructions(self, source): diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -232,6 +232,10 @@ msg = "__int__ returned non-int (type '%T')" raise operationerrfmt(space.w_TypeError, msg, w_result) + def ord(self, space): + msg = "ord() expected string of length 1, but %T found" + raise operationerrfmt(space.w_TypeError, msg, self) + def __spacebind__(self, space): return self @@ -914,7 +918,7 @@ """ return self.unpackiterable(w_iterable, expected_length) - def listview_str(self, w_list): + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. May return None anyway. @@ -948,7 +952,7 @@ """ return (None, None) - def newlist_str(self, list_s): + def newlist_bytes(self, list_s): return self.newlist([self.wrapbytes(s) for s in list_s]) def newlist_unicode(self, list_u): @@ -1450,6 +1454,9 @@ # This is here mostly just for gateway.int_unwrapping_space_method(). return bool(self.int_w(w_obj)) + def ord(self, w_obj): + return w_obj.ord(self) + # This is all interface for gateway.py. def gateway_int_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -538,12 +538,13 @@ # When a BuiltinCode is stored in a Function object, # you get the functionality of CPython's built-in function type. 
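
(The PYPY_GC_* names documented in the gc_info.rst hunk above are ordinary environment variables read at interpreter startup; a hedged sketch of setting them for a child PyPy process. The variable names come from that hunk, while the values, the pypy executable name and myscript.py are placeholders.)

    import os, subprocess

    env = dict(os.environ,
               PYPY_GC_NURSERY="4M",            # nursery size
               PYPY_GC_NURSERY_CLEANUP="512K",  # nursery cleanup interval
               PYPY_GC_INCREMENT_STEP="8M",     # memory marked per incremental step
               PYPY_GC_MAJOR_COLLECT="1.82")    # major collection memory factor
    subprocess.call(["pypy", "myscript.py"], env=env)
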
- def __init__(self, func, unwrap_spec=None, self_type=None, descrmismatch=None): + def __init__(self, func, unwrap_spec=None, self_type=None, + descrmismatch=None, doc=None): "NOT_RPYTHON" # 'implfunc' is the interpreter-level function. # Note that this uses a lot of (construction-time) introspection. Code.__init__(self, func.__name__) - self.docstring = func.__doc__ + self.docstring = doc or func.__doc__ self.identifier = "%s-%s-%s" % (func.__module__, func.__name__, getattr(self_type, '__name__', '*')) @@ -850,7 +851,7 @@ instancecache = {} def __new__(cls, f, app_name=None, unwrap_spec=None, descrmismatch=None, - as_classmethod=False): + as_classmethod=False, doc=None): "NOT_RPYTHON" # f must be a function whose name does NOT start with 'app_' @@ -879,7 +880,8 @@ cls.instancecache[key] = self self._code = BuiltinCode(f, unwrap_spec=unwrap_spec, self_type=self_type, - descrmismatch=descrmismatch) + descrmismatch=descrmismatch, + doc=doc) self.__name__ = f.func_name self.name = app_name self.as_classmethod = as_classmethod diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -170,7 +170,7 @@ def run(self): """Start this frame's execution.""" if self.is_generator(): - if pycode.CO_YIELD_INSIDE_TRY: + if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: from pypy.interpreter.generator import GeneratorIteratorWithDel return self.space.wrap(GeneratorIteratorWithDel(self)) else: diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py --- a/pypy/interpreter/pyparser/parsestring.py +++ b/pypy/interpreter/pyparser/parsestring.py @@ -15,7 +15,6 @@ Yes, it's very inefficient. Yes, CPython has very similar code. """ - # we use ps as "pointer to s" # q is the virtual last char index of the string ps = 0 @@ -50,47 +49,10 @@ if unicode_literal and not rawmode: # XXX Py_UnicodeFlag is ignored for now if encoding is None: - buf = s - bufp = ps - bufq = q - u = None + assert 0 <= ps <= q + substr = s[ps:q] else: - # String is utf8-encoded, but 'unicode_escape' expects - # latin-1; So multibyte sequences must be escaped. - lis = [] # using a list to assemble the value - end = q - # Worst case: - # "ä" (2 bytes) may become "\U000000E4" (10 bytes), or 1:5 - # "\ä" (3 bytes) may become "\u005c\U000000E4" (16 bytes), - # or ~1:6 - while ps < end: - if s[ps] == '\\': - lis.append(s[ps]) - ps += 1 - if ord(s[ps]) & 0x80: - # A multibyte sequence will follow, it will be - # escaped like \u1234. To avoid confusion with - # the backslash we just wrote, we emit "\u005c" - # instead. - lis.append("u005c") - if ord(s[ps]) & 0x80: # XXX inefficient - w, ps = decode_utf8(space, s, ps, end, "utf-32-be") - rn = len(w) - assert rn % 4 == 0 - for i in range(0, rn, 4): - lis.append('\\U') - lis.append(hexbyte(ord(w[i]))) - lis.append(hexbyte(ord(w[i+1]))) - lis.append(hexbyte(ord(w[i+2]))) - lis.append(hexbyte(ord(w[i+3]))) - else: - lis.append(s[ps]) - ps += 1 - buf = ''.join(lis) - bufp = 0 - bufq = len(buf) - assert 0 <= bufp <= bufq - substr = buf[bufp:bufq] + substr = decode_unicode_utf8(space, s, ps, q) v = unicodehelper.decode_unicode_escape(space, substr) return space.wrap(v) @@ -120,6 +82,39 @@ result = "0" + result return result +def decode_unicode_utf8(space, s, ps, q): + # ****The Python 2.7 version, producing UTF-32 escapes**** + # String is utf8-encoded, but 'unicode_escape' expects + # latin-1; So multibyte sequences must be escaped. 
+ lis = [] # using a list to assemble the value + end = q + # Worst case: + # "<92><195><164>" may become "\u005c\U000000E4" (16 bytes) + while ps < end: + if s[ps] == '\\': + lis.append(s[ps]) + ps += 1 + if ord(s[ps]) & 0x80: + # A multibyte sequence will follow, it will be + # escaped like \u1234. To avoid confusion with + # the backslash we just wrote, we emit "\u005c" + # instead. + lis.append("u005c") + if ord(s[ps]) & 0x80: # XXX inefficient + w, ps = decode_utf8(space, s, ps, end, "utf-32-be") + rn = len(w) + assert rn % 4 == 0 + for i in range(0, rn, 4): + lis.append('\\U') + lis.append(hexbyte(ord(w[i]))) + lis.append(hexbyte(ord(w[i+1]))) + lis.append(hexbyte(ord(w[i+2]))) + lis.append(hexbyte(ord(w[i+3]))) + else: + lis.append(s[ps]) + ps += 1 + return ''.join(lis) + def PyString_DecodeEscape(space, s, recode_encoding): """ Unescape a backslash-escaped string. If recode_encoding is non-zero, diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -1,10 +1,10 @@ from pypy.interpreter.pyparser import parsestring -import py +import py, sys class TestParsetring: - def parse_and_compare(self, literal, value): + def parse_and_compare(self, literal, value, encoding=None): space = self.space - w_ret = parsestring.parsestr(space, None, literal) + w_ret = parsestring.parsestr(space, encoding, literal) if isinstance(value, str): assert space.type(w_ret) == space.w_bytes assert space.bytes_w(w_ret) == value @@ -105,3 +105,18 @@ input = ["'", 'x', ' ', chr(0xc3), chr(0xa9), ' ', chr(92), 'n', "'"] w_ret = parsestring.parsestr(space, 'utf8', ''.join(input)) assert space.str_w(w_ret) == ''.join(expected) + + def test_wide_unicode_in_source(self): + if sys.maxunicode == 65535: + py.test.skip("requires a wide-unicode host") + self.parse_and_compare('u"\xf0\x9f\x92\x8b"', + unichr(0x1f48b), + encoding='utf-8') + + def test_decode_unicode_utf8(self): + buf = parsestring.decode_unicode_utf8(self.space, + 'u"\xf0\x9f\x92\x8b"', 2, 6) + if sys.maxunicode == 65535: + assert buf == r"\U0000d83d\U0000dc8b" + else: + assert buf == r"\U0001f48b" diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -720,6 +720,18 @@ never_called py.test.raises(AssertionError, space.wrap, gateway.interp2app_temp(g)) + def test_interp2app_doc(self): + space = self.space + def f(space, w_x): + """foo""" + w_f = space.wrap(gateway.interp2app_temp(f)) + assert space.unwrap(space.getattr(w_f, space.wrap('__doc__'))) == 'foo' + # + def g(space, w_x): + never_called + w_g = space.wrap(gateway.interp2app_temp(g, doc='bar')) + assert space.unwrap(space.getattr(w_g, space.wrap('__doc__'))) == 'bar' + def test_unwrap_spec_default_bytes(self): space = self.space @gateway.unwrap_spec(s='bufferstr') diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -94,7 +94,7 @@ return space.wrapbytes(bytes) def encode(space, w_data, encoding=None, errors='strict'): - from pypy.objspace.std.unicodetype import encode_object + from pypy.objspace.std.unicodeobject import encode_object return encode_object(space, w_data, encoding, errors) # These functions take and return unwrapped rpython strings and unicodes diff --git 
a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -63,10 +63,14 @@ return W_MemoryView(buf) def descr_buffer(self, space): - """Note that memoryview() objects in PyPy support buffer(), whereas - not in CPython; but CPython supports passing memoryview() to most - built-in functions that accept buffers, with the notable exception - of the buffer() built-in.""" + """ + Note that memoryview() is very inconsistent in CPython: it does not + support the buffer interface but does support the new buffer + interface: as a result, it is possible to pass memoryview to + e.g. socket.send() but not to file.write(). For simplicity and + consistency, in PyPy memoryview DOES support buffer(), which means + that it is accepted in more places than CPython. + """ self._check_released(space) return space.wrap(self.buf) diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py --- a/pypy/module/__builtin__/operation.py +++ b/pypy/module/__builtin__/operation.py @@ -19,7 +19,7 @@ object, but escape the non-ASCII characters in the string returned by repr() using \\x, \\u or \\U escapes. This generates a string similar to that returned by repr() in Python 2.""" - from pypy.objspace.std.unicodetype import ascii_from_object + from pypy.objspace.std.unicodeobject import ascii_from_object return ascii_from_object(space, w_obj) @unwrap_spec(code=int) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -3,7 +3,7 @@ from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache -from pypy.objspace.std.mapdict import IndexCache +from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -35,7 +35,7 @@ cache.misses = {} cache.hits = {} if space.config.objspace.std.withmapdict: - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) cache.misses = {} cache.hits = {} @@ -45,7 +45,7 @@ in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter assert space.config.objspace.std.withmapdict - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -81,7 +81,9 @@ l = [1, 2, 3] assert list_strategy(l) == "int" l = ["a", "b", "c"] - assert list_strategy(l) == "str" + assert list_strategy(l) == "bytes" + l = [u"a", u"b", u"c"] + assert list_strategy(l) == "unicode" l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -33,7 +33,7 @@ if self.fields_dict is None: space = self.space raise operationerrfmt(w_errorcls or space.w_TypeError, - "'%s' is not completed yet", self.name) + "'%s' is opaque or not completed yet", self.name) def _alignof(self): self.check_complete(w_errorcls=self.space.w_ValueError) diff --git 
a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -118,6 +118,7 @@ SF_MSVC_BITFIELDS = 1 SF_GCC_ARM_BITFIELDS = 2 SF_GCC_BIG_ENDIAN = 4 +SF_PACKED = 8 if sys.platform == 'win32': DEFAULT_SFLAGS = SF_MSVC_BITFIELDS @@ -190,8 +191,8 @@ boffset = 0 # reset each field at offset 0 # # update the total alignment requirement, but skip it if the - # field is an anonymous bitfield - falign = ftype.alignof() + # field is an anonymous bitfield or if SF_PACKED + falign = 1 if sflags & SF_PACKED else ftype.alignof() do_align = True if (sflags & SF_GCC_ARM_BITFIELDS) == 0 and fbitsize >= 0: if (sflags & SF_MSVC_BITFIELDS) == 0: @@ -305,6 +306,12 @@ if bits_already_occupied + fbitsize > 8 * ftype.size: # it would not fit, we need to start at the next # allowed position + if ((sflags & SF_PACKED) != 0 and + (bits_already_occupied & 7) != 0): + raise operationerrfmt(space.w_NotImplementedError, + "with 'packed', gcc would compile field " + "'%s.%s' to reuse some bits in the previous " + "field", w_ctype.name, fname) field_offset_bytes += falign assert boffset < field_offset_bytes * 8 boffset = field_offset_bytes * 8 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3137,6 +3137,44 @@ p = newp(BArray, None) assert sizeof(p[2:9]) == 7 * sizeof(BInt) +def test_packed(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BShort = new_primitive_type("short") + BStruct = new_struct_type("struct foo") + complete_struct_or_union(BStruct, [('a1', BLong, -1), + ('a2', BChar, -1), + ('a3', BShort, -1)], + None, -1, -1, 8) # SF_PACKED==8 + d = BStruct.fields + assert len(d) == 3 + assert d[0][0] == 'a1' + assert d[0][1].type is BLong + assert d[0][1].offset == 0 + assert d[0][1].bitshift == -1 + assert d[0][1].bitsize == -1 + assert d[1][0] == 'a2' + assert d[1][1].type is BChar + assert d[1][1].offset == sizeof(BLong) + assert d[1][1].bitshift == -1 + assert d[1][1].bitsize == -1 + assert d[2][0] == 'a3' + assert d[2][1].type is BShort + assert d[2][1].offset == sizeof(BLong) + sizeof(BChar) + assert d[2][1].bitshift == -1 + assert d[2][1].bitsize == -1 + assert sizeof(BStruct) == sizeof(BLong) + sizeof(BChar) + sizeof(BShort) + assert alignof(BStruct) == 1 + +def test_packed_with_bitfields(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BStruct = new_struct_type("struct foo") + py.test.raises(NotImplementedError, + complete_struct_or_union, + BStruct, [('a1', BLong, 30), + ('a2', BChar, 5)], + None, -1, -1, 8) # SF_PACKED==8 def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -849,7 +849,7 @@ @unwrap_spec(data="bufferstr", errors='str_or_None') def escape_encode(space, data, errors='strict'): - from pypy.objspace.std.stringobject import string_escape_encode + from pypy.objspace.std.bytesobject import string_escape_encode result = string_escape_encode(data, False) return space.newtuple([space.wrapbytes(result), space.wrap(len(data))]) diff --git a/pypy/module/_ffi/app_struct.py b/pypy/module/_ffi/app_struct.py deleted file mode 100644 --- a/pypy/module/_ffi/app_struct.py +++ /dev/null @@ 
-1,21 +0,0 @@ -import _ffi - -class MetaStructure(type): - - def __new__(cls, name, bases, dic): - cls._compute_shape(name, dic) - return type.__new__(cls, name, bases, dic) - - @classmethod - def _compute_shape(cls, name, dic): - fields = dic.get('_fields_') - if fields is None: - return - struct_descr = _ffi._StructDescr(name, fields) - for field in fields: - dic[field.name] = field - dic['_struct_'] = struct_descr - - -class Structure(metaclass=MetaStructure): - pass diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_ffi/interp_funcptr.py deleted file mode 100644 --- a/pypy/module/_ffi/interp_funcptr.py +++ /dev/null @@ -1,380 +0,0 @@ -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, wrap_oserror, \ - operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.typedef import TypeDef -from pypy.module._ffi.interp_ffitype import W_FFIType -# -from rpython.rtyper.lltypesystem import lltype, rffi -# -from rpython.rlib import jit -from rpython.rlib import libffi -from rpython.rlib.clibffi import get_libc_name, StackCheckError, LibFFIError -from rpython.rlib.rdynload import DLOpenError -from rpython.rlib.rarithmetic import r_uint -from rpython.rlib.objectmodel import we_are_translated -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter -from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror - -import os -if os.name == 'nt': - def _getfunc(space, CDLL, w_name, w_argtypes, w_restype): - argtypes_w, argtypes, w_restype, restype = unpack_argtypes( - space, w_argtypes, w_restype) - if space.isinstance_w(w_name, space.w_unicode): - # XXX: support LoadLibraryW - name = space.str_w(w_name) - try: - func = CDLL.cdll.getpointer(name, argtypes, restype, - flags = CDLL.flags) - except KeyError: - raise operationerrfmt( - space.w_AttributeError, - "No symbol %s found in library %s", name, CDLL.name) - except LibFFIError: - raise got_libffi_error(space) - - return W_FuncPtr(func, argtypes_w, w_restype) - elif space.isinstance_w(w_name, space.w_int): - ordinal = space.int_w(w_name) - try: - func = CDLL.cdll.getpointer_by_ordinal( - ordinal, argtypes, restype, - flags = CDLL.flags) - except KeyError: - raise operationerrfmt( - space.w_AttributeError, - "No ordinal %d found in library %s", ordinal, CDLL.name) - except LibFFIError: - raise got_libffi_error(space) - - return W_FuncPtr(func, argtypes_w, w_restype) - else: - raise OperationError(space.w_TypeError, space.wrap( - 'function name must be a string or integer')) -else: - @unwrap_spec(name=str) - def _getfunc(space, CDLL, w_name, w_argtypes, w_restype): - name = space.str_w(w_name) - argtypes_w, argtypes, w_restype, restype = unpack_argtypes( - space, w_argtypes, w_restype) - try: - func = CDLL.cdll.getpointer(name, argtypes, restype, - flags = CDLL.flags) - except KeyError: - raise operationerrfmt( - space.w_AttributeError, - "No symbol %s found in library %s", name, CDLL.name) - except LibFFIError: - raise got_libffi_error(space) - - return W_FuncPtr(func, argtypes_w, w_restype) - -def unwrap_ffitype(space, w_argtype, allow_void=False): - res = w_argtype.get_ffitype() - if res is libffi.types.void and not allow_void: - msg = 'void is not a valid argument type' - raise OperationError(space.w_TypeError, space.wrap(msg)) - return res - - -# ======================================================================== - -class W_FuncPtr(W_Root): - - _immutable_fields_ = ['func', 'argtypes_w[*]', 
'w_restype'] - - def __init__(self, func, argtypes_w, w_restype): - self.func = func - self.argtypes_w = argtypes_w - self.w_restype = w_restype - self.to_free = [] - - @jit.unroll_safe - def build_argchain(self, space, args_w): - expected = len(self.argtypes_w) - given = len(args_w) - if given != expected: - arg = 'arguments' - if len(self.argtypes_w) == 1: - arg = 'argument' - raise operationerrfmt(space.w_TypeError, - '%s() takes exactly %d %s (%d given)', - self.func.name, expected, arg, given) - # - argchain = libffi.ArgChain() - argpusher = PushArgumentConverter(space, argchain, self) - for i in range(expected): - w_argtype = self.argtypes_w[i] - w_arg = args_w[i] - argpusher.unwrap_and_do(w_argtype, w_arg) - return argchain - - def call(self, space, args_w): - self = jit.promote(self) - argchain = self.build_argchain(space, args_w) - func_caller = CallFunctionConverter(space, self.func, argchain) - try: - return func_caller.do_and_wrap(self.w_restype) - except StackCheckError, e: - raise OperationError(space.w_ValueError, space.wrap(e.message)) - #return self._do_call(space, argchain) - - def free_temp_buffers(self, space): - for buf in self.to_free: - if not we_are_translated(): - buf[0] = '\00' # invalidate the buffer, so that - # test_keepalive_temp_buffer can fail - lltype.free(buf, flavor='raw') - self.to_free = [] - - def getaddr(self, space): - """ - Return the physical address in memory of the function - """ - return space.wrap(rffi.cast(rffi.LONG, self.func.funcsym)) - - -class PushArgumentConverter(FromAppLevelConverter): - """ - A converter used by W_FuncPtr to unwrap the app-level objects into - low-level types and push them to the argchain. - """ - - def __init__(self, space, argchain, w_func): - FromAppLevelConverter.__init__(self, space) - self.argchain = argchain - self.w_func = w_func - - def handle_signed(self, w_ffitype, w_obj, intval): - self.argchain.arg(intval) - - def handle_unsigned(self, w_ffitype, w_obj, uintval): - self.argchain.arg(uintval) - - def handle_pointer(self, w_ffitype, w_obj, intval): - self.argchain.arg(intval) - - def handle_char(self, w_ffitype, w_obj, intval): - self.argchain.arg(intval) - - def handle_unichar(self, w_ffitype, w_obj, intval): - self.argchain.arg(intval) - - def handle_longlong(self, w_ffitype, w_obj, longlongval): - self.argchain.arg(longlongval) - - def handle_char_p(self, w_ffitype, w_obj, strval): - buf = rffi.str2charp(strval) - self.w_func.to_free.append(rffi.cast(rffi.VOIDP, buf)) - addr = rffi.cast(rffi.ULONG, buf) - self.argchain.arg(addr) - - def handle_unichar_p(self, w_ffitype, w_obj, unicodeval): - buf = rffi.unicode2wcharp(unicodeval) - self.w_func.to_free.append(rffi.cast(rffi.VOIDP, buf)) - addr = rffi.cast(rffi.ULONG, buf) - self.argchain.arg(addr) - - def handle_float(self, w_ffitype, w_obj, floatval): - self.argchain.arg(floatval) - - def handle_singlefloat(self, w_ffitype, w_obj, singlefloatval): - self.argchain.arg(singlefloatval) - - def handle_struct(self, w_ffitype, w_structinstance): - # arg_raw directly takes value to put inside ll_args - ptrval = w_structinstance.rawmem - self.argchain.arg_raw(ptrval) - - def handle_struct_rawffi(self, w_ffitype, w_structinstance): - # arg_raw directly takes value to put inside ll_args - ptrval = w_structinstance.ll_buffer - self.argchain.arg_raw(ptrval) - - -class CallFunctionConverter(ToAppLevelConverter): - """ - A converter used by W_FuncPtr to call the function, expect the result of - a correct low-level type and wrap it to the corresponding app-level type - 
""" - - def __init__(self, space, func, argchain): - ToAppLevelConverter.__init__(self, space) - self.func = func - self.argchain = argchain - - def get_longlong(self, w_ffitype): - return self.func.call(self.argchain, rffi.LONGLONG) - - def get_ulonglong(self, w_ffitype): - return self.func.call(self.argchain, rffi.ULONGLONG) - - def get_signed(self, w_ffitype): - # if the declared return type of the function is smaller than LONG, - # the result buffer may contains garbage in its higher bits. To get - # the correct value, and to be sure to handle the signed/unsigned case - # correctly, we need to cast the result to the correct type. After - # that, we cast it back to LONG, because this is what we want to pass - # to space.wrap in order to get a nice applevel . - # - restype = w_ffitype.get_ffitype() - call = self.func.call - if restype is libffi.types.slong: - return call(self.argchain, rffi.LONG) - elif restype is libffi.types.sint: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.INT)) - elif restype is libffi.types.sshort: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.SHORT)) - elif restype is libffi.types.schar: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.SIGNEDCHAR)) - else: - self.error(w_ffitype) - - def get_unsigned(self, w_ffitype): - return self.func.call(self.argchain, rffi.ULONG) - - def get_unsigned_which_fits_into_a_signed(self, w_ffitype): - # the same comment as get_signed apply - restype = w_ffitype.get_ffitype() - call = self.func.call - if restype is libffi.types.uint: - assert not libffi.IS_32_BIT - # on 32bit machines, we should never get here, because it's a case - # which has already been handled by get_unsigned above. - return rffi.cast(rffi.LONG, call(self.argchain, rffi.UINT)) - elif restype is libffi.types.ushort: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.USHORT)) - elif restype is libffi.types.uchar: - return rffi.cast(rffi.LONG, call(self.argchain, rffi.UCHAR)) - else: - self.error(w_ffitype) - - - def get_pointer(self, w_ffitype): - ptrres = self.func.call(self.argchain, rffi.VOIDP) - return rffi.cast(rffi.ULONG, ptrres) - - def get_char(self, w_ffitype): - return self.func.call(self.argchain, rffi.UCHAR) - - def get_unichar(self, w_ffitype): - return self.func.call(self.argchain, rffi.WCHAR_T) - - def get_float(self, w_ffitype): - return self.func.call(self.argchain, rffi.DOUBLE) - - def get_singlefloat(self, w_ffitype): - return self.func.call(self.argchain, rffi.FLOAT) - - def get_struct(self, w_ffitype, w_structdescr): - addr = self.func.call(self.argchain, rffi.LONG, is_struct=True) - return w_structdescr.fromaddress(self.space, addr) - - def get_struct_rawffi(self, w_ffitype, w_structdescr): - uintval = self.func.call(self.argchain, rffi.ULONG, is_struct=True) - return w_structdescr.fromaddress(self.space, uintval) - - def get_void(self, w_ffitype): - return self.func.call(self.argchain, lltype.Void) - - -def unpack_argtypes(space, w_argtypes, w_restype): - argtypes_w = [space.interp_w(W_FFIType, w_argtype) - for w_argtype in space.listview(w_argtypes)] - argtypes = [unwrap_ffitype(space, w_argtype) for w_argtype in - argtypes_w] - w_restype = space.interp_w(W_FFIType, w_restype) - restype = unwrap_ffitype(space, w_restype, allow_void=True) - return argtypes_w, argtypes, w_restype, restype - - at unwrap_spec(addr=r_uint, name=str, flags=int) -def descr_fromaddr(space, w_cls, addr, name, w_argtypes, - w_restype, flags=libffi.FUNCFLAG_CDECL): - argtypes_w, argtypes, w_restype, restype = unpack_argtypes(space, - 
w_argtypes, - w_restype) - addr = rffi.cast(rffi.VOIDP, addr) - try: - func = libffi.Func(name, argtypes, restype, addr, flags) - return W_FuncPtr(func, argtypes_w, w_restype) - except LibFFIError: - raise got_libffi_error(space) - - -W_FuncPtr.typedef = TypeDef( - '_ffi.FuncPtr', - __call__ = interp2app(W_FuncPtr.call), - getaddr = interp2app(W_FuncPtr.getaddr), - free_temp_buffers = interp2app(W_FuncPtr.free_temp_buffers), - fromaddr = interp2app(descr_fromaddr, as_classmethod=True) - ) - - - -# ======================================================================== - -class W_CDLL(W_Root): - def __init__(self, space, name, mode): - self.flags = libffi.FUNCFLAG_CDECL - self.space = space - if name is None: - self.name = "" - else: - self.name = name - try: - self.cdll = libffi.CDLL(name, mode) - except DLOpenError, e: - raise wrap_dlopenerror(space, e, self.name) - - def getfunc(self, space, w_name, w_argtypes, w_restype): - return _getfunc(space, self, w_name, w_argtypes, w_restype) - - @unwrap_spec(name=str) - def getaddressindll(self, space, name): - try: - address_as_uint = rffi.cast(lltype.Unsigned, - self.cdll.getaddressindll(name)) - except KeyError: - raise operationerrfmt( - space.w_ValueError, - "No symbol %s found in library %s", name, self.name) - return space.wrap(address_as_uint) - - at unwrap_spec(name='str_or_None', mode=int) -def descr_new_cdll(space, w_type, name, mode=-1): - return space.wrap(W_CDLL(space, name, mode)) - - -W_CDLL.typedef = TypeDef( - '_ffi.CDLL', - __new__ = interp2app(descr_new_cdll), - getfunc = interp2app(W_CDLL.getfunc), - getaddressindll = interp2app(W_CDLL.getaddressindll), - ) - -class W_WinDLL(W_CDLL): - def __init__(self, space, name, mode): - W_CDLL.__init__(self, space, name, mode) - self.flags = libffi.FUNCFLAG_STDCALL - - at unwrap_spec(name='str_or_None', mode=int) -def descr_new_windll(space, w_type, name, mode=-1): - return space.wrap(W_WinDLL(space, name, mode)) - - -W_WinDLL.typedef = TypeDef( - '_ffi.WinDLL', - __new__ = interp2app(descr_new_windll), - getfunc = interp2app(W_WinDLL.getfunc), - getaddressindll = interp2app(W_WinDLL.getaddressindll), - ) - -# ======================================================================== - -def get_libc(space): - try: - return space.wrap(W_CDLL(space, get_libc_name(), -1)) - except OSError, e: - raise wrap_oserror(space, e) - diff --git a/pypy/module/_ffi/test/test_funcptr.py b/pypy/module/_ffi/test/test_funcptr.py deleted file mode 100644 --- a/pypy/module/_ffi/test/test_funcptr.py +++ /dev/null @@ -1,643 +0,0 @@ -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib.clibffi import get_libc_name -from rpython.rlib.libffi import types -from rpython.rlib.libffi import CDLL -from rpython.rlib.test.test_clibffi import get_libm_name - -import sys, py - -class BaseAppTestFFI(object): - spaceconfig = dict(usemodules=('_ffi', '_rawffi')) - - @classmethod - def prepare_c_example(cls): - from rpython.tool.udir import udir - from rpython.translator.tool.cbuild import ExternalCompilationInfo - from rpython.translator.platform import platform - - c_file = udir.ensure("test__ffi", dir=1).join("foolib.c") - # automatically collect the C source from the docstrings of the tests - snippets = [""" - #ifdef _WIN32 - #define DLLEXPORT __declspec(dllexport) - #else - #define DLLEXPORT - #endif - """] - for name in dir(cls): - if name.startswith('test_'): - meth = getattr(cls, name) - # the heuristic to determine it it's really C code could be - # improved: so far we just check that there is a '{' :-) 
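The docstring-scraping trick described in the comment just above is easy to show in isolation. A standalone sketch of the same idea (hypothetical helper name, not part of the PyPy tree), using the identical '{' heuristic:

    def collect_c_snippets(test_class):
        # Gather C source fragments embedded in the docstrings of the
        # test_* methods; any docstring containing a '{' is assumed to
        # be C code, mirroring the heuristic above.
        snippets = []
        for name in dir(test_class):
            if not name.startswith('test_'):
                continue
            doc = getattr(test_class, name).__doc__
            if doc is not None and '{' in doc:
                snippets.append(doc)
        return '\n'.join(snippets)
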
- if meth.__doc__ is not None and '{' in meth.__doc__: - snippets.append(meth.__doc__) - # - c_file.write(py.code.Source('\n'.join(snippets))) - eci = ExternalCompilationInfo(export_symbols=[]) - return str(platform.compile([c_file], eci, 'x', standalone=False)) - - def setup_class(cls): - space = cls.space - cls.w_iswin32 = space.wrap(sys.platform == 'win32') - cls.w_libfoo_name = space.wrap(cls.prepare_c_example()) - cls.w_libc_name = space.wrap(get_libc_name()) - libm_name = get_libm_name(sys.platform) - cls.w_libm_name = space.wrap(libm_name) - libm = CDLL(libm_name) - pow = libm.getpointer('pow', [], types.void) - pow_addr = rffi.cast(rffi.LONG, pow.funcsym) - cls._libm = libm # otherwise it gets unloaded - argh! - cls.w_pow_addr = space.wrap(pow_addr) - -class AppTestFFI(BaseAppTestFFI): - - def setup_class(cls): - BaseAppTestFFI.setup_class.im_func(cls) - space = cls.space - # these are needed for test_single_float_args - from ctypes import c_float - f_12_34 = c_float(12.34).value - f_56_78 = c_float(56.78).value - f_result = c_float(f_12_34 + f_56_78).value - cls.w_f_12_34_plus_56_78 = space.wrap(f_result) - - def test_libload(self): - import _ffi - _ffi.CDLL(self.libc_name) - - def test_libload_fail(self): - import _ffi - raises(OSError, _ffi.CDLL, "xxxxx_this_name_does_not_exist_xxxxx") - - def test_libload_None(self): - if self.iswin32: - skip("unix specific") - from _ffi import CDLL, types - # this should return *all* loaded libs, dlopen(NULL) - dll = CDLL(None) - # libm should be loaded - res = dll.getfunc('sqrt', [types.double], types.double)(1.0) - assert res == 1.0 - - def test_callfunc(self): - from _ffi import CDLL, types - libm = CDLL(self.libm_name) - pow = libm.getfunc('pow', [types.double, types.double], types.double) - assert pow(2, 3) == 8 - - @py.test.mark.skipif("py.test.config.option.runappdirect") - def test_getaddr(self): - from _ffi import CDLL, types - libm = CDLL(self.libm_name) - pow = libm.getfunc('pow', [types.double, types.double], types.double) - assert pow.getaddr() == self.pow_addr - - @py.test.mark.skipif("py.test.config.option.runappdirect") - def test_getaddressindll(self): - import sys - from _ffi import CDLL - libm = CDLL(self.libm_name) - pow_addr = libm.getaddressindll('pow') - fff = sys.maxsize*2-1 - assert pow_addr == self.pow_addr & fff - - def test_func_fromaddr(self): - from _ffi import CDLL, types, FuncPtr - libm = CDLL(self.libm_name) - pow_addr = libm.getaddressindll('pow') - pow = FuncPtr.fromaddr(pow_addr, 'pow', [types.double, types.double], - types.double) - assert pow(2, 3) == 8 - - def test_int_args(self): - """ - DLLEXPORT int sum_xy(int x, int y) - { - return x+y; - } - """ - import sys - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy', [types.sint, types.sint], types.sint) - assert sum_xy(30, 12) == 42 - assert sum_xy(sys.maxsize*2, 0) == -2 - - def test_void_result(self): - """ - int dummy = 0; - DLLEXPORT void set_dummy(int val) { dummy = val; } - DLLEXPORT int get_dummy() { return dummy; } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - set_dummy = libfoo.getfunc('set_dummy', [types.sint], types.void) - get_dummy = libfoo.getfunc('get_dummy', [], types.sint) - assert get_dummy() == 0 - assert set_dummy(42) is None - assert get_dummy() == 42 - set_dummy(0) - - def test_pointer_args(self): - """ - extern int dummy; // defined in test_void_result - DLLEXPORT int* get_dummy_ptr() { return &dummy; } - DLLEXPORT void set_val_to_ptr(int* ptr, int val) { *ptr 
= val; } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - get_dummy = libfoo.getfunc('get_dummy', [], types.sint) - get_dummy_ptr = libfoo.getfunc('get_dummy_ptr', [], types.void_p) - set_val_to_ptr = libfoo.getfunc('set_val_to_ptr', - [types.void_p, types.sint], - types.void) - assert get_dummy() == 0 - ptr = get_dummy_ptr() - set_val_to_ptr(ptr, 123) - assert get_dummy() == 123 - set_val_to_ptr(ptr, 0) - - def test_convert_pointer_args(self): - """ - extern int dummy; // defined in test_void_result - DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args - DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto - """ - from _ffi import CDLL, types - - class MyPointerWrapper(object): - def __init__(self, value): - self.value = value - def _as_ffi_pointer_(self, ffitype): - assert ffitype is types.void_p - return self.value - - libfoo = CDLL(self.libfoo_name) - get_dummy = libfoo.getfunc('get_dummy', [], types.sint) - get_dummy_ptr = libfoo.getfunc('get_dummy_ptr', [], types.void_p) - set_val_to_ptr = libfoo.getfunc('set_val_to_ptr', - [types.void_p, types.sint], - types.void) - assert get_dummy() == 0 - ptr = get_dummy_ptr() - assert type(ptr) is int - ptr2 = MyPointerWrapper(ptr) - set_val_to_ptr(ptr2, 123) - assert get_dummy() == 123 - set_val_to_ptr(ptr2, 0) - - def test_convert_strings_to_char_p(self): - """ - DLLEXPORT - long mystrlen(char* s) - { - long len = 0; - while(*s++) - len++; - return len; - } - """ - from _ffi import CDLL, types - import _rawffi - libfoo = CDLL(self.libfoo_name) - mystrlen = libfoo.getfunc('mystrlen', [types.char_p], types.slong) - # - # first, try automatic conversion from a string - assert mystrlen(b'foobar') == 6 - # then, try to pass an explicit pointer - CharArray = _rawffi.Array('c') - mystr = CharArray(7, b'foobar') - assert mystrlen(mystr.buffer) == 6 - mystr.free() - mystrlen.free_temp_buffers() - - def test_convert_unicode_to_unichar_p(self): - """ - #include - DLLEXPORT - long mystrlen_u(wchar_t* s) - { - long len = 0; - while(*s++) - len++; - return len; - } - """ - from _ffi import CDLL, types - import _rawffi - libfoo = CDLL(self.libfoo_name) - mystrlen = libfoo.getfunc('mystrlen_u', [types.unichar_p], types.slong) - # - # first, try automatic conversion from strings and unicode - assert mystrlen('foobar') == 6 - assert mystrlen('foobar') == 6 - assert mystrlen('ab\u2070') == 3 - # then, try to pass an explicit pointer - UniCharArray = _rawffi.Array('u') - mystr = UniCharArray(7, 'foobar') - assert mystrlen(mystr.buffer) == 6 - mystr.free() - mystrlen.free_temp_buffers() - - def test_keepalive_temp_buffer(self): - """ - DLLEXPORT - char* do_nothing(char* s) - { - return s; - } - """ - from _ffi import CDLL, types - import _rawffi - libfoo = CDLL(self.libfoo_name) - do_nothing = libfoo.getfunc('do_nothing', [types.char_p], types.char_p) - CharArray = _rawffi.Array('c') - # - ptr = do_nothing(b'foobar') - array = CharArray.fromaddress(ptr, 7) - assert bytes(array) == b'foobar\00' - do_nothing.free_temp_buffers() - - def test_typed_pointer_args(self): - """ - extern int dummy; // defined in test_void_result - DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args - DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto - """ - from _ffi import CDLL, types - - libfoo = CDLL(self.libfoo_name) - intptr = types.Pointer(types.sint) - get_dummy = libfoo.getfunc('get_dummy', [], types.sint) - get_dummy_ptr = libfoo.getfunc('get_dummy_ptr', [], intptr) - set_val_to_ptr = libfoo.getfunc('set_val_to_ptr', 
[intptr, types.sint], types.void) - assert get_dummy() == 0 - ptr = get_dummy_ptr() - set_val_to_ptr(ptr, 123) - assert get_dummy() == 123 - set_val_to_ptr(ptr, 0) - - def test_huge_pointer_args(self): - """ - #include - DLLEXPORT long is_null_ptr(void* ptr) { return ptr == NULL; } - """ - import sys - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - is_null_ptr = libfoo.getfunc('is_null_ptr', [types.void_p], types.ulong) - assert not is_null_ptr(sys.maxsize+1) - - def test_unsigned_long_args(self): - """ - DLLEXPORT unsigned long sum_xy_ul(unsigned long x, unsigned long y) - { - return x+y; - } - """ - import sys - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_ul', [types.ulong, types.ulong], - types.ulong) - assert sum_xy(sys.maxsize, 12) == sys.maxsize+12 - assert sum_xy(sys.maxsize+1, 12) == sys.maxsize+13 - # - res = sum_xy(sys.maxsize*2+3, 0) - assert res == 1 - - def test_unsigned_short_args(self): - """ - DLLEXPORT unsigned short sum_xy_us(unsigned short x, unsigned short y) - { - return x+y; - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_us', [types.ushort, types.ushort], - types.ushort) - assert sum_xy(32000, 8000) == 40000 - assert sum_xy(60000, 30000) == 90000 % 65536 - - def test_unsigned_byte_args(self): - """ - DLLEXPORT unsigned char sum_xy_ub(unsigned char x, unsigned char y) - { - return x+y; - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_us', [types.ubyte, types.ubyte], - types.ubyte) - assert sum_xy(100, 40) == 140 - assert sum_xy(200, 60) == 260 % 256 - - def test_unsigned_int_args(self): - r""" - DLLEXPORT unsigned int sum_xy_ui(unsigned int x, unsigned int y) - { - return x+y; - } - """ - import sys - from _ffi import CDLL, types - maxint32 = 2147483647 - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_ui', [types.uint, types.uint], - types.uint) - assert sum_xy(maxint32, 1) == maxint32+1 - assert sum_xy(maxint32, maxint32+2) == 0 - - def test_signed_byte_args(self): - """ - DLLEXPORT signed char sum_xy_sb(signed char x, signed char y) - { - return x+y; - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_sb', [types.sbyte, types.sbyte], - types.sbyte) - assert sum_xy(10, 20) == 30 - assert sum_xy(100, 28) == -128 - - def test_char_args(self): - """ - DLLEXPORT char my_toupper(char x) - { - return x - ('a'-'A'); - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - my_toupper = libfoo.getfunc('my_toupper', [types.char], - types.char) - assert my_toupper('c') == 'C' - - def test_unichar_args(self): - """ - #include - DLLEXPORT wchar_t sum_xy_wc(wchar_t x, wchar_t y) - { - return x + y; - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_wc', [types.unichar, types.unichar], - types.unichar) - res = sum_xy(chr(1000), chr(2000)) - assert type(res) is str - assert ord(res) == 3000 - - def test_single_float_args(self): - """ - DLLEXPORT float sum_xy_float(float x, float y) - { - return x+y; - } - """ - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_float', [types.float, types.float], - types.float) - res = sum_xy(12.34, 56.78) - assert res == self.f_12_34_plus_56_78 - - - def test_slonglong_args(self): - """ - DLLEXPORT long long sum_xy_longlong(long long x, long long y) - { - return 
x+y; - } - """ - from _ffi import CDLL, types - maxint32 = 2147483647 # we cannot really go above maxint on 64 bits - # (and we would not test anything, as there long - # is the same as long long) - - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_longlong', [types.slonglong, types.slonglong], - types.slonglong) - x = maxint32+1 - y = maxint32+2 - res = sum_xy(x, y) - expected = maxint32*2 + 3 - assert res == expected - - def test_ulonglong_args(self): - """ - DLLEXPORT unsigned long long sum_xy_ulonglong(unsigned long long x, - unsigned long long y) - { - return x+y; - } - """ - from _ffi import CDLL, types - maxint64 = 9223372036854775807 # maxint64+1 does not fit into a - # longlong, but it does into a - # ulonglong - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy_ulonglong', [types.ulonglong, types.ulonglong], - types.ulonglong) - x = maxint64+1 - y = 2 - res = sum_xy(x, y) - expected = maxint64 + 3 - assert res == expected - # - res = sum_xy(maxint64*2+3, 0) - assert res == 1 - - def test_byval_argument(self): - """ - struct Point { - long x; - long y; - }; - - DLLEXPORT long sum_point(struct Point p) { - return p.x + p.y; - } - """ - from _ffi import CDLL, types, _StructDescr, Field - Point = _StructDescr('Point', [ - Field('x', types.slong), - Field('y', types.slong), - ]) - libfoo = CDLL(self.libfoo_name) - sum_point = libfoo.getfunc('sum_point', [Point.ffitype], types.slong) - # - p = Point.allocate() - p.setfield('x', 30) - p.setfield('y', 12) - res = sum_point(p) - assert res == 42 - - def test_byval_result(self): - """ - DLLEXPORT struct Point make_point(long x, long y) { - struct Point p; - p.x = x; - p.y = y; - return p; - } - """ - from _ffi import CDLL, types, _StructDescr, Field - Point = _StructDescr('Point', [ - Field('x', types.slong), - Field('y', types.slong), - ]) - libfoo = CDLL(self.libfoo_name) - make_point = libfoo.getfunc('make_point', [types.slong, types.slong], - Point.ffitype) - # - p = make_point(12, 34) - assert p.getfield('x') == 12 - assert p.getfield('y') == 34 - - # XXX: support for _rawffi structures should be killed as soon as we - # implement ctypes.Structure on top of _ffi. 
In the meantime, we support - # both - def test_byval_argument__rawffi(self): - """ - // defined above - struct Point; - DLLEXPORT long sum_point(struct Point p); - """ - import _rawffi - from _ffi import CDLL, types - POINT = _rawffi.Structure([('x', 'l'), ('y', 'l')]) - ffi_point = POINT.get_ffi_type() - libfoo = CDLL(self.libfoo_name) - sum_point = libfoo.getfunc('sum_point', [ffi_point], types.slong) - # - p = POINT() - p.x = 30 - p.y = 12 - res = sum_point(p) - assert res == 42 - p.free() - - def test_byval_result__rawffi(self): - """ - // defined above - DLLEXPORT struct Point make_point(long x, long y); - """ - import _rawffi - from _ffi import CDLL, types - POINT = _rawffi.Structure([('x', 'l'), ('y', 'l')]) - ffi_point = POINT.get_ffi_type() - libfoo = CDLL(self.libfoo_name) - make_point = libfoo.getfunc('make_point', [types.slong, types.slong], ffi_point) - # - p = make_point(12, 34) - assert p.x == 12 - assert p.y == 34 - p.free() - - - def test_TypeError_numargs(self): - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - sum_xy = libfoo.getfunc('sum_xy', [types.sint, types.sint], types.sint) - raises(TypeError, "sum_xy(1, 2, 3)") - raises(TypeError, "sum_xy(1)") - - def test_TypeError_voidarg(self): - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - raises(TypeError, "libfoo.getfunc('sum_xy', [types.void], types.sint)") - - def test_OSError_loading(self): - from _ffi import CDLL, types - raises(OSError, "CDLL('I do not exist')") - - def test_AttributeError_missing_function(self): - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - raises(AttributeError, "libfoo.getfunc('I_do_not_exist', [], types.void)") - if self.iswin32: - skip("unix specific") - libnone = CDLL(None) - raises(AttributeError, "libnone.getfunc('I_do_not_exist', [], types.void)") - - def test_calling_convention1(self): - if not self.iswin32: - skip("windows specific") - from _ffi import WinDLL, types - libm = WinDLL(self.libm_name) - pow = libm.getfunc('pow', [types.double, types.double], types.double) - try: - pow(2, 3) - except ValueError as e: - assert str(e).startswith('Procedure called with') - else: - assert 0, 'test must assert, wrong calling convention' - - def test_calling_convention2(self): - if not self.iswin32: - skip("windows specific") - from _ffi import WinDLL, types - kernel = WinDLL('Kernel32.dll') - sleep = kernel.getfunc('Sleep', [types.uint], types.void) - sleep(10) - - def test_calling_convention3(self): - if not self.iswin32: - skip("windows specific") - from _ffi import CDLL, types - wrong_kernel = CDLL('Kernel32.dll') - wrong_sleep = wrong_kernel.getfunc('Sleep', [types.uint], types.void) - try: - wrong_sleep(10) - except ValueError as e: - assert str(e).startswith('Procedure called with') - else: - assert 0, 'test must assert, wrong calling convention' - - def test_func_fromaddr2(self): - if not self.iswin32: - skip("windows specific") - from _ffi import CDLL, types, FuncPtr - from _rawffi import FUNCFLAG_STDCALL - libm = CDLL(self.libm_name) - pow_addr = libm.getaddressindll('pow') - wrong_pow = FuncPtr.fromaddr(pow_addr, 'pow', - [types.double, types.double], types.double, FUNCFLAG_STDCALL) - try: - wrong_pow(2, 3) == 8 - except ValueError as e: - assert str(e).startswith('Procedure called with') - else: - assert 0, 'test must assert, wrong calling convention' - - def test_func_fromaddr3(self): - if not self.iswin32: - skip("windows specific") - from _ffi import WinDLL, types, FuncPtr - from _rawffi import FUNCFLAG_STDCALL - kernel = 
WinDLL('Kernel32.dll') - sleep_addr = kernel.getaddressindll('Sleep') - sleep = FuncPtr.fromaddr(sleep_addr, 'sleep', [types.uint], - types.void, FUNCFLAG_STDCALL) - sleep(10) - - def test_by_ordinal(self): - """ - int DLLEXPORT AAA_first_ordinal_function() - { - return 42; - } - """ - if not self.iswin32: - skip("windows specific") - from _ffi import CDLL, types - libfoo = CDLL(self.libfoo_name) - f_name = libfoo.getfunc('AAA_first_ordinal_function', [], types.sint) - f_ordinal = libfoo.getfunc(1, [], types.sint) - assert f_name.getaddr() == f_ordinal.getaddr() diff --git a/pypy/module/_ffi/test/test_struct.py b/pypy/module/_ffi/test/test_struct.py deleted file mode 100644 --- a/pypy/module/_ffi/test/test_struct.py +++ /dev/null @@ -1,331 +0,0 @@ -from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.module._ffi.interp_ffitype import app_types, W_FFIType -from pypy.module._ffi.interp_struct import compute_size_and_alignement, W_Field -from pypy.module._ffi.test.test_funcptr import BaseAppTestFFI - - -class TestStruct(object): - - class FakeSpace(object): - def interp_w(self, cls, obj): - return obj - - def compute(self, ffitypes_w): - fields_w = [W_Field('', w_ffitype) for - w_ffitype in ffitypes_w] - return compute_size_and_alignement(self.FakeSpace(), fields_w) - - def sizeof(self, ffitypes_w): - size, aligned, fields_w = self.compute(ffitypes_w) - return size - - def test_compute_size(self): - T = app_types - byte_size = app_types.sbyte.sizeof() - long_size = app_types.slong.sizeof() - llong_size = app_types.slonglong.sizeof() - llong_align = app_types.slonglong.get_alignment() - # - assert llong_align >= 4 - assert self.sizeof([T.sbyte, T.slong]) == 2*long_size - assert self.sizeof([T.sbyte, T.slonglong]) == llong_align + llong_size - assert self.sizeof([T.sbyte, T.sbyte, T.slonglong]) == llong_align + llong_size - assert self.sizeof([T.sbyte, T.sbyte, T.sbyte, T.slonglong]) == llong_align + llong_size - assert self.sizeof([T.sbyte, T.sbyte, T.sbyte, T.sbyte, T.slonglong]) == llong_align + llong_size - assert self.sizeof([T.slonglong, T.sbyte]) == llong_size + llong_align - assert self.sizeof([T.slonglong, T.sbyte, T.sbyte]) == llong_size + llong_align - assert self.sizeof([T.slonglong, T.sbyte, T.sbyte, T.sbyte]) == llong_size + llong_align - assert self.sizeof([T.slonglong, T.sbyte, T.sbyte, T.sbyte, T.sbyte]) == llong_size + llong_align - - -class AppTestStruct(BaseAppTestFFI): - - @classmethod - def read_raw_mem(cls, addr, typename, length): - import ctypes - addr = ctypes.cast(addr, ctypes.c_void_p) - c_type = getattr(ctypes, typename) - array_type = ctypes.POINTER(c_type * length) - ptr_array = ctypes.cast(addr, array_type) - array = ptr_array[0] - lst = [array[i] for i in range(length)] - return lst - - def setup_class(cls): - BaseAppTestFFI.setup_class.im_func(cls) - - if cls.runappdirect: - cls.w_read_raw_mem = cls.read_raw_mem - else: - @unwrap_spec(addr=int, typename=str, length=int) - def read_raw_mem_w(space, addr, typename, length): - return space.wrap(cls.read_raw_mem(addr, typename, length)) - cls.w_read_raw_mem = cls.space.wrap(interp2app(read_raw_mem_w)) - # - from rpython.rlib import clibffi - from rpython.rlib.rarithmetic import r_uint - from rpython.rtyper.lltypesystem import lltype, rffi - dummy_type = lltype.malloc(clibffi.FFI_TYPE_P.TO, flavor='raw') - dummy_type.c_size = r_uint(123) - dummy_type.c_alignment = rffi.cast(rffi.USHORT, 0) - dummy_type.c_type = rffi.cast(rffi.USHORT, 0) - cls.w_dummy_type = W_FFIType('dummy', dummy_type) - 
cls.w_runappdirect = cls.space.wrap(cls.runappdirect) - - def test__StructDescr(self): - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.slong), - Field('y', types.slong), - ] - descr = _StructDescr('foo', fields) - assert descr.ffitype.sizeof() == longsize*2 - assert descr.ffitype.name == 'struct foo' - - def test_alignment(self): - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.sbyte), - Field('y', types.slong), - ] - descr = _StructDescr('foo', fields) - assert descr.ffitype.sizeof() == longsize*2 - assert fields[0].offset == 0 - assert fields[1].offset == longsize # aligned to WORD - - def test_missing_field(self): - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.slong), - Field('y', types.slong), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - raises(AttributeError, "struct.getfield('missing')") - raises(AttributeError, "struct.setfield('missing', 42)") - - def test_unknown_type(self): - if self.runappdirect: - skip('cannot use self.dummy_type with -A') - from _ffi import _StructDescr, Field - fields = [ - Field('x', self.dummy_type), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - raises(TypeError, "struct.getfield('x')") - raises(TypeError, "struct.setfield('x', 42)") - - def test_getfield_setfield(self): - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.slong), - Field('y', types.slong), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - struct.setfield('x', 42) - struct.setfield('y', 43) - assert struct.getfield('x') == 42 - assert struct.getfield('y') == 43 - mem = self.read_raw_mem(struct.getaddr(), 'c_long', 2) - assert mem == [42, 43] - - def test_getfield_setfield_signed_types(self): - import sys - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('sbyte', types.sbyte), - Field('sshort', types.sshort), - Field('sint', types.sint), - Field('slong', types.slong), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - struct.setfield('sbyte', 128) - assert struct.getfield('sbyte') == -128 - struct.setfield('sshort', 32768) - assert struct.getfield('sshort') == -32768 - struct.setfield('sint', 43) - assert struct.getfield('sint') == 43 - struct.setfield('slong', sys.maxsize+1) - assert struct.getfield('slong') == -sys.maxsize-1 - struct.setfield('slong', sys.maxsize*3) - assert struct.getfield('slong') == sys.maxsize-2 - - def test_getfield_setfield_unsigned_types(self): - import sys - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('ubyte', types.ubyte), - Field('ushort', types.ushort), - Field('uint', types.uint), - Field('ulong', types.ulong), - Field('char', types.char), - Field('unichar', types.unichar), - Field('ptr', types.void_p), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - struct.setfield('ubyte', -1) - assert struct.getfield('ubyte') == 255 - struct.setfield('ushort', -1) - assert struct.getfield('ushort') == 65535 - struct.setfield('uint', 43) - assert struct.getfield('uint') == 43 - struct.setfield('ulong', -1) - assert struct.getfield('ulong') == sys.maxsize*2 + 1 - struct.setfield('ulong', sys.maxsize*2 + 2) - assert struct.getfield('ulong') == 0 - struct.setfield('char', 'a') - assert struct.getfield('char') 
== 'a' - struct.setfield('unichar', '\u1234') - assert struct.getfield('unichar') == '\u1234' - struct.setfield('ptr', -1) - assert struct.getfield('ptr') == sys.maxsize*2 + 1 - - def test_getfield_setfield_longlong(self): - import sys - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('slonglong', types.slonglong), - Field('ulonglong', types.ulonglong), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - struct.setfield('slonglong', 9223372036854775808) - assert struct.getfield('slonglong') == -9223372036854775808 - struct.setfield('ulonglong', -1) - assert struct.getfield('ulonglong') == 18446744073709551615 - mem = self.read_raw_mem(struct.getaddr(), 'c_longlong', 2) - assert mem == [-9223372036854775808, -1] - - def test_getfield_setfield_float(self): - import sys - from _ffi import _StructDescr, Field, types - longsize = types.slong.sizeof() - fields = [ - Field('x', types.double), - ] - descr = _StructDescr('foo', fields) - struct = descr.allocate() - struct.setfield('x', 123.4) - assert struct.getfield('x') == 123.4 - mem = self.read_raw_mem(struct.getaddr(), 'c_double', 1) - assert mem == [123.4] From noreply at buildbot.pypy.org Fri Jan 24 22:59:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 24 Jan 2014 22:59:41 +0100 (CET) Subject: [pypy-commit] pypy py3k: rpython forces 'utf8' now Message-ID: <20140124215941.868781C1190@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68922:fbe8e328bbe8 Date: 2014-01-24 13:58 -0800 http://bitbucket.org/pypy/pypy/changeset/fbe8e328bbe8/ Log: rpython forces 'utf8' now diff --git a/lib-python/3/test/test_pep3120.py b/lib-python/3/test/test_pep3120.py --- a/lib-python/3/test/test_pep3120.py +++ b/lib-python/3/test/test_pep3120.py @@ -20,7 +20,7 @@ import test.badsyntax_pep3120 except SyntaxError as msg: msg = str(msg).lower() - self.assertTrue('utf-8' in msg) + self.assertTrue('utf8' in msg) else: self.fail("expected exception didn't occur") From noreply at buildbot.pypy.org Sat Jan 25 01:55:59 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 25 Jan 2014 01:55:59 +0100 (CET) Subject: [pypy-commit] pypy default: add bytearray.__iter__ Message-ID: <20140125005559.1A3A41C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68924:263c41db4f9c Date: 2014-01-24 16:18 -0800 http://bitbucket.org/pypy/pypy/changeset/263c41db4f9c/ Log: add bytearray.__iter__ diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -285,6 +285,9 @@ raise return space.newbool(res) + def descr_iter(self, space): + return space.newseqiter(self) + def descr_buffer(self, space): return BytearrayBuffer(self.data) @@ -893,6 +896,8 @@ __ge__ = interp2app(W_BytearrayObject.descr_ge, doc=BytearrayDocstrings.__ge__.__doc__), + __iter__ = interp2app(W_BytearrayObject.descr_iter, + doc=BytearrayDocstrings.__iter__.__doc__), __len__ = interp2app(W_BytearrayObject.descr_len, doc=BytearrayDocstrings.__len__.__doc__), __contains__ = interp2app(W_BytearrayObject.descr_contains, diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -134,6 +134,7 @@ def test_iter(self): assert list(bytearray('hello')) == [104, 101, 108, 108, 111] + assert 
list(bytearray('hello').__iter__()) == [104, 101, 108, 108, 111] def test_compare(self): assert bytearray('hello') == bytearray('hello') From noreply at buildbot.pypy.org Sat Jan 25 01:55:57 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 25 Jan 2014 01:55:57 +0100 (CET) Subject: [pypy-commit] pypy default: pep8/cleanup Message-ID: <20140125005557.C7FD41C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68923:2f70add7ec6d Date: 2014-01-24 16:17 -0800 http://bitbucket.org/pypy/pypy/changeset/2f70add7ec6d/ Log: pep8/cleanup diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -232,9 +232,8 @@ raise operationerrfmt(space.w_TypeError, msg, w_result) def ord(self, space): - typename = space.type(self).getname(space) - msg = "ord() expected string of length 1, but %s found" - raise operationerrfmt(space.w_TypeError, msg, typename) + msg = "ord() expected string of length 1, but %T found" + raise operationerrfmt(space.w_TypeError, msg, self) def __spacebind__(self, space): return self diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1,20 +1,21 @@ """The builtin bytearray implementation""" +from rpython.rlib.objectmodel import ( + import_from_mixin, newlist_hint, resizelist_hint) +from rpython.rlib.rstring import StringBuilder + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.signature import Signature from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.util import get_positive_index -from rpython.rlib.objectmodel import newlist_hint, resizelist_hint, import_from_mixin -from rpython.rlib.rstring import StringBuilder +NON_HEX_MSG = "non-hexadecimal number found in fromhex() arg at position %d" -def _make_data(s): - return [s[i] for i in range(len(s))] class W_BytearrayObject(W_Root): import_from_mixin(StringMethods) @@ -23,7 +24,7 @@ w_self.data = data def __repr__(w_self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) def _new(self, value): @@ -127,11 +128,6 @@ @staticmethod def descr_fromhex(space, w_bytearraytype, w_hexstring): - "bytearray.fromhex(string) -> bytearray\n" - "\n" - "Create a bytearray object from a string of hexadecimal numbers.\n" - "Spaces between two numbers are accepted.\n" - "Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\\xb9\\x01\\xef')." 
hexstring = space.str_w(w_hexstring) hexstring = hexstring.lower() data = [] @@ -143,18 +139,15 @@ i += 1 if i >= length: break - if i+1 == length: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % i)) + if i + 1 == length: + raise operationerrfmt(space.w_ValueError, NON_HEX_MSG, i) top = _hex_digit_to_int(hexstring[i]) if top == -1: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % i)) + raise operationerrfmt(space.w_ValueError, NON_HEX_MSG, i) bot = _hex_digit_to_int(hexstring[i+1]) if bot == -1: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,))) + raise operationerrfmt(space.w_ValueError, NON_HEX_MSG, i + 1) data.append(chr(top*16 + bot)) # in CPython bytearray.fromhex is a staticmethod, so @@ -178,23 +171,25 @@ from pypy.objspace.std.unicodeobject import ( _get_encoding_and_errors, encode_object ) - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) - # if w_source is an integer this correctly raises a TypeError - # the CPython error message is: "encoding or errors without a string argument" - # ours is: "expected unicode, got int object" + # if w_source is an integer this correctly raises a + # TypeError the CPython error message is: "encoding or + # errors without a string argument" ours is: "expected + # unicode, got int object" w_source = encode_object(space, w_source, encoding, errors) # Is it an int? try: count = space.int_w(w_source) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_TypeError): raise else: if count < 0: - raise OperationError(space.w_ValueError, - space.wrap("bytearray negative count")) + raise operationerrfmt(space.w_ValueError, + "bytearray negative count") self.data = ['\0'] * count return @@ -224,8 +219,8 @@ elif not '\x20' <= c < '\x7f': n = ord(c) buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) + buf.append("0123456789abcdef"[n >> 4]) + buf.append("0123456789abcdef"[n & 0xF]) else: buf.append(c) @@ -238,51 +233,57 @@ def descr_eq(self, space, w_other): try: - return space.newbool(self._val(space) == self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) == self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ne(self, space, w_other): try: - return space.newbool(self._val(space) != self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) != self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_lt(self, space, w_other): try: - return space.newbool(self._val(space) < self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) < self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_le(self, space, w_other): try: - return space.newbool(self._val(space) <= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) <= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): 
return space.w_NotImplemented raise + return space.newbool(res) def descr_gt(self, space, w_other): try: - return space.newbool(self._val(space) > self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) > self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ge(self, space, w_other): try: - return space.newbool(self._val(space) >= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) >= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_buffer(self, space): return BytearrayBuffer(self.data) @@ -297,7 +298,7 @@ def descr_inplace_mul(self, space, w_times): try: times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -312,12 +313,13 @@ _setitem_slice_helper(space, self.data, start, step, slicelength, sequence2, empty_elem='\x00') else: - idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") + idx = space.getindex_w(w_index, space.w_IndexError, + "bytearray index") try: self.data[idx] = getbytevalue(space, w_other) except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray index out of range")) + raise operationerrfmt(space.w_IndexError, + "bytearray index out of range") def descr_delitem(self, space, w_idx): if isinstance(w_idx, W_SliceObject): @@ -325,12 +327,13 @@ len(self.data)) _delitem_slice_helper(space, self.data, start, step, slicelength) else: - idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") + idx = space.getindex_w(w_idx, space.w_IndexError, + "bytearray index") try: del self.data[idx] except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray deletion index out of range")) + raise operationerrfmt(space.w_IndexError, + "bytearray deletion index out of range") def descr_append(self, space, w_item): self.data.append(getbytevalue(space, w_item)) @@ -357,10 +360,9 @@ result = self.data.pop(index) except IndexError: if not self.data: - raise OperationError(space.w_IndexError, space.wrap( - "pop from empty bytearray")) - raise OperationError(space.w_IndexError, space.wrap( - "pop index out of range")) + raise operationerrfmt(space.w_IndexError, + "pop from empty bytearray") + raise operationerrfmt(space.w_IndexError, "pop index out of range") return space.wrap(ord(result)) def descr_remove(self, space, w_char): @@ -368,27 +370,33 @@ try: self.data.remove(chr(char)) except ValueError: - raise OperationError(space.w_ValueError, space.wrap( - "value not found in bytearray")) + raise operationerrfmt(space.w_ValueError, + "value not found in bytearray") def descr_reverse(self, space): self.data.reverse() + +def _make_data(s): + return [s[i] for i in range(len(s))] + + def getbytevalue(space, w_value): if space.isinstance_w(w_value, space.w_str): string = space.str_w(w_value) if len(string) != 1: - raise OperationError(space.w_ValueError, space.wrap( - "string must be of size 1")) + raise operationerrfmt(space.w_ValueError, + "string must be of size 1") return string[0] value = space.getindex_w(w_value, None) if not 0 <= value < 256: # this includes the OverflowError in case the long is too large - raise OperationError(space.w_ValueError, space.wrap( - "byte must be in range(0, 
256)")) + raise operationerrfmt(space.w_ValueError, + "byte must be in range(0, 256)") return chr(value) + def new_bytearray(space, w_bytearraytype, data): w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) W_BytearrayObject.__init__(w_obj, data) @@ -399,7 +407,7 @@ # String-like argument try: string = space.bufferstr_new_w(w_source) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_TypeError): raise else: @@ -413,7 +421,7 @@ while True: try: w_item = space.next(w_iter) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_StopIteration): raise break @@ -424,6 +432,7 @@ resizelist_hint(data, extended) return data + def _hex_digit_to_int(d): val = ord(d) if 47 < val < 58: @@ -560,12 +569,12 @@ def decode(): """B.decode(encoding=None, errors='strict') -> unicode - Decode B using the codec registered for encoding. encoding defaults - to the default encoding. errors may be given to set a different error - handling scheme. Default is 'strict' meaning that encoding errors raise - a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' - as well as any other name registered with codecs.register_error that is - able to handle UnicodeDecodeErrors. + Decode B using the codec registered for encoding. encoding defaults to + the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors + raise a UnicodeDecodeError. Other possible values are 'ignore' and + 'replace' as well as any other name registered with + codecs.register_error that is able to handle UnicodeDecodeErrors. """ def endswith(): @@ -602,7 +611,7 @@ """ def fromhex(): - """bytearray.fromhex(string) -> bytearray (static method) + r"""bytearray.fromhex(string) -> bytearray (static method) Create a bytearray object from a string of hexadecimal numbers. Spaces between two numbers are accepted. 
@@ -1024,9 +1033,10 @@ _space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) -#XXX share the code again with the stuff in listobject.py + +# XXX share the code again with the stuff in listobject.py def _delitem_slice_helper(space, items, start, step, slicelength): - if slicelength==0: + if slicelength == 0: return if step < 0: @@ -1056,6 +1066,7 @@ assert start >= 0 # annotator hint del items[start:] + def _setitem_slice_helper(space, items, start, step, slicelength, sequence2, empty_elem): assert slicelength >= 0 diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -1,19 +1,23 @@ """The builtin str implementation""" +from rpython.rlib.jit import we_are_jitted +from rpython.rlib.objectmodel import ( + compute_hash, compute_unique_id, import_from_mixin) +from rpython.rlib.rstring import StringBuilder, replace + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import StringBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault, interpindirect2app +from pypy.interpreter.gateway import ( + WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods -from pypy.objspace.std.unicodeobject import (unicode_from_string, - decode_object, unicode_from_encoded_object, _get_encoding_and_errors) -from rpython.rlib.jit import we_are_jitted -from rpython.rlib.objectmodel import compute_hash, compute_unique_id, import_from_mixin -from rpython.rlib.rstring import StringBuilder, replace +from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, decode_object, unicode_from_encoded_object, + unicode_from_string) class W_AbstractBytesObject(W_Root): @@ -184,8 +188,8 @@ def descr_format(self, space, __args__): """S.format(*args, **kwargs) -> string - Return a formatted version of S, using substitutions from args and kwargs. - The substitutions are identified by braces ('{' and '}'). + Return a formatted version of S, using substitutions from args and + kwargs. The substitutions are identified by braces ('{' and '}'). """ def descr_index(self, space, w_sub, w_start=None, w_end=None): @@ -319,8 +323,8 @@ """S.rpartition(sep) -> (head, sep, tail) Search for the separator sep in S, starting at the end of S, and return - the part before it, the separator itself, and the part after it. If the - separator is not found, return two empty strings and S. + the part before it, the separator itself, and the part after it. If + the separator is not found, return two empty strings and S. """ @unwrap_spec(maxsplit=int) @@ -432,7 +436,7 @@ self._value = str def __repr__(self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%r)" % (self.__class__.__name__, self._value) def unwrap(self, space): @@ -521,7 +525,7 @@ return space.newlist_bytes(lst) @staticmethod - @unwrap_spec(w_object = WrappedDefault("")) + @unwrap_spec(w_object=WrappedDefault("")) def descr_new(space, w_stringtype, w_object): # NB. 
the default value of w_object is really a *wrapped* empty string: # there is gateway magic at work @@ -624,7 +628,8 @@ _StringMethods_descr_add = descr_add def descr_add(self, space, w_other): if space.isinstance_w(w_other, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) return space.add(self_as_unicode, w_other) elif space.isinstance_w(w_other, space.w_bytearray): # XXX: eliminate double-copy @@ -635,7 +640,7 @@ from pypy.objspace.std.strbufobject import W_StringBufferObject try: other = self._op_val(space, w_other) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -648,24 +653,32 @@ _StringMethods__startswith = _startswith def _startswith(self, space, value, w_prefix, start, end): if space.isinstance_w(w_prefix, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return self_as_unicode._startswith(space, self_as_unicode._value, w_prefix, start, end) - return self._StringMethods__startswith(space, value, w_prefix, start, end) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) + return self_as_unicode._startswith(space, self_as_unicode._value, + w_prefix, start, end) + return self._StringMethods__startswith(space, value, w_prefix, start, + end) _StringMethods__endswith = _endswith def _endswith(self, space, value, w_suffix, start, end): if space.isinstance_w(w_suffix, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return self_as_unicode._endswith(space, self_as_unicode._value, w_suffix, start, end) - return self._StringMethods__endswith(space, value, w_suffix, start, end) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) + return self_as_unicode._endswith(space, self_as_unicode._value, + w_suffix, start, end) + return self._StringMethods__endswith(space, value, w_suffix, start, + end) _StringMethods_descr_contains = descr_contains def descr_contains(self, space, w_sub): if space.isinstance_w(w_sub, space.w_unicode): from pypy.objspace.std.unicodeobject import W_UnicodeObject assert isinstance(w_sub, W_UnicodeObject) - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return space.newbool(self_as_unicode._value.find(w_sub._value) >= 0) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) + return space.newbool( + self_as_unicode._value.find(w_sub._value) >= 0) return self._StringMethods_descr_contains(space, w_sub) _StringMethods_descr_replace = descr_replace @@ -685,8 +698,8 @@ try: res = replace(input, sub, by, count) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string is too long")) + raise operationerrfmt(space.w_OverflowError, + "replace string is too long") return self_as_uni._new(res) return self._StringMethods_descr_replace(space, w_old, w_new, count) @@ -751,6 +764,7 @@ return W_BytesObject.EMPTY return W_BytesObject(s) + def wrapchar(space, c): if space.config.objspace.std.withprebuiltchar and not we_are_jitted(): return W_BytesObject.PREBUILT[ord(c)] @@ -830,7 +844,8 @@ __format__ = interpindirect2app(W_BytesObject.descr__format__), __mod__ = interpindirect2app(W_BytesObject.descr_mod), __buffer__ = interpindirect2app(W_AbstractBytesObject.descr_buffer), - __getnewargs__ = interpindirect2app(W_AbstractBytesObject.descr_getnewargs), + __getnewargs__ = 
interpindirect2app( + W_AbstractBytesObject.descr_getnewargs), _formatter_parser = interp2app(W_BytesObject.descr_formatter_parser), _formatter_field_name_split = interp2app(W_BytesObject.descr_formatter_field_name_split), @@ -865,8 +880,8 @@ buf.append_slice(s, startslice, i) startslice = i + 1 buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) + buf.append("0123456789abcdef"[n >> 4]) + buf.append("0123456789abcdef"[n & 0xF]) if use_bs_char: if i != startslice: diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -162,9 +162,9 @@ return self @staticmethod - def newlist_bytes(space, list_s): + def newlist_bytes(space, list_b): strategy = space.fromcache(BytesListStrategy) - storage = strategy.erase(list_s) + storage = strategy.erase(list_b) return W_ListObject.from_storage_and_strategy(space, storage, strategy) @staticmethod diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -1,18 +1,22 @@ -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import unwrap_spec, WrappedDefault -from pypy.objspace.std import slicetype -from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +"""Functionality shared between bytes/bytearray/unicode""" + from rpython.rlib import jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import ovfcheck -from rpython.rlib.rstring import split, rsplit, replace, startswith, endswith +from rpython.rlib.rstring import endswith, replace, rsplit, split, startswith + +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import WrappedDefault, unwrap_spec +from pypy.objspace.std import slicetype +from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice class StringMethods(object): def _sliced(self, space, s, start, stop, orig_obj): assert start >= 0 assert stop >= 0 - #if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), space.w_str): + #if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), + # space.w_str): # return orig_obj return self._new(s[start:stop]) @@ -21,7 +25,7 @@ value = self._val(space) lenself = len(value) start, end = slicetype.unwrap_start_stop( - space, lenself, w_start, w_end, upper_bound=upper_bound) + space, lenself, w_start, w_end, upper_bound=upper_bound) return (value, start, end) def descr_len(self, space): @@ -36,12 +40,14 @@ space.isinstance_w(w_sub, space.w_int)): char = space.int_w(w_sub) return _descr_contains_bytearray(self.data, space, char) - return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) + value = self._val(space) + other = self._op_val(space, w_sub) + return space.newbool(value.find(other) >= 0) def descr_add(self, space, w_other): try: other = self._op_val(space, w_other) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -50,7 +56,7 @@ def descr_mul(self, space, w_times): try: times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -82,12 +88,11 @@ if index < 0: index += selflen if index < 0 or index >= selflen: - raise 
OperationError(space.w_IndexError, - space.wrap("string index out of range")) + raise operationerrfmt(space.w_IndexError, + "string index out of range") from pypy.objspace.std.bytearrayobject import W_BytearrayObject if isinstance(self, W_BytearrayObject): return space.wrap(ord(selfvalue[index])) - #return wrapchar(space, selfvalue[index]) return self._new(selfvalue[index]) def descr_getslice(self, space, w_start, w_stop): @@ -115,35 +120,39 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("center() argument 2 must be a single character")) + raise operationerrfmt(space.w_TypeError, + "center() argument 2 must be a single " + "character") d = width - len(value) - if d>0: + if d > 0: offset = d//2 + (d & width & 1) fillchar = fillchar[0] # annotator hint: it's a single character - u_centered = offset * fillchar + value + (d - offset) * fillchar + centered = offset * fillchar + value + (d - offset) * fillchar else: - u_centered = value + centered = value - return self._new(u_centered) + return self._new(centered) def descr_count(self, space, w_sub, w_start=None, w_end=None): value, start, end = self._convert_idx_params(space, w_start, w_end) - return space.newint(value.count(self._op_val(space, w_sub), start, end)) + return space.newint(value.count(self._op_val(space, w_sub), start, + end)) def descr_decode(self, space, w_encoding=None, w_errors=None): - from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ - unicode_from_string, decode_object - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, decode_object, unicode_from_string) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) if encoding is None and errors is None: return unicode_from_string(space, self) return decode_object(space, self, encoding, errors) def descr_encode(self, space, w_encoding=None, w_errors=None): - from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ - encode_object - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, encode_object) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) return encode_object(space, self, encoding, errors) @unwrap_spec(tabsize=int) @@ -156,18 +165,19 @@ try: ovfcheck(len(splitted) * tabsize) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("new string is too long")) + raise operationerrfmt(space.w_OverflowError, + "new string is too long") expanded = oldtoken = splitted.pop(0) for token in splitted: - expanded += self._chr(' ') * self._tabindent(oldtoken, tabsize) + token + expanded += self._chr(' ') * self._tabindent(oldtoken, + tabsize) + token oldtoken = token return self._new(expanded) def _tabindent(self, token, tabsize): - "calculates distance behind the token to the next tabstop" + """calculates distance behind the token to the next tabstop""" distance = tabsize if token: @@ -203,8 +213,8 @@ (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.find(self._op_val(space, w_sub), start, end) if res < 0: - raise OperationError(space.w_ValueError, - space.wrap("substring not found in string.index")) + raise operationerrfmt(space.w_ValueError, + "substring not found in string.index") return space.wrap(res) @@ -212,8 +222,8 @@ (value, start, end) = 
self._convert_idx_params(space, w_start, w_end) res = value.rfind(self._op_val(space, w_sub), start, end) if res < 0: - raise OperationError(space.w_ValueError, - space.wrap("substring not found in string.rindex")) + raise operationerrfmt(space.w_ValueError, + "substring not found in string.rindex") return space.wrap(res) @@ -349,8 +359,7 @@ if check_item == 1: raise operationerrfmt( space.w_TypeError, - "sequence item %d: expected string, %s " - "found", i, space.type(w_s).getname(space)) + "sequence item %d: expected string, %T found", i, w_s) elif check_item == 2: return self._join_autoconvert(space, list_w) prealloc_size += len(self._op_val(space, w_s)) @@ -370,9 +379,9 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("ljust() argument 2 must be a single character")) - + raise operationerrfmt(space.w_TypeError, + "ljust() argument 2 must be a single " + "character") d = width - len(value) if d > 0: fillchar = fillchar[0] # annotator hint: it's a single character @@ -385,9 +394,9 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("rjust() argument 2 must be a single character")) - + raise operationerrfmt(space.w_TypeError, + "rjust() argument 2 must be a single " + "character") d = width - len(value) if d > 0: fillchar = fillchar[0] # annotator hint: it's a single character @@ -406,8 +415,7 @@ value = self._val(space) sub = self._op_val(space, w_sub) if not sub: - raise OperationError(space.w_ValueError, - space.wrap("empty separator")) + raise operationerrfmt(space.w_ValueError, "empty separator") pos = value.find(sub) if pos == -1: from pypy.objspace.std.bytearrayobject import W_BytearrayObject @@ -426,8 +434,7 @@ value = self._val(space) sub = self._op_val(space, w_sub) if not sub: - raise OperationError(space.w_ValueError, - space.wrap("empty separator")) + raise operationerrfmt(space.w_ValueError, "empty separator") pos = value.rfind(sub) if pos == -1: from pypy.objspace.std.bytearrayobject import W_BytearrayObject @@ -450,8 +457,8 @@ try: res = replace(input, sub, by, count) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string is too long")) + raise operationerrfmt(space.w_OverflowError, + "replace string is too long") return self._new(res) @unwrap_spec(maxsplit=int) @@ -466,7 +473,7 @@ by = self._op_val(space, w_sep) bylen = len(by) if bylen == 0: - raise OperationError(space.w_ValueError, space.wrap("empty separator")) + raise operationerrfmt(space.w_ValueError, "empty separator") res = split(value, by, maxsplit) return self._newlist_unwrapped(space, res) @@ -481,7 +488,7 @@ by = self._op_val(space, w_sep) bylen = len(by) if bylen == 0: - raise OperationError(space.w_ValueError, space.wrap("empty separator")) + raise operationerrfmt(space.w_ValueError, "empty separator") res = rsplit(value, by, maxsplit) return self._newlist_unwrapped(space, res) @@ -515,21 +522,22 @@ if self._startswith(space, value, w_prefix, start, end): return space.w_True return space.w_False - return space.newbool(self._startswith(space, value, w_prefix, start, end)) + return space.newbool(self._startswith(space, value, w_prefix, start, + end)) def _startswith(self, space, value, w_prefix, start, end): return startswith(value, self._op_val(space, w_prefix), start, end) def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): - (value, start, end) = 
self._convert_idx_params(space, w_start, - w_end, True) - + (value, start, end) = self._convert_idx_params(space, w_start, w_end, + True) if space.isinstance_w(w_suffix, space.w_tuple): for w_suffix in space.fixedview(w_suffix): if self._endswith(space, value, w_suffix, start, end): return space.w_True return space.w_False - return space.newbool(self._endswith(space, value, w_suffix, start, end)) + return space.newbool(self._endswith(space, value, w_suffix, start, + end)) def _endswith(self, space, value, w_prefix, start, end): return endswith(value, self._op_val(space, w_prefix), start, end) @@ -537,18 +545,17 @@ def _strip(self, space, w_chars, left, right): "internal function called by str_xstrip methods" value = self._val(space) - u_chars = self._op_val(space, w_chars) + chars = self._op_val(space, w_chars) lpos = 0 rpos = len(value) if left: - #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, value[lpos],w_chars) - while lpos < rpos and value[lpos] in u_chars: + while lpos < rpos and value[lpos] in chars: lpos += 1 if right: - while rpos > lpos and value[rpos - 1] in u_chars: + while rpos > lpos and value[rpos - 1] in chars: rpos -= 1 assert rpos >= lpos # annotator hint, don't remove @@ -562,13 +569,12 @@ rpos = len(value) if left: - #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, value[lpos],w_chars) while lpos < rpos and self._isspace(value[lpos]): - lpos += 1 + lpos += 1 if right: while rpos > lpos and self._isspace(value[rpos - 1]): - rpos -= 1 + rpos -= 1 assert rpos >= lpos # annotator hint, don't remove return self._sliced(space, value, lpos, rpos, self) @@ -629,9 +635,9 @@ else: table = self._op_val(space, w_table) if len(table) != 256: - raise OperationError( + raise operationerrfmt( space.w_ValueError, - space.wrap("translation table must be 256 characters long")) + "translation table must be 256 characters long") string = self._val(space) deletechars = self._op_val(space, w_deletechars) @@ -685,13 +691,14 @@ def _descr_contains_bytearray(data, space, char): if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) + raise operationerrfmt(space.w_ValueError, + "byte must be in range(0, 256)") for c in data: if ord(c) == char: return space.w_True return space.w_False + @specialize.argtype(0) def _descr_getslice_slowpath(selfvalue, start, step, sl): return [selfvalue[start + i*step] for i in range(sl)] diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -1,19 +1,22 @@ """The builtin unicode implementation""" +from rpython.rlib.objectmodel import ( + compute_hash, compute_unique_id, import_from_mixin) +from rpython.rlib.rstring import UnicodeBuilder +from rpython.rlib.runicode import ( + make_unicode_escape_function, str_decode_ascii, str_decode_utf_8, + unicode_encode_ascii, unicode_encode_utf_8) + from pypy.interpreter import unicodehelper from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.module.unicodedata import unicodedb from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods 
import StringMethods -from rpython.rlib.objectmodel import compute_hash, compute_unique_id, import_from_mixin -from rpython.rlib.rstring import UnicodeBuilder -from rpython.rlib.runicode import (str_decode_utf_8, str_decode_ascii, - unicode_encode_utf_8, unicode_encode_ascii, make_unicode_escape_function) __all__ = ['W_UnicodeObject', 'wrapunicode', 'plain_str2unicode', 'encode_object', 'decode_object', 'unicode_from_object', @@ -29,7 +32,7 @@ w_self._value = unistr def __repr__(w_self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%r)" % (w_self.__class__.__name__, w_self._value) def unwrap(w_self, space): @@ -90,7 +93,8 @@ return w_other._value if space.isinstance_w(w_other, space.w_str): return unicode_from_string(space, w_other)._value - return unicode_from_encoded_object(space, w_other, None, "strict")._value + return unicode_from_encoded_object( + space, w_other, None, "strict")._value def _chr(self, char): assert len(char) == 1 @@ -144,14 +148,15 @@ return space.newlist_unicode(lst) @staticmethod - @unwrap_spec(w_string = WrappedDefault("")) + @unwrap_spec(w_string=WrappedDefault("")) def descr_new(space, w_unicodetype, w_string, w_encoding=None, w_errors=None): # NB. the default value of w_obj is really a *wrapped* empty string: # there is gateway magic at work w_obj = w_string - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) # convoluted logic for the case when unicode subclass has a __unicode__ # method, we need to call this method is_precisely_unicode = space.is_w(space.type(w_obj), space.w_unicode) @@ -159,8 +164,8 @@ (space.isinstance_w(w_obj, space.w_unicode) and space.findattr(w_obj, space.wrap('__unicode__')) is None)): if encoding is not None or errors is not None: - raise OperationError(space.w_TypeError, space.wrap( - 'decoding Unicode is not supported')) + raise operationerrfmt(space.w_TypeError, + "decoding Unicode is not supported") if (is_precisely_unicode and space.is_w(w_unicodetype, space.w_unicode)): return w_obj @@ -194,8 +199,8 @@ def descr_eq(self, space, w_other): try: - return space.newbool(self._val(space) == self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) == self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented if (e.match(space, space.w_UnicodeDecodeError) or @@ -206,11 +211,12 @@ space.warn(space.wrap(msg), space.w_UnicodeWarning) return space.w_False raise + return space.newbool(res) def descr_ne(self, space, w_other): try: - return space.newbool(self._val(space) != self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) != self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented if (e.match(space, space.w_UnicodeDecodeError) or @@ -221,38 +227,43 @@ space.warn(space.wrap(msg), space.w_UnicodeWarning) return space.w_True raise + return space.newbool(res) def descr_lt(self, space, w_other): try: - return space.newbool(self._val(space) < self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) < self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_le(self, space, w_other): try: - return space.newbool(self._val(space) <= self._op_val(space, w_other)) - 
except OperationError, e: + res = self._val(space) <= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_gt(self, space, w_other): try: - return space.newbool(self._val(space) > self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) > self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ge(self, space, w_other): try: - return space.newbool(self._val(space) >= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) >= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_format(self, space, __args__): return newformat.format_method(space, self, __args__, is_unicode=True) @@ -272,12 +283,13 @@ def descr_translate(self, space, w_table): selfvalue = self._value w_sys = space.getbuiltinmodule('sys') - maxunicode = space.int_w(space.getattr(w_sys, space.wrap("maxunicode"))) + maxunicode = space.int_w(space.getattr(w_sys, + space.wrap("maxunicode"))) result = [] for unichar in selfvalue: try: w_newval = space.getitem(w_table, space.wrap(ord(unichar))) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_LookupError): result.append(unichar) else: @@ -288,20 +300,21 @@ elif space.isinstance_w(w_newval, space.w_int): newval = space.int_w(w_newval) if newval < 0 or newval > maxunicode: - raise OperationError( - space.w_TypeError, - space.wrap("character mapping must be in range(0x%x)" % (maxunicode + 1,))) + msg = ("character mapping must be in range(0x%x)" % + (maxunicode + 1,)) + raise operationerrfmt(space.w_TypeError, msg) result.append(unichr(newval)) elif space.isinstance_w(w_newval, space.w_unicode): result.append(space.unicode_w(w_newval)) else: - raise OperationError( - space.w_TypeError, - space.wrap("character mapping must return integer, None or unicode")) + raise operationerrfmt(space.w_TypeError, + "character mapping must return " + "integer, None or unicode") return W_UnicodeObject(u''.join(result)) def descr_encode(self, space, w_encoding=None, w_errors=None): - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) return encode_object(space, self, encoding, errors) def _join_return_one(self, space, w_obj): @@ -353,6 +366,7 @@ def wrapunicode(space, uni): return W_UnicodeObject(uni) + def plain_str2unicode(space, s): try: return unicode(s) @@ -378,17 +392,13 @@ def getdefaultencoding(space): return space.sys.defaultencoding + def _get_encoding_and_errors(space, w_encoding, w_errors): - if space.is_none(w_encoding): - encoding = None - else: - encoding = space.str_w(w_encoding) - if space.is_none(w_errors): - errors = None - else: - errors = space.str_w(w_errors) + encoding = None if space.is_none(w_encoding) else space.str_w(w_encoding) + errors = None if space.is_none(w_errors) else space.str_w(w_errors) return encoding, errors + def encode_object(space, w_object, encoding, errors): if encoding is None: # Get the encoder functions as a wrapped object. 
@@ -416,11 +426,12 @@ w_restuple = space.call_function(w_encoder, w_object, w_errors) w_retval = space.getitem(w_restuple, space.wrap(0)) if not space.isinstance_w(w_retval, space.w_str): - raise operationerrfmt(space.w_TypeError, - "encoder did not return an string object (type '%s')", - space.type(w_retval).getname(space)) + raise operationerrfmt( + space.w_TypeError, + "encoder did not return an string object (type '%T')", w_retval) return w_retval + def decode_object(space, w_obj, encoding, errors): if encoding is None: encoding = getdefaultencoding(space) @@ -451,17 +462,18 @@ # explicitly block bytearray on 2.7 from .bytearrayobject import W_BytearrayObject if isinstance(w_obj, W_BytearrayObject): - raise OperationError(space.w_TypeError, - space.wrap("decoding bytearray is not supported")) + raise operationerrfmt(space.w_TypeError, + "decoding bytearray is not supported") w_retval = decode_object(space, w_obj, encoding, errors) if not space.isinstance_w(w_retval, space.w_unicode): raise operationerrfmt(space.w_TypeError, - "decoder did not return an unicode object (type '%s')", - space.type(w_retval).getname(space)) + "decoder did not return an unicode object (type '%T')", + w_retval) assert isinstance(w_retval, W_UnicodeObject) return w_retval + def unicode_from_object(space, w_obj): if space.is_w(space.type(w_obj), space.w_unicode): return w_obj @@ -483,6 +495,7 @@ return w_res return unicode_from_encoded_object(space, w_res, None, "strict") + def unicode_from_string(space, w_str): # this is a performance and bootstrapping hack encoding = getdefaultencoding(space) @@ -651,8 +664,8 @@ def format(): """S.format(*args, **kwargs) -> unicode - Return a formatted version of S, using substitutions from args and kwargs. - The substitutions are identified by braces ('{' and '}'). + Return a formatted version of S, using substitutions from args and + kwargs. The substitutions are identified by braces ('{' and '}'). """ def index(): @@ -789,16 +802,16 @@ def rjust(): """S.rjust(width[, fillchar]) -> unicode - Return S right-justified in a Unicode string of length width. Padding is - done using the specified fill character (default is a space). + Return S right-justified in a Unicode string of length width. Padding + is done using the specified fill character (default is a space). """ def rpartition(): """S.rpartition(sep) -> (head, sep, tail) Search for the separator sep in S, starting at the end of S, and return - the part before it, the separator itself, and the part after it. If the - separator is not found, return two empty strings and S. + the part before it, the separator itself, and the part after it. If + the separator is not found, return two empty strings and S. 
""" def rsplit(): @@ -1036,6 +1049,7 @@ W_UnicodeObject.EMPTY = W_UnicodeObject(u'') + # Helper for converting int/long def unicode_to_decimal_w(space, w_unistr): if not isinstance(w_unistr, W_UnicodeObject): @@ -1043,8 +1057,8 @@ w_unistr) unistr = w_unistr._value result = ['\0'] * len(unistr) - digits = [ '0', '1', '2', '3', '4', - '5', '6', '7', '8', '9'] + digits = ['0', '1', '2', '3', '4', + '5', '6', '7', '8', '9'] for i in xrange(len(unistr)): uchr = ord(unistr[i]) if unicodedb.isspace(uchr): @@ -1060,7 +1074,10 @@ w_start = space.wrap(i) w_end = space.wrap(i+1) w_reason = space.wrap('invalid decimal Unicode string') - raise OperationError(space.w_UnicodeEncodeError, space.newtuple([w_encoding, w_unistr, w_start, w_end, w_reason])) + raise OperationError(space.w_UnicodeEncodeError, + space.newtuple([w_encoding, w_unistr, + w_start, w_end, + w_reason])) return ''.join(result) From noreply at buildbot.pypy.org Sat Jan 25 01:56:00 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 25 Jan 2014 01:56:00 +0100 (CET) Subject: [pypy-commit] pypy default: push these specifics out into the individual classes Message-ID: <20140125005600.401AD1C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68925:be9bfeaafbe7 Date: 2014-01-24 16:18 -0800 http://bitbucket.org/pypy/pypy/changeset/be9bfeaafbe7/ Log: push these specifics out into the individual classes diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -376,14 +376,36 @@ raise operationerrfmt(space.w_ValueError, "value not found in bytearray") + _StringMethods_descr_contains = descr_contains + def descr_contains(self, space, w_sub): + if space.isinstance_w(w_sub, space.w_int): + char = space.int_w(w_sub) + return _descr_contains_bytearray(self.data, space, char) + return self._StringMethods_descr_contains(space, w_sub) + def descr_reverse(self, space): self.data.reverse() +# ____________________________________________________________ +# helpers for slow paths, moved out because they contain loops + def _make_data(s): return [s[i] for i in range(len(s))] +def _descr_contains_bytearray(data, space, char): + if not 0 <= char < 256: + raise operationerrfmt(space.w_ValueError, + "byte must be in range(0, 256)") + for c in data: + if ord(c) == char: + return space.w_True + return space.w_False + +# ____________________________________________________________ + + def getbytevalue(space, w_value): if space.isinstance_w(w_value, space.w_str): string = space.str_w(w_value) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -703,11 +703,14 @@ return self_as_uni._new(res) return self._StringMethods_descr_replace(space, w_old, w_new, count) - def descr_lower(self, space): - return W_BytesObject(self._value.lower()) - - def descr_upper(self, space): - return W_BytesObject(self._value.upper()) + _StringMethods_descr_join = descr_join + def descr_join(self, space, w_list): + l = space.listview_bytes(w_list) + if l is not None: + if len(l) == 1: + return space.wrap(l[0]) + return space.wrap(self._val(space).join(l)) + return self._StringMethods_descr_join(space, w_list) def _join_return_one(self, space, w_obj): return (space.is_w(space.type(w_obj), space.w_str) or @@ -727,6 +730,12 @@ w_u = space.call_function(space.w_unicode, self) return space.call_method(w_u, "join", w_list) + def 
descr_lower(self, space): + return W_BytesObject(self._value.lower()) + + def descr_upper(self, space): + return W_BytesObject(self._value.upper()) + def descr_formatter_parser(self, space): from pypy.objspace.std.newformat import str_template_formatter tformat = str_template_formatter(space, space.str_w(self)) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -35,11 +35,6 @@ # pass def descr_contains(self, space, w_sub): - from pypy.objspace.std.bytearrayobject import W_BytearrayObject - if (isinstance(self, W_BytearrayObject) and - space.isinstance_w(w_sub, space.w_int)): - char = space.int_w(w_sub) - return _descr_contains_bytearray(self.data, space, char) value = self._val(space) other = self._op_val(space, w_sub) return space.newbool(value.find(other) >= 0) @@ -317,22 +312,6 @@ return space.newbool(cased) def descr_join(self, space, w_list): - from pypy.objspace.std.bytesobject import W_BytesObject - from pypy.objspace.std.unicodeobject import W_UnicodeObject - - if isinstance(self, W_BytesObject): - l = space.listview_bytes(w_list) - if l is not None: - if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) - elif isinstance(self, W_UnicodeObject): - l = space.listview_unicode(w_list) - if l is not None: - if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) - list_w = space.listview(w_list) size = len(list_w) @@ -689,16 +668,6 @@ # ____________________________________________________________ # helpers for slow paths, moved out because they contain loops -def _descr_contains_bytearray(data, space, char): - if not 0 <= char < 256: - raise operationerrfmt(space.w_ValueError, - "byte must be in range(0, 256)") - for c in data: - if ord(c) == char: - return space.w_True - return space.w_False - - @specialize.argtype(0) def _descr_getslice_slowpath(selfvalue, start, step, sl): return [selfvalue[start + i*step] for i in range(sl)] diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -317,6 +317,15 @@ w_errors) return encode_object(space, self, encoding, errors) + _StringMethods_descr_join = descr_join + def descr_join(self, space, w_list): + l = space.listview_unicode(w_list) + if l is not None: + if len(l) == 1: + return space.wrap(l[0]) + return space.wrap(self._val(space).join(l)) + return self._StringMethods_descr_join(space, w_list) + def _join_return_one(self, space, w_obj): return space.is_w(space.type(w_obj), space.w_unicode) From noreply at buildbot.pypy.org Sat Jan 25 01:56:01 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 25 Jan 2014 01:56:01 +0100 (CET) Subject: [pypy-commit] pypy default: merge upstream Message-ID: <20140125005601.8C75A1C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68926:92a94109c1f3 Date: 2014-01-24 16:18 -0800 http://bitbucket.org/pypy/pypy/changeset/92a94109c1f3/ Log: merge upstream diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -394,6 +394,9 @@ class W_Float64Box(W_FloatingBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") + def descr_as_integer_ratio(self, space): + return space.call_method(self.item(space), 'as_integer_ratio') + class 
W_ComplexFloatingBox(W_InexactBox): def descr_get_real(self, space): dtype = self._COMPONENTS_BOX._get_dtype(space) @@ -719,6 +722,7 @@ __module__ = "numpy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), __reduce__ = interp2app(W_Float64Box.descr_reduce), + as_integer_ratio = interp2app(W_Float64Box.descr_as_integer_ratio), ) W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -181,6 +181,11 @@ s = np.dtype([('a', 'int64'), ('b', 'int64')]).type('a' * 16) assert s.view('S16') == 'a' * 16 + def test_as_integer_ratio(self): + import numpy as np + raises(AttributeError, 'np.float32(1.5).as_integer_ratio()') + assert np.float64(1.5).as_integer_ratio() == (3, 2) + def test_complex_scalar_complex_cast(self): import numpy as np for tp in [np.csingle, np.cdouble, np.clongdouble]: diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -594,9 +594,11 @@ if tobox is not None: # sanity check: see whether the current struct value # corresponds to what the cache thinks the value is - resbox = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC, fielddescr, box) - assert resbox.constbox().same_constant(tobox.constbox()) + # XXX pypy with the following check fails on micronumpy, + # XXX investigate + #resbox = executor.execute(self.metainterp.cpu, self.metainterp, + # rop.GETFIELD_GC, fielddescr, box) + #assert resbox.constbox().same_constant(tobox.constbox()) return tobox resbox = self.execute_with_descr(opnum, fielddescr, box) self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox) From noreply at buildbot.pypy.org Sat Jan 25 01:56:02 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 25 Jan 2014 01:56:02 +0100 (CET) Subject: [pypy-commit] pypy py3k: bump for the new co_flag Message-ID: <20140125005602.BF2EA1C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68927:633456049ba7 Date: 2014-01-24 14:12 -0800 http://bitbucket.org/pypy/pypy/changeset/633456049ba7/ Log: bump for the new co_flag diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -35,7 +35,7 @@ # different value for the highest 16 bits. Bump pypy_incremental_magic every # time you make pyc files incompatible -pypy_incremental_magic = 16 # bump it by 16 +pypy_incremental_magic = 32 # bump it by 16 assert pypy_incremental_magic % 16 == 0 assert pypy_incremental_magic < 3000 # the magic number of Python 3. 
There are # no known magic numbers below this value From noreply at buildbot.pypy.org Sat Jan 25 01:56:04 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 25 Jan 2014 01:56:04 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140125005604.1B9431C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68928:a27d22674995 Date: 2014-01-24 16:52 -0800 http://bitbucket.org/pypy/pypy/changeset/a27d22674995/ Log: merge default diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -389,6 +389,9 @@ class W_Float64Box(W_FloatingBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") + def descr_as_integer_ratio(self, space): + return space.call_method(self.item(space), 'as_integer_ratio') + class W_ComplexFloatingBox(W_InexactBox): def descr_get_real(self, space): dtype = self._COMPONENTS_BOX._get_dtype(space) @@ -715,6 +718,7 @@ __module__ = "numpy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), __reduce__ = interp2app(W_Float64Box.descr_reduce), + as_integer_ratio = interp2app(W_Float64Box.descr_as_integer_ratio), ) W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -181,6 +181,11 @@ s = np.dtype([('a', 'int64'), ('b', 'int64')]).type('a' * 16) assert s.view('S16') == 'a' * 16 + def test_as_integer_ratio(self): + import numpy as np + raises(AttributeError, 'np.float32(1.5).as_integer_ratio()') + assert np.float64(1.5).as_integer_ratio() == (3, 2) + def test_complex_scalar_complex_cast(self): import numpy as np for tp in [np.csingle, np.cdouble, np.clongdouble]: diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1,21 +1,22 @@ """The builtin bytearray implementation""" +from rpython.rlib.objectmodel import ( + import_from_mixin, newlist_hint, resizelist_hint) +from rpython.rlib.rstring import StringBuilder + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt from pypy.objspace.std.bytesobject import ( getbytevalue, makebytesdata_w, newbytesdata_w) -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.util import get_positive_index -from rpython.rlib.objectmodel import import_from_mixin -from rpython.rlib.rstring import StringBuilder +NON_HEX_MSG = "non-hexadecimal number found in fromhex() arg at position %d" -def _make_data(s): - return [s[i] for i in range(len(s))] class W_BytearrayObject(W_Root): import_from_mixin(StringMethods) @@ -24,7 +25,7 @@ w_self.data = data def __repr__(w_self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) def _new(self, value): @@ -126,11 +127,6 @@ @staticmethod def 
descr_fromhex(space, w_bytearraytype, w_hexstring): - "bytearray.fromhex(string) -> bytearray\n" - "\n" - "Create a bytearray object from a string of hexadecimal numbers.\n" - "Spaces between two numbers are accepted.\n" - "Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\\xb9\\x01\\xef')." if not space.is_w(space.type(w_hexstring), space.w_unicode): raise operationerrfmt(space.w_TypeError, "must be str, not %T", w_hexstring) @@ -168,8 +164,8 @@ elif not '\x20' <= c < '\x7f': n = ord(c) buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) + buf.append("0123456789abcdef"[n >> 4]) + buf.append("0123456789abcdef"[n & 0xF]) else: buf.append(c) @@ -185,51 +181,60 @@ def descr_eq(self, space, w_other): try: - return space.newbool(self._val(space) == self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) == self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ne(self, space, w_other): try: - return space.newbool(self._val(space) != self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) != self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_lt(self, space, w_other): try: - return space.newbool(self._val(space) < self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) < self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_le(self, space, w_other): try: - return space.newbool(self._val(space) <= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) <= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_gt(self, space, w_other): try: - return space.newbool(self._val(space) > self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) > self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ge(self, space, w_other): try: - return space.newbool(self._val(space) >= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) >= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) + + def descr_iter(self, space): + return space.newseqiter(self) def descr_buffer(self, space): return BytearrayBuffer(self.data) @@ -244,7 +249,7 @@ def descr_inplace_mul(self, space, w_times): try: times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -259,12 +264,13 @@ _setitem_slice_helper(space, self.data, start, step, slicelength, sequence2, empty_elem='\x00') else: - idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") + idx = space.getindex_w(w_index, space.w_IndexError, + "bytearray index") try: self.data[idx] = getbytevalue(space, w_other) except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray index out of range")) + raise 
operationerrfmt(space.w_IndexError, + "bytearray index out of range") def descr_delitem(self, space, w_idx): if isinstance(w_idx, W_SliceObject): @@ -272,12 +278,13 @@ len(self.data)) _delitem_slice_helper(space, self.data, start, step, slicelength) else: - idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") + idx = space.getindex_w(w_idx, space.w_IndexError, + "bytearray index") try: del self.data[idx] except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray deletion index out of range")) + raise operationerrfmt(space.w_IndexError, + "bytearray deletion index out of range") def descr_append(self, space, w_item): self.data.append(getbytevalue(space, w_item)) @@ -304,10 +311,9 @@ result = self.data.pop(index) except IndexError: if not self.data: - raise OperationError(space.w_IndexError, space.wrap( - "pop from empty bytearray")) - raise OperationError(space.w_IndexError, space.wrap( - "pop index out of range")) + raise operationerrfmt(space.w_IndexError, + "pop from empty bytearray") + raise operationerrfmt(space.w_IndexError, "pop index out of range") return space.wrap(ord(result)) def descr_remove(self, space, w_char): @@ -315,12 +321,39 @@ try: self.data.remove(chr(char)) except ValueError: - raise OperationError(space.w_ValueError, space.wrap( - "value not found in bytearray")) + raise operationerrfmt(space.w_ValueError, + "value not found in bytearray") + + _StringMethods_descr_contains = descr_contains + def descr_contains(self, space, w_sub): + if space.isinstance_w(w_sub, space.w_int): + char = space.int_w(w_sub) + return _descr_contains_bytearray(self.data, space, char) + return self._StringMethods_descr_contains(space, w_sub) def descr_reverse(self, space): self.data.reverse() + +# ____________________________________________________________ +# helpers for slow paths, moved out because they contain loops + +def _make_data(s): + return [s[i] for i in range(len(s))] + + +def _descr_contains_bytearray(data, space, char): + if not 0 <= char < 256: + raise operationerrfmt(space.w_ValueError, + "byte must be in range(0, 256)") + for c in data: + if ord(c) == char: + return space.w_True + return space.w_False + +# ____________________________________________________________ + + def new_bytearray(space, w_bytearraytype, data): w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) W_BytearrayObject.__init__(w_obj, data) @@ -490,12 +523,12 @@ def decode(): """B.decode(encoding=None, errors='strict') -> unicode - Decode B using the codec registered for encoding. encoding defaults - to the default encoding. errors may be given to set a different error - handling scheme. Default is 'strict' meaning that encoding errors raise - a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' - as well as any other name registered with codecs.register_error that is - able to handle UnicodeDecodeErrors. + Decode B using the codec registered for encoding. encoding defaults to + the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors + raise a UnicodeDecodeError. Other possible values are 'ignore' and + 'replace' as well as any other name registered with + codecs.register_error that is able to handle UnicodeDecodeErrors. 
""" def endswith(): @@ -532,7 +565,7 @@ """ def fromhex(): - """bytearray.fromhex(string) -> bytearray (static method) + r"""bytearray.fromhex(string) -> bytearray (static method) Create a bytearray object from a string of hexadecimal numbers. Spaces between two numbers are accepted. @@ -816,6 +849,8 @@ __ge__ = interp2app(W_BytearrayObject.descr_ge, doc=BytearrayDocstrings.__ge__.__doc__), + __iter__ = interp2app(W_BytearrayObject.descr_iter, + doc=BytearrayDocstrings.__iter__.__doc__), __len__ = interp2app(W_BytearrayObject.descr_len, doc=BytearrayDocstrings.__len__.__doc__), __contains__ = interp2app(W_BytearrayObject.descr_contains, @@ -953,9 +988,10 @@ _space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) -#XXX share the code again with the stuff in listobject.py + +# XXX share the code again with the stuff in listobject.py def _delitem_slice_helper(space, items, start, step, slicelength): - if slicelength==0: + if slicelength == 0: return if step < 0: @@ -985,6 +1021,7 @@ assert start >= 0 # annotator hint del items[start:] + def _setitem_slice_helper(space, items, start, step, slicelength, sequence2, empty_elem): assert slicelength >= 0 diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -1,17 +1,19 @@ """The builtin bytes implementation""" -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.buffer import StringBuffer -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault, interpindirect2app -from pypy.objspace.std.stdtypedef import StdTypeDef -from pypy.objspace.std.stringmethods import StringMethods from rpython.rlib.jit import we_are_jitted from rpython.rlib.objectmodel import ( compute_hash, compute_unique_id, import_from_mixin, newlist_hint, resizelist_hint) from rpython.rlib.rstring import StringBuilder +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.buffer import StringBuffer +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import ( + WrappedDefault, interp2app, interpindirect2app, unwrap_spec) +from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.objspace.std.stringmethods import StringMethods + class W_AbstractBytesObject(W_Root): __slots__ = () @@ -273,8 +275,8 @@ """S.rpartition(sep) -> (head, sep, tail) Search for the separator sep in S, starting at the end of S, and return - the part before it, the separator itself, and the part after it. If the - separator is not found, return two empty strings and S. + the part before it, the separator itself, and the part after it. If + the separator is not found, return two empty strings and S. 
""" @unwrap_spec(maxsplit=int) @@ -387,7 +389,7 @@ self._value = str def __repr__(self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%r)" % (self.__class__.__name__, self._value) def unwrap(self, space): @@ -586,7 +588,7 @@ from pypy.objspace.std.strbufobject import W_StringBufferObject try: other = self._op_val(space, w_other) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -613,11 +615,14 @@ return space.newbool(self._value.find(chr(char)) >= 0) return self._StringMethods_descr_contains(space, w_sub) - def descr_lower(self, space): - return W_BytesObject(self._value.lower()) - - def descr_upper(self, space): - return W_BytesObject(self._value.upper()) + _StringMethods_descr_join = descr_join + def descr_join(self, space, w_list): + l = space.listview_bytes(w_list) + if l is not None: + if len(l) == 1: + return space.wrap(l[0]) + return space.wrap(self._val(space).join(l)) + return self._StringMethods_descr_join(space, w_list) def _join_return_one(self, space, w_obj): return space.is_w(space.type(w_obj), space.w_str) @@ -631,6 +636,12 @@ return True return False + def descr_lower(self, space): + return W_BytesObject(self._value.lower()) + + def descr_upper(self, space): + return W_BytesObject(self._value.upper()) + def _create_list_from_bytes(value): # need this helper function to allow the jit to look inside and inline @@ -658,6 +669,7 @@ return W_BytesObject.EMPTY return W_BytesObject(s) + def wrapchar(space, c): if space.config.objspace.std.withprebuiltchar and not we_are_jitted(): return W_BytesObject.PREBUILT[ord(c)] @@ -822,7 +834,8 @@ zfill = interpindirect2app(W_AbstractBytesObject.descr_zfill), __buffer__ = interpindirect2app(W_AbstractBytesObject.descr_buffer), - __getnewargs__ = interpindirect2app(W_AbstractBytesObject.descr_getnewargs), + __getnewargs__ = interpindirect2app( + W_AbstractBytesObject.descr_getnewargs), fromhex = interp2app(W_BytesObject.descr_fromhex, as_classmethod=True), maketrans = interp2app(W_BytesObject.descr_maketrans, as_classmethod=True), @@ -864,8 +877,8 @@ buf.append_slice(s, startslice, i) startslice = i + 1 buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) + buf.append("0123456789abcdef"[n >> 4]) + buf.append("0123456789abcdef"[n & 0xF]) if use_bs_char: if i != startslice: diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -166,9 +166,9 @@ # XXX: BytesListStrategy is currently broken #@staticmethod - #def newlist_bytes(space, list_s): + #def newlist_bytes(space, list_b): # strategy = space.fromcache(BytesListStrategy) - # storage = strategy.erase(list_s) + # storage = strategy.erase(list_b) # return W_ListObject.from_storage_and_strategy(space, storage, strategy) @staticmethod diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -1,18 +1,22 @@ -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import unwrap_spec, WrappedDefault -from pypy.objspace.std import slicetype -from pypy.objspace.std.sliceobject import W_SliceObject +"""Functionality shared between bytes/bytearray/unicode""" + from rpython.rlib import jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic 
import ovfcheck -from rpython.rlib.rstring import split, rsplit, replace, startswith, endswith +from rpython.rlib.rstring import endswith, replace, rsplit, split, startswith + +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import WrappedDefault, unwrap_spec +from pypy.objspace.std import slicetype +from pypy.objspace.std.sliceobject import W_SliceObject class StringMethods(object): def _sliced(self, space, s, start, stop, orig_obj): assert start >= 0 assert stop >= 0 - #if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), space.w_str): + #if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), + # space.w_str): # return orig_obj return self._new(s[start:stop]) @@ -21,7 +25,7 @@ value = self._val(space) lenself = len(value) start, end = slicetype.unwrap_start_stop( - space, lenself, w_start, w_end, upper_bound=upper_bound) + space, lenself, w_start, w_end, upper_bound=upper_bound) return (value, start, end) @staticmethod @@ -57,17 +61,14 @@ # pass def descr_contains(self, space, w_sub): - from pypy.objspace.std.bytearrayobject import W_BytearrayObject - if (isinstance(self, W_BytearrayObject) and - space.isinstance_w(w_sub, space.w_int)): - char = space.int_w(w_sub) - return _descr_contains_bytearray(self.data, space, char) - return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) + value = self._val(space) + other = self._op_val(space, w_sub) + return space.newbool(value.find(other) >= 0) def descr_add(self, space, w_other): try: other = self._op_val(space, w_other) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -76,7 +77,7 @@ def descr_mul(self, space, w_times): try: times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -108,13 +109,12 @@ if index < 0: index += selflen if index < 0 or index >= selflen: - raise OperationError(space.w_IndexError, - space.wrap("string index out of range")) + raise operationerrfmt(space.w_IndexError, + "string index out of range") from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.bytearrayobject import W_BytearrayObject if isinstance(self, W_BytesObject) or isinstance(self, W_BytearrayObject): return space.wrap(ord(selfvalue[index])) - #return wrapchar(space, selfvalue[index]) return self._new(selfvalue[index]) def descr_capitalize(self, space): @@ -133,27 +133,30 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("center() argument 2 must be a single character")) + raise operationerrfmt(space.w_TypeError, + "center() argument 2 must be a single " + "character") d = width - len(value) - if d>0: + if d > 0: offset = d//2 + (d & width & 1) fillchar = fillchar[0] # annotator hint: it's a single character - u_centered = offset * fillchar + value + (d - offset) * fillchar + centered = offset * fillchar + value + (d - offset) * fillchar else: - u_centered = value + centered = value - return self._new(u_centered) + return self._new(centered) def descr_count(self, space, w_sub, w_start=None, w_end=None): value, start, end = self._convert_idx_params(space, w_start, w_end) - return space.newint(value.count(self._op_val(space, w_sub), start, end)) + return space.newint(value.count(self._op_val(space, w_sub), start, + 
end)) def descr_decode(self, space, w_encoding=None, w_errors=None): from pypy.objspace.std.unicodeobject import ( _get_encoding_and_errors, decode_object) - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) return decode_object(space, self, encoding, errors) @unwrap_spec(tabsize=int) @@ -166,18 +169,19 @@ try: ovfcheck(len(splitted) * tabsize) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("new string is too long")) + raise operationerrfmt(space.w_OverflowError, + "new string is too long") expanded = oldtoken = splitted.pop(0) for token in splitted: - expanded += self._chr(' ') * self._tabindent(oldtoken, tabsize) + token + expanded += self._chr(' ') * self._tabindent(oldtoken, + tabsize) + token oldtoken = token return self._new(expanded) def _tabindent(self, token, tabsize): - "calculates distance behind the token to the next tabstop" + """calculates distance behind the token to the next tabstop""" if tabsize <= 0: return tabsize @@ -216,8 +220,8 @@ (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.find(self._op_val(space, w_sub), start, end) if res < 0: - raise OperationError(space.w_ValueError, - space.wrap("substring not found in string.index")) + raise operationerrfmt(space.w_ValueError, + "substring not found in string.index") return space.wrap(res) @@ -225,8 +229,8 @@ (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.rfind(self._op_val(space, w_sub), start, end) if res < 0: - raise OperationError(space.w_ValueError, - space.wrap("substring not found in string.rindex")) + raise operationerrfmt(space.w_ValueError, + "substring not found in string.rindex") return space.wrap(res) @@ -320,15 +324,6 @@ return space.newbool(cased) def descr_join(self, space, w_list): - from pypy.objspace.std.unicodeobject import W_UnicodeObject - - if isinstance(self, W_UnicodeObject): - l = space.listview_unicode(w_list) - if l is not None: - if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) - list_w = space.listview(w_list) size = len(list_w) @@ -370,9 +365,9 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("ljust() argument 2 must be a single character")) - + raise operationerrfmt(space.w_TypeError, + "ljust() argument 2 must be a single " + "character") d = width - len(value) if d > 0: fillchar = fillchar[0] # annotator hint: it's a single character @@ -385,9 +380,9 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("rjust() argument 2 must be a single character")) - + raise operationerrfmt(space.w_TypeError, + "rjust() argument 2 must be a single " + "character") d = width - len(value) if d > 0: fillchar = fillchar[0] # annotator hint: it's a single character @@ -406,8 +401,7 @@ value = self._val(space) sub = self._op_val(space, w_sub) if not sub: - raise OperationError(space.w_ValueError, - space.wrap("empty separator")) + raise operationerrfmt(space.w_ValueError, "empty separator") pos = value.find(sub) if pos == -1: from pypy.objspace.std.bytearrayobject import W_BytearrayObject @@ -426,8 +420,7 @@ value = self._val(space) sub = self._op_val(space, w_sub) if not sub: - raise OperationError(space.w_ValueError, - space.wrap("empty separator")) + raise 
operationerrfmt(space.w_ValueError, "empty separator") pos = value.rfind(sub) if pos == -1: from pypy.objspace.std.bytearrayobject import W_BytearrayObject @@ -450,8 +443,8 @@ try: res = replace(input, sub, by, count) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string is too long")) + raise operationerrfmt(space.w_OverflowError, + "replace string is too long") return self._new(res) @unwrap_spec(maxsplit=int) @@ -466,7 +459,7 @@ by = self._op_val(space, w_sep) bylen = len(by) if bylen == 0: - raise OperationError(space.w_ValueError, space.wrap("empty separator")) + raise operationerrfmt(space.w_ValueError, "empty separator") res = split(value, by, maxsplit) return self._newlist_unwrapped(space, res) @@ -481,7 +474,7 @@ by = self._op_val(space, w_sep) bylen = len(by) if bylen == 0: - raise OperationError(space.w_ValueError, space.wrap("empty separator")) + raise operationerrfmt(space.w_ValueError, "empty separator") res = rsplit(value, by, maxsplit) return self._newlist_unwrapped(space, res) @@ -533,9 +526,8 @@ return startswith(value, self._op_val(space, w_prefix), start, end) def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, - w_end, True) - + (value, start, end) = self._convert_idx_params(space, w_start, w_end, + True) if space.isinstance_w(w_suffix, space.w_tuple): for w_suffix in space.fixedview(w_suffix): if self._endswith(space, value, w_suffix, start, end): @@ -558,18 +550,17 @@ def _strip(self, space, w_chars, left, right): "internal function called by str_xstrip methods" value = self._val(space) - u_chars = self._op_val(space, w_chars) + chars = self._op_val(space, w_chars) lpos = 0 rpos = len(value) if left: - #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, value[lpos],w_chars) - while lpos < rpos and value[lpos] in u_chars: + while lpos < rpos and value[lpos] in chars: lpos += 1 if right: - while rpos > lpos and value[rpos - 1] in u_chars: + while rpos > lpos and value[rpos - 1] in chars: rpos -= 1 assert rpos >= lpos # annotator hint, don't remove @@ -583,13 +574,12 @@ rpos = len(value) if left: - #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, value[lpos],w_chars) while lpos < rpos and self._isspace(value[lpos]): - lpos += 1 + lpos += 1 if right: while rpos > lpos and self._isspace(value[rpos - 1]): - rpos -= 1 + rpos -= 1 assert rpos >= lpos # annotator hint, don't remove return self._sliced(space, value, lpos, rpos, self) @@ -650,9 +640,9 @@ else: table = self._op_val(space, w_table) if len(table) != 256: - raise OperationError( + raise operationerrfmt( space.w_ValueError, - space.wrap("translation table must be 256 characters long")) + "translation table must be 256 characters long") string = self._val(space) deletechars = self._op_val(space, w_deletechars) @@ -704,15 +694,6 @@ # ____________________________________________________________ # helpers for slow paths, moved out because they contain loops -def _descr_contains_bytearray(data, space, char): - if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) - for c in data: - if ord(c) == char: - return space.w_True - return space.w_False - @specialize.argtype(0) def _descr_getslice_slowpath(selfvalue, start, step, sl): return [selfvalue[start + i*step] for i in range(sl)] diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- 
a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -130,6 +130,7 @@ def test_iter(self): assert list(bytearray(b'hello')) == [104, 101, 108, 108, 111] + assert list(bytearray(b'hello').__iter__()) == [104, 101, 108, 108, 111] def test_compare(self): assert bytearray(b'hello') == bytearray(b'hello') diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -1,18 +1,21 @@ """The builtin str implementation""" +from rpython.rlib.objectmodel import ( + compute_hash, compute_unique_id, import_from_mixin) +from rpython.rlib.rstring import UnicodeBuilder +from rpython.rlib.runicode import ( + make_unicode_escape_function, str_decode_ascii, str_decode_utf_8, + unicode_encode_ascii, unicode_encode_utf_8) + from pypy.interpreter import unicodehelper from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.module.unicodedata import unicodedb from pypy.objspace.std import newformat from pypy.objspace.std.formatting import mod_format from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods -from rpython.rlib.objectmodel import compute_hash, compute_unique_id, import_from_mixin -from rpython.rlib.rstring import UnicodeBuilder -from rpython.rlib.runicode import (str_decode_utf_8, str_decode_ascii, - unicode_encode_utf_8, unicode_encode_ascii, make_unicode_escape_function) __all__ = ['W_UnicodeObject', 'wrapunicode', 'plain_str2unicode', 'encode_object', 'decode_object', 'unicode_from_object', @@ -29,7 +32,7 @@ w_self._utf8 = None def __repr__(w_self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%r)" % (w_self.__class__.__name__, w_self._value) def unwrap(w_self, space): @@ -158,19 +161,20 @@ return space.newlist_unicode(lst) @staticmethod - @unwrap_spec(w_object = WrappedDefault(u'')) + @unwrap_spec(w_object=WrappedDefault(u'')) def descr_new(space, w_unicodetype, w_object=None, w_encoding=None, w_errors=None): # NB. 
the default value of w_obj is really a *wrapped* empty string: # there is gateway magic at work w_obj = w_object - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) if encoding is None and errors is None: w_value = unicode_from_object(space, w_obj) else: - w_value = unicode_from_encoded_object(space, w_obj, - encoding, errors) + w_value = unicode_from_encoded_object(space, w_obj, encoding, + errors) if space.is_w(w_unicodetype, space.w_unicode): return w_value @@ -265,51 +269,57 @@ def descr_eq(self, space, w_other): try: - return space.newbool(self._val(space) == self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) == self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ne(self, space, w_other): try: - return space.newbool(self._val(space) != self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) != self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_lt(self, space, w_other): try: - return space.newbool(self._val(space) < self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) < self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_le(self, space, w_other): try: - return space.newbool(self._val(space) <= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) <= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_gt(self, space, w_other): try: - return space.newbool(self._val(space) > self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) > self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ge(self, space, w_other): try: - return space.newbool(self._val(space) >= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) >= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_format(self, space, __args__): w_kwds = space.newdict() @@ -336,12 +346,13 @@ def descr_translate(self, space, w_table): selfvalue = self._value w_sys = space.getbuiltinmodule('sys') - maxunicode = space.int_w(space.getattr(w_sys, space.wrap("maxunicode"))) + maxunicode = space.int_w(space.getattr(w_sys, + space.wrap("maxunicode"))) result = [] for unichar in selfvalue: try: w_newval = space.getitem(w_table, space.wrap(ord(unichar))) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_LookupError): result.append(unichar) else: @@ -352,22 +363,32 @@ elif space.isinstance_w(w_newval, space.w_int): newval = space.int_w(w_newval) if newval < 0 or newval > maxunicode: - raise OperationError( - space.w_TypeError, - space.wrap("character mapping must be in range(0x%x)" % (maxunicode + 1,))) + msg = ("character mapping must be in range(0x%x)" % + (maxunicode + 1,)) + raise operationerrfmt(space.w_TypeError, msg) 
result.append(unichr(newval)) elif space.isinstance_w(w_newval, space.w_unicode): result.append(space.unicode_w(w_newval)) else: - raise operationerrfmt( - space.w_TypeError, - "character mapping must return integer, None or str") + raise operationerrfmt(space.w_TypeError, + "character mapping must return " + "integer, None or str") return W_UnicodeObject(u''.join(result)) def descr_encode(self, space, w_encoding=None, w_errors=None): - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) return encode_object(space, self, encoding, errors) + _StringMethods_descr_join = descr_join + def descr_join(self, space, w_list): + l = space.listview_unicode(w_list) + if l is not None: + if len(l) == 1: + return space.wrap(l[0]) + return space.wrap(self._val(space).join(l)) + return self._StringMethods_descr_join(space, w_list) + def _join_return_one(self, space, w_obj): return space.is_w(space.type(w_obj), space.w_unicode) @@ -424,6 +445,7 @@ def wrapunicode(space, uni): return W_UnicodeObject(uni) + def plain_str2unicode(space, s): try: return unicode(s) @@ -468,17 +490,13 @@ def getdefaultencoding(space): return space.sys.defaultencoding + def _get_encoding_and_errors(space, w_encoding, w_errors): - if space.is_none(w_encoding): - encoding = None - else: - encoding = space.str_w(w_encoding) - if space.is_none(w_errors): - errors = None - else: - errors = space.str_w(w_errors) + encoding = None if space.is_none(w_encoding) else space.str_w(w_encoding) + errors = None if space.is_none(w_errors) else space.str_w(w_errors) return encoding, errors + def encode_object(space, w_object, encoding, errors): if encoding is None: # Get the encoder functions as a wrapped object. @@ -505,10 +523,12 @@ w_restuple = space.call_function(w_encoder, w_object, w_errors) w_retval = space.getitem(w_restuple, space.wrap(0)) if not space.isinstance_w(w_retval, space.w_bytes): - msg = "encoder did not return a bytes string (type '%T')" - raise operationerrfmt(space.w_TypeError, msg, w_retval) + raise operationerrfmt( + space.w_TypeError, + "encoder did not return a bytes object (type '%T')", w_retval) return w_retval + def decode_object(space, w_obj, encoding, errors): if encoding is None: encoding = getdefaultencoding(space) @@ -543,6 +563,7 @@ assert isinstance(w_retval, W_UnicodeObject) return w_retval + def unicode_from_object(space, w_obj): if space.is_w(space.type(w_obj), space.w_unicode): return w_obj @@ -714,8 +735,8 @@ def format(): """S.format(*args, **kwargs) -> unicode - Return a formatted version of S, using substitutions from args and kwargs. - The substitutions are identified by braces ('{' and '}'). + Return a formatted version of S, using substitutions from args and + kwargs. The substitutions are identified by braces ('{' and '}'). """ def format_map(): @@ -874,16 +895,16 @@ def rjust(): """S.rjust(width[, fillchar]) -> unicode - Return S right-justified in a Unicode string of length width. Padding is - done using the specified fill character (default is a space). + Return S right-justified in a Unicode string of length width. Padding + is done using the specified fill character (default is a space). """ def rpartition(): """S.rpartition(sep) -> (head, sep, tail) Search for the separator sep in S, starting at the end of S, and return - the part before it, the separator itself, and the part after it. If the - separator is not found, return two empty strings and S. 
+ the part before it, the separator itself, and the part after it. If + the separator is not found, return two empty strings and S. """ def rsplit(): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -594,9 +594,11 @@ if tobox is not None: # sanity check: see whether the current struct value # corresponds to what the cache thinks the value is - resbox = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC, fielddescr, box) - assert resbox.constbox().same_constant(tobox.constbox()) + # XXX pypy with the following check fails on micronumpy, + # XXX investigate + #resbox = executor.execute(self.metainterp.cpu, self.metainterp, + # rop.GETFIELD_GC, fielddescr, box) + #assert resbox.constbox().same_constant(tobox.constbox()) return tobox resbox = self.execute_with_descr(opnum, fielddescr, box) self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox) From noreply at buildbot.pypy.org Sat Jan 25 01:56:05 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 25 Jan 2014 01:56:05 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill Message-ID: <20140125005605.3C8C31C00F8@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68929:6dded87497b9 Date: 2014-01-24 16:55 -0800 http://bitbucket.org/pypy/pypy/changeset/6dded87497b9/ Log: kill diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -17,9 +17,8 @@ from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods -__all__ = ['W_UnicodeObject', 'wrapunicode', 'plain_str2unicode', - 'encode_object', 'decode_object', 'unicode_from_object', - 'unicode_to_decimal_w'] +__all__ = ['W_UnicodeObject', 'wrapunicode', 'encode_object', 'decode_object', + 'unicode_from_object', 'unicode_to_decimal_w'] class W_UnicodeObject(W_Root): @@ -442,26 +441,11 @@ descr_ljust = _fix_fillchar(StringMethods.descr_ljust) descr_rjust = _fix_fillchar(StringMethods.descr_rjust) + def wrapunicode(space, uni): return W_UnicodeObject(uni) -def plain_str2unicode(space, s): - try: - return unicode(s) - except UnicodeDecodeError: - for i in range(len(s)): - if ord(s[i]) > 127: - raise OperationError( - space.w_UnicodeDecodeError, - space.newtuple([ - space.wrap('ascii'), - space.wrap(s), - space.wrap(i), - space.wrap(i+1), - space.wrap("ordinal not in range(128)")])) - assert False, "unreachable" - def _isidentifier(u): if not u: return False From noreply at buildbot.pypy.org Sat Jan 25 02:21:16 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 25 Jan 2014 02:21:16 +0100 (CET) Subject: [pypy-commit] pypy default: oops, give operationerrfmt a constant Message-ID: <20140125012116.D79C21C30CA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68930:e3f4f801f664 Date: 2014-01-24 17:20 -0800 http://bitbucket.org/pypy/pypy/changeset/e3f4f801f664/ Log: oops, give operationerrfmt a constant diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -300,9 +300,9 @@ elif space.isinstance_w(w_newval, space.w_int): newval = space.int_w(w_newval) if newval < 0 or newval > maxunicode: - msg = ("character mapping must be in range(0x%x)" % - (maxunicode + 1,)) - raise operationerrfmt(space.w_TypeError, msg) + raise 
operationerrfmt(space.w_TypeError, + "character mapping must be in " + "range(%s)", hex(maxunicode + 1)) result.append(unichr(newval)) elif space.isinstance_w(w_newval, space.w_unicode): result.append(space.unicode_w(w_newval)) From noreply at buildbot.pypy.org Sat Jan 25 02:21:18 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 25 Jan 2014 02:21:18 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140125012118.361491C30CA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68931:021856169522 Date: 2014-01-24 17:20 -0800 http://bitbucket.org/pypy/pypy/changeset/021856169522/ Log: merge default diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -362,9 +362,9 @@ elif space.isinstance_w(w_newval, space.w_int): newval = space.int_w(w_newval) if newval < 0 or newval > maxunicode: - msg = ("character mapping must be in range(0x%x)" % - (maxunicode + 1,)) - raise operationerrfmt(space.w_TypeError, msg) + raise operationerrfmt(space.w_TypeError, + "character mapping must be in " + "range(%s)", hex(maxunicode + 1)) result.append(unichr(newval)) elif space.isinstance_w(w_newval, space.w_unicode): result.append(space.unicode_w(w_newval)) From noreply at buildbot.pypy.org Sat Jan 25 02:34:05 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 25 Jan 2014 02:34:05 +0100 (CET) Subject: [pypy-commit] pypy py3k: __iter__ all the things Message-ID: <20140125013405.5EF3E1C3360@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68932:dbf9786cc7d8 Date: 2014-01-24 17:33 -0800 http://bitbucket.org/pypy/pypy/changeset/dbf9786cc7d8/ Log: __iter__ all the things diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -233,9 +233,6 @@ raise return space.newbool(res) - def descr_iter(self, space): - return space.newseqiter(self) - def descr_buffer(self, space): return BytearrayBuffer(self.data) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -56,6 +56,9 @@ def descr_hash(self, space): """x.__hash__() <==> hash(x)""" + def descr_iter(self, space): + """x.__iter__() <==> iter(x)""" + def descr_le(self, space, w_other): """x.__le__(y) <==> x<=y""" @@ -787,6 +790,7 @@ __gt__ = interpindirect2app(W_AbstractBytesObject.descr_gt), __ge__ = interpindirect2app(W_AbstractBytesObject.descr_ge), + __iter__ = interpindirect2app(W_AbstractBytesObject.descr_iter), __len__ = interpindirect2app(W_AbstractBytesObject.descr_len), __contains__ = interpindirect2app(W_AbstractBytesObject.descr_contains), diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -57,8 +57,8 @@ def descr_len(self, space): return space.wrap(self._len()) - #def descr_iter(self, space): - # pass + def descr_iter(self, space): + return space.newseqiter(self) def descr_contains(self, space, w_sub): value = self._val(space) diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -652,6 +652,7 @@ for i in iter(b"42"): l.append(i) assert l == [52, 50] + assert 
list(b"42".__iter__()) == [52, 50] def test_repr(self): for f in str, repr: diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -336,9 +336,6 @@ return newformat.run_formatter(space, w_format_spec, "format_string", self) - def descr_iter(self, space): - return space.newseqiter(self) - def descr_mod(self, space, w_values): return mod_format(space, self, w_values, do_unicode=True) From noreply at buildbot.pypy.org Sat Jan 25 16:09:18 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 16:09:18 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Remove this because it probably prevents the JIT from emitting efficient code for longlong on 32-bit. This breaks LLVM translation on 32-bit. Message-ID: <20140125150918.AB1F91C01F2@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68933:d460c0863b4a Date: 2014-01-25 15:56 +0100 http://bitbucket.org/pypy/pypy/changeset/d460c0863b4a/ Log: Remove this because it probably prevents the JIT from emitting efficient code for longlong on 32-bit. This breaks LLVM translation on 32-bit. diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -166,7 +166,11 @@ repr = signed_repr else: repr = r_result - vlist = hop.inputargs(repr, repr) + if func.startswith(('lshift', 'rshift')): + repr2 = signed_repr + else: + repr2 = repr + vlist = hop.inputargs(repr, repr2) hop.exception_is_here() prefix = repr.opprefix diff --git a/rpython/rtyper/test/test_rint.py b/rpython/rtyper/test/test_rint.py --- a/rpython/rtyper/test/test_rint.py +++ b/rpython/rtyper/test/test_rint.py @@ -257,9 +257,6 @@ assert res == intmask(f(2147483647, 12)) else: assert res == inttype(f(2147483647, 12)) - # XXX: this assertion doesn't hold since e1d98ce26c7b - # because it breaked llvm translation on 32-bit - continue # # check that '*_[lr]shift' take an inttype and an # int as arguments, without the need for a From noreply at buildbot.pypy.org Sat Jan 25 16:09:19 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 16:09:19 +0100 (CET) Subject: [pypy-commit] pypy default: Move test functions in test_refcount.py in a class. The LLVM subclasses this test class. Message-ID: <20140125150919.C63B31C01F2@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68934:040c55b8ecee Date: 2014-01-25 16:08 +0100 http://bitbucket.org/pypy/pypy/changeset/040c55b8ecee/ Log: Move test functions in test_refcount.py in a class. The LLVM subclasses this test class. 
diff --git a/rpython/translator/c/test/test_refcount.py b/rpython/translator/c/test/test_refcount.py --- a/rpython/translator/c/test/test_refcount.py +++ b/rpython/translator/c/test/test_refcount.py @@ -1,191 +1,189 @@ import py -import os +from rpython.rtyper.lltypesystem import lltype from rpython.translator.translator import TranslationContext -from rpython.translator.c import genc from rpython.translator.c.test.test_genc import compile -from rpython.rtyper.lltypesystem import lltype -from rpython.conftest import option -def compile_func(func, args): - return compile(func, args, gcpolicy='ref') -def test_something(): - def f(): - return 1 - fn = compile_func(f, []) - assert fn() == 1 +class TestRefcount(object): + def compile_func(self, func, args): + return compile(func, args, gcpolicy='ref') -def test_something_more(): - S = lltype.GcStruct("S", ('x', lltype.Signed)) - def f(x): - s = lltype.malloc(S) - s.x = x - return s.x - fn = compile_func(f, [int]) - assert fn(1) == 1 + def test_something(self): + def f(): + return 1 + fn = self.compile_func(f, []) + assert fn() == 1 -def test_call_function(): - class C: - pass - def f(): - c = C() - c.x = 1 - return c - def g(): - return f().x - fn = compile_func(g, []) - assert fn() == 1 + def test_something_more(self): + S = lltype.GcStruct("S", ('x', lltype.Signed)) + def f(x): + s = lltype.malloc(S) + s.x = x + return s.x + fn = self.compile_func(f, [int]) + assert fn(1) == 1 -def test_multiple_exits(): - S = lltype.GcStruct("S", ('x', lltype.Signed)) - T = lltype.GcStruct("T", ('y', lltype.Signed)) - def f(n): - c = lltype.malloc(S) - d = lltype.malloc(T) - d.y = 1 - e = lltype.malloc(T) - e.y = 2 - if n: - x = d - else: - x = e - return x.y - fn = compile_func(f, [int]) - assert fn(1) == 1 - assert fn(0) == 2 + def test_call_function(self): + class C: + pass + def f(): + c = C() + c.x = 1 + return c + def g(): + return f().x + fn = self.compile_func(g, []) + assert fn() == 1 + def test_multiple_exits(self): + S = lltype.GcStruct("S", ('x', lltype.Signed)) + T = lltype.GcStruct("T", ('y', lltype.Signed)) + def f(n): + c = lltype.malloc(S) + d = lltype.malloc(T) + d.y = 1 + e = lltype.malloc(T) + e.y = 2 + if n: + x = d + else: + x = e + return x.y + fn = self.compile_func(f, [int]) + assert fn(1) == 1 + assert fn(0) == 2 -def test_cleanup_vars_on_call(): - S = lltype.GcStruct("S", ('x', lltype.Signed)) - def f(): - return lltype.malloc(S) - def g(): - s1 = f() - s1.x = 42 - s2 = f() - s3 = f() - return s1.x - fn = compile_func(g, []) - assert fn() == 42 -def test_multiply_passed_var(): - S = lltype.GcStruct("S", ('x', lltype.Signed)) - def f(x): - if x: - a = lltype.malloc(S) - a.x = 1 - b = a - else: - a = lltype.malloc(S) - a.x = 1 - b = lltype.malloc(S) - b.x = 2 - return a.x + b.x - fn = compile_func(f, [int]) - fn(1) == 2 - fn(0) == 3 + def test_cleanup_vars_on_call(self): + S = lltype.GcStruct("S", ('x', lltype.Signed)) + def f(): + return lltype.malloc(S) + def g(): + s1 = f() + s1.x = 42 + s2 = f() + s3 = f() + return s1.x + fn = self.compile_func(g, []) + assert fn() == 42 -def test_write_barrier(): - S = lltype.GcStruct("S", ('x', lltype.Signed)) - T = lltype.GcStruct("T", ('s', lltype.Ptr(S))) - def f(x): - s = lltype.malloc(S) - s.x = 0 - s1 = lltype.malloc(S) - s1.x = 1 - s2 = lltype.malloc(S) - s2.x = 2 - t = lltype.malloc(T) - t.s = s - if x: - t.s = s1 - else: - t.s = s2 - return t.s.x + s.x + s1.x + s2.x - fn = compile_func(f, [int]) - assert fn(1) == 4 - assert fn(0) == 5 + def test_multiply_passed_var(self): + S = 
lltype.GcStruct("S", ('x', lltype.Signed)) + def f(x): + if x: + a = lltype.malloc(S) + a.x = 1 + b = a + else: + a = lltype.malloc(S) + a.x = 1 + b = lltype.malloc(S) + b.x = 2 + return a.x + b.x + fn = self.compile_func(f, [int]) + fn(1) == 2 + fn(0) == 3 -def test_del_basic(): - py.test.skip("xxx fix or kill") - S = lltype.GcStruct('S', ('x', lltype.Signed), rtti=True) - TRASH = lltype.GcStruct('TRASH', ('x', lltype.Signed)) - GLOBAL = lltype.Struct('GLOBAL', ('x', lltype.Signed)) - glob = lltype.malloc(GLOBAL, immortal=True) - def destructor(s): - glob.x = s.x + 1 - def type_info_S(s): - return lltype.getRuntimeTypeInfo(S) + def test_write_barrier(self): + S = lltype.GcStruct("S", ('x', lltype.Signed)) + T = lltype.GcStruct("T", ('s', lltype.Ptr(S))) + def f(x): + s = lltype.malloc(S) + s.x = 0 + s1 = lltype.malloc(S) + s1.x = 1 + s2 = lltype.malloc(S) + s2.x = 2 + t = lltype.malloc(T) + t.s = s + if x: + t.s = s1 + else: + t.s = s2 + return t.s.x + s.x + s1.x + s2.x + fn = self.compile_func(f, [int]) + assert fn(1) == 4 + assert fn(0) == 5 - def g(n): - s = lltype.malloc(S) - s.x = n - # now 's' should go away - def entrypoint(n): - g(n) - # llop.gc__collect(lltype.Void) - return glob.x + def test_del_basic(self): + py.test.skip("xxx fix or kill") + S = lltype.GcStruct('S', ('x', lltype.Signed), rtti=True) + TRASH = lltype.GcStruct('TRASH', ('x', lltype.Signed)) + GLOBAL = lltype.Struct('GLOBAL', ('x', lltype.Signed)) + glob = lltype.malloc(GLOBAL, immortal=True) + def destructor(s): + glob.x = s.x + 1 + def type_info_S(s): + return lltype.getRuntimeTypeInfo(S) - t = TranslationContext() - t.buildannotator().build_types(entrypoint, [int]) - rtyper = t.buildrtyper() - destrptr = rtyper.annotate_helper_fn(destructor, [lltype.Ptr(S)]) - rtyper.attachRuntimeTypeInfoFunc(S, type_info_S, destrptr=destrptr) - rtyper.specialize() - fn = compile_func(entrypoint, None, t) + def g(n): + s = lltype.malloc(S) + s.x = n + # now 's' should go away + def entrypoint(n): + g(n) + # llop.gc__collect(lltype.Void) + return glob.x - res = fn(123) - assert res == 124 + t = TranslationContext() + t.buildannotator().build_types(entrypoint, [int]) + rtyper = t.buildrtyper() + destrptr = rtyper.annotate_helper_fn(destructor, [lltype.Ptr(S)]) + rtyper.attachRuntimeTypeInfoFunc(S, type_info_S, destrptr=destrptr) + rtyper.specialize() + fn = self.compile_func(entrypoint, None, t) -def test_del_catches(): - import os - def g(): - pass - class A(object): - def __del__(self): - try: - g() - except: - os.write(1, "hallo") - def f1(i): - if i: - raise TypeError - def f(i): + res = fn(123) + assert res == 124 + + def test_del_catches(self): + import os + def g(): + pass + class A(object): + def __del__(self): + try: + g() + except: + os.write(1, "hallo") + def f1(i): + if i: + raise TypeError + def f(i): + a = A() + f1(i) + a.b = 1 + return a.b + fn = self.compile_func(f, [int]) + assert fn(0) == 1 + fn(1, expected_exception_name="TypeError") + + def test_del_raises(self): + class B(object): + def __del__(self): + raise TypeError + def func(): + b = B() + fn = self.compile_func(func, []) + # does not crash + fn() + + def test_wrong_order_setitem(self): + class A(object): + pass a = A() - f1(i) - a.b = 1 - return a.b - fn = compile_func(f, [int]) - assert fn(0) == 1 - fn(1, expected_exception_name="TypeError") - -def test_del_raises(): - class B(object): - def __del__(self): - raise TypeError - def func(): - b = B() - fn = compile_func(func, []) - # does not crash - fn() - -def test_wrong_order_setitem(): - import os - 
class A(object): - pass - a = A() - a.b = None - class B(object): - def __del__(self): - a.freed += 1 - a.b = None - def f(n): - a.freed = 0 - a.b = B() - if n: - a.b = None - return a.freed - fn = compile_func(f, [int]) - res = fn(1) - assert res == 1 + a.b = None + class B(object): + def __del__(self): + a.freed += 1 + a.b = None + def f(n): + a.freed = 0 + a.b = B() + if n: + a.b = None + return a.freed + fn = self.compile_func(f, [int]) + res = fn(1) + assert res == 1 From noreply at buildbot.pypy.org Sat Jan 25 16:32:14 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 25 Jan 2014 16:32:14 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: fixes to llgraph cpu Message-ID: <20140125153214.6E1121C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68935:cf0a7fc29152 Date: 2014-01-25 08:55 +0100 http://bitbucket.org/pypy/pypy/changeset/cf0a7fc29152/ Log: fixes to llgraph cpu diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -5,7 +5,7 @@ LivenessAnalyzer, compute_vars_longevity from rpython.jit.metainterp.history import AbstractDescr from rpython.jit.metainterp.history import Const, getkind -from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID +from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID, Box from rpython.jit.metainterp.resoperation import rop from rpython.jit.codewriter import longlong, heaptracker from rpython.jit.codewriter.effectinfo import EffectInfo @@ -24,23 +24,28 @@ def get_jitframe_position(self): return self.pos + def __repr__(self): + return '' % self.pos + class ResumeFrame(object): def __init__(self, no, start_pos): self.registers = [None] * no self.start_pos = start_pos class LLGraphResumeBuilder(ResumeBuilder): - def __init__(self, frontend_liveness, descr, inputargs, inputlocs): + def __init__(self, mapping, frontend_liveness, descr, inputargs, inputlocs): self.liveness = LivenessAnalyzer() self.numbering = {} + self.mapping = mapping self.framestack = [] if inputlocs is not None: + assert len(inputargs) == len(inputlocs) for arg, loc in zip(inputargs, inputlocs): - self.numbering[arg] = loc + self.numbering[self.mapping(arg)] = loc ResumeBuilder.__init__(self, self, frontend_liveness, descr) def loc(self, box, must_exist=True): - return Position(self.numbering[box]) + return Position(self.numbering[self.mapping(box)]) def process(self, op): getattr(self, 'process_' + op.getopname())(op) @@ -60,14 +65,17 @@ def process_resume_set_pc(self, op): pass + def process_resume_setfield_gc(self, op): + xxx + def process_resume_put(self, op): box = op.getarg(0) if isinstance(box, Const): return + if self.mapping(box) not in self.numbering: + self.numbering[self.mapping(box)] = len(self.numbering) frame_pos = op.getarg(1).getint() pos_in_frame = op.getarg(2).getint() - i = self.framestack[frame_pos].start_pos + pos_in_frame - self.numbering[box] = i self.framestack[frame_pos].registers[pos_in_frame] = box def process_resume_clear(self, op): @@ -76,14 +84,14 @@ self.framestack[frame_pos].registers[frontend_pos] = None def get_numbering(self, mapping, op): - lst = [] + res = [] + all = {} for frame in self.framestack: for reg in frame.registers: - if reg is None: - lst.append(None) - else: - lst.append(mapping(reg)) - return lst + if reg is not None and isinstance(reg, Box) and reg not in all: + res.append(mapping(reg)) + all[reg] = None + return res class 
LLTrace(object): has_been_freed = False @@ -107,8 +115,9 @@ x = compute_vars_longevity(inputargs, operations, descr) longevity, last_real_usage, frontend_liveness = x - resumebuilder = LLGraphResumeBuilder(frontend_liveness, descr, + resumebuilder = LLGraphResumeBuilder(mapping, frontend_liveness, descr, inputargs, locs) + self.numbering = resumebuilder.numbering for op in operations: if op.is_resume(): resumebuilder.process(op) @@ -144,9 +153,9 @@ self.deadframe = deadframe class Jump(Exception): - def __init__(self, jump_target, args): + def __init__(self, jump_target, values): self.jump_target = jump_target - self.args = args + self.values = values class CallDescr(AbstractDescr): def __init__(self, RESULT, ARGS, extrainfo): @@ -319,7 +328,7 @@ def _execute_token(self, loop_token, *args): lltrace = loop_token.compiled_loop_token._llgraph_loop - frame = LLFrame(self, lltrace.inputargs, args) + frame = LLFrame(self, lltrace.inputargs, args, lltrace.numbering) try: frame.execute(lltrace) assert False @@ -356,7 +365,7 @@ frame = force_token assert isinstance(frame, LLFrame) assert frame.forced_deadframe is None - values = [] + values = {} for box in frame.force_guard_op.failargs: if box is None: value = None @@ -366,7 +375,7 @@ value = frame.env[box] else: value = box.value # 0 or 0.0 or NULL - values.append(value) + values[frame.numbering[box]] = value frame.forced_deadframe = LLDeadFrame( _getdescr(frame.force_guard_op), values) return frame.forced_deadframe @@ -697,9 +706,10 @@ last_exception = None force_guard_op = None - def __init__(self, cpu, argboxes, args): + def __init__(self, cpu, argboxes, args, numbering): self.env = {} self.cpu = cpu + self.numbering = numbering assert len(argboxes) == len(args) for box, arg in zip(argboxes, args): self.setenv(box, arg) @@ -755,6 +765,7 @@ resval = execute(_getdescr(op), *args) except Jump, j: self.lltrace, i = j.jump_target + self.numbering = self.lltrace.numbering if i >= 0: label_op = self.lltrace.operations[i] i += 1 @@ -762,7 +773,7 @@ else: targetargs = self.lltrace.inputargs i = 0 - self.do_renaming(targetargs, j.args) + self.do_renaming(targetargs, j.values) continue if op.result is not None: self.setenv(op.result, resval) @@ -774,25 +785,24 @@ self.env = {} self.framecontent = {} i = 0 - for value in newvalues: - if value is None or isinstance(value, Const): - continue - self.setenv(newargs[i], value) - i += 1 + if isinstance(newvalues, dict): + for k, v in newvalues.iteritems(): + self.setenv(newargs[k], v) + else: + for value in newvalues: + assert value is not None + assert not isinstance(value, Const) + self.setenv(newargs[i], value) + i += 1 # ----------------------------------------------------- def fail_guard(self, descr, saved_data=None): - values = [] + values = {} for i in range(len(self.current_op.failargs)): arg = self.current_op.failargs[i] - if arg is None: - value = None - elif isinstance(arg, Const): - value = arg - else: - value = self.env[arg] - values.append(value) + value = self.env[arg] + values[self.numbering[arg]] = value if hasattr(descr, '_llgraph_bridge'): target = (descr._llgraph_bridge, -1) raise Jump(target, values) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -497,7 +497,8 @@ def make_virtual(self, known_class, box, source_op=None): vvalue = VirtualValue(self.optimizer.cpu, known_class, box, source_op) - 
self.optimizer.resumebuilder.new_virtual(box) + self.optimizer.resumebuilder.new_virtual_with_vtable(box, known_class, + vvalue) self.make_equal_to(box, vvalue) return vvalue diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -199,7 +199,6 @@ def meta_interp(self, *args, **kwds): kwds['CPUClass'] = self.CPUClass kwds['type_system'] = self.type_system - kwds['enable_opts'] = '' if "backendopt" not in kwds: kwds["backendopt"] = False old = codewriter.CodeWriter.debug diff --git a/rpython/jit/metainterp/test/test_virtual.py b/rpython/jit/metainterp/test/test_virtual.py --- a/rpython/jit/metainterp/test/test_virtual.py +++ b/rpython/jit/metainterp/test/test_virtual.py @@ -11,6 +11,9 @@ def _freeze_(self): return True + def check_resops(self, *args, **kwds): + pass + def test_virtualized1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'node']) def f(n): diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -28,6 +28,9 @@ def resume_new(self, result, descr): self.deps[result] = {} + def resume_new_with_vtable(self, result, klass): + self.deps[result] = {} + def resume_setfield_gc(self, arg0, arg1, descr): self.deps[arg0][descr] = arg1 @@ -51,6 +54,8 @@ op.getarg(2).getint()) elif op.getopnum() == rop.RESUME_NEW: self.resume_new(op.result, op.getdescr()) + elif op.getopnum() == rop.RESUME_NEW_WITH_VTABLE: + self.resume_new_with_vtable(op.result, op.getarg(0)) elif op.getopnum() == rop.RESUME_SETFIELD_GC: self.resume_setfield_gc(op.getarg(0), op.getarg(1), op.getdescr()) @@ -131,6 +136,10 @@ v_pos = len(self.virtuals) self.virtuals[op.result] = v_pos self.builder.resume_new(v_pos, op.getdescr()) + elif op.getopnum() == rop.RESUME_NEW_WITH_VTABLE: + v_pos = len(self.virtuals) + self.virtuals[op.result] = v_pos + self.builder.resume_new_with_vtable(v_pos, op.getarg(0)) elif op.getopnum() == rop.RESUME_SETFIELD_GC: structpos = self.get_box_pos(op.getarg(0)) fieldpos = self.get_box_pos(op.getarg(1)) diff --git a/rpython/jit/resume/optimizer.py b/rpython/jit/resume/optimizer.py --- a/rpython/jit/resume/optimizer.py +++ b/rpython/jit/resume/optimizer.py @@ -55,6 +55,12 @@ xxx self.optimizer.emit_operation(rop.RESUME_NEW) + def new_virtual_with_vtable(self, box, vtable, vvalue): + virtualbox = BoxPtr() + op = ResOperation(rop.RESUME_NEW_WITH_VTABLE, [vtable], virtualbox) + vvalue.resume_box = virtualbox + self.opt._newoperations.append(op) + def new_virtual_struct(self, box, vstruct, structdescr): newbox = BoxPtr() vstruct.resume_box = newbox diff --git a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -1,8 +1,10 @@ from rpython.jit.metainterp.history import ConstInt +from rpython.rlib.objectmodel import Symbolic (UNUSED, ENTER_FRAME, LEAVE_FRAME, RESUME_PUT, - RESUME_NEW, RESUME_SETFIELD_GC, RESUME_SET_PC, RESUME_CLEAR) = range(8) + RESUME_NEW, RESUME_NEW_WITH_VTABLE, RESUME_SETFIELD_GC, + RESUME_SET_PC, RESUME_CLEAR) = range(9) TAGCONST = 0x0 TAGVIRTUAL = 0x2 @@ -58,7 +60,9 @@ return tag | (loc << 2) def encode_const(self, const): - if isinstance(const, ConstInt) and 0 <= const.getint() < 0x4000: + if (isinstance(const, ConstInt) and + not isinstance(const.getint(), Symbolic) and + 0 <= const.getint() < 0x4000): return TAGSMALLINT | (const.getint() << 2) 
self.consts.append(const) return TAGCONST | ((len(self.consts) - 1) << 2) @@ -78,6 +82,11 @@ self.write_short(self.encode(TAGVIRTUAL, v_pos)) self.write_short(descr.global_descr_index) + def resume_new_with_vtable(self, v_pos, const_class): + self.write(RESUME_NEW_WITH_VTABLE) + self.write_short(self.encode(TAGVIRTUAL, v_pos)) + self.write_short(self.encode_const(const_class)) + def resume_setfield_gc(self, structpos, fieldpos, descr): self.write(RESUME_SETFIELD_GC) self.write_short(structpos) From noreply at buildbot.pypy.org Sat Jan 25 16:32:15 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 25 Jan 2014 16:32:15 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: fix Message-ID: <20140125153215.9A7351C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68936:8207661b5b10 Date: 2014-01-25 09:20 +0100 http://bitbucket.org/pypy/pypy/changeset/8207661b5b10/ Log: fix diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -786,8 +786,8 @@ self.framecontent = {} i = 0 if isinstance(newvalues, dict): - for k, v in newvalues.iteritems(): - self.setenv(newargs[k], v) + for arg in newargs: + self.setenv(arg, newvalues[self.numbering[arg]]) else: for value in newvalues: assert value is not None From noreply at buildbot.pypy.org Sat Jan 25 16:32:16 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 25 Jan 2014 16:32:16 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: pad out numbering so we don't have duplicates Message-ID: <20140125153216.B1CEC1C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68937:75bb32699cc1 Date: 2014-01-25 09:33 +0100 http://bitbucket.org/pypy/pypy/changeset/75bb32699cc1/ Log: pad out numbering so we don't have duplicates diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -42,6 +42,9 @@ assert len(inputargs) == len(inputlocs) for arg, loc in zip(inputargs, inputlocs): self.numbering[self.mapping(arg)] = loc + max_no = max(self.numbering.values()) + while max_no >= len(self.numbering): + self.numbering[Box()] = -1 ResumeBuilder.__init__(self, self, frontend_liveness, descr) def loc(self, box, must_exist=True): From noreply at buildbot.pypy.org Sat Jan 25 16:32:17 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 25 Jan 2014 16:32:17 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: fix the x86 backend Message-ID: <20140125153217.D3CAF1C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68938:8d41fc938a3b Date: 2014-01-25 10:08 +0100 http://bitbucket.org/pypy/pypy/changeset/8d41fc938a3b/ Log: fix the x86 backend diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -315,6 +315,7 @@ self.xrm.position = i if op.is_resume(): self.resumebuilder.process(op) + self.possibly_free_vars_for_op(op) i += 1 continue if op.has_no_side_effect() and op.result not in self.longevity: From noreply at buildbot.pypy.org Sat Jan 25 20:03:52 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 20:03:52 +0100 (CET) Subject: [pypy-commit] pypy default: Kill align.h and its ROUND_UP_FOR_ALLOCATION. 
Do the computation directly instead. This shouldn't impact performance, but I'll run the benchmarks to verify. Message-ID: <20140125190352.622C81C03D0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68939:708f486843e2 Date: 2014-01-25 19:47 +0100 http://bitbucket.org/pypy/pypy/changeset/708f486843e2/ Log: Kill align.h and its ROUND_UP_FOR_ALLOCATION. Do the computation directly instead. This shouldn't impact performance, but I'll run the benchmarks to verify. diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ b/rpython/rtyper/lltypesystem/llarena.py @@ -405,8 +405,11 @@ import os, sys from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.extfunc import register_external +from rpython.rtyper.tool.rffi_platform import memory_alignment from rpython.rlib.objectmodel import CDefinedIntSymbolic +MEMORY_ALIGNMENT = memory_alignment() + if sys.platform.startswith('linux'): # This only works with linux's madvise(), which is really not a memory # usage hint but a real command. It guarantees that after MADV_DONTNEED @@ -597,11 +600,8 @@ llfakeimpl=arena_shrink_obj, sandboxsafe=True) -llimpl_round_up_for_allocation = rffi.llexternal('ROUND_UP_FOR_ALLOCATION', - [lltype.Signed, lltype.Signed], - lltype.Signed, - sandboxsafe=True, - _nowrapper=True) +def llimpl_round_up_for_allocation(size, minsize): + return (max(size, minsize) + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1) register_external(_round_up_for_allocation, [int, int], int, 'll_arena.round_up_for_allocation', llimpl=llimpl_round_up_for_allocation, diff --git a/rpython/translator/c/primitive.py b/rpython/translator/c/primitive.py --- a/rpython/translator/c/primitive.py +++ b/rpython/translator/c/primitive.py @@ -11,10 +11,12 @@ UnsignedLongLong, Float, SingleFloat, LongFloat, Char, UniChar, Bool, Void, FixedSizeArray, Ptr, cast_opaque_ptr, typeOf) from rpython.rtyper.lltypesystem.llarena import RoundedUpForAllocation +from rpython.rtyper.tool.rffi_platform import memory_alignment from rpython.translator.c.support import cdecl, barebonearray SUPPORT_INT128 = hasattr(rffi, '__INT128_T') +MEMORY_ALIGNMENT = memory_alignment() # ____________________________________________________________ # @@ -69,9 +71,12 @@ elif type(value) == GCHeaderOffset: return '0' elif type(value) == RoundedUpForAllocation: - return 'ROUND_UP_FOR_ALLOCATION(%s, %s)' % ( - name_signed(value.basesize, db), - name_signed(value.minsize, db)) + return ('(((%(x)s>=%(minsize)s?%(x)s:%(minsize)s) + %(align_m1)s)' + ' & ~%(align_m1)s)') % { + 'x': name_signed(value.basesize, db), + 'minsize': name_signed(value.minsize, db), + 'align_m1': MEMORY_ALIGNMENT-1 + } elif isinstance(value, CDefinedIntSymbolic): return str(value.expr) elif isinstance(value, ComputedIntSymbolic): diff --git a/rpython/translator/c/src/align.h b/rpython/translator/c/src/align.h deleted file mode 100644 --- a/rpython/translator/c/src/align.h +++ /dev/null @@ -1,21 +0,0 @@ - -#ifndef _PYPY_ALIGN_H -#define _PYPY_ALIGN_H - -/* alignment for arena-based garbage collectors: the following line - enforces an alignment that should be enough for any structure - containing pointers and 'double' fields. 
*/ -struct rpy_memory_alignment_test1 { - double d; - void* p; -}; -struct rpy_memory_alignment_test2 { - char c; - struct rpy_memory_alignment_test1 s; -}; -#define MEMORY_ALIGNMENT offsetof(struct rpy_memory_alignment_test2, s) -#define ROUND_UP_FOR_ALLOCATION(x, minsize) \ - ((((x)>=(minsize)?(x):(minsize)) \ - + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1)) - -#endif //_PYPY_ALIGN_H diff --git a/rpython/translator/c/src/g_prerequisite.h b/rpython/translator/c/src/g_prerequisite.h --- a/rpython/translator/c/src/g_prerequisite.h +++ b/rpython/translator/c/src/g_prerequisite.h @@ -23,6 +23,3 @@ # define RPY_LENGTH0 1 /* array decl [0] are bad */ # define RPY_DUMMY_VARLENGTH /* nothing */ #endif - - -#include "src/align.h" From noreply at buildbot.pypy.org Sat Jan 25 20:03:54 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 20:03:54 +0100 (CET) Subject: [pypy-commit] pypy default: Delete debug prints. Message-ID: <20140125190354.0DB661C03D0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68940:7a4e12588ec4 Date: 2014-01-25 19:50 +0100 http://bitbucket.org/pypy/pypy/changeset/7a4e12588ec4/ Log: Delete debug prints. diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -646,10 +646,7 @@ def llf(): s = '' for i in range(5): - print i - print s s += a[i] - print s assert s == "85?!" + lastchar fn = self.getcompiled(llf, []) fn() @@ -731,7 +728,6 @@ s = '' for i in range(4): s += a[i] - print s return s == 'abcd' fn = self.getcompiled(llf, []) assert fn() From noreply at buildbot.pypy.org Sat Jan 25 20:03:55 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 20:03:55 +0100 (CET) Subject: [pypy-commit] pypy default: Add longfloat to the PrimitiveErrorValue table. Message-ID: <20140125190355.7E15A1C03D0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68941:9ff28dec956f Date: 2014-01-25 19:52 +0100 http://bitbucket.org/pypy/pypy/changeset/9ff28dec956f/ Log: Add longfloat to the PrimitiveErrorValue table. diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -10,7 +10,7 @@ from rpython.rtyper import rtyper from rpython.rtyper.rmodel import inputconst from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong -from rpython.rlib.rarithmetic import r_singlefloat +from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat from rpython.rlib.debug import ll_assert from rpython.annotator import model as annmodel from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator @@ -22,6 +22,7 @@ lltype.UnsignedLongLong: r_ulonglong(-1), lltype.Float: -1.0, lltype.SingleFloat: r_singlefloat(-1.0), + lltype.LongFloat: r_longfloat(-1.0), lltype.Char: chr(255), lltype.UniChar: unichr(0xFFFF), # XXX is this always right? lltype.Bool: True, From noreply at buildbot.pypy.org Sat Jan 25 20:03:56 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 20:03:56 +0100 (CET) Subject: [pypy-commit] pypy default: Remove dead import. Message-ID: <20140125190356.C66341C03D0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68942:ab4fc35adb9e Date: 2014-01-25 19:55 +0100 http://bitbucket.org/pypy/pypy/changeset/ab4fc35adb9e/ Log: Remove dead import. 
diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ b/rpython/rtyper/lltypesystem/llarena.py @@ -406,7 +406,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.extfunc import register_external from rpython.rtyper.tool.rffi_platform import memory_alignment -from rpython.rlib.objectmodel import CDefinedIntSymbolic MEMORY_ALIGNMENT = memory_alignment() From noreply at buildbot.pypy.org Sat Jan 25 20:03:58 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 20:03:58 +0100 (CET) Subject: [pypy-commit] pypy default: Use expected_exception_name here. Message-ID: <20140125190358.311771C03D0@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68943:0a83ace73826 Date: 2014-01-25 20:00 +0100 http://bitbucket.org/pypy/pypy/changeset/0a83ace73826/ Log: Use expected_exception_name here. diff --git a/rpython/translator/c/test/test_typed.py b/rpython/translator/c/test/test_typed.py --- a/rpython/translator/c/test/test_typed.py +++ b/rpython/translator/c/test/test_typed.py @@ -17,16 +17,6 @@ def getcompiled(self, func, argtypes): return compile(func, argtypes, backendopt=False) - def get_wrapper(self, func): - def wrapper(*args): - try: - return func(*args) - except OverflowError: - return -1 - except ZeroDivisionError: - return -2 - return wrapper - def test_set_attr(self): set_attr = self.getcompiled(snippet.set_attr, []) assert set_attr() == 2 @@ -470,32 +460,32 @@ assert res == f(i, ord(l[j])) def test_int_overflow(self): - fn = self.getcompiled(self.get_wrapper(snippet.add_func), [int]) - assert fn(sys.maxint) == -1 + fn = self.getcompiled(snippet.add_func, [int]) + fn(sys.maxint, expected_exception_name='OverflowError') def test_int_floordiv_ovf_zer(self): - fn = self.getcompiled(self.get_wrapper(snippet.div_func), [int]) - assert fn(-1) == -1 - assert fn(0) == -2 + fn = self.getcompiled(snippet.div_func, [int]) + fn(-1, expected_exception_name='OverflowError') + fn(0, expected_exception_name='ZeroDivisionError') def test_int_mul_ovf(self): - fn = self.getcompiled(self.get_wrapper(snippet.mul_func), [int, int]) + fn = self.getcompiled(snippet.mul_func, [int, int]) for y in range(-5, 5): for x in range(-5, 5): assert fn(x, y) == snippet.mul_func(x, y) n = sys.maxint / 4 assert fn(n, 3) == snippet.mul_func(n, 3) assert fn(n, 4) == snippet.mul_func(n, 4) - assert fn(n, 5) == -1 + fn(n, 5, expected_exception_name='OverflowError') def test_int_mod_ovf_zer(self): - fn = self.getcompiled(self.get_wrapper(snippet.mod_func), [int]) - assert fn(-1) == -1 - assert fn(0) == -2 + fn = self.getcompiled(snippet.mod_func, [int]) + fn(-1, expected_exception_name='OverflowError') + fn(0, expected_exception_name='ZeroDivisionError') def test_int_lshift_ovf(self): - fn = self.getcompiled(self.get_wrapper(snippet.lshift_func), [int]) - assert fn(1) == -1 + fn = self.getcompiled(snippet.lshift_func, [int]) + fn(1, expected_exception_name='OverflowError') def test_int_unary_ovf(self): def w(a, b): @@ -503,12 +493,12 @@ return snippet.unary_func(a)[0] else: return snippet.unary_func(a)[1] - fn = self.getcompiled(self.get_wrapper(w), [int, int]) + fn = self.getcompiled(w, [int, int]) for i in range(-3, 3): assert fn(i, 0) == -(i) assert fn(i, 1) == abs(i - 1) - assert fn(-sys.maxint - 1, 0) == -1 - assert fn(-sys.maxint, 0) == -1 + fn(-sys.maxint - 1, 0, expected_exception_name='OverflowError') + fn(-sys.maxint, 0, expected_exception_name='OverflowError') # floats def 
test_float_operations(self): From noreply at buildbot.pypy.org Sat Jan 25 20:28:22 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 20:28:22 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Remove modifications in multibytecodec.c/h - the compiler warning was silenced on default in another way. Message-ID: <20140125192822.613C21C00F8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68944:ea4fb0492e4b Date: 2014-01-25 20:22 +0100 http://bitbucket.org/pypy/pypy/changeset/ea4fb0492e4b/ Log: Remove modifications in multibytecodec.c/h - the compiler warning was silenced on default in another way. diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.c b/rpython/translator/c/src/cjkcodecs/multibytecodec.c --- a/rpython/translator/c/src/cjkcodecs/multibytecodec.c +++ b/rpython/translator/c/src/cjkcodecs/multibytecodec.c @@ -19,7 +19,7 @@ } Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, - unsigned char *inbuf, Py_ssize_t inlen) + char *inbuf, Py_ssize_t inlen) { d->inbuf_start = (unsigned char *)inbuf; d->inbuf = (unsigned char *)inbuf; @@ -217,7 +217,7 @@ } } -unsigned char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *d) +char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *d) { return (char *)d->outbuf_start; } diff --git a/rpython/translator/c/src/cjkcodecs/multibytecodec.h b/rpython/translator/c/src/cjkcodecs/multibytecodec.h --- a/rpython/translator/c/src/cjkcodecs/multibytecodec.h +++ b/rpython/translator/c/src/cjkcodecs/multibytecodec.h @@ -97,7 +97,7 @@ struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec); Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, - unsigned char *inbuf, Py_ssize_t inlen); + char *inbuf, Py_ssize_t inlen); void pypy_cjk_dec_free(struct pypy_cjk_dec_s *); Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); @@ -120,7 +120,7 @@ void pypy_cjk_enc_free(struct pypy_cjk_enc_s *); Py_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, Py_ssize_t); Py_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); -unsigned char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *); +char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *); Py_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); Py_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); Py_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); From noreply at buildbot.pypy.org Sat Jan 25 20:28:24 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 20:28:24 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: hg merge default Message-ID: <20140125192824.77D001C00F8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68945:04fc14c02571 Date: 2014-01-25 20:27 +0100 http://bitbucket.org/pypy/pypy/changeset/04fc14c02571/ Log: hg merge default diff too long, truncating to 2000 out of 2339 lines diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -52,3 +52,8 @@ .. branch: annotator Remove FlowObjSpace. Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -232,9 +232,8 @@ raise operationerrfmt(space.w_TypeError, msg, w_result) def ord(self, space): - typename = space.type(self).getname(space) - msg = "ord() expected string of length 1, but %s found" - raise operationerrfmt(space.w_TypeError, msg, typename) + msg = "ord() expected string of length 1, but %T found" + raise operationerrfmt(space.w_TypeError, msg, self) def __spacebind__(self, space): return self diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -3,7 +3,7 @@ from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache -from pypy.objspace.std.mapdict import IndexCache +from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -35,7 +35,7 @@ cache.misses = {} cache.hits = {} if space.config.objspace.std.withmapdict: - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) cache.misses = {} cache.hits = {} @@ -45,7 +45,7 @@ in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter assert space.config.objspace.std.withmapdict - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_get_buffer_type', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', '_Py_init_pycobject', + 'PyCObject_Type', '_Py_get_cobject_type', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_get_capsule_type', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -691,17 +691,25 @@ prefix = 'PyPy' else: prefix = 'cpyexttest' - init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - INIT_FUNCTIONS.extend([ - lambda space: init_buffer(), - lambda space: init_pycobject(), - lambda space: init_capsule(), - ]) + # jump through hoops to avoid releasing the GIL during initialization + # of the cpyext module. 
The C functions are called with no wrapper, + # but must not do anything like calling back PyType_Ready(). We + # use them just to get a pointer to the PyTypeObjects defined in C. + get_buffer_type = rffi.llexternal('_%s_get_buffer_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + get_cobject_type = rffi.llexternal('_%s_get_cobject_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + get_capsule_type = rffi.llexternal('_%s_get_capsule_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + def init_types(space): + from pypy.module.cpyext.typeobject import py_type_ready + py_type_ready(space, get_buffer_type()) + py_type_ready(space, get_cobject_type()) + py_type_ready(space, get_capsule_type()) + INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, compilation_info=eci) diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,9 +783,9 @@ return size; } -void _Py_init_bufferobject(void) +PyTypeObject *_Py_get_buffer_type(void) { - PyType_Ready(&PyBuffer_Type); + return &PyBuffer_Type; } static PySequenceMethods buffer_as_sequence = { diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,8 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void _Py_init_capsule() +PyTypeObject *_Py_get_capsule_type(void) { - PyType_Ready(&PyCapsule_Type); + return &PyCapsule_Type; } - diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void _Py_init_pycobject() +PyTypeObject *_Py_get_cobject_type(void) { - PyType_Ready(&PyCObject_Type); + return &PyCObject_Type; } diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -549,11 +549,14 @@ pto.c_tp_flags |= Py_TPFLAGS_READY return pto +def py_type_ready(space, pto): + if pto.c_tp_flags & Py_TPFLAGS_READY: + return + type_realize(space, rffi.cast(PyObject, pto)) + @cpython_api([PyTypeObjectPtr], rffi.INT_real, error=-1) def PyType_Ready(space, pto): - if pto.c_tp_flags & Py_TPFLAGS_READY: - return 0 - type_realize(space, rffi.cast(PyObject, pto)) + py_type_ready(space, pto) return 0 def type_realize(space, py_obj): diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -12,8 +12,8 @@ cache = space.fromcache(MethodCache) cache.clear() if space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import IndexCache - cache = space.fromcache(IndexCache) + from pypy.objspace.std.mapdict import MapAttrCache + cache = space.fromcache(MapAttrCache) cache.clear() rgc.collect() return space.wrap(0) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -394,6 +394,9 @@ class W_Float64Box(W_FloatingBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") + def descr_as_integer_ratio(self, space): + return 
space.call_method(self.item(space), 'as_integer_ratio') + class W_ComplexFloatingBox(W_InexactBox): def descr_get_real(self, space): dtype = self._COMPONENTS_BOX._get_dtype(space) @@ -719,6 +722,7 @@ __module__ = "numpy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), __reduce__ = interp2app(W_Float64Box.descr_reduce), + as_integer_ratio = interp2app(W_Float64Box.descr_as_integer_ratio), ) W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -181,6 +181,11 @@ s = np.dtype([('a', 'int64'), ('b', 'int64')]).type('a' * 16) assert s.view('S16') == 'a' * 16 + def test_as_integer_ratio(self): + import numpy as np + raises(AttributeError, 'np.float32(1.5).as_integer_ratio()') + assert np.float64(1.5).as_integer_ratio() == (3, 2) + def test_complex_scalar_complex_cast(self): import numpy as np for tp in [np.csingle, np.cdouble, np.clongdouble]: diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -7,9 +7,9 @@ from rpython.rlib.test.test_clibffi import get_libm_name def main(libm_name): try: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types except ImportError: - sys.stderr.write('SKIP: cannot import _ffi\n') + sys.stderr.write('SKIP: cannot import _rawffi.alt\n') return 0 libm = CDLL(libm_name) @@ -45,9 +45,9 @@ from rpython.rlib.test.test_clibffi import get_libm_name def main(libm_name): try: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types except ImportError: - sys.stderr.write('SKIP: cannot import _ffi\n') + sys.stderr.write('SKIP: cannot import _rawffi.alt\n') return 0 libm = CDLL(libm_name) @@ -82,12 +82,12 @@ from threading import Thread # if os.name == 'nt': - from _ffi import WinDLL, types + from _rawffi.alt import WinDLL, types libc = WinDLL('Kernel32.dll') sleep = libc.getfunc('Sleep', [types.uint], types.uint) delays = [0]*n + [1000] else: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libc = CDLL(libc_name) sleep = libc.getfunc('sleep', [types.uint], types.uint) delays = [0]*n + [1] @@ -144,7 +144,7 @@ def test__ffi_struct(self): def main(): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types fields = [ Field('x', types.slong), ] diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -35,7 +35,7 @@ class A(object): pass a = A() - a.x = 2 + a.x = 1 def main(n): i = 0 while i < n: @@ -49,8 +49,7 @@ i9 = int_lt(i5, i6) guard_true(i9, descr=...) guard_not_invalidated(descr=...) - i10 = int_add_ovf(i5, i7) - guard_no_overflow(descr=...) + i10 = int_add(i5, 1) --TICK-- jump(..., descr=...) 
""") diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1,20 +1,21 @@ """The builtin bytearray implementation""" +from rpython.rlib.objectmodel import ( + import_from_mixin, newlist_hint, resizelist_hint) +from rpython.rlib.rstring import StringBuilder + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.signature import Signature from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.util import get_positive_index -from rpython.rlib.objectmodel import newlist_hint, resizelist_hint, import_from_mixin -from rpython.rlib.rstring import StringBuilder +NON_HEX_MSG = "non-hexadecimal number found in fromhex() arg at position %d" -def _make_data(s): - return [s[i] for i in range(len(s))] class W_BytearrayObject(W_Root): import_from_mixin(StringMethods) @@ -23,7 +24,7 @@ w_self.data = data def __repr__(w_self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) def _new(self, value): @@ -127,11 +128,6 @@ @staticmethod def descr_fromhex(space, w_bytearraytype, w_hexstring): - "bytearray.fromhex(string) -> bytearray\n" - "\n" - "Create a bytearray object from a string of hexadecimal numbers.\n" - "Spaces between two numbers are accepted.\n" - "Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\\xb9\\x01\\xef')." hexstring = space.str_w(w_hexstring) hexstring = hexstring.lower() data = [] @@ -143,18 +139,15 @@ i += 1 if i >= length: break - if i+1 == length: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % i)) + if i + 1 == length: + raise operationerrfmt(space.w_ValueError, NON_HEX_MSG, i) top = _hex_digit_to_int(hexstring[i]) if top == -1: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % i)) + raise operationerrfmt(space.w_ValueError, NON_HEX_MSG, i) bot = _hex_digit_to_int(hexstring[i+1]) if bot == -1: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,))) + raise operationerrfmt(space.w_ValueError, NON_HEX_MSG, i + 1) data.append(chr(top*16 + bot)) # in CPython bytearray.fromhex is a staticmethod, so @@ -178,23 +171,25 @@ from pypy.objspace.std.unicodeobject import ( _get_encoding_and_errors, encode_object ) - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) - # if w_source is an integer this correctly raises a TypeError - # the CPython error message is: "encoding or errors without a string argument" - # ours is: "expected unicode, got int object" + # if w_source is an integer this correctly raises a + # TypeError the CPython error message is: "encoding or + # errors without a string argument" ours is: "expected + # unicode, got int object" w_source = encode_object(space, w_source, encoding, errors) # Is it an int? 
try: count = space.int_w(w_source) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_TypeError): raise else: if count < 0: - raise OperationError(space.w_ValueError, - space.wrap("bytearray negative count")) + raise operationerrfmt(space.w_ValueError, + "bytearray negative count") self.data = ['\0'] * count return @@ -224,8 +219,8 @@ elif not '\x20' <= c < '\x7f': n = ord(c) buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) + buf.append("0123456789abcdef"[n >> 4]) + buf.append("0123456789abcdef"[n & 0xF]) else: buf.append(c) @@ -238,51 +233,60 @@ def descr_eq(self, space, w_other): try: - return space.newbool(self._val(space) == self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) == self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ne(self, space, w_other): try: - return space.newbool(self._val(space) != self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) != self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_lt(self, space, w_other): try: - return space.newbool(self._val(space) < self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) < self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_le(self, space, w_other): try: - return space.newbool(self._val(space) <= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) <= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_gt(self, space, w_other): try: - return space.newbool(self._val(space) > self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) > self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ge(self, space, w_other): try: - return space.newbool(self._val(space) >= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) >= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) + + def descr_iter(self, space): + return space.newseqiter(self) def descr_buffer(self, space): return BytearrayBuffer(self.data) @@ -297,7 +301,7 @@ def descr_inplace_mul(self, space, w_times): try: times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -312,12 +316,13 @@ _setitem_slice_helper(space, self.data, start, step, slicelength, sequence2, empty_elem='\x00') else: - idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") + idx = space.getindex_w(w_index, space.w_IndexError, + "bytearray index") try: self.data[idx] = getbytevalue(space, w_other) except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray index out of range")) + raise operationerrfmt(space.w_IndexError, + "bytearray index out of range") def descr_delitem(self, space, 
w_idx): if isinstance(w_idx, W_SliceObject): @@ -325,12 +330,13 @@ len(self.data)) _delitem_slice_helper(space, self.data, start, step, slicelength) else: - idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") + idx = space.getindex_w(w_idx, space.w_IndexError, + "bytearray index") try: del self.data[idx] except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray deletion index out of range")) + raise operationerrfmt(space.w_IndexError, + "bytearray deletion index out of range") def descr_append(self, space, w_item): self.data.append(getbytevalue(space, w_item)) @@ -357,10 +363,9 @@ result = self.data.pop(index) except IndexError: if not self.data: - raise OperationError(space.w_IndexError, space.wrap( - "pop from empty bytearray")) - raise OperationError(space.w_IndexError, space.wrap( - "pop index out of range")) + raise operationerrfmt(space.w_IndexError, + "pop from empty bytearray") + raise operationerrfmt(space.w_IndexError, "pop index out of range") return space.wrap(ord(result)) def descr_remove(self, space, w_char): @@ -368,27 +373,55 @@ try: self.data.remove(chr(char)) except ValueError: - raise OperationError(space.w_ValueError, space.wrap( - "value not found in bytearray")) + raise operationerrfmt(space.w_ValueError, + "value not found in bytearray") + + _StringMethods_descr_contains = descr_contains + def descr_contains(self, space, w_sub): + if space.isinstance_w(w_sub, space.w_int): + char = space.int_w(w_sub) + return _descr_contains_bytearray(self.data, space, char) + return self._StringMethods_descr_contains(space, w_sub) def descr_reverse(self, space): self.data.reverse() + +# ____________________________________________________________ +# helpers for slow paths, moved out because they contain loops + +def _make_data(s): + return [s[i] for i in range(len(s))] + + +def _descr_contains_bytearray(data, space, char): + if not 0 <= char < 256: + raise operationerrfmt(space.w_ValueError, + "byte must be in range(0, 256)") + for c in data: + if ord(c) == char: + return space.w_True + return space.w_False + +# ____________________________________________________________ + + def getbytevalue(space, w_value): if space.isinstance_w(w_value, space.w_str): string = space.str_w(w_value) if len(string) != 1: - raise OperationError(space.w_ValueError, space.wrap( - "string must be of size 1")) + raise operationerrfmt(space.w_ValueError, + "string must be of size 1") return string[0] value = space.getindex_w(w_value, None) if not 0 <= value < 256: # this includes the OverflowError in case the long is too large - raise OperationError(space.w_ValueError, space.wrap( - "byte must be in range(0, 256)")) + raise operationerrfmt(space.w_ValueError, + "byte must be in range(0, 256)") return chr(value) + def new_bytearray(space, w_bytearraytype, data): w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) W_BytearrayObject.__init__(w_obj, data) @@ -399,7 +432,7 @@ # String-like argument try: string = space.bufferstr_new_w(w_source) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_TypeError): raise else: @@ -413,7 +446,7 @@ while True: try: w_item = space.next(w_iter) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_StopIteration): raise break @@ -424,6 +457,7 @@ resizelist_hint(data, extended) return data + def _hex_digit_to_int(d): val = ord(d) if 47 < val < 58: @@ -560,12 +594,12 @@ def decode(): """B.decode(encoding=None, errors='strict') -> unicode - Decode B 
using the codec registered for encoding. encoding defaults - to the default encoding. errors may be given to set a different error - handling scheme. Default is 'strict' meaning that encoding errors raise - a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' - as well as any other name registered with codecs.register_error that is - able to handle UnicodeDecodeErrors. + Decode B using the codec registered for encoding. encoding defaults to + the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors + raise a UnicodeDecodeError. Other possible values are 'ignore' and + 'replace' as well as any other name registered with + codecs.register_error that is able to handle UnicodeDecodeErrors. """ def endswith(): @@ -602,7 +636,7 @@ """ def fromhex(): - """bytearray.fromhex(string) -> bytearray (static method) + r"""bytearray.fromhex(string) -> bytearray (static method) Create a bytearray object from a string of hexadecimal numbers. Spaces between two numbers are accepted. @@ -884,6 +918,8 @@ __ge__ = interp2app(W_BytearrayObject.descr_ge, doc=BytearrayDocstrings.__ge__.__doc__), + __iter__ = interp2app(W_BytearrayObject.descr_iter, + doc=BytearrayDocstrings.__iter__.__doc__), __len__ = interp2app(W_BytearrayObject.descr_len, doc=BytearrayDocstrings.__len__.__doc__), __contains__ = interp2app(W_BytearrayObject.descr_contains, @@ -1024,9 +1060,10 @@ _space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) -#XXX share the code again with the stuff in listobject.py + +# XXX share the code again with the stuff in listobject.py def _delitem_slice_helper(space, items, start, step, slicelength): - if slicelength==0: + if slicelength == 0: return if step < 0: @@ -1056,6 +1093,7 @@ assert start >= 0 # annotator hint del items[start:] + def _setitem_slice_helper(space, items, start, step, slicelength, sequence2, empty_elem): assert slicelength >= 0 diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -1,19 +1,23 @@ """The builtin str implementation""" +from rpython.rlib.jit import we_are_jitted +from rpython.rlib.objectmodel import ( + compute_hash, compute_unique_id, import_from_mixin) +from rpython.rlib.rstring import StringBuilder, replace + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import StringBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault, interpindirect2app +from pypy.interpreter.gateway import ( + WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods -from pypy.objspace.std.unicodeobject import (unicode_from_string, - decode_object, unicode_from_encoded_object, _get_encoding_and_errors) -from rpython.rlib.jit import we_are_jitted -from rpython.rlib.objectmodel import compute_hash, compute_unique_id, import_from_mixin -from rpython.rlib.rstring import StringBuilder, replace +from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, decode_object, unicode_from_encoded_object, + unicode_from_string) class W_AbstractBytesObject(W_Root): @@ -184,8 +188,8 @@ def 
descr_format(self, space, __args__): """S.format(*args, **kwargs) -> string - Return a formatted version of S, using substitutions from args and kwargs. - The substitutions are identified by braces ('{' and '}'). + Return a formatted version of S, using substitutions from args and + kwargs. The substitutions are identified by braces ('{' and '}'). """ def descr_index(self, space, w_sub, w_start=None, w_end=None): @@ -319,8 +323,8 @@ """S.rpartition(sep) -> (head, sep, tail) Search for the separator sep in S, starting at the end of S, and return - the part before it, the separator itself, and the part after it. If the - separator is not found, return two empty strings and S. + the part before it, the separator itself, and the part after it. If + the separator is not found, return two empty strings and S. """ @unwrap_spec(maxsplit=int) @@ -432,7 +436,7 @@ self._value = str def __repr__(self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%r)" % (self.__class__.__name__, self._value) def unwrap(self, space): @@ -521,7 +525,7 @@ return space.newlist_bytes(lst) @staticmethod - @unwrap_spec(w_object = WrappedDefault("")) + @unwrap_spec(w_object=WrappedDefault("")) def descr_new(space, w_stringtype, w_object): # NB. the default value of w_object is really a *wrapped* empty string: # there is gateway magic at work @@ -624,7 +628,8 @@ _StringMethods_descr_add = descr_add def descr_add(self, space, w_other): if space.isinstance_w(w_other, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) return space.add(self_as_unicode, w_other) elif space.isinstance_w(w_other, space.w_bytearray): # XXX: eliminate double-copy @@ -635,7 +640,7 @@ from pypy.objspace.std.strbufobject import W_StringBufferObject try: other = self._op_val(space, w_other) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -648,24 +653,32 @@ _StringMethods__startswith = _startswith def _startswith(self, space, value, w_prefix, start, end): if space.isinstance_w(w_prefix, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return self_as_unicode._startswith(space, self_as_unicode._value, w_prefix, start, end) - return self._StringMethods__startswith(space, value, w_prefix, start, end) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) + return self_as_unicode._startswith(space, self_as_unicode._value, + w_prefix, start, end) + return self._StringMethods__startswith(space, value, w_prefix, start, + end) _StringMethods__endswith = _endswith def _endswith(self, space, value, w_suffix, start, end): if space.isinstance_w(w_suffix, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return self_as_unicode._endswith(space, self_as_unicode._value, w_suffix, start, end) - return self._StringMethods__endswith(space, value, w_suffix, start, end) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) + return self_as_unicode._endswith(space, self_as_unicode._value, + w_suffix, start, end) + return self._StringMethods__endswith(space, value, w_suffix, start, + end) _StringMethods_descr_contains = descr_contains def descr_contains(self, space, w_sub): if space.isinstance_w(w_sub, space.w_unicode): from pypy.objspace.std.unicodeobject import W_UnicodeObject assert 
isinstance(w_sub, W_UnicodeObject) - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return space.newbool(self_as_unicode._value.find(w_sub._value) >= 0) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) + return space.newbool( + self_as_unicode._value.find(w_sub._value) >= 0) return self._StringMethods_descr_contains(space, w_sub) _StringMethods_descr_replace = descr_replace @@ -685,16 +698,19 @@ try: res = replace(input, sub, by, count) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string is too long")) + raise operationerrfmt(space.w_OverflowError, + "replace string is too long") return self_as_uni._new(res) return self._StringMethods_descr_replace(space, w_old, w_new, count) - def descr_lower(self, space): - return W_BytesObject(self._value.lower()) - - def descr_upper(self, space): - return W_BytesObject(self._value.upper()) + _StringMethods_descr_join = descr_join + def descr_join(self, space, w_list): + l = space.listview_bytes(w_list) + if l is not None: + if len(l) == 1: + return space.wrap(l[0]) + return space.wrap(self._val(space).join(l)) + return self._StringMethods_descr_join(space, w_list) def _join_return_one(self, space, w_obj): return (space.is_w(space.type(w_obj), space.w_str) or @@ -714,6 +730,12 @@ w_u = space.call_function(space.w_unicode, self) return space.call_method(w_u, "join", w_list) + def descr_lower(self, space): + return W_BytesObject(self._value.lower()) + + def descr_upper(self, space): + return W_BytesObject(self._value.upper()) + def descr_formatter_parser(self, space): from pypy.objspace.std.newformat import str_template_formatter tformat = str_template_formatter(space, space.str_w(self)) @@ -751,6 +773,7 @@ return W_BytesObject.EMPTY return W_BytesObject(s) + def wrapchar(space, c): if space.config.objspace.std.withprebuiltchar and not we_are_jitted(): return W_BytesObject.PREBUILT[ord(c)] @@ -830,7 +853,8 @@ __format__ = interpindirect2app(W_BytesObject.descr__format__), __mod__ = interpindirect2app(W_BytesObject.descr_mod), __buffer__ = interpindirect2app(W_AbstractBytesObject.descr_buffer), - __getnewargs__ = interpindirect2app(W_AbstractBytesObject.descr_getnewargs), + __getnewargs__ = interpindirect2app( + W_AbstractBytesObject.descr_getnewargs), _formatter_parser = interp2app(W_BytesObject.descr_formatter_parser), _formatter_field_name_split = interp2app(W_BytesObject.descr_formatter_field_name_split), @@ -865,8 +889,8 @@ buf.append_slice(s, startslice, i) startslice = i + 1 buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) + buf.append("0123456789abcdef"[n >> 4]) + buf.append("0123456789abcdef"[n & 0xF]) if use_bs_char: if i != startslice: diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -162,9 +162,9 @@ return self @staticmethod - def newlist_bytes(space, list_s): + def newlist_bytes(space, list_b): strategy = space.fromcache(BytesListStrategy) - storage = strategy.erase(list_s) + storage = strategy.erase(list_b) return W_ListObject.from_storage_and_strategy(space, storage, strategy) @staticmethod diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -1,15 +1,16 @@ import weakref -from rpython.rlib import jit, objectmodel, debug + +from rpython.rlib import jit, objectmodel, debug, rerased from 
rpython.rlib.rarithmetic import intmask, r_uint -from rpython.rlib import rerased from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.dictmultiobject import W_DictMultiObject, DictStrategy, ObjectDictStrategy -from pypy.objspace.std.dictmultiobject import BaseKeyIterator, BaseValueIterator, BaseItemIterator -from pypy.objspace.std.dictmultiobject import _never_equal_to_string -from pypy.objspace.std.objectobject import W_ObjectObject +from pypy.objspace.std.dictmultiobject import ( + W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, + BaseValueIterator, BaseItemIterator, _never_equal_to_string +) from pypy.objspace.std.typeobject import TypeCell + # ____________________________________________________________ # attribute shapes @@ -19,7 +20,7 @@ # we want to propagate knowledge that the result cannot be negative class AbstractAttribute(object): - _immutable_fields_ = ['terminator'] + _immutable_fields_ = ['terminator', 'ever_mutated?'] cache_attrs = None _size_estimate = 0 @@ -27,46 +28,60 @@ self.space = space assert isinstance(terminator, Terminator) self.terminator = terminator + self.ever_mutated = False def read(self, obj, selector): - index = self.index(selector) - if index < 0: + attr = self.find_map_attr(selector) + if attr is None: return self.terminator._read_terminator(obj, selector) - return obj._mapdict_read_storage(index) + if ( + jit.isconstant(attr.storageindex) and + jit.isconstant(obj) and + not attr.ever_mutated + ): + return self._pure_mapdict_read_storage(obj, attr.storageindex) + else: + return obj._mapdict_read_storage(attr.storageindex) + + @jit.elidable + def _pure_mapdict_read_storage(self, obj, storageindex): + return obj._mapdict_read_storage(storageindex) def write(self, obj, selector, w_value): - index = self.index(selector) - if index < 0: + attr = self.find_map_attr(selector) + if attr is None: return self.terminator._write_terminator(obj, selector, w_value) - obj._mapdict_write_storage(index, w_value) + if not attr.ever_mutated: + attr.ever_mutated = True + obj._mapdict_write_storage(attr.storageindex, w_value) return True def delete(self, obj, selector): return None - def index(self, selector): + def find_map_attr(self, selector): if jit.we_are_jitted(): # hack for the jit: - # the _index method is pure too, but its argument is never + # the _find_map_attr method is pure too, but its argument is never # constant, because it is always a new tuple - return self._index_jit_pure(selector[0], selector[1]) + return self._find_map_attr_jit_pure(selector[0], selector[1]) else: - return self._index_indirection(selector) + return self._find_map_attr_indirection(selector) @jit.elidable - def _index_jit_pure(self, name, index): - return self._index_indirection((name, index)) + def _find_map_attr_jit_pure(self, name, index): + return self._find_map_attr_indirection((name, index)) @jit.dont_look_inside - def _index_indirection(self, selector): + def _find_map_attr_indirection(self, selector): if (self.space.config.objspace.std.withmethodcache): - return self._index_cache(selector) - return self._index(selector) + return self._find_map_attr_cache(selector) + return self._find_map_attr(selector) @jit.dont_look_inside - def _index_cache(self, selector): + def _find_map_attr_cache(self, selector): space = self.space - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp SHIFT1 = SHIFT2 - 5 attrs_as_int = 
objectmodel.current_object_addr_as_int(self) @@ -74,32 +89,32 @@ # _pure_lookup_where_with_method_cache() hash_selector = objectmodel.compute_hash(selector) product = intmask(attrs_as_int * hash_selector) - index_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 + attr_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 # ^^^Note2: same comment too - cached_attr = cache.attrs[index_hash] + cached_attr = cache.attrs[attr_hash] if cached_attr is self: - cached_selector = cache.selectors[index_hash] + cached_selector = cache.selectors[attr_hash] if cached_selector == selector: - index = cache.indices[index_hash] + attr = cache.cached_attrs[attr_hash] if space.config.objspace.std.withmethodcachecounter: name = selector[0] cache.hits[name] = cache.hits.get(name, 0) + 1 - return index - index = self._index(selector) - cache.attrs[index_hash] = self - cache.selectors[index_hash] = selector - cache.indices[index_hash] = index + return attr + attr = self._find_map_attr(selector) + cache.attrs[attr_hash] = self + cache.selectors[attr_hash] = selector + cache.cached_attrs[attr_hash] = attr if space.config.objspace.std.withmethodcachecounter: name = selector[0] cache.misses[name] = cache.misses.get(name, 0) + 1 - return index + return attr - def _index(self, selector): + def _find_map_attr(self, selector): while isinstance(self, PlainAttribute): if selector == self.selector: - return self.position + return self self = self.back - return -1 + return None def copy(self, obj): raise NotImplementedError("abstract base class") @@ -155,7 +170,7 @@ # the order is important here: first change the map, then the storage, # for the benefit of the special subclasses obj._set_mapdict_map(attr) - obj._mapdict_write_storage(attr.position, w_value) + obj._mapdict_write_storage(attr.storageindex, w_value) def materialize_r_dict(self, space, obj, dict_w): raise NotImplementedError("abstract base class") @@ -261,11 +276,11 @@ return Terminator.set_terminator(self, obj, terminator) class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'position', 'back'] + _immutable_fields_ = ['selector', 'storageindex', 'back'] def __init__(self, selector, back): AbstractAttribute.__init__(self, back.space, back.terminator) self.selector = selector - self.position = back.length() + self.storageindex = back.length() self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 @@ -288,7 +303,7 @@ return new_obj def length(self): - return self.position + 1 + return self.storageindex + 1 def set_terminator(self, obj, terminator): new_obj = self.back.set_terminator(obj, terminator) @@ -304,7 +319,7 @@ new_obj = self.back.materialize_r_dict(space, obj, dict_w) if self.selector[1] == DICT: w_attr = space.wrap(self.selector[0]) - dict_w[w_attr] = obj._mapdict_read_storage(self.position) + dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex) else: self._copy_attr(obj, new_obj) return new_obj @@ -316,21 +331,21 @@ return new_obj def __repr__(self): - return "" % (self.selector, self.position, self.back) + return "" % (self.selector, self.storageindex, self.back) def _become(w_obj, new_obj): # this is like the _become method, really, but we cannot use that due to # RPython reasons w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) -class IndexCache(object): +class MapAttrCache(object): def __init__(self, space): assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE self._empty_selector 
= (None, INVALID) self.selectors = [self._empty_selector] * SIZE - self.indices = [0] * SIZE + self.cached_attrs = [None] * SIZE if space.config.objspace.std.withmethodcachecounter: self.hits = {} self.misses = {} @@ -340,6 +355,8 @@ self.attrs[i] = None for i in range(len(self.selectors)): self.selectors[i] = self._empty_selector + for i in range(len(self.cached_attrs)): + self.cached_attrs[i] = None # ____________________________________________________________ # object implementation @@ -416,16 +433,16 @@ self.typedef is W_InstanceObject.typedef) self._init_empty(w_subtype.terminator) - def getslotvalue(self, index): - key = ("slot", SLOTS_STARTING_FROM + index) + def getslotvalue(self, slotindex): + key = ("slot", SLOTS_STARTING_FROM + slotindex) return self._get_mapdict_map().read(self, key) - def setslotvalue(self, index, w_value): - key = ("slot", SLOTS_STARTING_FROM + index) + def setslotvalue(self, slotindex, w_value): + key = ("slot", SLOTS_STARTING_FROM + slotindex) self._get_mapdict_map().write(self, key, w_value) - def delslotvalue(self, index): - key = ("slot", SLOTS_STARTING_FROM + index) + def delslotvalue(self, slotindex): + key = ("slot", SLOTS_STARTING_FROM + slotindex) new_obj = self._get_mapdict_map().delete(self, key) if new_obj is None: return False @@ -460,11 +477,13 @@ self.map = map self.storage = make_sure_not_resized([None] * map.size_estimate()) - def _mapdict_read_storage(self, index): - assert index >= 0 - return self.storage[index] - def _mapdict_write_storage(self, index, value): - self.storage[index] = value + def _mapdict_read_storage(self, storageindex): + assert storageindex >= 0 + return self.storage[storageindex] + + def _mapdict_write_storage(self, storageindex, value): + self.storage[storageindex] = value + def _mapdict_storage_length(self): return len(self.storage) def _set_mapdict_storage_and_map(self, storage, map): @@ -519,7 +538,6 @@ rangenmin1 = unroll.unrolling_iterable(range(nmin1)) class subcls(BaseMapdictObject, supercls): def _init_empty(self, map): - from rpython.rlib.debug import make_sure_not_resized for i in rangen: setattr(self, "_value%s" % i, erase_item(None)) self.map = map @@ -531,26 +549,26 @@ erased = getattr(self, "_value%s" % nmin1) return unerase_list(erased) - def _mapdict_read_storage(self, index): - assert index >= 0 - if index < nmin1: + def _mapdict_read_storage(self, storageindex): + assert storageindex >= 0 + if storageindex < nmin1: for i in rangenmin1: - if index == i: + if storageindex == i: erased = getattr(self, "_value%s" % i) return unerase_item(erased) if self._has_storage_list(): - return self._mapdict_get_storage_list()[index - nmin1] + return self._mapdict_get_storage_list()[storageindex - nmin1] erased = getattr(self, "_value%s" % nmin1) return unerase_item(erased) - def _mapdict_write_storage(self, index, value): + def _mapdict_write_storage(self, storageindex, value): erased = erase_item(value) for i in rangenmin1: - if index == i: + if storageindex == i: setattr(self, "_value%s" % i, erased) return if self._has_storage_list(): - self._mapdict_get_storage_list()[index - nmin1] = value + self._mapdict_get_storage_list()[storageindex - nmin1] = value return setattr(self, "_value%s" % nmin1, erased) @@ -785,7 +803,7 @@ class CacheEntry(object): version_tag = None - index = 0 + storageindex = 0 w_method = None # for callmethod success_counter = 0 failure_counter = 0 @@ -818,14 +836,14 @@ pycode._mapdict_caches = [INVALID_CACHE_ENTRY] * num_entries @jit.dont_look_inside -def _fill_cache(pycode, nameindex, 
map, version_tag, index, w_method=None): +def _fill_cache(pycode, nameindex, map, version_tag, storageindex, w_method=None): entry = pycode._mapdict_caches[nameindex] if entry is INVALID_CACHE_ENTRY: entry = CacheEntry() pycode._mapdict_caches[nameindex] = entry entry.map_wref = weakref.ref(map) entry.version_tag = version_tag - entry.index = index + entry.storageindex = storageindex entry.w_method = w_method if pycode.space.config.objspace.std.withmethodcachecounter: entry.failure_counter += 1 @@ -837,7 +855,7 @@ map = w_obj._get_mapdict_map() if entry.is_valid_for_map(map) and entry.w_method is None: # everything matches, it's incredibly fast - return w_obj._mapdict_read_storage(entry.index) + return w_obj._mapdict_read_storage(entry.storageindex) return LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map) LOAD_ATTR_caching._always_inline_ = True @@ -871,19 +889,19 @@ selector = ("slot", SLOTS_STARTING_FROM + w_descr.index) else: # There is a non-data descriptor in the class. If there is - # also a dict attribute, use the latter, caching its position. + # also a dict attribute, use the latter, caching its storageindex. # If not, we loose. We could do better in this case too, # but we don't care too much; the common case of a method # invocation is handled by LOOKUP_METHOD_xxx below. selector = (name, DICT) # if selector[1] != INVALID: - index = map.index(selector) - if index >= 0: + attr = map.find_map_attr(selector) + if attr is not None: # Note that if map.terminator is a DevolvedDictTerminator, - # map.index() will always return -1 if selector[1]==DICT. - _fill_cache(pycode, nameindex, map, version_tag, index) - return w_obj._mapdict_read_storage(index) + # map.find_map_attr will always return None if selector[1]==DICT. + _fill_cache(pycode, nameindex, map, version_tag, attr.storageindex) + return w_obj._mapdict_read_storage(attr.storageindex) if space.config.objspace.std.withmethodcachecounter: INVALID_CACHE_ENTRY.failure_counter += 1 return space.getattr(w_obj, w_name) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -1,18 +1,22 @@ -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import unwrap_spec, WrappedDefault -from pypy.objspace.std import slicetype -from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +"""Functionality shared between bytes/bytearray/unicode""" + from rpython.rlib import jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import ovfcheck -from rpython.rlib.rstring import split, rsplit, replace, startswith, endswith +from rpython.rlib.rstring import endswith, replace, rsplit, split, startswith + +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import WrappedDefault, unwrap_spec +from pypy.objspace.std import slicetype +from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice class StringMethods(object): def _sliced(self, space, s, start, stop, orig_obj): assert start >= 0 assert stop >= 0 - #if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), space.w_str): + #if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), + # space.w_str): # return orig_obj return self._new(s[start:stop]) @@ -21,7 +25,7 @@ value = self._val(space) lenself = len(value) start, end = slicetype.unwrap_start_stop( - space, lenself, w_start, w_end, 
upper_bound=upper_bound) + space, lenself, w_start, w_end, upper_bound=upper_bound) return (value, start, end) def descr_len(self, space): @@ -31,17 +35,14 @@ # pass def descr_contains(self, space, w_sub): - from pypy.objspace.std.bytearrayobject import W_BytearrayObject - if (isinstance(self, W_BytearrayObject) and - space.isinstance_w(w_sub, space.w_int)): - char = space.int_w(w_sub) - return _descr_contains_bytearray(self.data, space, char) - return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) + value = self._val(space) + other = self._op_val(space, w_sub) + return space.newbool(value.find(other) >= 0) def descr_add(self, space, w_other): try: other = self._op_val(space, w_other) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -50,7 +51,7 @@ def descr_mul(self, space, w_times): try: times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -82,12 +83,11 @@ if index < 0: index += selflen if index < 0 or index >= selflen: - raise OperationError(space.w_IndexError, - space.wrap("string index out of range")) + raise operationerrfmt(space.w_IndexError, + "string index out of range") from pypy.objspace.std.bytearrayobject import W_BytearrayObject if isinstance(self, W_BytearrayObject): return space.wrap(ord(selfvalue[index])) - #return wrapchar(space, selfvalue[index]) return self._new(selfvalue[index]) def descr_getslice(self, space, w_start, w_stop): @@ -115,35 +115,39 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("center() argument 2 must be a single character")) + raise operationerrfmt(space.w_TypeError, + "center() argument 2 must be a single " + "character") d = width - len(value) - if d>0: + if d > 0: offset = d//2 + (d & width & 1) fillchar = fillchar[0] # annotator hint: it's a single character - u_centered = offset * fillchar + value + (d - offset) * fillchar + centered = offset * fillchar + value + (d - offset) * fillchar else: - u_centered = value + centered = value - return self._new(u_centered) + return self._new(centered) def descr_count(self, space, w_sub, w_start=None, w_end=None): value, start, end = self._convert_idx_params(space, w_start, w_end) - return space.newint(value.count(self._op_val(space, w_sub), start, end)) + return space.newint(value.count(self._op_val(space, w_sub), start, + end)) def descr_decode(self, space, w_encoding=None, w_errors=None): - from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ - unicode_from_string, decode_object - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, decode_object, unicode_from_string) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) if encoding is None and errors is None: return unicode_from_string(space, self) return decode_object(space, self, encoding, errors) def descr_encode(self, space, w_encoding=None, w_errors=None): - from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ - encode_object - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, encode_object) + encoding, errors = _get_encoding_and_errors(space, w_encoding, 
+ w_errors) return encode_object(space, self, encoding, errors) @unwrap_spec(tabsize=int) @@ -156,18 +160,19 @@ try: ovfcheck(len(splitted) * tabsize) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("new string is too long")) + raise operationerrfmt(space.w_OverflowError, + "new string is too long") expanded = oldtoken = splitted.pop(0) for token in splitted: - expanded += self._chr(' ') * self._tabindent(oldtoken, tabsize) + token + expanded += self._chr(' ') * self._tabindent(oldtoken, + tabsize) + token oldtoken = token return self._new(expanded) def _tabindent(self, token, tabsize): - "calculates distance behind the token to the next tabstop" + """calculates distance behind the token to the next tabstop""" distance = tabsize if token: @@ -203,8 +208,8 @@ (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.find(self._op_val(space, w_sub), start, end) if res < 0: - raise OperationError(space.w_ValueError, - space.wrap("substring not found in string.index")) + raise operationerrfmt(space.w_ValueError, + "substring not found in string.index") return space.wrap(res) @@ -212,8 +217,8 @@ (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.rfind(self._op_val(space, w_sub), start, end) if res < 0: - raise OperationError(space.w_ValueError, - space.wrap("substring not found in string.rindex")) + raise operationerrfmt(space.w_ValueError, + "substring not found in string.rindex") return space.wrap(res) @@ -307,22 +312,6 @@ return space.newbool(cased) def descr_join(self, space, w_list): - from pypy.objspace.std.bytesobject import W_BytesObject - from pypy.objspace.std.unicodeobject import W_UnicodeObject - - if isinstance(self, W_BytesObject): - l = space.listview_bytes(w_list) - if l is not None: - if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) - elif isinstance(self, W_UnicodeObject): - l = space.listview_unicode(w_list) - if l is not None: - if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) - list_w = space.listview(w_list) size = len(list_w) @@ -349,8 +338,7 @@ if check_item == 1: raise operationerrfmt( space.w_TypeError, - "sequence item %d: expected string, %s " - "found", i, space.type(w_s).getname(space)) + "sequence item %d: expected string, %T found", i, w_s) elif check_item == 2: return self._join_autoconvert(space, list_w) prealloc_size += len(self._op_val(space, w_s)) @@ -370,9 +358,9 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("ljust() argument 2 must be a single character")) - + raise operationerrfmt(space.w_TypeError, + "ljust() argument 2 must be a single " + "character") d = width - len(value) if d > 0: fillchar = fillchar[0] # annotator hint: it's a single character @@ -385,9 +373,9 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("rjust() argument 2 must be a single character")) - + raise operationerrfmt(space.w_TypeError, + "rjust() argument 2 must be a single " + "character") d = width - len(value) if d > 0: fillchar = fillchar[0] # annotator hint: it's a single character @@ -406,8 +394,7 @@ value = self._val(space) sub = self._op_val(space, w_sub) if not sub: - raise OperationError(space.w_ValueError, - space.wrap("empty separator")) + raise operationerrfmt(space.w_ValueError, "empty separator") pos = 
value.find(sub) if pos == -1: from pypy.objspace.std.bytearrayobject import W_BytearrayObject @@ -426,8 +413,7 @@ value = self._val(space) sub = self._op_val(space, w_sub) if not sub: - raise OperationError(space.w_ValueError, - space.wrap("empty separator")) + raise operationerrfmt(space.w_ValueError, "empty separator") pos = value.rfind(sub) if pos == -1: from pypy.objspace.std.bytearrayobject import W_BytearrayObject @@ -450,8 +436,8 @@ try: res = replace(input, sub, by, count) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string is too long")) + raise operationerrfmt(space.w_OverflowError, + "replace string is too long") return self._new(res) @unwrap_spec(maxsplit=int) @@ -466,7 +452,7 @@ by = self._op_val(space, w_sep) bylen = len(by) if bylen == 0: - raise OperationError(space.w_ValueError, space.wrap("empty separator")) + raise operationerrfmt(space.w_ValueError, "empty separator") res = split(value, by, maxsplit) return self._newlist_unwrapped(space, res) @@ -481,7 +467,7 @@ by = self._op_val(space, w_sep) bylen = len(by) if bylen == 0: - raise OperationError(space.w_ValueError, space.wrap("empty separator")) + raise operationerrfmt(space.w_ValueError, "empty separator") res = rsplit(value, by, maxsplit) return self._newlist_unwrapped(space, res) @@ -515,21 +501,22 @@ if self._startswith(space, value, w_prefix, start, end): return space.w_True return space.w_False - return space.newbool(self._startswith(space, value, w_prefix, start, end)) + return space.newbool(self._startswith(space, value, w_prefix, start, + end)) def _startswith(self, space, value, w_prefix, start, end): return startswith(value, self._op_val(space, w_prefix), start, end) def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): - (value, start, end) = self._convert_idx_params(space, w_start, - w_end, True) - + (value, start, end) = self._convert_idx_params(space, w_start, w_end, + True) if space.isinstance_w(w_suffix, space.w_tuple): for w_suffix in space.fixedview(w_suffix): if self._endswith(space, value, w_suffix, start, end): return space.w_True return space.w_False - return space.newbool(self._endswith(space, value, w_suffix, start, end)) + return space.newbool(self._endswith(space, value, w_suffix, start, + end)) def _endswith(self, space, value, w_prefix, start, end): return endswith(value, self._op_val(space, w_prefix), start, end) @@ -537,18 +524,17 @@ def _strip(self, space, w_chars, left, right): "internal function called by str_xstrip methods" value = self._val(space) - u_chars = self._op_val(space, w_chars) + chars = self._op_val(space, w_chars) lpos = 0 rpos = len(value) if left: - #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, value[lpos],w_chars) - while lpos < rpos and value[lpos] in u_chars: + while lpos < rpos and value[lpos] in chars: lpos += 1 if right: - while rpos > lpos and value[rpos - 1] in u_chars: + while rpos > lpos and value[rpos - 1] in chars: rpos -= 1 assert rpos >= lpos # annotator hint, don't remove @@ -562,13 +548,12 @@ rpos = len(value) if left: - #print "while %d < %d and -%s- in -%s-:"%(lpos, rpos, value[lpos],w_chars) while lpos < rpos and self._isspace(value[lpos]): - lpos += 1 + lpos += 1 if right: while rpos > lpos and self._isspace(value[rpos - 1]): - rpos -= 1 + rpos -= 1 assert rpos >= lpos # annotator hint, don't remove return self._sliced(space, value, lpos, rpos, self) @@ -629,9 +614,9 @@ else: table = self._op_val(space, w_table) if len(table) != 256: - raise OperationError( + raise operationerrfmt( 
space.w_ValueError, - space.wrap("translation table must be 256 characters long")) + "translation table must be 256 characters long") string = self._val(space) deletechars = self._op_val(space, w_deletechars) @@ -683,15 +668,6 @@ # ____________________________________________________________ # helpers for slow paths, moved out because they contain loops -def _descr_contains_bytearray(data, space, char): - if not 0 <= char < 256: - raise OperationError(space.w_ValueError, - space.wrap("byte must be in range(0, 256)")) - for c in data: - if ord(c) == char: - return space.w_True - return space.w_False - @specialize.argtype(0) def _descr_getslice_slowpath(selfvalue, start, step, sl): return [selfvalue[start + i*step] for i in range(sl)] diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -134,6 +134,7 @@ def test_iter(self): assert list(bytearray('hello')) == [104, 101, 108, 108, 111] + assert list(bytearray('hello').__iter__()) == [104, 101, 108, 108, 111] def test_compare(self): assert bytearray('hello') == bytearray('hello') diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -64,7 +64,7 @@ current = Terminator(space, "cls") for i in range(20000): current = PlainAttribute((str(i), DICT), current) - assert current.index(("0", DICT)) == 0 + assert current.find_map_attr(("0", DICT)).storageindex == 0 def test_search(): @@ -107,6 +107,45 @@ assert obj2.getdictvalue(space, "b") == 60 assert obj2.map is obj.map +def test_attr_immutability(monkeypatch): + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", 10) + obj.setdictvalue(space, "b", 20) + obj.setdictvalue(space, "b", 30) + assert obj.storage == [10, 30] + assert obj.map.ever_mutated == True + assert obj.map.back.ever_mutated == False + + indices = [] + + def _pure_mapdict_read_storage(obj, storageindex): + assert storageindex == 0 + indices.append(storageindex) + return obj._mapdict_read_storage(storageindex) + + obj.map._pure_mapdict_read_storage = _pure_mapdict_read_storage + monkeypatch.setattr(jit, "isconstant", lambda c: True) + + assert obj.getdictvalue(space, "a") == 10 + assert obj.getdictvalue(space, "b") == 30 + assert obj.getdictvalue(space, "a") == 10 + assert indices == [0, 0] + + obj2 = cls.instantiate() + obj2.setdictvalue(space, "a", 15) + obj2.setdictvalue(space, "b", 25) + assert obj2.map is obj.map + assert obj2.map.ever_mutated == True + assert obj2.map.back.ever_mutated == False + + # mutating obj2 changes the map + obj2.setdictvalue(space, "a", 50) + assert obj2.map.back.ever_mutated == True + assert obj2.map is obj.map + + + def test_delete(): for i, dattr in enumerate(["a", "b", "c"]): c = Class() @@ -231,7 +270,6 @@ obj = cls.instantiate() a = 0 b = 1 - c = 2 obj.setslotvalue(a, 50) obj.setslotvalue(b, 60) assert obj.getslotvalue(a) == 50 @@ -648,7 +686,7 @@ def test_delete_slot(self): class A(object): __slots__ = ['x'] - + a = A() a.x = 42 del a.x diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -1,19 +1,22 @@ """The builtin unicode implementation""" +from rpython.rlib.objectmodel import ( + compute_hash, compute_unique_id, import_from_mixin) +from rpython.rlib.rstring import 
UnicodeBuilder +from rpython.rlib.runicode import ( + make_unicode_escape_function, str_decode_ascii, str_decode_utf_8, + unicode_encode_ascii, unicode_encode_utf_8) + from pypy.interpreter import unicodehelper from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.module.unicodedata import unicodedb from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods -from rpython.rlib.objectmodel import compute_hash, compute_unique_id, import_from_mixin -from rpython.rlib.rstring import UnicodeBuilder -from rpython.rlib.runicode import (str_decode_utf_8, str_decode_ascii, - unicode_encode_utf_8, unicode_encode_ascii, make_unicode_escape_function) __all__ = ['W_UnicodeObject', 'wrapunicode', 'plain_str2unicode', 'encode_object', 'decode_object', 'unicode_from_object', @@ -29,7 +32,7 @@ w_self._value = unistr def __repr__(w_self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%r)" % (w_self.__class__.__name__, w_self._value) def unwrap(w_self, space): @@ -90,7 +93,8 @@ return w_other._value if space.isinstance_w(w_other, space.w_str): return unicode_from_string(space, w_other)._value - return unicode_from_encoded_object(space, w_other, None, "strict")._value + return unicode_from_encoded_object( + space, w_other, None, "strict")._value def _chr(self, char): assert len(char) == 1 @@ -144,14 +148,15 @@ return space.newlist_unicode(lst) @staticmethod - @unwrap_spec(w_string = WrappedDefault("")) + @unwrap_spec(w_string=WrappedDefault("")) def descr_new(space, w_unicodetype, w_string, w_encoding=None, w_errors=None): # NB. 
the default value of w_obj is really a *wrapped* empty string: # there is gateway magic at work w_obj = w_string - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) # convoluted logic for the case when unicode subclass has a __unicode__ # method, we need to call this method is_precisely_unicode = space.is_w(space.type(w_obj), space.w_unicode) @@ -159,8 +164,8 @@ (space.isinstance_w(w_obj, space.w_unicode) and space.findattr(w_obj, space.wrap('__unicode__')) is None)): if encoding is not None or errors is not None: - raise OperationError(space.w_TypeError, space.wrap( - 'decoding Unicode is not supported')) + raise operationerrfmt(space.w_TypeError, + "decoding Unicode is not supported") if (is_precisely_unicode and space.is_w(w_unicodetype, space.w_unicode)): return w_obj @@ -194,8 +199,8 @@ def descr_eq(self, space, w_other): try: - return space.newbool(self._val(space) == self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) == self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented if (e.match(space, space.w_UnicodeDecodeError) or @@ -206,11 +211,12 @@ space.warn(space.wrap(msg), space.w_UnicodeWarning) return space.w_False raise + return space.newbool(res) def descr_ne(self, space, w_other): try: - return space.newbool(self._val(space) != self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) != self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented if (e.match(space, space.w_UnicodeDecodeError) or @@ -221,38 +227,43 @@ space.warn(space.wrap(msg), space.w_UnicodeWarning) return space.w_True raise + return space.newbool(res) def descr_lt(self, space, w_other): try: - return space.newbool(self._val(space) < self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) < self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_le(self, space, w_other): try: - return space.newbool(self._val(space) <= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) <= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_gt(self, space, w_other): try: - return space.newbool(self._val(space) > self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) > self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ge(self, space, w_other): try: - return space.newbool(self._val(space) >= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) >= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_format(self, space, __args__): return newformat.format_method(space, self, __args__, is_unicode=True) @@ -272,12 +283,13 @@ def descr_translate(self, space, w_table): selfvalue = self._value w_sys = space.getbuiltinmodule('sys') - maxunicode = space.int_w(space.getattr(w_sys, space.wrap("maxunicode"))) + maxunicode = space.int_w(space.getattr(w_sys, + 
space.wrap("maxunicode"))) result = [] for unichar in selfvalue: try: w_newval = space.getitem(w_table, space.wrap(ord(unichar))) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_LookupError): result.append(unichar) else: @@ -288,22 +300,32 @@ elif space.isinstance_w(w_newval, space.w_int): newval = space.int_w(w_newval) if newval < 0 or newval > maxunicode: - raise OperationError( - space.w_TypeError, - space.wrap("character mapping must be in range(0x%x)" % (maxunicode + 1,))) + raise operationerrfmt(space.w_TypeError, + "character mapping must be in " + "range(%s)", hex(maxunicode + 1)) result.append(unichr(newval)) elif space.isinstance_w(w_newval, space.w_unicode): result.append(space.unicode_w(w_newval)) else: - raise OperationError( - space.w_TypeError, - space.wrap("character mapping must return integer, None or unicode")) + raise operationerrfmt(space.w_TypeError, + "character mapping must return " + "integer, None or unicode") return W_UnicodeObject(u''.join(result)) def descr_encode(self, space, w_encoding=None, w_errors=None): - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) return encode_object(space, self, encoding, errors) + _StringMethods_descr_join = descr_join + def descr_join(self, space, w_list): + l = space.listview_unicode(w_list) + if l is not None: + if len(l) == 1: + return space.wrap(l[0]) + return space.wrap(self._val(space).join(l)) + return self._StringMethods_descr_join(space, w_list) + def _join_return_one(self, space, w_obj): return space.is_w(space.type(w_obj), space.w_unicode) @@ -353,6 +375,7 @@ def wrapunicode(space, uni): return W_UnicodeObject(uni) + def plain_str2unicode(space, s): try: return unicode(s) @@ -378,17 +401,13 @@ def getdefaultencoding(space): return space.sys.defaultencoding + def _get_encoding_and_errors(space, w_encoding, w_errors): - if space.is_none(w_encoding): - encoding = None - else: - encoding = space.str_w(w_encoding) - if space.is_none(w_errors): From noreply at buildbot.pypy.org Sat Jan 25 20:47:13 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 20:47:13 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Set default translation backend to llvm to make the buildbot translate with it. Message-ID: <20140125194713.1561D1C00F8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68946:3cd27ae9f524 Date: 2014-01-25 20:46 +0100 http://bitbucket.org/pypy/pypy/changeset/3cd27ae9f524/ Log: Set default translation backend to llvm to make the buildbot translate with it. diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -39,7 +39,7 @@ ChoiceOption("type_system", "Type system to use when RTyping", ["lltype"], cmdline=None, default="lltype"), ChoiceOption("backend", "Backend to use for code generation", - ["c", "llvm"], default="c", + ["c", "llvm"], default="llvm", requires={ "c": [("translation.type_system", "lltype")], "llvm": [("translation.type_system", "lltype"), From noreply at buildbot.pypy.org Sat Jan 25 20:58:14 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 20:58:14 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Remove some tab character probably copied from somewhere by accident. 
Message-ID: <20140125195814.F1A241C00F8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68947:3e2e1ba9f97c Date: 2014-01-25 20:57 +0100 http://bitbucket.org/pypy/pypy/changeset/3e2e1ba9f97c/ Log: Remove some tab character probably copied from somewhere by accident. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1644,7 +1644,7 @@ if callable(entrypoint): setup_ptr = self.gcpolicy.get_setup_ptr() def main(argc, argv): - llop.gc_stack_bottom(lltype.Void) + llop.gc_stack_bottom(lltype.Void) try: if setup_ptr is not None: setup_ptr() From noreply at buildbot.pypy.org Sat Jan 25 23:17:11 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sat, 25 Jan 2014 23:17:11 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Add llvm_* operations to llinterp. Message-ID: <20140125221711.B3F111C00F8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68948:942d2a6f3ece Date: 2014-01-25 23:16 +0100 http://bitbucket.org/pypy/pypy/changeset/942d2a6f3ece/ Log: Add llvm_* operations to llinterp. diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -919,6 +919,18 @@ def op_stack_current(self): return 0 + def op_llvm_gcmap(self): + raise NotImplementedError("llvm_gcmap") + + def op_llvm_store_gcroot(self): + raise NotImplementedError("llvm_store_gcroot") + + def op_llvm_load_gcroot(self): + raise NotImplementedError("llvm_load_gcroot") + + def op_llvm_stack_malloc(self): + raise NotImplementedError("llvm_stack_malloc") + # __________________________________________________________ # operations on addresses From noreply at buildbot.pypy.org Sun Jan 26 01:00:40 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Sun, 26 Jan 2014 01:00:40 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add my FOSDEM talk Message-ID: <20140126000040.1D6D71C01F2@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5141:1b90c0f4886c Date: 2014-01-26 01:00 +0100 http://bitbucket.org/pypy/extradoc/changeset/1b90c0f4886c/ Log: Add my FOSDEM talk diff --git a/talk/fosdem2014/pypy-jit/Makefile b/talk/fosdem2014/pypy-jit/Makefile new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/Makefile @@ -0,0 +1,16 @@ +# you can find rst2beamer.py and inkscapeslide.py here: +# http://bitbucket.org/antocuni/env/src/619f486c4fad/bin/rst2beamer.py +# http://bitbucket.org/antocuni/env/src/619f486c4fad/bin/inkscapeslide.py + + +talk.pdf: talk.rst author.latex stylesheet.latex + rst2beamer.py --input-encoding=utf8 --output-encoding=utf8 --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit + #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit + pdflatex talk.latex || exit + +view: talk.pdf + evince talk.pdf > /dev/null 2>&1 & + +xpdf: talk.pdf + xpdf talk.pdf & diff --git a/talk/fosdem2014/pypy-jit/Speed.png b/talk/fosdem2014/pypy-jit/Speed.png new file mode 100644 index 0000000000000000000000000000000000000000..796a1ed2ef8f48d701a54242e78694ac16a70762 GIT binary patch [cut] diff --git a/talk/fosdem2014/pypy-jit/author.latex b/talk/fosdem2014/pypy-jit/author.latex new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/author.latex @@ -0,0 +1,8 @@ 
+\definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} + +\title[How PyPy makes your code run fast]{How PyPy makes your code run fast} +\author[rguillebert] +{Romain Guillebert} + +\institute{FOSDEM} +\date{February 2nd, 2014} diff --git a/talk/fosdem2014/pypy-jit/beamerdefs.txt b/talk/fosdem2014/pypy-jit/beamerdefs.txt new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/beamerdefs.txt @@ -0,0 +1,108 @@ +.. colors +.. =========================== + +.. role:: green +.. role:: red + + +.. general useful commands +.. =========================== + +.. |pause| raw:: latex + + \pause + +.. |small| raw:: latex + + {\small + +.. |end_small| raw:: latex + + } + +.. |scriptsize| raw:: latex + + {\scriptsize + +.. |end_scriptsize| raw:: latex + + } + +.. |strike<| raw:: latex + + \sout{ + +.. closed bracket +.. =========================== + +.. |>| raw:: latex + + } + + +.. example block +.. =========================== + +.. |example<| raw:: latex + + \begin{exampleblock}{ + + +.. |end_example| raw:: latex + + \end{exampleblock} + + + +.. alert block +.. =========================== + +.. |alert<| raw:: latex + + \begin{alertblock}{ + + +.. |end_alert| raw:: latex + + \end{alertblock} + + + +.. columns +.. =========================== + +.. |column1| raw:: latex + + \begin{columns} + \begin{column}{0.45\textwidth} + +.. |column2| raw:: latex + + \end{column} + \begin{column}{0.45\textwidth} + + +.. |end_columns| raw:: latex + + \end{column} + \end{columns} + + + +.. |snake| image:: ../../img/py-web-new.png + :scale: 15% + + + +.. nested blocks +.. =========================== + +.. |nested| raw:: latex + + \begin{columns} + \begin{column}{0.85\textwidth} + +.. |end_nested| raw:: latex + + \end{column} + \end{columns} diff --git a/talk/fosdem2014/pypy-jit/rst2beamer.py b/talk/fosdem2014/pypy-jit/rst2beamer.py new file mode 100755 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/rst2beamer.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python +# encoding: utf-8 +""" +A docutils script converting restructured text into Beamer-flavoured LaTeX. + +Beamer is a LaTeX document class for presentations. Via this script, ReST can +be used to prepare slides. It can be called:: + + rst2beamer.py infile.txt > outfile.tex + +where ``infile.tex`` contains the produced Beamer LaTeX. + +See for more details. + +""" +# TODO: modifications for handout sections? +# TOOD: sections and subsections? +# TODO: enable beamer themes? +# TODO: convert document metadata to front page fields? +# TODO: toc-conversion? +# TODO: fix descriptions + +# Unless otherwise stated, created by P-M Agapow on 2007-08-21 +# and open for academic & non-commercial use and modification . + +__docformat__ = 'restructuredtext en' +__author__ = "Paul-Michael Agapow " +__version__ = "0.2" + + +### IMPORTS ### + +import locale +from docutils.core import publish_cmdline, default_description +from docutils.writers.latex2e import Writer as Latex2eWriter +from docutils.writers.latex2e import LaTeXTranslator, DocumentClass +from docutils import nodes + +## Syntax highlighting: + +""" + .. sourcecode:: python + + My code goes here. + + + :copyright: 2007 by Georg Brandl. + :license: BSD, see LICENSE for more details. 
+""" + +from pygments.formatters import HtmlFormatter, LatexFormatter + +# The default formatter +DEFAULT = LatexFormatter() + + +from docutils.parsers.rst import directives + +from pygments import highlight +from pygments.lexers import get_lexer_by_name, TextLexer + +VARIANTS = { + 'linenos': LatexFormatter(linenos=True), +} + +def pygments_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + try: + lexer = get_lexer_by_name(arguments[0]) + except ValueError: + # no lexer found - use the text one instead of an exception + lexer = TextLexer() + formatter = DEFAULT + parsed = highlight(u'\n'.join(content), lexer, formatter) + return [nodes.raw('', parsed, format='latex')] + +pygments_directive.arguments = (1, 0, 1) +pygments_directive.content = 1 +pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS]) + +directives.register_directive('sourcecode', pygments_directive) + + +## multiple images as a single animation + +""" + .. animage:: foo-p*.pdf + :align: center + :scale: 50% +""" + +from glob import glob +import copy +from docutils.parsers.rst import directives +from docutils.parsers.rst.directives.images import Image +import docutils + +class Animage(Image): # Animated Image :-) + + def run(self): + def raw(text): + return docutils.nodes.raw('', text, format='latex') + + nodes = Image.run(self) + img = nodes[0] + if not isinstance(img, docutils.nodes.image): + return nodes # not an image, WTF? + newnodes = [] + pattern = img.attributes['uri'] + filenames = sorted(glob(pattern)) + for i, filename in enumerate(filenames): + newimg = copy.deepcopy(img) + newimg.attributes['uri'] = filename + newnodes += [raw(r'\only<%d>{' % (i+1)), + newimg, + raw('}')] + return newnodes + +directives.register_directive('animage', Animage) + + + + +## CONSTANTS & DEFINES: ### + +BEAMER_SPEC = ( + 'Beamer options', + 'These are derived almost entirely from the LaTeX2e options', + tuple ( + [ + ( + 'Specify theme.', + ['--theme'], + {'default': '', } + ), + ( + 'Specify document options. Multiple options can be given, ' + 'separated by commas. Default is "10pt,a4paper".', + ['--documentoptions'], + {'default': '', } + ), + ] + list (Latex2eWriter.settings_spec[2][2:]) + ), +) + +BEAMER_DEFAULTS = { + 'output_encoding': 'latin-1', + 'documentclass': 'beamer', +} + + +### IMPLEMENTATION ### + +try: + locale.setlocale (locale.LC_ALL, '') +except: + pass + +class BeamerTranslator (LaTeXTranslator): + """ + A converter for docutils elements to beamer-flavoured latex. 
+ """ + + def __init__ (self, document): + LaTeXTranslator.__init__ (self, document) + self.head_prefix = [x for x in self.head_prefix if ('{typearea}' not in x)] + hyperref_posn = [i for i in range (len (self.head_prefix)) if ('{hyperref}' in self.head_prefix[i])] + if not hyperref_posn: + self.head_prefix.append(None) + hyperref_posn = [-1] # XXX hack + self.head_prefix[hyperref_posn[0]] = ('\\usepackage{hyperref}\n' + + '\\usepackage{fancyvrb}\n' + + LatexFormatter(style="manni").get_style_defs() + + "\n") + + self.head_prefix.extend ([ + '\\definecolor{rrblitbackground}{rgb}{0.55, 0.3, 0.1}\n', + '\\newenvironment{rtbliteral}{\n', + '\\begin{ttfamily}\n', + '\\color{rrblitbackground}\n', + '}{\n', + '\\end{ttfamily}\n', + '}\n', + ]) + # this fixes the hardcoded section titles in docutils 0.4 + self.d_class = DocumentClass ('article') + + def begin_frametag (self, node): + if "verbatim" in str(node).lower(): + return '\\begin{frame}[containsverbatim,fragile]\n' + else: + return '\\begin{frame}\n' + + def end_frametag (self): + return '\\end{frame}\n' + + def visit_section (self, node): + if (self.section_level == 0): + self.body.append (self.begin_frametag(node)) + LaTeXTranslator.visit_section (self, node) + + def depart_section (self, node): + # Remove counter for potential subsections: + LaTeXTranslator.depart_section (self, node) + if (self.section_level == 0): + self.body.append (self.end_frametag()) + + def visit_title (self, node): + if (self.section_level == 1): + self.body.append ('\\frametitle{%s}\n\n' % self.encode(node.astext())) + raise nodes.SkipNode + else: + LaTeXTranslator.visit_title (self, node) + + def depart_title (self, node): + if (self.section_level != 1): + LaTeXTranslator.depart_title (self, node) + + def visit_literal_block(self, node): + if not self.active_table.is_open(): + self.body.append('\n\n\\smallskip\n\\begin{rtbliteral}\n') + self.context.append('\\end{rtbliteral}\n\\smallskip\n\n') + else: + self.body.append('\n') + self.context.append('\n') + if (self.settings.use_verbatim_when_possible and (len(node) == 1) + # in case of a parsed-literal containing just a "**bold**" word: + and isinstance(node[0], nodes.Text)): + self.verbatim = 1 + self.body.append('\\begin{verbatim}\n') + else: + self.literal_block = 1 + self.insert_none_breaking_blanks = 1 + + def depart_literal_block(self, node): + if self.verbatim: + self.body.append('\n\\end{verbatim}\n') + self.verbatim = 0 + else: + self.body.append('\n') + self.insert_none_breaking_blanks = 0 + self.literal_block = 0 + self.body.append(self.context.pop()) + + +class BeamerWriter (Latex2eWriter): + """ + A docutils writer that modifies the translator and settings for beamer. + """ + settings_spec = BEAMER_SPEC + settings_defaults = BEAMER_DEFAULTS + + def __init__(self): + Latex2eWriter.__init__(self) + self.translator_class = BeamerTranslator + + + + +if __name__ == '__main__': + description = ( + "Generates Beamer-flavoured LaTeX for PDF-based presentations." 
+ default_description) + publish_cmdline (writer=BeamerWriter(), description=description) + + +### END ###################################################################### + diff --git a/talk/fosdem2014/pypy-jit/stylesheet.latex b/talk/fosdem2014/pypy-jit/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/stylesheet.latex @@ -0,0 +1,11 @@ +\usetheme{Boadilla} +\usecolortheme{whale} +\setbeamercovered{transparent} +\setbeamertemplate{navigation symbols}{} + +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} diff --git a/talk/fosdem2014/pypy-jit/talk.rst b/talk/fosdem2014/pypy-jit/talk.rst new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/talk.rst @@ -0,0 +1,94 @@ +================================= +How PyPy makes your code run fast +================================= + +Introduction +============ + +* Romain Guillebert, @rguillebert + +* PyPy contributor for ~3 years + +* NumPyPy contributor + +* Please interrupt me + +* How the PyPy JIT works (kind of) + +* Warning : May contain traces of machine code + +speed.pypy.org +============== + +.. image:: Speed.png + :scale: 40% + :align: center + +AOT +=== + +* Ahead of time compilation + +* GCC + +* You can optimize only on what you know before running the program + +Interpreter +=========== + +* CPython, PyPy + +* Do everything at runtime + +* Not very smart + +JIT +=== + +* PyPy + +* Gathers information at runtime + +* Produces optimized machine code + +Tracing JIT +=========== + +* Optimizes loops + +* Traces one iteration of a loop + +* Produces a linear trace of execution + +* The trace is then optimized and compiled + +Guard +===== + +* The JIT produces a linear trace, but the code isn't + +* The JIT can make assumptions that are not always true + +* Guard : If this is true, continue, otherwise return to the interpreter + +* guard_true, guard_class, guard_no_exception, ... + +Bridge +====== + +* After a guard has failed X times, the other path is traced, compiled and attached to the trace + +Jitviewer +========= + +* Jitviewer demo + +Demo +==== + +* Edge detection algorithm + +Questions +========= + +* Questions ? 
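The tracing/guard/bridge slides above can be made concrete with a minimal, hand-written Python sketch -- this is not PyPy's actual JIT output; the operation names (int_lt, guard_true, jump) and the failure threshold are loose assumptions used only for illustration:

    # toy_trace.py -- illustration only, not taken from PyPy's source

    def user_loop(n):
        # The user-level loop; a tracing JIT records one hot iteration of it.
        total = 0
        i = 0
        while i < n:
            total += i
            i += 1
        return total

    # The recorded trace is linear: every branch taken while tracing becomes
    # a guard encoding the assumption "the same thing happens next time".
    TRACE = [
        ("int_lt",     "i", "n"),      # flag = i < n
        ("guard_true", "flag"),        # assume the loop keeps running
        ("int_add",    "total", "i"),  # total = total + i
        ("int_add",    "i", 1),        # i = i + 1
        ("jump",),                     # back to the start of the trace
    ]

    class Guard(object):
        """Toy guard: after enough failures the other path gets a bridge."""
        def __init__(self, threshold=10):
            self.failures = 0
            self.threshold = threshold
            self.bridge = None          # another trace, attached once hot

        def on_fail(self):
            self.failures += 1
            if self.bridge is None and self.failures >= self.threshold:
                self.bridge = "trace, compile and attach the other path"
            return self.bridge

Once a bridge is attached, a failing guard jumps into more machine code instead of returning to the interpreter on every failure, which is what keeps hot code fast even when the traced assumptions sometimes stop holding.
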
From noreply at buildbot.pypy.org Sun Jan 26 01:34:43 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Sun, 26 Jan 2014 01:34:43 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Update to the talk Message-ID: <20140126003443.71A851C00F8@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: extradoc Changeset: r5142:ab2f21f5af4a Date: 2014-01-26 01:34 +0100 http://bitbucket.org/pypy/extradoc/changeset/ab2f21f5af4a/ Log: Update to the talk diff --git a/talk/fosdem2014/pypy-jit/talk.rst b/talk/fosdem2014/pypy-jit/talk.rst --- a/talk/fosdem2014/pypy-jit/talk.rst +++ b/talk/fosdem2014/pypy-jit/talk.rst @@ -31,14 +31,14 @@ * GCC -* You can optimize only on what you know before running the program +* Can optimize only on what it knows before running the program Interpreter =========== * CPython, PyPy -* Do everything at runtime +* Executes an abstract representation of the program * Not very smart From noreply at buildbot.pypy.org Sun Jan 26 04:53:58 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 26 Jan 2014 04:53:58 +0100 (CET) Subject: [pypy-commit] pypy default: better error message when trying to unify a method with a function Message-ID: <20140126035358.4D67C1C00F8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68949:d3c3dd8408fe Date: 2014-01-26 03:52 +0000 http://bitbucket.org/pypy/pypy/changeset/d3c3dd8408fe/ Log: better error message when trying to unify a method with a function diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -460,15 +460,15 @@ def getKind(self): "Return the common Desc class of all descriptions in this PBC." - kinds = {} + kinds = set() for x in self.descriptions: assert type(x).__name__.endswith('Desc') # avoid import nightmares - kinds[x.__class__] = True - assert len(kinds) <= 1, ( - "mixing several kinds of PBCs: %r" % (kinds.keys(),)) + kinds.add(x.__class__) + if len(kinds) > 1: + raise AnnotatorError("mixing several kinds of PBCs: %r" % kinds) if not kinds: raise ValueError("no 'kind' on the 'None' PBC") - return kinds.keys()[0] + return kinds.pop() def simplify(self): if self.descriptions: From noreply at buildbot.pypy.org Sun Jan 26 07:04:00 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 26 Jan 2014 07:04:00 +0100 (CET) Subject: [pypy-commit] pypy default: Use a decorator to register builtin_analyzers. Message-ID: <20140126060400.C06FF1C0134@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68950:a920e8967669 Date: 2014-01-26 06:03 +0000 http://bitbucket.org/pypy/pypy/changeset/a920e8967669/ Log: Use a decorator to register builtin_analyzers. (Had anybody noticed that test() and termios_error_init() did nothing?) 
diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -44,6 +44,14 @@ func, args, realresult, s_result)) return s_realresult +BUILTIN_ANALYZERS = {} + +def analyzer_for(func): + def wrapped(ann_func): + BUILTIN_ANALYZERS[func] = ann_func + return func + return wrapped + # ____________________________________________________________ def builtin_range(*args): @@ -250,30 +258,46 @@ s = SomeInteger(nonneg=True, knowntype=s.knowntype) return s +# collect all functions +import __builtin__ +for name, value in globals().items(): + if name.startswith('builtin_'): + original = getattr(__builtin__, name[8:]) + BUILTIN_ANALYZERS[original] = value + + at analyzer_for(getattr(OSError.__init__, 'im_func', OSError.__init__)) def OSError_init(s_self, *args): pass -def WindowsError_init(s_self, *args): +try: + WindowsError +except NameError: pass +else: + @analyzer_for(getattr(WindowsError.__init__, 'im_func', WindowsError.__init__)) + def WindowsError_init(s_self, *args): + pass -def termios_error_init(s_self, *args): - pass - + at analyzer_for(getattr(object.__init__, 'im_func', object.__init__)) def object_init(s_self, *args): # ignore - mostly used for abstract classes initialization pass + at analyzer_for(sys.getdefaultencoding) def conf(): return SomeString() + at analyzer_for(rpython.rlib.rarithmetic.intmask) def rarith_intmask(s_obj): return SomeInteger() + at analyzer_for(rpython.rlib.rarithmetic.longlongmask) def rarith_longlongmask(s_obj): return SomeInteger(knowntype=rpython.rlib.rarithmetic.r_longlong) + at analyzer_for(rpython.rlib.objectmodel.instantiate) def robjmodel_instantiate(s_clspbc): assert isinstance(s_clspbc, SomePBC) clsdef = None @@ -288,6 +312,7 @@ clsdef = clsdef.commonbase(cdef) return SomeInstance(clsdef) + at analyzer_for(rpython.rlib.objectmodel.r_dict) def robjmodel_r_dict(s_eqfn, s_hashfn, s_force_non_null=None): if s_force_non_null is None: force_non_null = False @@ -299,11 +324,13 @@ dictdef.dictkey.update_rdict_annotations(s_eqfn, s_hashfn) return SomeDict(dictdef) + at analyzer_for(rpython.rlib.objectmodel.r_ordereddict) def robjmodel_r_ordereddict(s_eqfn, s_hashfn): dictdef = getbookkeeper().getdictdef(is_r_dict=True) dictdef.dictkey.update_rdict_annotations(s_eqfn, s_hashfn) return SomeOrderedDict(dictdef) + at analyzer_for(rpython.rlib.objectmodel.hlinvoke) def robjmodel_hlinvoke(s_repr, s_llcallable, *args_s): from rpython.rtyper import rmodel from rpython.rtyper.error import TyperError @@ -323,81 +350,49 @@ return lltype_to_annotation(rresult.lowleveltype) + at analyzer_for(rpython.rlib.objectmodel.keepalive_until_here) def robjmodel_keepalive_until_here(*args_s): return immutablevalue(None) + at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr) def llmemory_cast_ptr_to_adr(s): from rpython.annotator.model import SomeInteriorPtr assert not isinstance(s, SomeInteriorPtr) return SomeAddress() + at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_adr_to_ptr) def llmemory_cast_adr_to_ptr(s, s_type): assert s_type.is_constant() return SomePtr(s_type.const) + at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_adr_to_int) def llmemory_cast_adr_to_int(s, s_mode=None): return SomeInteger() # xxx + at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_int_to_adr) def llmemory_cast_int_to_adr(s): return SomeAddress() -def unicodedata_decimal(s_uchr): - raise TypeError("unicodedate.decimal() calls should not happen at interp-level") - -def test(*args): 
- return s_Bool - -# collect all functions -import __builtin__ -BUILTIN_ANALYZERS = {} -for name, value in globals().items(): - if name.startswith('builtin_'): - original = getattr(__builtin__, name[8:]) - BUILTIN_ANALYZERS[original] = value - -BUILTIN_ANALYZERS[rpython.rlib.rarithmetic.intmask] = rarith_intmask -BUILTIN_ANALYZERS[rpython.rlib.rarithmetic.longlongmask] = rarith_longlongmask -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.instantiate] = robjmodel_instantiate -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.r_dict] = robjmodel_r_dict -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.r_ordereddict] = robjmodel_r_ordereddict -BUILTIN_ANALYZERS[SomeOrderedDict.knowntype] = lambda : SomeOrderedDict(getbookkeeper().getdictdef()) -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.hlinvoke] = robjmodel_hlinvoke -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.keepalive_until_here] = robjmodel_keepalive_until_here -BUILTIN_ANALYZERS[rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr] = llmemory_cast_ptr_to_adr -BUILTIN_ANALYZERS[rpython.rtyper.lltypesystem.llmemory.cast_adr_to_ptr] = llmemory_cast_adr_to_ptr -BUILTIN_ANALYZERS[rpython.rtyper.lltypesystem.llmemory.cast_adr_to_int] = llmemory_cast_adr_to_int -BUILTIN_ANALYZERS[rpython.rtyper.lltypesystem.llmemory.cast_int_to_adr] = llmemory_cast_int_to_adr - -BUILTIN_ANALYZERS[getattr(OSError.__init__, 'im_func', OSError.__init__)] = ( - OSError_init) - -try: - WindowsError -except NameError: - pass -else: - BUILTIN_ANALYZERS[getattr(WindowsError.__init__, 'im_func', - WindowsError.__init__)] = ( - WindowsError_init) - -BUILTIN_ANALYZERS[sys.getdefaultencoding] = conf try: import unicodedata except ImportError: pass else: - BUILTIN_ANALYZERS[unicodedata.decimal] = unicodedata_decimal # xxx + @analyzer_for(unicodedata.decimal) + def unicodedata_decimal(s_uchr): + raise TypeError("unicodedate.decimal() calls should not happen at interp-level") -# object - just ignore object.__init__ -if hasattr(object.__init__, 'im_func'): - BUILTIN_ANALYZERS[object.__init__.im_func] = object_init -else: - BUILTIN_ANALYZERS[object.__init__] = object_init + at analyzer_for(SomeOrderedDict.knowntype) +def analyze(): + return SomeOrderedDict(getbookkeeper().getdictdef()) + + # annotation of low-level types from rpython.annotator.model import SomePtr from rpython.rtyper.lltypesystem import lltype + at analyzer_for(lltype.malloc) def malloc(s_T, s_n=None, s_flavor=None, s_zero=None, s_track_allocation=None, s_add_memory_pressure=None): assert (s_n is None or s_n.knowntype == int @@ -422,6 +417,7 @@ r = SomePtr(lltype.Ptr(s_T.const)) return r + at analyzer_for(lltype.free) def free(s_p, s_flavor, s_track_allocation=None): assert s_flavor.is_constant() assert s_track_allocation is None or s_track_allocation.is_constant() @@ -431,34 +427,41 @@ #p = lltype.malloc(T, flavor=s_flavor.const) #lltype.free(p, flavor=s_flavor.const) + at analyzer_for(lltype.render_immortal) def render_immortal(s_p, s_track_allocation=None): assert s_track_allocation is None or s_track_allocation.is_constant() + at analyzer_for(lltype.typeOf) def typeOf(s_val): lltype = annotation_to_lltype(s_val, info="in typeOf(): ") return immutablevalue(lltype) + at analyzer_for(lltype.cast_primitive) def cast_primitive(T, s_v): assert T.is_constant() return ll_to_annotation(lltype.cast_primitive(T.const, annotation_to_lltype(s_v)._defl())) + at analyzer_for(lltype.nullptr) def nullptr(T): assert T.is_constant() p = lltype.nullptr(T.const) return immutablevalue(p) + at analyzer_for(lltype.cast_pointer) def 
cast_pointer(PtrT, s_p): assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p assert PtrT.is_constant() cast_p = lltype.cast_pointer(PtrT.const, s_p.ll_ptrtype._defl()) return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) + at analyzer_for(lltype.cast_opaque_ptr) def cast_opaque_ptr(PtrT, s_p): assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p assert PtrT.is_constant() cast_p = lltype.cast_opaque_ptr(PtrT.const, s_p.ll_ptrtype._defl()) return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) + at analyzer_for(lltype.direct_fieldptr) def direct_fieldptr(s_p, s_fieldname): assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p assert s_fieldname.is_constant() @@ -466,62 +469,54 @@ s_fieldname.const) return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) + at analyzer_for(lltype.direct_arrayitems) def direct_arrayitems(s_p): assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p cast_p = lltype.direct_arrayitems(s_p.ll_ptrtype._example()) return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) + at analyzer_for(lltype.direct_ptradd) def direct_ptradd(s_p, s_n): assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p # don't bother with an example here: the resulting pointer is the same return s_p + at analyzer_for(lltype.cast_ptr_to_int) def cast_ptr_to_int(s_ptr): # xxx return SomeInteger() + at analyzer_for(lltype.cast_int_to_ptr) def cast_int_to_ptr(PtrT, s_int): assert PtrT.is_constant() return SomePtr(ll_ptrtype=PtrT.const) + at analyzer_for(lltype.identityhash) def identityhash(s_obj): assert isinstance(s_obj, SomePtr) return SomeInteger() + at analyzer_for(lltype.getRuntimeTypeInfo) def getRuntimeTypeInfo(T): assert T.is_constant() return immutablevalue(lltype.getRuntimeTypeInfo(T.const)) + at analyzer_for(lltype.runtime_type_info) def runtime_type_info(s_p): assert isinstance(s_p, SomePtr), "runtime_type_info of non-pointer: %r" % s_p return SomePtr(lltype.typeOf(lltype.runtime_type_info(s_p.ll_ptrtype._example()))) + at analyzer_for(lltype.Ptr) def constPtr(T): assert T.is_constant() return immutablevalue(lltype.Ptr(T.const)) -BUILTIN_ANALYZERS[lltype.malloc] = malloc -BUILTIN_ANALYZERS[lltype.free] = free -BUILTIN_ANALYZERS[lltype.render_immortal] = render_immortal -BUILTIN_ANALYZERS[lltype.typeOf] = typeOf -BUILTIN_ANALYZERS[lltype.cast_primitive] = cast_primitive -BUILTIN_ANALYZERS[lltype.nullptr] = nullptr -BUILTIN_ANALYZERS[lltype.cast_pointer] = cast_pointer -BUILTIN_ANALYZERS[lltype.cast_opaque_ptr] = cast_opaque_ptr -BUILTIN_ANALYZERS[lltype.direct_fieldptr] = direct_fieldptr -BUILTIN_ANALYZERS[lltype.direct_arrayitems] = direct_arrayitems -BUILTIN_ANALYZERS[lltype.direct_ptradd] = direct_ptradd -BUILTIN_ANALYZERS[lltype.cast_ptr_to_int] = cast_ptr_to_int -BUILTIN_ANALYZERS[lltype.cast_int_to_ptr] = cast_int_to_ptr -BUILTIN_ANALYZERS[lltype.identityhash] = identityhash -BUILTIN_ANALYZERS[lltype.getRuntimeTypeInfo] = getRuntimeTypeInfo -BUILTIN_ANALYZERS[lltype.runtime_type_info] = runtime_type_info -BUILTIN_ANALYZERS[lltype.Ptr] = constPtr #________________________________ # weakrefs import weakref + at analyzer_for(weakref.ref) def weakref_ref(s_obj): if not isinstance(s_obj, SomeInstance): raise Exception("cannot take a weakref to %r" % (s_obj,)) @@ -530,8 +525,10 @@ "a weakref to cannot be None") return SomeWeakRef(s_obj.classdef) -BUILTIN_ANALYZERS[weakref.ref] = weakref_ref +from rpython.rtyper.lltypesystem import llmemory + + at analyzer_for(llmemory.weakref_create) def llweakref_create(s_obj): if (not isinstance(s_obj, 
SomePtr) or s_obj.ll_ptrtype.TO._gckind != 'gc'): @@ -539,6 +536,7 @@ s_obj,)) return SomePtr(llmemory.WeakRefPtr) + at analyzer_for(llmemory.weakref_deref ) def llweakref_deref(s_ptrtype, s_wref): if not (s_ptrtype.is_constant() and isinstance(s_ptrtype.const, lltype.Ptr) and @@ -551,10 +549,12 @@ "got %s" % (s_wref,)) return SomePtr(s_ptrtype.const) + at analyzer_for(llmemory.cast_ptr_to_weakrefptr) def llcast_ptr_to_weakrefptr(s_ptr): assert isinstance(s_ptr, SomePtr) return SomePtr(llmemory.WeakRefPtr) + at analyzer_for(llmemory.cast_weakrefptr_to_ptr) def llcast_weakrefptr_to_ptr(s_ptrtype, s_wref): if not (s_ptrtype.is_constant() and isinstance(s_ptrtype.const, lltype.Ptr)): @@ -566,56 +566,47 @@ "got %s" % (s_wref,)) return SomePtr(s_ptrtype.const) -from rpython.rtyper.lltypesystem import llmemory -BUILTIN_ANALYZERS[llmemory.weakref_create] = llweakref_create -BUILTIN_ANALYZERS[llmemory.weakref_deref ] = llweakref_deref -BUILTIN_ANALYZERS[llmemory.cast_ptr_to_weakrefptr] = llcast_ptr_to_weakrefptr -BUILTIN_ANALYZERS[llmemory.cast_weakrefptr_to_ptr] = llcast_weakrefptr_to_ptr - #________________________________ # non-gc objects + at analyzer_for(rpython.rlib.objectmodel.free_non_gc_object) def robjmodel_free_non_gc_object(obj): pass -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.free_non_gc_object] = ( - robjmodel_free_non_gc_object) #_________________________________ # memory address + at analyzer_for(llmemory.raw_malloc) def raw_malloc(s_size): assert isinstance(s_size, SomeInteger) #XXX add noneg...? return SomeAddress() + at analyzer_for(llmemory.raw_malloc_usage) def raw_malloc_usage(s_size): assert isinstance(s_size, SomeInteger) #XXX add noneg...? return SomeInteger(nonneg=True) + at analyzer_for(llmemory.raw_free) def raw_free(s_addr): assert isinstance(s_addr, SomeAddress) + at analyzer_for(llmemory.raw_memclear) def raw_memclear(s_addr, s_int): assert isinstance(s_addr, SomeAddress) assert isinstance(s_int, SomeInteger) + at analyzer_for(llmemory.raw_memcopy) def raw_memcopy(s_addr1, s_addr2, s_int): assert isinstance(s_addr1, SomeAddress) assert isinstance(s_addr2, SomeAddress) assert isinstance(s_int, SomeInteger) #XXX add noneg...? 
-BUILTIN_ANALYZERS[llmemory.raw_malloc] = raw_malloc -BUILTIN_ANALYZERS[llmemory.raw_malloc_usage] = raw_malloc_usage -BUILTIN_ANALYZERS[llmemory.raw_free] = raw_free -BUILTIN_ANALYZERS[llmemory.raw_memclear] = raw_memclear -BUILTIN_ANALYZERS[llmemory.raw_memcopy] = raw_memcopy #_________________________________ # offsetof/sizeof + at analyzer_for(llmemory.offsetof) def offsetof(TYPE, fldname): return SomeInteger() - -BUILTIN_ANALYZERS[llmemory.offsetof] = offsetof - From noreply at buildbot.pypy.org Sun Jan 26 18:04:11 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 26 Jan 2014 18:04:11 +0100 (CET) Subject: [pypy-commit] pypy default: fix annotation of malloc_nonmovable() Message-ID: <20140126170411.7CF7A1C0134@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68951:1d37bca68fa8 Date: 2014-01-26 17:03 +0000 http://bitbucket.org/pypy/pypy/changeset/1d37bca68fa8/ Log: fix annotation of malloc_nonmovable() diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -114,8 +114,8 @@ def compute_result_annotation(self, s_TP, s_n=None, s_zero=None): # basically return the same as malloc - from rpython.annotator.builtin import malloc - return malloc(s_TP, s_n, s_zero=s_zero) + from rpython.annotator.builtin import BUILTIN_ANALYZERS + return BUILTIN_ANALYZERS[lltype.malloc](s_TP, s_n, s_zero=s_zero) def specialize_call(self, hop, i_zero=None): # XXX assume flavor and zero to be None by now @@ -266,7 +266,7 @@ func._dont_inline_ = True func._no_release_gil_ = True return func - + def no_collect(func): func._dont_inline_ = True func._gc_no_collect_ = True From noreply at buildbot.pypy.org Sun Jan 26 18:51:11 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 26 Jan 2014 18:51:11 +0100 (CET) Subject: [pypy-commit] pypy default: add more cpython compatibility for numpy build on windows Message-ID: <20140126175111.8E0181D23DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r68952:315da51498e7 Date: 2014-01-26 19:50 +0200 http://bitbucket.org/pypy/pypy/changeset/315da51498e7/ Log: add more cpython compatibility for numpy build on windows diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -15,6 +15,8 @@ #define HAVE_UNICODE #define WITHOUT_COMPLEX #define HAVE_WCHAR_H 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_STAT_H 1 /* PyPy supposes Py_UNICODE == wchar_t */ #define HAVE_USABLE_WCHAR_T 1 diff --git a/pypy/module/cpyext/include/pyport.h b/pypy/module/cpyext/include/pyport.h --- a/pypy/module/cpyext/include/pyport.h +++ b/pypy/module/cpyext/include/pyport.h @@ -64,4 +64,45 @@ # error "Python needs a typedef for Py_uintptr_t in pyport.h." #endif /* HAVE_UINTPTR_T */ +/******************************* + * stat() and fstat() fiddling * + *******************************/ + +/* We expect that stat and fstat exist on most systems. + * It's confirmed on Unix, Mac and Windows. + * If you don't have them, add + * #define DONT_HAVE_STAT + * and/or + * #define DONT_HAVE_FSTAT + * to your pyconfig.h. Python code beyond this should check HAVE_STAT and + * HAVE_FSTAT instead. + * Also + * #define HAVE_SYS_STAT_H + * if exists on your platform, and + * #define HAVE_STAT_H + * if does. 
+ */ +#ifndef DONT_HAVE_STAT +#define HAVE_STAT +#endif + +#ifndef DONT_HAVE_FSTAT +#define HAVE_FSTAT +#endif + +#ifdef RISCOS +#include +#include "unixstuff.h" +#endif + +#ifdef HAVE_SYS_STAT_H +#if defined(PYOS_OS2) && defined(PYCC_GCC) +#include +#endif +#include +#elif defined(HAVE_STAT_H) +#include +#else +#endif + #endif /* Py_PYPORT_H */ From noreply at buildbot.pypy.org Sun Jan 26 19:56:33 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 26 Jan 2014 19:56:33 +0100 (CET) Subject: [pypy-commit] pypy default: move SomeAddress and SomeTypedAddressAccess to rpython.rtyper Message-ID: <20140126185633.2E5301C069E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68953:6472d7d4cba1 Date: 2014-01-26 18:54 +0000 http://bitbucket.org/pypy/pypy/changeset/6472d7d4cba1/ Log: move SomeAddress and SomeTypedAddressAccess to rpython.rtyper diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -10,10 +10,11 @@ SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, - SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, + SomeWeakRef, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) +from rpython.rtyper.llannotation import SomeAddress, SomeTypedAddressAccess from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant from rpython.flowspace.operation import op diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -9,10 +9,11 @@ from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, SomeString, SomeChar, SomeFloat, SomePtr, unionof, SomeInstance, SomeDict, - SomeBuiltin, SomePBC, SomeInteger, TLS, SomeAddress, SomeUnicodeCodePoint, + SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray, SomeConstantType) +from rpython.rtyper.llannotation import SomeAddress from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -5,11 +5,12 @@ from rpython.annotator.model import ( SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, s_Bool, - SomeUnicodeCodePoint, SomeAddress, SomeFloat, unionof, SomeUnicodeString, + SomeUnicodeCodePoint, SomeFloat, unionof, SomeUnicodeString, SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, SomeOrderedDict, SomeByteArray, annotation_to_lltype, lltype_to_annotation, ll_to_annotation, add_knowntypedata, s_ImpossibleValue,) +from rpython.rtyper.llannotation import SomeAddress from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import description from rpython.flowspace.model import Constant diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ 
b/rpython/annotator/model.py @@ -568,33 +568,6 @@ # 'classdef' is None for known-to-be-dead weakrefs. self.classdef = classdef -# ____________________________________________________________ -# memory addresses - -from rpython.rtyper.lltypesystem import llmemory - - -class SomeAddress(SomeObject): - immutable = True - - def can_be_none(self): - return False - - def is_null_address(self): - return self.is_immutable_constant() and not self.const - - -# The following class is used to annotate the intermediate value that -# appears in expressions of the form: -# addr.signed[offset] and addr.signed[offset] = value - -class SomeTypedAddressAccess(SomeObject): - def __init__(self, type): - self.type = type - - def can_be_none(self): - return False - #____________________________________________________________ # annotation of low-level types @@ -630,6 +603,8 @@ return False +from rpython.rtyper.llannotation import SomeAddress +from rpython.rtyper.lltypesystem import llmemory annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -9,9 +9,10 @@ from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, - SomePBC, SomeTypedAddressAccess, SomeAddress, SomeType, s_ImpossibleValue, + SomePBC, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) +from rpython.rtyper.llannotation import SomeAddress, SomeTypedAddressAccess from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -7,6 +7,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.gctransform.framework import ( BaseFrameworkGCTransformer, BaseRootWalker) +from rpython.rtyper.llannotation import SomeAddress from rpython.rtyper.rbuiltin import gen_cast from rpython.translator.unsimplify import copyvar, varoftype from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -215,7 +216,7 @@ # update the global stack counter rffi.stackcounter.stacks_counter += 1 # - s_addr = annmodel.SomeAddress() + s_addr = SomeAddress() s_None = annmodel.s_None self.gc_detach_callback_pieces_ptr = getfn(gc_detach_callback_pieces, [], s_addr) @@ -327,10 +328,10 @@ inline=True) self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None) self.thread_before_fork_ptr = getfn(thread_before_fork, [], - annmodel.SomeAddress()) + SomeAddress()) self.thread_after_fork_ptr = getfn(thread_after_fork, [annmodel.SomeInteger(), - annmodel.SomeAddress()], + SomeAddress()], annmodel.s_None) # # check that the order of the need_*() is correct for us: if we @@ -496,7 +497,7 @@ # location -- but we check for consistency that ebp points # to a JITFRAME object. 
from rpython.jit.backend.llsupport.jitframe import STACK_DEPTH_OFS - + tid = self.gc.get_possibly_forwarded_type_id(ebp_in_caller) ll_assert(rffi.cast(lltype.Signed, tid) == rffi.cast(lltype.Signed, self.frame_tid), diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1,4 +1,5 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomeAddress from rpython.rlib import rgc from rpython.rtyper import rmodel, annlowlevel from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup @@ -195,21 +196,11 @@ # the point of this little dance is to not annotate # self.gcdata.static_root_xyz as constants. XXX is it still needed?? data_classdef = bk.getuniqueclassdef(gctypelayout.GCData) - data_classdef.generalize_attr( - 'static_root_start', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'static_root_nongcend', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'static_root_end', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'max_type_id', - annmodel.SomeInteger()) - data_classdef.generalize_attr( - 'typeids_z', - annmodel.SomeAddress()) + data_classdef.generalize_attr('static_root_start', SomeAddress()) + data_classdef.generalize_attr('static_root_nongcend', SomeAddress()) + data_classdef.generalize_attr('static_root_end', SomeAddress()) + data_classdef.generalize_attr('max_type_id', annmodel.SomeInteger()) + data_classdef.generalize_attr('typeids_z', SomeAddress()) annhelper = annlowlevel.MixLevelHelperAnnotator(self.translator.rtyper) @@ -310,13 +301,13 @@ self.collect_ptr = getfn(GCClass.collect.im_func, [s_gc, annmodel.SomeInteger()], annmodel.s_None) self.can_move_ptr = getfn(GCClass.can_move.im_func, - [s_gc, annmodel.SomeAddress()], + [s_gc, SomeAddress()], annmodel.SomeBool()) if hasattr(GCClass, 'shrink_array'): self.shrink_array_ptr = getfn( GCClass.shrink_array.im_func, - [s_gc, annmodel.SomeAddress(), + [s_gc, SomeAddress(), annmodel.SomeInteger(nonneg=True)], annmodel.s_Bool) else: self.shrink_array_ptr = None @@ -333,7 +324,7 @@ if hasattr(GCClass, 'writebarrier_before_copy'): self.wb_before_copy_ptr = \ getfn(GCClass.writebarrier_before_copy.im_func, - [s_gc] + [annmodel.SomeAddress()] * 2 + + [s_gc] + [SomeAddress()] * 2 + [annmodel.SomeInteger()] * 3, annmodel.SomeBool()) elif GCClass.needs_write_barrier: raise NotImplementedError("GC needs write barrier, but does not provide writebarrier_before_copy functionality") @@ -421,7 +412,7 @@ if getattr(GCClass, 'obtain_free_space', False): self.obtainfreespace_ptr = getfn(GCClass.obtain_free_space.im_func, [s_gc, annmodel.SomeInteger()], - annmodel.SomeAddress()) + SomeAddress()) if GCClass.moving_gc: self.id_ptr = getfn(GCClass.id.im_func, @@ -470,8 +461,7 @@ self.write_barrier_from_array_ptr = None if GCClass.needs_write_barrier: self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func, - [s_gc, - annmodel.SomeAddress()], + [s_gc, SomeAddress()], annmodel.s_None, inline=True) func = getattr(gcdata.gc, 'remember_young_pointer', None) @@ -479,13 +469,12 @@ # func should not be a bound method, but a real function assert isinstance(func, types.FunctionType) self.write_barrier_failing_case_ptr = getfn(func, - [annmodel.SomeAddress()], + [SomeAddress()], annmodel.s_None) func = getattr(GCClass, 'write_barrier_from_array', None) if func is not None: self.write_barrier_from_array_ptr = getfn(func.im_func, - [s_gc, - 
annmodel.SomeAddress(), + [s_gc, SomeAddress(), annmodel.SomeInteger()], annmodel.s_None, inline=True) @@ -497,7 +486,7 @@ assert isinstance(func, types.FunctionType) self.write_barrier_from_array_failing_case_ptr = \ getfn(func, - [annmodel.SomeAddress()], + [SomeAddress()], annmodel.s_None) diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -5,6 +5,7 @@ from rpython.rtyper import rmodel from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.llannotation import SomeAddress from rpython.memory.gctransform.framework import ( BaseFrameworkGCTransformer, BaseRootWalker, sizeofaddr) from rpython.rtyper.rbuiltin import gen_cast @@ -14,11 +15,11 @@ def annotate_walker_functions(self, getfn): self.incr_stack_ptr = getfn(self.root_walker.incr_stack, [annmodel.SomeInteger()], - annmodel.SomeAddress(), + SomeAddress(), inline = True) self.decr_stack_ptr = getfn(self.root_walker.decr_stack, [annmodel.SomeInteger()], - annmodel.SomeAddress(), + SomeAddress(), inline = True) def build_root_walker(self): @@ -211,7 +212,7 @@ # no thread_before_fork_ptr here self.thread_after_fork_ptr = getfn(thread_after_fork, [annmodel.SomeInteger(), - annmodel.SomeAddress()], + SomeAddress()], annmodel.s_None, minimal_transform=False) @@ -242,7 +243,7 @@ shadow_stack_pool.start_fresh_new_state() s_gcref = annmodel.SomePtr(llmemory.GCREF) - s_addr = annmodel.SomeAddress() + s_addr = SomeAddress() self.gc_shadowstackref_new_ptr = getfn(gc_shadowstackref_new, [], s_gcref, minimal_transform=False) diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -905,11 +905,12 @@ _about_ = raw_memmove def compute_result_annotation(self, s_from, s_to, s_size): - from rpython.annotator.model import SomeAddress, SomeInteger + from rpython.annotator.model import SomeInteger + from rpython.rtyper.llannotation import SomeAddress assert isinstance(s_from, SomeAddress) assert isinstance(s_to, SomeAddress) assert isinstance(s_size, SomeInteger) - + def specialize_call(self, hop): hop.exception_cannot_occur() v_list = hop.inputargs(Address, Address, lltype.Signed) diff --git a/rpython/rtyper/raddress.py b/rpython/rtyper/raddress.py --- a/rpython/rtyper/raddress.py +++ b/rpython/rtyper/raddress.py @@ -1,5 +1,5 @@ # rtyping of memory address operations -from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomeAddress, SomeTypedAddressAccess from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.llmemory import (NULL, Address, @@ -9,14 +9,14 @@ from rpython.tool.pairtype import pairtype -class __extend__(annmodel.SomeAddress): +class __extend__(SomeAddress): def rtyper_makerepr(self, rtyper): return address_repr def rtyper_makekey(self): return self.__class__, -class __extend__(annmodel.SomeTypedAddressAccess): +class __extend__(SomeTypedAddressAccess): def rtyper_makerepr(self, rtyper): return TypedAddressAccessRepr(self.type) diff --git a/rpython/rtyper/test/test_nongc.py b/rpython/rtyper/test/test_nongc.py --- a/rpython/rtyper/test/test_nongc.py +++ b/rpython/rtyper/test/test_nongc.py @@ -1,6 +1,7 @@ import py from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation 
import SomeAddress from rpython.annotator.annrpython import RPythonAnnotator from rpython.rtyper.rtyper import RPythonTyper from rpython.rlib.objectmodel import free_non_gc_object @@ -25,7 +26,7 @@ assert t.method2() == 42 free_non_gc_object(t) py.test.raises(RuntimeError, "t.method1()") - py.test.raises(RuntimeError, "t.method2()") + py.test.raises(RuntimeError, "t.method2()") py.test.raises(RuntimeError, "t.a") py.test.raises(RuntimeError, "t.a = 1") py.test.raises(AssertionError, "free_non_gc_object(TestClass2())") @@ -43,8 +44,8 @@ rtyper = RPythonTyper(a) rtyper.specialize() assert (Adef, 'raw') in rtyper.instance_reprs - assert (Adef, 'gc') not in rtyper.instance_reprs - + assert (Adef, 'gc') not in rtyper.instance_reprs + def test_alloc_flavor_subclassing(): class A: _alloc_flavor_ = "raw" @@ -64,7 +65,7 @@ assert (Adef, 'raw') in rtyper.instance_reprs assert (Adef, 'gc') not in rtyper.instance_reprs assert (Bdef, 'raw') in rtyper.instance_reprs - assert (Bdef, 'gc') not in rtyper.instance_reprs + assert (Bdef, 'gc') not in rtyper.instance_reprs def test_unsupported(): class A: @@ -85,7 +86,7 @@ pass class C(B): pass - + def f(i): if i == 0: o = None @@ -226,7 +227,7 @@ return b a = RPythonAnnotator() #does not raise: - s = a.build_types(malloc_and_free, [annmodel.SomeAddress()]) - assert isinstance(s, annmodel.SomeAddress) + s = a.build_types(malloc_and_free, [SomeAddress()]) + assert isinstance(s, SomeAddress) rtyper = RPythonTyper(a) rtyper.specialize() From noreply at buildbot.pypy.org Sun Jan 26 20:16:28 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 26 Jan 2014 20:16:28 +0100 (CET) Subject: [pypy-commit] pypy default: add missing file Message-ID: <20140126191628.376BB1C00F8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68954:a7ccd77bc726 Date: 2014-01-26 19:15 +0000 http://bitbucket.org/pypy/pypy/changeset/a7ccd77bc726/ Log: add missing file diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py new file mode 100644 --- /dev/null +++ b/rpython/rtyper/llannotation.py @@ -0,0 +1,26 @@ +""" +Code for annotating low-level thingies. +""" +from rpython.annotator.model import SomeObject + +class SomeAddress(SomeObject): + immutable = True + + def can_be_none(self): + return False + + def is_null_address(self): + return self.is_immutable_constant() and not self.const + +class SomeTypedAddressAccess(SomeObject): + """This class is used to annotate the intermediate value that + appears in expressions of the form: + addr.signed[offset] and addr.signed[offset] = value + """ + + def __init__(self, type): + self.type = type + + def can_be_none(self): + return False + From noreply at buildbot.pypy.org Sun Jan 26 22:16:55 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 26 Jan 2014 22:16:55 +0100 (CET) Subject: [pypy-commit] pypy default: Document that we have a LLVM translation backend in-tree to prevent that someone else starts working on it. Message-ID: <20140126211655.A9B0F1C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r68955:04acdc4163f4 Date: 2014-01-26 22:16 +0100 http://bitbucket.org/pypy/pypy/changeset/04acdc4163f4/ Log: Document that we have a LLVM translation backend in-tree to prevent that someone else starts working on it. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -426,25 +426,12 @@ Could we use LLVM? ------------------ -In theory yes. 
But we tried to use it 5 or 6 times already, as a -translation backend or as a JIT backend --- and failed each time. +There is a (static) translation backend using LLVM in the branch +``llvm-translation-backend``. It can translate PyPy with or without the JIT on +Linux. -In more details: using LLVM as a (static) translation backend is -pointless nowadays because you can generate C code and compile it with -clang. (Note that compiling PyPy with clang gives a result that is not -faster than compiling it with gcc.) We might in theory get extra -benefits from LLVM's GC integration, but this requires more work on the -LLVM side before it would be remotely useful. Anyway, it could be -interfaced via a custom primitive in the C code. - -On the other hand, using LLVM as our JIT backend looks interesting as -well --- but again we made an attempt, and it failed: LLVM has no way to -patch the generated machine code. - -So the position of the core PyPy developers is that if anyone wants to -make an N+1'th attempt with LLVM, they are welcome, and will be happy to -provide help in the IRC channel, but they are left with the burden of proof -that (a) it works and (b) it gives important benefits. +Using LLVM as our JIT backend looks interesting as well -- we made an attempt, +but it failed: LLVM has no way to patch the generated machine code. ---------------------- How do I compile PyPy? From noreply at buildbot.pypy.org Sun Jan 26 22:30:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Jan 2014 22:30:19 +0100 (CET) Subject: [pypy-commit] pypy default: Fix from an example by mitsuhiko on irc: sys.exc_clear() can crash PyPy Message-ID: <20140126213019.283101C069E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68956:77ecf91c635a Date: 2014-01-26 19:08 +0100 http://bitbucket.org/pypy/pypy/changeset/77ecf91c635a/ Log: Fix from an example by mitsuhiko on irc: sys.exc_clear() can crash PyPy in corner cases, see test. diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -6,7 +6,7 @@ from errno import EINTR from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, specialize from pypy.interpreter import debug @@ -40,12 +40,11 @@ self.debug_excs = [] def clear(self, space): - # for sys.exc_clear() - self.w_type = space.w_None - self._w_value = space.w_None - self._application_traceback = None - if not we_are_translated(): - del self.debug_excs[:] + # XXX remove this method. The point is that we cannot always + # hack at 'self' to clear w_type and _w_value, because in some + # corner cases the OperationError will be used again: see + # test_interpreter.py:test_with_statement_and_sys_clear. + pass def match(self, space, w_check_class): "Check if this application-level exception matches 'w_check_class'." 
@@ -300,6 +299,10 @@ """ self._application_traceback = traceback + at specialize.memo() +def get_cleared_operation_error(space): + return OperationError(space.w_None, space.w_None) + # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string # formatting with '%' -- in the common case were we don't diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -1,5 +1,5 @@ import sys -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from rpython.rlib.unroll import unrolling_iterable from rpython.rlib import jit @@ -217,6 +217,17 @@ if frame: # else, the exception goes nowhere and is lost frame.last_exception = operror + def clear_sys_exc_info(self): + # Find the frame out of which sys_exc_info() would return its result, + # and hack this frame's last_exception to become the cleared + # OperationError (which is different from None!). + frame = self.gettopframe_nohidden() + while frame: + if frame.last_exception is not None: + frame.last_exception = get_cleared_operation_error(self.space) + break + frame = self.getnextframe_nohidden(frame) + @jit.dont_look_inside def settrace(self, w_func): """Set the global trace function.""" diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -311,3 +311,63 @@ assert str(e) == "maximum recursion depth exceeded" else: assert 0, "should have raised!" + + def test_with_statement_and_sys_clear(self): + import sys + class CM(object): + def __enter__(self): + return self + def __exit__(self, exc_type, exc_value, tb): + sys.exc_clear() + try: + with CM(): + 1 / 0 + raise AssertionError("should not be reached") + except ZeroDivisionError: + pass + + def test_sys_clear_while_handling_exception(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + sys.exc_clear() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + f() + + def test_sys_clear_while_handling_exception_nested(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + h1() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + def h1(): + sys.exc_clear() + f() diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -155,9 +155,7 @@ to exc_info() will return (None,None,None) until another exception is raised and caught in the current thread or the execution stack returns to a frame where another exception is being handled.""" - operror = space.getexecutioncontext().sys_exc_info() - if operror is not None: - operror.clear(space) + space.getexecutioncontext().clear_sys_exc_info() def settrace(space, w_func): """Set the global debug tracing function. 
It will be called on each From noreply at buildbot.pypy.org Sun Jan 26 22:30:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Jan 2014 22:30:20 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140126213020.761051C069E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68957:76462f1c604c Date: 2014-01-26 22:29 +0100 http://bitbucket.org/pypy/pypy/changeset/76462f1c604c/ Log: merge heads diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -426,25 +426,12 @@ Could we use LLVM? ------------------ -In theory yes. But we tried to use it 5 or 6 times already, as a -translation backend or as a JIT backend --- and failed each time. +There is a (static) translation backend using LLVM in the branch +``llvm-translation-backend``. It can translate PyPy with or without the JIT on +Linux. -In more details: using LLVM as a (static) translation backend is -pointless nowadays because you can generate C code and compile it with -clang. (Note that compiling PyPy with clang gives a result that is not -faster than compiling it with gcc.) We might in theory get extra -benefits from LLVM's GC integration, but this requires more work on the -LLVM side before it would be remotely useful. Anyway, it could be -interfaced via a custom primitive in the C code. - -On the other hand, using LLVM as our JIT backend looks interesting as -well --- but again we made an attempt, and it failed: LLVM has no way to -patch the generated machine code. - -So the position of the core PyPy developers is that if anyone wants to -make an N+1'th attempt with LLVM, they are welcome, and will be happy to -provide help in the IRC channel, but they are left with the burden of proof -that (a) it works and (b) it gives important benefits. +Using LLVM as our JIT backend looks interesting as well -- we made an attempt, +but it failed: LLVM has no way to patch the generated machine code. ---------------------- How do I compile PyPy? diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -15,6 +15,8 @@ #define HAVE_UNICODE #define WITHOUT_COMPLEX #define HAVE_WCHAR_H 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_STAT_H 1 /* PyPy supposes Py_UNICODE == wchar_t */ #define HAVE_USABLE_WCHAR_T 1 diff --git a/pypy/module/cpyext/include/pyport.h b/pypy/module/cpyext/include/pyport.h --- a/pypy/module/cpyext/include/pyport.h +++ b/pypy/module/cpyext/include/pyport.h @@ -64,4 +64,45 @@ # error "Python needs a typedef for Py_uintptr_t in pyport.h." #endif /* HAVE_UINTPTR_T */ +/******************************* + * stat() and fstat() fiddling * + *******************************/ + +/* We expect that stat and fstat exist on most systems. + * It's confirmed on Unix, Mac and Windows. + * If you don't have them, add + * #define DONT_HAVE_STAT + * and/or + * #define DONT_HAVE_FSTAT + * to your pyconfig.h. Python code beyond this should check HAVE_STAT and + * HAVE_FSTAT instead. + * Also + * #define HAVE_SYS_STAT_H + * if exists on your platform, and + * #define HAVE_STAT_H + * if does. 
+ */ +#ifndef DONT_HAVE_STAT +#define HAVE_STAT +#endif + +#ifndef DONT_HAVE_FSTAT +#define HAVE_FSTAT +#endif + +#ifdef RISCOS +#include +#include "unixstuff.h" +#endif + +#ifdef HAVE_SYS_STAT_H +#if defined(PYOS_OS2) && defined(PYCC_GCC) +#include +#endif +#include +#elif defined(HAVE_STAT_H) +#include +#else +#endif + #endif /* Py_PYPORT_H */ diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -10,10 +10,11 @@ SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, - SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, + SomeWeakRef, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) +from rpython.rtyper.llannotation import SomeAddress, SomeTypedAddressAccess from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant from rpython.flowspace.operation import op diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -9,10 +9,11 @@ from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, SomeString, SomeChar, SomeFloat, SomePtr, unionof, SomeInstance, SomeDict, - SomeBuiltin, SomePBC, SomeInteger, TLS, SomeAddress, SomeUnicodeCodePoint, + SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray, SomeConstantType) +from rpython.rtyper.llannotation import SomeAddress from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -5,11 +5,12 @@ from rpython.annotator.model import ( SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, s_Bool, - SomeUnicodeCodePoint, SomeAddress, SomeFloat, unionof, SomeUnicodeString, + SomeUnicodeCodePoint, SomeFloat, unionof, SomeUnicodeString, SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, SomeOrderedDict, SomeByteArray, annotation_to_lltype, lltype_to_annotation, ll_to_annotation, add_knowntypedata, s_ImpossibleValue,) +from rpython.rtyper.llannotation import SomeAddress from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import description from rpython.flowspace.model import Constant diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -568,33 +568,6 @@ # 'classdef' is None for known-to-be-dead weakrefs. 
self.classdef = classdef -# ____________________________________________________________ -# memory addresses - -from rpython.rtyper.lltypesystem import llmemory - - -class SomeAddress(SomeObject): - immutable = True - - def can_be_none(self): - return False - - def is_null_address(self): - return self.is_immutable_constant() and not self.const - - -# The following class is used to annotate the intermediate value that -# appears in expressions of the form: -# addr.signed[offset] and addr.signed[offset] = value - -class SomeTypedAddressAccess(SomeObject): - def __init__(self, type): - self.type = type - - def can_be_none(self): - return False - #____________________________________________________________ # annotation of low-level types @@ -630,6 +603,8 @@ return False +from rpython.rtyper.llannotation import SomeAddress +from rpython.rtyper.lltypesystem import llmemory annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -9,9 +9,10 @@ from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, - SomePBC, SomeTypedAddressAccess, SomeAddress, SomeType, s_ImpossibleValue, + SomePBC, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) +from rpython.rtyper.llannotation import SomeAddress, SomeTypedAddressAccess from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -7,6 +7,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.gctransform.framework import ( BaseFrameworkGCTransformer, BaseRootWalker) +from rpython.rtyper.llannotation import SomeAddress from rpython.rtyper.rbuiltin import gen_cast from rpython.translator.unsimplify import copyvar, varoftype from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -215,7 +216,7 @@ # update the global stack counter rffi.stackcounter.stacks_counter += 1 # - s_addr = annmodel.SomeAddress() + s_addr = SomeAddress() s_None = annmodel.s_None self.gc_detach_callback_pieces_ptr = getfn(gc_detach_callback_pieces, [], s_addr) @@ -327,10 +328,10 @@ inline=True) self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None) self.thread_before_fork_ptr = getfn(thread_before_fork, [], - annmodel.SomeAddress()) + SomeAddress()) self.thread_after_fork_ptr = getfn(thread_after_fork, [annmodel.SomeInteger(), - annmodel.SomeAddress()], + SomeAddress()], annmodel.s_None) # # check that the order of the need_*() is correct for us: if we @@ -496,7 +497,7 @@ # location -- but we check for consistency that ebp points # to a JITFRAME object. 
from rpython.jit.backend.llsupport.jitframe import STACK_DEPTH_OFS - + tid = self.gc.get_possibly_forwarded_type_id(ebp_in_caller) ll_assert(rffi.cast(lltype.Signed, tid) == rffi.cast(lltype.Signed, self.frame_tid), diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1,4 +1,5 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomeAddress from rpython.rlib import rgc from rpython.rtyper import rmodel, annlowlevel from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup @@ -195,21 +196,11 @@ # the point of this little dance is to not annotate # self.gcdata.static_root_xyz as constants. XXX is it still needed?? data_classdef = bk.getuniqueclassdef(gctypelayout.GCData) - data_classdef.generalize_attr( - 'static_root_start', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'static_root_nongcend', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'static_root_end', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'max_type_id', - annmodel.SomeInteger()) - data_classdef.generalize_attr( - 'typeids_z', - annmodel.SomeAddress()) + data_classdef.generalize_attr('static_root_start', SomeAddress()) + data_classdef.generalize_attr('static_root_nongcend', SomeAddress()) + data_classdef.generalize_attr('static_root_end', SomeAddress()) + data_classdef.generalize_attr('max_type_id', annmodel.SomeInteger()) + data_classdef.generalize_attr('typeids_z', SomeAddress()) annhelper = annlowlevel.MixLevelHelperAnnotator(self.translator.rtyper) @@ -310,13 +301,13 @@ self.collect_ptr = getfn(GCClass.collect.im_func, [s_gc, annmodel.SomeInteger()], annmodel.s_None) self.can_move_ptr = getfn(GCClass.can_move.im_func, - [s_gc, annmodel.SomeAddress()], + [s_gc, SomeAddress()], annmodel.SomeBool()) if hasattr(GCClass, 'shrink_array'): self.shrink_array_ptr = getfn( GCClass.shrink_array.im_func, - [s_gc, annmodel.SomeAddress(), + [s_gc, SomeAddress(), annmodel.SomeInteger(nonneg=True)], annmodel.s_Bool) else: self.shrink_array_ptr = None @@ -333,7 +324,7 @@ if hasattr(GCClass, 'writebarrier_before_copy'): self.wb_before_copy_ptr = \ getfn(GCClass.writebarrier_before_copy.im_func, - [s_gc] + [annmodel.SomeAddress()] * 2 + + [s_gc] + [SomeAddress()] * 2 + [annmodel.SomeInteger()] * 3, annmodel.SomeBool()) elif GCClass.needs_write_barrier: raise NotImplementedError("GC needs write barrier, but does not provide writebarrier_before_copy functionality") @@ -421,7 +412,7 @@ if getattr(GCClass, 'obtain_free_space', False): self.obtainfreespace_ptr = getfn(GCClass.obtain_free_space.im_func, [s_gc, annmodel.SomeInteger()], - annmodel.SomeAddress()) + SomeAddress()) if GCClass.moving_gc: self.id_ptr = getfn(GCClass.id.im_func, @@ -470,8 +461,7 @@ self.write_barrier_from_array_ptr = None if GCClass.needs_write_barrier: self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func, - [s_gc, - annmodel.SomeAddress()], + [s_gc, SomeAddress()], annmodel.s_None, inline=True) func = getattr(gcdata.gc, 'remember_young_pointer', None) @@ -479,13 +469,12 @@ # func should not be a bound method, but a real function assert isinstance(func, types.FunctionType) self.write_barrier_failing_case_ptr = getfn(func, - [annmodel.SomeAddress()], + [SomeAddress()], annmodel.s_None) func = getattr(GCClass, 'write_barrier_from_array', None) if func is not None: self.write_barrier_from_array_ptr = getfn(func.im_func, - [s_gc, - 
annmodel.SomeAddress(), + [s_gc, SomeAddress(), annmodel.SomeInteger()], annmodel.s_None, inline=True) @@ -497,7 +486,7 @@ assert isinstance(func, types.FunctionType) self.write_barrier_from_array_failing_case_ptr = \ getfn(func, - [annmodel.SomeAddress()], + [SomeAddress()], annmodel.s_None) diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -5,6 +5,7 @@ from rpython.rtyper import rmodel from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.llannotation import SomeAddress from rpython.memory.gctransform.framework import ( BaseFrameworkGCTransformer, BaseRootWalker, sizeofaddr) from rpython.rtyper.rbuiltin import gen_cast @@ -14,11 +15,11 @@ def annotate_walker_functions(self, getfn): self.incr_stack_ptr = getfn(self.root_walker.incr_stack, [annmodel.SomeInteger()], - annmodel.SomeAddress(), + SomeAddress(), inline = True) self.decr_stack_ptr = getfn(self.root_walker.decr_stack, [annmodel.SomeInteger()], - annmodel.SomeAddress(), + SomeAddress(), inline = True) def build_root_walker(self): @@ -211,7 +212,7 @@ # no thread_before_fork_ptr here self.thread_after_fork_ptr = getfn(thread_after_fork, [annmodel.SomeInteger(), - annmodel.SomeAddress()], + SomeAddress()], annmodel.s_None, minimal_transform=False) @@ -242,7 +243,7 @@ shadow_stack_pool.start_fresh_new_state() s_gcref = annmodel.SomePtr(llmemory.GCREF) - s_addr = annmodel.SomeAddress() + s_addr = SomeAddress() self.gc_shadowstackref_new_ptr = getfn(gc_shadowstackref_new, [], s_gcref, minimal_transform=False) diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py new file mode 100644 --- /dev/null +++ b/rpython/rtyper/llannotation.py @@ -0,0 +1,26 @@ +""" +Code for annotating low-level thingies. 
+""" +from rpython.annotator.model import SomeObject + +class SomeAddress(SomeObject): + immutable = True + + def can_be_none(self): + return False + + def is_null_address(self): + return self.is_immutable_constant() and not self.const + +class SomeTypedAddressAccess(SomeObject): + """This class is used to annotate the intermediate value that + appears in expressions of the form: + addr.signed[offset] and addr.signed[offset] = value + """ + + def __init__(self, type): + self.type = type + + def can_be_none(self): + return False + diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -905,11 +905,12 @@ _about_ = raw_memmove def compute_result_annotation(self, s_from, s_to, s_size): - from rpython.annotator.model import SomeAddress, SomeInteger + from rpython.annotator.model import SomeInteger + from rpython.rtyper.llannotation import SomeAddress assert isinstance(s_from, SomeAddress) assert isinstance(s_to, SomeAddress) assert isinstance(s_size, SomeInteger) - + def specialize_call(self, hop): hop.exception_cannot_occur() v_list = hop.inputargs(Address, Address, lltype.Signed) diff --git a/rpython/rtyper/raddress.py b/rpython/rtyper/raddress.py --- a/rpython/rtyper/raddress.py +++ b/rpython/rtyper/raddress.py @@ -1,5 +1,5 @@ # rtyping of memory address operations -from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomeAddress, SomeTypedAddressAccess from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.llmemory import (NULL, Address, @@ -9,14 +9,14 @@ from rpython.tool.pairtype import pairtype -class __extend__(annmodel.SomeAddress): +class __extend__(SomeAddress): def rtyper_makerepr(self, rtyper): return address_repr def rtyper_makekey(self): return self.__class__, -class __extend__(annmodel.SomeTypedAddressAccess): +class __extend__(SomeTypedAddressAccess): def rtyper_makerepr(self, rtyper): return TypedAddressAccessRepr(self.type) diff --git a/rpython/rtyper/test/test_nongc.py b/rpython/rtyper/test/test_nongc.py --- a/rpython/rtyper/test/test_nongc.py +++ b/rpython/rtyper/test/test_nongc.py @@ -1,6 +1,7 @@ import py from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomeAddress from rpython.annotator.annrpython import RPythonAnnotator from rpython.rtyper.rtyper import RPythonTyper from rpython.rlib.objectmodel import free_non_gc_object @@ -25,7 +26,7 @@ assert t.method2() == 42 free_non_gc_object(t) py.test.raises(RuntimeError, "t.method1()") - py.test.raises(RuntimeError, "t.method2()") + py.test.raises(RuntimeError, "t.method2()") py.test.raises(RuntimeError, "t.a") py.test.raises(RuntimeError, "t.a = 1") py.test.raises(AssertionError, "free_non_gc_object(TestClass2())") @@ -43,8 +44,8 @@ rtyper = RPythonTyper(a) rtyper.specialize() assert (Adef, 'raw') in rtyper.instance_reprs - assert (Adef, 'gc') not in rtyper.instance_reprs - + assert (Adef, 'gc') not in rtyper.instance_reprs + def test_alloc_flavor_subclassing(): class A: _alloc_flavor_ = "raw" @@ -64,7 +65,7 @@ assert (Adef, 'raw') in rtyper.instance_reprs assert (Adef, 'gc') not in rtyper.instance_reprs assert (Bdef, 'raw') in rtyper.instance_reprs - assert (Bdef, 'gc') not in rtyper.instance_reprs + assert (Bdef, 'gc') not in rtyper.instance_reprs def test_unsupported(): class A: @@ -85,7 +86,7 @@ pass class C(B): pass - + def f(i): if i == 0: o = 
None @@ -226,7 +227,7 @@ return b a = RPythonAnnotator() #does not raise: - s = a.build_types(malloc_and_free, [annmodel.SomeAddress()]) - assert isinstance(s, annmodel.SomeAddress) + s = a.build_types(malloc_and_free, [SomeAddress()]) + assert isinstance(s, SomeAddress) rtyper = RPythonTyper(a) rtyper.specialize() From noreply at buildbot.pypy.org Mon Jan 27 00:37:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Jan 2014 00:37:34 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix Message-ID: <20140126233734.F1DE71C069E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r68958:beafaef52b05 Date: 2014-01-27 00:36 +0100 http://bitbucket.org/pypy/pypy/changeset/beafaef52b05/ Log: Test and fix diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -744,6 +744,9 @@ else: raise OperationError(space.w_TypeError, space.wrap("raise: no active exception to re-raise")) + if operror.w_type is space.w_None: + raise OperationError(space.w_TypeError, + space.wrap("raise: the exception to re-raise was cleared")) # re-raise, no new traceback obj will be attached self.last_exception = operror raise RaiseWithExplicitTraceback(operror) diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -371,3 +371,13 @@ def h1(): sys.exc_clear() f() + + def test_sys_clear_reraise(self): + import sys + def f(): + try: + 1 / 0 + except ZeroDivisionError: + sys.exc_clear() + raise + raises(TypeError, f) From noreply at buildbot.pypy.org Mon Jan 27 04:29:31 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 27 Jan 2014 04:29:31 +0100 (CET) Subject: [pypy-commit] pypy default: move SomePtr, SomeInteriorPtr, SomeLLADTMeth to rpython.rtyper Message-ID: <20140127032931.761061D25CC@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68959:3534680bda85 Date: 2014-01-27 03:28 +0000 http://bitbucket.org/pypy/pypy/changeset/3534680bda85/ Log: move SomePtr, SomeInteriorPtr, SomeLLADTMeth to rpython.rtyper diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -829,7 +829,7 @@ # ____________________________________________________________ # annotation of low-level types -from rpython.annotator.model import SomePtr +from rpython.rtyper.llannotation import SomePtr from rpython.annotator.model import ll_to_annotation, annotation_to_lltype class __extend__(pairtype(SomePtr, SomePtr)): diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -8,12 +8,12 @@ from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, - SomeString, SomeChar, SomeFloat, SomePtr, unionof, SomeInstance, SomeDict, + SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, - s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple, + s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray, SomeConstantType) -from rpython.rtyper.llannotation import SomeAddress + SomeWeakRef, lltype_to_annotation, SomeByteArray, SomeConstantType) +from 
rpython.rtyper.llannotation import SomeAddress, SomePtr, SomeLLADTMeth from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -357,7 +357,7 @@ @analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr) def llmemory_cast_ptr_to_adr(s): - from rpython.annotator.model import SomeInteriorPtr + from rpython.rtyper.llannotation import SomeInteriorPtr assert not isinstance(s, SomeInteriorPtr) return SomeAddress() @@ -390,7 +390,7 @@ # annotation of low-level types -from rpython.annotator.model import SomePtr +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import lltype @analyzer_for(lltype.malloc) diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -624,7 +624,7 @@ except ValueError: pass else: - from rpython.annotator.model import SomePtr + from rpython.rtyper.llannotation import SomePtr assert not isinstance(s_arg, SomePtr) else: # call the constructor diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -574,36 +574,8 @@ from rpython.rtyper.lltypesystem import lltype -class SomePtr(SomeObject): - knowntype = lltype._ptr - immutable = True - def __init__(self, ll_ptrtype): - assert isinstance(ll_ptrtype, lltype.Ptr) - self.ll_ptrtype = ll_ptrtype - - def can_be_none(self): - return False - - -class SomeInteriorPtr(SomePtr): - def __init__(self, ll_ptrtype): - assert isinstance(ll_ptrtype, lltype.InteriorPtr) - self.ll_ptrtype = ll_ptrtype - - -class SomeLLADTMeth(SomeObject): - immutable = True - - def __init__(self, ll_ptrtype, func): - self.ll_ptrtype = ll_ptrtype - self.func = func - - def can_be_none(self): - return False - - -from rpython.rtyper.llannotation import SomeAddress +from rpython.rtyper.llannotation import SomeAddress, SomePtr, SomeInteriorPtr from rpython.rtyper.lltypesystem import llmemory annotation_to_ll_map = [ diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -1,6 +1,7 @@ import py from rpython.annotator.model import * +from rpython.rtyper.llannotation import SomePtr from rpython.annotator.listdef import ListDef from rpython.translator.translator import TranslationContext diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -759,7 +759,7 @@ raise AnnotatorError("Cannot call len on a pbc") # annotation of low-level types -from rpython.annotator.model import SomePtr, SomeLLADTMeth +from rpython.rtyper.llannotation import SomePtr, SomeLLADTMeth from rpython.annotator.model import ll_to_annotation, lltype_to_annotation, annotation_to_lltype class __extend__(SomePtr): diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1,5 +1,5 @@ from rpython.annotator import model as annmodel -from rpython.rtyper.llannotation import SomeAddress +from rpython.rtyper.llannotation import SomeAddress, SomePtr from rpython.rlib import rgc from rpython.rtyper 
import rmodel, annlowlevel from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup @@ -268,7 +268,7 @@ from rpython.memory.gc.base import ARRAY_TYPEID_MAP from rpython.memory.gc import inspector - s_gcref = annmodel.SomePtr(llmemory.GCREF) + s_gcref = SomePtr(llmemory.GCREF) gcdata = self.gcdata translator = self.translator @@ -314,7 +314,7 @@ if hasattr(GCClass, 'heap_stats'): self.heap_stats_ptr = getfn(GCClass.heap_stats.im_func, - [s_gc], annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)), + [s_gc], SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)), minimal_transform=False) self.get_member_index_ptr = getfn( GCClass.get_member_index.im_func, @@ -448,8 +448,7 @@ minimal_transform=False) self.get_typeids_z_ptr = getfn(inspector.get_typeids_z, [s_gc], - annmodel.SomePtr( - lltype.Ptr(rgc.ARRAY_OF_CHAR)), + SomePtr(lltype.Ptr(rgc.ARRAY_OF_CHAR)), minimal_transform=False) self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func, diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -1,4 +1,5 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rlib.debug import ll_assert from rpython.rlib.nonconst import NonConstant from rpython.rlib import rgc @@ -242,7 +243,7 @@ def gc_start_fresh_new_state(): shadow_stack_pool.start_fresh_new_state() - s_gcref = annmodel.SomePtr(llmemory.GCREF) + s_gcref = SomePtr(llmemory.GCREF) s_addr = SomeAddress() self.gc_shadowstackref_new_ptr = getfn(gc_shadowstackref_new, [], s_gcref, diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -3,6 +3,7 @@ from rpython.translator.c import gc from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup from rpython.memory.gctransform import framework, shadowstack from rpython.rtyper.lltypesystem.lloperation import llop, void @@ -98,7 +99,7 @@ from rpython.translator.c.genc import CStandaloneBuilder - s_args = annmodel.SomePtr(lltype.Ptr(ARGS)) + s_args = SomePtr(lltype.Ptr(ARGS)) t = rtype(entrypoint, [s_args], gcname=cls.gcname, taggedpointers=cls.taggedpointers) @@ -827,7 +828,7 @@ from rpython.translator.translator import graphof from rpython.flowspace.model import Constant from rpython.rtyper.lltypesystem import rffi - layoutbuilder = cls.ensure_layoutbuilder(translator) + layoutbuilder = cls.ensure_layoutbuilder(translator) type_id = layoutbuilder.get_type_id(P) # # now fix the do_malloc_fixedsize_clear in the graph of g @@ -1116,7 +1117,7 @@ def test_adr_of_nursery(self): run = self.runner("adr_of_nursery") - res = run([]) + res = run([]) class TestGenerationalNoFullCollectGC(GCTest): # test that nursery is doing its job and that no full collection diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -1,4 +1,5 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rlib.objectmodel import specialize from rpython.rtyper.annlowlevel import (cast_instance_to_base_ptr, cast_base_ptr_to_instance, llstr) @@ -50,7 +51,7 @@ def emptyval(): return lltype.nullptr(llmemory.GCREF.TO) - at 
register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def resop_new(no, llargs, llres): from rpython.jit.metainterp.history import ResOperation @@ -61,7 +62,7 @@ res = None return _cast_to_gcref(ResOperation(no, args, res)) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def boxint_new(no): from rpython.jit.metainterp.history import BoxInt return _cast_to_gcref(BoxInt(no)) @@ -74,7 +75,7 @@ def resop_getopname(llop): return llstr(_cast_to_resop(llop).getopname()) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def resop_getarg(llop, no): return _cast_to_gcref(_cast_to_resop(llop).getarg(no)) @@ -82,7 +83,7 @@ def resop_setarg(llop, no, llbox): _cast_to_resop(llop).setarg(no, _cast_to_box(llbox)) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def resop_getresult(llop): return _cast_to_gcref(_cast_to_resop(llop).result) @@ -94,15 +95,15 @@ def box_getint(llbox): return _cast_to_box(llbox).getint() - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def box_clone(llbox): return _cast_to_gcref(_cast_to_box(llbox).clonebox()) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def box_constbox(llbox): return _cast_to_gcref(_cast_to_box(llbox).constbox()) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def box_nonconstbox(llbox): return _cast_to_gcref(_cast_to_box(llbox).nonconstbox()) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -94,9 +94,9 @@ _about_ = _heap_stats def compute_result_annotation(self): - from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import SomePtr from rpython.memory.gc.base import ARRAY_TYPEID_MAP - return annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)) + return SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)) def specialize_call(self, hop): hop.exception_is_here() @@ -452,8 +452,9 @@ global _cache_s_list_of_gcrefs if _cache_s_list_of_gcrefs is None: from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import SomePtr from rpython.annotator.listdef import ListDef - s_gcref = annmodel.SomePtr(llmemory.GCREF) + s_gcref = SomePtr(llmemory.GCREF) _cache_s_list_of_gcrefs = annmodel.SomeList( ListDef(None, s_gcref, mutated=True, resized=False)) return _cache_s_list_of_gcrefs @@ -468,15 +469,17 @@ class Entry(ExtRegistryEntry): _about_ = get_rpy_referents + def compute_result_annotation(self, s_gcref): - from rpython.annotator import model as annmodel - assert annmodel.SomePtr(llmemory.GCREF).contains(s_gcref) + from rpython.rtyper.llannotation import SomePtr + assert SomePtr(llmemory.GCREF).contains(s_gcref) return s_list_of_gcrefs() + def specialize_call(self, hop): vlist = hop.inputargs(hop.args_r[0]) hop.exception_cannot_occur() return hop.genop('gc_get_rpy_referents', vlist, - resulttype = hop.r_result) + resulttype=hop.r_result) class Entry(ExtRegistryEntry): _about_ = get_rpy_memory_usage @@ -522,10 +525,11 @@ class Entry(ExtRegistryEntry): _about_ = _get_llcls_from_cls def compute_result_annotation(self, s_Class): - from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import rclass assert s_Class.is_constant() - return 
annmodel.SomePtr(rclass.CLASSTYPE) + return SomePtr(rclass.CLASSTYPE) + def specialize_call(self, hop): from rpython.rtyper.rclass import getclassrepr from rpython.flowspace.model import Constant @@ -550,9 +554,11 @@ class Entry(ExtRegistryEntry): _about_ = get_typeids_z + def compute_result_annotation(self): - from rpython.annotator.model import SomePtr + from rpython.rtyper.llannotation import SomePtr return SomePtr(lltype.Ptr(ARRAY_OF_CHAR)) + def specialize_call(self, hop): hop.exception_is_here() return hop.genop('gc_typeids_z', [], resulttype = hop.r_result) diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -3,7 +3,8 @@ import sys from rpython.annotator.model import (SomeObject, SomeString, s_None, SomeChar, - SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC) + SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePBC) +from rpython.rtyper.llannotation import SomePtr from rpython.rlib import jit from rpython.rlib.objectmodel import newlist_hint, specialize from rpython.rlib.rarithmetic import ovfcheck diff --git a/rpython/rlib/test/test_signature.py b/rpython/rlib/test/test_signature.py --- a/rpython/rlib/test/test_signature.py +++ b/rpython/rlib/test/test_signature.py @@ -2,6 +2,7 @@ from rpython.rlib.signature import signature, finishsigs, FieldSpec, ClassSpec from rpython.rlib import types from rpython.annotator import model +from rpython.rtyper.llannotation import SomePtr from rpython.annotator.signature import SignatureError from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.lltypesystem import rstr @@ -127,7 +128,7 @@ def f(buf): pass argtype = getsig(f, policy=policy)[0] - assert isinstance(argtype, model.SomePtr) + assert isinstance(argtype, SomePtr) assert argtype.ll_ptrtype.TO == rstr.STR def g(): diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -7,6 +7,7 @@ from rpython.annotator.policy import AnnotatorPolicy from rpython.annotator.signature import Sig from rpython.annotator.specialize import flatten_star_args +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.normalizecalls import perform_normalizations from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.flowspace.model import Constant @@ -359,7 +360,7 @@ key = (llhelper, s_callable.const) s_res = self.bookkeeper.emulate_pbc_call(key, s_callable, args_s) assert annmodel.lltype_to_annotation(FUNC.RESULT).contains(s_res) - return annmodel.SomePtr(F) + return SomePtr(F) def specialize_call(self, hop): hop.exception_cannot_occur() @@ -476,7 +477,7 @@ def compute_result_annotation(self, s_PTR, s_object): assert s_PTR.is_constant() if isinstance(s_PTR.const, lltype.Ptr): - return annmodel.SomePtr(s_PTR.const) + return SomePtr(s_PTR.const) else: assert False @@ -535,14 +536,14 @@ def placeholder_sigarg(s): if s == "self": def expand(s_self, *args_s): - assert isinstance(s_self, annmodel.SomePtr) + assert isinstance(s_self, SomePtr) return s_self elif s == "SELF": raise NotImplementedError else: assert s.islower() def expand(s_self, *args_s): - assert isinstance(s_self, annmodel.SomePtr) + assert isinstance(s_self, SomePtr) return getattr(s_self.ll_ptrtype.TO, s.upper()) return expand diff --git a/rpython/rtyper/exceptiondata.py b/rpython/rtyper/exceptiondata.py --- a/rpython/rtyper/exceptiondata.py +++ b/rpython/rtyper/exceptiondata.py @@ -1,4 +1,5 @@ from 
rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rlib import rstackovf from rpython.rtyper import rclass from rpython.rtyper.lltypesystem.rclass import (ll_issubclass, ll_type, @@ -72,12 +73,12 @@ def make_exception_matcher(self, rtyper): # ll_exception_matcher(real_exception_vtable, match_exception_vtable) - s_typeptr = annmodel.SomePtr(self.lltype_of_exception_type) + s_typeptr = SomePtr(self.lltype_of_exception_type) helper_fn = rtyper.annotate_helper_fn(ll_issubclass, [s_typeptr, s_typeptr]) return helper_fn def make_type_of_exc_inst(self, rtyper): # ll_type_of_exc_inst(exception_instance) -> exception_vtable - s_excinst = annmodel.SomePtr(self.lltype_of_exception_value) + s_excinst = SomePtr(self.lltype_of_exception_value) helper_fn = rtyper.annotate_helper_fn(ll_type, [s_excinst]) return helper_fn diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -2,6 +2,7 @@ Code for annotating low-level thingies. """ from rpython.annotator.model import SomeObject +from rpython.rtyper.lltypesystem import lltype class SomeAddress(SomeObject): immutable = True @@ -24,3 +25,30 @@ def can_be_none(self): return False +class SomePtr(SomeObject): + knowntype = lltype._ptr + immutable = True + + def __init__(self, ll_ptrtype): + assert isinstance(ll_ptrtype, lltype.Ptr) + self.ll_ptrtype = ll_ptrtype + + def can_be_none(self): + return False + + +class SomeInteriorPtr(SomePtr): + def __init__(self, ll_ptrtype): + assert isinstance(ll_ptrtype, lltype.InteriorPtr) + self.ll_ptrtype = ll_ptrtype + + +class SomeLLADTMeth(SomeObject): + immutable = True + + def __init__(self, ll_ptrtype, func): + self.ll_ptrtype = ll_ptrtype + self.func = func + + def can_be_none(self): + return False diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -22,6 +22,7 @@ from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat, base_int, intmask from rpython.rlib.rarithmetic import is_emulated_long, maxint from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from rpython.rtyper import raddress @@ -161,7 +162,7 @@ llmemory.GCREF: ctypes.c_void_p, llmemory.WeakRef: ctypes.c_void_p, # XXX }) - + if '__int128_t' in rffi.TYPES: _ctypes_cache[rffi.__INT128_T] = ctypes.c_longlong # XXX: Not right at all. But for some reason, It started by while doing JIT compile after a merge with default. Can't extend ctypes, because thats a python standard, right? 
@@ -1339,7 +1340,7 @@ def compute_result_annotation(self, s_ptr, s_n): assert isinstance(s_n, annmodel.SomeInteger) - assert isinstance(s_ptr, annmodel.SomePtr) + assert isinstance(s_ptr, SomePtr) typecheck_ptradd(s_ptr.ll_ptrtype) return annmodel.lltype_to_annotation(s_ptr.ll_ptrtype) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -1,5 +1,6 @@ import py from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import lltype, rstr from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr @@ -52,7 +53,7 @@ class _IsLLPtrEntry(ExtRegistryEntry): _about_ = _isllptr def compute_result_annotation(self, s_p): - result = isinstance(s_p, annmodel.SomePtr) + result = isinstance(s_p, SomePtr) return self.bookkeeper.immutablevalue(result) def specialize_call(self, hop): hop.exception_cannot_occur() @@ -996,7 +997,7 @@ TP = s_type.const if not isinstance(TP, lltype.Struct): raise TypeError("make called with %s instead of Struct as first argument" % TP) - return annmodel.SomePtr(lltype.Ptr(TP)) + return SomePtr(lltype.Ptr(TP)) def specialize_call(self, hop, **fields): assert hop.args_s[0].is_constant() diff --git a/rpython/rtyper/rptr.py b/rpython/rtyper/rptr.py --- a/rpython/rtyper/rptr.py +++ b/rpython/rtyper/rptr.py @@ -1,4 +1,6 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import ( + SomePtr, SomeInteriorPtr, SomeLLADTMeth) from rpython.flowspace import model as flowmodel from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.error import TyperError @@ -7,7 +9,7 @@ from rpython.tool.pairtype import pairtype -class __extend__(annmodel.SomePtr): +class __extend__(SomePtr): def rtyper_makerepr(self, rtyper): return PtrRepr(self.ll_ptrtype) @@ -15,7 +17,7 @@ return self.__class__, self.ll_ptrtype -class __extend__(annmodel.SomeInteriorPtr): +class __extend__(SomeInteriorPtr): def rtyper_makerepr(self, rtyper): return InteriorPtrRepr(self.ll_ptrtype) @@ -38,7 +40,7 @@ def rtype_getattr(self, hop): attr = hop.args_s[1].const - if isinstance(hop.s_result, annmodel.SomeLLADTMeth): + if isinstance(hop.s_result, SomeLLADTMeth): return hop.inputarg(hop.r_result, arg=0) try: self.lowleveltype._example()._lookup_adtmeth(attr) @@ -179,7 +181,7 @@ # ________________________________________________________________ # ADT methods -class __extend__(annmodel.SomeLLADTMeth): +class __extend__(SomeLLADTMeth): def rtyper_makerepr(self, rtyper): return LLADTMethRepr(self, rtyper) def rtyper_makekey(self): @@ -270,7 +272,7 @@ def rtype_getattr(self, hop): attr = hop.args_s[1].const - if isinstance(hop.s_result, annmodel.SomeLLADTMeth): + if isinstance(hop.s_result, SomeLLADTMeth): return hop.inputarg(hop.r_result, arg=0) FIELD_TYPE = getattr(self.resulttype.TO, attr) if isinstance(FIELD_TYPE, lltype.ContainerType): diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -16,6 +16,7 @@ import py from rpython.annotator import model as annmodel, unaryop, binaryop +from rpython.rtyper.llannotation import SomePtr from rpython.annotator.annrpython import FAIL from rpython.flowspace.model import Variable, Constant, SpaceOperation, c_last_exception from rpython.rtyper.annlowlevel import annotate_lowlevel_helper, LowLevelAnnotatorPolicy @@ -639,10 +640,10 @@ 
self.call_all_setups() # compute ForwardReferences now if ARG_GCSTRUCT is None: ARG_GCSTRUCT = GCSTRUCT - args_s = [annmodel.SomePtr(Ptr(ARG_GCSTRUCT))] + args_s = [SomePtr(Ptr(ARG_GCSTRUCT))] graph = self.annotate_helper(func, args_s) s = self.annotator.binding(graph.getreturnvar()) - if (not isinstance(s, annmodel.SomePtr) or + if (not isinstance(s, SomePtr) or s.ll_ptrtype != Ptr(RuntimeTypeInfo)): raise TyperError("runtime type info function %r returns %r, " "excepted Ptr(RuntimeTypeInfo)" % (func, s)) diff --git a/rpython/rtyper/test/test_llann.py b/rpython/rtyper/test/test_llann.py --- a/rpython/rtyper/test/test_llann.py +++ b/rpython/rtyper/test/test_llann.py @@ -1,6 +1,7 @@ import py from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.conftest import option from rpython.rtyper.annlowlevel import (annotate_lowlevel_helper, MixLevelHelperAnnotator, PseudoHighLevelCallable, llhelper, @@ -100,8 +101,8 @@ p2 = p1.sub1 p3 = cast_pointer(PS1, p2) return p3 - s = self.annotate(llf, [annmodel.SomePtr(PS1)]) - assert isinstance(s, annmodel.SomePtr) + s = self.annotate(llf, [SomePtr(PS1)]) + assert isinstance(s, SomePtr) assert s.ll_ptrtype == PS1 def test_cast_simple_widening_from_gc(self): @@ -114,7 +115,7 @@ p3 = cast_pointer(PS1, p2) return p3 s = self.annotate(llf, []) - assert isinstance(s, annmodel.SomePtr) + assert isinstance(s, SomePtr) assert s.ll_ptrtype == PS1 def test_cast_pointer(self): @@ -152,7 +153,7 @@ PF = Ptr(F) def llf(p): return p(0) - s = self.annotate(llf, [annmodel.SomePtr(PF)]) + s = self.annotate(llf, [SomePtr(PF)]) assert s.knowntype == int @@ -344,7 +345,7 @@ def llf(): return getRuntimeTypeInfo(S) s = self.annotate(llf, []) - assert isinstance(s, annmodel.SomePtr) + assert isinstance(s, SomePtr) assert s.ll_ptrtype == Ptr(RuntimeTypeInfo) assert s.const == getRuntimeTypeInfo(S) @@ -352,8 +353,8 @@ S = GcStruct('s', ('x', Signed), rtti=True) def llf(p): return runtime_type_info(p) - s = self.annotate(llf, [annmodel.SomePtr(Ptr(S))]) - assert isinstance(s, annmodel.SomePtr) + s = self.annotate(llf, [SomePtr(Ptr(S))]) + assert isinstance(s, SomePtr) assert s.ll_ptrtype == Ptr(RuntimeTypeInfo) def test_cast_primitive(self): diff --git a/rpython/rtyper/test/test_rpbc.py b/rpython/rtyper/test/test_rpbc.py --- a/rpython/rtyper/test/test_rpbc.py +++ b/rpython/rtyper/test/test_rpbc.py @@ -3,6 +3,7 @@ from rpython.annotator import policy, specialize from rpython.rtyper.lltypesystem.lltype import typeOf from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.llannotation import SomePtr class MyBase: @@ -1817,7 +1818,8 @@ s_ll_f = annmodel.lltype_to_annotation(r_f.lowleveltype) A_repr = rclass.getinstancerepr(rt, a.bookkeeper.getdesc(A). getuniqueclassdef()) - ll_h_graph = annlowlevel.annotate_lowlevel_helper(a, ll_h, [s_R, s_ll_f, annmodel.SomePtr(A_repr.lowleveltype)]) + ll_h_graph = annlowlevel.annotate_lowlevel_helper( + a, ll_h, [s_R, s_ll_f, SomePtr(A_repr.lowleveltype)]) s = a.binding(ll_h_graph.getreturnvar()) assert s.ll_ptrtype == A_repr.lowleveltype rt.specialize_more_blocks() @@ -1873,7 +1875,8 @@ s_ll_f = annmodel.lltype_to_annotation(r_f.lowleveltype) A_repr = rclass.getinstancerepr(rt, a.bookkeeper.getdesc(A). 
getuniqueclassdef()) - ll_h_graph = annlowlevel.annotate_lowlevel_helper(a, ll_h, [s_R, s_ll_f, annmodel.SomePtr(A_repr.lowleveltype)]) + ll_h_graph = annlowlevel.annotate_lowlevel_helper( + a, ll_h, [s_R, s_ll_f, SomePtr(A_repr.lowleveltype)]) s = a.binding(ll_h_graph.getreturnvar()) assert s.ll_ptrtype == A_repr.lowleveltype rt.specialize_more_blocks() @@ -1929,7 +1932,8 @@ A_repr = rclass.getinstancerepr(rt, a.bookkeeper.getdesc(A). getuniqueclassdef()) - ll_h_graph = annlowlevel.annotate_lowlevel_helper(a, ll_h, [s_R, s_ll_f, annmodel.SomePtr(A_repr.lowleveltype)]) + ll_h_graph = annlowlevel.annotate_lowlevel_helper( + a, ll_h, [s_R, s_ll_f, SomePtr(A_repr.lowleveltype)]) s = a.binding(ll_h_graph.getreturnvar()) assert s.ll_ptrtype == A_repr.lowleveltype rt.specialize_more_blocks() diff --git a/rpython/rtyper/test/test_rptr.py b/rpython/rtyper/test/test_rptr.py --- a/rpython/rtyper/test/test_rptr.py +++ b/rpython/rtyper/test/test_rptr.py @@ -3,6 +3,7 @@ import py from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.annotator.annrpython import RPythonAnnotator from rpython.rlib.rarithmetic import is_valid_int from rpython.rtyper.annlowlevel import annotate_lowlevel_helper, LowLevelAnnotatorPolicy @@ -31,11 +32,11 @@ PS2 = lltype.Ptr(S2) def lldown(p): return lltype.cast_pointer(PS, p) - s, t = ll_rtype(lldown, [annmodel.SomePtr(PS2)]) + s, t = ll_rtype(lldown, [SomePtr(PS2)]) assert s.ll_ptrtype == PS def llup(p): return lltype.cast_pointer(PS2, p) - s, t = ll_rtype(llup, [annmodel.SomePtr(PS)]) + s, t = ll_rtype(llup, [SomePtr(PS)]) assert s.ll_ptrtype == PS2 def test_runtime_type_info(): @@ -45,8 +46,8 @@ lltype.runtime_type_info(p) == lltype.getRuntimeTypeInfo(S)) assert ll_example(lltype.malloc(S)) == (lltype.getRuntimeTypeInfo(S), True) - s, t = ll_rtype(ll_example, [annmodel.SomePtr(lltype.Ptr(S))]) - assert s == annmodel.SomeTuple([annmodel.SomePtr(lltype.Ptr(lltype.RuntimeTypeInfo)), + s, t = ll_rtype(ll_example, [SomePtr(lltype.Ptr(S))]) + assert s == annmodel.SomeTuple([SomePtr(lltype.Ptr(lltype.RuntimeTypeInfo)), annmodel.SomeBool()]) from rpython.rtyper.test.test_llinterp import interpret, gengraph diff --git a/rpython/rtyper/test/test_rvirtualizable.py b/rpython/rtyper/test/test_rvirtualizable.py --- a/rpython/rtyper/test/test_rvirtualizable.py +++ b/rpython/rtyper/test/test_rvirtualizable.py @@ -1,4 +1,5 @@ import py +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.rvirtualizable import replace_force_virtualizable_with_call @@ -153,7 +154,7 @@ raise ValueError annhelper = MixLevelHelperAnnotator(rtyper) if self.type_system == 'lltype': - s_vinst = annmodel.SomePtr(v_inst_ll_type) + s_vinst = SomePtr(v_inst_ll_type) else: s_vinst = annmodel.SomeOOInstance(v_inst_ll_type) funcptr = annhelper.delayedfunction(mycall, [s_vinst], annmodel.s_None) From noreply at buildbot.pypy.org Mon Jan 27 11:26:51 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 27 Jan 2014 11:26:51 +0100 (CET) Subject: [pypy-commit] stmgc c7: update hgignore Message-ID: <20140127102651.5C12E1D234D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r675:3fd68097a2fd Date: 2014-01-27 11:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/3fd68097a2fd/ Log: update hgignore diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -1,8 +1,9 @@ syntax: glob *.pyc *~ -c4/build-demo* 
-c4/debug-demo* -c4/release-demo* +*/build-demo* +*/debug-demo* +*/release-demo* *.orig -c4/test/__pycache__ +*/__pycache__ +*.out.* From noreply at buildbot.pypy.org Mon Jan 27 13:54:58 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 27 Jan 2014 13:54:58 +0100 (CET) Subject: [pypy-commit] stmgc c7: small fix Message-ID: <20140127125458.26CF81C0134@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r676:88d3fb9f680f Date: 2014-01-27 13:08 +0100 http://bitbucket.org/pypy/stmgc/changeset/88d3fb9f680f/ Log: small fix diff --git a/c7/pages.c b/c7/pages.c --- a/c7/pages.c +++ b/c7/pages.c @@ -112,7 +112,7 @@ uintptr_t stm_pages_reserve(int num) { /* grab free, possibly uninitialized pages */ - if (!stm_list_is_empty(single_page_list)) { + if (num == 1 && !stm_list_is_empty(single_page_list)) { uint8_t previous; while ((previous = __sync_lock_test_and_set(&list_lock, 1))) spin_loop(); From noreply at buildbot.pypy.org Mon Jan 27 13:54:59 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 27 Jan 2014 13:54:59 +0100 (CET) Subject: [pypy-commit] stmgc c7: re-introduce the alloc-pages for small-sized allocations (3x faster in simple duhton bench). Message-ID: <20140127125459.568BF1C0134@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r677:8e7c804b4bdc Date: 2014-01-27 13:55 +0100 http://bitbucket.org/pypy/stmgc/changeset/8e7c804b4bdc/ Log: re-introduce the alloc-pages for small-sized allocations (3x faster in simple duhton bench). diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -100,10 +100,15 @@ /* privatize if SHARED_PAGE */ uintptr_t pagenum2, pages; - _stm_chunk_pages((struct object_s*)REAL_ADDRESS(get_thread_base(0), obj), - &pagenum2, &pages); - assert(pagenum == pagenum2); - assert(pages == (stmcb_size(real_address(obj)) +4095) / 4096); + if (obj->stm_flags & GCFLAG_SMALL) { + pagenum2 = pagenum; + pages = 1; + } else { + _stm_chunk_pages((struct object_s*)REAL_ADDRESS(get_thread_base(0), obj), + &pagenum2, &pages); + assert(pagenum == pagenum2); + assert(pages == (stmcb_size(real_address(obj)) +4095) / 4096); + } for (pagenum2 += pages - 1; pagenum2 >= pagenum; pagenum2--) stm_pages_privatize(pagenum2); diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -37,6 +37,9 @@ /* only used during collections to mark an obj as moved out of the generation it was in */ GCFLAG_MOVED = (1 << 2), + /* objects smaller than one page and even smaller than + LARGE_OBJECT_WORDS * 8 bytes */ + GCFLAG_SMALL = (1 << 3), }; @@ -47,6 +50,7 @@ typedef TLPREFIX struct _thread_local1_s _thread_local1_t; typedef TLPREFIX struct object_s object_t; +typedef TLPREFIX struct alloc_for_size_s alloc_for_size_t; typedef TLPREFIX struct read_marker_s read_marker_t; typedef TLPREFIX char localchar_t; typedef void* jmpbufptr_t[5]; /* for use with __builtin_setjmp() */ @@ -79,6 +83,11 @@ uint8_t rm; }; +struct alloc_for_size_s { + localchar_t *next; + uint16_t start, stop; + bool flag_partial_page; +}; struct _thread_local1_s { jmpbufptr_t *jmpbufptr; @@ -94,6 +103,7 @@ object_t **shadow_stack; object_t **shadow_stack_base; + struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; struct stm_list_s *uncommitted_objects; localchar_t *nursery_current; diff --git a/c7/largemalloc.c b/c7/largemalloc.c --- a/c7/largemalloc.c +++ b/c7/largemalloc.c @@ -105,6 +105,9 @@ size_t _stm_data_size(struct object_s *data) { + if (data->stm_flags & GCFLAG_SMALL) + return stmcb_size(data); /* XXX: inefficient */ + mchunk_t *chunk = 
data2chunk((char*)data); return chunk->size & ~FLAG_SORTED; } @@ -120,7 +123,13 @@ char *end = src + _stm_data_size((struct object_s*)REAL_ADDRESS(get_thread_base(0), obj)); uintptr_t pagenum, num; struct object_s *t0_obj = (struct object_s*)REAL_ADDRESS(get_thread_base(0), _stm_tl_address(src)); - _stm_chunk_pages(t0_obj, &pagenum, &num); + + if (obj->stm_flags & GCFLAG_SMALL) { + pagenum = (uintptr_t)obj / 4096UL; + num = 1; + } else { + _stm_chunk_pages(t0_obj, &pagenum, &num); + } while (src < end) { size_t to_copy = 4096UL - ((uintptr_t)src & 4095UL); @@ -299,6 +308,8 @@ void stm_large_free(object_t *tldata) { + assert(!(tldata->stm_flags & GCFLAG_SMALL)); + while (__sync_lock_test_and_set(&alloc_lock, 1)) spin_loop(); diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -44,6 +44,58 @@ return _stm_allocate_old(size); /* XXX */ } +localchar_t *_stm_alloc_next_page(size_t size_class) +{ + /* may return uninitialized pages */ + + /* 'alloc->next' points to where the next allocation should go. The + present function is called instead when this next allocation is + equal to 'alloc->stop'. As we know that 'start', 'next' and + 'stop' are always nearby pointers, we play tricks and only store + the lower 16 bits of 'start' and 'stop', so that the three + variables plus some flags fit in 16 bytes. + */ + uintptr_t page; + localchar_t *result; + alloc_for_size_t *alloc = &_STM_TL->alloc[size_class]; + size_t size = size_class * 8; + + /* reserve a fresh new page (XXX: from the end!) */ + page = stm_pages_reserve(1); + + result = (localchar_t *)(page * 4096UL); + alloc->start = (uintptr_t)result; + alloc->stop = alloc->start + (4096 / size) * size; + alloc->next = result + size; + alloc->flag_partial_page = false; + return result; +} + +object_t *stm_big_small_alloc_old(size_t size, bool *is_small) +{ + /* may return uninitialized objects */ + object_t *result; + size_t size_class = size / 8; + assert(size_class >= 2); + + if (size_class >= LARGE_OBJECT_WORDS) { + result = stm_large_malloc(size); + *is_small = 0; + } else { + *is_small = 1; + alloc_for_size_t *alloc = &_STM_TL->alloc[size_class]; + + if ((uint16_t)((uintptr_t)alloc->next) == alloc->stop) { + result = (object_t *)_stm_alloc_next_page(size_class); + } else { + result = (object_t *)alloc->next; + alloc->next += size; + } + } + return result; +} + + void trace_if_young(object_t **pobj) { @@ -62,7 +114,8 @@ /* move obj to somewhere else */ size_t size = stmcb_size(real_address(*pobj)); - object_t *moved = stm_large_malloc(size); + bool is_small; + object_t *moved = stm_big_small_alloc_old(size, &is_small); memcpy((void*)real_address(moved), (void*)real_address(*pobj), @@ -70,6 +123,8 @@ /* object is not committed yet */ moved->stm_flags |= GCFLAG_NOT_COMMITTED; + if (is_small) /* means, not allocated by large-malloc */ + moved->stm_flags |= GCFLAG_SMALL; LIST_APPEND(_STM_TL->uncommitted_objects, moved); (*pobj)->stm_flags |= GCFLAG_MOVED; @@ -189,6 +244,21 @@ /* uncommitted objects */ push_uncommitted_to_other_threads(); stm_list_clear(_STM_TL->uncommitted_objects); + + /* for small alloc classes, set the partial flag */ + long j; + for (j = 2; j < LARGE_OBJECT_WORDS; j++) { + alloc_for_size_t *alloc = &_STM_TL->alloc[j]; + uint16_t start = alloc->start; + uint16_t cur = (uintptr_t)alloc->next; + + if (start == cur) + continue; /* page full -> will be replaced automatically */ + + alloc->start = cur; /* next transaction has different 'start' to + reset in case of an abort */ + alloc->flag_partial_page = 
1; + } } void nursery_on_abort() @@ -205,13 +275,26 @@ _STM_TL->nursery_current = nursery_base; + /* reset the alloc-pages to the state at the start of the transaction */ + long j; + for (j = 2; j < LARGE_OBJECT_WORDS; j++) { + alloc_for_size_t *alloc = &_STM_TL->alloc[j]; + uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; + + if (num_allocated) { + /* forget about all non-committed objects */ + alloc->next -= num_allocated; + } + } + /* free uncommitted objects */ struct stm_list_s *uncommitted = _STM_TL->uncommitted_objects; STM_LIST_FOREACH( uncommitted, ({ - stm_large_free(item); + if (!(item->stm_flags & GCFLAG_SMALL)) + stm_large_free(item); })); stm_list_clear(uncommitted); From noreply at buildbot.pypy.org Mon Jan 27 14:37:43 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 27 Jan 2014 14:37:43 +0100 (CET) Subject: [pypy-commit] stmgc c7: remove some read barriers from duhton in case the field that is read is actually immutable (hopefully) Message-ID: <20140127133743.ADA541C0134@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r678:7743314d82eb Date: 2014-01-27 14:37 +0100 http://bitbucket.org/pypy/stmgc/changeset/7743314d82eb/ Log: remove some read barriers from duhton in case the field that is read is actually immutable (hopefully) diff --git a/c7/largemalloc.c b/c7/largemalloc.c --- a/c7/largemalloc.c +++ b/c7/largemalloc.c @@ -99,8 +99,10 @@ spans over. the CHUNK_HEADER is not included in the calculations */ mchunk_t *chunk = data2chunk((char*)data); *start = (((char*)data) - get_thread_base(0)) / 4096UL; + assert(*start < NB_PAGES); size_t offset_into_page = ((uintptr_t)data) & 4095UL; // % 4096 *num = ((chunk->size & ~FLAG_SORTED) + offset_into_page + 4095) / 4096UL; + assert(*num < NB_PAGES); } size_t _stm_data_size(struct object_s *data) diff --git a/duhton/consobject.c b/duhton/consobject.c --- a/duhton/consobject.c +++ b/duhton/consobject.c @@ -12,7 +12,7 @@ DuObject *p; printf("( "); while (1) { - _du_read1(ob); + /* _du_read1(ob); IMMUTABLE */ _du_save1(ob); Du_Print(ob->car, 0); _du_restore1(ob); @@ -33,7 +33,7 @@ DuObject *cons_eval(DuConsObject *ob, DuObject *locals) { - _du_read1(ob); + /* _du_read1(ob); IMMUTABLE */ return _DuFrame_EvalCall(locals, ob->car, ob->cdr, 1); } @@ -59,14 +59,14 @@ DuObject *DuCons_Car(DuObject *cons) { DuCons_Ensure("DuCons_Car", cons); - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ return ((DuConsObject *)cons)->car; } DuObject *DuCons_Cdr(DuObject *cons) { DuCons_Ensure("DuCons_Cdr", cons); - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ return ((DuConsObject *)cons)->cdr; } diff --git a/duhton/demo/container_transaction.duh b/duhton/demo/container_transaction.duh --- a/duhton/demo/container_transaction.duh +++ b/duhton/demo/container_transaction.duh @@ -3,11 +3,11 @@ (defun g (thread n) (set c (+ (get c) 1)) - (if (> (get c) 20000) + (if (> (get c) 200000) (print (quote overflow) (get c)) - (if (< n 10000) + (if (< n 100000) (transaction f thread (+ n 1)) - (if (< (get c) 20000) + (if (< (get c) 200000) (print (quote not-enough)) (print (quote ok)))))) diff --git a/duhton/intobject.c b/duhton/intobject.c --- a/duhton/intobject.c +++ b/duhton/intobject.c @@ -7,13 +7,13 @@ void int_print(DuIntObject *ob) { - _du_read1(ob); + /* _du_read1(ob); IMMUTABLE */ printf("%d", ob->ob_intval); } int int_is_true(DuIntObject *ob) { - _du_read1(ob); + /* _du_read1(ob); IMMUTABLE */ return ob->ob_intval; } @@ -38,7 +38,7 @@ int DuInt_AsInt(DuObject *ob) { DuInt_Ensure("DuInt_AsInt", 
ob); - _du_read1(ob); + /* _du_read1(ob); IMMUTABLE */ return ((DuIntObject *)ob)->ob_intval; } diff --git a/duhton/symbol.c b/duhton/symbol.c --- a/duhton/symbol.c +++ b/duhton/symbol.c @@ -21,7 +21,7 @@ void symbol_print(DuSymbolObject *ob) { - _du_read1(ob); + /* _du_read1(ob); IMMUTABLE name */ printf("'%s'", ob->name); } @@ -66,6 +66,7 @@ DuObject *DuSymbol_FromString(const char *name) { DuSymbolObject *p, *head = _Du_AllSymbols; + _du_read1(head); for (p=head; p != NULL; p=p->next) { _du_read1(p); if (strcmp(name, p->name) == 0) { @@ -86,7 +87,7 @@ char *DuSymbol_AsString(DuObject *ob) { DuSymbol_Ensure("DuSymbol_AsString", ob); - _du_read1(ob); + /* _du_read1(ob); IMMUTABLE name */ return ((DuSymbolObject *)ob)->name; } From noreply at buildbot.pypy.org Mon Jan 27 14:50:10 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 27 Jan 2014 14:50:10 +0100 (CET) Subject: [pypy-commit] stmgc c7: remove some more barriers Message-ID: <20140127135010.F41FA1C0134@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r679:fca6ea2f895d Date: 2014-01-27 14:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/fca6ea2f895d/ Log: remove some more barriers diff --git a/duhton/frame.c b/duhton/frame.c --- a/duhton/frame.c +++ b/duhton/frame.c @@ -212,7 +212,7 @@ Du_FatalError("call to '%s': not enough arguments", DuSymbol_AsString(symbol)); - _du_read1(arguments); + /* _du_read1(arguments); IMMUTABLE */ DuObject *arg = _DuCons_CAR(arguments); DuObject *argumentsnext = _DuCons_NEXT(arguments); @@ -221,7 +221,7 @@ DuObject *obj = Du_Eval(arg, caller); _du_restore2(formallist, callee); - _du_read1(formallist); + /* _du_read1(formallist); IMMUTABLE */ DuObject *sym = _DuCons_CAR(formallist); DuObject *formallistnext = _DuCons_NEXT(formallist); diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -11,7 +11,7 @@ if (cons == Du_None) goto error; - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ expr1 = _DuCons_CAR(cons); cons = _DuCons_NEXT(cons); if (cons != Du_None) goto error; @@ -31,12 +31,12 @@ if (cons == Du_None) goto error; - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ expr1 = _DuCons_CAR(cons); cons = _DuCons_NEXT(cons); if (cons == Du_None) goto error; - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ expr2 = _DuCons_CAR(cons); cons = _DuCons_NEXT(cons); if (cons != Du_None) goto error; @@ -64,7 +64,7 @@ { DuObject *result = Du_None; while (cons != Du_None) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); _du_save2(next, locals); @@ -79,12 +79,12 @@ { DuObject *result = Du_None; while (cons != Du_None) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *symbol = _DuCons_CAR(cons); cons = _DuCons_NEXT(cons); if (cons == Du_None) Du_FatalError("setq: number of arguments is odd"); - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); @@ -109,7 +109,7 @@ _du_restore2(cons, locals); while (cons != Du_None) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); @@ -144,7 +144,7 @@ { int result = 0; while (cons != Du_None) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); @@ -163,7 +163,7 @@ int result = 0; int sign = 1; while (cons != Du_None) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *expr = 
_DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); @@ -182,7 +182,7 @@ { int result = 1; while (cons != Du_None) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); @@ -202,7 +202,7 @@ int first = 1; while (cons != Du_None) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); @@ -263,7 +263,7 @@ DuObject *du_quote(DuObject *cons, DuObject *locals) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ if (cons == Du_None || _DuCons_NEXT(cons) != Du_None) Du_FatalError("quote: expected one argument"); return _DuCons_CAR(cons); @@ -275,7 +275,7 @@ DuObject *list = DuList_New(); _du_restore2(cons, locals); while (cons != Du_None) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); @@ -313,7 +313,7 @@ if (cons == Du_None) Du_FatalError("get: expected at least one argument"); - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); @@ -322,7 +322,7 @@ _du_restore2(next, locals); if (DuList_Check(obj)) { - _du_read1(next); + /* _du_read1(next); IMMUTABLE */ if (next == Du_None || _DuCons_NEXT(next) != Du_None) Du_FatalError("get with a list: expected two arguments"); @@ -344,7 +344,7 @@ DuObject *du_set(DuObject *cons, DuObject *locals) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ if (cons == Du_None || _DuCons_NEXT(cons) == Du_None) Du_FatalError("set: expected at least two arguments"); @@ -355,12 +355,12 @@ DuObject *obj = Du_Eval(expr, locals); _du_restore2(next, locals); - _du_read1(next); + /* _du_read1(next); IMMUTABLE */ DuObject *expr2 = _DuCons_CAR(next); DuObject *next2 = _DuCons_NEXT(next); if (DuList_Check(obj)) { - _du_read1(next2); + /* _du_read1(next2); IMMUTABLE */ if (next2 == Du_None || _DuCons_NEXT(next2) != Du_None) Du_FatalError("set with a list: expected three arguments"); @@ -404,7 +404,7 @@ if (cons == Du_None) Du_FatalError("pop: expected at least one argument"); - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); @@ -419,7 +419,7 @@ Du_FatalError("pop: empty list"); } else { - _du_read1(next); + /* _du_read1(next); IMMUTABLE */ DuObject *expr2 = _DuCons_CAR(next); DuObject *next2 = _DuCons_NEXT(next); @@ -447,7 +447,7 @@ DuObject *du_if(DuObject *cons, DuObject *locals) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ if (cons == Du_None || _DuCons_NEXT(cons) == Du_None) Du_FatalError("if: expected at least two arguments"); @@ -458,7 +458,7 @@ DuObject *cond = Du_Eval(expr, locals); _du_restore2(next, locals); - _du_read1(next); + /* _du_read1(next); IMMUTABLE */ if (DuObject_IsTrue(cond) != 0) { /* true path */ return Du_Eval(_DuCons_CAR(next), locals); @@ -474,7 +474,7 @@ if (cons == Du_None) Du_FatalError("while: expected at least one argument"); - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); @@ -495,14 +495,14 @@ DuObject *du_defun(DuObject *cons, DuObject *locals) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ if (cons == Du_None || _DuCons_NEXT(cons) == Du_None) Du_FatalError("defun: expected at least two arguments"); DuObject *name = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); - _du_read1(next); + /* _du_read1(next); IMMUTABLE */ DuObject *arglist = _DuCons_CAR(next); 
DuObject *progn = _DuCons_NEXT(next); @@ -549,7 +549,7 @@ if (cons == Du_None) Du_FatalError("transaction: expected at least one argument"); - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ DuObject *sym = _DuCons_CAR(cons); DuObject *rest = _DuCons_NEXT(cons); _DuFrame_EvalCall(locals, sym, rest, 0); @@ -573,7 +573,7 @@ DuObject *du_defined(DuObject *cons, DuObject *locals) { - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ if (cons == Du_None || _DuCons_NEXT(cons) != Du_None) Du_FatalError("defined?: expected one argument"); DuObject *ob = _DuCons_CAR(cons); @@ -602,7 +602,7 @@ if (!DuInt_AsInt(obj)) { printf("assert failed: "); - _du_read1(cons); + /* _du_read1(cons); IMMUTABLE */ Du_Print(_DuCons_CAR(cons), 1); Du_FatalError("assert failed"); } diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -86,13 +86,13 @@ stm_start_transaction(&here); root = du_pending_transactions; - _du_read1(root); + /* _du_read1(root); IMMUTABLE */ if (root->cdr != Du_None) { DuObject *cell = root->cdr; _du_write1(root); - _du_read1(cell); + /* _du_read1(cell); IMMUTABLE */ DuObject *result = _DuCons_CAR(cell); root->cdr = _DuCons_NEXT(cell); @@ -130,7 +130,7 @@ while (__builtin_setjmp(here) == 1) { } stm_start_transaction(&here); - _du_read1(pending); + /* _du_read1(pending); IMMUTABLE */ DuObject *result = _DuCons_CAR(pending); DuObject *next = _DuCons_NEXT(pending); @@ -139,7 +139,7 @@ DuObject *tail = next; while (1) { - _du_read1(tail); + /* _du_read1(tail); IMMUTABLE */ DuObject *tailnext = ((DuConsObject *)tail)->cdr; if (tailnext == Du_None) break; From noreply at buildbot.pypy.org Mon Jan 27 15:38:58 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 27 Jan 2014 15:38:58 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: implement new and new_with_vtable Message-ID: <20140127143858.492A41D23D1@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68960:672bb5aed066 Date: 2014-01-27 10:35 +0100 http://bitbucket.org/pypy/pypy/changeset/672bb5aed066/ Log: implement new and new_with_vtable diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -1,125 +1,13 @@ from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, ConstInt,\ - INT, REF, Const + INT, REF, FLOAT from rpython.jit.metainterp import history from rpython.jit.resume.reader import AbstractResumeReader from rpython.jit.resume.rescode import TAGBOX, TAGCONST, TAGSMALLINT,\ TAGVIRTUAL, CLEAR_POSITION -# class AbstractResumeReader(object): -# """ A resume reader that can follow resume until given point. Consult -# the concrete classes for details -# """ - -# def __init__(self): -# self.framestack = [] -# self.consts = [] # XXX cache? 
-# self.virtuals = {} -# self.virtual_list = [] - -# def rebuild(self, faildescr): -# self._rebuild_until(faildescr.rd_resume_bytecode, -# faildescr.rd_bytecode_position) -# return self.finish() - -# def finish(self): -# pass - -# def enter_frame(self, pc, jitcode): -# if self.framestack: -# assert pc != -1 -# self.framestack[-1].pc = pc -# self.framestack.append(ResumeFrame(jitcode)) - -# def encode_box(self, pos): -# return TAGBOX | (pos << TAGOFFSET) - -# def encode_virtual(self, box): -# return TAGVIRTUAL | (self.virtuals[box].pos << TAGOFFSET) - -# def encode_const(self, const): -# if isinstance(const, ConstInt) and const.getint() < (sys.maxint >> 3): -# return TAGSMALLINT | (const.getint() << TAGOFFSET) -# self.consts.append(const) -# return TAGCONST | ((len(self.consts) - 1) << TAGOFFSET) - -# def decode(self, pos): -# return pos & 0x3, pos >> TAGOFFSET - -# def resume_put(self, jitframe_pos_box, frame_no, frontend_position): -# if isinstance(jitframe_pos_box, Box): -# jitframe_pos = self.encode_virtual(jitframe_pos_box) -# else: -# jitframe_pos = self.encode_box(jitframe_pos_box.getint()) -# self.framestack[frame_no].registers[frontend_position] = jitframe_pos - -# def encode(self, box): -# xxx - -# def resume_new(self, box, descr): -# # XXX make it a list -# v = Virtual(len(self.virtual_list), descr) -# self.virtuals[box] = v -# self.virtual_list.append(v) - -# def resume_setfield_gc(self, box, fieldbox, descr): -# # XXX optimize fields -# self.virtuals[box].fields[descr] = self.encode(fieldbox) - -# def resume_clear(self, frame_no, frontend_position): -# self.framestack[frame_no].registers[frontend_position] = -1 - -# def resume_put_const(self, const, frame_no, frontend_position): -# pos = self.encode_const(const) -# self.framestack[frame_no].registers[frontend_position] = pos - -# def resume_set_pc(self, pc): -# self.framestack[-1].pc = pc - -# def leave_frame(self): -# self.framestack.pop() - -# def _rebuild_until(self, rb, position): -# if rb.parent is not None: -# self._rebuild_until(rb.parent, rb.parent_position) -# self.interpret_until(rb.opcodes, position) - -# def interpret_until(self, bytecode, until, pos=0): -# while pos < until: -# op = bytecode[pos] -# if op == rescode.ENTER_FRAME: -# xxx -# descr = op.getdescr() -# assert isinstance(descr, JitCode) -# self.enter_frame(op.getarg(0).getint(), descr) -# elif op.getopnum() == rop.LEAVE_FRAME: -# self.leave_frame() -# elif op.getopnum() == rop.RESUME_PUT: -# self.resume_put(op.getarg(0), op.getarg(1).getint(), -# op.getarg(2).getint()) -# elif op.getopnum() == rop.RESUME_NEW: -# self.resume_new(op.result, op.getdescr()) -# elif op.getopnum() == rop.RESUME_SETFIELD_GC: -# self.resume_setfield_gc(op.getarg(0), op.getarg(1), -# op.getdescr()) -# elif op.getopnum() == rop.RESUME_SET_PC: -# self.resume_set_pc(op.getarg(0).getint()) -# elif op.getopnum() == rop.RESUME_CLEAR: -# self.resume_clear(op.getarg(0).getint(), -# op.getarg(1).getint()) -# elif not op.is_resume(): -# pos += 1 -# continue -# else: -# xxx -# pos += 1 - -# def read_int(self, jitframe_pos): -# return self.metainterp.cpu.get_int_value(self.deadframe, jitframe_pos) - - class DirectResumeReader(AbstractResumeReader): """ Directly read values from the jitframe and put them in the blackhole interpreter @@ -129,6 +17,7 @@ self.bhinterpbuilder = binterpbuilder self.cpu = cpu self.deadframe = deadframe + self.virtuals_cache = {} AbstractResumeReader.__init__(self, metainterp_sd) def finish(self): @@ -154,38 +43,55 @@ def store_int_value(self, curbh, i, encoded_pos): 
if encoded_pos == CLEAR_POSITION: return + val = self.getint(encoded_pos) + curbh.registers_i[i] = val + + def getint(self, encoded_pos): tag, index = self.decode(encoded_pos) if tag == TAGBOX: - curbh.registers_i[i] = self.cpu.get_int_value(self.deadframe, index) + return self.cpu.get_int_value(self.deadframe, index) elif tag == TAGSMALLINT: - curbh.registers_i[i] = index + return index else: xxx - return - xxx - if jitframe_pos >= 0: - curbh.registers_i[i] = self.cpu.get_int_value( - self.deadframe, jitframe_pos) - elif jitframe_pos < -1: - curbh.registers_i[i] = self.consts[-jitframe_pos - 2].getint() def store_ref_value(self, curbh, i, encoded_pos): if encoded_pos == CLEAR_POSITION: return + curbh.registers_r[i] = self.getref(encoded_pos) + + def getref(self, encoded_pos): tag, index = self.decode(encoded_pos) if tag == TAGBOX: - curbh.registers_r[i] = self.cpu.get_ref_value(self.deadframe, index) + return self.cpu.get_ref_value(self.deadframe, index) elif tag == TAGCONST: - curbh.registers_r[i] = self.consts[index].getref_base() + return self.consts[index].getref_base() + elif tag == TAGVIRTUAL: + return self.allocate_virtual(index) else: xxx - return - xxxx - if jitframe_pos >= 0: - curbh.registers_r[i] = self.cpu.get_ref_value( - self.deadframe, jitframe_pos) - elif jitframe_pos < -1: - curbh.registers_r[i] = self.consts[-jitframe_pos - 2].getref_base() + + def allocate_virtual(self, index): + try: + return self.virtuals_cache[index] + except KeyError: + pass + val = self.virtuals[index].allocate_direct(self.cpu) + self.virtuals_cache[index] = val + fields = self.virtuals[index].fields + for fielddescr, encoded_field_pos in fields.iteritems(): + self.setfield_gc(val, encoded_field_pos, fielddescr) + return val + + def setfield_gc(self, struct, encoded_field_pos, fielddescr): + if fielddescr.kind == INT: + intval = self.getint(encoded_field_pos) + self.cpu.bh_setfield_gc_i(struct, intval, fielddescr) + elif fielddescr.kind == REF: + refval = self.getref(encoded_field_pos) + self.cpu.bh_setfield_gc_r(struct, refval, fielddescr) + elif fielddescr.kind == FLOAT: + xxx def store_float_value(self, curbh, i, jitframe_pos): xxx @@ -236,10 +142,10 @@ else: assert tag == TAGVIRTUAL virtual = self.virtuals[pos] - virtual_box = self.allocate_struct(virtual) + virtual_box = virtual.allocate_box(self.metainterp) + self.cache[encoded_pos] = virtual_box for fielddescr, encoded_field_pos in virtual.fields.iteritems(): self.setfield_gc(virtual_box, encoded_field_pos, fielddescr) - self.cache[encoded_pos] = virtual_box if pos_in_frame != -1: self.metainterp.history.record(rop.RESUME_PUT, [virtual_box, @@ -248,9 +154,6 @@ None, None) return virtual_box - def allocate_struct(self, virtual): - return self.metainterp.execute_and_record(rop.NEW, virtual.descr) - def setfield_gc(self, box, encoded_field_pos, fielddescr): field_box = self.get_box_value(-1, -1, encoded_field_pos, fielddescr.kind) diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -1,4 +1,6 @@ +from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.history import ConstInt from rpython.jit.resume import rescode class ResumeFrame(object): @@ -7,13 +9,33 @@ self.jitcode = jitcode self.pc = -1 +class BaseVirtual(object): + pass -class Virtual(object): +class VirtualStruct(BaseVirtual): def __init__(self, pos, descr): self.pos = pos self.fields = {} self.descr = descr + def allocate_box(self, metainterp): + return 
metainterp.execute_and_record(rop.NEW, self.descr) + + def allocate_direct(self, cpu): + return cpu.bh_new(self.descr) + +class VirtualWithVtable(BaseVirtual): + def __init__(self, pos, const_class): + self.pos = pos + self.const_class = const_class + self.fields = {} + + def allocate_box(self, metainterp): + return metainterp.execute_and_record(rop.NEW_WITH_VTABLE, + ConstInt(self.const_class)) + + def allocate_direct(self, cpu): + return cpu.bh_new_with_vtable(self.const_class) class AbstractResumeReader(object): """ A resume reader that can follow resume until given point. Consult @@ -46,11 +68,19 @@ self.framestack[frame_no].registers[frontend_position] = encoded_pos def resume_new(self, v_pos, descr): - v = Virtual(v_pos, descr) + v = VirtualStruct(v_pos, descr) + self._add_to_virtuals(v, v_pos) + + def resume_new_with_vtable(self, v_pos, c_const_class): + const_class = c_const_class.getint() + v = VirtualWithVtable(v_pos, const_class) + self._add_to_virtuals(v, v_pos) + + def _add_to_virtuals(self, v, v_pos): if v_pos >= len(self.virtuals): self.virtuals += [None] * (len(self.virtuals) - v_pos + 1) self.virtuals[v_pos] = v - + def resume_setfield_gc(self, pos, fieldpos, descr): # XXX optimize fields tag, index = self.decode(pos) @@ -97,11 +127,15 @@ self.resume_put(encoded, frame_pos, pos_in_frame) pos += 5 elif op == rescode.RESUME_NEW: - tag, v_pos = self.decode(self.read_short(pos + 1)) - assert tag == rescode.TAGVIRTUAL + v_pos = self.read_short(pos + 1) descr = self.staticdata.opcode_descrs[self.read_short(pos + 3)] self.resume_new(v_pos, descr) pos += 5 + elif op == rescode.RESUME_NEW_WITH_VTABLE: + v_pos = self.read_short(pos + 1) + const_class = self.consts[self.read_short(pos + 3)] + self.resume_new_with_vtable(v_pos, const_class) + pos += 5 elif op == rescode.RESUME_SETFIELD_GC: structpos = self.read_short(pos + 1) fieldpos = self.read_short(pos + 3) @@ -140,6 +174,10 @@ def resume_new(self, v_pos, descr): self.l.append("%d = resume_new %d" % (v_pos, descr.global_descr_index)) + def resume_new_with_vtable(self, v_pos, c_const_class): + self.l.append("%d = resume_new_with_vtable %d" % (v_pos, + c_const_class.getint())) + def leave_frame(self): self.l.append("leave_frame") diff --git a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -64,9 +64,15 @@ not isinstance(const.getint(), Symbolic) and 0 <= const.getint() < 0x4000): return TAGSMALLINT | (const.getint() << 2) + assert len(self.consts) < 0x4000 self.consts.append(const) return TAGCONST | ((len(self.consts) - 1) << 2) + def encode_const_not_small(self, const): + assert len(self.consts) < 0x4000 + self.consts.append(const) + return len(self.consts) - 1 + def resume_set_pc(self, pc): self.write(RESUME_SET_PC) self.write_short(pc) @@ -79,13 +85,13 @@ def resume_new(self, v_pos, descr): self.write(RESUME_NEW) - self.write_short(self.encode(TAGVIRTUAL, v_pos)) + self.write_short(v_pos) # XXX byte virtuals? self.write_short(descr.global_descr_index) def resume_new_with_vtable(self, v_pos, const_class): self.write(RESUME_NEW_WITH_VTABLE) - self.write_short(self.encode(TAGVIRTUAL, v_pos)) - self.write_short(self.encode_const(const_class)) + self.write_short(v_pos) # XXX byte virtuals? 
+ self.write_short(self.encode_const_not_small(const_class)) def resume_setfield_gc(self, structpos, fieldpos, descr): self.write(RESUME_SETFIELD_GC) diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -2,8 +2,9 @@ from rpython.jit.tool.oparser import parse from rpython.jit.codewriter.jitcode import JitCode from rpython.jit.metainterp.history import AbstractDescr, Const, INT, Stats,\ - ConstInt -from rpython.jit.resume.frontend import rebuild_from_resumedata + ConstInt, REF +from rpython.jit.resume.frontend import rebuild_from_resumedata,\ + blackhole_from_resumedata from rpython.jit.resume.rescode import ResumeBytecode, TAGBOX,\ ResumeBytecodeBuilder, TAGCONST, TAGSMALLINT, TAGVIRTUAL, CLEAR_POSITION from rpython.jit.resume.reader import AbstractResumeReader @@ -72,10 +73,42 @@ self.framestack.pop() class MockCPU(object): + def __init__(self): + self.history = [] + def get_int_value(self, frame, index): assert frame == "myframe" return index + 3 + def bh_new(self, descr): + self.history.append(("new", descr)) + return "new" + + def bh_setfield_gc_i(self, struct, intval, fielddescr): + self.history.append(("setfield_gc_i", struct, intval, fielddescr)) + + def bh_setfield_gc_r(self, struct, refval, fielddescr): + self.history.append(("setfield_gc_r", struct, refval, fielddescr)) + + def bh_new_with_vtable(self, const_class): + self.history.append(("new_with_vtable", const_class)) + return "new_with_vtable" + +class MockBlackholeInterp(object): + def __init__(self): + pass + + def setposition(self, jitcode, pos): + self.positions = pos + self.jitcode = jitcode + self.registers_i = [-1] * jitcode.num_regs_i() + self.registers_r = [None] * jitcode.num_regs_r() + +class FakeInterpBuilder(object): + def acquire_interp(self): + self.interp = MockBlackholeInterp() + return self.interp + class RebuildingResumeReader(AbstractResumeReader): def finish(self): res = [] @@ -111,6 +144,7 @@ metainterp = MockMetaInterp() metainterp.staticdata = MockStaticData([jitcode], []) metainterp.cpu = MockCPU() + metainterp.staticdata.cpu = metainterp.cpu inputargs, inplocs = rebuild_from_resumedata(metainterp, "myframe", descr) assert len(metainterp.framestack) == 1 @@ -172,28 +206,63 @@ jitcode1.setup(num_regs_i=0, num_regs_r=1, num_regs_f=0) builder = ResumeBytecodeBuilder() descr = Descr() + const_class = ConstInt(13) descr.global_descr_index = 0 builder.enter_frame(-1, jitcode1) builder.resume_new(0, descr) + builder.resume_new_with_vtable(1, const_class) d2 = Descr() d2.kind = INT d2.global_descr_index = 1 + d3 = Descr() + d3.kind = REF + d3.global_descr_index = 2 builder.resume_setfield_gc(TAGVIRTUAL | (0 << 2), TAGSMALLINT | (1 << 2), d2) + builder.resume_setfield_gc(TAGVIRTUAL | (0 << 2), + TAGVIRTUAL | (1 << 2), d3) + builder.resume_put(TAGVIRTUAL | (0 << 2), 0, 0) rd = builder.build() descr = Descr() - descr.rd_resume_bytecode = ResumeBytecode(rd, []) + descr.rd_resume_bytecode = ResumeBytecode(rd, [const_class]) descr.rd_bytecode_position = len(rd) metainterp = MockMetaInterp() - metainterp.staticdata = MockStaticData([jitcode1], [descr, d2]) + metainterp.staticdata = MockStaticData([jitcode1], [descr, d2, d3]) metainterp.cpu = MockCPU() + metainterp.staticdata.cpu = metainterp.cpu rebuild_from_resumedata(metainterp, "myframe", descr) expected = [(rop.NEW, descr), (rop.SETFIELD_GC, d2, AnyBox(), EqConstInt(1)), + (rop.NEW_WITH_VTABLE, EqConstInt(13)), + 
(rop.SETFIELD_GC, d3, AnyBox(), AnyBox()), (rop.RESUME_PUT, None, AnyBox(), EqConstInt(0), EqConstInt(0))] - assert metainterp.history == expected + expected2 = [(rop.NEW, descr), + (rop.NEW_WITH_VTABLE, EqConstInt(13)), + (rop.SETFIELD_GC, d3, AnyBox(), AnyBox()), + (rop.SETFIELD_GC, d2, AnyBox(), EqConstInt(1)), + (rop.RESUME_PUT, None, AnyBox(), EqConstInt(0), + EqConstInt(0))] + assert metainterp.history == expected or metainterp.history == expected2 + ib = FakeInterpBuilder() + blackhole_from_resumedata(ib, metainterp.staticdata, + descr, "myframe") + hist = metainterp.cpu.history + dir_expected2 = [ + ("new", descr), + ("new_with_vtable", 13), + ("setfield_gc_r", "new", "new_with_vtable", d3), + ("setfield_gc_i", "new", 1, d2), + ] + dir_expected = [ + ("new", descr), + ("setfield_gc_i", "new", 1, d2), + ("new_with_vtable", 13), + ("setfield_gc_r", "new", "new_with_vtable", d3), + ] + assert hist == dir_expected or hist == dir_expected2 + assert ib.interp.registers_r[0] == "new" def test_reconstructing_resume_reader(self): jitcode1 = JitCode("jitcode") From noreply at buildbot.pypy.org Mon Jan 27 15:38:59 2014 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 27 Jan 2014 15:38:59 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: Adjust tests for the real interface of bhimpl_new_with_vtable Message-ID: <20140127143859.810691D23D1@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68961:a6c79f403639 Date: 2014-01-27 15:38 +0100 http://bitbucket.org/pypy/pypy/changeset/a6c79f403639/ Log: Adjust tests for the real interface of bhimpl_new_with_vtable diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -6,6 +6,7 @@ from rpython.jit.metainterp.history import AbstractDescr from rpython.jit.metainterp.history import Const, getkind from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID, Box +from rpython.jit.metainterp.history import getkind from rpython.jit.metainterp.resoperation import rop from rpython.jit.codewriter import longlong, heaptracker from rpython.jit.codewriter.effectinfo import EffectInfo @@ -36,6 +37,7 @@ def __init__(self, mapping, frontend_liveness, descr, inputargs, inputlocs): self.liveness = LivenessAnalyzer() self.numbering = {} + self.deps = {} self.mapping = mapping self.framestack = [] if inputlocs is not None: @@ -68,15 +70,24 @@ def process_resume_set_pc(self, op): pass + def process_resume_new_with_vtable(self, op): + self._add_box_to_numbering(op.result) + self.deps[op.result] = {} + def process_resume_setfield_gc(self, op): - xxx + self._add_box_to_numbering(op.getarg(1)) + self.deps[op.getarg(0)][op.getdescr()] = op.getarg(1) + + def _add_box_to_numbering(self, box): + if box not in self.deps: + if self.mapping(box) not in self.numbering: + self.numbering[self.mapping(box)] = len(self.numbering) def process_resume_put(self, op): box = op.getarg(0) if isinstance(box, Const): return - if self.mapping(box) not in self.numbering: - self.numbering[self.mapping(box)] = len(self.numbering) + self._add_box_to_numbering(box) frame_pos = op.getarg(1).getint() pos_in_frame = op.getarg(2).getint() self.framestack[frame_pos].registers[pos_in_frame] = box @@ -86,13 +97,23 @@ frontend_pos = op.getarg(1).getint() self.framestack[frame_pos].registers[frontend_pos] = None + def extend_from_virtual(self, r, box): + for v in sorted(self.deps[box].values()): + if v in self.deps: + 
self.extend_from_virtual(r, v) + else: + r.append(self.mapping(v)) + def get_numbering(self, mapping, op): res = [] all = {} for frame in self.framestack: for reg in frame.registers: if reg is not None and isinstance(reg, Box) and reg not in all: - res.append(mapping(reg)) + if reg in self.deps: + self.extend_from_virtual(res, reg) + else: + res.append(mapping(reg)) all[reg] = None return res diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -23,10 +23,10 @@ def __init__(self, rtyper, stats, opts, translate_support_code=False, gcdescr=None): + from rpython.jit.backend.llsupport.gc import get_ll_description + assert type(opts) is not bool self.opts = opts - - from rpython.jit.backend.llsupport.gc import get_ll_description AbstractCPU.__init__(self) self.rtyper = rtyper self.stats = stats diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -84,13 +84,13 @@ return val def setfield_gc(self, struct, encoded_field_pos, fielddescr): - if fielddescr.kind == INT: + if fielddescr.is_field_signed(): intval = self.getint(encoded_field_pos) self.cpu.bh_setfield_gc_i(struct, intval, fielddescr) - elif fielddescr.kind == REF: + elif fielddescr.is_pointer_field(): refval = self.getref(encoded_field_pos) self.cpu.bh_setfield_gc_r(struct, refval, fielddescr) - elif fielddescr.kind == FLOAT: + elif fielddescr.is_float_field(): xxx def store_float_value(self, curbh, i, jitframe_pos): diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -1,6 +1,7 @@ from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.history import ConstInt +from rpython.jit.codewriter import heaptracker from rpython.jit.resume import rescode class ResumeFrame(object): @@ -35,7 +36,8 @@ ConstInt(self.const_class)) def allocate_direct(self, cpu): - return cpu.bh_new_with_vtable(self.const_class) + descr = heaptracker.vtable2descr(cpu, self.const_class) + return cpu.bh_new_with_vtable(self.const_class, descr) class AbstractResumeReader(object): """ A resume reader that can follow resume until given point. 
Consult diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -12,6 +12,7 @@ from rpython.jit.metainterp.resoperation import rop from rpython.jit.codewriter.format import unformat_assembler from rpython.jit.codewriter.codewriter import CodeWriter +from rpython.jit.codewriter import heaptracker from rpython.jit.backend.llgraph.runner import LLGraphCPU from rpython.jit.metainterp.pyjitpl import MetaInterp, MetaInterpStaticData from rpython.jit.metainterp.jitdriver import JitDriverStaticData @@ -19,10 +20,15 @@ from rpython.jit.metainterp.jitexc import DoneWithThisFrameInt from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.rlib.jit import JitDriver +from rpython.rtyper.lltypesystem import rclass, lltype, llmemory class Descr(AbstractDescr): - pass + def is_pointer_field(self): + return self.kind == REF + + def is_field_signed(self): + return self.kind == INT class MockLoop(object): pass @@ -90,7 +96,7 @@ def bh_setfield_gc_r(self, struct, refval, fielddescr): self.history.append(("setfield_gc_r", struct, refval, fielddescr)) - def bh_new_with_vtable(self, const_class): + def bh_new_with_vtable(self, const_class, descr): self.history.append(("new_with_vtable", const_class)) return "new_with_vtable" @@ -206,7 +212,10 @@ jitcode1.setup(num_regs_i=0, num_regs_r=1, num_regs_f=0) builder = ResumeBytecodeBuilder() descr = Descr() - const_class = ConstInt(13) + cls = lltype.malloc(rclass.OBJECT_VTABLE, flavor='raw', + immortal=True) + cls_as_int = heaptracker.adr2int(llmemory.cast_ptr_to_adr(cls)) + const_class = ConstInt(cls_as_int) descr.global_descr_index = 0 builder.enter_frame(-1, jitcode1) builder.resume_new(0, descr) @@ -230,16 +239,24 @@ metainterp = MockMetaInterp() metainterp.staticdata = MockStaticData([jitcode1], [descr, d2, d3]) metainterp.cpu = MockCPU() + + class MockTracker(object): + pass + + tr = MockTracker() + tr._all_size_descrs_with_vtable = [descr] + descr._corresponding_vtable = cls + metainterp.cpu.tracker = tr metainterp.staticdata.cpu = metainterp.cpu rebuild_from_resumedata(metainterp, "myframe", descr) expected = [(rop.NEW, descr), (rop.SETFIELD_GC, d2, AnyBox(), EqConstInt(1)), - (rop.NEW_WITH_VTABLE, EqConstInt(13)), + (rop.NEW_WITH_VTABLE, EqConstInt(cls_as_int)), (rop.SETFIELD_GC, d3, AnyBox(), AnyBox()), (rop.RESUME_PUT, None, AnyBox(), EqConstInt(0), EqConstInt(0))] expected2 = [(rop.NEW, descr), - (rop.NEW_WITH_VTABLE, EqConstInt(13)), + (rop.NEW_WITH_VTABLE, EqConstInt(cls_as_int)), (rop.SETFIELD_GC, d3, AnyBox(), AnyBox()), (rop.SETFIELD_GC, d2, AnyBox(), EqConstInt(1)), (rop.RESUME_PUT, None, AnyBox(), EqConstInt(0), @@ -251,14 +268,14 @@ hist = metainterp.cpu.history dir_expected2 = [ ("new", descr), - ("new_with_vtable", 13), + ("new_with_vtable", cls_as_int), ("setfield_gc_r", "new", "new_with_vtable", d3), ("setfield_gc_i", "new", 1, d2), ] dir_expected = [ ("new", descr), ("setfield_gc_i", "new", 1, d2), - ("new_with_vtable", 13), + ("new_with_vtable", cls_as_int), ("setfield_gc_r", "new", "new_with_vtable", d3), ] assert hist == dir_expected or hist == dir_expected2 From noreply at buildbot.pypy.org Mon Jan 27 17:53:01 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Mon, 27 Jan 2014 17:53:01 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: (Hopefully) final modifications Message-ID: <20140127165301.066C01C01F0@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: 
extradoc Changeset: r5143:326c63faded1 Date: 2014-01-27 17:52 +0100 http://bitbucket.org/pypy/extradoc/changeset/326c63faded1/ Log: (Hopefully) final modifications diff --git a/talk/fosdem2014/pypy-jit/talk.pdf b/talk/fosdem2014/pypy-jit/talk.pdf new file mode 100644 index 0000000000000000000000000000000000000000..78643be7cc20a8371fc7ae6eebf74543936b90f7 GIT binary patch [cut] diff --git a/talk/fosdem2014/pypy-jit/talk.rst b/talk/fosdem2014/pypy-jit/talk.rst --- a/talk/fosdem2014/pypy-jit/talk.rst +++ b/talk/fosdem2014/pypy-jit/talk.rst @@ -51,6 +51,17 @@ * Produces optimized machine code +RPython +======= + +* Statically typed subset of Python + +* The RPython compiler automatically generates the JIT from the annotated RPython code + +* The JIT can be added with just one line of code + +* More hints are needed to have an efficient JIT + Tracing JIT =========== @@ -60,6 +71,8 @@ * Produces a linear trace of execution +* Inlines almost everything + * The trace is then optimized and compiled Guard @@ -78,6 +91,15 @@ * After a guard has failed X times, the other path is traced, compiled and attached to the trace +Optimizations +============= + +* Virtuals + +* Virtualizables + +* Promotion + Jitviewer ========= From noreply at buildbot.pypy.org Mon Jan 27 18:21:16 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 27 Jan 2014 18:21:16 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Instead of setting the LLVM translation backend by default, set it after the construction of the config object. This makes sure raisingop2direct_call is forced. Message-ID: <20140127172116.B6A671D236E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68962:f38ed6da2533 Date: 2014-01-27 18:20 +0100 http://bitbucket.org/pypy/pypy/changeset/f38ed6da2533/ Log: Instead of setting the LLVM translation backend by default, set it after the construction of the config object. This makes sure raisingop2direct_call is forced. diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -39,7 +39,7 @@ ChoiceOption("type_system", "Type system to use when RTyping", ["lltype"], cmdline=None, default="lltype"), ChoiceOption("backend", "Backend to use for code generation", - ["c", "llvm"], default="llvm", + ["c", "llvm"], default="c", requires={ "c": [("translation.type_system", "lltype")], "llvm": [("translation.type_system", "lltype"), @@ -292,6 +292,7 @@ if child._name != newname] descr = OptionDescription("pypy", "all options", children) config = Config(descr, **overrides) + config.translation.backend = 'llvm' if translating: config.translating = True if existing_config is not None: From noreply at buildbot.pypy.org Mon Jan 27 19:16:39 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 27 Jan 2014 19:16:39 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Add debug_offset() and debug_flush() stubs to the LLVM translation backend. Message-ID: <20140127181639.121211C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68963:c9c061c83ea3 Date: 2014-01-27 19:15 +0100 http://bitbucket.org/pypy/pypy/changeset/c9c061c83ea3/ Log: Add debug_offset() and debug_flush() stubs to the LLVM translation backend. 
diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1087,6 +1087,12 @@ def op_debug_nonnull_pointer(self, result, *args): pass + def op_debug_offset(self, result, *args): + pass + + def op_debug_flush(self, result, *args): + pass + def op_track_alloc_start(self, result, *args): pass From noreply at buildbot.pypy.org Mon Jan 27 21:17:51 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 27 Jan 2014 21:17:51 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: rearrange Message-ID: <20140127201751.38D061D236E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r68964:009c2b057902 Date: 2014-01-27 00:16 +0200 http://bitbucket.org/pypy/pypy/changeset/009c2b057902/ Log: rearrange diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -686,11 +686,11 @@ print >> fc, '/***********************************************************/' print >> fc, '/*** Implementations ***/' print >> fc - print >> fc, '#define PYPY_FILE_NAME "%s"' % name print >> fc, '#include "common_header.h"' print >> fc, '#include "structdef.h"' print >> fc, '#include "forwarddecl.h"' print >> fc, '#include "preimpl.h"' + print >> fc, '#define PYPY_FILE_NAME "%s"' % name print >> fc, '#include "src/g_include.h"' print >> fc print >> fc, MARKER From noreply at buildbot.pypy.org Mon Jan 27 21:17:52 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 27 Jan 2014 21:17:52 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: add a flag, start a test Message-ID: <20140127201752.6DF8E1D236E@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r68965:288f78765ae2 Date: 2014-01-27 22:16 +0200 http://bitbucket.org/pypy/pypy/changeset/288f78765ae2/ Log: add a flag, start a test diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -100,7 +100,7 @@ return ExecutionResult(returncode, stdout, stderr) def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, cfile_precompilation=None): raise NotImplementedError("Pure abstract baseclass") def __repr__(self): diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -50,14 +50,14 @@ return ["-Wl,-exported_symbols_list,%s" % (response_file,)] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, cfile_precompilation=None): # ensure frameworks are passed in the Makefile fs = self._frameworks(eci.frameworks) if len(fs) > 0: # concat (-framework, FrameworkName) pairs self.extra_libs += tuple(map(" ".join, zip(fs[::2], fs[1::2]))) mk = super(Darwin, self).gen_makefile(cfiles, eci, exe_name, path, - shared) + shared, cfile_precompilation) return mk diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -83,7 +83,7 @@ return [entry[2:] for entry in out.split()] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, cfile_precompilation=None): cfiles = self._all_cfiles(cfiles, eci) if path is None: diff 
--git a/rpython/translator/platform/test/test_platform.py b/rpython/translator/platform/test/test_platform.py --- a/rpython/translator/platform/test/test_platform.py +++ b/rpython/translator/platform/test/test_platform.py @@ -86,6 +86,12 @@ res = self.platform.execute(udir.join('test_900_files')) self.check_res(res, '%d\n' %sum(range(900))) + def test_precompiled_headers(self): + # Create an eci that should not use precompiled headers + # Create some cfiles with headers we want precompiled + # Call gen_makefile(cfiles, eci, cfiles_precompiled_headers=[list, of, headers]) + # Make sure it all works + pass def test_nice_errors(self): cfile = udir.join('test_nice_errors.c') diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -249,7 +249,7 @@ def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, cfile_precompilation=None): cfiles = self._all_cfiles(cfiles, eci) if path is None: From noreply at buildbot.pypy.org Mon Jan 27 22:03:55 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 27 Jan 2014 22:03:55 +0100 (CET) Subject: [pypy-commit] pypy kill-running_on_llinterp: Start branch to kill running_on_llinterp. Message-ID: <20140127210355.4C7E11C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-running_on_llinterp Changeset: r68966:d2cca69c128d Date: 2014-01-27 19:41 +0100 http://bitbucket.org/pypy/pypy/changeset/d2cca69c128d/ Log: Start branch to kill running_on_llinterp. From noreply at buildbot.pypy.org Mon Jan 27 22:03:57 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 27 Jan 2014 22:03:57 +0100 (CET) Subject: [pypy-commit] pypy kill-running_on_llinterp: Kill running_on_llinterp. Instead we attach the fakeimpl to the function object and fish it in llinterp. Message-ID: <20140127210357.ABA731C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-running_on_llinterp Changeset: r68968:39eb895a3a29 Date: 2014-01-27 21:58 +0100 http://bitbucket.org/pypy/pypy/changeset/39eb895a3a29/ Log: Kill running_on_llinterp. Instead we attach the fakeimpl to the function object and fish it in llinterp. 
diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -266,8 +266,6 @@ return lltype.Signed malloc_zero_filled = CDefinedIntSymbolic('MALLOC_ZERO_FILLED', default=0) -running_on_llinterp = CDefinedIntSymbolic('RUNNING_ON_LLINTERP', default=1) -# running_on_llinterp is meant to have the value 0 in all backends # ____________________________________________________________ diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -162,34 +162,6 @@ impl = getattr(self, method_name, None) fakeimpl = getattr(self, fake_method_name, self.instance) if impl: - if hasattr(self, fake_method_name): - # If we have both an llimpl and an llfakeimpl, - # we need a wrapper that selects the proper one and calls it - from rpython.tool.sourcetools import func_with_new_name - # Using '*args' is delicate because this wrapper is also - # created for init-time functions like llarena.arena_malloc - # which are called before the GC is fully initialized - args = ', '.join(['arg%d' % i for i in range(len(args_ll))]) - d = {'original_impl': impl, - 's_result': s_result, - 'fakeimpl': fakeimpl, - '__name__': __name__, - } - exec py.code.compile(""" - from rpython.rlib.objectmodel import running_on_llinterp - from rpython.rlib.debug import llinterpcall - from rpython.rlib.jit import dont_look_inside - # note: we say 'dont_look_inside' mostly because the - # JIT does not support 'running_on_llinterp', but in - # theory it is probably right to stop jitting anyway. - @dont_look_inside - def ll_wrapper(%s): - if running_on_llinterp: - return llinterpcall(s_result, fakeimpl, %s) - else: - return original_impl(%s) - """ % (args, args, args)) in d - impl = func_with_new_name(d['ll_wrapper'], name + '_wrapper') if rtyper.annotator.translator.config.translation.sandbox: impl._dont_inline_ = True # store some attributes to the 'impl' function, where @@ -199,6 +171,8 @@ '_name': self.name, '_safe_not_sandboxed': self.safe_not_sandboxed, } + if hasattr(self, fake_method_name): + impl._llfnobjattrs_['_fakeimpl'] = fakeimpl obj = rtyper.getannmixlevel().delayedfunction( impl, signature_args, hop.s_result) else: diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -643,6 +643,14 @@ return frame.eval() def op_direct_call(self, f, *args): + pythonfunction = getattr(f._obj, '_fakeimpl', None) + if pythonfunction is not None: + try: + return pythonfunction(*args) + except: + self.make_llexception() + return + FTYPE = self.llinterpreter.typer.type_system.derefType(lltype.typeOf(f)) return self.perform_call(f, FTYPE.ARGS, args) From noreply at buildbot.pypy.org Mon Jan 27 22:03:56 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 27 Jan 2014 22:03:56 +0100 (CET) Subject: [pypy-commit] pypy kill-running_on_llinterp: Replace test using running_on_llinterp directly by another test that uses running_on_llinterp indirectly through register_external. Message-ID: <20140127210356.7EDAF1C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-running_on_llinterp Changeset: r68967:cd8bd484b255 Date: 2014-01-27 19:58 +0100 http://bitbucket.org/pypy/pypy/changeset/cd8bd484b255/ Log: Replace test using running_on_llinterp directly by another test that uses running_on_llinterp indirectly through register_external. 
diff --git a/rpython/rtyper/test/test_extfunc.py b/rpython/rtyper/test/test_extfunc.py --- a/rpython/rtyper/test/test_extfunc.py +++ b/rpython/rtyper/test/test_extfunc.py @@ -1,10 +1,9 @@ import py -from rpython.rtyper.extfunc import ExtFuncEntry, register_external,\ - is_external, lazy_register from rpython.annotator import model as annmodel from rpython.annotator.annrpython import RPythonAnnotator from rpython.annotator.policy import AnnotatorPolicy +from rpython.rtyper.extfunc import ExtFuncEntry, register_external from rpython.rtyper.test.test_llinterp import interpret class TestExtFuncEntry: @@ -173,4 +172,21 @@ py.test.raises(Exception, a.build_types, g, [[str]]) a.build_types(g, [[str0]]) # Does not raise + def test_register_external_llfakeimpl(self): + def a(i): + return i + def a_llimpl(i): + return i * 2 + def a_llfakeimpl(i): + return i * 3 + register_external(a, [int], int, llimpl=a_llimpl, + llfakeimpl=a_llfakeimpl) + def f(i): + return a(i) + res = interpret(f, [7]) + assert res == 21 + + from rpython.translator.c.test.test_genc import compile + fc = compile(f, [int]) + assert fc(7) == 14 diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py --- a/rpython/rtyper/test/test_rbuiltin.py +++ b/rpython/rtyper/test/test_rbuiltin.py @@ -3,8 +3,7 @@ import py -from rpython.rlib.debug import llinterpcall -from rpython.rlib.objectmodel import instantiate, running_on_llinterp, compute_unique_id, current_object_addr_as_int +from rpython.rlib.objectmodel import instantiate, compute_unique_id, current_object_addr_as_int from rpython.rlib.rarithmetic import (intmask, longlongmask, r_int64, is_valid_int, r_int, r_uint, r_longlong, r_ulonglong) from rpython.rlib.rstring import StringBuilder, UnicodeBuilder @@ -415,26 +414,6 @@ res = self.interpret(fn, [3.25]) assert res == 7.25 - def test_debug_llinterpcall(self): - S = lltype.Struct('S', ('m', lltype.Signed)) - SPTR = lltype.Ptr(S) - def foo(n): - "NOT_RPYTHON" - s = lltype.malloc(S, immortal=True) - s.m = eval("n*6", locals()) - return s - def fn(n): - if running_on_llinterp: - return llinterpcall(SPTR, foo, n).m - else: - return 321 - res = self.interpret(fn, [7]) - assert res == 42 - from rpython.translator.c.test.test_genc import compile - f = compile(fn, [int]) - res = f(7) - assert res == 321 - def test_id(self): class A: pass From noreply at buildbot.pypy.org Mon Jan 27 22:03:58 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 27 Jan 2014 22:03:58 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Add stub for debug_forked(). Where do all these new operations come from? Message-ID: <20140127210358.C02DF1C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68969:20ad6157d777 Date: 2014-01-27 22:02 +0100 http://bitbucket.org/pypy/pypy/changeset/20ad6157d777/ Log: Add stub for debug_forked(). Where do all these new operations come from? 
diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1093,6 +1093,9 @@ def op_debug_flush(self, result, *args): pass + def op_debug_forked(self, result, *args): + pass + def op_track_alloc_start(self, result, *args): pass From noreply at buildbot.pypy.org Mon Jan 27 23:40:30 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 27 Jan 2014 23:40:30 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Make sure pypy_debug_catch_fatal_exception is defined before its use. Message-ID: <20140127224030.18B171C0134@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r68970:dd59cd5364eb Date: 2014-01-27 23:39 +0100 http://bitbucket.org/pypy/pypy/changeset/dd59cd5364eb/ Log: Make sure pypy_debug_catch_fatal_exception is defined before its use. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1638,7 +1638,11 @@ 'ref': RefcountGCPolicy }[translator.config.translation.gctransformer](self) self.transformed_graphs = set() - self.sources = [] + self.sources = [str(py.code.Source(r''' + void pypy_debug_catch_fatal_exception(void) { + fprintf(stderr, "Fatal RPython error\n"); + abort(); + }'''))] self.ecis = [] self.entrypoints = set() @@ -1750,11 +1754,6 @@ exports.clear() def _compile(self, shared=False): - self.sources.append(str(py.code.Source(r''' - void pypy_debug_catch_fatal_exception(void) { - fprintf(stderr, "Fatal RPython error\n"); - abort(); - }'''))) eci = ExternalCompilationInfo( includes=['stdio.h', 'stdlib.h'], separate_module_sources=['\n'.join(self.sources)], From noreply at buildbot.pypy.org Tue Jan 28 01:46:34 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 28 Jan 2014 01:46:34 +0100 (CET) Subject: [pypy-commit] pypy kill-running_on_llinterp: Readd some of the wrapper logic because we might get a lltype funcptr as lltypeimpl. In any of these cases we also get lltypefakeimpl, that's why it worked before this branch (a wrapper was generated if both lltypeimpl and lltypefakeimpl were given). Message-ID: <20140128004634.9E0C51D236E@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-running_on_llinterp Changeset: r68971:c7a94af00283 Date: 2014-01-28 01:45 +0100 http://bitbucket.org/pypy/pypy/changeset/c7a94af00283/ Log: Readd some of the wrapper logic because we might get a lltype funcptr as lltypeimpl. In any of these cases we also get lltypefakeimpl, that's why it worked before this branch (a wrapper was generated if both lltypeimpl and lltypefakeimpl were given). diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -5,6 +5,7 @@ from rpython.annotator.signature import annotation import py, sys +from types import FunctionType class extdef(object): @@ -162,6 +163,20 @@ impl = getattr(self, method_name, None) fakeimpl = getattr(self, fake_method_name, self.instance) if impl: + if not isinstance(impl, FunctionType): + # We can't add the _fakeimpl attribute on lltype function ptrs, + # so instead we create a wrapper with that attribute. 
+ from rpython.tool.sourcetools import func_with_new_name + # Using '*args' is delicate because this wrapper is also + # created for init-time functions like llarena.arena_malloc + # which are called before the GC is fully initialized + args = ', '.join(['arg%d' % i for i in range(len(args_ll))]) + d = {'original_impl': impl, '__name__': __name__} + exec py.code.compile(""" + def ll_wrapper(%s): + return original_impl(%s) + """ % (args, args)) in d + impl = func_with_new_name(d['ll_wrapper'], name + '_wrapper') if rtyper.annotator.translator.config.translation.sandbox: impl._dont_inline_ = True # store some attributes to the 'impl' function, where From noreply at buildbot.pypy.org Tue Jan 28 02:31:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 28 Jan 2014 02:31:20 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup numpy array dot Message-ID: <20140128013120.6841C1D246C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68972:a4ca1e94409a Date: 2014-01-27 19:59 -0500 http://bitbucket.org/pypy/pypy/changeset/a4ca1e94409a/ Log: cleanup numpy array dot diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -903,8 +903,8 @@ w_res = self.descr_mul(space, other) assert isinstance(w_res, W_NDimArray) return w_res.descr_sum(space, space.wrap(-1), out) - dtype = interp_ufuncs.find_binop_result_dtype(space, - self.get_dtype(), other.get_dtype()) + dtype = interp_ufuncs.find_binop_result_dtype(space, self.get_dtype(), + other.get_dtype()) if self.get_size() < 1 and other.get_size() < 1: # numpy compatability return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) @@ -912,25 +912,27 @@ out_shape, other_critical_dim = _match_dot_shapes(space, self, other) if out: matches = True - if len(out.get_shape()) != len(out_shape): + if dtype != out.get_dtype(): + matches = False + elif not out.implementation.order == "C": + matches = False + elif len(out.get_shape()) != len(out_shape): matches = False else: for i in range(len(out_shape)): if out.get_shape()[i] != out_shape[i]: matches = False break - if dtype != out.get_dtype(): - matches = False - if not out.implementation.order == "C": - matches = False if not matches: raise OperationError(space.w_ValueError, space.wrap( - 'output array is not acceptable (must have the right type, nr dimensions, and be a C-Array)')) + 'output array is not acceptable (must have the right type, ' + 'nr dimensions, and be a C-Array)')) w_res = out + w_res.fill(space, self.get_dtype().coerce(space, None)) else: w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) # This is the place to add fpypy and blas - return loop.multidim_dot(space, self, other, w_res, dtype, + return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) def descr_mean(self, space, __args__): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -146,8 +146,7 @@ while not obj_iter.done(): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, - calc_dtype=calc_dtype, - ) + calc_dtype=calc_dtype) rval = obj_iter.getitem().convert_to(space, calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval @@ -172,8 +171,7 @@ shapelen = len(obj.get_shape()) while not obj_iter.done(): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, - 
dtype=calc_dtype, - ) + dtype=calc_dtype) rval = obj_iter.getitem().convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) out_iter.setitem(cur_value) @@ -271,8 +269,7 @@ iter.next() shapelen = len(arr.get_shape()) while not iter.done(): - arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, - ) + arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) w_val = iter.getitem() new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val) if dtype.itemtype.ne(new_best, cur_best): @@ -311,6 +308,7 @@ if i != right_critical_dim] right_skip = range(len(left_shape) - 1) result_skip = [len(result.get_shape()) - (len(right_shape) > 1)] + assert result.get_dtype() == dtype outi = result.create_dot_iter(broadcast_shape, result_skip) lefti = left.create_dot_iter(broadcast_shape, left_skip) righti = right.create_dot_iter(broadcast_shape, right_skip) @@ -318,10 +316,10 @@ dot_driver.jit_merge_point(dtype=dtype) lval = lefti.getitem().convert_to(space, dtype) rval = righti.getitem().convert_to(space, dtype) - outval = outi.getitem().convert_to(space, dtype) + outval = outi.getitem() v = dtype.itemtype.mul(lval, rval) - value = dtype.itemtype.add(v, outval).convert_to(space, dtype) - outi.setitem(value) + v = dtype.itemtype.add(v, outval) + outi.setitem(v) outi.next() righti.next() lefti.next() @@ -652,8 +650,8 @@ out_iter = out.create_iter(shape) while not arr_iter.done(): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = dtype.itemtype.round(arr_iter.getitem().convert_to(space, dtype), - decimals) + w_v = arr_iter.getitem().convert_to(space, dtype) + w_v = dtype.itemtype.round(w_v, decimals) out_iter.setitem(w_v) arr_iter.next() out_iter.next() diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -56,6 +56,10 @@ b = arange(12).reshape(4, 3) c = a.dot(b) assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + c = a.dot(b.astype(float)) + assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + c = a.astype(float).dot(b) + assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() a = arange(24).reshape(2, 3, 4) raises(ValueError, "a.dot(a)") @@ -91,9 +95,11 @@ out = arange(9).reshape(3, 3) c = dot(a, b, out=out) assert (c == out).all() - out = arange(9,dtype=float).reshape(3, 3) + assert (c == [[42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + out = arange(9, dtype=float).reshape(3, 3) exc = raises(ValueError, dot, a, b, out) - assert exc.value[0].find('not acceptable') > 0 + assert exc.value[0] == ('output array is not acceptable (must have the ' + 'right type, nr dimensions, and be a C-Array)') def test_choose_basic(self): from numpypy import array From noreply at buildbot.pypy.org Tue Jan 28 04:20:30 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 28 Jan 2014 04:20:30 +0100 (CET) Subject: [pypy-commit] pypy refactor-buffer-api: hg merge default Message-ID: <20140128032030.93F9F1D23FF@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r68973:94dca846c67a Date: 2014-01-28 04:14 +0100 http://bitbucket.org/pypy/pypy/changeset/94dca846c67a/ Log: hg merge default diff too long, truncating to 2000 out of 30700 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. 
-PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/lib-python/2.7/ctypes/test/test_python_api.py b/lib-python/2.7/ctypes/test/test_python_api.py --- a/lib-python/2.7/ctypes/test/test_python_api.py +++ b/lib-python/2.7/ctypes/test/test_python_api.py @@ -73,6 +73,7 @@ del pyobj self.assertEqual(grc(s), ref) + @xfail def test_PyOS_snprintf(self): PyOS_snprintf = pythonapi.PyOS_snprintf PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py --- a/lib-python/2.7/socket.py +++ b/lib-python/2.7/socket.py @@ -335,9 +335,10 @@ s = self._sock self._sock = None if s is not None: - s._drop() if self._close: s.close() + else: + s._drop() def __del__(self): try: diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. 
See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -993,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys try: from __pypy__ import builtinify diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -5,7 +5,7 @@ from 
_ctypes.basics import is_struct_shape from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys import traceback @@ -328,21 +328,23 @@ raise ValueError( "native COM method call without 'this' parameter" ) - thisarg = cast(args[0], POINTER(POINTER(c_void_p))) - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args[1:], kwargs) - newargs.insert(0, args[0].value) + thisvalue = args.pop(0) + thisarg = cast(thisvalue, POINTER(POINTER(c_void_p))) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) + args.insert(0, thisvalue) + newargs.insert(0, thisvalue.value) argtypes.insert(0, c_void_p) else: thisarg = None - keepalives, newargs, argtypes, outargs = self._convert_args(argtypes, - args, kwargs) + keepalives, newargs, argtypes, outargs, errcheckargs = ( + self._convert_args(argtypes, args, kwargs)) funcptr = self._getfuncptr(argtypes, self._restype_, thisarg) result = self._call_funcptr(funcptr, *newargs) - result = self._do_errcheck(result, args) + result, forced = self._do_errcheck(result, errcheckargs) - if not outargs: + if not outargs or forced: return result from ctypes import c_void_p @@ -377,22 +379,22 @@ set_last_error(tmp) # try: - return self._build_result(self._restype_, result, newargs) + return self._build_result(self._restype_, result) finally: funcptr.free_temp_buffers() def _do_errcheck(self, result, args): # The 'errcheck' protocol if self._errcheck_: - v = self._errcheck_(result, self, args) + v = self._errcheck_(result, self, tuple(args)) # If the errcheck funtion failed, let it throw # If the errcheck function returned newargs unchanged, # continue normal processing. # If the errcheck function returned something else, # use that as result. 
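For reference, the protocol that this `_do_errcheck` refactoring preserves is the documented ctypes errcheck protocol: if the callable returns the argument tuple unchanged, normal result processing continues; any other return value replaces the result. A minimal sketch, assuming a Unix-like system where find_library("c") resolves the system libc:

    import os
    import ctypes
    import ctypes.util

    libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
    libc.dup.restype = ctypes.c_int
    libc.dup.argtypes = [ctypes.c_int]

    def check(result, func, args):
        if result == -1:
            # failure path: raise instead of returning a result
            raise OSError(ctypes.get_errno(), "dup failed")
        return args       # returned unchanged -> keep the normal result

    libc.dup.errcheck = check
    fd = libc.dup(0)      # new file descriptor, or OSError on failure
    os.close(fd)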
if v is not args: - return v - return result + return v, True + return result, False def _getfuncptr_fromaddress(self, argtypes, restype): address = self._get_address() @@ -495,16 +497,16 @@ newargtypes = [] total = len(args) paramflags = self._paramflags - inargs_idx = 0 if not paramflags and total < len(argtypes): raise TypeError("not enough arguments") - for i, argtype in enumerate(argtypes): - flag = 0 - name = None - defval = marker - if paramflags: + if paramflags: + errcheckargs = [] + inargs_idx = 0 + for i, argtype in enumerate(argtypes): + flag = 0 + defval = marker paramflag = paramflags[i] paramlen = len(paramflag) name = None @@ -519,6 +521,7 @@ val = defval if val is marker: val = 0 + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) @@ -536,27 +539,31 @@ raise TypeError("required argument '%s' missing" % name) else: raise TypeError("not enough arguments") + errcheckargs.append(val) keepalive, newarg, newargtype = self._conv_param(argtype, val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) elif flag == PARAMFLAG_FOUT: if defval is not marker: - outargs.append(defval) + val = defval keepalive, newarg, newargtype = self._conv_param(argtype, defval) else: import ctypes val = argtype._type_() - outargs.append(val) keepalive = None newarg = ctypes.byref(val) newargtype = type(newarg) + errcheckargs.append(val) + outargs.append(val) keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) else: raise ValueError("paramflag %d not yet implemented" % flag) - else: + else: + errcheckargs = args + for i, argtype in enumerate(argtypes): try: keepalive, newarg, newargtype = self._conv_param(argtype, args[i]) except (UnicodeError, TypeError, ValueError), e: @@ -564,7 +571,6 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - inargs_idx += 1 if len(newargs) < len(args): extra = args[len(newargs):] @@ -576,7 +582,7 @@ keepalives.append(keepalive) newargs.append(newarg) newargtypes.append(newargtype) - return keepalives, newargs, newargtypes, outargs + return keepalives, newargs, newargtypes, outargs, errcheckargs @staticmethod def _is_primitive(argtype): @@ -601,7 +607,7 @@ retval = restype._CData_retval(buf) return retval - def _build_result(self, restype, result, argsandobjs): + def _build_result(self, restype, result): """Build the function result: If there is no OUT parameter, return the actual function result If there is one OUT parameter, return it @@ -611,11 +617,6 @@ # i.e. an array of ints. Now it takes a result, which is already a # python object. All places that do "resbuffer[0]" should check that # result is actually an int and just use it. - # - # Also, argsandobjs used to be "args" in __call__, now it's "newargs" - # (i.e., the already unwrapped objects). 
It's used only when we have a - # PARAMFLAG_FOUT and it's probably wrong, I'll fix it when I find a - # failing test retval = None @@ -704,7 +705,7 @@ funcptr = self._getfuncptr(argtypes, restype, thisarg) try: result = self._call_funcptr(funcptr, *args) - result = self._do_errcheck(result, args) + result, _ = self._do_errcheck(result, args) except (TypeError, ArgumentError, UnicodeDecodeError): assert self._slowpath_allowed return CFuncPtr.__call__(self, *args) diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import sizeof, byref, as_ffi_pointer diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi import weakref import sys diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -2,6 +2,8 @@ import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject +from _ctypes.array import Array +from _ctypes.pointer import _Pointer import inspect def names_and_fields(self, _fields_, superclass, anonymous_fields=None): @@ -104,8 +106,11 @@ def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = keepalive_key(self.num) + key = keepalive_key(self.num) + if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array): + # if our value is an Array we need the whole thing alive + store_reference(obj, key, cobj) + elif ensure_objects(cobj) is not None: store_reference(obj, key, cobj._objects) arg = cobj._get_buffer_value() if fieldtype._fficompositesize is not None: diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,2 @@ +# Backward compatibility hack +from _rawffi.alt import * diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -33,14 +33,13 @@ # set link options output_filename = modulename + _get_c_extension_suffix() if sys.platform == 'win32': - # XXX libpypy-c.lib is currently not installed automatically - library = os.path.join(thisdir, '..', 'include', 'libpypy-c') + # XXX pyconfig.h uses a pragma to link to the import library, + # which is currently python27.lib + library = os.path.join(thisdir, '..', 'include', 'python27') if not os.path.exists(library + '.lib'): - #For a nightly build - library = os.path.join(thisdir, '..', 'include', 'python27') - if not os.path.exists(library + '.lib'): - # For a local translation - library = os.path.join(thisdir, '..', 'pypy', 'goal', 'libpypy-c') + # For a local translation or nightly build + library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') + assert os.path.exists(library + '.lib'),'Could not find import library "%s"' % library libraries = [library, 'oleaut32'] extra_ldargs = ['/MANIFEST', # needed for VC10 '/EXPORT:init' + modulename] diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class 
sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. @@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -330,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -992,13 +1000,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1021,12 +1034,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1176,11 +1194,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1193,7 +1219,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = 
_ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1312,7 +1338,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- /dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,29 @@ + +import struct + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + if size == 1: + return struct.unpack_from("B", buffer(cp)[i:])[0] + elif size == 2: + return struct.unpack_from("H", buffer(cp)[i * 2:])[0] + elif size == 4: + return struct.unpack_from("I", buffer(cp)[i * 4:])[0] diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8" -__version_info__ = (0, 8) +__version__ = "0.8.1" +__version_info__ = (0, 8, 1) diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -878,7 +878,6 @@ month = self._month if day is None: day = self._day - year, month, day = _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. 
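The replace() hunks above and below drop the explicit _check_date_fields/_check_time_fields calls; as far as one can tell from the pure-Python lib_pypy/datetime.py, the constructor invoked on the final line of each replace() still performs the same range checks, so out-of-range values should keep being rejected. A small sanity check using plain stdlib datetime semantics:

    from datetime import date

    d = date(2014, 1, 28)
    print d.replace(month=2)        # prints 2014-02-28
    try:
        d.replace(month=13)         # the date() constructor still validates
    except ValueError, e:
        print "rejected:", e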
@@ -1389,8 +1388,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __nonzero__(self): @@ -1608,9 +1605,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py --- a/lib_pypy/pyrepl/simple_interact.py +++ b/lib_pypy/pyrepl/simple_interact.py @@ -63,3 +63,6 @@ except KeyboardInterrupt: console.write("\nKeyboardInterrupt\n") console.resetbuffer() + except MemoryError: + console.write("\nMemoryError\n") + console.resetbuffer() diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -34,14 +34,14 @@ "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "micronumpy", "_ffi", + "_collections", "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "_md5", "cStringIO", "array", "_ffi", + "struct", "_md5", "cStringIO", "array", "binascii", # the following are needed for pyrepl (and hence for the # interactive prompt/pdb) @@ -96,7 +96,6 @@ # no _rawffi if importing rpython.rlib.clibffi raises ImportError # or CompilationError or py.test.skip.Exception "_rawffi" : ["rpython.rlib.clibffi"], - "_ffi" : ["rpython.rlib.clibffi"], "zlib" : ["rpython.rlib.rzlib"], "bz2" : ["pypy.module.bz2.interp_bz2"], diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -109,6 +109,4 @@ .. _`rpython/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ .. _`rpython/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/ .. _`rpython/translator/c/src/stacklet/stacklet.h`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/stacklet.h -.. _`rpython/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/cli/ -.. _`rpython/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/jvm/ .. _`rpython/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/ diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -38,7 +38,7 @@ # General information about the project. project = u'PyPy' -copyright = u'2013, The PyPy Project' +copyright = u'2014, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -47,7 +47,7 @@ # The short X.Y version. version = '2.2' # The full version, including alpha/beta/rc tags. -release = '2.2.0' +release = '2.2.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pypy/doc/config/translation.lldebug0.txt b/pypy/doc/config/translation.lldebug0.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/translation.lldebug0.txt @@ -0,0 +1,1 @@ +Like lldebug, but in addition compile C files with -O0 diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -83,7 +83,7 @@ _winreg -* Supported by being rewritten in pure Python (possibly using ``ctypes``): +* Supported by being rewritten in pure Python (possibly using ``cffi``): see the `lib_pypy/`_ directory. Examples of modules that we support this way: ``ctypes``, ``cPickle``, ``cmath``, ``dbm``, ``datetime``... Note that some modules are both in there and in the list above; @@ -316,5 +316,4 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). - .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,7 +72,13 @@ Here is a list of the limitations and missing features of the current implementation: -* No support for ``PyXxx`` functions from ``libpython``, for obvious reasons. +* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer + of PyPy, at your own risks and without doing anything sensible about + the GIL. Since PyPy 2.3, these functions are also named with an extra + "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, + but it might more or less work in simple cases if you do. (Obviously, + assuming the PyObject pointers you get have any particular fields in + any particular order is just going to crash.) * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,119 +5,68 @@ This document tries to explain how to interface the PyPy python interpreter with any external library. -Note: We try to describe state-of-the art, but it -might fade out of date as this is the front on which things are changing -in pypy rapidly. +Right now, there are the following possibilities of providing +third-party modules for the PyPy python interpreter (in order of +usefulness): -Possibilities -============= +* Write them in pure Python and use CFFI_. -Right now, there are three possibilities of providing third-party modules -for the PyPy python interpreter (in order of usefulness): +* Write them in pure Python and use ctypes_. -* Write them in pure python and use ctypes, see ctypes_ - section +* Write them in C++ and bind them through Reflex_. -* Write them in pure python and use direct libffi low-level bindings, See - \_ffi_ module description. +* Write them in as `RPython mixed modules`_. -* Write them in RPython as mixedmodule_, using *rffi* as bindings. -* Write them in C++ and bind them through Reflex_ +CFFI +==== -.. _ctypes: #CTypes -.. _\_ffi: #LibFFI -.. _mixedmodule: #Mixed Modules +CFFI__ is the recommended way. It is a way to write pure Python code +that accesses C libraries. The idea is to support either ABI- or +API-level access to C --- so that you can sanely access C libraries +without depending on details like the exact field order in the C +structures or the numerical value of all the constants. 
It works on +both CPython (as a separate ``pip install cffi``) and on PyPy, where it +is included by default. + +PyPy's JIT does a quite reasonable job on the Python code that call C +functions or manipulate C pointers with CFFI. (As of PyPy 2.2.1, it +could still be improved, but is already good.) + +See the documentation here__. + +.. __: http://cffi.readthedocs.org/ +.. __: http://cffi.readthedocs.org/ + CTypes ====== -The ctypes module in PyPy is ready to use. -It's goal is to be as-compatible-as-possible with the -`CPython ctypes`_ version. Right now it's able to support large examples, -such as pyglet. PyPy is planning to have a 100% compatible ctypes -implementation, without the CPython C-level API bindings (so it is very -unlikely that direct object-manipulation trickery through this API will work). +The goal of the ctypes module of PyPy is to be as compatible as possible +with the `CPython ctypes`_ version. It works for large examples, such +as pyglet. PyPy's implementation is not strictly 100% compatible with +CPython, but close enough for most cases. -We also provide a `ctypes-configure`_ for overcoming the platform dependencies, -not relying on the ctypes codegen. This tool works by querying gcc about -platform-dependent details (compiling small snippets of C code and running -them), so it'll benefit not pypy-related ctypes-based modules as well. +We also used to provide ``ctypes-configure`` for some API-level access. +This is now viewed as a precursor of CFFI, which you should use instead. +More (but older) information is available here__. +Also, ctypes' performance is not as good as CFFI's. -ctypes call are optimized by the JIT and the resulting machine code contains a -direct call to the target C function. However, due to the very dynamic nature -of ctypes, some overhead over a bare C call is still present, in particular to -check/convert the types of the parameters. Moreover, even if most calls are -optimized, some cannot and thus need to follow the slow path, not optimized by -the JIT. +.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +.. __: ctypes-implementation.html -.. _`ctypes-configure`: ctypes-implementation.html#ctypes-configure -.. _`CPython ctypes`: http://docs.python.org/library/ctypes.html +PyPy implements ctypes as pure Python code around two built-in modules +called ``_ffi`` and ``_rawffi``, which give a very low-level binding to +the C library libffi_. Nowadays it is not recommended to use directly +these two modules. -Pros ----- +.. _libffi: http://sourceware.org/libffi/ -Stable, CPython-compatible API. Most calls are fast, optimized by JIT. - -Cons ----- - -Problems with platform-dependency (although we partially solve -those). Although the JIT optimizes ctypes calls, some overhead is still -present. The slow-path is very slow. - - -LibFFI -====== - -Mostly in order to be able to write a ctypes module, we developed a very -low-level libffi bindings called ``_ffi``. (libffi is a C-level library for dynamic calling, -which is used by CPython ctypes). This library provides stable and usable API, -although it's API is a very low-level one. It does not contain any -magic. It is also optimized by the JIT, but has much less overhead than ctypes. - -Pros ----- - -It Works. Probably more suitable for a delicate code where ctypes magic goes -in a way. All calls are optimized by the JIT, there is no slow path as in -ctypes. - -Cons ----- - -It combines disadvantages of using ctypes with disadvantages of using mixed -modules. 
CPython-incompatible API, very rough and low-level. - -Mixed Modules -============= - -This is the most advanced and powerful way of writing extension modules. -It has some serious disadvantages: - -* a mixed module needs to be written in RPython, which is far more - complicated than Python (XXX link) - -* due to lack of separate compilation (as of July 2011), each - compilation-check requires to recompile whole PyPy python interpreter, - which takes 0.5-1h. We plan to solve this at some point in near future. - -* although rpython is a garbage-collected language, the border between - C and RPython needs to be managed by hand (each object that goes into the - C level must be explicitly freed). - -Some documentation is available `here`_ - -.. _`here`: rffi.html - -XXX we should provide detailed docs about lltype and rffi, especially if we - want people to follow that way. Reflex ====== -This method is still experimental and is being exercised on a branch, -`reflex-support`_, which adds the `cppyy`_ module. +This method is still experimental. It adds the `cppyy`_ module. The method works by using the `Reflex package`_ to provide reflection information of the C++ code, which is then used to automatically generate bindings at runtime. @@ -168,3 +117,15 @@ to work around it in python or with a C++ helper function. Although Reflex works on various platforms, the bindings with PyPy have only been tested on Linux. + + +RPython Mixed Modules +===================== + +This is the internal way to write built-in extension modules in PyPy. +It cannot be used by any 3rd-party module: the extension modules are +*built-in*, not independently loadable DLLs. + +This is reserved for special cases: it gives direct access to e.g. the +details of the JIT, allowing us to tweak its interaction with user code. +This is how the numpy module is being developed. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -72,13 +72,13 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf -.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf -.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf +.. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf +.. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf .. _`How to *not* write Virtual Machines for Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf +.. 
_`Automatic JIT Compiler Generation with Runtime Partial Evaluation`: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07 .. _`EU Reports`: index-report.html .. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -426,25 +426,12 @@ Could we use LLVM? ------------------ -In theory yes. But we tried to use it 5 or 6 times already, as a -translation backend or as a JIT backend --- and failed each time. +There is a (static) translation backend using LLVM in the branch +``llvm-translation-backend``. It can translate PyPy with or without the JIT on +Linux. -In more details: using LLVM as a (static) translation backend is -pointless nowadays because you can generate C code and compile it with -clang. (Note that compiling PyPy with clang gives a result that is not -faster than compiling it with gcc.) We might in theory get extra -benefits from LLVM's GC integration, but this requires more work on the -LLVM side before it would be remotely useful. Anyway, it could be -interfaced via a custom primitive in the C code. - -On the other hand, using LLVM as our JIT backend looks interesting as -well --- but again we made an attempt, and it failed: LLVM has no way to -patch the generated machine code. - -So the position of the core PyPy developers is that if anyone wants to -make an N+1'th attempt with LLVM, they are welcome, and will be happy to -provide help in the IRC channel, but they are left with the burden of proof -that it works. +Using LLVM as our JIT backend looks interesting as well -- we made an attempt, +but it failed: LLVM has no way to patch the generated machine code. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -210,4 +210,12 @@ are preserved. If the object dies then the pre-reserved location becomes free garbage, to be collected at the next major collection. +The exact name of this GC is either `minimark` or `incminimark`. The +latter is a version that does major collections incrementally (i.e. one +major collection is split along some number of minor collections, rather +than being done all at once after a specific minor collection). The +default is `incminimark`, as it seems to have a very minimal impact on +performance and memory usage at the benefit of avoiding the long pauses +of `minimark`. + .. include:: _ref.txt diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst --- a/pypy/doc/gc_info.rst +++ b/pypy/doc/gc_info.rst @@ -6,7 +6,7 @@ Minimark -------- -PyPy's default ``minimark`` garbage collector is configurable through +PyPy's default ``incminimark`` garbage collector is configurable through several environment variables: ``PYPY_GC_NURSERY`` @@ -14,6 +14,17 @@ Defaults to 1/2 of your cache or ``4M``. Small values (like 1 or 1KB) are useful for debugging. +``PYPY_GC_NURSERY_CLEANUP`` + The interval at which nursery is cleaned up. Must + be smaller than the nursery size and bigger than the + biggest object we can allotate in the nursery. + +``PYPY_GC_INCREMENT_STEP`` + The size of memory marked during the marking step. Default is size of + nursery times 2. 
If you mark it too high your GC is not incremental at + all. The minimum is set to size that survives minor collection times + 1.5 so we reclaim anything all the time. + ``PYPY_GC_MAJOR_COLLECT`` Major collection memory factor. Default is ``1.82``, which means trigger a major collection when the diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.2.0`_: the latest official release +* `Release 2.2.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.0`: http://pypy.org/download.html +.. _`Release 2.2.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. _`potential project ideas`: project-ideas.html diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -74,6 +74,10 @@ The actual details would be rather differen in PyPy, but we would like to have the same optimization implemented. +Or maybe not. We can also play around with the idea of using a single +representation: as a byte string in utf-8. (This idea needs some extra logic +for efficient indexing, like a cache.) + .. _`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/ Translation Toolchain diff --git a/pypy/doc/release-2.2.1.rst b/pypy/doc/release-2.2.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.2.1.rst @@ -0,0 +1,47 @@ +======================================= +PyPy 2.2.1 - Incrementalism.1 +======================================= + +We're pleased to announce PyPy 2.2.1, which targets version 2.7.3 of the Python +language. This is a bugfix release over 2.2. + +You can download the PyPy 2.2.1 release here: + + http://pypy.org/download.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.2 and cpython 2.7.2`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows +32, or ARM (ARMv6 or ARMv7, with VFPv3). + +Work on the native Windows 64 is still stalling, we would welcome a volunteer +to handle that. + +.. _`pypy 2.2 and cpython 2.7.2`: http://speed.pypy.org + +Highlights +========== + +This is a bugfix release. The most important bugs fixed are: + +* an issue in sockets' reference counting emulation, showing up + notably when using the ssl module and calling ``makefile()``. + +* Tkinter support on Windows. + +* If sys.maxunicode==65535 (on Windows and maybe OS/X), the json + decoder incorrectly decoded surrogate pairs. + +* some FreeBSD fixes. + +Note that CFFI 0.8.1 was released. Both versions 0.8 and 0.8.1 are +compatible with both PyPy 2.2 and 2.2.1. + + +Cheers, +Armin Rigo & everybody diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -15,3 +15,45 @@ .. branch: armhf-singlefloat JIT support for singlefloats on ARM using the hardfloat ABI + +.. branch: voidtype_strformat +Better support for record numpy arrays + +.. branch: osx-eci-frameworks-makefile +OSX: Ensure frameworks end up in Makefile when specified in External compilation info + +.. 
branch: less-stringly-ops +Use subclasses of SpaceOperation instead of SpaceOperator objects. +Random cleanups in flowspace and annotator. + +.. branch: ndarray-buffer +adds support for the buffer= argument to the ndarray ctor + +.. branch: better_ftime_detect2 +On OpenBSD do not pull in libcompat.a as it is about to be removed. +And more generally, if you have gettimeofday(2) you will not need ftime(3). + +.. branch: timeb_h +Remove dependency upon on OpenBSD. This will be disappearing +along with libcompat.a. + +.. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 +Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -26,7 +26,12 @@ if branch == 'default': branch = 'trunk' -filename = 'pypy-c-jit-latest-%s.tar.bz2' % arch +if '--nojit' in sys.argv: + kind = 'nojit' +else: + kind = 'jit' + +filename = 'pypy-c-%s-latest-%s.tar.bz2' % (kind, arch) url = 'http://buildbot.pypy.org/nightly/%s/%s' % (branch, filename) tmp = py.path.local.mkdtemp() mydir = tmp.chdir() diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1234,6 +1234,8 @@ flags |= consts.CO_NESTED if scope.is_generator: flags |= consts.CO_GENERATOR + if scope.has_yield_inside_try: + flags |= consts.CO_YIELD_INSIDE_TRY if scope.has_variable_arg: flags |= consts.CO_VARARGS if scope.has_keywords_arg: diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -17,6 +17,7 @@ CO_FUTURE_UNICODE_LITERALS = 0x20000 #pypy specific: CO_KILL_DOCSTRING = 0x100000 +CO_YIELD_INSIDE_TRY = 0x200000 PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -43,6 +43,7 @@ self.child_has_free = False self.nested = False self.doc_removable = False + self._in_try_body_depth = 0 def lookup(self, name): """Find the scope of identifier 'name'.""" @@ -75,6 +76,14 @@ self.varnames.append(mangled) return mangled + def note_try_start(self, try_node): + """Called when a try is found, before visiting the body.""" + self._in_try_body_depth += 1 + + def note_try_end(self, try_node): + """Called after visiting a try body.""" + self._in_try_body_depth -= 1 + def note_yield(self, yield_node): """Called when a yield is found.""" raise SyntaxError("'yield' outside function", yield_node.lineno, @@ -210,6 +219,7 @@ self.has_variable_arg = False self.has_keywords_arg = False self.is_generator = False + self.has_yield_inside_try = False self.optimized = True 
self.return_with_value = False self.import_star = None @@ -220,6 +230,8 @@ raise SyntaxError("'return' with argument inside generator", self.ret.lineno, self.ret.col_offset) self.is_generator = True + if self._in_try_body_depth > 0: + self.has_yield_inside_try = True def note_return(self, ret): if ret.value: @@ -463,7 +475,12 @@ self.scope.new_temporary_name() if wih.optional_vars: self.scope.new_temporary_name() - ast.GenericASTVisitor.visit_With(self, wih) + wih.context_expr.walkabout(self) + if wih.optional_vars: + wih.optional_vars.walkabout(self) + self.scope.note_try_start(wih) + self.visit_sequence(wih.body) + self.scope.note_try_end(wih) def visit_arguments(self, arguments): scope = self.scope @@ -505,3 +522,16 @@ else: role = SYM_ASSIGNED self.note_symbol(name.id, role) + + def visit_TryExcept(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.handlers) + self.visit_sequence(node.orelse) + + def visit_TryFinally(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.finalbody) diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.interpreter.astcompiler import codegen, astbuilder, symtable, optimize from pypy.interpreter.pyparser import pyparse from pypy.interpreter.pyparser.test import expressions @@ -867,6 +867,9 @@ class AppTestCompiler: + def setup_class(cls): + cls.w_maxunicode = cls.space.wrap(sys.maxunicode) + def test_docstring_not_loaded(self): import StringIO, dis, sys ns = {} @@ -911,7 +914,17 @@ l = [a for a in Foo()] assert hint_called[0] assert l == list(range(5)) - + + def test_unicode_in_source(self): + import sys + d = {} + exec '# -*- coding: utf-8 -*-\n\nu = u"\xf0\x9f\x92\x8b"' in d + if sys.maxunicode > 65535 and self.maxunicode > 65535: + expected_length = 1 + else: + expected_length = 2 + assert len(d['u']) == expected_length + class TestOptimizations: def count_instructions(self, source): diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -346,6 +346,25 @@ assert exc.msg == "'return' with argument inside generator" scp = self.func_scope("def f():\n return\n yield x") + def test_yield_inside_try(self): + scp = self.func_scope("def f(): yield x") + assert not scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n except: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n finally: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n with x: yield y") + assert scp.has_yield_inside_try + + def test_yield_outside_try(self): + for input in ("try: pass\n except: pass", + "try: pass\n except: yield y", + "try: pass\n finally: pass", + "try: pass\n finally: yield y", + "with x: pass"): + input = "def f():\n yield y\n %s\n yield y" % (input,) + assert not self.func_scope(input).has_yield_inside_try + def test_return(self): for input in ("class x: return", "return"): exc = py.test.raises(SyntaxError, self.func_scope, input).value diff --git a/pypy/interpreter/baseobjspace.py 
b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -240,6 +240,10 @@ msg = "__int__ returned non-int (type '%T')" raise operationerrfmt(space.w_TypeError, msg, w_result) + def ord(self, space): + msg = "ord() expected string of length 1, but %T found" + raise operationerrfmt(space.w_TypeError, msg, self) + def __spacebind__(self, space): return self @@ -914,7 +918,7 @@ """ return self.unpackiterable(w_iterable, expected_length) - def listview_str(self, w_list): + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. May return None anyway. @@ -948,7 +952,7 @@ """ return (None, None) - def newlist_str(self, list_s): + def newlist_bytes(self, list_s): return self.newlist([self.wrap(s) for s in list_s]) def newlist_unicode(self, list_u): @@ -1402,6 +1406,9 @@ # This is here mostly just for gateway.int_unwrapping_space_method(). return bool(self.int_w(w_obj)) + def ord(self, w_obj): + return w_obj.ord(self) + # This is all interface for gateway.py. def gateway_int_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py --- a/pypy/interpreter/buffer.py +++ b/pypy/interpreter/buffer.py @@ -30,11 +30,17 @@ raise ValueError("no raw buffer") + def is_writable(self): + return False + class RWBuffer(Buffer): """Abstract base class for read-write buffers.""" __slots__ = () # no extra slot here + def is_writable(self): + return True + def setitem(self, index, char): "Write a character into the buffer." raise NotImplementedError # Must be overriden. No bounds checks. diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -6,7 +6,7 @@ from errno import EINTR from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, specialize from pypy.interpreter import debug @@ -40,12 +40,11 @@ self.debug_excs = [] def clear(self, space): - # for sys.exc_clear() - self.w_type = space.w_None - self._w_value = space.w_None - self._application_traceback = None - if not we_are_translated(): - del self.debug_excs[:] + # XXX remove this method. The point is that we cannot always + # hack at 'self' to clear w_type and _w_value, because in some + # corner cases the OperationError will be used again: see + # test_interpreter.py:test_with_statement_and_sys_clear. + pass def match(self, space, w_check_class): "Check if this application-level exception matches 'w_check_class'." 
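The new XXX comment above, together with the clear_sys_exc_info() helper added to executioncontext.py just below, concerns the following application-level behaviour: sys.exc_clear() clears only the exception state of the frame that would currently feed sys.exc_info(), while outer handlers keep theirs. A condensed version of the tests added further down:

    import sys

    def inner():
        try:
            1 / 0
        except ZeroDivisionError:
            assert sys.exc_info()[0] is ZeroDivisionError
            sys.exc_clear()
            assert sys.exc_info()[0] is None      # this frame's state is cleared

    def outer():
        try:
            missing_name
        except NameError:
            inner()
            assert sys.exc_info()[0] is NameError     # the outer handler's state survives

    outer()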
@@ -300,6 +299,10 @@ """ self._application_traceback = traceback + at specialize.memo() +def get_cleared_operation_error(space): + return OperationError(space.w_None, space.w_None) + # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string # formatting with '%' -- in the common case were we don't diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -1,5 +1,5 @@ import sys -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from rpython.rlib.unroll import unrolling_iterable from rpython.rlib import jit @@ -217,6 +217,17 @@ if frame: # else, the exception goes nowhere and is lost frame.last_exception = operror + def clear_sys_exc_info(self): + # Find the frame out of which sys_exc_info() would return its result, + # and hack this frame's last_exception to become the cleared + # OperationError (which is different from None!). + frame = self.gettopframe_nohidden() + while frame: + if frame.last_exception is not None: + frame.last_exception = get_cleared_operation_error(self.space) + break + frame = self.getnextframe_nohidden(frame) + @jit.dont_look_inside def settrace(self, w_func): """Set the global trace function.""" diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -520,12 +520,13 @@ # When a BuiltinCode is stored in a Function object, # you get the functionality of CPython's built-in function type. - def __init__(self, func, unwrap_spec=None, self_type=None, descrmismatch=None): + def __init__(self, func, unwrap_spec=None, self_type=None, + descrmismatch=None, doc=None): "NOT_RPYTHON" # 'implfunc' is the interpreter-level function. # Note that this uses a lot of (construction-time) introspection. 
Code.__init__(self, func.__name__) - self.docstring = func.__doc__ + self.docstring = doc or func.__doc__ self.identifier = "%s-%s-%s" % (func.__module__, func.__name__, getattr(self_type, '__name__', '*')) @@ -805,8 +806,8 @@ raise TypeError("Varargs and keywords not supported in unwrap_spec") argspec = ', '.join([arg for arg in args.args[1:]]) func_code = py.code.Source(""" - def f(w_obj, %(args)s): - return w_obj.%(func_name)s(%(args)s) + def f(self, %(args)s): + return self.%(func_name)s(%(args)s) """ % {'args': argspec, 'func_name': func.func_name}) d = {} exec func_code.compile() in d @@ -821,7 +822,7 @@ else: assert isinstance(unwrap_spec, dict) unwrap_spec = unwrap_spec.copy() - unwrap_spec['w_obj'] = base_cls + unwrap_spec['self'] = base_cls return interp2app(globals()['unwrap_spec'](**unwrap_spec)(f)) class interp2app(W_Root): @@ -832,7 +833,7 @@ instancecache = {} def __new__(cls, f, app_name=None, unwrap_spec=None, descrmismatch=None, - as_classmethod=False): + as_classmethod=False, doc=None): "NOT_RPYTHON" # f must be a function whose name does NOT start with 'app_' @@ -861,7 +862,8 @@ cls.instancecache[key] = self self._code = BuiltinCode(f, unwrap_spec=unwrap_spec, self_type=self_type, - descrmismatch=descrmismatch) + descrmismatch=descrmismatch, + doc=doc) self.__name__ = f.func_name self.name = app_name self.as_classmethod = as_classmethod diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -155,20 +155,6 @@ code_name = self.pycode.co_name return space.wrap(code_name) - def __del__(self): - # Only bother enqueuing self to raise an exception if the frame is - # still not finished and finally or except blocks are present. - self.clear_all_weakrefs() - if self.frame is not None: - block = self.frame.lastblock - while block is not None: - if not isinstance(block, LoopBlock): - self.enqueue_for_destruction(self.space, - GeneratorIterator.descr_close, - "interrupting generator of ") - break - block = block.previous - # Results can be either an RPython list of W_Root, or it can be an # app-level W_ListObject, which also has an append() method, that's why we # generate 2 versions of the function and 2 jit drivers. @@ -211,3 +197,20 @@ return unpack_into unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() + + +class GeneratorIteratorWithDel(GeneratorIterator): + + def __del__(self): + # Only bother enqueuing self to raise an exception if the frame is + # still not finished and finally or except blocks are present. + self.clear_all_weakrefs() + if self.frame is not None: + block = self.frame.lastblock + while block is not None: + if not isinstance(block, LoopBlock): + self.enqueue_for_destruction(self.space, + GeneratorIterator.descr_close, + "interrupting generator of ") + break + block = block.previous diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR, CO_KILL_DOCSTRING) + CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash @@ -31,7 +31,7 @@ # Magic numbers for the bytecode version in code objects. 
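The generator change above splits off GeneratorIteratorWithDel so that, per the whatsnew entry for the remove-del-from-generatoriterator branch, only generators that yield inside a try (or with) block pay for the __del__/close() machinery. At application level the distinction looks roughly like this (hypothetical functions, only to illustrate which kind needs cleanup when an unfinished generator is collected):

    def plain_counter(n):
        # no yield inside try: nothing needs to run if the generator is
        # garbage-collected half-way, so no __del__ is required
        for i in range(n):
            yield i

    def guarded_counter(n):
        try:
            for i in range(n):
                yield i
        finally:
            # must run even when an unfinished generator is collected,
            # which is what the enqueue_for_destruction/close() path guarantees
            print "cleanup"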
# See comments in pypy/module/imp/importing. cpython_magic, = struct.unpack("<195><164>" may become "\u005c\U000000E4" (16 bytes) + while ps < end: + if s[ps] == '\\': + lis.append(s[ps]) + ps += 1 + if ord(s[ps]) & 0x80: + # A multibyte sequence will follow, it will be + # escaped like \u1234. To avoid confusion with + # the backslash we just wrote, we emit "\u005c" + # instead. + lis.append("u005c") + if ord(s[ps]) & 0x80: # XXX inefficient + w, ps = decode_utf8(space, s, ps, end, "utf-32-be") + rn = len(w) + assert rn % 4 == 0 + for i in range(0, rn, 4): + lis.append('\\U') + lis.append(hexbyte(ord(w[i]))) + lis.append(hexbyte(ord(w[i+1]))) + lis.append(hexbyte(ord(w[i+2]))) + lis.append(hexbyte(ord(w[i+3]))) + else: + lis.append(s[ps]) + ps += 1 + return ''.join(lis) + def PyString_DecodeEscape(space, s, recode_encoding): """ Unescape a backslash-escaped string. If recode_encoding is non-zero, diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -1,10 +1,10 @@ from pypy.interpreter.pyparser import parsestring -import py +import py, sys class TestParsetring: - def parse_and_compare(self, literal, value): + def parse_and_compare(self, literal, value, encoding=None): space = self.space - w_ret = parsestring.parsestr(space, None, literal) + w_ret = parsestring.parsestr(space, encoding, literal) if isinstance(value, str): assert space.type(w_ret) == space.w_str assert space.str_w(w_ret) == value @@ -91,3 +91,18 @@ input = ["'", 'x', ' ', chr(0xc3), chr(0xa9), ' ', chr(92), 'n', "'"] w_ret = parsestring.parsestr(space, 'utf8', ''.join(input)) assert space.str_w(w_ret) == ''.join(expected) + + def test_wide_unicode_in_source(self): + if sys.maxunicode == 65535: + py.test.skip("requires a wide-unicode host") + self.parse_and_compare('u"\xf0\x9f\x92\x8b"', + unichr(0x1f48b), + encoding='utf-8') + + def test_decode_unicode_utf8(self): + buf = parsestring.decode_unicode_utf8(self.space, + 'u"\xf0\x9f\x92\x8b"', 2, 6) + if sys.maxunicode == 65535: + assert buf == r"\U0000d83d\U0000dc8b" + else: + assert buf == r"\U0001f48b" diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -708,6 +708,18 @@ never_called py.test.raises(AssertionError, space.wrap, gateway.interp2app_temp(g)) + def test_interp2app_doc(self): + space = self.space + def f(space, w_x): + """foo""" + w_f = space.wrap(gateway.interp2app_temp(f)) + assert space.unwrap(space.getattr(w_f, space.wrap('__doc__'))) == 'foo' + # + def g(space, w_x): + never_called + w_g = space.wrap(gateway.interp2app_temp(g, doc='bar')) + assert space.unwrap(space.getattr(w_g, space.wrap('__doc__'))) == 'bar' + class AppTestPyTestMark: @py.test.mark.unlikely_to_exist diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -311,3 +311,73 @@ assert str(e) == "maximum recursion depth exceeded" else: assert 0, "should have raised!" 
+ + def test_with_statement_and_sys_clear(self): + import sys + class CM(object): + def __enter__(self): + return self + def __exit__(self, exc_type, exc_value, tb): + sys.exc_clear() + try: + with CM(): + 1 / 0 + raise AssertionError("should not be reached") + except ZeroDivisionError: + pass + + def test_sys_clear_while_handling_exception(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + sys.exc_clear() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + f() + + def test_sys_clear_while_handling_exception_nested(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + h1() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + def h1(): + sys.exc_clear() + f() + + def test_sys_clear_reraise(self): + import sys + def f(): + try: + 1 / 0 + except ZeroDivisionError: + sys.exc_clear() + raise + raises(TypeError, f) diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -30,7 +30,7 @@ # ____________________________________________________________ def encode(space, w_data, encoding=None, errors='strict'): - from pypy.objspace.std.unicodetype import encode_object + from pypy.objspace.std.unicodeobject import encode_object return encode_object(space, w_data, encoding, errors) # These functions take and return unwrapped rpython strings and unicodes diff --git a/pypy/module/__builtin__/app_operation.py b/pypy/module/__builtin__/app_operation.py --- a/pypy/module/__builtin__/app_operation.py +++ b/pypy/module/__builtin__/app_operation.py @@ -1,4 +1,5 @@ +import operator + def bin(x): - if not isinstance(x, (int, long)): - raise TypeError("must be int or long") - return x.__format__("#b") + value = operator.index(x) + return value.__format__("#b") diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -229,10 +229,14 @@ return W_MemoryView(buf) def descr_buffer(self, space): - """Note that memoryview() objects in PyPy support buffer(), whereas - not in CPython; but CPython supports passing memoryview() to most - built-in functions that accept buffers, with the notable exception - of the buffer() built-in.""" + """ + Note that memoryview() is very inconsistent in CPython: it does not + support the buffer interface but does support the new buffer + interface: as a result, it is possible to pass memoryview to + e.g. socket.send() but not to file.write(). For simplicity and + consistency, in PyPy memoryview DOES support buffer(), which means + that it is accepted in more places than CPython. 
+ """ return space.wrap(self.buf) def descr_tobytes(self, space): diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -46,6 +46,15 @@ assert bin(2L) == "0b10" assert bin(-2L) == "-0b10" raises(TypeError, bin, 0.) + class C(object): + def __index__(self): + return 42 + assert bin(C()) == bin(42) + class D(object): + def __int__(self): + return 42 + exc = raises(TypeError, bin, D()) + assert "index" in exc.value.message def test_unichr(self): import sys diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1061,14 +1061,14 @@ assert (D() >= A()) == 'D:A.ge' -class AppTestOldStyleClassStrDict(object): +class AppTestOldStyleClassBytesDict(object): def setup_class(cls): if cls.runappdirect: py.test.skip("can only be run on py.py") def is_strdict(space, w_class): - from pypy.objspace.std.dictmultiobject import StringDictStrategy + from pypy.objspace.std.dictmultiobject import BytesDictStrategy w_d = w_class.getdict(space) - return space.wrap(isinstance(w_d.strategy, StringDictStrategy)) + return space.wrap(isinstance(w_d.strategy, BytesDictStrategy)) cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict)) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -3,7 +3,7 @@ from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache -from pypy.objspace.std.mapdict import IndexCache +from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -35,7 +35,7 @@ cache.misses = {} cache.hits = {} if space.config.objspace.std.withmapdict: - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) cache.misses = {} cache.hits = {} @@ -45,7 +45,7 @@ in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter assert space.config.objspace.std.withmapdict From noreply at buildbot.pypy.org Tue Jan 28 11:20:32 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 28 Jan 2014 11:20:32 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix bugs in the nursery Message-ID: <20140128102032.2F0F11D2420@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r681:c9bb5552e354 Date: 2014-01-28 11:20 +0100 http://bitbucket.org/pypy/stmgc/changeset/c9bb5552e354/ Log: fix bugs in the nursery diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -24,6 +24,12 @@ uint8_t write_locks[READMARKER_END - READMARKER_START]; +struct _thread_local1_s* _stm_dbg_get_tl(int thread) +{ + if (thread == -1) + return (struct _thread_local1_s*)real_address((object_t*)_STM_TL); + return (struct _thread_local1_s*)REAL_ADDRESS(get_thread_base(thread), _STM_TL); +} bool _stm_was_read_remote(char *base, object_t *obj) { @@ -228,6 +234,9 @@ _stm_restore_local_state(thread_num); _STM_TL->nursery_current = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); + memset((void*)real_address((object_t*)_STM_TL->nursery_current), 0x0, + (FIRST_AFTER_NURSERY_PAGE - FIRST_NURSERY_PAGE) * 4096); /* clear nursery */ + _STM_TL->shadow_stack = (object_t**)malloc(LENGTH_SHADOW_STACK * 
sizeof(void*)); _STM_TL->shadow_stack_base = _STM_TL->shadow_stack; @@ -397,10 +406,6 @@ /* here we hold the shared lock as a reader or writer */ assert(_STM_TL->running_transaction); - - /* reset shadowstack */ - _STM_TL->shadow_stack = _STM_TL->old_shadow_stack; - nursery_on_abort(); assert(_STM_TL->jmpbufptr != NULL); diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -128,6 +128,7 @@ #define REAL_ADDRESS(object_pages, src) ((object_pages) + (uintptr_t)(src)) + static inline struct object_s *real_address(object_t *src) { return (struct object_s*)REAL_ADDRESS(_STM_TL->thread_base, src); @@ -158,6 +159,7 @@ return object_pages + thread_num * (NB_PAGES * 4096UL); } + static inline void spin_loop(void) { asm("pause" : : : "memory"); @@ -226,5 +228,7 @@ void _stm_minor_collect(); #define stm_become_inevitable(msg) /* XXX implement me! */ +struct _thread_local1_s* _stm_dbg_get_tl(int thread); /* -1 is current thread */ + #endif diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -63,6 +63,8 @@ /* reserve a fresh new page (XXX: from the end!) */ page = stm_pages_reserve(1); + assert(memset(real_address((object_t*)(page * 4096)), 0xdd, 4096)); + result = (localchar_t *)(page * 4096UL); alloc->start = (uintptr_t)result; alloc->stop = alloc->start + (4096 / size) * size; @@ -99,6 +101,7 @@ void trace_if_young(object_t **pobj) { + /* takes a normal pointer to a thread-local pointer to an object */ if (*pobj == NULL) return; if (!_stm_is_young(*pobj)) @@ -125,6 +128,7 @@ moved->stm_flags |= GCFLAG_NOT_COMMITTED; if (is_small) /* means, not allocated by large-malloc */ moved->stm_flags |= GCFLAG_SMALL; + assert(size == _stm_data_size((struct object_s*)REAL_ADDRESS(get_thread_base(0), moved))); LIST_APPEND(_STM_TL->uncommitted_objects, moved); (*pobj)->stm_flags |= GCFLAG_MOVED; @@ -195,6 +199,7 @@ _STM_TL->nursery_current = new_current; assert((uintptr_t)new_current < (1L << 32)); if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { + _STM_TL->nursery_current = current; /* reset for nursery-clearing in minor_collect!! 
*/ current = collect_and_reserve(size); } @@ -263,7 +268,9 @@ void nursery_on_abort() { - + /* reset shadowstack */ + _STM_TL->shadow_stack = _STM_TL->old_shadow_stack; + /* clear old_objects_to_trace (they will have the WRITE_BARRIER flag set because the ones we care about are also in modified_objects) */ stm_list_clear(_STM_TL->old_objects_to_trace); From noreply at buildbot.pypy.org Tue Jan 28 11:20:31 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 28 Jan 2014 11:20:31 +0100 (CET) Subject: [pypy-commit] stmgc c7: add a new demo (sorting) and implement some more operators Message-ID: <20140128102031.0E0341D2420@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r680:bf658db49e2b Date: 2014-01-28 11:19 +0100 http://bitbucket.org/pypy/stmgc/changeset/bf658db49e2b/ Log: add a new demo (sorting) and implement some more operators diff --git a/duhton/demo/sort.duh b/duhton/demo/sort.duh new file mode 100644 --- /dev/null +++ b/duhton/demo/sort.duh @@ -0,0 +1,162 @@ + + + + +(setq c (container (list 1 2 3 4))) + + +(setq _rand (container (list 133542157 362436069 521288629 88675123))) +(defun xor128 () + (setq lst (get _rand)) + (setq x (get lst 0)) + (setq y (get lst 1)) + (setq z (get lst 2)) + (setq w (get lst 3)) + + (setq t (^ x (<< x 11))) + (setq x y) + (setq y z) + (setq z w) + + (setq w (^ w (^ (>> w 19) (^ t (>> t 8))))) + (set lst 0 x) + (set lst 1 y) + (set lst 2 z) + (set lst 3 w) + w + ) + + +(defun random_list (n) + (if (> n 0) + (progn + (setq res (random_list (- n 1))) + (append res (% (xor128) 10)) + res + ) + (list ) + ) + ) + + + +(defun merge_lists (as bs res) + ;; empties the two lists and merges the result to res + (setq len_as (len as)) + (setq len_bs (len bs)) + (if (< 0 len_as) + (if (< 0 len_bs) + (if (> (get as 0) (get bs 0)) + (append res (pop bs 0)) + (append res (pop as 0)) + ) + (append res (pop as 0)) + ) + (if (< 0 len_bs) + (append res (pop bs 0)) + ) + ) + (if (|| (< 0 len_as) (< 0 len_bs)) + (merge_lists as bs res) + ) + ) + + +(defun append_to_correct_half (xs first second half_len) + (if (< 0 (len xs)) + (progn + (setq elem (pop xs 0)) + (if (< 0 half_len) + (append_to_correct_half xs + (append first elem) + second + (- half_len 1)) + + (append_to_correct_half xs + first + (append second elem) + (- half_len 1)) + ) + ) + ) + ) + +(defun split_list (xs) + ;; empties xs and fills 2 new lists to be returned + (setq half_len (/ (len xs) 2)) + + (setq first (list)) + (setq second (list)) + + (append_to_correct_half xs first second half_len) + (list first second) + ) + + +(defun merge_sort (xs) + (if (<= (len xs) 1) ; 1 elem + xs + (progn ; many elems + (setq lists (split_list xs)) + (setq left (merge_sort (get lists 0))) + (setq right (merge_sort (get lists 1))) + ;; (print left right) + (setq merged (list)) + (merge_lists left right merged) + ;; (print (quote >) merged) + merged + ) + ) + ) + + +(defun copy_list_helper (xs res idx) + (if (< idx (len xs)) + (progn + (append res (get xs idx)) + (copy_list_helper xs res (+ idx 1)) + ) + ) + ) +(defun copy_list (xs) + (setq res (list)) + (copy_list_helper xs res 0) + res + ) +(defun print_list (xs) + (print (quote len:) (len xs) (quote ->) xs) + ) +;; (defun rotate (tree) +;; (if (pair? 
tree) +;; (progn +;; (setq left (car tree)) +;; (print left) +;; (print (cdr tree)) +;; ) +;; (print 111111) +;; ) +;; ) + +;; (defun create-tree (n) +;; (if (== n 0) +;; (progn +;; (set c (+ (get c) 1)) +;; (get c)) +;; (cons (create-tree (- n 1)) (create-tree (- n 1)))) +;; ) + + + +(setq as (random_list 20)) +(setq bs (random_list 20)) +(print as) +(print bs) +(print (split_list as)) + +(setq cs (random_list 200)) +(print_list cs) +(print_list (merge_sort (copy_list cs))) + + + + diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -140,6 +140,96 @@ return Du_None; } +DuObject *du_xor(DuObject *cons, DuObject *locals) +{ + int result = 0; + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + result = DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + expr = _DuCons_CAR(cons); + next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + obj = Du_Eval(expr, locals); + result ^= DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + } + + return DuInt_FromInt(result); +} + +DuObject *du_lshift(DuObject *cons, DuObject *locals) +{ + int result = 0; + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + result = DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + expr = _DuCons_CAR(cons); + next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + obj = Du_Eval(expr, locals); + result <<= DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + } + + return DuInt_FromInt(result); +} + +DuObject *du_rshift(DuObject *cons, DuObject *locals) +{ + int result = 0; + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + result = DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + expr = _DuCons_CAR(cons); + next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + obj = Du_Eval(expr, locals); + result >>= DuInt_AsInt(obj); + _du_restore2(next, locals); + + cons = next; + } + + return DuInt_FromInt(result); +} + DuObject *du_add(DuObject *cons, DuObject *locals) { int result = 0; @@ -221,6 +311,32 @@ return DuInt_FromInt(result); } +DuObject *du_mod(DuObject *cons, DuObject *locals) +{ + int result = 0; + int first = 1; + + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + if (first) { + result = DuInt_AsInt(obj); + first = 0; + } else { + result %= DuInt_AsInt(obj); + } + _du_restore2(next, locals); + + cons = next; + } + return DuInt_FromInt(result); +} + + static DuObject *_du_intcmp(DuObject *cons, DuObject *locals, int mode) { DuObject *obj_a, *obj_b; @@ -236,6 +352,8 @@ case 3: r = a != b; break; case 4: r = a > b; break; case 5: r = a >= b; break; + case 6: r = a && b; break; + case 7: r = a || b; break; } return DuInt_FromInt(r); } @@ -252,6 +370,12 @@ { return _du_intcmp(cons, locals, 4); } DuObject *du_ge(DuObject *cons, DuObject *locals) { return 
_du_intcmp(cons, locals, 5); } +DuObject *du_and(DuObject *cons, DuObject *locals) +{ return _du_intcmp(cons, locals, 6); } +DuObject *du_or(DuObject *cons, DuObject *locals) +{ return _du_intcmp(cons, locals, 7); } + + DuObject *du_type(DuObject *cons, DuObject *locals) { @@ -396,7 +520,7 @@ _du_getargs2("append", cons, locals, &lst, &newobj); DuList_Append(lst, newobj); - return newobj; + return lst; } DuObject *du_pop(DuObject *cons, DuObject *locals) @@ -638,9 +762,15 @@ DuFrame_SetBuiltinMacro(Du_Globals, "setq", du_setq); DuFrame_SetBuiltinMacro(Du_Globals, "print", du_print); DuFrame_SetBuiltinMacro(Du_Globals, "+", du_add); + DuFrame_SetBuiltinMacro(Du_Globals, "^", du_xor); + DuFrame_SetBuiltinMacro(Du_Globals, "<<", du_lshift); + DuFrame_SetBuiltinMacro(Du_Globals, ">>", du_rshift); + DuFrame_SetBuiltinMacro(Du_Globals, "%", du_mod); DuFrame_SetBuiltinMacro(Du_Globals, "-", du_sub); DuFrame_SetBuiltinMacro(Du_Globals, "*", du_mul); DuFrame_SetBuiltinMacro(Du_Globals, "/", du_div); + DuFrame_SetBuiltinMacro(Du_Globals, "||", du_or); + DuFrame_SetBuiltinMacro(Du_Globals, "&&", du_and); DuFrame_SetBuiltinMacro(Du_Globals, "<", du_lt); DuFrame_SetBuiltinMacro(Du_Globals, "<=", du_le); DuFrame_SetBuiltinMacro(Du_Globals, "==", du_eq); From noreply at buildbot.pypy.org Tue Jan 28 13:48:28 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 28 Jan 2014 13:48:28 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: reenable loop unrolling Message-ID: <20140128124828.3D7A31C015D@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68976:36ccb35831a5 Date: 2014-01-28 13:42 +0100 http://bitbucket.org/pypy/pypy/changeset/36ccb35831a5/ Log: reenable loop unrolling diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -19,7 +19,8 @@ ('string', OptString), ('earlyforce', OptEarlyForce), ('pure', OptPure), - ('heap', OptHeap)] + ('heap', OptHeap), + ('unroll', None)] # no direct instantiation of unroll unroll_all_opts = unrolling_iterable(ALL_OPTS) diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -60,7 +60,7 @@ assert res == f(6, 13) self.check_trace_count(1) if self.enable_opts: - self.check_resops(setfield_gc=1, getfield_gc=0) + self.check_resops(setfield_gc=2, getfield_gc=0) def test_loop_with_two_paths(self): @@ -107,10 +107,10 @@ pattern >>= 1 return 42 self.meta_interp(f, [0xF0F0F0]) - #if self.enable_opts: - # self.check_trace_count(3) - #else: - self.check_trace_count(2) + if self.enable_opts: + self.check_trace_count(3) + else: + self.check_trace_count(2) def test_interp_simple(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y']) @@ -189,6 +189,10 @@ found = 0 for op in get_stats().loops[0]._all_operations(): if op.getopname() == 'guard_true': + liveboxes = op.getfailargs() + assert len(liveboxes) == 2 # x, y (in some order) + assert isinstance(liveboxes[0], history.BoxInt) + assert isinstance(liveboxes[1], history.BoxInt) found += 1 if 'unroll' in self.enable_opts: assert found == 2 diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -437,7 +437,7 @@ """Inconsistency in the JIT hints.""" ENABLE_ALL_OPTS = ( - 
'intbounds:rewrite:virtualize:string:earlyforce:pure:heap') + 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll') PARAMETER_DOCS = { 'threshold': 'number of times a loop has to run for it to become hot', From noreply at buildbot.pypy.org Tue Jan 28 15:05:46 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 28 Jan 2014 15:05:46 +0100 (CET) Subject: [pypy-commit] stmgc c7: test and fix for writing to old objects which are already writeable for us Message-ID: <20140128140546.E140B1C0C34@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r682:3e167ba69711 Date: 2014-01-28 13:09 +0100 http://bitbucket.org/pypy/stmgc/changeset/3e167ba69711/ Log: test and fix for writing to old objects which are already writeable for us diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -72,7 +72,7 @@ /* clear the write-lock */ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; - assert(write_locks[lock_idx]); + assert(write_locks[lock_idx] == _STM_TL->thread_num + 1); write_locks[lock_idx] = 0; _stm_move_object(item, @@ -103,7 +103,7 @@ obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; return; } - + /* privatize if SHARED_PAGE */ uintptr_t pagenum2, pages; if (obj->stm_flags & GCFLAG_SMALL) { @@ -115,28 +115,38 @@ assert(pagenum == pagenum2); assert(pages == (stmcb_size(real_address(obj)) +4095) / 4096); } + for (pagenum2 += pages - 1; pagenum2 >= pagenum; pagenum2--) stm_pages_privatize(pagenum2); + /* claim the write-lock for this object */ uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; - uint8_t previous; - while ((previous = __sync_lock_test_and_set(&write_locks[lock_idx], 1))) { + uint8_t lock_num = _STM_TL->thread_num + 1; + uint8_t prev_owner; + do { + prev_owner = __sync_val_compare_and_swap(&write_locks[lock_idx], + 0, lock_num); + + /* if there was no lock-holder or we already have the lock */ + if ((!prev_owner) || (prev_owner == lock_num)) + break; + /* XXXXXX */ //_stm_start_semi_safe_point(); - usleep(1); + //usleep(1); //_stm_stop_semi_safe_point(); - //if (!(previous = __sync_lock_test_and_set(&write_locks[lock_idx], 1))) - // break; + // try again.... XXX stm_abort_transaction(); /* XXX: only abort if we are younger */ spin_loop(); + } while (1); + + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + if (prev_owner == 0) { + stm_read(obj); + LIST_APPEND(_STM_TL->modified_objects, obj); } - - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - stm_read(obj); - - LIST_APPEND(_STM_TL->modified_objects, obj); } @@ -380,11 +390,13 @@ /* note: same as push_modified_to... but src/dst swapped TODO: unify both... */ - char *dst = REAL_ADDRESS(local_base, item); - char *src = REAL_ADDRESS(remote_base, item); - size_t size = stmcb_size((struct object_s*)src); - memcpy(dst, src, size); - + /* check at least the first page (required by move_obj() */ + assert(stm_get_page_flag((uintptr_t)item / 4096) == PRIVATE_PAGE); + + _stm_move_object(item, + REAL_ADDRESS(remote_base, item), + REAL_ADDRESS(local_base, item)); + /* copying from the other thread re-added the WRITE_BARRIER flag */ assert(item->stm_flags & GCFLAG_WRITE_BARRIER); diff --git a/c7/largemalloc.c b/c7/largemalloc.c --- a/c7/largemalloc.c +++ b/c7/largemalloc.c @@ -124,7 +124,7 @@ memcpy over multiple PRIVATE pages. 
*/ char *end = src + _stm_data_size((struct object_s*)REAL_ADDRESS(get_thread_base(0), obj)); uintptr_t pagenum, num; - struct object_s *t0_obj = (struct object_s*)REAL_ADDRESS(get_thread_base(0), _stm_tl_address(src)); + struct object_s *t0_obj = (struct object_s*)REAL_ADDRESS(get_thread_base(0), obj); if (obj->stm_flags & GCFLAG_SMALL) { pagenum = (uintptr_t)obj / 4096UL; diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -456,6 +456,23 @@ newer = stm_pop_root() assert new == newer + def test_write_to_old_after_minor(self): + stm_start_transaction() + new = stm_allocate(16) + stm_push_root(new) + stm_minor_collect() + old = stm_pop_root() + stm_stop_transaction() + + stm_start_transaction() + stm_write(old) # old objs to trace + stm_set_char(old, 'x') + stm_minor_collect() + stm_write(old) # old objs to trace + stm_set_char(old, 'y') + stm_stop_transaction() + + # def test_resolve_write_write_no_conflict(self): # stm_start_transaction() diff --git a/duhton/demo/sort.duh b/duhton/demo/sort.duh --- a/duhton/demo/sort.duh +++ b/duhton/demo/sort.duh @@ -126,24 +126,7 @@ (defun print_list (xs) (print (quote len:) (len xs) (quote ->) xs) ) -;; (defun rotate (tree) -;; (if (pair? tree) -;; (progn -;; (setq left (car tree)) -;; (print left) -;; (print (cdr tree)) -;; ) -;; (print 111111) -;; ) -;; ) -;; (defun create-tree (n) -;; (if (== n 0) -;; (progn -;; (set c (+ (get c) 1)) -;; (get c)) -;; (cons (create-tree (- n 1)) (create-tree (- n 1)))) -;; ) @@ -153,7 +136,7 @@ (print bs) (print (split_list as)) -(setq cs (random_list 200)) +(setq cs (random_list 1000)) (print_list cs) (print_list (merge_sort (copy_list cs))) From noreply at buildbot.pypy.org Tue Jan 28 15:05:47 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 28 Jan 2014 15:05:47 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix possible bug in duhton Message-ID: <20140128140547.DCE731C0C34@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r683:3bb2c6fb41f2 Date: 2014-01-28 13:39 +0100 http://bitbucket.org/pypy/stmgc/changeset/3bb2c6fb41f2/ Log: fix possible bug in duhton diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -113,14 +113,15 @@ _stm_chunk_pages((struct object_s*)REAL_ADDRESS(get_thread_base(0), obj), &pagenum2, &pages); assert(pagenum == pagenum2); - assert(pages == (stmcb_size(real_address(obj)) +4095) / 4096); + assert(pages == (stmcb_size(real_address(obj)) + 4095) / 4096); } for (pagenum2 += pages - 1; pagenum2 >= pagenum; pagenum2--) stm_pages_privatize(pagenum2); - /* claim the write-lock for this object */ + /* claim the write-lock for this object (XXX: maybe a fastpath + for prev_owner == lock_num?) 
*/ uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; uint8_t lock_num = _STM_TL->thread_num + 1; uint8_t prev_owner; diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -155,6 +155,11 @@ #define _du_save3(p1,p2,p3) (_push_root((DuObject *)(p1)), \ _push_root((DuObject *)(p2)), \ _push_root((DuObject *)(p3))) +#define _du_save4(p1,p2,p3,p4) (_push_root((DuObject *)(p1)), \ + _push_root((DuObject *)(p2)), \ + _push_root((DuObject *)(p3)), \ + _push_root((DuObject *)(p4))) + #define _du_restore1(p1) (p1 = (typeof(p1))_pop_root()) #define _du_restore2(p1,p2) (p2 = (typeof(p2))_pop_root(), \ @@ -162,6 +167,11 @@ #define _du_restore3(p1,p2,p3) (p3 = (typeof(p3))_pop_root(), \ p2 = (typeof(p2))_pop_root(), \ p1 = (typeof(p1))_pop_root()) +#define _du_restore4(p1,p2,p3,p4)(p4 = (typeof(p4))_pop_root(), \ + p3 = (typeof(p3))_pop_root(), \ + p2 = (typeof(p2))_pop_root(), \ + p1 = (typeof(p1))_pop_root()) + #define _du_read1(p1) stm_read((object_t *)(p1)) #define _du_write1(p1) stm_write((object_t *)(p1)) diff --git a/duhton/frame.c b/duhton/frame.c --- a/duhton/frame.c +++ b/duhton/frame.c @@ -118,6 +118,7 @@ static dictentry_t * find_entry(DuFrameObject *frame, DuObject *symbol, int write_mode) { + /* only allocates if write_mode = 1 */ _du_read1(frame); DuFrameNodeObject *ob = frame->ob_nodes; @@ -259,13 +260,14 @@ } if (e->func_progn) { DuObject *func = e->func_progn; + DuObject *func_arglist = e->func_arglist; _du_save1(func); - _du_save3(frame, symbol, rest); + _du_save4(frame, symbol, rest, func_arglist); DuObject *callee_frame = DuFrame_New(); - _du_restore3(frame, symbol, rest); + _du_restore4(frame, symbol, rest, func_arglist); _du_save1(callee_frame); - _parse_arguments(symbol, rest, e->func_arglist, frame, callee_frame); + _parse_arguments(symbol, rest, func_arglist, frame, callee_frame); _du_restore1(callee_frame); _du_restore1(func); From noreply at buildbot.pypy.org Tue Jan 28 15:05:48 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 28 Jan 2014 15:05:48 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix more possible bugs in duhton Message-ID: <20140128140548.E24F91C0C34@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r684:a40aa54fd216 Date: 2014-01-28 14:40 +0100 http://bitbucket.org/pypy/stmgc/changeset/a40aa54fd216/ Log: fix more possible bugs in duhton diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -142,9 +142,13 @@ /* XXX: only abort if we are younger */ spin_loop(); } while (1); + + /* remove the write-barrier ONLY if we have the write-lock */ + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; if (prev_owner == 0) { + /* otherwise, we have the lock and already added it to + modified_objects / read-marker */ stm_read(obj); LIST_APPEND(_STM_TL->modified_objects, obj); } diff --git a/duhton/frame.c b/duhton/frame.c --- a/duhton/frame.c +++ b/duhton/frame.c @@ -200,7 +200,11 @@ DuObject *sym = DuSymbol_FromString(name); _du_restore1(frame); + _du_save1(frame); dictentry_t *e = find_entry((DuFrameObject *)frame, sym, 1); + _du_restore1(frame); + + _du_write1(frame); /* e is part of frame or a new object */ e->builtin_macro = func; } @@ -245,6 +249,7 @@ dictentry_t *e; DuFrame_Ensure("_DuFrame_EvalCall", frame); + /* find_entry not in write_mode will not collect */ e = find_entry((DuFrameObject *)frame, symbol, 0); if (!e) { e = find_entry((DuFrameObject *)Du_Globals, symbol, 0); @@ -296,6 +301,7 @@ DuFrame_Ensure("DuFrame_GetSymbol", 
frame); e = find_entry((DuFrameObject *)frame, symbol, 0); + /* find_entry does the read_barrier */ return e ? e->value : NULL; } @@ -304,10 +310,11 @@ dictentry_t *e; DuFrame_Ensure("DuFrame_SetSymbol", frame); - _du_save1(value); + _du_save2(value, frame); e = find_entry((DuFrameObject *)frame, symbol, 1); - _du_restore1(value); + _du_restore2(value, frame); + _du_write1(frame); /* e is new or part of frame */ e->value = value; } @@ -326,10 +333,11 @@ dictentry_t *e; DuFrame_Ensure("DuFrame_SetUserFunction", frame); - _du_save2(arglist, progn); + _du_save3(arglist, progn, frame); e = find_entry((DuFrameObject *)frame, symbol, 1); - _du_restore2(arglist, progn); + _du_restore3(arglist, progn, frame); + _du_write1(frame); /* e is part of frame or new */ e->func_arglist = arglist; e->func_progn = progn; } From noreply at buildbot.pypy.org Tue Jan 28 15:05:49 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 28 Jan 2014 15:05:49 +0100 (CET) Subject: [pypy-commit] stmgc c7: finally fix a missing save/restore in lst_append Message-ID: <20140128140549.D90F41C0C34@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r685:2ec35b21f89e Date: 2014-01-28 15:05 +0100 http://bitbucket.org/pypy/stmgc/changeset/2ec35b21f89e/ Log: finally fix a missing save/restore in lst_append diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -425,10 +425,7 @@ else _du_getargs1("container", cons, locals, &obj); - _du_save2(cons, locals); DuObject *container = DuContainer_New(obj); - _du_restore2(cons, locals); - return container; } @@ -519,7 +516,9 @@ DuObject *lst, *newobj; _du_getargs2("append", cons, locals, &lst, &newobj); + _du_save1(lst); DuList_Append(lst, newobj); + _du_restore1(lst); return lst; } From noreply at buildbot.pypy.org Tue Jan 28 16:30:53 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 28 Jan 2014 16:30:53 +0100 (CET) Subject: [pypy-commit] stmgc c7: WIP: implement run-transactions in duhton Message-ID: <20140128153053.9271D1C015D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r686:257f822e0770 Date: 2014-01-28 16:31 +0100 http://bitbucket.org/pypy/stmgc/changeset/257f822e0770/ Log: WIP: implement run-transactions in duhton diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -227,6 +227,7 @@ void _stm_minor_collect(); #define stm_become_inevitable(msg) /* XXX implement me! */ +#define stm_start_inevitable_transaction() stm_start_transaction(NULL) /* XXX implement me! 
*/ struct _thread_local1_s* _stm_dbg_get_tl(int thread); /* -1 is current thread */ diff --git a/duhton/demo/sort.duh b/duhton/demo/sort.duh --- a/duhton/demo/sort.duh +++ b/duhton/demo/sort.duh @@ -136,7 +136,7 @@ (print bs) (print (split_list as)) -(setq cs (random_list 1000)) +(setq cs (random_list 10000)) (print_list cs) (print_list (merge_sort (copy_list cs))) diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -679,6 +679,21 @@ return Du_None; } +DuObject *du_run_transactions(DuObject *cons, DuObject *locals) +{ + if (cons != Du_None) + Du_FatalError("run-transactions: expected no argument"); + + _du_save1(stm_thread_local_obj); + stm_stop_transaction(); + _du_restore1(stm_thread_local_obj); + + Du_TransactionRun(); + + stm_start_inevitable_transaction(); + return Du_None; +} + DuObject *du_sleepms(DuObject *cons, DuObject *locals) { DuObject *obj; @@ -793,6 +808,7 @@ DuFrame_SetBuiltinMacro(Du_Globals, "cons", du_cons); DuFrame_SetBuiltinMacro(Du_Globals, "not", du_not); DuFrame_SetBuiltinMacro(Du_Globals, "transaction", du_transaction); + DuFrame_SetBuiltinMacro(Du_Globals, "run-transactions", du_run_transactions); DuFrame_SetBuiltinMacro(Du_Globals, "sleepms", du_sleepms); DuFrame_SetBuiltinMacro(Du_Globals, "defined?", du_defined); DuFrame_SetBuiltinMacro(Du_Globals, "pair?", du_pair); diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -23,6 +23,7 @@ static void run_all_threads(void) { + thread_sleeping = 0; int i; for (i = 0; i < all_threads_count; i++) { int status = pthread_create(&all_threads[i], NULL, run_thread, @@ -34,6 +35,7 @@ } for (i = 0; i < all_threads_count; i++) { pthread_join(all_threads[i], NULL); + all_threads[i] = (pthread_t)NULL; } } From noreply at buildbot.pypy.org Tue Jan 28 17:58:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 28 Jan 2014 17:58:44 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Tweaks Message-ID: <20140128165844.C55801C015D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5144:8b18c872d336 Date: 2014-01-28 17:58 +0100 http://bitbucket.org/pypy/extradoc/changeset/8b18c872d336/ Log: Tweaks diff --git a/talk/fosdem2014/pypy-stm.pdf b/talk/fosdem2014/pypy-stm.pdf index 68969cdc285bd7404a257fee458f30e30793003e..78ca0e1e4a6f24dd1d5ebb89e45fe4c8f6bf95e8 GIT binary patch [cut] diff --git a/talk/fosdem2014/pypy-stm.rst b/talk/fosdem2014/pypy-stm.rst --- a/talk/fosdem2014/pypy-stm.rst +++ b/talk/fosdem2014/pypy-stm.rst @@ -142,7 +142,7 @@ * If the Twisted reactor (say) was modified to start a pool of threads, and to run all events in "``with atomic:``" -* ...Then the end result is the same, for any Twisted program +* ...Then the end result is the same, for any Twisted application Behind-the-scene threads @@ -152,7 +152,7 @@ Python run on several cores * The "``with atomic:``" means that the semantics of the Twisted - program didn't change + application didn't change Summary (optimistic) @@ -160,7 +160,7 @@ * If you are using Twisted... -* Just wait and your program will run on multiple cores ``:-)`` +* ...Your program will run on multiple cores ``:-)`` Conflicts @@ -222,7 +222,7 @@ Scope ===== -* Twisted / Eventlet / Stackless / etc.: event-driven programming +* Twisted / Tornado / Eventlet / Stackless / etc.: event-driven programming * Any program computing something complicated, e.g. over all items in a dictionary, occasionally updating a shared state, etc. 
From noreply at buildbot.pypy.org Tue Jan 28 18:01:18 2014 From: noreply at buildbot.pypy.org (rguillebert) Date: Tue, 28 Jan 2014 18:01:18 +0100 (CET) Subject: [pypy-commit] pypy default: Add stub for ufunc.outer Message-ID: <20140128170118.D8E581C015D@cobra.cs.uni-duesseldorf.de> Author: Romain Guillebert Branch: Changeset: r68977:d7c223f70bc3 Date: 2014-01-27 21:49 +0100 http://bitbucket.org/pypy/pypy/changeset/d7c223f70bc3/ Log: Add stub for ufunc.outer diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -254,6 +254,13 @@ return out return res + def descr_outer(self, space, __args__): + return self._outer(space, __args__) + + def _outer(self, space, __args__): + raise OperationError(space.w_TypeError, + space.wrap("outer product only supported for binary functions")) + class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] argcount = 1 @@ -432,6 +439,7 @@ nin = interp_attrproperty("argcount", cls=W_Ufunc), reduce = interp2app(W_Ufunc.descr_reduce), + outer = interp2app(W_Ufunc.descr_outer), ) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -1052,3 +1052,8 @@ np.array([0, -1, -3, -6, -10])).all() assert (np.divide.accumulate(todivide) == np.array([2., 4., 16.])).all() + + def test_outer(self): + import numpy as np + from numpypy import absolute + assert raises(TypeError, np.absolute.outer, [-1, -2]) From noreply at buildbot.pypy.org Tue Jan 28 19:54:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 28 Jan 2014 19:54:47 +0100 (CET) Subject: [pypy-commit] pypy default: fix outer exception to match numpy Message-ID: <20140128185447.477901C0C34@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r68978:3528431821e9 Date: 2014-01-28 13:54 -0500 http://bitbucket.org/pypy/pypy/changeset/3528431821e9/ Log: fix outer exception to match numpy diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -258,7 +258,7 @@ return self._outer(space, __args__) def _outer(self, space, __args__): - raise OperationError(space.w_TypeError, + raise OperationError(space.w_ValueError, space.wrap("outer product only supported for binary functions")) class W_Ufunc1(W_Ufunc): diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -1056,4 +1056,5 @@ def test_outer(self): import numpy as np from numpypy import absolute - assert raises(TypeError, np.absolute.outer, [-1, -2]) + exc = raises(ValueError, np.absolute.outer, [-1, -2]) + assert exc.value[0] == 'outer product only supported for binary functions' From noreply at buildbot.pypy.org Tue Jan 28 22:14:02 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 28 Jan 2014 22:14:02 +0100 (CET) Subject: [pypy-commit] pypy default: fix get_w_value() failing when called from setup, which can happen on py3k Message-ID: <20140128211402.C18241D23CF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r68979:e50d58dfb04c Date: 2014-01-28 12:38 -0800 http://bitbucket.org/pypy/pypy/changeset/e50d58dfb04c/ Log: fix get_w_value() failing when called 
from setup, which can happen on py3k diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -374,8 +374,8 @@ class OpErrFmtNoArgs(OperationError): def __init__(self, w_type, value): + self._value = value self.setup(w_type) - self._value = value def get_w_value(self, space): w_value = self._w_value From noreply at buildbot.pypy.org Tue Jan 28 22:14:04 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 28 Jan 2014 22:14:04 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140128211404.6E82A1D23CF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68980:b900559195c4 Date: 2014-01-27 16:02 -0800 http://bitbucket.org/pypy/pypy/changeset/b900559195c4/ Log: merge default diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -426,25 +426,12 @@ Could we use LLVM? ------------------ -In theory yes. But we tried to use it 5 or 6 times already, as a -translation backend or as a JIT backend --- and failed each time. +There is a (static) translation backend using LLVM in the branch +``llvm-translation-backend``. It can translate PyPy with or without the JIT on +Linux. -In more details: using LLVM as a (static) translation backend is -pointless nowadays because you can generate C code and compile it with -clang. (Note that compiling PyPy with clang gives a result that is not -faster than compiling it with gcc.) We might in theory get extra -benefits from LLVM's GC integration, but this requires more work on the -LLVM side before it would be remotely useful. Anyway, it could be -interfaced via a custom primitive in the C code. - -On the other hand, using LLVM as our JIT backend looks interesting as -well --- but again we made an attempt, and it failed: LLVM has no way to -patch the generated machine code. - -So the position of the core PyPy developers is that if anyone wants to -make an N+1'th attempt with LLVM, they are welcome, and will be happy to -provide help in the IRC channel, but they are left with the burden of proof -that (a) it works and (b) it gives important benefits. +Using LLVM as our JIT backend looks interesting as well -- we made an attempt, +but it failed: LLVM has no way to patch the generated machine code. ---------------------- How do I compile PyPy? diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -6,7 +6,7 @@ from errno import EINTR from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, specialize from pypy.interpreter import debug @@ -74,11 +74,11 @@ space.setattr(w_value, space.wrap('__context__'), w_last_value) def clear(self, space): - self.w_type = space.w_None - self._w_value = space.w_None - self._application_traceback = None - if not we_are_translated(): - del self.debug_excs[:] + # XXX remove this method. The point is that we cannot always + # hack at 'self' to clear w_type and _w_value, because in some + # corner cases the OperationError will be used again: see + # test_interpreter.py:test_with_statement_and_sys_clear. + pass def match(self, space, w_check_class): "Check if this application-level exception matches 'w_check_class'." 
@@ -348,6 +348,10 @@ """ self._application_traceback = traceback + at specialize.memo() +def get_cleared_operation_error(space): + return OperationError(space.w_None, space.w_None) + # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string # formatting with '%' -- in the common case were we don't diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -1,5 +1,5 @@ import sys -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from rpython.rlib.unroll import unrolling_iterable from rpython.rlib import jit @@ -217,6 +217,17 @@ if frame: # else, the exception goes nowhere and is lost frame.last_exception = operror + def clear_sys_exc_info(self): + # Find the frame out of which sys_exc_info() would return its result, + # and hack this frame's last_exception to become the cleared + # OperationError (which is different from None!). + frame = self.gettopframe_nohidden() + while frame: + if frame.last_exception is not None: + frame.last_exception = get_cleared_operation_error(self.space) + break + frame = self.getnextframe_nohidden(frame) + @jit.dont_look_inside def settrace(self, w_func): """Set the global trace function.""" diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -15,6 +15,8 @@ #define HAVE_UNICODE #define WITHOUT_COMPLEX #define HAVE_WCHAR_H 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_STAT_H 1 /* PyPy supposes Py_UNICODE == wchar_t */ #define HAVE_USABLE_WCHAR_T 1 diff --git a/pypy/module/cpyext/include/pyport.h b/pypy/module/cpyext/include/pyport.h --- a/pypy/module/cpyext/include/pyport.h +++ b/pypy/module/cpyext/include/pyport.h @@ -81,4 +81,45 @@ #endif #endif +/******************************* + * stat() and fstat() fiddling * + *******************************/ + +/* We expect that stat and fstat exist on most systems. + * It's confirmed on Unix, Mac and Windows. + * If you don't have them, add + * #define DONT_HAVE_STAT + * and/or + * #define DONT_HAVE_FSTAT + * to your pyconfig.h. Python code beyond this should check HAVE_STAT and + * HAVE_FSTAT instead. + * Also + * #define HAVE_SYS_STAT_H + * if exists on your platform, and + * #define HAVE_STAT_H + * if does. + */ +#ifndef DONT_HAVE_STAT +#define HAVE_STAT +#endif + +#ifndef DONT_HAVE_FSTAT +#define HAVE_FSTAT +#endif + +#ifdef RISCOS +#include +#include "unixstuff.h" +#endif + +#ifdef HAVE_SYS_STAT_H +#if defined(PYOS_OS2) && defined(PYCC_GCC) +#include +#endif +#include +#elif defined(HAVE_STAT_H) +#include +#else +#endif + #endif /* Py_PYPORT_H */ diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -152,9 +152,7 @@ to exc_info() will return (None,None,None) until another exception is raised and caught in the current thread or the execution stack returns to a frame where another exception is being handled.""" - operror = space.getexecutioncontext().sys_exc_info() - if operror is not None: - operror.clear(space) + space.getexecutioncontext().clear_sys_exc_info() def settrace(space, w_func): """Set the global debug tracing function. 
It will be called on each diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -10,10 +10,11 @@ SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, - SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, + SomeWeakRef, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) +from rpython.rtyper.llannotation import SomeAddress, SomeTypedAddressAccess from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant from rpython.flowspace.operation import op diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -9,10 +9,11 @@ from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, SomeString, SomeChar, SomeFloat, SomePtr, unionof, SomeInstance, SomeDict, - SomeBuiltin, SomePBC, SomeInteger, TLS, SomeAddress, SomeUnicodeCodePoint, + SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray, SomeConstantType) +from rpython.rtyper.llannotation import SomeAddress from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -5,11 +5,12 @@ from rpython.annotator.model import ( SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, s_Bool, - SomeUnicodeCodePoint, SomeAddress, SomeFloat, unionof, SomeUnicodeString, + SomeUnicodeCodePoint, SomeFloat, unionof, SomeUnicodeString, SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, SomeOrderedDict, SomeByteArray, annotation_to_lltype, lltype_to_annotation, ll_to_annotation, add_knowntypedata, s_ImpossibleValue,) +from rpython.rtyper.llannotation import SomeAddress from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import description from rpython.flowspace.model import Constant @@ -44,6 +45,14 @@ func, args, realresult, s_result)) return s_realresult +BUILTIN_ANALYZERS = {} + +def analyzer_for(func): + def wrapped(ann_func): + BUILTIN_ANALYZERS[func] = ann_func + return func + return wrapped + # ____________________________________________________________ def builtin_range(*args): @@ -250,30 +259,46 @@ s = SomeInteger(nonneg=True, knowntype=s.knowntype) return s +# collect all functions +import __builtin__ +for name, value in globals().items(): + if name.startswith('builtin_'): + original = getattr(__builtin__, name[8:]) + BUILTIN_ANALYZERS[original] = value + + at analyzer_for(getattr(OSError.__init__, 'im_func', OSError.__init__)) def OSError_init(s_self, *args): pass -def WindowsError_init(s_self, *args): +try: + WindowsError +except NameError: pass +else: + @analyzer_for(getattr(WindowsError.__init__, 'im_func', WindowsError.__init__)) + def WindowsError_init(s_self, *args): + pass -def termios_error_init(s_self, *args): - pass 
- + at analyzer_for(getattr(object.__init__, 'im_func', object.__init__)) def object_init(s_self, *args): # ignore - mostly used for abstract classes initialization pass + at analyzer_for(sys.getdefaultencoding) def conf(): return SomeString() + at analyzer_for(rpython.rlib.rarithmetic.intmask) def rarith_intmask(s_obj): return SomeInteger() + at analyzer_for(rpython.rlib.rarithmetic.longlongmask) def rarith_longlongmask(s_obj): return SomeInteger(knowntype=rpython.rlib.rarithmetic.r_longlong) + at analyzer_for(rpython.rlib.objectmodel.instantiate) def robjmodel_instantiate(s_clspbc): assert isinstance(s_clspbc, SomePBC) clsdef = None @@ -288,6 +313,7 @@ clsdef = clsdef.commonbase(cdef) return SomeInstance(clsdef) + at analyzer_for(rpython.rlib.objectmodel.r_dict) def robjmodel_r_dict(s_eqfn, s_hashfn, s_force_non_null=None): if s_force_non_null is None: force_non_null = False @@ -299,11 +325,13 @@ dictdef.dictkey.update_rdict_annotations(s_eqfn, s_hashfn) return SomeDict(dictdef) + at analyzer_for(rpython.rlib.objectmodel.r_ordereddict) def robjmodel_r_ordereddict(s_eqfn, s_hashfn): dictdef = getbookkeeper().getdictdef(is_r_dict=True) dictdef.dictkey.update_rdict_annotations(s_eqfn, s_hashfn) return SomeOrderedDict(dictdef) + at analyzer_for(rpython.rlib.objectmodel.hlinvoke) def robjmodel_hlinvoke(s_repr, s_llcallable, *args_s): from rpython.rtyper import rmodel from rpython.rtyper.error import TyperError @@ -323,81 +351,49 @@ return lltype_to_annotation(rresult.lowleveltype) + at analyzer_for(rpython.rlib.objectmodel.keepalive_until_here) def robjmodel_keepalive_until_here(*args_s): return immutablevalue(None) + at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr) def llmemory_cast_ptr_to_adr(s): from rpython.annotator.model import SomeInteriorPtr assert not isinstance(s, SomeInteriorPtr) return SomeAddress() + at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_adr_to_ptr) def llmemory_cast_adr_to_ptr(s, s_type): assert s_type.is_constant() return SomePtr(s_type.const) + at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_adr_to_int) def llmemory_cast_adr_to_int(s, s_mode=None): return SomeInteger() # xxx + at analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_int_to_adr) def llmemory_cast_int_to_adr(s): return SomeAddress() -def unicodedata_decimal(s_uchr): - raise TypeError("unicodedate.decimal() calls should not happen at interp-level") - -def test(*args): - return s_Bool - -# collect all functions -import __builtin__ -BUILTIN_ANALYZERS = {} -for name, value in globals().items(): - if name.startswith('builtin_'): - original = getattr(__builtin__, name[8:]) - BUILTIN_ANALYZERS[original] = value - -BUILTIN_ANALYZERS[rpython.rlib.rarithmetic.intmask] = rarith_intmask -BUILTIN_ANALYZERS[rpython.rlib.rarithmetic.longlongmask] = rarith_longlongmask -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.instantiate] = robjmodel_instantiate -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.r_dict] = robjmodel_r_dict -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.r_ordereddict] = robjmodel_r_ordereddict -BUILTIN_ANALYZERS[SomeOrderedDict.knowntype] = lambda : SomeOrderedDict(getbookkeeper().getdictdef()) -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.hlinvoke] = robjmodel_hlinvoke -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.keepalive_until_here] = robjmodel_keepalive_until_here -BUILTIN_ANALYZERS[rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr] = llmemory_cast_ptr_to_adr -BUILTIN_ANALYZERS[rpython.rtyper.lltypesystem.llmemory.cast_adr_to_ptr] = llmemory_cast_adr_to_ptr 
-BUILTIN_ANALYZERS[rpython.rtyper.lltypesystem.llmemory.cast_adr_to_int] = llmemory_cast_adr_to_int -BUILTIN_ANALYZERS[rpython.rtyper.lltypesystem.llmemory.cast_int_to_adr] = llmemory_cast_int_to_adr - -BUILTIN_ANALYZERS[getattr(OSError.__init__, 'im_func', OSError.__init__)] = ( - OSError_init) - -try: - WindowsError -except NameError: - pass -else: - BUILTIN_ANALYZERS[getattr(WindowsError.__init__, 'im_func', - WindowsError.__init__)] = ( - WindowsError_init) - -BUILTIN_ANALYZERS[sys.getdefaultencoding] = conf try: import unicodedata except ImportError: pass else: - BUILTIN_ANALYZERS[unicodedata.decimal] = unicodedata_decimal # xxx + @analyzer_for(unicodedata.decimal) + def unicodedata_decimal(s_uchr): + raise TypeError("unicodedate.decimal() calls should not happen at interp-level") -# object - just ignore object.__init__ -if hasattr(object.__init__, 'im_func'): - BUILTIN_ANALYZERS[object.__init__.im_func] = object_init -else: - BUILTIN_ANALYZERS[object.__init__] = object_init + at analyzer_for(SomeOrderedDict.knowntype) +def analyze(): + return SomeOrderedDict(getbookkeeper().getdictdef()) + + # annotation of low-level types from rpython.annotator.model import SomePtr from rpython.rtyper.lltypesystem import lltype + at analyzer_for(lltype.malloc) def malloc(s_T, s_n=None, s_flavor=None, s_zero=None, s_track_allocation=None, s_add_memory_pressure=None): assert (s_n is None or s_n.knowntype == int @@ -422,6 +418,7 @@ r = SomePtr(lltype.Ptr(s_T.const)) return r + at analyzer_for(lltype.free) def free(s_p, s_flavor, s_track_allocation=None): assert s_flavor.is_constant() assert s_track_allocation is None or s_track_allocation.is_constant() @@ -431,34 +428,41 @@ #p = lltype.malloc(T, flavor=s_flavor.const) #lltype.free(p, flavor=s_flavor.const) + at analyzer_for(lltype.render_immortal) def render_immortal(s_p, s_track_allocation=None): assert s_track_allocation is None or s_track_allocation.is_constant() + at analyzer_for(lltype.typeOf) def typeOf(s_val): lltype = annotation_to_lltype(s_val, info="in typeOf(): ") return immutablevalue(lltype) + at analyzer_for(lltype.cast_primitive) def cast_primitive(T, s_v): assert T.is_constant() return ll_to_annotation(lltype.cast_primitive(T.const, annotation_to_lltype(s_v)._defl())) + at analyzer_for(lltype.nullptr) def nullptr(T): assert T.is_constant() p = lltype.nullptr(T.const) return immutablevalue(p) + at analyzer_for(lltype.cast_pointer) def cast_pointer(PtrT, s_p): assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p assert PtrT.is_constant() cast_p = lltype.cast_pointer(PtrT.const, s_p.ll_ptrtype._defl()) return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) + at analyzer_for(lltype.cast_opaque_ptr) def cast_opaque_ptr(PtrT, s_p): assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p assert PtrT.is_constant() cast_p = lltype.cast_opaque_ptr(PtrT.const, s_p.ll_ptrtype._defl()) return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) + at analyzer_for(lltype.direct_fieldptr) def direct_fieldptr(s_p, s_fieldname): assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p assert s_fieldname.is_constant() @@ -466,62 +470,54 @@ s_fieldname.const) return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) + at analyzer_for(lltype.direct_arrayitems) def direct_arrayitems(s_p): assert isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p cast_p = lltype.direct_arrayitems(s_p.ll_ptrtype._example()) return SomePtr(ll_ptrtype=lltype.typeOf(cast_p)) + at analyzer_for(lltype.direct_ptradd) def direct_ptradd(s_p, s_n): assert 
isinstance(s_p, SomePtr), "direct_* of non-pointer: %r" % s_p # don't bother with an example here: the resulting pointer is the same return s_p + at analyzer_for(lltype.cast_ptr_to_int) def cast_ptr_to_int(s_ptr): # xxx return SomeInteger() + at analyzer_for(lltype.cast_int_to_ptr) def cast_int_to_ptr(PtrT, s_int): assert PtrT.is_constant() return SomePtr(ll_ptrtype=PtrT.const) + at analyzer_for(lltype.identityhash) def identityhash(s_obj): assert isinstance(s_obj, SomePtr) return SomeInteger() + at analyzer_for(lltype.getRuntimeTypeInfo) def getRuntimeTypeInfo(T): assert T.is_constant() return immutablevalue(lltype.getRuntimeTypeInfo(T.const)) + at analyzer_for(lltype.runtime_type_info) def runtime_type_info(s_p): assert isinstance(s_p, SomePtr), "runtime_type_info of non-pointer: %r" % s_p return SomePtr(lltype.typeOf(lltype.runtime_type_info(s_p.ll_ptrtype._example()))) + at analyzer_for(lltype.Ptr) def constPtr(T): assert T.is_constant() return immutablevalue(lltype.Ptr(T.const)) -BUILTIN_ANALYZERS[lltype.malloc] = malloc -BUILTIN_ANALYZERS[lltype.free] = free -BUILTIN_ANALYZERS[lltype.render_immortal] = render_immortal -BUILTIN_ANALYZERS[lltype.typeOf] = typeOf -BUILTIN_ANALYZERS[lltype.cast_primitive] = cast_primitive -BUILTIN_ANALYZERS[lltype.nullptr] = nullptr -BUILTIN_ANALYZERS[lltype.cast_pointer] = cast_pointer -BUILTIN_ANALYZERS[lltype.cast_opaque_ptr] = cast_opaque_ptr -BUILTIN_ANALYZERS[lltype.direct_fieldptr] = direct_fieldptr -BUILTIN_ANALYZERS[lltype.direct_arrayitems] = direct_arrayitems -BUILTIN_ANALYZERS[lltype.direct_ptradd] = direct_ptradd -BUILTIN_ANALYZERS[lltype.cast_ptr_to_int] = cast_ptr_to_int -BUILTIN_ANALYZERS[lltype.cast_int_to_ptr] = cast_int_to_ptr -BUILTIN_ANALYZERS[lltype.identityhash] = identityhash -BUILTIN_ANALYZERS[lltype.getRuntimeTypeInfo] = getRuntimeTypeInfo -BUILTIN_ANALYZERS[lltype.runtime_type_info] = runtime_type_info -BUILTIN_ANALYZERS[lltype.Ptr] = constPtr #________________________________ # weakrefs import weakref + at analyzer_for(weakref.ref) def weakref_ref(s_obj): if not isinstance(s_obj, SomeInstance): raise Exception("cannot take a weakref to %r" % (s_obj,)) @@ -530,8 +526,10 @@ "a weakref to cannot be None") return SomeWeakRef(s_obj.classdef) -BUILTIN_ANALYZERS[weakref.ref] = weakref_ref +from rpython.rtyper.lltypesystem import llmemory + + at analyzer_for(llmemory.weakref_create) def llweakref_create(s_obj): if (not isinstance(s_obj, SomePtr) or s_obj.ll_ptrtype.TO._gckind != 'gc'): @@ -539,6 +537,7 @@ s_obj,)) return SomePtr(llmemory.WeakRefPtr) + at analyzer_for(llmemory.weakref_deref ) def llweakref_deref(s_ptrtype, s_wref): if not (s_ptrtype.is_constant() and isinstance(s_ptrtype.const, lltype.Ptr) and @@ -551,10 +550,12 @@ "got %s" % (s_wref,)) return SomePtr(s_ptrtype.const) + at analyzer_for(llmemory.cast_ptr_to_weakrefptr) def llcast_ptr_to_weakrefptr(s_ptr): assert isinstance(s_ptr, SomePtr) return SomePtr(llmemory.WeakRefPtr) + at analyzer_for(llmemory.cast_weakrefptr_to_ptr) def llcast_weakrefptr_to_ptr(s_ptrtype, s_wref): if not (s_ptrtype.is_constant() and isinstance(s_ptrtype.const, lltype.Ptr)): @@ -566,56 +567,47 @@ "got %s" % (s_wref,)) return SomePtr(s_ptrtype.const) -from rpython.rtyper.lltypesystem import llmemory -BUILTIN_ANALYZERS[llmemory.weakref_create] = llweakref_create -BUILTIN_ANALYZERS[llmemory.weakref_deref ] = llweakref_deref -BUILTIN_ANALYZERS[llmemory.cast_ptr_to_weakrefptr] = llcast_ptr_to_weakrefptr -BUILTIN_ANALYZERS[llmemory.cast_weakrefptr_to_ptr] = llcast_weakrefptr_to_ptr - 
#________________________________ # non-gc objects + at analyzer_for(rpython.rlib.objectmodel.free_non_gc_object) def robjmodel_free_non_gc_object(obj): pass -BUILTIN_ANALYZERS[rpython.rlib.objectmodel.free_non_gc_object] = ( - robjmodel_free_non_gc_object) #_________________________________ # memory address + at analyzer_for(llmemory.raw_malloc) def raw_malloc(s_size): assert isinstance(s_size, SomeInteger) #XXX add noneg...? return SomeAddress() + at analyzer_for(llmemory.raw_malloc_usage) def raw_malloc_usage(s_size): assert isinstance(s_size, SomeInteger) #XXX add noneg...? return SomeInteger(nonneg=True) + at analyzer_for(llmemory.raw_free) def raw_free(s_addr): assert isinstance(s_addr, SomeAddress) + at analyzer_for(llmemory.raw_memclear) def raw_memclear(s_addr, s_int): assert isinstance(s_addr, SomeAddress) assert isinstance(s_int, SomeInteger) + at analyzer_for(llmemory.raw_memcopy) def raw_memcopy(s_addr1, s_addr2, s_int): assert isinstance(s_addr1, SomeAddress) assert isinstance(s_addr2, SomeAddress) assert isinstance(s_int, SomeInteger) #XXX add noneg...? -BUILTIN_ANALYZERS[llmemory.raw_malloc] = raw_malloc -BUILTIN_ANALYZERS[llmemory.raw_malloc_usage] = raw_malloc_usage -BUILTIN_ANALYZERS[llmemory.raw_free] = raw_free -BUILTIN_ANALYZERS[llmemory.raw_memclear] = raw_memclear -BUILTIN_ANALYZERS[llmemory.raw_memcopy] = raw_memcopy #_________________________________ # offsetof/sizeof + at analyzer_for(llmemory.offsetof) def offsetof(TYPE, fldname): return SomeInteger() - -BUILTIN_ANALYZERS[llmemory.offsetof] = offsetof - diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -460,15 +460,15 @@ def getKind(self): "Return the common Desc class of all descriptions in this PBC." - kinds = {} + kinds = set() for x in self.descriptions: assert type(x).__name__.endswith('Desc') # avoid import nightmares - kinds[x.__class__] = True - assert len(kinds) <= 1, ( - "mixing several kinds of PBCs: %r" % (kinds.keys(),)) + kinds.add(x.__class__) + if len(kinds) > 1: + raise AnnotatorError("mixing several kinds of PBCs: %r" % kinds) if not kinds: raise ValueError("no 'kind' on the 'None' PBC") - return kinds.keys()[0] + return kinds.pop() def simplify(self): if self.descriptions: @@ -568,33 +568,6 @@ # 'classdef' is None for known-to-be-dead weakrefs. 
self.classdef = classdef -# ____________________________________________________________ -# memory addresses - -from rpython.rtyper.lltypesystem import llmemory - - -class SomeAddress(SomeObject): - immutable = True - - def can_be_none(self): - return False - - def is_null_address(self): - return self.is_immutable_constant() and not self.const - - -# The following class is used to annotate the intermediate value that -# appears in expressions of the form: -# addr.signed[offset] and addr.signed[offset] = value - -class SomeTypedAddressAccess(SomeObject): - def __init__(self, type): - self.type = type - - def can_be_none(self): - return False - #____________________________________________________________ # annotation of low-level types @@ -630,6 +603,8 @@ return False +from rpython.rtyper.llannotation import SomeAddress +from rpython.rtyper.lltypesystem import llmemory annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -9,9 +9,10 @@ from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, - SomePBC, SomeTypedAddressAccess, SomeAddress, SomeType, s_ImpossibleValue, + SomePBC, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) +from rpython.rtyper.llannotation import SomeAddress, SomeTypedAddressAccess from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -7,6 +7,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.gctransform.framework import ( BaseFrameworkGCTransformer, BaseRootWalker) +from rpython.rtyper.llannotation import SomeAddress from rpython.rtyper.rbuiltin import gen_cast from rpython.translator.unsimplify import copyvar, varoftype from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -215,7 +216,7 @@ # update the global stack counter rffi.stackcounter.stacks_counter += 1 # - s_addr = annmodel.SomeAddress() + s_addr = SomeAddress() s_None = annmodel.s_None self.gc_detach_callback_pieces_ptr = getfn(gc_detach_callback_pieces, [], s_addr) @@ -327,10 +328,10 @@ inline=True) self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None) self.thread_before_fork_ptr = getfn(thread_before_fork, [], - annmodel.SomeAddress()) + SomeAddress()) self.thread_after_fork_ptr = getfn(thread_after_fork, [annmodel.SomeInteger(), - annmodel.SomeAddress()], + SomeAddress()], annmodel.s_None) # # check that the order of the need_*() is correct for us: if we @@ -496,7 +497,7 @@ # location -- but we check for consistency that ebp points # to a JITFRAME object. 
from rpython.jit.backend.llsupport.jitframe import STACK_DEPTH_OFS - + tid = self.gc.get_possibly_forwarded_type_id(ebp_in_caller) ll_assert(rffi.cast(lltype.Signed, tid) == rffi.cast(lltype.Signed, self.frame_tid), diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1,4 +1,5 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomeAddress from rpython.rlib import rgc from rpython.rtyper import rmodel, annlowlevel from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup @@ -195,21 +196,11 @@ # the point of this little dance is to not annotate # self.gcdata.static_root_xyz as constants. XXX is it still needed?? data_classdef = bk.getuniqueclassdef(gctypelayout.GCData) - data_classdef.generalize_attr( - 'static_root_start', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'static_root_nongcend', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'static_root_end', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'max_type_id', - annmodel.SomeInteger()) - data_classdef.generalize_attr( - 'typeids_z', - annmodel.SomeAddress()) + data_classdef.generalize_attr('static_root_start', SomeAddress()) + data_classdef.generalize_attr('static_root_nongcend', SomeAddress()) + data_classdef.generalize_attr('static_root_end', SomeAddress()) + data_classdef.generalize_attr('max_type_id', annmodel.SomeInteger()) + data_classdef.generalize_attr('typeids_z', SomeAddress()) annhelper = annlowlevel.MixLevelHelperAnnotator(self.translator.rtyper) @@ -310,13 +301,13 @@ self.collect_ptr = getfn(GCClass.collect.im_func, [s_gc, annmodel.SomeInteger()], annmodel.s_None) self.can_move_ptr = getfn(GCClass.can_move.im_func, - [s_gc, annmodel.SomeAddress()], + [s_gc, SomeAddress()], annmodel.SomeBool()) if hasattr(GCClass, 'shrink_array'): self.shrink_array_ptr = getfn( GCClass.shrink_array.im_func, - [s_gc, annmodel.SomeAddress(), + [s_gc, SomeAddress(), annmodel.SomeInteger(nonneg=True)], annmodel.s_Bool) else: self.shrink_array_ptr = None @@ -333,7 +324,7 @@ if hasattr(GCClass, 'writebarrier_before_copy'): self.wb_before_copy_ptr = \ getfn(GCClass.writebarrier_before_copy.im_func, - [s_gc] + [annmodel.SomeAddress()] * 2 + + [s_gc] + [SomeAddress()] * 2 + [annmodel.SomeInteger()] * 3, annmodel.SomeBool()) elif GCClass.needs_write_barrier: raise NotImplementedError("GC needs write barrier, but does not provide writebarrier_before_copy functionality") @@ -421,7 +412,7 @@ if getattr(GCClass, 'obtain_free_space', False): self.obtainfreespace_ptr = getfn(GCClass.obtain_free_space.im_func, [s_gc, annmodel.SomeInteger()], - annmodel.SomeAddress()) + SomeAddress()) if GCClass.moving_gc: self.id_ptr = getfn(GCClass.id.im_func, @@ -470,8 +461,7 @@ self.write_barrier_from_array_ptr = None if GCClass.needs_write_barrier: self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func, - [s_gc, - annmodel.SomeAddress()], + [s_gc, SomeAddress()], annmodel.s_None, inline=True) func = getattr(gcdata.gc, 'remember_young_pointer', None) @@ -479,13 +469,12 @@ # func should not be a bound method, but a real function assert isinstance(func, types.FunctionType) self.write_barrier_failing_case_ptr = getfn(func, - [annmodel.SomeAddress()], + [SomeAddress()], annmodel.s_None) func = getattr(GCClass, 'write_barrier_from_array', None) if func is not None: self.write_barrier_from_array_ptr = getfn(func.im_func, - [s_gc, - 
annmodel.SomeAddress(), + [s_gc, SomeAddress(), annmodel.SomeInteger()], annmodel.s_None, inline=True) @@ -497,7 +486,7 @@ assert isinstance(func, types.FunctionType) self.write_barrier_from_array_failing_case_ptr = \ getfn(func, - [annmodel.SomeAddress()], + [SomeAddress()], annmodel.s_None) diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -5,6 +5,7 @@ from rpython.rtyper import rmodel from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.llannotation import SomeAddress from rpython.memory.gctransform.framework import ( BaseFrameworkGCTransformer, BaseRootWalker, sizeofaddr) from rpython.rtyper.rbuiltin import gen_cast @@ -14,11 +15,11 @@ def annotate_walker_functions(self, getfn): self.incr_stack_ptr = getfn(self.root_walker.incr_stack, [annmodel.SomeInteger()], - annmodel.SomeAddress(), + SomeAddress(), inline = True) self.decr_stack_ptr = getfn(self.root_walker.decr_stack, [annmodel.SomeInteger()], - annmodel.SomeAddress(), + SomeAddress(), inline = True) def build_root_walker(self): @@ -211,7 +212,7 @@ # no thread_before_fork_ptr here self.thread_after_fork_ptr = getfn(thread_after_fork, [annmodel.SomeInteger(), - annmodel.SomeAddress()], + SomeAddress()], annmodel.s_None, minimal_transform=False) @@ -242,7 +243,7 @@ shadow_stack_pool.start_fresh_new_state() s_gcref = annmodel.SomePtr(llmemory.GCREF) - s_addr = annmodel.SomeAddress() + s_addr = SomeAddress() self.gc_shadowstackref_new_ptr = getfn(gc_shadowstackref_new, [], s_gcref, minimal_transform=False) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -114,8 +114,8 @@ def compute_result_annotation(self, s_TP, s_n=None, s_zero=None): # basically return the same as malloc - from rpython.annotator.builtin import malloc - return malloc(s_TP, s_n, s_zero=s_zero) + from rpython.annotator.builtin import BUILTIN_ANALYZERS + return BUILTIN_ANALYZERS[lltype.malloc](s_TP, s_n, s_zero=s_zero) def specialize_call(self, hop, i_zero=None): # XXX assume flavor and zero to be None by now @@ -266,7 +266,7 @@ func._dont_inline_ = True func._no_release_gil_ = True return func - + def no_collect(func): func._dont_inline_ = True func._gc_no_collect_ = True diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py new file mode 100644 --- /dev/null +++ b/rpython/rtyper/llannotation.py @@ -0,0 +1,26 @@ +""" +Code for annotating low-level thingies. 
+""" +from rpython.annotator.model import SomeObject + +class SomeAddress(SomeObject): + immutable = True + + def can_be_none(self): + return False + + def is_null_address(self): + return self.is_immutable_constant() and not self.const + +class SomeTypedAddressAccess(SomeObject): + """This class is used to annotate the intermediate value that + appears in expressions of the form: + addr.signed[offset] and addr.signed[offset] = value + """ + + def __init__(self, type): + self.type = type + + def can_be_none(self): + return False + diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ b/rpython/rtyper/lltypesystem/llarena.py @@ -405,7 +405,9 @@ import os, sys from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.extfunc import register_external -from rpython.rlib.objectmodel import CDefinedIntSymbolic +from rpython.rtyper.tool.rffi_platform import memory_alignment + +MEMORY_ALIGNMENT = memory_alignment() if sys.platform.startswith('linux'): # This only works with linux's madvise(), which is really not a memory @@ -597,11 +599,8 @@ llfakeimpl=arena_shrink_obj, sandboxsafe=True) -llimpl_round_up_for_allocation = rffi.llexternal('ROUND_UP_FOR_ALLOCATION', - [lltype.Signed, lltype.Signed], - lltype.Signed, - sandboxsafe=True, - _nowrapper=True) +def llimpl_round_up_for_allocation(size, minsize): + return (max(size, minsize) + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1) register_external(_round_up_for_allocation, [int, int], int, 'll_arena.round_up_for_allocation', llimpl=llimpl_round_up_for_allocation, diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py --- a/rpython/rtyper/lltypesystem/llmemory.py +++ b/rpython/rtyper/lltypesystem/llmemory.py @@ -905,11 +905,12 @@ _about_ = raw_memmove def compute_result_annotation(self, s_from, s_to, s_size): - from rpython.annotator.model import SomeAddress, SomeInteger + from rpython.annotator.model import SomeInteger + from rpython.rtyper.llannotation import SomeAddress assert isinstance(s_from, SomeAddress) assert isinstance(s_to, SomeAddress) assert isinstance(s_size, SomeInteger) - + def specialize_call(self, hop): hop.exception_cannot_occur() v_list = hop.inputargs(Address, Address, lltype.Signed) diff --git a/rpython/rtyper/raddress.py b/rpython/rtyper/raddress.py --- a/rpython/rtyper/raddress.py +++ b/rpython/rtyper/raddress.py @@ -1,5 +1,5 @@ # rtyping of memory address operations -from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomeAddress, SomeTypedAddressAccess from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.llmemory import (NULL, Address, @@ -9,14 +9,14 @@ from rpython.tool.pairtype import pairtype -class __extend__(annmodel.SomeAddress): +class __extend__(SomeAddress): def rtyper_makerepr(self, rtyper): return address_repr def rtyper_makekey(self): return self.__class__, -class __extend__(annmodel.SomeTypedAddressAccess): +class __extend__(SomeTypedAddressAccess): def rtyper_makerepr(self, rtyper): return TypedAddressAccessRepr(self.type) diff --git a/rpython/rtyper/test/test_nongc.py b/rpython/rtyper/test/test_nongc.py --- a/rpython/rtyper/test/test_nongc.py +++ b/rpython/rtyper/test/test_nongc.py @@ -1,6 +1,7 @@ import py from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomeAddress from rpython.annotator.annrpython import RPythonAnnotator 
from rpython.rtyper.rtyper import RPythonTyper from rpython.rlib.objectmodel import free_non_gc_object @@ -25,7 +26,7 @@ assert t.method2() == 42 free_non_gc_object(t) py.test.raises(RuntimeError, "t.method1()") - py.test.raises(RuntimeError, "t.method2()") + py.test.raises(RuntimeError, "t.method2()") py.test.raises(RuntimeError, "t.a") py.test.raises(RuntimeError, "t.a = 1") py.test.raises(AssertionError, "free_non_gc_object(TestClass2())") @@ -43,8 +44,8 @@ rtyper = RPythonTyper(a) rtyper.specialize() assert (Adef, 'raw') in rtyper.instance_reprs - assert (Adef, 'gc') not in rtyper.instance_reprs - + assert (Adef, 'gc') not in rtyper.instance_reprs + def test_alloc_flavor_subclassing(): class A: _alloc_flavor_ = "raw" @@ -64,7 +65,7 @@ assert (Adef, 'raw') in rtyper.instance_reprs assert (Adef, 'gc') not in rtyper.instance_reprs assert (Bdef, 'raw') in rtyper.instance_reprs - assert (Bdef, 'gc') not in rtyper.instance_reprs + assert (Bdef, 'gc') not in rtyper.instance_reprs def test_unsupported(): class A: @@ -85,7 +86,7 @@ pass class C(B): pass - + def f(i): if i == 0: o = None @@ -226,7 +227,7 @@ return b a = RPythonAnnotator() #does not raise: - s = a.build_types(malloc_and_free, [annmodel.SomeAddress()]) - assert isinstance(s, annmodel.SomeAddress) + s = a.build_types(malloc_and_free, [SomeAddress()]) + assert isinstance(s, SomeAddress) rtyper = RPythonTyper(a) rtyper.specialize() diff --git a/rpython/translator/c/primitive.py b/rpython/translator/c/primitive.py --- a/rpython/translator/c/primitive.py +++ b/rpython/translator/c/primitive.py @@ -11,10 +11,12 @@ UnsignedLongLong, Float, SingleFloat, LongFloat, Char, UniChar, Bool, Void, FixedSizeArray, Ptr, cast_opaque_ptr, typeOf) from rpython.rtyper.lltypesystem.llarena import RoundedUpForAllocation +from rpython.rtyper.tool.rffi_platform import memory_alignment from rpython.translator.c.support import cdecl, barebonearray SUPPORT_INT128 = hasattr(rffi, '__INT128_T') +MEMORY_ALIGNMENT = memory_alignment() # ____________________________________________________________ # @@ -69,9 +71,12 @@ elif type(value) == GCHeaderOffset: return '0' elif type(value) == RoundedUpForAllocation: - return 'ROUND_UP_FOR_ALLOCATION(%s, %s)' % ( - name_signed(value.basesize, db), - name_signed(value.minsize, db)) + return ('(((%(x)s>=%(minsize)s?%(x)s:%(minsize)s) + %(align_m1)s)' + ' & ~%(align_m1)s)') % { + 'x': name_signed(value.basesize, db), + 'minsize': name_signed(value.minsize, db), + 'align_m1': MEMORY_ALIGNMENT-1 + } elif isinstance(value, CDefinedIntSymbolic): return str(value.expr) elif isinstance(value, ComputedIntSymbolic): diff --git a/rpython/translator/c/src/align.h b/rpython/translator/c/src/align.h deleted file mode 100644 --- a/rpython/translator/c/src/align.h +++ /dev/null @@ -1,21 +0,0 @@ - -#ifndef _PYPY_ALIGN_H -#define _PYPY_ALIGN_H - -/* alignment for arena-based garbage collectors: the following line - enforces an alignment that should be enough for any structure - containing pointers and 'double' fields. 
*/ -struct rpy_memory_alignment_test1 { - double d; - void* p; -}; -struct rpy_memory_alignment_test2 { - char c; - struct rpy_memory_alignment_test1 s; -}; -#define MEMORY_ALIGNMENT offsetof(struct rpy_memory_alignment_test2, s) -#define ROUND_UP_FOR_ALLOCATION(x, minsize) \ - ((((x)>=(minsize)?(x):(minsize)) \ - + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1)) - -#endif //_PYPY_ALIGN_H diff --git a/rpython/translator/c/src/g_prerequisite.h b/rpython/translator/c/src/g_prerequisite.h --- a/rpython/translator/c/src/g_prerequisite.h +++ b/rpython/translator/c/src/g_prerequisite.h @@ -23,6 +23,3 @@ # define RPY_LENGTH0 1 /* array decl [0] are bad */ # define RPY_DUMMY_VARLENGTH /* nothing */ #endif - - -#include "src/align.h" diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -646,10 +646,7 @@ def llf(): s = '' for i in range(5): - print i - print s s += a[i] - print s assert s == "85?!" + lastchar fn = self.getcompiled(llf, []) fn() @@ -731,7 +728,6 @@ s = '' for i in range(4): s += a[i] - print s return s == 'abcd' fn = self.getcompiled(llf, []) assert fn() diff --git a/rpython/translator/c/test/test_refcount.py b/rpython/translator/c/test/test_refcount.py --- a/rpython/translator/c/test/test_refcount.py +++ b/rpython/translator/c/test/test_refcount.py @@ -1,191 +1,189 @@ import py -import os +from rpython.rtyper.lltypesystem import lltype from rpython.translator.translator import TranslationContext -from rpython.translator.c import genc from rpython.translator.c.test.test_genc import compile -from rpython.rtyper.lltypesystem import lltype -from rpython.conftest import option -def compile_func(func, args): - return compile(func, args, gcpolicy='ref') -def test_something(): - def f(): - return 1 - fn = compile_func(f, []) - assert fn() == 1 +class TestRefcount(object): + def compile_func(self, func, args): + return compile(func, args, gcpolicy='ref') -def test_something_more(): - S = lltype.GcStruct("S", ('x', lltype.Signed)) - def f(x): - s = lltype.malloc(S) - s.x = x - return s.x - fn = compile_func(f, [int]) - assert fn(1) == 1 + def test_something(self): + def f(): + return 1 + fn = self.compile_func(f, []) + assert fn() == 1 -def test_call_function(): - class C: - pass - def f(): - c = C() - c.x = 1 - return c - def g(): - return f().x - fn = compile_func(g, []) - assert fn() == 1 + def test_something_more(self): + S = lltype.GcStruct("S", ('x', lltype.Signed)) + def f(x): + s = lltype.malloc(S) + s.x = x + return s.x + fn = self.compile_func(f, [int]) + assert fn(1) == 1 -def test_multiple_exits(): - S = lltype.GcStruct("S", ('x', lltype.Signed)) - T = lltype.GcStruct("T", ('y', lltype.Signed)) - def f(n): - c = lltype.malloc(S) - d = lltype.malloc(T) - d.y = 1 - e = lltype.malloc(T) - e.y = 2 - if n: - x = d - else: - x = e - return x.y - fn = compile_func(f, [int]) - assert fn(1) == 1 - assert fn(0) == 2 + def test_call_function(self): + class C: + pass + def f(): + c = C() + c.x = 1 + return c + def g(): + return f().x + fn = self.compile_func(g, []) + assert fn() == 1 + def test_multiple_exits(self): + S = lltype.GcStruct("S", ('x', lltype.Signed)) + T = lltype.GcStruct("T", ('y', lltype.Signed)) + def f(n): + c = lltype.malloc(S) + d = lltype.malloc(T) + d.y = 1 + e = lltype.malloc(T) + e.y = 2 + if n: + x = d + else: + x = e + return x.y + fn = self.compile_func(f, [int]) + assert fn(1) == 1 + assert fn(0) == 2 -def 
test_cleanup_vars_on_call(): - S = lltype.GcStruct("S", ('x', lltype.Signed)) - def f(): - return lltype.malloc(S) - def g(): - s1 = f() - s1.x = 42 - s2 = f() - s3 = f() - return s1.x - fn = compile_func(g, []) - assert fn() == 42 -def test_multiply_passed_var(): - S = lltype.GcStruct("S", ('x', lltype.Signed)) - def f(x): - if x: - a = lltype.malloc(S) - a.x = 1 - b = a - else: - a = lltype.malloc(S) - a.x = 1 - b = lltype.malloc(S) - b.x = 2 - return a.x + b.x - fn = compile_func(f, [int]) - fn(1) == 2 - fn(0) == 3 + def test_cleanup_vars_on_call(self): + S = lltype.GcStruct("S", ('x', lltype.Signed)) + def f(): + return lltype.malloc(S) + def g(): + s1 = f() + s1.x = 42 + s2 = f() + s3 = f() + return s1.x + fn = self.compile_func(g, []) + assert fn() == 42 -def test_write_barrier(): - S = lltype.GcStruct("S", ('x', lltype.Signed)) - T = lltype.GcStruct("T", ('s', lltype.Ptr(S))) - def f(x): - s = lltype.malloc(S) - s.x = 0 - s1 = lltype.malloc(S) - s1.x = 1 - s2 = lltype.malloc(S) - s2.x = 2 - t = lltype.malloc(T) - t.s = s - if x: - t.s = s1 - else: - t.s = s2 - return t.s.x + s.x + s1.x + s2.x - fn = compile_func(f, [int]) - assert fn(1) == 4 - assert fn(0) == 5 + def test_multiply_passed_var(self): + S = lltype.GcStruct("S", ('x', lltype.Signed)) + def f(x): + if x: + a = lltype.malloc(S) + a.x = 1 + b = a + else: + a = lltype.malloc(S) + a.x = 1 + b = lltype.malloc(S) + b.x = 2 + return a.x + b.x + fn = self.compile_func(f, [int]) + fn(1) == 2 + fn(0) == 3 -def test_del_basic(): - py.test.skip("xxx fix or kill") - S = lltype.GcStruct('S', ('x', lltype.Signed), rtti=True) - TRASH = lltype.GcStruct('TRASH', ('x', lltype.Signed)) - GLOBAL = lltype.Struct('GLOBAL', ('x', lltype.Signed)) - glob = lltype.malloc(GLOBAL, immortal=True) - def destructor(s): - glob.x = s.x + 1 - def type_info_S(s): - return lltype.getRuntimeTypeInfo(S) + def test_write_barrier(self): + S = lltype.GcStruct("S", ('x', lltype.Signed)) + T = lltype.GcStruct("T", ('s', lltype.Ptr(S))) + def f(x): + s = lltype.malloc(S) + s.x = 0 + s1 = lltype.malloc(S) + s1.x = 1 + s2 = lltype.malloc(S) + s2.x = 2 + t = lltype.malloc(T) + t.s = s + if x: + t.s = s1 + else: + t.s = s2 + return t.s.x + s.x + s1.x + s2.x + fn = self.compile_func(f, [int]) + assert fn(1) == 4 + assert fn(0) == 5 - def g(n): - s = lltype.malloc(S) - s.x = n - # now 's' should go away - def entrypoint(n): - g(n) - # llop.gc__collect(lltype.Void) - return glob.x + def test_del_basic(self): + py.test.skip("xxx fix or kill") + S = lltype.GcStruct('S', ('x', lltype.Signed), rtti=True) + TRASH = lltype.GcStruct('TRASH', ('x', lltype.Signed)) + GLOBAL = lltype.Struct('GLOBAL', ('x', lltype.Signed)) + glob = lltype.malloc(GLOBAL, immortal=True) + def destructor(s): + glob.x = s.x + 1 + def type_info_S(s): + return lltype.getRuntimeTypeInfo(S) - t = TranslationContext() - t.buildannotator().build_types(entrypoint, [int]) - rtyper = t.buildrtyper() - destrptr = rtyper.annotate_helper_fn(destructor, [lltype.Ptr(S)]) - rtyper.attachRuntimeTypeInfoFunc(S, type_info_S, destrptr=destrptr) - rtyper.specialize() - fn = compile_func(entrypoint, None, t) + def g(n): + s = lltype.malloc(S) + s.x = n + # now 's' should go away + def entrypoint(n): + g(n) + # llop.gc__collect(lltype.Void) + return glob.x - res = fn(123) - assert res == 124 + t = TranslationContext() + t.buildannotator().build_types(entrypoint, [int]) + rtyper = t.buildrtyper() + destrptr = rtyper.annotate_helper_fn(destructor, [lltype.Ptr(S)]) + rtyper.attachRuntimeTypeInfoFunc(S, type_info_S, 
destrptr=destrptr) + rtyper.specialize() + fn = self.compile_func(entrypoint, None, t) -def test_del_catches(): - import os - def g(): - pass - class A(object): - def __del__(self): - try: - g() - except: - os.write(1, "hallo") - def f1(i): - if i: - raise TypeError - def f(i): + res = fn(123) + assert res == 124 + + def test_del_catches(self): + import os + def g(): + pass + class A(object): + def __del__(self): + try: + g() + except: + os.write(1, "hallo") + def f1(i): + if i: + raise TypeError + def f(i): + a = A() + f1(i) + a.b = 1 + return a.b + fn = self.compile_func(f, [int]) + assert fn(0) == 1 + fn(1, expected_exception_name="TypeError") + + def test_del_raises(self): + class B(object): + def __del__(self): + raise TypeError + def func(): + b = B() + fn = self.compile_func(func, []) + # does not crash + fn() + + def test_wrong_order_setitem(self): + class A(object): + pass a = A() - f1(i) - a.b = 1 - return a.b - fn = compile_func(f, [int]) - assert fn(0) == 1 - fn(1, expected_exception_name="TypeError") - -def test_del_raises(): - class B(object): - def __del__(self): - raise TypeError - def func(): - b = B() - fn = compile_func(func, []) - # does not crash - fn() - -def test_wrong_order_setitem(): - import os - class A(object): - pass - a = A() - a.b = None - class B(object): - def __del__(self): - a.freed += 1 - a.b = None - def f(n): - a.freed = 0 - a.b = B() - if n: - a.b = None - return a.freed - fn = compile_func(f, [int]) - res = fn(1) - assert res == 1 + a.b = None + class B(object): + def __del__(self): + a.freed += 1 + a.b = None + def f(n): + a.freed = 0 + a.b = B() + if n: + a.b = None + return a.freed + fn = self.compile_func(f, [int]) + res = fn(1) + assert res == 1 diff --git a/rpython/translator/c/test/test_typed.py b/rpython/translator/c/test/test_typed.py --- a/rpython/translator/c/test/test_typed.py +++ b/rpython/translator/c/test/test_typed.py @@ -17,16 +17,6 @@ def getcompiled(self, func, argtypes): return compile(func, argtypes, backendopt=False) - def get_wrapper(self, func): - def wrapper(*args): - try: - return func(*args) - except OverflowError: - return -1 - except ZeroDivisionError: - return -2 - return wrapper - def test_set_attr(self): set_attr = self.getcompiled(snippet.set_attr, []) assert set_attr() == 2 @@ -470,32 +460,32 @@ assert res == f(i, ord(l[j])) def test_int_overflow(self): - fn = self.getcompiled(self.get_wrapper(snippet.add_func), [int]) - assert fn(sys.maxint) == -1 + fn = self.getcompiled(snippet.add_func, [int]) + fn(sys.maxint, expected_exception_name='OverflowError') def test_int_floordiv_ovf_zer(self): - fn = self.getcompiled(self.get_wrapper(snippet.div_func), [int]) - assert fn(-1) == -1 - assert fn(0) == -2 + fn = self.getcompiled(snippet.div_func, [int]) + fn(-1, expected_exception_name='OverflowError') + fn(0, expected_exception_name='ZeroDivisionError') def test_int_mul_ovf(self): - fn = self.getcompiled(self.get_wrapper(snippet.mul_func), [int, int]) + fn = self.getcompiled(snippet.mul_func, [int, int]) for y in range(-5, 5): for x in range(-5, 5): assert fn(x, y) == snippet.mul_func(x, y) n = sys.maxint / 4 assert fn(n, 3) == snippet.mul_func(n, 3) assert fn(n, 4) == snippet.mul_func(n, 4) - assert fn(n, 5) == -1 + fn(n, 5, expected_exception_name='OverflowError') def test_int_mod_ovf_zer(self): - fn = self.getcompiled(self.get_wrapper(snippet.mod_func), [int]) - assert fn(-1) == -1 - assert fn(0) == -2 + fn = self.getcompiled(snippet.mod_func, [int]) + fn(-1, expected_exception_name='OverflowError') + fn(0, 
expected_exception_name='ZeroDivisionError') def test_int_lshift_ovf(self): - fn = self.getcompiled(self.get_wrapper(snippet.lshift_func), [int]) - assert fn(1) == -1 + fn = self.getcompiled(snippet.lshift_func, [int]) + fn(1, expected_exception_name='OverflowError') def test_int_unary_ovf(self): def w(a, b): @@ -503,12 +493,12 @@ return snippet.unary_func(a)[0] else: return snippet.unary_func(a)[1] - fn = self.getcompiled(self.get_wrapper(w), [int, int]) + fn = self.getcompiled(w, [int, int]) for i in range(-3, 3): assert fn(i, 0) == -(i) assert fn(i, 1) == abs(i - 1) - assert fn(-sys.maxint - 1, 0) == -1 - assert fn(-sys.maxint, 0) == -1 + fn(-sys.maxint - 1, 0, expected_exception_name='OverflowError') + fn(-sys.maxint, 0, expected_exception_name='OverflowError') # floats def test_float_operations(self): diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -10,7 +10,7 @@ from rpython.rtyper import rtyper from rpython.rtyper.rmodel import inputconst from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong -from rpython.rlib.rarithmetic import r_singlefloat +from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat from rpython.rlib.debug import ll_assert from rpython.annotator import model as annmodel from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator @@ -22,6 +22,7 @@ lltype.UnsignedLongLong: r_ulonglong(-1), lltype.Float: -1.0, lltype.SingleFloat: r_singlefloat(-1.0), + lltype.LongFloat: r_longfloat(-1.0), lltype.Char: chr(255), lltype.UniChar: unichr(0xFFFF), # XXX is this always right? lltype.Bool: True, From noreply at buildbot.pypy.org Tue Jan 28 22:14:06 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 28 Jan 2014 22:14:06 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140128211406.0EF581D23CF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68981:15b720ac779c Date: 2014-01-28 12:43 -0800 http://bitbucket.org/pypy/pypy/changeset/15b720ac779c/ Log: merge default diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -428,8 +428,8 @@ class OpErrFmtNoArgs(OperationError): def __init__(self, w_type, value): + self._value = value self.setup(w_type) - self._value = value def get_w_value(self, space): w_value = self._w_value diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -903,8 +903,8 @@ w_res = self.descr_mul(space, other) assert isinstance(w_res, W_NDimArray) return w_res.descr_sum(space, space.wrap(-1), out) - dtype = interp_ufuncs.find_binop_result_dtype(space, - self.get_dtype(), other.get_dtype()) + dtype = interp_ufuncs.find_binop_result_dtype(space, self.get_dtype(), + other.get_dtype()) if self.get_size() < 1 and other.get_size() < 1: # numpy compatability return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) @@ -912,25 +912,27 @@ out_shape, other_critical_dim = _match_dot_shapes(space, self, other) if out: matches = True - if len(out.get_shape()) != len(out_shape): + if dtype != out.get_dtype(): + matches = False + elif not out.implementation.order == "C": + matches = False + elif len(out.get_shape()) != len(out_shape): matches = False else: for i in range(len(out_shape)): if out.get_shape()[i] != out_shape[i]: 
matches = False break - if dtype != out.get_dtype(): - matches = False - if not out.implementation.order == "C": - matches = False if not matches: raise OperationError(space.w_ValueError, space.wrap( - 'output array is not acceptable (must have the right type, nr dimensions, and be a C-Array)')) + 'output array is not acceptable (must have the right type, ' + 'nr dimensions, and be a C-Array)')) w_res = out + w_res.fill(space, self.get_dtype().coerce(space, None)) else: w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) # This is the place to add fpypy and blas - return loop.multidim_dot(space, self, other, w_res, dtype, + return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) def descr_mean(self, space, __args__): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -254,6 +254,13 @@ return out return res + def descr_outer(self, space, __args__): + return self._outer(space, __args__) + + def _outer(self, space, __args__): + raise OperationError(space.w_ValueError, + space.wrap("outer product only supported for binary functions")) + class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] argcount = 1 @@ -432,6 +439,7 @@ nin = interp_attrproperty("argcount", cls=W_Ufunc), reduce = interp2app(W_Ufunc.descr_reduce), + outer = interp2app(W_Ufunc.descr_outer), ) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -146,8 +146,7 @@ while not obj_iter.done(): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, - calc_dtype=calc_dtype, - ) + calc_dtype=calc_dtype) rval = obj_iter.getitem().convert_to(space, calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval @@ -172,8 +171,7 @@ shapelen = len(obj.get_shape()) while not obj_iter.done(): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, - dtype=calc_dtype, - ) + dtype=calc_dtype) rval = obj_iter.getitem().convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) out_iter.setitem(cur_value) @@ -271,8 +269,7 @@ iter.next() shapelen = len(arr.get_shape()) while not iter.done(): - arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, - ) + arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) w_val = iter.getitem() new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val) if dtype.itemtype.ne(new_best, cur_best): @@ -311,6 +308,7 @@ if i != right_critical_dim] right_skip = range(len(left_shape) - 1) result_skip = [len(result.get_shape()) - (len(right_shape) > 1)] + assert result.get_dtype() == dtype outi = result.create_dot_iter(broadcast_shape, result_skip) lefti = left.create_dot_iter(broadcast_shape, left_skip) righti = right.create_dot_iter(broadcast_shape, right_skip) @@ -318,10 +316,10 @@ dot_driver.jit_merge_point(dtype=dtype) lval = lefti.getitem().convert_to(space, dtype) rval = righti.getitem().convert_to(space, dtype) - outval = outi.getitem().convert_to(space, dtype) + outval = outi.getitem() v = dtype.itemtype.mul(lval, rval) - value = dtype.itemtype.add(v, outval).convert_to(space, dtype) - outi.setitem(value) + v = dtype.itemtype.add(v, outval) + outi.setitem(v) outi.next() righti.next() lefti.next() @@ -652,8 +650,8 @@ out_iter = out.create_iter(shape) while not arr_iter.done(): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - 
w_v = dtype.itemtype.round(arr_iter.getitem().convert_to(space, dtype), - decimals) + w_v = arr_iter.getitem().convert_to(space, dtype) + w_v = dtype.itemtype.round(w_v, decimals) out_iter.setitem(w_v) arr_iter.next() out_iter.next() diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -56,6 +56,10 @@ b = arange(12).reshape(4, 3) c = a.dot(b) assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + c = a.dot(b.astype(float)) + assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + c = a.astype(float).dot(b) + assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() a = arange(24).reshape(2, 3, 4) raises(ValueError, "a.dot(a)") @@ -91,9 +95,11 @@ out = arange(9).reshape(3, 3) c = dot(a, b, out=out) assert (c == out).all() - out = arange(9,dtype=float).reshape(3, 3) + assert (c == [[42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + out = arange(9, dtype=float).reshape(3, 3) exc = raises(ValueError, dot, a, b, out) - assert exc.value[0].find('not acceptable') > 0 + assert exc.value[0] == ('output array is not acceptable (must have the ' + 'right type, nr dimensions, and be a C-Array)') def test_choose_basic(self): from numpypy import array diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -1052,3 +1052,9 @@ np.array([0, -1, -3, -6, -10])).all() assert (np.divide.accumulate(todivide) == np.array([2., 4., 16.])).all() + + def test_outer(self): + import numpy as np + from numpypy import absolute + exc = raises(ValueError, np.absolute.outer, [-1, -2]) + assert exc.value[0] == 'outer product only supported for binary functions' diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -829,7 +829,7 @@ # ____________________________________________________________ # annotation of low-level types -from rpython.annotator.model import SomePtr +from rpython.rtyper.llannotation import SomePtr from rpython.annotator.model import ll_to_annotation, annotation_to_lltype class __extend__(pairtype(SomePtr, SomePtr)): diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -8,12 +8,12 @@ from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, - SomeString, SomeChar, SomeFloat, SomePtr, unionof, SomeInstance, SomeDict, + SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, - s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple, + s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray, SomeConstantType) -from rpython.rtyper.llannotation import SomeAddress + SomeWeakRef, lltype_to_annotation, SomeByteArray, SomeConstantType) +from rpython.rtyper.llannotation import SomeAddress, SomePtr, SomeLLADTMeth from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef diff --git a/rpython/annotator/builtin.py 
b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -357,7 +357,7 @@ @analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr) def llmemory_cast_ptr_to_adr(s): - from rpython.annotator.model import SomeInteriorPtr + from rpython.rtyper.llannotation import SomeInteriorPtr assert not isinstance(s, SomeInteriorPtr) return SomeAddress() @@ -390,7 +390,7 @@ # annotation of low-level types -from rpython.annotator.model import SomePtr +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import lltype @analyzer_for(lltype.malloc) diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -624,7 +624,7 @@ except ValueError: pass else: - from rpython.annotator.model import SomePtr + from rpython.rtyper.llannotation import SomePtr assert not isinstance(s_arg, SomePtr) else: # call the constructor diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -574,36 +574,8 @@ from rpython.rtyper.lltypesystem import lltype -class SomePtr(SomeObject): - knowntype = lltype._ptr - immutable = True - def __init__(self, ll_ptrtype): - assert isinstance(ll_ptrtype, lltype.Ptr) - self.ll_ptrtype = ll_ptrtype - - def can_be_none(self): - return False - - -class SomeInteriorPtr(SomePtr): - def __init__(self, ll_ptrtype): - assert isinstance(ll_ptrtype, lltype.InteriorPtr) - self.ll_ptrtype = ll_ptrtype - - -class SomeLLADTMeth(SomeObject): - immutable = True - - def __init__(self, ll_ptrtype, func): - self.ll_ptrtype = ll_ptrtype - self.func = func - - def can_be_none(self): - return False - - -from rpython.rtyper.llannotation import SomeAddress +from rpython.rtyper.llannotation import SomeAddress, SomePtr, SomeInteriorPtr from rpython.rtyper.lltypesystem import llmemory annotation_to_ll_map = [ diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -1,6 +1,7 @@ import py from rpython.annotator.model import * +from rpython.rtyper.llannotation import SomePtr from rpython.annotator.listdef import ListDef from rpython.translator.translator import TranslationContext diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -759,7 +759,7 @@ raise AnnotatorError("Cannot call len on a pbc") # annotation of low-level types -from rpython.annotator.model import SomePtr, SomeLLADTMeth +from rpython.rtyper.llannotation import SomePtr, SomeLLADTMeth from rpython.annotator.model import ll_to_annotation, lltype_to_annotation, annotation_to_lltype class __extend__(SomePtr): diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1,5 +1,5 @@ from rpython.annotator import model as annmodel -from rpython.rtyper.llannotation import SomeAddress +from rpython.rtyper.llannotation import SomeAddress, SomePtr from rpython.rlib import rgc from rpython.rtyper import rmodel, annlowlevel from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup @@ -268,7 +268,7 @@ from rpython.memory.gc.base import ARRAY_TYPEID_MAP from rpython.memory.gc import inspector - s_gcref = annmodel.SomePtr(llmemory.GCREF) + s_gcref = 
SomePtr(llmemory.GCREF) gcdata = self.gcdata translator = self.translator @@ -314,7 +314,7 @@ if hasattr(GCClass, 'heap_stats'): self.heap_stats_ptr = getfn(GCClass.heap_stats.im_func, - [s_gc], annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)), + [s_gc], SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)), minimal_transform=False) self.get_member_index_ptr = getfn( GCClass.get_member_index.im_func, @@ -448,8 +448,7 @@ minimal_transform=False) self.get_typeids_z_ptr = getfn(inspector.get_typeids_z, [s_gc], - annmodel.SomePtr( - lltype.Ptr(rgc.ARRAY_OF_CHAR)), + SomePtr(lltype.Ptr(rgc.ARRAY_OF_CHAR)), minimal_transform=False) self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func, diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -1,4 +1,5 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rlib.debug import ll_assert from rpython.rlib.nonconst import NonConstant from rpython.rlib import rgc @@ -242,7 +243,7 @@ def gc_start_fresh_new_state(): shadow_stack_pool.start_fresh_new_state() - s_gcref = annmodel.SomePtr(llmemory.GCREF) + s_gcref = SomePtr(llmemory.GCREF) s_addr = SomeAddress() self.gc_shadowstackref_new_ptr = getfn(gc_shadowstackref_new, [], s_gcref, diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -3,6 +3,7 @@ from rpython.translator.c import gc from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup from rpython.memory.gctransform import framework, shadowstack from rpython.rtyper.lltypesystem.lloperation import llop, void @@ -98,7 +99,7 @@ from rpython.translator.c.genc import CStandaloneBuilder - s_args = annmodel.SomePtr(lltype.Ptr(ARGS)) + s_args = SomePtr(lltype.Ptr(ARGS)) t = rtype(entrypoint, [s_args], gcname=cls.gcname, taggedpointers=cls.taggedpointers) @@ -827,7 +828,7 @@ from rpython.translator.translator import graphof from rpython.flowspace.model import Constant from rpython.rtyper.lltypesystem import rffi - layoutbuilder = cls.ensure_layoutbuilder(translator) + layoutbuilder = cls.ensure_layoutbuilder(translator) type_id = layoutbuilder.get_type_id(P) # # now fix the do_malloc_fixedsize_clear in the graph of g @@ -1116,7 +1117,7 @@ def test_adr_of_nursery(self): run = self.runner("adr_of_nursery") - res = run([]) + res = run([]) class TestGenerationalNoFullCollectGC(GCTest): # test that nursery is doing its job and that no full collection diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -1,4 +1,5 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rlib.objectmodel import specialize from rpython.rtyper.annlowlevel import (cast_instance_to_base_ptr, cast_base_ptr_to_instance, llstr) @@ -50,7 +51,7 @@ def emptyval(): return lltype.nullptr(llmemory.GCREF.TO) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def resop_new(no, llargs, llres): from rpython.jit.metainterp.history import ResOperation @@ -61,7 +62,7 @@ res = None return _cast_to_gcref(ResOperation(no, args, res)) - at 
register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def boxint_new(no): from rpython.jit.metainterp.history import BoxInt return _cast_to_gcref(BoxInt(no)) @@ -74,7 +75,7 @@ def resop_getopname(llop): return llstr(_cast_to_resop(llop).getopname()) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def resop_getarg(llop, no): return _cast_to_gcref(_cast_to_resop(llop).getarg(no)) @@ -82,7 +83,7 @@ def resop_setarg(llop, no, llbox): _cast_to_resop(llop).setarg(no, _cast_to_box(llbox)) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def resop_getresult(llop): return _cast_to_gcref(_cast_to_resop(llop).result) @@ -94,15 +95,15 @@ def box_getint(llbox): return _cast_to_box(llbox).getint() - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def box_clone(llbox): return _cast_to_gcref(_cast_to_box(llbox).clonebox()) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def box_constbox(llbox): return _cast_to_gcref(_cast_to_box(llbox).constbox()) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def box_nonconstbox(llbox): return _cast_to_gcref(_cast_to_box(llbox).nonconstbox()) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -94,9 +94,9 @@ _about_ = _heap_stats def compute_result_annotation(self): - from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import SomePtr from rpython.memory.gc.base import ARRAY_TYPEID_MAP - return annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)) + return SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)) def specialize_call(self, hop): hop.exception_is_here() @@ -452,8 +452,9 @@ global _cache_s_list_of_gcrefs if _cache_s_list_of_gcrefs is None: from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import SomePtr from rpython.annotator.listdef import ListDef - s_gcref = annmodel.SomePtr(llmemory.GCREF) + s_gcref = SomePtr(llmemory.GCREF) _cache_s_list_of_gcrefs = annmodel.SomeList( ListDef(None, s_gcref, mutated=True, resized=False)) return _cache_s_list_of_gcrefs @@ -468,15 +469,17 @@ class Entry(ExtRegistryEntry): _about_ = get_rpy_referents + def compute_result_annotation(self, s_gcref): - from rpython.annotator import model as annmodel - assert annmodel.SomePtr(llmemory.GCREF).contains(s_gcref) + from rpython.rtyper.llannotation import SomePtr + assert SomePtr(llmemory.GCREF).contains(s_gcref) return s_list_of_gcrefs() + def specialize_call(self, hop): vlist = hop.inputargs(hop.args_r[0]) hop.exception_cannot_occur() return hop.genop('gc_get_rpy_referents', vlist, - resulttype = hop.r_result) + resulttype=hop.r_result) class Entry(ExtRegistryEntry): _about_ = get_rpy_memory_usage @@ -522,10 +525,11 @@ class Entry(ExtRegistryEntry): _about_ = _get_llcls_from_cls def compute_result_annotation(self, s_Class): - from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import rclass assert s_Class.is_constant() - return annmodel.SomePtr(rclass.CLASSTYPE) + return SomePtr(rclass.CLASSTYPE) + def specialize_call(self, hop): from rpython.rtyper.rclass import getclassrepr from rpython.flowspace.model import Constant @@ -550,9 +554,11 @@ class Entry(ExtRegistryEntry): _about_ = get_typeids_z + def 
compute_result_annotation(self): - from rpython.annotator.model import SomePtr + from rpython.rtyper.llannotation import SomePtr return SomePtr(lltype.Ptr(ARRAY_OF_CHAR)) + def specialize_call(self, hop): hop.exception_is_here() return hop.genop('gc_typeids_z', [], resulttype = hop.r_result) diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -3,7 +3,8 @@ import sys from rpython.annotator.model import (SomeObject, SomeString, s_None, SomeChar, - SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePtr, SomePBC) + SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePBC) +from rpython.rtyper.llannotation import SomePtr from rpython.rlib import jit from rpython.rlib.objectmodel import newlist_hint, specialize, enforceargs from rpython.rlib.rarithmetic import ovfcheck diff --git a/rpython/rlib/test/test_signature.py b/rpython/rlib/test/test_signature.py --- a/rpython/rlib/test/test_signature.py +++ b/rpython/rlib/test/test_signature.py @@ -2,6 +2,7 @@ from rpython.rlib.signature import signature, finishsigs, FieldSpec, ClassSpec from rpython.rlib import types from rpython.annotator import model +from rpython.rtyper.llannotation import SomePtr from rpython.annotator.signature import SignatureError from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.lltypesystem import rstr @@ -127,7 +128,7 @@ def f(buf): pass argtype = getsig(f, policy=policy)[0] - assert isinstance(argtype, model.SomePtr) + assert isinstance(argtype, SomePtr) assert argtype.ll_ptrtype.TO == rstr.STR def g(): diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -7,6 +7,7 @@ from rpython.annotator.policy import AnnotatorPolicy from rpython.annotator.signature import Sig from rpython.annotator.specialize import flatten_star_args +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.normalizecalls import perform_normalizations from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.flowspace.model import Constant @@ -359,7 +360,7 @@ key = (llhelper, s_callable.const) s_res = self.bookkeeper.emulate_pbc_call(key, s_callable, args_s) assert annmodel.lltype_to_annotation(FUNC.RESULT).contains(s_res) - return annmodel.SomePtr(F) + return SomePtr(F) def specialize_call(self, hop): hop.exception_cannot_occur() @@ -476,7 +477,7 @@ def compute_result_annotation(self, s_PTR, s_object): assert s_PTR.is_constant() if isinstance(s_PTR.const, lltype.Ptr): - return annmodel.SomePtr(s_PTR.const) + return SomePtr(s_PTR.const) else: assert False @@ -535,14 +536,14 @@ def placeholder_sigarg(s): if s == "self": def expand(s_self, *args_s): - assert isinstance(s_self, annmodel.SomePtr) + assert isinstance(s_self, SomePtr) return s_self elif s == "SELF": raise NotImplementedError else: assert s.islower() def expand(s_self, *args_s): - assert isinstance(s_self, annmodel.SomePtr) + assert isinstance(s_self, SomePtr) return getattr(s_self.ll_ptrtype.TO, s.upper()) return expand diff --git a/rpython/rtyper/exceptiondata.py b/rpython/rtyper/exceptiondata.py --- a/rpython/rtyper/exceptiondata.py +++ b/rpython/rtyper/exceptiondata.py @@ -1,4 +1,5 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rlib import rstackovf from rpython.rtyper import rclass from rpython.rtyper.lltypesystem.rclass import (ll_issubclass, ll_type, @@ -72,12 +73,12 @@ def 
make_exception_matcher(self, rtyper): # ll_exception_matcher(real_exception_vtable, match_exception_vtable) - s_typeptr = annmodel.SomePtr(self.lltype_of_exception_type) + s_typeptr = SomePtr(self.lltype_of_exception_type) helper_fn = rtyper.annotate_helper_fn(ll_issubclass, [s_typeptr, s_typeptr]) return helper_fn def make_type_of_exc_inst(self, rtyper): # ll_type_of_exc_inst(exception_instance) -> exception_vtable - s_excinst = annmodel.SomePtr(self.lltype_of_exception_value) + s_excinst = SomePtr(self.lltype_of_exception_value) helper_fn = rtyper.annotate_helper_fn(ll_type, [s_excinst]) return helper_fn diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -2,6 +2,7 @@ Code for annotating low-level thingies. """ from rpython.annotator.model import SomeObject +from rpython.rtyper.lltypesystem import lltype class SomeAddress(SomeObject): immutable = True @@ -24,3 +25,30 @@ def can_be_none(self): return False +class SomePtr(SomeObject): + knowntype = lltype._ptr + immutable = True + + def __init__(self, ll_ptrtype): + assert isinstance(ll_ptrtype, lltype.Ptr) + self.ll_ptrtype = ll_ptrtype + + def can_be_none(self): + return False + + +class SomeInteriorPtr(SomePtr): + def __init__(self, ll_ptrtype): + assert isinstance(ll_ptrtype, lltype.InteriorPtr) + self.ll_ptrtype = ll_ptrtype + + +class SomeLLADTMeth(SomeObject): + immutable = True + + def __init__(self, ll_ptrtype, func): + self.ll_ptrtype = ll_ptrtype + self.func = func + + def can_be_none(self): + return False diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -22,6 +22,7 @@ from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat, base_int, intmask from rpython.rlib.rarithmetic import is_emulated_long, maxint from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from rpython.rtyper import raddress @@ -161,7 +162,7 @@ llmemory.GCREF: ctypes.c_void_p, llmemory.WeakRef: ctypes.c_void_p, # XXX }) - + if '__int128_t' in rffi.TYPES: _ctypes_cache[rffi.__INT128_T] = ctypes.c_longlong # XXX: Not right at all. But for some reason, It started by while doing JIT compile after a merge with default. Can't extend ctypes, because thats a python standard, right? 
@@ -1339,7 +1340,7 @@ def compute_result_annotation(self, s_ptr, s_n): assert isinstance(s_n, annmodel.SomeInteger) - assert isinstance(s_ptr, annmodel.SomePtr) + assert isinstance(s_ptr, SomePtr) typecheck_ptradd(s_ptr.ll_ptrtype) return annmodel.lltype_to_annotation(s_ptr.ll_ptrtype) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -1,5 +1,6 @@ import py from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import lltype, rstr from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr @@ -52,7 +53,7 @@ class _IsLLPtrEntry(ExtRegistryEntry): _about_ = _isllptr def compute_result_annotation(self, s_p): - result = isinstance(s_p, annmodel.SomePtr) + result = isinstance(s_p, SomePtr) return self.bookkeeper.immutablevalue(result) def specialize_call(self, hop): hop.exception_cannot_occur() @@ -996,7 +997,7 @@ TP = s_type.const if not isinstance(TP, lltype.Struct): raise TypeError("make called with %s instead of Struct as first argument" % TP) - return annmodel.SomePtr(lltype.Ptr(TP)) + return SomePtr(lltype.Ptr(TP)) def specialize_call(self, hop, **fields): assert hop.args_s[0].is_constant() diff --git a/rpython/rtyper/rptr.py b/rpython/rtyper/rptr.py --- a/rpython/rtyper/rptr.py +++ b/rpython/rtyper/rptr.py @@ -1,4 +1,6 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import ( + SomePtr, SomeInteriorPtr, SomeLLADTMeth) from rpython.flowspace import model as flowmodel from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.error import TyperError @@ -7,7 +9,7 @@ from rpython.tool.pairtype import pairtype -class __extend__(annmodel.SomePtr): +class __extend__(SomePtr): def rtyper_makerepr(self, rtyper): return PtrRepr(self.ll_ptrtype) @@ -15,7 +17,7 @@ return self.__class__, self.ll_ptrtype -class __extend__(annmodel.SomeInteriorPtr): +class __extend__(SomeInteriorPtr): def rtyper_makerepr(self, rtyper): return InteriorPtrRepr(self.ll_ptrtype) @@ -38,7 +40,7 @@ def rtype_getattr(self, hop): attr = hop.args_s[1].const - if isinstance(hop.s_result, annmodel.SomeLLADTMeth): + if isinstance(hop.s_result, SomeLLADTMeth): return hop.inputarg(hop.r_result, arg=0) try: self.lowleveltype._example()._lookup_adtmeth(attr) @@ -179,7 +181,7 @@ # ________________________________________________________________ # ADT methods -class __extend__(annmodel.SomeLLADTMeth): +class __extend__(SomeLLADTMeth): def rtyper_makerepr(self, rtyper): return LLADTMethRepr(self, rtyper) def rtyper_makekey(self): @@ -270,7 +272,7 @@ def rtype_getattr(self, hop): attr = hop.args_s[1].const - if isinstance(hop.s_result, annmodel.SomeLLADTMeth): + if isinstance(hop.s_result, SomeLLADTMeth): return hop.inputarg(hop.r_result, arg=0) FIELD_TYPE = getattr(self.resulttype.TO, attr) if isinstance(FIELD_TYPE, lltype.ContainerType): diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -16,6 +16,7 @@ import py from rpython.annotator import model as annmodel, unaryop, binaryop +from rpython.rtyper.llannotation import SomePtr from rpython.annotator.annrpython import FAIL from rpython.flowspace.model import Variable, Constant, SpaceOperation, c_last_exception from rpython.rtyper.annlowlevel import annotate_lowlevel_helper, LowLevelAnnotatorPolicy @@ -639,10 +640,10 @@ 
self.call_all_setups() # compute ForwardReferences now if ARG_GCSTRUCT is None: ARG_GCSTRUCT = GCSTRUCT - args_s = [annmodel.SomePtr(Ptr(ARG_GCSTRUCT))] + args_s = [SomePtr(Ptr(ARG_GCSTRUCT))] graph = self.annotate_helper(func, args_s) s = self.annotator.binding(graph.getreturnvar()) - if (not isinstance(s, annmodel.SomePtr) or + if (not isinstance(s, SomePtr) or s.ll_ptrtype != Ptr(RuntimeTypeInfo)): raise TyperError("runtime type info function %r returns %r, " "excepted Ptr(RuntimeTypeInfo)" % (func, s)) diff --git a/rpython/rtyper/test/test_llann.py b/rpython/rtyper/test/test_llann.py --- a/rpython/rtyper/test/test_llann.py +++ b/rpython/rtyper/test/test_llann.py @@ -1,6 +1,7 @@ import py from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.conftest import option from rpython.rtyper.annlowlevel import (annotate_lowlevel_helper, MixLevelHelperAnnotator, PseudoHighLevelCallable, llhelper, @@ -100,8 +101,8 @@ p2 = p1.sub1 p3 = cast_pointer(PS1, p2) return p3 - s = self.annotate(llf, [annmodel.SomePtr(PS1)]) - assert isinstance(s, annmodel.SomePtr) + s = self.annotate(llf, [SomePtr(PS1)]) + assert isinstance(s, SomePtr) assert s.ll_ptrtype == PS1 def test_cast_simple_widening_from_gc(self): @@ -114,7 +115,7 @@ p3 = cast_pointer(PS1, p2) return p3 s = self.annotate(llf, []) - assert isinstance(s, annmodel.SomePtr) + assert isinstance(s, SomePtr) assert s.ll_ptrtype == PS1 def test_cast_pointer(self): @@ -152,7 +153,7 @@ PF = Ptr(F) def llf(p): return p(0) - s = self.annotate(llf, [annmodel.SomePtr(PF)]) + s = self.annotate(llf, [SomePtr(PF)]) assert s.knowntype == int @@ -344,7 +345,7 @@ def llf(): return getRuntimeTypeInfo(S) s = self.annotate(llf, []) - assert isinstance(s, annmodel.SomePtr) + assert isinstance(s, SomePtr) assert s.ll_ptrtype == Ptr(RuntimeTypeInfo) assert s.const == getRuntimeTypeInfo(S) @@ -352,8 +353,8 @@ S = GcStruct('s', ('x', Signed), rtti=True) def llf(p): return runtime_type_info(p) - s = self.annotate(llf, [annmodel.SomePtr(Ptr(S))]) - assert isinstance(s, annmodel.SomePtr) + s = self.annotate(llf, [SomePtr(Ptr(S))]) + assert isinstance(s, SomePtr) assert s.ll_ptrtype == Ptr(RuntimeTypeInfo) def test_cast_primitive(self): diff --git a/rpython/rtyper/test/test_rpbc.py b/rpython/rtyper/test/test_rpbc.py --- a/rpython/rtyper/test/test_rpbc.py +++ b/rpython/rtyper/test/test_rpbc.py @@ -3,6 +3,7 @@ from rpython.annotator import policy, specialize from rpython.rtyper.lltypesystem.lltype import typeOf from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.rtyper.llannotation import SomePtr class MyBase: @@ -1817,7 +1818,8 @@ s_ll_f = annmodel.lltype_to_annotation(r_f.lowleveltype) A_repr = rclass.getinstancerepr(rt, a.bookkeeper.getdesc(A). getuniqueclassdef()) - ll_h_graph = annlowlevel.annotate_lowlevel_helper(a, ll_h, [s_R, s_ll_f, annmodel.SomePtr(A_repr.lowleveltype)]) + ll_h_graph = annlowlevel.annotate_lowlevel_helper( + a, ll_h, [s_R, s_ll_f, SomePtr(A_repr.lowleveltype)]) s = a.binding(ll_h_graph.getreturnvar()) assert s.ll_ptrtype == A_repr.lowleveltype rt.specialize_more_blocks() @@ -1873,7 +1875,8 @@ s_ll_f = annmodel.lltype_to_annotation(r_f.lowleveltype) A_repr = rclass.getinstancerepr(rt, a.bookkeeper.getdesc(A). 
getuniqueclassdef()) - ll_h_graph = annlowlevel.annotate_lowlevel_helper(a, ll_h, [s_R, s_ll_f, annmodel.SomePtr(A_repr.lowleveltype)]) + ll_h_graph = annlowlevel.annotate_lowlevel_helper( + a, ll_h, [s_R, s_ll_f, SomePtr(A_repr.lowleveltype)]) s = a.binding(ll_h_graph.getreturnvar()) assert s.ll_ptrtype == A_repr.lowleveltype rt.specialize_more_blocks() @@ -1929,7 +1932,8 @@ A_repr = rclass.getinstancerepr(rt, a.bookkeeper.getdesc(A). getuniqueclassdef()) - ll_h_graph = annlowlevel.annotate_lowlevel_helper(a, ll_h, [s_R, s_ll_f, annmodel.SomePtr(A_repr.lowleveltype)]) + ll_h_graph = annlowlevel.annotate_lowlevel_helper( + a, ll_h, [s_R, s_ll_f, SomePtr(A_repr.lowleveltype)]) s = a.binding(ll_h_graph.getreturnvar()) assert s.ll_ptrtype == A_repr.lowleveltype rt.specialize_more_blocks() diff --git a/rpython/rtyper/test/test_rptr.py b/rpython/rtyper/test/test_rptr.py --- a/rpython/rtyper/test/test_rptr.py +++ b/rpython/rtyper/test/test_rptr.py @@ -3,6 +3,7 @@ import py from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.annotator.annrpython import RPythonAnnotator from rpython.rlib.rarithmetic import is_valid_int from rpython.rtyper.annlowlevel import annotate_lowlevel_helper, LowLevelAnnotatorPolicy @@ -31,11 +32,11 @@ PS2 = lltype.Ptr(S2) def lldown(p): return lltype.cast_pointer(PS, p) - s, t = ll_rtype(lldown, [annmodel.SomePtr(PS2)]) + s, t = ll_rtype(lldown, [SomePtr(PS2)]) assert s.ll_ptrtype == PS def llup(p): return lltype.cast_pointer(PS2, p) - s, t = ll_rtype(llup, [annmodel.SomePtr(PS)]) + s, t = ll_rtype(llup, [SomePtr(PS)]) assert s.ll_ptrtype == PS2 def test_runtime_type_info(): @@ -45,8 +46,8 @@ lltype.runtime_type_info(p) == lltype.getRuntimeTypeInfo(S)) assert ll_example(lltype.malloc(S)) == (lltype.getRuntimeTypeInfo(S), True) - s, t = ll_rtype(ll_example, [annmodel.SomePtr(lltype.Ptr(S))]) - assert s == annmodel.SomeTuple([annmodel.SomePtr(lltype.Ptr(lltype.RuntimeTypeInfo)), + s, t = ll_rtype(ll_example, [SomePtr(lltype.Ptr(S))]) + assert s == annmodel.SomeTuple([SomePtr(lltype.Ptr(lltype.RuntimeTypeInfo)), annmodel.SomeBool()]) from rpython.rtyper.test.test_llinterp import interpret, gengraph diff --git a/rpython/rtyper/test/test_rvirtualizable.py b/rpython/rtyper/test/test_rvirtualizable.py --- a/rpython/rtyper/test/test_rvirtualizable.py +++ b/rpython/rtyper/test/test_rvirtualizable.py @@ -1,4 +1,5 @@ import py +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.rvirtualizable import replace_force_virtualizable_with_call @@ -153,7 +154,7 @@ raise ValueError annhelper = MixLevelHelperAnnotator(rtyper) if self.type_system == 'lltype': - s_vinst = annmodel.SomePtr(v_inst_ll_type) + s_vinst = SomePtr(v_inst_ll_type) else: s_vinst = annmodel.SomeOOInstance(v_inst_ll_type) funcptr = annhelper.delayedfunction(mycall, [s_vinst], annmodel.s_None) From noreply at buildbot.pypy.org Tue Jan 28 22:14:07 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 28 Jan 2014 22:14:07 +0100 (CET) Subject: [pypy-commit] pypy py3k: kill Message-ID: <20140128211407.39EEC1D23CF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68982:9d12dac12c3a Date: 2014-01-28 12:43 -0800 http://bitbucket.org/pypy/pypy/changeset/9d12dac12c3a/ Log: kill diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py 
+++ b/pypy/module/pyexpat/test/test_parser.py @@ -183,7 +183,7 @@ assert fake_reader.read_count == 4 class AppTestPyexpat2: - spaceconfig = dict(usemodules=['_ffi', '_rawffi', 'pyexpat', 'itertools', + spaceconfig = dict(usemodules=['_rawffi', 'pyexpat', 'itertools', '_socket', 'rctime', 'struct', 'binascii']) def test_django_bug(self): From noreply at buildbot.pypy.org Tue Jan 28 22:14:08 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 28 Jan 2014 22:14:08 +0100 (CET) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20140128211408.64BE21D23CF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68983:ffa7cbdd7233 Date: 2014-01-28 12:48 -0800 http://bitbucket.org/pypy/pypy/changeset/ffa7cbdd7233/ Log: 2to3 diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -109,7 +109,7 @@ def test_wide_unicode_in_source(self): if sys.maxunicode == 65535: py.test.skip("requires a wide-unicode host") - self.parse_and_compare('u"\xf0\x9f\x92\x8b"', + self.parse_and_compare('"\xf0\x9f\x92\x8b"', unichr(0x1f48b), encoding='utf-8') diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -80,9 +80,9 @@ l = [1, 2, 3] assert list_strategy(l) == "int" + l = [b"a", b"b", b"c"] + assert list_strategy(l) == "bytes" l = ["a", "b", "c"] - assert list_strategy(l) == "bytes" - l = [u"a", u"b", u"c"] assert list_strategy(l) == "unicode" l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" From noreply at buildbot.pypy.org Tue Jan 28 22:21:40 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 28 Jan 2014 22:21:40 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: implement test, so far without actually using precompiled headers Message-ID: <20140128212140.E88721C015D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r68984:c5404203c0c7 Date: 2014-01-28 23:16 +0200 http://bitbucket.org/pypy/pypy/changeset/c5404203c0c7/ Log: implement test, so far without actually using precompiled headers diff --git a/rpython/translator/platform/test/test_platform.py b/rpython/translator/platform/test/test_platform.py --- a/rpython/translator/platform/test/test_platform.py +++ b/rpython/translator/platform/test/test_platform.py @@ -87,11 +87,48 @@ self.check_res(res, '%d\n' %sum(range(900))) def test_precompiled_headers(self): + tmpdir = udir.join('precompiled_headers').ensure(dir=1) # Create an eci that should not use precompiled headers + eci = ExternalCompilationInfo(include_dirs=[tmpdir]) + main_c = tmpdir.join('main_no_pch.c') + eci.separate_module_files = [main_c] + ncfiles = 10 + nprecompiled_headers = 4 + txt = '' + for i in range(ncfiles): + txt += "int func%03d();\n" % i + txt += "\nint main(int argc, char * argv[])\n" + txt += "{\n int i=0;\n" + for i in range(ncfiles): + txt += " i += func%03d();\n" % i + txt += ' printf("%d\\n", i);\n' + txt += " return 0;\n};\n" + main_c.write(txt) + # Create some large headers with dummy functions to be precompiled + cfiles_precompiled_headers = [] + for i in range(nprecompiled_headers): + pch_name =tmpdir.join('pcheader%03d.h' % i) + txt = '' + for j in range(1200): + txt += "int pcfunc%03d_%03d();\n" %(i, j) + pch_name.write(txt) + cfiles_precompiled_headers.append(pch_name) # 
Create some cfiles with headers we want precompiled - # Call gen_makefile(cfiles, eci, cfiles_precompiled_headers=[list, of, headers]) - # Make sure it all works - pass + cfiles = [] + for i in range(ncfiles): + c_name =tmpdir.join('implement%03d.c' % i) + txt = '' + for pch_name in cfiles_precompiled_headers: + txt += '#include "%s"\n' % pch_name + txt += "int func%03d(){ return %d;};\n" % (i, i) + c_name.write(txt) + cfiles.append(c_name) + mk = self.platform.gen_makefile(cfiles, eci, path=udir, + cfile_precompilation=cfiles_precompiled_headers) + mk.write() + self.platform.execute_makefile(mk) + res = self.platform.execute(mk.exe_name) + self.check_res(res, '%d\n' %sum(range(ncfiles))) def test_nice_errors(self): cfile = udir.join('test_nice_errors.c') From noreply at buildbot.pypy.org Wed Jan 29 02:10:43 2014 From: noreply at buildbot.pypy.org (wlav) Date: Wed, 29 Jan 2014 02:10:43 +0100 (CET) Subject: [pypy-commit] pypy reflex-support: far from functional, this is a start for the cling backend Message-ID: <20140129011043.0A9461C0C34@cobra.cs.uni-duesseldorf.de> Author: Wim Lavrijsen Branch: reflex-support Changeset: r68985:8f1d5735b2a5 Date: 2014-01-28 17:09 -0800 http://bitbucket.org/pypy/pypy/changeset/8f1d5735b2a5/ Log: far from functional, this is a start for the cling backend diff --git a/pypy/module/cppyy/capi/cling_capi.py b/pypy/module/cppyy/capi/cling_capi.py new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/capi/cling_capi.py @@ -0,0 +1,69 @@ +import py, os + +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.lltypesystem import rffi +from rpython.rlib import libffi, rdynload + +__all__ = ['identify', 'std_string_name', 'eci', 'c_load_dictionary'] + +pkgpath = py.path.local(__file__).dirpath().join(os.pardir) +srcpath = pkgpath.join("src") +incpath = pkgpath.join("include") + +import commands +(config_stat, incdir) = commands.getstatusoutput("root-config --incdir") + +if os.environ.get("ROOTSYS"): + if config_stat != 0: # presumably Reflex-only + rootincpath = [os.path.join(os.environ["ROOTSYS"], "interpreter/cling/include"), + os.path.join(os.environ["ROOTSYS"], "interpreter/llvm/inst/include")] + rootlibpath = [os.path.join(os.environ["ROOTSYS"], "lib64"), os.path.join(os.environ["ROOTSYS"], "lib")] + else: + rootincpath = [incdir] + rootlibpath = commands.getoutput("root-config --libdir").split() +else: + if config_stat == 0: + rootincpath = [incdir] + rootlibpath = commands.getoutput("root-config --libdir").split() + else: + rootincpath = [] + rootlibpath = [] + +def identify(): + return 'Cling' + +ts_reflect = False +ts_call = 'auto' +ts_memory = 'auto' +ts_helper = 'auto' + +std_string_name = 'std::basic_string' + +eci = ExternalCompilationInfo( + separate_module_files=[srcpath.join("clingcwrapper.cxx")], + include_dirs=[incpath] + rootincpath, + includes=["clingcwrapper.h"], + library_dirs=rootlibpath, + libraries=["Cling"], + compile_extra=["-fno-strict-aliasing"], + use_cpp_linker=True, +) + +_c_load_dictionary = rffi.llexternal( + "cppyy_load_dictionary", + [rffi.CCHARP], rdynload.DLLHANDLE, + releasegil=False, + compilation_info=eci) + +def c_load_dictionary(name): + pch = _c_load_dictionary(name) + return pch + + +# Cling-specific pythonizations +def register_pythonizations(space): + "NOT_RPYTHON" + pass + +def pythonize(space, name, w_pycppclass): + pass diff --git a/pypy/module/cppyy/include/clingcwrapper.h b/pypy/module/cppyy/include/clingcwrapper.h new file mode 100644 --- /dev/null +++ 
b/pypy/module/cppyy/include/clingcwrapper.h @@ -0,0 +1,37 @@ +#ifndef CPPYY_CLINGCWRAPPER +#define CPPYY_CLINGCWRAPPER + +#include "capi.h" + +#ifdef __cplusplus +extern "C" { +#endif // ifdef __cplusplus + + /* misc helpers */ + void* cppyy_load_dictionary(const char* lib_name); + +#ifdef __cplusplus +} +#endif // ifdef __cplusplus + +// TODO: pick up from llvm-config --cxxflags +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#ifndef __STDC_CONSTANT_MACROS +#define __STDC_CONSTANT_MACROS +#endif + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#ifndef __STDC_LIMIT_MACROS +#define __STDC_LIMIT_MACROS +#endif + +// Wrapper callback: except this to become available from Cling directly +typedef void (*CPPYY_Cling_Wrapper_t)(void*, int, void**, void*); + +#endif // ifndef CPPYY_CLINGCWRAPPER diff --git a/pypy/module/cppyy/src/clingcwrapper.cxx b/pypy/module/cppyy/src/clingcwrapper.cxx new file mode 100644 --- /dev/null +++ b/pypy/module/cppyy/src/clingcwrapper.cxx @@ -0,0 +1,1810 @@ +#include "cppyy.h" +#include "clingcwrapper.h" + +/************************************************************************* + * Copyright (C) 1995-2014, the ROOT team. * + * LICENSE: LGPLv2.1; see http://root.cern.ch/drupal/content/license * + * CONTRIBUTORS: see http://root.cern.ch/drupal/content/contributors * + *************************************************************************/ + +#include + +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclBase.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/PrettyPrinter.h" +#include "clang/AST/Type.h" +#include "clang/Frontend/CompilerInstance.h" +#include "clang/Sema/Sema.h" + +#include "cling/Interpreter/DynamicLibraryManager.h" +#include "cling/Interpreter/Interpreter.h" +#include "cling/Interpreter/LookupHelper.h" +#include "cling/Interpreter/StoredValueRef.h" +#include "cling/MetaProcessor/MetaProcessor.h" + +#include "llvm/ADT/SmallVector.h" +#include "llvm/ExecutionEngine/GenericValue.h" +#include "llvm/Support/raw_ostream.h" + +#include +#include +#include +#include +#include + +#include +#include +#include + +using namespace clang; + + +/* cling initialization --------------------------------------------------- */ +namespace { + +cling::Interpreter* gCppyy_Cling; +cling::MetaProcessor* gCppyy_MetaProcessor; + +struct Cppyy_InitCling { // TODO: check whether ROOT/meta's TCling is linked in + Cppyy_InitCling() { + std::vector cling_args_storage; + cling_args_storage.push_back("cling4cppyy"); + + // TODO: get this from env + cling_args_storage.push_back("-I/home/wlavrijsen/rootdev/root/etc"); + + std::vector interp_args; + for (std::vector::const_iterator iarg = cling_args_storage.begin(); + iarg != cling_args_storage.end(); ++iarg) + interp_args.push_back(iarg->c_str()); + + // TODO: get this from env + const char* llvm_resource_dir = "/home/wlavrijsen/rootdev/root/etc/cling"; + gCppyy_Cling = new cling::Interpreter( + interp_args.size(), &(interp_args[0]), llvm_resource_dir); + + // fInterpreter->installLazyFunctionCreator(llvmLazyFunctionCreator); + + { + // R__LOCKGUARD(gInterpreterMutex); + gCppyy_Cling->AddIncludePath("/home/wlavrijsen/rootdev/root/etc/cling"); + gCppyy_Cling->AddIncludePath("."); + } + + // don't check whether modules' files exist. + gCppyy_Cling->getCI()->getPreprocessorOpts().DisablePCHValidation = true; + + // Use a stream that doesn't close its file descriptor. 
+ static llvm::raw_fd_ostream fMPOuts (STDOUT_FILENO, /* ShouldClose */ false); + gCppyy_MetaProcessor = new cling::MetaProcessor(*gCppyy_Cling, fMPOuts); + + gCppyy_Cling->enableDynamicLookup(); + } +} _init; + +typedef std::map NamedHandles_t; +static NamedHandles_t s_named; + +struct SimpleScope { + std::vector m_methods; + std::vector m_data; +}; + +typedef std::map Scopes_t; +static Scopes_t s_scopes; + +typedef std::map Wrappers_t; +static Wrappers_t s_wrappers; + +} // unnamed namespace + + +/* local helpers --------------------------------------------------------- */ +static inline void print_error(const std::string& where, const std::string& what) { + std::cerr << where << ": " << what << std::endl; +} + +static inline char* cppstring_to_cstring(const std::string& name) { + char* name_char = (char*)malloc(name.size() + 1); + strcpy(name_char, name.c_str()); + return name_char; +} + +static inline SimpleScope* scope_from_handle(cppyy_type_t handle) { + return s_scopes[(cppyy_scope_t)handle]; +} + +static inline std::string qualtype_to_string(const QualType& qt, const ASTContext& atx) { + std::string result; + + PrintingPolicy policy(atx.getPrintingPolicy()); + policy.SuppressTagKeyword = true; // no class or struct keyword + policy.SuppressScope = true; // force scope from a clang::ElaboratedType + policy.AnonymousTagLocations = false; // no file name + line number for anonymous types + // The scope suppression is required for getting rid of the anonymous part of the name + // of a class defined in an anonymous namespace. + + qt.getAsStringInternal(result, policy); + return result; +} + +static inline std::vector build_args(int nargs, void* args) { + std::vector arguments; + arguments.reserve(nargs); + for (int i = 0; i < nargs; ++i) { + char tc = ((CPPYY_G__value*)args)[i].type; + if (tc != 'a' && tc != 'o') + arguments.push_back(&((CPPYY_G__value*)args)[i]); + else + arguments.push_back((void*)(*(long*)&((CPPYY_G__value*)args)[i])); + } + return arguments; +} + + +/* name to opaque C++ scope representation -------------------------------- */ +int cppyy_num_scopes(cppyy_scope_t handle) { + return 0; +} + +char* cppyy_resolve_name(const char* cppitem_name) { + std::cout << " RESOLVING: " << cppitem_name << std::endl; + return cppstring_to_cstring(cppitem_name); +} + +cppyy_scope_t cppyy_get_scope(const char* scope_name) { + const cling::LookupHelper& lh = gCppyy_Cling->getLookupHelper(); + const Type* type = 0; + const Decl* decl = lh.findScope(scope_name, &type, /* intantiateTemplate= */ true); + if (!decl) { + //std::string buf = TClassEdit::InsertStd(name); + //decl = lh.findScope(buf, &type, /* intantiateTemplate= */ true); + } + if (!decl && type) { + const TagType* tagtype = type->getAs(); + if (tagtype) { + decl = tagtype->getDecl(); + } + } + + std::cout << "FOR: " << scope_name << " RECEIVED: " << type << " AND: " << decl << std::endl; + if (decl) { + DeclContext* dc = llvm::cast(const_cast(decl)); + SimpleScope* s = new SimpleScope; + for (DeclContext::decl_iterator idecl = dc->decls_begin(); *idecl; ++idecl) { + if (FunctionDecl* m = llvm::dyn_cast_or_null(*idecl)) + s->m_methods.push_back(m); + else if (FieldDecl* d = llvm::dyn_cast_or_null(*idecl)) + s->m_data.push_back(d); + } + s_scopes[(cppyy_scope_t)decl] = s; + } + + return (cppyy_scope_t)decl; // lookup failure return 0 (== error) +} + + +/* method/function dispatching -------------------------------------------- */ + +// TODO: expect the below to live in libCling.so +static CPPYY_Cling_Wrapper_t 
make_wrapper(const FunctionDecl* fdecl); +static void exec_with_valref_return(void* address, cling::StoredValueRef* ret, const FunctionDecl*); +static long long sv_to_long_long(const cling::StoredValueRef& svref); +// -- TODO: expect the above to live in libCling.so + + +template +static inline T cppyy_call_T(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + if (s_wrappers.find(method) == s_wrappers.end()) { + make_wrapper((FunctionDecl*)method); + } + cling::StoredValueRef ret; + // std::vector arguments = build_args(nargs, args); + // CPPYY_Cling_Wrapper_t cb = (CPPYY_Cling_Wrapper_t)method; + exec_with_valref_return((void*)self, &ret, (FunctionDecl*)method); + // (*cb)((void*)self, nargs, const_cast(arguments.data()), ret); + return static_cast(sv_to_long_long(ret)); +} + + + +int cppyy_call_i(cppyy_method_t method, cppyy_object_t self, int nargs, void* args) { + return cppyy_call_T(method, self, nargs, args); +} + + + +cppyy_methptrgetter_t cppyy_get_methptr_getter(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { + return (cppyy_methptrgetter_t)0; +} + + +/* handling of function argument buffer ----------------------------------- */ +void* cppyy_allocate_function_args(size_t nargs) { + CPPYY_G__value* args = (CPPYY_G__value*)malloc(nargs*sizeof(CPPYY_G__value)); + for (size_t i = 0; i < nargs; ++i) + args[i].type = 'l'; + return (void*)args; +} + +void cppyy_deallocate_function_args(void* args) { + free(args); +} + +size_t cppyy_function_arg_sizeof() { + return sizeof(CPPYY_G__value); +} + +size_t cppyy_function_arg_typeoffset() { + return offsetof(CPPYY_G__value, type); +} + + +/* scope reflection information ------------------------------------------- */ +int cppyy_is_namespace(cppyy_scope_t /* handle */) { + return 0; +} + +int cppyy_is_enum(const char* /* type_name */) { + return 0; +} + + +/* class reflection information ------------------------------------------- */ +char* cppyy_final_name(cppyy_type_t handle) { + for (NamedHandles_t::iterator isp = s_named.begin(); isp != s_named.end(); ++isp) { + if (isp->second == (cppyy_scope_t)handle) + return cppstring_to_cstring(isp->first); + } + return cppstring_to_cstring(""); +} + +char* cppyy_scoped_final_name(cppyy_type_t handle) { + return cppyy_final_name(handle); +} + +int cppyy_has_complex_hierarchy(cppyy_type_t /* handle */) { + return 1; +} + + +/* method/function reflection information --------------------------------- */ +int cppyy_num_methods(cppyy_scope_t handle) { + SimpleScope* s = scope_from_handle(handle); + if (!s) return 0; + return s->m_methods.size(); +} + +cppyy_index_t cppyy_method_index_at(cppyy_scope_t /* scope */, int imeth) { + return (cppyy_index_t)imeth; +} + +char* cppyy_method_name(cppyy_scope_t handle, cppyy_index_t method_index) { + SimpleScope* s = scope_from_handle(handle); + if (!s) return cppstring_to_cstring(""); + FunctionDecl* meth = s->m_methods.at(method_index); + std::cout << " METHOD NAME: " << meth->getDeclName().getAsString() << std::endl; + return cppstring_to_cstring(meth->getDeclName().getAsString()); +} + +char* cppyy_method_result_type(cppyy_scope_t handle, cppyy_index_t method_index) { + SimpleScope* s = scope_from_handle(handle); + if (!s) return cppstring_to_cstring(""); + FunctionDecl* meth = s->m_methods.at(method_index); + const std::string& ret_type = + qualtype_to_string(meth->getCallResultType(), meth->getASTContext()); + std::cout << " -> RET TYPE: " << ret_type << std::endl; + return cppstring_to_cstring(ret_type); +} + +int 
cppyy_method_num_args(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */) { + return 1; +} + +int cppyy_method_req_args(cppyy_scope_t handle, cppyy_index_t method_index) { + return cppyy_method_num_args(handle, method_index); +} + +char* cppyy_method_arg_type(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */, int /* arg_index */) { + return cppstring_to_cstring("double"); +} + +char* cppyy_method_arg_default(cppyy_scope_t handle, cppyy_index_t method_index, int arg_index) { + return cppstring_to_cstring(""); +} + +char* cppyy_method_signature(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */) { + return cppstring_to_cstring("double"); +} + +int cppyy_method_is_template(cppyy_scope_t /* handle */, cppyy_index_t /* method_index */) { + return 0; +} + +cppyy_method_t cppyy_get_method(cppyy_scope_t handle, cppyy_index_t method_index) { + SimpleScope* s = scope_from_handle(handle); + if (!s) return (cppyy_method_t)0; + return (cppyy_method_t)s->m_methods.at(method_index); +} + + +/* method properties ----------------------------------------------------- */ +int cppyy_is_constructor(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { + return 0; +} + +int cppyy_is_staticmethod(cppyy_type_t /* handle */, cppyy_index_t /* method_index */) { + return 1; +} + + +/* data member reflection information ------------------------------------- */ +int cppyy_num_datamembers(cppyy_scope_t /* handle */) { + return 0; +} + + +/* misc helpers ----------------------------------------------------------- */ +void cppyy_free(void* ptr) { + free(ptr); +} + + +void* cppyy_load_dictionary(const char* lib_name) { + // TODO: need to rethink this; for now it creates reflection info from + // .h while loading lib.so + + // Load a library file in cling's memory. + // if 'system' is true, the library is never unloaded. + // Return 0 on success, -1 on failure. 
+ // R__LOCKGUARD2(gInterpreterMutex); + std::cout << " NOW LOADING: " << lib_name << std::endl; + + cling::StoredValueRef call_res; + cling::Interpreter::CompilationResult comp_res = cling::Interpreter::kSuccess; + std::ostringstream line; + line << "#include \"" << lib_name << ".h\""; + gCppyy_MetaProcessor->process(line.str().c_str(), comp_res, &call_res); + + std::string to_load = "lib"; + to_load += lib_name; + to_load += ".so"; + cling::DynamicLibraryManager::LoadLibResult res + = gCppyy_Cling->getDynamicLibraryManager()->loadLibrary(to_load, /* not unload */ true); + // if (res == cling::DynamicLibraryManager::kLoadLibSuccess) { + // UpdateListOfLoadedSharedLibraries(); + // } + switch (res) { + case cling::DynamicLibraryManager::kLoadLibSuccess: return (void*)1; + case cling::DynamicLibraryManager::kLoadLibExists: return (void*)2; + default: break; + }; + return (void*)1; +} + + +/* to-be libCling code taken from ROOT/meta ------------------------------- */ + +// TODO: expect the below to live in libCling.so + +template +T sv_to_long_long_u_or_not(const cling::StoredValueRef& svref) { + const cling::Value& valref = svref.get(); + QualType QT = valref.getClangType(); + if (QT.isNull()) { + print_error("sv_to_long_long_u_or_not", "null type!"); + return 0; + } + llvm::GenericValue gv = valref.getGV(); + if (QT->isMemberPointerType()) { + const MemberPointerType* MPT = + QT->getAs(); + if (MPT->isMemberDataPointer()) { + return (T) (ptrdiff_t) gv.PointerVal; + } + return (T) gv.PointerVal; + } + if (QT->isPointerType() || QT->isArrayType() || QT->isRecordType() || + QT->isReferenceType()) { + return (T) gv.PointerVal; + } + if (const EnumType* ET = llvm::dyn_cast(&*QT)) { + if (ET->getDecl()->getIntegerType()->hasSignedIntegerRepresentation()) + return (T) gv.IntVal.getSExtValue(); + else + return (T) gv.IntVal.getZExtValue(); + } + if (const BuiltinType* BT = llvm::dyn_cast(&*QT)) { + if (BT->isSignedInteger()) { + return gv.IntVal.getSExtValue(); + } else if (BT->isUnsignedInteger()) { + return (T) gv.IntVal.getZExtValue(); + } else { + switch (BT->getKind()) { + case BuiltinType::Float: + return (T) gv.FloatVal; + case BuiltinType::Double: + return (T) gv.DoubleVal; + case BuiltinType::LongDouble: + // FIXME: Implement this! + break; + case BuiltinType::NullPtr: + // C++11 nullptr + return 0; + default: break; + } + } + } + print_error("sv_to_long_long_u_or_not", "cannot handle this type!"); + QT->dump(); + return 0; +} + +static long long sv_to_long_long(const cling::StoredValueRef& svref) { + return sv_to_long_long_u_or_not(svref); +} + +static +unsigned long long sv_to_ulong_long(const cling::StoredValueRef& svref) { + return sv_to_long_long_u_or_not(svref); +} + + +namespace { + +class ValHolder { +public: + union { + long double ldbl; + double dbl; + float flt; + //__uint128_t ui128; + //__int128_t i128; + unsigned long long ull; + long long ll; + unsigned long ul; + long l; + unsigned int ui; + int i; + unsigned short us; + short s; + //char32_t c32; + //char16_t c16; + //unsigned wchar_t uwc; - non-standard + wchar_t wc; + unsigned char uc; + signed char sc; + char c; + bool b; + void* vp; + } u; +}; + +} // unnamed namespace + +static void exec(void* address, void* ret, const FunctionDecl* fdecl) { + std::vector vh_ary; + std::vector vp_ary; + + // + // Convert the arguments from cling::StoredValueRef to their + // actual type and store them in a holder for passing to the + // wrapper function by pointer to value. 
+ // + unsigned num_params = fdecl->getNumParams(); + /* unsigned num_args = fArgVals.size(); + + if (num_args < fdecl->getMinRequiredArguments ()) { + Error("TClingCallFunc::exec", + "Not enough arguments provided for %s (%d instead of the minimum %d)", + fMethod->Name(ROOT::TMetaUtils::TNormalizedCtxt(fInterp->getLookupHelper())), + num_args,fdecl->getMinRequiredArguments ()); + return; + } + if (address == 0 && llvm::dyn_cast(fdecl) + && !(llvm::dyn_cast(fdecl))->isStatic() + && !llvm::dyn_cast(fdecl)) { + Error("TClingCallFunc::exec", + "The method %s is called without an object.", + fMethod->Name(ROOT::TMetaUtils::TNormalizedCtxt(fInterp->getLookupHelper()))); + return; + } + vh_ary.reserve(num_args); + vp_ary.reserve(num_args); + for (unsigned i = 0U; i < num_args; ++i) { + QualType Ty; + if (i < num_params) { + const ParmVarDecl* PVD = fdecl->getParamDecl(i); + Ty = PVD->getType(); + } + else { + Ty = fArgVals[i].get().getClangType(); + } + QualType QT = Ty.getCanonicalType(); + if (QT->isReferenceType()) { + ValHolder vh; + vh.u.vp = (void*) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + } + else if (QT->isMemberPointerType()) { + ValHolder vh; + vh.u.vp = (void*) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + } + else if (QT->isPointerType() || QT->isArrayType()) { + ValHolder vh; + vh.u.vp = (void*) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + } + else if (QT->isRecordType()) { + ValHolder vh; + vh.u.vp = (void*) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + } + else if (const EnumType* ET = llvm::dyn_cast(&*QT)) { + // Note: We may need to worry about the underlying type + // of the enum here. + (void) ET; + ValHolder vh; + vh.u.i = (int) sv_to_long_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + } + else if (const BuiltinType* BT = llvm::dyn_cast(&*QT)) { + // + // WARNING!!! + // + // This switch is organized in order-of-declaration + // so that the produced assembly code is optimal. + // Do not reorder! + // + switch (BT->getKind()) { + // + // Builtin Types + // + case BuiltinType::Void: { + // void + print_error("exec", "invalid argument type (void)"); + return; + } + // + // Unsigned Types + // + case BuiltinType::Bool: { + // bool + ValHolder vh; + vh.u.b = (bool) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::Char_U: { + // char on targets where it is unsigned + ValHolder vh; + vh.u.c = (char) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::UChar: { + // unsigned char + ValHolder vh; + vh.u.uc = (unsigned char) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::WChar_U: { + // wchar_t on targets where it is unsigned. + // The standard doesn't allow to specify signednedd of wchar_t + // thus this maps simply to wchar_t. 
+ ValHolder vh; + vh.u.wc = (wchar_t) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::Char16: + case BuiltinType::Char32: { + print_error("exec", "unsupported argument"); + QT->dump(); + return; + } + case BuiltinType::UShort: { + // unsigned short + ValHolder vh; + vh.u.us = (unsigned short) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::UInt: { + // unsigned int + ValHolder vh; + vh.u.ui = (unsigned int) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::ULong: { + // unsigned long + ValHolder vh; + vh.u.ul = (unsigned long) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::ULongLong: { + // unsigned long long + ValHolder vh; + vh.u.ull = (unsigned long long) sv_to_ulong_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::UInt128: { + print_error("exec", "unsupported argument"); + QT->dump(); + return; + } + // + // Signed Types + // + case BuiltinType::Char_S: { + // char on targets where it is signed + ValHolder vh; + vh.u.c = (char) sv_to_long_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::SChar: { + // signed char + ValHolder vh; + vh.u.sc = (signed char) sv_to_long_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::WChar_S: { + // wchar_t on targets where it is signed. + // The standard doesn't allow to specify signednedd of wchar_t + // thus this maps simply to wchar_t. 
+ ValHolder vh; + vh.u.wc = (wchar_t) sv_to_long_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::Short: { + // short + ValHolder vh; + vh.u.s = (short) sv_to_long_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::Int: { + // int + ValHolder vh; + vh.u.i = (int) sv_to_long_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::Long: { + // long + ValHolder vh; + vh.u.l = (long) sv_to_long_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::LongLong: { + // long long + ValHolder vh; + vh.u.ll = (long long) sv_to_long_long(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::Int128: + case BuiltinType::Half: { + // half in OpenCL, __fp16 in ARM NEON + print_error("exec", "unsupported argument"); + QT->dump(); + return; + } + case BuiltinType::Float: { + // float + ValHolder vh; + vh.u.flt = sv_to(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::Double: { + // double + ValHolder vh; + vh.u.dbl = sv_to(fArgVals[i]); + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + case BuiltinType::LongDouble: { + // long double + print_error("exec", "unsupported argument"); + QT->dump(); + return; + } + // + // Language-Specific Types + // + case BuiltinType::NullPtr: { + // C++11 nullptr + ValHolder vh; + vh.u.vp = (void*) fArgVals[i].get().getGV().PointerVal; + vh_ary.push_back(vh); + vp_ary.push_back(&vh_ary.back()); + break; + } + default: { + print_error("exec", "unsupported argument"); + QT->dump(); + return; + } + } + } + else { + print_error("exec", "invalid type (unrecognized)!"); + QT->dump(); + return; + } + }*/ + + CPPYY_Cling_Wrapper_t wrapper = s_wrappers[(cppyy_method_t)fdecl]; + (*wrapper)(address, (int)0/*num_args*/, (void**)vp_ary.data(), ret); +} + + +static void exec_with_valref_return(void* address, cling::StoredValueRef* ret, const FunctionDecl* fdecl) { + if (!ret) { + exec(address, 0, fdecl); + return; + } + std::cout << " USING DECL: " << fdecl << std::endl; + fdecl->dump(); + ASTContext& Context = fdecl->getASTContext(); + + if (const CXXConstructorDecl* CD = llvm::dyn_cast(fdecl)) { + const TypeDecl* TD = llvm::dyn_cast(CD->getDeclContext()); + QualType ClassTy(TD->getTypeForDecl(), 0); + QualType QT = Context.getLValueReferenceType(ClassTy); + llvm::GenericValue gv; + exec(address, &gv.PointerVal, fdecl); + *ret = cling::StoredValueRef::bitwiseCopy( + *gCppyy_Cling, cling::Value(gv, QT)); + return; + } + QualType QT = fdecl->getResultType().getCanonicalType(); + if (QT->isReferenceType()) { + llvm::GenericValue gv; + exec(address, &gv.PointerVal, fdecl); + *ret = cling::StoredValueRef::bitwiseCopy( + *gCppyy_Cling, cling::Value(gv, QT)); + return; + } + else if (QT->isMemberPointerType()) { + const MemberPointerType* MPT = + QT->getAs(); + if (MPT->isMemberDataPointer()) { + // A member data pointer is a actually a struct with one + // member of ptrdiff_t, the offset from the base of the object + // storage to the storage for the designated data member. + llvm::GenericValue gv; + exec(address, &gv.PointerVal, fdecl); + *ret = cling::StoredValueRef::bitwiseCopy( + *gCppyy_Cling, cling::Value(gv, QT)); + return; + } + // We are a function member pointer. 
+ llvm::GenericValue gv; + exec(address, &gv.PointerVal, fdecl); + *ret = cling::StoredValueRef::bitwiseCopy( + *gCppyy_Cling, cling::Value(gv, QT)); + return; + } + else if (QT->isPointerType() || QT->isArrayType()) { + // Note: ArrayType is an illegal function return value type. + llvm::GenericValue gv; + exec(address, &gv.PointerVal, fdecl); + *ret = cling::StoredValueRef::bitwiseCopy( + *gCppyy_Cling, cling::Value(gv, QT)); + return; + } + else if (QT->isRecordType()) { + uint64_t size = Context.getTypeSizeInChars(QT).getQuantity(); + void* p = ::operator new(size); + exec(address, p, fdecl); + *ret = cling::StoredValueRef::bitwiseCopy( + *gCppyy_Cling, cling::Value(llvm::PTOGV(p), QT)); + return; + } + else if (const EnumType* ET = llvm::dyn_cast(&*QT)) { + // Note: We may need to worry about the underlying type + // of the enum here. + (void) ET; + uint64_t numBits = Context.getTypeSize(QT); + int retVal = 0; + exec(address, &retVal, fdecl); + llvm::GenericValue gv; + gv.IntVal = llvm::APInt(numBits, (uint64_t)retVal, true /*isSigned*/); + *ret = cling::StoredValueRef::bitwiseCopy( + *gCppyy_Cling, cling::Value(gv, QT)); + return; + } + else if (const BuiltinType* BT = llvm::dyn_cast(&*QT)) { + llvm::GenericValue gv; + + uint64_t numBits = Context.getTypeSize(QT); + switch (BT->getKind()) { + // + // builtin types + // + case BuiltinType::Void: { + exec(address, 0, fdecl); + return; + } + // + // unsigned integral types + // + case BuiltinType::Bool: { + bool retVal = false; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t)retVal, false /*isSigned*/); + break; + } + case BuiltinType::Char_U: { + // char on targets where it is unsigned + char retVal = '\0'; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); + break; + } + case BuiltinType::UChar: { + unsigned char retVal = '\0'; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); + break; + } + case BuiltinType::WChar_U: { + // wchar_t on targets where it is unsigned. + // The standard doesn't allow to specify signedness of wchar_t + // thus this maps simply to wchar_t. 
+ wchar_t retVal = L'\0'; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); + break; + } + case BuiltinType::UShort: { + unsigned short retVal = 0; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); + break; + } + case BuiltinType::UInt: { + unsigned int retVal = 0; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); + break; + } + case BuiltinType::ULong: { + // unsigned long + unsigned long retVal = 0; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); + break; + } + case BuiltinType::ULongLong: { + // unsigned long long + unsigned long long retVal = 0; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, false /*isSigned*/); + break; + } + // + // signed integral types + // + case BuiltinType::Char_S: { + // char on targets where it is signed + char retVal = '\0'; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + break; + } + case BuiltinType::SChar: { + // signed char + signed char retVal = '\0'; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + break; + } + case BuiltinType::WChar_S: { + // wchar_t on targets where it is signed. + // The standard doesn't allow to specify signednedd of wchar_t + // thus this maps simply to wchar_t. + wchar_t retVal = L'\0'; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + break; + } + case BuiltinType::Short: { + // short + short retVal = 0; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + break; + } + case BuiltinType::Int: { + // int + int retVal = 0; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + break; + } + case BuiltinType::Long: { + long retVal = 0; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + break; + } + case BuiltinType::LongLong: { + long long retVal = 0; + exec(address, &retVal, fdecl); + gv.IntVal = llvm::APInt(numBits, (uint64_t) retVal, true /*isSigned*/); + break; + } + case BuiltinType::Float: { + exec(address, &gv.FloatVal, fdecl); + break; + } + case BuiltinType::Double: { + exec(address, &gv.DoubleVal, fdecl); + break; + } + case BuiltinType::Char16: + case BuiltinType::Char32: + case BuiltinType::Half: + case BuiltinType::Int128: + case BuiltinType::UInt128: + case BuiltinType::LongDouble: + case BuiltinType::NullPtr: + default: { + print_error("exec_with_valref", "unsupported return type"); + return; + } + } + + *ret = cling::StoredValueRef::bitwiseCopy(*gCppyy_Cling, cling::Value(gv, QT)); + return; + } + + std::cout << "exec_with_valref: some error occurred ... " << std::endl; +} + + +static const std::string indent_string(" "); +static unsigned long long wrapper_serial = 0LL; + +void collect_type_info(QualType& QT, std::ostringstream& typedefbuf, + std::ostringstream& callbuf, std::string& type_name, bool& isReference, + int& ptrCnt, int indent_level, bool forArgument, const FunctionDecl* fdecl) { + // + // Collect information about type type of a function parameter + // needed for building the wrapper function. 
+ // + PrintingPolicy Policy(fdecl->getASTContext().getPrintingPolicy()); + isReference = false; + ptrCnt = 0; + if (QT->isRecordType() && forArgument) { + // Note: We treat object of class type as if it were a reference + // type because we hold it by pointer. + isReference = true; + QT.getAsStringInternal(type_name, Policy); + // And drop the default arguments if any (at least until the clang + // type printer properly handle template paratemeter that are enumerator). + //R__DropDefaultArg(type_name); + return; + } + while (1) { + if (QT->isArrayType()) { + ++ptrCnt; + QT = cast(QT)->getElementType(); + continue; + } + else if (QT->isFunctionPointerType()) { + std::string fp_typedef_name; + { + std::ostringstream nm; + nm << "FP" << wrapper_serial++; + type_name = nm.str(); + llvm::raw_string_ostream OS(fp_typedef_name); + QT.print(OS, Policy, type_name); + OS.flush(); + } + for (int i = 0; i < indent_level; ++i) { + typedefbuf << indent_string; + } + typedefbuf << "typedef " << fp_typedef_name << ";\n"; + break; + } + else if (QT->isMemberPointerType()) { + std::string mp_typedef_name; + { + std::ostringstream nm; + nm << "MP" << wrapper_serial++; + type_name = nm.str(); + llvm::raw_string_ostream OS(mp_typedef_name); + QT.print(OS, Policy, type_name); + OS.flush(); + } + for (int i = 0; i < indent_level; ++i) { + typedefbuf << indent_string; + } + typedefbuf << "typedef " << mp_typedef_name << ";\n"; + break; + } + else if (QT->isPointerType()) { + ++ptrCnt; + QT = cast(QT)->getPointeeType(); + continue; + } + else if (QT->isReferenceType()) { + isReference = true; + QT = cast(QT)->getPointeeType(); + continue; + } + QT.getAsStringInternal(type_name, Policy); + break; + } + // And drop the default arguments if any (at least until the clang + // type printer properly handle template paratemeter that are enumerator). + // R__DropDefaultArg(type_name); +} + +void make_narg_ctor(const unsigned N, std::ostringstream& typedefbuf, + std::ostringstream& callbuf, const std::string& class_name, + int indent_level, const FunctionDecl* fdecl) { + // Make a code string that follows this pattern: + // + // new ClassName(args...) + // + callbuf << "new " << class_name << "("; + for (unsigned i = 0U; i < N; ++i) { + const ParmVarDecl* PVD = fdecl->getParamDecl(i); + QualType Ty = PVD->getType(); + QualType QT = Ty.getCanonicalType(); + std::string type_name; + bool isReference = false; + int ptrCnt = 0; + collect_type_info(QT, typedefbuf, callbuf, type_name, + isReference, ptrCnt, indent_level, true, fdecl); + if (i) { + callbuf << ','; + if (i % 2) { + callbuf << ' '; + } + else { + callbuf << "\n"; + for (int j = 0; j <= indent_level; ++j) { + callbuf << indent_string; + } + } + } + if (isReference) { + std::string stars; + for (int j = 0; j < ptrCnt; ++j) { + stars.push_back('*'); + } + callbuf << "**(" << type_name.c_str() << stars << "**)args[" + << i << "]"; + } + else if (ptrCnt) { + std::string stars; + for (int j = 0; j < ptrCnt; ++j) { + stars.push_back('*'); + } + callbuf << "*(" << type_name.c_str() << stars << "*)args[" + << i << "]"; + } + else { + callbuf << "*(" << type_name.c_str() << "*)args[" << i << "]"; + } + } + callbuf << ")"; +} + +void make_narg_call(const unsigned N, std::ostringstream& typedefbuf, + std::ostringstream& callbuf, const std::string& class_name,int indent_level, const FunctionDecl* fdecl) { + // + // Make a code string that follows this pattern: + // + // ((*)obj)->(*(*)args[i], ...) 
+ // + if (const CXXMethodDecl* MD = llvm::dyn_cast(fdecl)) { + // This is a class, struct, or union member. + if (MD->isConst()) + callbuf << "((const " << class_name << "*)obj)->"; + else + callbuf << "((" << class_name << "*)obj)->"; + } + else if (const NamedDecl* ND = llvm::dyn_cast(fdecl->getDeclContext())) { + // This is a namespace member. + (void) ND; + callbuf << class_name << "::"; + } + // callbuf << fMethod->Name() << "("; + { + std::string name; + { + llvm::raw_string_ostream stream(name); + fdecl->getNameForDiagnostic(stream, fdecl->getASTContext().getPrintingPolicy(), /*Qualified=*/false); + } + callbuf << name << "("; + } + for (unsigned i = 0U; i < N; ++i) { + const ParmVarDecl* PVD = fdecl->getParamDecl(i); + QualType Ty = PVD->getType(); + QualType QT = Ty.getCanonicalType(); + std::string type_name; + bool isReference = false; + int ptrCnt = 0; + collect_type_info(QT, typedefbuf, callbuf, type_name, + isReference, ptrCnt, indent_level, true, fdecl); + if (i) { + callbuf << ','; + if (i % 2) { + callbuf << ' '; + } + else { + callbuf << "\n"; + for (int j = 0; j <= indent_level; ++j) { + callbuf << indent_string; + } + } + } + if (isReference) { + std::string stars; + for (int j = 0; j < ptrCnt; ++j) { + stars.push_back('*'); + } + callbuf << "**(" << type_name.c_str() << stars << "**)args[" + << i << "]"; + } + else if (ptrCnt) { + std::string stars; + for (int j = 0; j < ptrCnt; ++j) { + stars.push_back('*'); + } + callbuf << "*(" << type_name.c_str() << stars << "*)args[" + << i << "]"; + } + else { + callbuf << "*(" << type_name.c_str() << "*)args[" << i << "]"; + } + } + callbuf << ")"; +} + +void make_narg_ctor_with_return(const unsigned N, const std::string& class_name, + std::ostringstream& buf, int indent_level, const FunctionDecl* fdecl) { + // Make a code string that follows this pattern: + // + // if (ret) { + // (*(ClassName**)ret) = new ClassName(args...); + // } + // else { + // new ClassName(args...); + // } + // + for (int i = 0; i < indent_level; ++i) { + buf << indent_string; + } + buf << "if (ret) {\n"; + ++indent_level; + { + std::ostringstream typedefbuf; + std::ostringstream callbuf; + // + // Write the return value assignment part. + // + for (int i = 0; i < indent_level; ++i) { + callbuf << indent_string; + } + callbuf << "(*(" << class_name << "**)ret) = "; + // + // Write the actual new expression. + // + make_narg_ctor(N, typedefbuf, callbuf, class_name, indent_level, fdecl); + // + // End the new expression statement. + // + callbuf << ";\n"; + for (int i = 0; i < indent_level; ++i) { + callbuf << indent_string; + } + callbuf << "return;\n"; + // + // Output the whole new expression and return statement. 
+ // + buf << typedefbuf.str() << callbuf.str(); + } + --indent_level; + for (int i = 0; i < indent_level; ++i) { + buf << indent_string; + } + buf << "}\n"; + for (int i = 0; i < indent_level; ++i) { + buf << indent_string; + } + buf << "else {\n"; + ++indent_level; + { + std::ostringstream typedefbuf; + std::ostringstream callbuf; + for (int i = 0; i < indent_level; ++i) { + callbuf << indent_string; + } + make_narg_ctor(N, typedefbuf, callbuf, class_name, indent_level, fdecl); + callbuf << ";\n"; + for (int i = 0; i < indent_level; ++i) { + callbuf << indent_string; + } + callbuf << "return;\n"; + buf << typedefbuf.str() << callbuf.str(); + } + --indent_level; + for (int i = 0; i < indent_level; ++i) { + buf << indent_string; + } + buf << "}\n"; +} + +void make_narg_call_with_return(const unsigned N, const std::string& class_name, + std::ostringstream& buf, int indent_level, const FunctionDecl* fdecl) { + // Make a code string that follows this pattern: + // + // if (ret) { + // new (ret) (return_type) ((class_name*)obj)->func(args...); + // } + // else { + // ((class_name*)obj)->func(args...); + // } + // + if (const CXXConstructorDecl* CD = dyn_cast(fdecl)) { + (void) CD; + make_narg_ctor_with_return(N, class_name, buf, indent_level, fdecl); + return; + } + QualType QT = fdecl->getResultType().getCanonicalType(); + if (QT->isVoidType()) { + std::ostringstream typedefbuf; + std::ostringstream callbuf; + for (int i = 0; i < indent_level; ++i) { + callbuf << indent_string; + } + make_narg_call(N, typedefbuf, callbuf, class_name, indent_level, fdecl); + callbuf << ";\n"; + for (int i = 0; i < indent_level; ++i) { + callbuf << indent_string; + } + callbuf << "return;\n"; + buf << typedefbuf.str() << callbuf.str(); + } + else { + for (int i = 0; i < indent_level; ++i) { + buf << indent_string; + } + buf << "if (ret) {\n"; + ++indent_level; + { + std::ostringstream typedefbuf; + std::ostringstream callbuf; + // + // Write the placement part of the placement new. + // + for (int i = 0; i < indent_level; ++i) { + callbuf << indent_string; + } + callbuf << "new (ret) "; + std::string type_name; + bool isReference = false; + int ptrCnt = 0; + collect_type_info(QT, typedefbuf, callbuf, type_name, + isReference, ptrCnt, indent_level, false, fdecl); + // + // Write the type part of the placement new. + // + if (isReference) { + std::string stars; + for (int j = 0; j < ptrCnt; ++j) { + stars.push_back('*'); + } + callbuf << "(" << type_name.c_str() << stars << "*) (&"; + } + else if (ptrCnt) { + std::string stars; + for (int j = 0; j < ptrCnt; ++j) { + stars.push_back('*'); + } + callbuf << "(" << type_name.c_str() << stars << ") ("; + } + else { + callbuf << "(" << type_name.c_str() << ") ("; + } + // + // Write the actual function call. + // + make_narg_call(N, typedefbuf, callbuf, class_name, indent_level, fdecl); + // + // End the placement new. + // + callbuf << ");\n"; + for (int i = 0; i < indent_level; ++i) { + callbuf << indent_string; + } + callbuf << "return;\n"; + // + // Output the whole placement new expression and return statement. 
+ // + buf << typedefbuf.str() << callbuf.str(); + } + --indent_level; + for (int i = 0; i < indent_level; ++i) { + buf << indent_string; + } + buf << "}\n"; + for (int i = 0; i < indent_level; ++i) { + buf << indent_string; + } + buf << "else {\n"; + ++indent_level; + { + std::ostringstream typedefbuf; + std::ostringstream callbuf; + for (int i = 0; i < indent_level; ++i) { + callbuf << indent_string; + } + make_narg_call(N, typedefbuf, callbuf, class_name, indent_level, fdecl); + callbuf << ";\n"; + for (int i = 0; i < indent_level; ++i) { + callbuf << indent_string; + } + callbuf << "return;\n"; + buf << typedefbuf.str() << callbuf.str(); + } + --indent_level; + for (int i = 0; i < indent_level; ++i) { + buf << indent_string; + } + buf << "}\n"; + } +} + +static CPPYY_Cling_Wrapper_t make_wrapper(const FunctionDecl* fdecl) { + ASTContext& Context = fdecl->getASTContext(); + PrintingPolicy Policy(Context.getPrintingPolicy()); + // + // Get the class or namespace name. + // + std::string class_name; + // if (const TypeDecl* TD = llvm::dyn_cast(fdecl->getDeclContext())) { + // // This is a class, struct, or union member. + // QualType QT(TD->getTypeForDecl(), 0); + // ROOT::TMetaUtils::GetFullyQualifiedTypeName(class_name, QT, *gCppyy_Cling); + // // And drop the default arguments if any (at least until the clang + // // type printer properly handle template paratemeter that are enumerator). + // R__DropDefaultArg(class_name); + // } + // else + if (const NamedDecl* ND = llvm::dyn_cast(fdecl->getDeclContext())) { + // This is a namespace member. + llvm::raw_string_ostream stream(class_name); + ND->getNameForDiagnostic(stream, Policy, /*Qualified=*/true); + stream.flush(); + } + // + // Check to make sure that we can + // instantiate and codegen this function. + // + bool needInstantiation = false; + const FunctionDecl* Definition = 0; + if (!fdecl->isDefined(Definition)) { + FunctionDecl::TemplatedKind TK = fdecl->getTemplatedKind(); + switch (TK) { + case FunctionDecl::TK_NonTemplate: { + // Ordinary function, not a template specialization. + // Note: This might be ok, the body might be defined + // in a library, and all we have seen is the + // header file. + //print_error("make_wrapper", + // "cannot make wrapper for a function which is declared but not defined!"); + //return 0; + break; + } + case FunctionDecl::TK_FunctionTemplate: { + // This decl is actually a function template, + // not a function at all. + print_error("make_wrapper", "cannot make wrapper for a function template!"); + return 0; + } + case FunctionDecl::TK_MemberSpecialization: { + // This function is the result of instantiating an ordinary + // member function of a class template, or of instantiating + // an ordinary member function of a class member of a class + // template, or of specializing a member function template + // of a class template, or of specializing a member function + // template of a class member of a class template. + if (!fdecl->isTemplateInstantiation()) { + // We are either TSK_Undeclared or + // TSK_ExplicitSpecialization. + // Note: This might be ok, the body might be defined + // in a library, and all we have seen is the + // header file. 
+ //print_error("make_wrapper", + // "cannot make wrapper for a function template explicit specialization" + // " which is declared but not defined!"); + //return 0; + break; + } + const FunctionDecl* Pattern = fdecl->getTemplateInstantiationPattern(); + if (!Pattern) { + print_error("make_wrapper", + "cannot make wrapper for a member function instantiation with no pattern!"); + return 0; + } + FunctionDecl::TemplatedKind PTK = Pattern->getTemplatedKind(); + TemplateSpecializationKind PTSK = + Pattern->getTemplateSpecializationKind(); + if ( + // The pattern is an ordinary member function. + (PTK == FunctionDecl::TK_NonTemplate) || + // The pattern is an explicit specialization, and + // so is not a template. + ((PTK != FunctionDecl::TK_FunctionTemplate) && + ((PTSK == TSK_Undeclared) || + (PTSK == TSK_ExplicitSpecialization))) + ) { + // Note: This might be ok, the body might be defined + // in a library, and all we have seen is the + // header file. + break; + } + else if (!Pattern->hasBody()) { + print_error("make_wrapper", + "cannot make wrapper for a member function instantiation with no body!"); + return 0; + } + if (fdecl->isImplicitlyInstantiable()) { + needInstantiation = true; + } + break; + } + case FunctionDecl::TK_FunctionTemplateSpecialization: { + // This function is the result of instantiating a function + // template or possibly an explicit specialization of a + // function template. Could be a namespace scope function or a + // member function. + if (!fdecl->isTemplateInstantiation()) { + // We are either TSK_Undeclared or + // TSK_ExplicitSpecialization. + // Note: This might be ok, the body might be defined + // in a library, and all we have seen is the + // header file. + //print_error("make_wrapper", + // "Cannot make wrapper for a function template " + // "explicit specialization which is declared but not defined!"); + //return 0; + break; + } + const FunctionDecl* Pattern = fdecl->getTemplateInstantiationPattern(); + if (!Pattern) { + print_error("make_wrapper", + "cannot make wrapper for a function template instantiation with no pattern!"); + return 0; + } + FunctionDecl::TemplatedKind PTK = Pattern->getTemplatedKind(); + TemplateSpecializationKind PTSK = + Pattern->getTemplateSpecializationKind(); + if ( + // The pattern is an ordinary member function. + (PTK == FunctionDecl::TK_NonTemplate) || + // The pattern is an explicit specialization, and + // so is not a template. + ((PTK != FunctionDecl::TK_FunctionTemplate) && + ((PTSK == TSK_Undeclared) || + (PTSK == TSK_ExplicitSpecialization))) + ) { + // Note: This might be ok, the body might be defined + // in a library, and all we have seen is the + // header file. + break; + } + if (!Pattern->hasBody()) { + print_error("make_wrapper", + "cannot make wrapper for a function template instantiation with no body!"); + return 0; + } + if (fdecl->isImplicitlyInstantiable()) { + needInstantiation = true; + } + break; + } + case FunctionDecl::TK_DependentFunctionTemplateSpecialization: { + // This function is the result of instantiating or + // specializing a member function of a class template, + // or a member function of a class member of a class template, + // or a member function template of a class template, or a + // member function template of a class member of a class + // template where at least some part of the function is + // dependent on a template argument. + if (!fdecl->isTemplateInstantiation()) { + // We are either TSK_Undeclared or + // TSK_ExplicitSpecialization. 
+ // Note: This might be ok, the body might be defined + // in a library, and all we have seen is the + // header file. + //print_error("make_wrapper", + // "Cannot make wrapper for a dependent function template explicit specialization + // " which is declared but not defined!"); + //return 0; + break; + } + const FunctionDecl* Pattern = fdecl->getTemplateInstantiationPattern(); + if (!Pattern) { + print_error("make_wrapper", + "cannot make wrapper for a dependent function template instantiation with no pattern!"); + return 0; + } + FunctionDecl::TemplatedKind PTK = Pattern->getTemplatedKind(); + TemplateSpecializationKind PTSK = + Pattern->getTemplateSpecializationKind(); + if ( + // The pattern is an ordinary member function. + (PTK == FunctionDecl::TK_NonTemplate) || + // The pattern is an explicit specialization, and + // so is not a template. + ((PTK != FunctionDecl::TK_FunctionTemplate) && + ((PTSK == TSK_Undeclared) || + (PTSK == TSK_ExplicitSpecialization))) + ) { + // Note: This might be ok, the body might be defined + // in a library, and all we have seen is the + // header file. + break; + } + if (!Pattern->hasBody()) { + print_error("make_wrapper", + "cannot make wrapper for a dependent function template instantiation with no body!"); + return 0; + } + if (fdecl->isImplicitlyInstantiable()) { + needInstantiation = true; + } + break; + } + default: { + // Will only happen if clang implementation changes. + // Protect ourselves in case that happens. + print_error("make_wrapper", "unhandled template kind!"); + return 0; + } + } + // We do not set needInstantiation to true in these cases: + // + // isInvalidDecl() + // TSK_Undeclared + // TSK_ExplicitInstantiationDefinition + // TSK_ExplicitSpecialization && !getClassScopeSpecializationPattern() + // TSK_ExplicitInstantiationDeclaration && + // getTemplateInstantiationPattern() && + // PatternDecl->hasBody() && + // !PatternDecl->isInlined() + // + // Set it true in these cases: + // + // TSK_ImplicitInstantiation + // TSK_ExplicitInstantiationDeclaration && (!getPatternDecl() || + // !PatternDecl->hasBody() || PatternDecl->isInlined()) + // + } + if (needInstantiation) { + clang::FunctionDecl* FDmod = const_cast(fdecl); + clang::Sema& S = gCppyy_Cling->getSema(); + // Could trigger deserialization of decls. + cling::Interpreter::PushTransactionRAII RAII(gCppyy_Cling); + S.InstantiateFunctionDefinition(SourceLocation(), FDmod, + /*Recursive=*/ true, + /*DefinitionRequired=*/ true); + if (!fdecl->isDefined(Definition)) { + print_error("make_wrapper", "failed to force template instantiation!"); + return 0; + } + } + if (Definition) { + FunctionDecl::TemplatedKind TK = Definition->getTemplatedKind(); + switch (TK) { + case FunctionDecl::TK_NonTemplate: { + // Ordinary function, not a template specialization. + if (Definition->isDeleted()) { + print_error("make_wrapper", "cannot make wrapper for a deleted function!"); + return 0; + } + else if (Definition->isLateTemplateParsed()) { + print_error("make_wrapper", + "Cannot make wrapper for a late template parsed function!"); + return 0; + } + //else if (Definition->isDefaulted()) { + // // Might not have a body, but we can still use it. + //} + //else { + // // Has a body. + //} + break; + } + case FunctionDecl::TK_FunctionTemplate: { + // This decl is actually a function template, + // not a function at all. 
+ print_error("make_wrapper", "cannot make wrapper for a function template!"); + return 0; + } + case FunctionDecl::TK_MemberSpecialization: { + // This function is the result of instantiating an ordinary + // member function of a class template or of a member class + // of a class template. + if (Definition->isDeleted()) { + print_error("make_wrapper", + "cannot make wrapper for a deleted member function of a specialization!"); + return 0; + } + else if (Definition->isLateTemplateParsed()) { + print_error("make_wrapper", + "cannot make wrapper for a late template parsed member function of a specialization!"); + return 0; + } + //else if (Definition->isDefaulted()) { + // // Might not have a body, but we can still use it. + //} + //else { + // // Has a body. + //} + break; + } + case FunctionDecl::TK_FunctionTemplateSpecialization: { + // This function is the result of instantiating a function + // template or possibly an explicit specialization of a + // function template. Could be a namespace scope function or a + // member function. + if (Definition->isDeleted()) { + print_error("make_wrapper", + "cannot make wrapper for a deleted function template specialization!"); + return 0; + } + else if (Definition->isLateTemplateParsed()) { + print_error("make_wrapper", + "cannot make wrapper for a late template parsed function template specialization!"); + return 0; + } + //else if (Definition->isDefaulted()) { + // // Might not have a body, but we can still use it. + //} + //else { + // // Has a body. + //} + break; + } + case FunctionDecl::TK_DependentFunctionTemplateSpecialization: { + // This function is the result of instantiating or + // specializing a member function of a class template, + // or a member function of a class member of a class template, + // or a member function template of a class template, or a + // member function template of a class member of a class + // template where at least some part of the function is + // dependent on a template argument. + if (Definition->isDeleted()) { + print_error("make_wrapper", + "cannot make wrapper for a deleted dependent function template specialization!"); + return 0; + } + else if (Definition->isLateTemplateParsed()) { + print_error("make_wrapper", + "cannot make wrapper for a late template parsed " + "dependent function template specialization!"); + return 0; + } + //else if (Definition->isDefaulted()) { + // // Might not have a body, but we can still use it. + //} + //else { + // // Has a body. + //} + break; + } + default: { + // Will only happen if clang implementation changes. + // Protect ourselves in case that happens. + print_error("make_wrapper", "unhandled template kind!"); + return 0; + } + } + } + unsigned min_args = fdecl->getMinRequiredArguments(); + unsigned num_params = fdecl->getNumParams(); + // + // Make the wrapper name. + // + std::string wrapper_name; + { + std::ostringstream buf; + buf << "__cf"; + //const NamedDecl* ND = llvm::dyn_cast(fdecl); + //std::string mn; + //gCppyy_Cling->maybeMangleDeclName(ND, mn); + //buf << '_' << mn; + buf << '_' << wrapper_serial++; + wrapper_name = buf.str(); + } + // + // Write the wrapper code. + // FIXME: this should be synthesized into the AST! + // + int indent_level = 0; + std::ostringstream buf; + buf << "__attribute__((used)) "; + buf << "extern \"C\" void "; + buf << wrapper_name; + buf << "(void* obj, int nargs, void** args, void* ret)\n"; + buf << "{\n"; + ++indent_level; + if (min_args == num_params) { + // No parameters with defaults. 
+ make_narg_call_with_return(num_params, class_name, buf, indent_level, fdecl); + } + else { + // We need one function call clause compiled for every + // possible number of arguments per call. + for (unsigned N = min_args; N <= num_params; ++N) { + for (int i = 0; i < indent_level; ++i) { + buf << indent_string; + } + buf << "if (nargs == " << N << ") {\n"; + ++indent_level; + make_narg_call_with_return(N, class_name, buf, indent_level, fdecl); + --indent_level; + for (int i = 0; i < indent_level; ++i) { + buf << indent_string; + } + buf << "}\n"; + } + } + --indent_level; + buf << "}\n"; + // + // Compile the wrapper code. + // + std::string wrapper_code(buf.str()); + std::cout << " CREATED WRAPPER: " << std::endl; + std::cout << wrapper_code << std::endl; + void* wrapper = gCppyy_Cling->compileFunction( + wrapper_name, wrapper_code, false /*ifUnique*/, true /*withAcessControl*/); + if (wrapper) + s_wrappers.insert(std::make_pair((cppyy_method_t)fdecl, (CPPYY_Cling_Wrapper_t)wrapper)); + return (CPPYY_Cling_Wrapper_t)wrapper; +} From noreply at buildbot.pypy.org Wed Jan 29 02:35:40 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 29 Jan 2014 02:35:40 +0100 (CET) Subject: [pypy-commit] pypy default: move low-level stuff out of rpython.annotator.model Message-ID: <20140129013540.C7CBC1D23CF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r68986:f73987807040 Date: 2014-01-29 01:34 +0000 http://bitbucket.org/pypy/pypy/changeset/f73987807040/ Log: move low-level stuff out of rpython.annotator.model diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -829,8 +829,8 @@ # ____________________________________________________________ # annotation of low-level types -from rpython.rtyper.llannotation import SomePtr -from rpython.annotator.model import ll_to_annotation, annotation_to_lltype +from rpython.rtyper.llannotation import ( + SomePtr, ll_to_annotation, annotation_to_lltype, lltype_to_annotation) class __extend__(pairtype(SomePtr, SomePtr)): def union((p1, p2)): @@ -908,12 +908,10 @@ class __extend__(pairtype(SomeTypedAddressAccess, SomeInteger)): def getitem((s_taa, s_int)): - from rpython.annotator.model import lltype_to_annotation return lltype_to_annotation(s_taa.type) getitem.can_only_throw = [] def setitem((s_taa, s_int), s_value): - from rpython.annotator.model import annotation_to_lltype assert annotation_to_lltype(s_value) is s_taa.type setitem.can_only_throw = [] diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -12,8 +12,9 @@ SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, lltype_to_annotation, SomeByteArray, SomeConstantType) -from rpython.rtyper.llannotation import SomeAddress, SomePtr, SomeLLADTMeth + SomeWeakRef, SomeByteArray, SomeConstantType) +from rpython.rtyper.llannotation import ( + SomeAddress, SomePtr, SomeLLADTMeth, lltype_to_annotation) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -4,13 +4,12 @@ import sys from 
rpython.annotator.model import ( - SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, s_Bool, + SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, SomeUnicodeCodePoint, SomeFloat, unionof, SomeUnicodeString, SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, - SomeOrderedDict, - SomeByteArray, annotation_to_lltype, lltype_to_annotation, - ll_to_annotation, add_knowntypedata, s_ImpossibleValue,) -from rpython.rtyper.llannotation import SomeAddress + SomeOrderedDict, SomeByteArray, add_knowntypedata, s_ImpossibleValue,) +from rpython.rtyper.llannotation import ( + SomeAddress, annotation_to_lltype, lltype_to_annotation, ll_to_annotation) from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import description from rpython.flowspace.model import Constant diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -568,86 +568,6 @@ # 'classdef' is None for known-to-be-dead weakrefs. self.classdef = classdef -#____________________________________________________________ -# annotation of low-level types - -from rpython.rtyper.lltypesystem import lltype - - - -from rpython.rtyper.llannotation import SomeAddress, SomePtr, SomeInteriorPtr -from rpython.rtyper.lltypesystem import llmemory - -annotation_to_ll_map = [ - (SomeSingleFloat(), lltype.SingleFloat), - (s_None, lltype.Void), # also matches SomeImpossibleValue() - (s_Bool, lltype.Bool), - (SomeFloat(), lltype.Float), - (SomeLongFloat(), lltype.LongFloat), - (SomeChar(), lltype.Char), - (SomeUnicodeCodePoint(), lltype.UniChar), - (SomeAddress(), llmemory.Address), -] - - -def annotation_to_lltype(s_val, info=None): - if isinstance(s_val, SomeInteriorPtr): - p = s_val.ll_ptrtype - if 0 in p.offsets: - assert list(p.offsets).count(0) == 1 - return lltype.Ptr(lltype.Ptr(p.PARENTTYPE)._interior_ptr_type_with_index(p.TO)) - else: - return lltype.Ptr(p.PARENTTYPE) - if isinstance(s_val, SomePtr): - return s_val.ll_ptrtype - if type(s_val) is SomeInteger: - return lltype.build_number(None, s_val.knowntype) - - for witness, T in annotation_to_ll_map: - if witness.contains(s_val): - return T - if info is None: - info = '' - else: - info = '%s: ' % info - raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( - info, s_val)) - -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) - - -def lltype_to_annotation(T): - try: - s = ll_to_annotation_map.get(T) - except TypeError: - s = None # unhashable T, e.g. 
a Ptr(GcForwardReference()) - if s is None: - if isinstance(T, lltype.Typedef): - return lltype_to_annotation(T.OF) - if isinstance(T, lltype.Number): - return SomeInteger(knowntype=T._type) - elif isinstance(T, lltype.InteriorPtr): - return SomeInteriorPtr(T) - else: - return SomePtr(T) - else: - return s - - -def ll_to_annotation(v): - if v is None: - # i think we can only get here in the case of void-returning - # functions - return s_None - if isinstance(v, lltype._interior_ptr): - ob = v._parent - if ob is None: - raise RuntimeError - T = lltype.InteriorPtr(lltype.typeOf(ob), v._T, v._offsets) - return SomeInteriorPtr(T) - return lltype_to_annotation(lltype.typeOf(v)) - - # ____________________________________________________________ diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -2,10 +2,11 @@ from __future__ import absolute_import import types -from rpython.annotator.model import SomeBool, SomeInteger, SomeString,\ - SomeFloat, SomeList, SomeDict, s_None, \ - SomeObject, SomeInstance, SomeTuple, lltype_to_annotation,\ - unionof, SomeUnicodeString, SomeType, AnnotatorError +from rpython.annotator.model import ( + SomeBool, SomeInteger, SomeString, SomeFloat, SomeList, SomeDict, s_None, + SomeObject, SomeInstance, SomeTuple, unionof, SomeUnicodeString, SomeType, + AnnotatorError) +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator.listdef import ListDef from rpython.annotator.dictdef import DictDef diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -1,9 +1,11 @@ import py from rpython.annotator.model import * -from rpython.rtyper.llannotation import SomePtr +from rpython.rtyper.llannotation import ( + SomePtr, annotation_to_lltype, ll_to_annotation) from rpython.annotator.listdef import ListDef from rpython.translator.translator import TranslationContext +from rpython.rtyper.typesystem import lltype listdef1 = ListDef(None, SomeTuple([SomeInteger(nonneg=True), SomeString()])) diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -759,8 +759,9 @@ raise AnnotatorError("Cannot call len on a pbc") # annotation of low-level types -from rpython.rtyper.llannotation import SomePtr, SomeLLADTMeth -from rpython.annotator.model import ll_to_annotation, lltype_to_annotation, annotation_to_lltype +from rpython.rtyper.llannotation import ( + SomePtr, SomeLLADTMeth, ll_to_annotation, lltype_to_annotation, + annotation_to_lltype) class __extend__(SomePtr): diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.annlowlevel import llhelper, MixLevelHelperAnnotator +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.jit.metainterp import history from rpython.jit.codewriter import heaptracker, longlong @@ -14,7 +15,6 @@ FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr, FLAG_POINTER, FLAG_FLOAT) from rpython.jit.backend.llsupport.asmmemmgr import AsmMemoryManager -from rpython.annotator 
import model as annmodel from rpython.rlib.unroll import unrolling_iterable @@ -111,8 +111,8 @@ fptr = llhelper(FUNC_TP, realloc_frame) else: FUNC = FUNC_TP.TO - args_s = [annmodel.lltype_to_annotation(ARG) for ARG in FUNC.ARGS] - s_result = annmodel.lltype_to_annotation(FUNC.RESULT) + args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS] + s_result = lltype_to_annotation(FUNC.RESULT) mixlevelann = MixLevelHelperAnnotator(self.rtyper) graph = mixlevelann.getgraph(realloc_frame, args_s, s_result) fptr = mixlevelann.graph2delayed(graph, FUNC) @@ -123,8 +123,8 @@ fptr = llhelper(FUNC_TP, realloc_frame_crash) else: FUNC = FUNC_TP.TO - args_s = [annmodel.lltype_to_annotation(ARG) for ARG in FUNC.ARGS] - s_result = annmodel.lltype_to_annotation(FUNC.RESULT) + args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS] + s_result = lltype_to_annotation(FUNC.RESULT) mixlevelann = MixLevelHelperAnnotator(self.rtyper) graph = mixlevelann.getgraph(realloc_frame_crash, args_s, s_result) fptr = mixlevelann.graph2delayed(graph, FUNC) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -1,6 +1,7 @@ import sys from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator.policy import AnnotatorPolicy from rpython.flowspace.model import Variable, Constant from rpython.jit.metainterp.typesystem import deref @@ -32,7 +33,7 @@ if T == lltype.Ptr(ll_rstr.STR): t = str else: - t = annmodel.lltype_to_annotation(T) + t = lltype_to_annotation(T) return a.typeannotation(t) def annotate(func, values, inline=None, backendoptimize=True, @@ -814,12 +815,12 @@ return rtyper._builtin_func_for_spec_cache[key] except (KeyError, AttributeError): pass - args_s = [annmodel.lltype_to_annotation(v) for v in ll_args] + args_s = [lltype_to_annotation(v) for v in ll_args] if '.' 
not in oopspec_name: # 'newxxx' operations LIST_OR_DICT = ll_res else: LIST_OR_DICT = ll_args[0] - s_result = annmodel.lltype_to_annotation(ll_res) + s_result = lltype_to_annotation(ll_res) impl = setup_extra_builtin(rtyper, oopspec_name, len(args_s), extra) if getattr(impl, 'need_result_type', False): bk = rtyper.annotator.bookkeeper diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -9,6 +9,7 @@ from rpython.rlib.jit import JitDriver, hint, dont_look_inside, promote, virtual_ref from rpython.rlib.rarithmetic import intmask from rpython.rtyper.annlowlevel import hlstr +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, lloperation, rclass, llmemory from rpython.rtyper.rclass import IR_IMMUTABLE, IR_IMMUTABLE_ARRAY, FieldListAccessor @@ -23,7 +24,6 @@ _about_ = promote_virtualizable def compute_result_annotation(self, *args): - from rpython.annotator.model import lltype_to_annotation return lltype_to_annotation(lltype.Void) def specialize_call(self, hop): diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -4,6 +4,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.annlowlevel import (llhelper, MixLevelHelperAnnotator, cast_base_ptr_to_instance, hlstr) +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator import model as annmodel from rpython.rtyper.llinterp import LLException from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache @@ -662,8 +663,8 @@ if not self.cpu.translate_support_code: return llhelper(FUNCPTR, func) FUNC = FUNCPTR.TO - args_s = [annmodel.lltype_to_annotation(ARG) for ARG in FUNC.ARGS] - s_result = annmodel.lltype_to_annotation(FUNC.RESULT) + args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS] + s_result = lltype_to_annotation(FUNC.RESULT) graph = self.annhelper.getgraph(func, args_s, s_result) return self.annhelper.graph2delayed(graph, FUNC) diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -9,7 +9,7 @@ from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.ssa import DataFlowFamilyBuilder from rpython.translator.backendopt.constfold import constant_fold_graph -from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper import rmodel from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.rtyper.rtyper import LowLevelOpList @@ -259,8 +259,8 @@ def annotate_helper(self, ll_helper, ll_args, ll_result, inline=False): assert not self.finished_helpers - args_s = map(annmodel.lltype_to_annotation, ll_args) - s_result = annmodel.lltype_to_annotation(ll_result) + args_s = map(lltype_to_annotation, ll_args) + s_result = lltype_to_annotation(ll_result) graph = self.mixlevelannotator.getgraph(ll_helper, args_s, s_result) # the produced graphs does not need to be fully transformed self.need_minimal_transform(graph) diff --git a/rpython/rlib/_stacklet_asmgcc.py b/rpython/rlib/_stacklet_asmgcc.py --- 
a/rpython/rlib/_stacklet_asmgcc.py +++ b/rpython/rlib/_stacklet_asmgcc.py @@ -3,6 +3,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import llhelper, MixLevelHelperAnnotator from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib import _rffi_stacklet as _c @@ -145,7 +146,7 @@ def complete_destrptr(gctransformer): translator = gctransformer.translator mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper) - args_s = [annmodel.lltype_to_annotation(lltype.Ptr(SUSPSTACK))] + args_s = [lltype_to_annotation(lltype.Ptr(SUSPSTACK))] s_result = annmodel.s_None destrptr = mixlevelannotator.delayedfunction(suspstack_destructor, args_s, s_result) diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -229,12 +229,13 @@ def compute_result_annotation(self, s_RESTYPE, s_pythonfunction, *args_s): from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.lltypesystem import lltype assert s_RESTYPE.is_constant() assert s_pythonfunction.is_constant() s_result = s_RESTYPE.const if isinstance(s_result, lltype.LowLevelType): - s_result = annmodel.lltype_to_annotation(s_result) + s_result = lltype_to_annotation(s_result) assert isinstance(s_result, annmodel.SomeObject) return s_result diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -1,5 +1,5 @@ from rpython.annotator import model as annmodel -from rpython.rtyper.llannotation import SomePtr +from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation from rpython.rlib.objectmodel import specialize from rpython.rtyper.annlowlevel import (cast_instance_to_base_ptr, cast_base_ptr_to_instance, llstr) @@ -16,7 +16,7 @@ if (isinstance(s_result, annmodel.SomeObject) or s_result is None): return s_result - return annmodel.lltype_to_annotation(s_result) + return lltype_to_annotation(s_result) def specialize_call(self, hop): from rpython.rtyper.lltypesystem import lltype diff --git a/rpython/rlib/rawstorage.py b/rpython/rlib/rawstorage.py --- a/rpython/rlib/rawstorage.py +++ b/rpython/rlib/rawstorage.py @@ -2,6 +2,7 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib.rgc import lltype_is_gc from rpython.rlib.objectmodel import specialize @@ -33,7 +34,7 @@ def compute_result_annotation(self, s_TP, s_storage, s_index): assert s_TP.is_constant() - return annmodel.lltype_to_annotation(s_TP.const) + return lltype_to_annotation(s_TP.const) def specialize_call(self, hop): assert hop.args_r[1].lowleveltype == RAW_STORAGE_PTR diff --git a/rpython/rlib/rrawarray.py b/rpython/rlib/rrawarray.py --- a/rpython/rlib/rrawarray.py +++ b/rpython/rlib/rrawarray.py @@ -1,4 +1,4 @@ -from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib.objectmodel import specialize from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, llmemory @@ -30,7 +30,7 @@ _about_ = populate_list_from_raw_array def compute_result_annotation(self, s_list, s_array, s_length): - s_item = annmodel.lltype_to_annotation(s_array.ll_ptrtype.TO.OF) + s_item = 
lltype_to_annotation(s_array.ll_ptrtype.TO.OF) s_newlist = self.bookkeeper.newlist(s_item) s_newlist.listdef.resize() pair(s_list, s_newlist).union() diff --git a/rpython/rlib/types.py b/rpython/rlib/types.py --- a/rpython/rlib/types.py +++ b/rpython/rlib/types.py @@ -57,7 +57,8 @@ def ptr(ll_type): from rpython.rtyper.lltypesystem.lltype import Ptr - return model.SomePtr(Ptr(ll_type)) + from rpython.rtyper.llannotation import SomePtr + return SomePtr(Ptr(ll_type)) def list(element): diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -7,7 +7,8 @@ from rpython.annotator.policy import AnnotatorPolicy from rpython.annotator.signature import Sig from rpython.annotator.specialize import flatten_star_args -from rpython.rtyper.llannotation import SomePtr +from rpython.rtyper.llannotation import ( + SomePtr, annotation_to_lltype, lltype_to_annotation) from rpython.rtyper.normalizecalls import perform_normalizations from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.flowspace.model import Constant @@ -58,7 +59,7 @@ else: new_args_s.append(annmodel.not_const(s_obj)) try: - key.append(annmodel.annotation_to_lltype(s_obj)) + key.append(annotation_to_lltype(s_obj)) except ValueError: # passing non-low-level types to a ll_* function is allowed # for module/ll_* @@ -76,8 +77,8 @@ default_specialize = staticmethod(default_specialize) def specialize__semierased(funcdesc, args_s): - a2l = annmodel.annotation_to_lltype - l2a = annmodel.lltype_to_annotation + a2l = annotation_to_lltype + l2a = lltype_to_annotation args_s[:] = [l2a(a2l(s)) for s in args_s] return LowLevelAnnotatorPolicy.default_specialize(funcdesc, args_s) specialize__semierased = staticmethod(specialize__semierased) @@ -121,8 +122,8 @@ def specialize__genconst(pol, funcdesc, args_s, i): # XXX this is specific to the JIT - TYPE = annmodel.annotation_to_lltype(args_s[i], 'genconst') - args_s[i] = annmodel.lltype_to_annotation(TYPE) + TYPE = annotation_to_lltype(args_s[i], 'genconst') + args_s[i] = lltype_to_annotation(TYPE) alt_name = funcdesc.name + "__%s" % (TYPE._short_name(),) return funcdesc.cachedgraph(TYPE, alt_name=valid_identifier(alt_name)) @@ -356,10 +357,10 @@ assert s_callable.is_constant() F = s_F.const FUNC = F.TO - args_s = [annmodel.lltype_to_annotation(T) for T in FUNC.ARGS] + args_s = [lltype_to_annotation(T) for T in FUNC.ARGS] key = (llhelper, s_callable.const) s_res = self.bookkeeper.emulate_pbc_call(key, s_callable, args_s) - assert annmodel.lltype_to_annotation(FUNC.RESULT).contains(s_res) + assert lltype_to_annotation(FUNC.RESULT).contains(s_res) return SomePtr(F) def specialize_call(self, hop): @@ -419,9 +420,9 @@ def compute_result_annotation(self, s_str): from rpython.rtyper.lltypesystem.rstr import STR, UNICODE if strtype is str: - return annmodel.lltype_to_annotation(lltype.Ptr(STR)) + return lltype_to_annotation(lltype.Ptr(STR)) else: - return annmodel.lltype_to_annotation(lltype.Ptr(UNICODE)) + return lltype_to_annotation(lltype.Ptr(UNICODE)) def specialize_call(self, hop): hop.exception_cannot_occur() diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -1,8 +1,10 @@ """ Code for annotating low-level thingies. 
""" -from rpython.annotator.model import SomeObject -from rpython.rtyper.lltypesystem import lltype +from rpython.annotator.model import ( + SomeObject, SomeSingleFloat, SomeFloat, SomeLongFloat, SomeChar, + SomeUnicodeCodePoint, SomeInteger, s_None, s_Bool) +from rpython.rtyper.lltypesystem import lltype, llmemory class SomeAddress(SomeObject): immutable = True @@ -52,3 +54,72 @@ def can_be_none(self): return False + + +annotation_to_ll_map = [ + (SomeSingleFloat(), lltype.SingleFloat), + (s_None, lltype.Void), # also matches SomeImpossibleValue() + (s_Bool, lltype.Bool), + (SomeFloat(), lltype.Float), + (SomeLongFloat(), lltype.LongFloat), + (SomeChar(), lltype.Char), + (SomeUnicodeCodePoint(), lltype.UniChar), + (SomeAddress(), llmemory.Address), +] + + +def annotation_to_lltype(s_val, info=None): + if isinstance(s_val, SomeInteriorPtr): + p = s_val.ll_ptrtype + if 0 in p.offsets: + assert list(p.offsets).count(0) == 1 + return lltype.Ptr(lltype.Ptr(p.PARENTTYPE)._interior_ptr_type_with_index(p.TO)) + else: + return lltype.Ptr(p.PARENTTYPE) + if isinstance(s_val, SomePtr): + return s_val.ll_ptrtype + if type(s_val) is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + + for witness, T in annotation_to_ll_map: + if witness.contains(s_val): + return T + if info is None: + info = '' + else: + info = '%s: ' % info + raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( + info, s_val)) + +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) + +def lltype_to_annotation(T): + try: + s = ll_to_annotation_map.get(T) + except TypeError: + s = None # unhashable T, e.g. a Ptr(GcForwardReference()) + if s is None: + if isinstance(T, lltype.Typedef): + return lltype_to_annotation(T.OF) + if isinstance(T, lltype.Number): + return SomeInteger(knowntype=T._type) + elif isinstance(T, lltype.InteriorPtr): + return SomeInteriorPtr(T) + else: + return SomePtr(T) + else: + return s + + +def ll_to_annotation(v): + if v is None: + # i think we can only get here in the case of void-returning + # functions + return s_None + if isinstance(v, lltype._interior_ptr): + ob = v._parent + if ob is None: + raise RuntimeError + T = lltype.InteriorPtr(lltype.typeOf(ob), v._T, v._offsets) + return SomeInteriorPtr(T) + return lltype_to_annotation(lltype.typeOf(v)) diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -22,6 +22,7 @@ from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat, base_int, intmask from rpython.rlib.rarithmetic import is_emulated_long, maxint from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -1303,7 +1304,7 @@ def compute_result_annotation(self, s_RESTYPE, s_value): assert s_RESTYPE.is_constant() RESTYPE = s_RESTYPE.const - return annmodel.lltype_to_annotation(RESTYPE) + return lltype_to_annotation(RESTYPE) def specialize_call(self, hop): hop.exception_cannot_occur() @@ -1342,7 +1343,7 @@ assert isinstance(s_n, annmodel.SomeInteger) assert isinstance(s_ptr, SomePtr) typecheck_ptradd(s_ptr.ll_ptrtype) - return annmodel.lltype_to_annotation(s_ptr.ll_ptrtype) + return lltype_to_annotation(s_ptr.ll_ptrtype) def specialize_call(self, hop): 
hop.exception_cannot_occur() diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -141,7 +141,7 @@ _type_ = LLOp def compute_result_annotation(self, RESULTTYPE, *args): - from rpython.annotator.model import lltype_to_annotation + from rpython.rtyper.llannotation import lltype_to_annotation assert RESULTTYPE.is_constant() return lltype_to_annotation(RESULTTYPE.const) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -5,7 +5,7 @@ from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr from rpython.rtyper.lltypesystem.llmemory import itemoffsetof, raw_memcopy -from rpython.annotator.model import lltype_to_annotation +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.objectmodel import Symbolic from rpython.rlib.objectmodel import keepalive_until_here, enforceargs diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -7,6 +7,7 @@ import sys from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib import rposix from rpython.rlib.rarithmetic import intmask from rpython.rtyper import extregistry @@ -87,7 +88,7 @@ assert s_attr.is_constant(), "non-constant attr name in getattr()" attrname = s_attr.const TYPE = STAT_FIELD_TYPES[attrname] - return annmodel.lltype_to_annotation(TYPE) + return lltype_to_annotation(TYPE) def _get_rmarshall_support_(self): # for rlib.rmarshal # reduce and recreate stat_result objects from 10-tuples @@ -98,7 +99,7 @@ def stat_result_recreate(tup): return make_stat_result(tup + extra_zeroes) - s_reduced = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) + s_reduced = annmodel.SomeTuple([lltype_to_annotation(TYPE) for name, TYPE in PORTABLE_STAT_FIELDS]) extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS)) return s_reduced, stat_result_reduce, stat_result_recreate @@ -120,7 +121,7 @@ def getattr(self, s_attr): assert s_attr.is_constant() TYPE = STATVFS_FIELD_TYPES[s_attr.const] - return annmodel.lltype_to_annotation(TYPE) + return lltype_to_annotation(TYPE) class __extend__(pairtype(SomeStatResult, annmodel.SomeInteger)): @@ -129,14 +130,14 @@ index = s_int.const assert 0 <= index < N_INDEXABLE_FIELDS, "os.stat()[index] out of range" name, TYPE = STAT_FIELDS[index] - return annmodel.lltype_to_annotation(TYPE) + return lltype_to_annotation(TYPE) class __extend__(pairtype(SomeStatvfsResult, annmodel.SomeInteger)): def getitem((s_stat, s_int)): assert s_int.is_constant() name, TYPE = STATVFS_FIELDS[s_int.const] - return annmodel.lltype_to_annotation(TYPE) + return lltype_to_annotation(TYPE) s_StatResult = SomeStatResult() diff --git a/rpython/rtyper/module/r_os_stat.py b/rpython/rtyper/module/r_os_stat.py --- a/rpython/rtyper/module/r_os_stat.py +++ b/rpython/rtyper/module/r_os_stat.py @@ -8,6 +8,7 @@ the tuples returned by os.stat(). 
""" from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.flowspace.model import Constant from rpython.tool.pairtype import pairtype from rpython.rtyper.rmodel import Repr, IntegerRepr @@ -25,7 +26,7 @@ for i, (name, TYPE) in enumerate(self.stat_fields): self.stat_field_indexes[name] = i - self.s_tuple = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) + self.s_tuple = annmodel.SomeTuple([lltype_to_annotation(TYPE) for name, TYPE in self.stat_fields]) self.r_tuple = rtyper.getrepr(self.s_tuple) self.lowleveltype = self.r_tuple.lowleveltype @@ -76,7 +77,7 @@ for i, (name, TYPE) in enumerate(self.statvfs_fields): self.statvfs_field_indexes[name] = i - self.s_tuple = annmodel.SomeTuple([annmodel.lltype_to_annotation(TYPE) + self.s_tuple = annmodel.SomeTuple([lltype_to_annotation(TYPE) for name, TYPE in self.statvfs_fields]) self.r_tuple = rtyper.getrepr(self.s_tuple) self.lowleveltype = self.r_tuple.lowleveltype diff --git a/rpython/rtyper/rptr.py b/rpython/rtyper/rptr.py --- a/rpython/rtyper/rptr.py +++ b/rpython/rtyper/rptr.py @@ -1,6 +1,6 @@ from rpython.annotator import model as annmodel from rpython.rtyper.llannotation import ( - SomePtr, SomeInteriorPtr, SomeLLADTMeth) + SomePtr, SomeInteriorPtr, SomeLLADTMeth, lltype_to_annotation) from rpython.flowspace import model as flowmodel from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.error import TyperError @@ -193,7 +193,7 @@ self.func = adtmeth.func self.lowleveltype = adtmeth.ll_ptrtype self.ll_ptrtype = adtmeth.ll_ptrtype - self.lowleveltype = rtyper.getrepr(annmodel.lltype_to_annotation(adtmeth.ll_ptrtype)).lowleveltype + self.lowleveltype = rtyper.getrepr(lltype_to_annotation(adtmeth.ll_ptrtype)).lowleveltype def rtype_simple_call(self, hop): hop2 = hop.copy() @@ -201,8 +201,7 @@ s_func = hop.rtyper.annotator.bookkeeper.immutablevalue(func) v_ptr = hop2.args_v[0] hop2.r_s_popfirstarg() - hop2.v_s_insertfirstarg( - v_ptr, annmodel.lltype_to_annotation(self.ll_ptrtype)) + hop2.v_s_insertfirstarg(v_ptr, lltype_to_annotation(self.ll_ptrtype)) hop2.v_s_insertfirstarg(flowmodel.Constant(func), s_func) return hop2.dispatch() diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -16,7 +16,7 @@ import py from rpython.annotator import model as annmodel, unaryop, binaryop -from rpython.rtyper.llannotation import SomePtr +from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation from rpython.annotator.annrpython import FAIL from rpython.flowspace.model import Variable, Constant, SpaceOperation, c_last_exception from rpython.rtyper.annlowlevel import annotate_lowlevel_helper, LowLevelAnnotatorPolicy @@ -77,7 +77,7 @@ except KeyError: pass if isinstance(lltype, Primitive): - repr = self.primitive_to_repr[lltype] = self.getrepr(annmodel.lltype_to_annotation(lltype)) + repr = self.primitive_to_repr[lltype] = self.getrepr(lltype_to_annotation(lltype)) return repr raise TyperError('There is no primitive repr for %r' % (lltype,)) @@ -616,7 +616,7 @@ for s in argtypes: # assume 's' is a low-level type, unless it is already an annotation if not isinstance(s, annmodel.SomeObject): - s = annmodel.lltype_to_annotation(s) + s = lltype_to_annotation(s) args_s.append(s) # hack for bound methods if hasattr(ll_function, 'im_func'): @@ -903,7 +903,7 @@ raise TyperError("non-PBC Void argument: %r", (s_value,)) args_s.append(s_value) else: - 
args_s.append(annmodel.lltype_to_annotation(v.concretetype)) + args_s.append(lltype_to_annotation(v.concretetype)) newargs_v.append(v) self.rtyper.call_all_setups() # compute ForwardReferences now diff --git a/rpython/rtyper/test/test_llann.py b/rpython/rtyper/test/test_llann.py --- a/rpython/rtyper/test/test_llann.py +++ b/rpython/rtyper/test/test_llann.py @@ -1,7 +1,7 @@ import py from rpython.annotator import model as annmodel -from rpython.rtyper.llannotation import SomePtr +from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation from rpython.conftest import option from rpython.rtyper.annlowlevel import (annotate_lowlevel_helper, MixLevelHelperAnnotator, PseudoHighLevelCallable, llhelper, @@ -202,7 +202,7 @@ assert a.binding(vT) == a.bookkeeper.immutablevalue(T) assert a.binding(vi).knowntype == int assert a.binding(vp).ll_ptrtype.TO == T - assert a.binding(rv) == annmodel.lltype_to_annotation(T.OF) + assert a.binding(rv) == lltype_to_annotation(T.OF) elif func is ll_make: vT, vn = args assert a.binding(vT) == a.bookkeeper.immutablevalue(T) @@ -281,7 +281,7 @@ vp, vi = args assert a.binding(vi).knowntype == int assert a.binding(vp).ll_ptrtype == T - assert a.binding(rv) == annmodel.lltype_to_annotation( + assert a.binding(rv) == lltype_to_annotation( T.TO.OF) else: assert False, func diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -9,7 +9,7 @@ from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.lltypesystem import lltype from rpython.annotator import model as annmodel -from rpython.annotator.model import lltype_to_annotation +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib.rarithmetic import r_uint, ovfcheck from rpython.tool import leakfinder from rpython.conftest import option diff --git a/rpython/rtyper/test/test_rpbc.py b/rpython/rtyper/test/test_rpbc.py --- a/rpython/rtyper/test/test_rpbc.py +++ b/rpython/rtyper/test/test_rpbc.py @@ -1,9 +1,10 @@ import py +from rpython.annotator import model as annmodel from rpython.annotator import policy, specialize from rpython.rtyper.lltypesystem.lltype import typeOf from rpython.rtyper.test.tool import BaseRtypingTest -from rpython.rtyper.llannotation import SomePtr +from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation class MyBase: @@ -1702,7 +1703,6 @@ from rpython.translator import translator from rpython.annotator import annrpython a = annrpython.RPythonAnnotator() - from rpython.annotator import model as annmodel s_f = a.bookkeeper.immutablevalue(f) a.bookkeeper.emulate_pbc_call('f', s_f, [annmodel.SomeInteger(), annmodel.SomeInteger()]) @@ -1721,7 +1721,7 @@ r_f = rt.getrepr(s_f) s_R = a.bookkeeper.immutablevalue(r_f) - s_ll_f = annmodel.lltype_to_annotation(r_f.lowleveltype) + s_ll_f = lltype_to_annotation(r_f.lowleveltype) ll_h_graph = annlowlevel.annotate_lowlevel_helper(a, ll_h, [s_R, s_ll_f, annmodel.SomeInteger()]) assert a.binding(ll_h_graph.getreturnvar()).knowntype == int rt.specialize_more_blocks() @@ -1740,7 +1740,6 @@ return a - b from rpython.annotator import annrpython a = annrpython.RPythonAnnotator() - from rpython.annotator import model as annmodel def g(i): if i: @@ -1769,7 +1768,7 @@ r_f = rt.getrepr(s_f) s_R = a.bookkeeper.immutablevalue(r_f) - s_ll_f = annmodel.lltype_to_annotation(r_f.lowleveltype) + s_ll_f = lltype_to_annotation(r_f.lowleveltype) ll_h_graph= 
annlowlevel.annotate_lowlevel_helper(a, ll_h, [s_R, s_ll_f, annmodel.SomeInteger()]) assert a.binding(ll_h_graph.getreturnvar()).knowntype == int rt.specialize_more_blocks() @@ -1792,7 +1791,6 @@ from rpython.annotator import annrpython a = annrpython.RPythonAnnotator() - from rpython.annotator import model as annmodel def g(): a = A(None) @@ -1815,7 +1813,7 @@ r_f = rt.getrepr(s_f) s_R = a.bookkeeper.immutablevalue(r_f) - s_ll_f = annmodel.lltype_to_annotation(r_f.lowleveltype) + s_ll_f = lltype_to_annotation(r_f.lowleveltype) A_repr = rclass.getinstancerepr(rt, a.bookkeeper.getdesc(A). getuniqueclassdef()) ll_h_graph = annlowlevel.annotate_lowlevel_helper( @@ -1842,7 +1840,6 @@ from rpython.annotator import annrpython a = annrpython.RPythonAnnotator() - from rpython.annotator import model as annmodel def g(): a = A(None) @@ -1872,7 +1869,7 @@ r_f = rt.getrepr(s_f) s_R = a.bookkeeper.immutablevalue(r_f) - s_ll_f = annmodel.lltype_to_annotation(r_f.lowleveltype) + s_ll_f = lltype_to_annotation(r_f.lowleveltype) A_repr = rclass.getinstancerepr(rt, a.bookkeeper.getdesc(A). getuniqueclassdef()) ll_h_graph = annlowlevel.annotate_lowlevel_helper( @@ -1903,7 +1900,6 @@ from rpython.annotator import annrpython a = annrpython.RPythonAnnotator() - from rpython.annotator import model as annmodel i = Impl() @@ -1928,7 +1924,7 @@ r_f = rt.getrepr(s_f) s_R = a.bookkeeper.immutablevalue(r_f) - s_ll_f = annmodel.lltype_to_annotation(r_f.lowleveltype) + s_ll_f = lltype_to_annotation(r_f.lowleveltype) A_repr = rclass.getinstancerepr(rt, a.bookkeeper.getdesc(A). getuniqueclassdef()) diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -12,7 +12,7 @@ from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat from rpython.rlib.debug import ll_assert -from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.tool.sourcetools import func_with_new_name @@ -164,7 +164,7 @@ return fn def build_func(self, name, fn, inputtypes, rettype, **kwds): - l2a = annmodel.lltype_to_annotation + l2a = lltype_to_annotation graph = self.mixlevelannotator.getgraph(fn, map(l2a, inputtypes), l2a(rettype)) return self.constant_func(name, inputtypes, rettype, graph, exception_policy="exc_helper", **kwds) diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -14,7 +14,7 @@ # from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.tool.ansi_print import ansi_log @@ -126,8 +126,8 @@ # pure external function - fall back to the annotations # corresponding to the ll types FUNCTYPE = lltype.typeOf(fnobj) - args_s = [annmodel.lltype_to_annotation(ARG) for ARG in FUNCTYPE.ARGS] - s_result = annmodel.lltype_to_annotation(FUNCTYPE.RESULT) + args_s = [lltype_to_annotation(ARG) for ARG in FUNCTYPE.ARGS] + s_result = lltype_to_annotation(FUNCTYPE.RESULT) try: if force_stub: # old case - don't try to support suggested_primitive From noreply at 
buildbot.pypy.org Wed Jan 29 10:25:10 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 29 Jan 2014 10:25:10 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: revive clone if mutable, unfortunately needed Message-ID: <20140129092510.DB1771C00B3@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68987:3190114e9b0a Date: 2014-01-29 10:24 +0100 http://bitbucket.org/pypy/pypy/changeset/3190114e9b0a/ Log: revive clone if mutable, unfortunately needed diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -347,8 +347,6 @@ assert isinstance(frame, LLFrame) assert frame.forced_deadframe is None values = {} - import pdb - pdb.set_trace() for k, v in frame.lltrace.numbering.iteritems(): try: values[v] = frame.env[k] diff --git a/rpython/jit/codewriter/jitcode.py b/rpython/jit/codewriter/jitcode.py --- a/rpython/jit/codewriter/jitcode.py +++ b/rpython/jit/codewriter/jitcode.py @@ -102,8 +102,6 @@ def __repr__(self): return '' % self.name - def _clone_if_mutable(self): - raise NotImplementedError class MissingLiveness(Exception): pass diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -483,8 +483,8 @@ _counters = None # they get stored in _counters then. # the following attributes are used by the resume - rd_loop = None # keeping the loop alive rd_bytecode_position = -1 # position in the generated bytecode + rd_resume_bytecode = None CNT_BASE_MASK = 0x0FFFFFFF # the base counter value CNT_BUSY_FLAG = 0x10000000 # if set, busy tracing from the guard @@ -494,7 +494,7 @@ CNT_REF = 0x40000000 CNT_FLOAT = 0x60000000 - def store_final_boxes(self, guard_op, boxes): + def set_opnum(self, guard_op): self.guard_opnum = guard_op.getopnum() def make_a_counter_per_value(self, guard_value_op): @@ -618,11 +618,27 @@ new_loop.operations, new_loop.original_jitcell_token) + def copy_all_attributes_into(self, res): + # XXX a bit ugly to have to list them all here + res.rd_resume_bytecode = self.rd_resume_bytecode + res.rd_bytecode_position = self.rd_bytecode_position + + def _clone_if_mutable(self): + res = ResumeGuardDescr() + self.copy_all_attributes_into(res) + return res + class ResumeGuardNotInvalidated(ResumeGuardDescr): - pass + def _clone_if_mutable(self): + res = ResumeGuardNotInvalidated() + self.copy_all_attributes_into(res) + return res class ResumeAtPositionDescr(ResumeGuardDescr): - pass + def _clone_if_mutable(self): + res = ResumeAtPositionDescr() + self.copy_all_attributes_into(res) + return res class AllVirtuals: llopaque = True @@ -705,6 +721,12 @@ hidden_all_virtuals = obj.hide(metainterp_sd.cpu) metainterp_sd.cpu.set_savedata_ref(deadframe, hidden_all_virtuals) + def _clone_if_mutable(self): + res = ResumeGuardForcedDescr(self.metainterp_sd, + self.jitdriver_sd) + self.copy_all_attributes_into(res) + return res + class AbstractResumeGuardCounters(object): # Completely custom algorithm for now: keep 5 pairs (value, counter), diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -133,6 +133,14 @@ def repr_of_descr(self): return '%r' % (self,) + def _clone_if_mutable(self): + return self + def clone_if_mutable(self): + clone = self._clone_if_mutable() + if not we_are_translated(): + assert 
clone.__class__ is self.__class__ + return clone + def hide(self, cpu): descr_ptr = cpu.ts.cast_instance_to_base_ref(self) return cpu.ts.cast_to_ref(descr_ptr) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -557,6 +557,8 @@ self.replace_op(self.replaces_guard[op], op) del self.replaces_guard[op] return + else: + op = self.fixup_guard(op, pendingfields) self.resumebuilder.guard_seen(op, pendingfields) elif op.can_raise(): self.exception_might_have_happened = True @@ -577,19 +579,11 @@ else: assert False - def store_final_boxes_in_guard(self, op, pendingfields): - xxx + def fixup_guard(self, op, pendingfields): assert pendingfields is not None descr = op.getdescr() assert isinstance(descr, compile.ResumeGuardDescr) - modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo) - try: - newboxes = modifier.finish(self, pendingfields) - if len(newboxes) > self.metainterp_sd.options.failargs_limit: - raise resume.TagOverflow - except resume.TagOverflow: - raise compile.giveup() - descr.store_final_boxes(op, newboxes) + descr.set_opnum(op) # if op.getopnum() == rop.GUARD_VALUE: if self.getvalue(op.getarg(0)) in self.bool_boxes: @@ -605,7 +599,6 @@ else: raise AssertionError("uh?") newop = ResOperation(opnum, [op.getarg(0)], op.result, descr) - newop.setfailargs(op.getfailargs()) return newop else: # a real GUARD_VALUE. Make it use one counter per value. diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -167,7 +167,7 @@ jump_args = [self.getvalue(a).get_key_box() for a in original_jump_args] assert self.optimizer.loop.resume_at_jump_descr - resume_at_jump_descr = self.optimizer.loop.resume_at_jump_descr + resume_at_jump_descr = self.optimizer.loop.resume_at_jump_descr.clone_if_mutable() assert isinstance(resume_at_jump_descr, ResumeGuardDescr) modifier = VirtualStateAdder(self.optimizer) @@ -416,7 +416,7 @@ if op.is_guard(): op = op.clone() op.setfailargs(None) - descr = target_token.resume_at_jump_descr + descr = target_token.resume_at_jump_descr.clone_if_mutable() op.setdescr(descr) short[i] = op @@ -439,7 +439,7 @@ if op.result and op.result in self.short_boxes.assumed_classes: target_token.assumed_classes[newop.result] = self.short_boxes.assumed_classes[op.result] short[i] = newop - target_token.resume_at_jump_descr = target_token.resume_at_jump_descr + target_token.resume_at_jump_descr = target_token.resume_at_jump_descr.clone_if_mutable() # Forget the values to allow them to be freed for box in short[0].getarglist(): @@ -483,7 +483,7 @@ if not isinstance(a, Const) and a not in self.short_seen: self.add_op_to_short(self.short_boxes.producer(a), emit, guards_needed) if op.is_guard(): - descr = self.short_resume_at_jump_descr + descr = self.short_resume_at_jump_descr.clone_if_mutable() op.setdescr(descr) if guards_needed and self.short_boxes.has_producer(op.result): @@ -582,7 +582,7 @@ for guard in extra_guards: if guard.is_guard(): - descr = target.resume_at_jump_descr + descr = target.resume_at_jump_descr.clone_if_mutable() inliner.inline_descr_inplace(descr) guard.setdescr(descr) self.optimizer.send_extra_operation(guard) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- 
a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -80,6 +80,8 @@ def clone(self): args = self.getarglist() descr = self.getdescr() + if descr is not None: + descr = descr.clone_if_mutable() op = ResOperation(self.getopnum(), args[:], self.result, descr) if not we_are_translated(): op.name = self.name diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -189,10 +189,6 @@ found = 0 for op in get_stats().loops[0]._all_operations(): if op.getopname() == 'guard_true': - liveboxes = op.getfailargs() - assert len(liveboxes) == 2 # x, y (in some order) - assert isinstance(liveboxes[0], history.BoxInt) - assert isinstance(liveboxes[1], history.BoxInt) found += 1 if 'unroll' in self.enable_opts: assert found == 2 diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -154,9 +154,18 @@ None, None) return virtual_box + def getkind(self, fielddescr): + if fielddescr.is_pointer_field(): + return REF + elif fielddescr.is_float_field(): + return FLOAT + else: + assert fielddescr.is_field_signed() + return INT + def setfield_gc(self, box, encoded_field_pos, fielddescr): field_box = self.get_box_value(-1, -1, encoded_field_pos, - fielddescr.kind) + self.getkind(fielddescr)) self.metainterp.execute_and_record(rop.SETFIELD_GC, fielddescr, box, field_box) @@ -173,6 +182,7 @@ miframe.registers_r[i] = box def store_float_box(self, frame_pos, pos, miframe, i, jitframe_pos): + xxx box = self.get_box_value(jitframe_pos) if box is None: return diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -32,7 +32,7 @@ self.fields = {} def allocate_box(self, metainterp): - return metainterp.execute_and_record(rop.NEW_WITH_VTABLE, + return metainterp.execute_and_record(rop.NEW_WITH_VTABLE, None, ConstInt(self.const_class)) def allocate_direct(self, cpu): @@ -176,6 +176,9 @@ def resume_new(self, v_pos, descr): self.l.append("%d = resume_new %d" % (v_pos, descr.global_descr_index)) + def resume_clear(self, frame_pos, pos_in_frame): + self.l.append("resume_clear %d %d" % (frame_pos, pos_in_frame)) + def resume_new_with_vtable(self, v_pos, c_const_class): self.l.append("%d = resume_new_with_vtable %d" % (v_pos, c_const_class.getint())) From noreply at buildbot.pypy.org Wed Jan 29 10:36:02 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 29 Jan 2014 10:36:02 +0100 (CET) Subject: [pypy-commit] stmgc c7: implement inevitable transactions Message-ID: <20140129093602.72A211C00B3@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r687:58ad71386b61 Date: 2014-01-29 10:36 +0100 http://bitbucket.org/pypy/stmgc/changeset/58ad71386b61/ Log: implement inevitable transactions diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -22,7 +22,7 @@ char *object_pages; static int num_threads_started; uint8_t write_locks[READMARKER_END - READMARKER_START]; - +volatile uint8_t inevitable_lock; struct _thread_local1_s* _stm_dbg_get_tl(int thread) { @@ -125,6 +125,7 @@ uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; uint8_t lock_num = _STM_TL->thread_num + 1; uint8_t prev_owner; + retry: do { prev_owner = __sync_val_compare_and_swap(&write_locks[lock_idx], 0, lock_num); @@ -132,7 +133,16 @@ /* if there was no lock-holder 
or we already have the lock */ if ((!prev_owner) || (prev_owner == lock_num)) break; - + + if (_STM_TL->active == 2) { + /* we must succeed! */ + _stm_dbg_get_tl(prev_owner - 1)->need_abort = 1; + _stm_start_no_collect_safe_point(); + /* XXX: not good */ + usleep(1); + _stm_stop_no_collect_safe_point(); + goto retry; + } /* XXXXXX */ //_stm_start_semi_safe_point(); //usleep(1); @@ -161,6 +171,8 @@ { _stm_reset_shared_lock(); _stm_reset_pages(); + + inevitable_lock = 0; /* Check that some values are acceptable */ assert(4096 <= ((uintptr_t)_STM_TL)); @@ -259,12 +271,12 @@ _STM_TL->modified_objects = stm_list_create(); _STM_TL->uncommitted_objects = stm_list_create(); - assert(!_STM_TL->running_transaction); + assert(!_STM_TL->active); } bool _stm_is_in_transaction(void) { - return _STM_TL->running_transaction; + return _STM_TL->active; } void _stm_teardown_thread(void) @@ -288,6 +300,7 @@ void _stm_teardown(void) { + assert(inevitable_lock == 0); munmap(object_pages, TOTAL_MEMORY); _stm_reset_pages(); memset(write_locks, 0, sizeof(write_locks)); @@ -330,9 +343,46 @@ } +void stm_become_inevitable(char* msg) +{ + if (_STM_TL->active == 2) + return; + assert(_STM_TL->active == 1); + + uint8_t our_lock = _STM_TL->thread_num + 1; + do { + _stm_start_safe_point(); + + stm_start_exclusive_lock(); + if (_STM_TL->need_abort) { + stm_stop_exclusive_lock(); + stm_start_shared_lock(); + stm_abort_transaction(); + } + + if (!inevitable_lock) + break; + + stm_stop_exclusive_lock(); + _stm_stop_safe_point(); + } while (1); + + inevitable_lock = our_lock; + _STM_TL->active = 2; + stm_stop_exclusive_lock(); + + _stm_stop_safe_point(); +} + +void stm_start_inevitable_transaction() +{ + stm_start_transaction(NULL); + stm_become_inevitable("stm_start_inevitable_transaction"); +} + void stm_start_transaction(jmpbufptr_t *jmpbufptr) { - assert(!_STM_TL->running_transaction); + assert(!_STM_TL->active); stm_start_shared_lock(); @@ -346,14 +396,14 @@ nursery_on_start(); _STM_TL->jmpbufptr = jmpbufptr; - _STM_TL->running_transaction = 1; + _STM_TL->active = 1; _STM_TL->need_abort = 0; } void stm_stop_transaction(void) { - assert(_STM_TL->running_transaction); + assert(_STM_TL->active); /* do the minor_collection here and not in nursery_on_commit, since here we can still run concurrently with other threads @@ -361,8 +411,32 @@ _stm_minor_collect(); /* Some operations require us to have the EXCLUSIVE lock */ - stm_stop_shared_lock(); - stm_start_exclusive_lock(); + if (_STM_TL->active == 1) { + while (1) { + _stm_start_safe_point(); + usleep(1); /* XXX: better algorithm that allows + for waiting on a mutex */ + stm_start_exclusive_lock(); + if (_STM_TL->need_abort) { + stm_stop_exclusive_lock(); + stm_start_shared_lock(); + stm_abort_transaction(); + } + + if (!inevitable_lock) + break; + stm_stop_exclusive_lock(); + _stm_stop_safe_point(); + } + /* we have the exclusive lock */ + } else { + /* inevitable! 
no other transaction could have committed + or aborted us */ + stm_stop_shared_lock(); + stm_start_exclusive_lock(); + assert(!_STM_TL->need_abort); + inevitable_lock = 0; + } _STM_TL->jmpbufptr = NULL; /* cannot abort any more */ @@ -374,7 +448,7 @@ stm_list_clear(_STM_TL->modified_objects); - _STM_TL->running_transaction = 0; + _STM_TL->active = 0; stm_stop_exclusive_lock(); fprintf(stderr, "%c", 'C'+_STM_TL->thread_num*32); } @@ -421,13 +495,13 @@ void stm_abort_transaction(void) { /* here we hold the shared lock as a reader or writer */ - assert(_STM_TL->running_transaction); + assert(_STM_TL->active == 1); nursery_on_abort(); assert(_STM_TL->jmpbufptr != NULL); assert(_STM_TL->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ - _STM_TL->running_transaction = 0; + _STM_TL->active = 0; stm_stop_shared_lock(); fprintf(stderr, "%c", 'A'+_STM_TL->thread_num*32); diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -44,8 +44,6 @@ - - #define TLPREFIX __attribute__((address_space(256))) typedef TLPREFIX struct _thread_local1_s _thread_local1_t; @@ -94,7 +92,7 @@ uint8_t transaction_read_version; int thread_num; - bool running_transaction; + uint8_t active; /* 1 normal, 2 inevitable, 0 no trans. */ bool need_abort; char *thread_base; struct stm_list_s *modified_objects; @@ -226,8 +224,8 @@ void stm_abort_transaction(void); void _stm_minor_collect(); -#define stm_become_inevitable(msg) /* XXX implement me! */ -#define stm_start_inevitable_transaction() stm_start_transaction(NULL) /* XXX implement me! */ +void stm_become_inevitable(char* msg); +void stm_start_inevitable_transaction(); struct _thread_local1_s* _stm_dbg_get_tl(int thread); /* -1 is current thread */ diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -19,7 +19,7 @@ void stm_major_collection(void) { - assert(_STM_TL->running_transaction); + assert(_STM_TL->active); abort(); } @@ -190,7 +190,7 @@ { _stm_start_safe_point(); _stm_stop_safe_point(); - assert(_STM_TL->running_transaction); + assert(_STM_TL->active); assert(size % 8 == 0); assert(16 <= size && size < NB_NURSERY_PAGES * 4096);//XXX diff --git a/c7/stmsync.h b/c7/stmsync.h --- a/c7/stmsync.h +++ b/c7/stmsync.h @@ -6,6 +6,9 @@ void stm_start_exclusive_lock(void); void _stm_start_safe_point(void); void _stm_stop_safe_point(void); -void _stm_reset_shared_lock(); +void _stm_reset_shared_lock(void); +/* XXX: major collections must not be possible here: */ +#define _stm_start_no_collect_safe_point(void) _stm_start_safe_point() +#define _stm_stop_no_collect_safe_point(void) _stm_stop_safe_point() diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -108,6 +108,10 @@ size_t _stm_data_size(struct object_s *data); void _stm_chunk_pages(struct object_s *data, uintptr_t *start, uintptr_t *num); +void stm_become_inevitable(char* msg); +void stm_start_inevitable_transaction(); +bool _checked_stm_become_inevitable(); + """) lib = ffi.verify(''' @@ -137,6 +141,19 @@ } +bool _checked_stm_become_inevitable() { + jmpbufptr_t here; + if (__builtin_setjmp(here) == 0) { // returned directly + assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); + _STM_TL->jmpbufptr = &here; + stm_become_inevitable("TEST"); + _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; + return 0; + } + _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; + return 1; +} + bool _checked_stm_write(object_t *object) { jmpbufptr_t here; if (__builtin_setjmp(here) == 0) { // returned directly @@ -349,6 +366,10 @@ if lib._stm_check_stop_safe_point(): 
raise Conflict() +def stm_become_inevitable(): + if lib._checked_stm_become_inevitable(): + raise Conflict() + def stm_minor_collect(): lib._stm_minor_collect() diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -473,6 +473,25 @@ stm_stop_transaction() + def test_inevitable_transaction(self): + py.test.skip("stm_write and stm_stop_transaction" + " of an inevitable tr. is not testable" + " since they wait for the other thread" + " to synchronize and possibly abort") + + old = stm_allocate_old(16) + stm_start_transaction() + + self.switch(1) + stm_start_transaction() + stm_write(old) + + self.switch(0) + stm_become_inevitable() + stm_write(old) # t1 needs to abort, not us + stm_stop_transaction() + + py.test.raises(Conflict, self.switch, 1) # def test_resolve_write_write_no_conflict(self): # stm_start_transaction() diff --git a/duhton/demo/run_transactions.duh b/duhton/demo/run_transactions.duh new file mode 100644 --- /dev/null +++ b/duhton/demo/run_transactions.duh @@ -0,0 +1,25 @@ + +(setq c (container 0)) + +(defun g (n) + (setq i n) + (while (< 0 i) + (set c (+ (get c) 1)) + (setq i (- i 1)) + ) + ) + +(defun f (thread n) + (g n) + ) + +(transaction f (quote t1) 10000) +(transaction f (quote t2) 20000) +(transaction f (quote t2) 10002) +(run-transactions) +(transaction f (quote t2) 15) +(transaction f (quote t2) 15) +(run-transactions) +(print (quote result) (get c)) +(print (quote finished)) + diff --git a/duhton/demo/sort.duh b/duhton/demo/sort.duh --- a/duhton/demo/sort.duh +++ b/duhton/demo/sort.duh @@ -28,14 +28,13 @@ (defun random_list (n) - (if (> n 0) - (progn - (setq res (random_list (- n 1))) - (append res (% (xor128) 10)) - res - ) - (list ) + (setq i n) + (setq res (list)) + (while (> i 0) + (append res (% (xor128) 10)) + (setq i (- i 1)) ) + res ) @@ -62,33 +61,21 @@ ) -(defun append_to_correct_half (xs first second half_len) - (if (< 0 (len xs)) - (progn - (setq elem (pop xs 0)) - (if (< 0 half_len) - (append_to_correct_half xs - (append first elem) - second - (- half_len 1)) - - (append_to_correct_half xs - first - (append second elem) - (- half_len 1)) - ) - ) - ) - ) (defun split_list (xs) ;; empties xs and fills 2 new lists to be returned (setq half_len (/ (len xs) 2)) - (setq first (list)) (setq second (list)) - (append_to_correct_half xs first second half_len) + (while (< 0 (len xs)) + (if (< 0 half_len) + (append first (pop xs 0)) + (append second (pop xs 0)) + ) + (setq half_len (- half_len 1)) + ) + (list first second) ) @@ -110,19 +97,16 @@ ) -(defun copy_list_helper (xs res idx) - (if (< idx (len xs)) - (progn - (append res (get xs idx)) - (copy_list_helper xs res (+ idx 1)) - ) - ) - ) (defun copy_list (xs) (setq res (list)) - (copy_list_helper xs res 0) + (setq idx 0) + (while (< idx (len xs)) + (append res (get xs idx)) + (setq idx (+ idx 1)) + ) res ) + (defun print_list (xs) (print (quote len:) (len xs) (quote ->) xs) ) @@ -130,11 +114,11 @@ -(setq as (random_list 20)) -(setq bs (random_list 20)) -(print as) -(print bs) -(print (split_list as)) +;; (setq as (random_list 20)) +;; (setq bs (random_list 20)) +;; (print as) +;; (print bs) +;; (print (split_list as)) (setq cs (random_list 10000)) (print_list cs) diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -42,7 +42,7 @@ printf("))) "); fflush(stdout); } - stm_start_transaction(NULL); + stm_start_inevitable_transaction(); DuObject *code = Du_Compile(filename, interactive); _du_save1(code); 
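
As background to the inevitable-transaction support above: the patch serializes inevitability through the single global inevitable_lock, so at most one transaction at a time may be inevitable, and an inevitable transaction is never allowed to abort -- on a write-write conflict it sets need_abort on the thread that owns the write lock and retries, and it only releases the token when it commits. Duhton switches its compile/eval and transaction-dispatch paths to stm_start_inevitable_transaction() because those paths must not be re-executed. The sketch below is a minimal, self-contained Python model of just that conflict rule; the names (ToySTM, write, commit) are invented for the illustration, and it leaves out all of the locking and safe-point machinery of the real C code.

    class ToySTM(object):
        def __init__(self):
            self.inevitable = None      # at most one inevitable transaction at a time
            self.write_locks = {}       # object -> transaction currently owning it

        def become_inevitable(self, txn):
            # like stm_become_inevitable(): wait until nobody else holds the
            # token, then take it; from now on 'txn' must never abort
            assert self.inevitable is None or self.inevitable == txn
            self.inevitable = txn

        def write(self, txn, obj):
            # returns the transaction that has to abort because of this
            # write, or None if there is no conflict
            owner = self.write_locks.get(obj)
            if owner is None or owner == txn:
                self.write_locks[obj] = txn
                return None
            if self.inevitable == txn:
                # the C code sets need_abort on the owning thread and retries
                self.write_locks[obj] = txn
                return owner
            return txn                  # ordinary transactions abort themselves

        def commit(self, txn):
            self.write_locks = dict((k, v) for k, v in self.write_locks.items()
                                    if v != txn)
            if self.inevitable == txn:
                self.inevitable = None  # like 'inevitable_lock = 0' at commit time

    stm = ToySTM()
    assert stm.write("t1", "x") is None     # t1 takes the write lock first
    stm.become_inevitable("t0")             # t0 may not abort from now on
    assert stm.write("t0", "x") == "t1"     # so the conflict is resolved against t1
    stm.commit("t0")

In the real implementation the waiting loop in stm_become_inevitable() additionally has to drop and re-acquire the shared/exclusive locks around a safe point while someone else holds the token; the model ignores synchronization entirely.
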
stm_stop_transaction(); @@ -53,7 +53,7 @@ } /*Du_Print(code, 1); printf("\n");*/ - stm_start_transaction(NULL); + stm_start_inevitable_transaction(); DuObject *res = Du_Eval(code, Du_Globals); if (interactive) { Du_Print(res, 1); diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -771,7 +771,7 @@ all_threads_count = num_threads; all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads); - stm_start_transaction(NULL); + stm_start_inevitable_transaction(); DuFrame_SetBuiltinMacro(Du_Globals, "progn", Du_Progn); DuFrame_SetBuiltinMacro(Du_Globals, "setq", du_setq); DuFrame_SetBuiltinMacro(Du_Globals, "print", du_print); diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -61,7 +61,7 @@ if (stm_thread_local_obj == NULL) return; - stm_start_transaction(NULL); + stm_start_inevitable_transaction(); DuConsObject *root = du_pending_transactions; _du_write1(root); root->cdr = stm_thread_local_obj; From noreply at buildbot.pypy.org Wed Jan 29 12:36:30 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 29 Jan 2014 12:36:30 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: skip for abstract class Message-ID: <20140129113631.02E411D244C@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r68988:8eb1f8f89bc6 Date: 2014-01-29 09:40 +0200 http://bitbucket.org/pypy/pypy/changeset/8eb1f8f89bc6/ Log: skip for abstract class diff --git a/rpython/translator/platform/test/test_distutils.py b/rpython/translator/platform/test/test_distutils.py --- a/rpython/translator/platform/test/test_distutils.py +++ b/rpython/translator/platform/test/test_distutils.py @@ -11,3 +11,7 @@ def test_900_files(self): py.test.skip('Makefiles not suppoerted') + + def test_precompiled_headers(self): + py.test.skip('Makefiles not suppoerted') + From noreply at buildbot.pypy.org Wed Jan 29 12:36:32 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 29 Jan 2014 12:36:32 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: add a 'clean' target and test non-precompiled header make Message-ID: <20140129113632.3B7381D244C@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r68989:d3af251178e9 Date: 2014-01-29 13:24 +0200 http://bitbucket.org/pypy/pypy/changeset/d3af251178e9/ Log: add a 'clean' target and test non-precompiled header make diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -202,7 +202,8 @@ def write_list(prefix, lst): lst = lst or [''] for i, fn in enumerate(lst): - fn = fn.replace('\\', '\\\\') + if sys.platform != 'win32': + fn = fn.replace('\\', '\\\\') print >> f, prefix, fn, if i < len(lst)-1: print >> f, '\\' diff --git a/rpython/translator/platform/test/test_platform.py b/rpython/translator/platform/test/test_platform.py --- a/rpython/translator/platform/test/test_platform.py +++ b/rpython/translator/platform/test/test_platform.py @@ -87,13 +87,14 @@ self.check_res(res, '%d\n' %sum(range(900))) def test_precompiled_headers(self): + import time tmpdir = udir.join('precompiled_headers').ensure(dir=1) # Create an eci that should not use precompiled headers eci = ExternalCompilationInfo(include_dirs=[tmpdir]) main_c = tmpdir.join('main_no_pch.c') eci.separate_module_files = [main_c] ncfiles = 10 - nprecompiled_headers = 4 + nprecompiled_headers = 20 txt = '' for i in range(ncfiles): txt += 
"int func%03d();\n" % i @@ -109,7 +110,7 @@ for i in range(nprecompiled_headers): pch_name =tmpdir.join('pcheader%03d.h' % i) txt = '' - for j in range(1200): + for j in range(3000): txt += "int pcfunc%03d_%03d();\n" %(i, j) pch_name.write(txt) cfiles_precompiled_headers.append(pch_name) @@ -124,11 +125,30 @@ c_name.write(txt) cfiles.append(c_name) mk = self.platform.gen_makefile(cfiles, eci, path=udir, - cfile_precompilation=cfiles_precompiled_headers) + cfile_precompilation=cfiles_precompiled_headers) + if sys.platform == 'win32': + clean = ('clean', '', 'for %f in ( $(OBJECTS) $(TARGET) ) do @if exist %f del /f %f') + else: + clean = ('clean', '', 'rm -f $(OBJECTS) $(TARGET) ') + mk.rule(*clean) mk.write() + t0 = time.clock() self.platform.execute_makefile(mk) + t1 = time.clock() + t_precompiled = t1 - t0 res = self.platform.execute(mk.exe_name) self.check_res(res, '%d\n' %sum(range(ncfiles))) + self.platform.execute_makefile(mk, extra_opts=['clean']) + #Rewrite a non-precompiled header makefile + mk = self.platform.gen_makefile(cfiles, eci, path=udir) + mk.rule(*clean) + mk.write() + t0 = time.clock() + self.platform.execute_makefile(mk) + t1 = time.clock() + t_normal = t1 - t0 + print "precompiled haeder 'make' time %.2f, non-precompiled header time %.2f" %(t_precompiled, t_normal) + assert t_precompiled < t_normal * 0.5 def test_nice_errors(self): cfile = udir.join('test_nice_errors.c') From noreply at buildbot.pypy.org Wed Jan 29 12:36:33 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 29 Jan 2014 12:36:33 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: add NMake specific Definition class that does not fix backslashes Message-ID: <20140129113633.712CE1D244C@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r68990:48a44cf98eff Date: 2014-01-29 13:35 +0200 http://bitbucket.org/pypy/pypy/changeset/48a44cf98eff/ Log: add NMake specific Definition class that does not fix backslashes diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -202,8 +202,7 @@ def write_list(prefix, lst): lst = lst or [''] for i, fn in enumerate(lst): - if sys.platform != 'win32': - fn = fn.replace('\\', '\\\\') + fn = fn.replace('\\', '\\\\') print >> f, prefix, fn, if i < len(lst)-1: print >> f, '\\' diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -392,6 +392,25 @@ self._handle_error(returncode, stdout, stderr, path.join('make')) +class WinDefinition(posix.Definition): + def write(self, f): + def write_list(prefix, lst): + lst = lst or [''] + for i, fn in enumerate(lst): + print >> f, prefix, fn, + if i < len(lst)-1: + print >> f, '\\' + else: + print >> f + prefix = ' ' * len(prefix) + name, value = self.name, self.value + if isinstance(value, str): + f.write('%s = %s\n' % (name, value)) + else: + write_list('%s =' % (name,), value) + f.write('\n') + + class NMakefile(posix.GnuMakefile): def write(self, out=None): # nmake expands macros when it parses rules. 
@@ -410,6 +429,14 @@ if out is None: f.close() + def definition(self, name, value): + defs = self.defs + defn = WinDefinition(name, value) + if name in defs: + self.lines[defs[name]] = defn + else: + defs[name] = len(self.lines) + self.lines.append(defn) class MingwPlatform(posix.BasePosix): name = 'mingw32' From noreply at buildbot.pypy.org Wed Jan 29 13:51:39 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 29 Jan 2014 13:51:39 +0100 (CET) Subject: [pypy-commit] stmgc c7: remove bogus assert; change some limits for sort.duh to work; implement 'time' builtin in duhton Message-ID: <20140129125139.74D151D24ED@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r688:72d1d84af244 Date: 2014-01-29 13:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/72d1d84af244/ Log: remove bogus assert; change some limits for sort.duh to work; implement 'time' builtin in duhton diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -22,7 +22,7 @@ char *object_pages; static int num_threads_started; uint8_t write_locks[READMARKER_END - READMARKER_START]; -volatile uint8_t inevitable_lock; +volatile uint8_t inevitable_lock __attribute__((aligned(64))); /* cache-line alignment */ struct _thread_local1_s* _stm_dbg_get_tl(int thread) { @@ -113,7 +113,8 @@ _stm_chunk_pages((struct object_s*)REAL_ADDRESS(get_thread_base(0), obj), &pagenum2, &pages); assert(pagenum == pagenum2); - assert(pages == (stmcb_size(real_address(obj)) + 4095) / 4096); + /* assert(pages == (stmcb_size(real_address(obj)) + 4095) / 4096); + not true if obj spans two pages, but is itself smaller than 1 */ } for (pagenum2 += pages - 1; pagenum2 >= pagenum; pagenum2--) @@ -348,6 +349,7 @@ if (_STM_TL->active == 2) return; assert(_STM_TL->active == 1); + fprintf(stderr, "%c", 'I'+_STM_TL->thread_num*32); uint8_t our_lock = _STM_TL->thread_num + 1; do { diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -6,11 +6,11 @@ #include #include -#define NB_PAGES (256*256) // 256MB +#define NB_PAGES (6*256*256) // 6*256MB #define NB_THREADS 2 #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define LARGE_OBJECT_WORDS 36 -#define NB_NURSERY_PAGES 1024 +#define NB_NURSERY_PAGES 2048 // 8MB #define LENGTH_SHADOW_STACK 163840 @@ -21,7 +21,7 @@ #define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) #define FIRST_AFTER_NURSERY_PAGE (FIRST_OBJECT_PAGE + NB_NURSERY_PAGES) -#define HEAP_PAGES (((NB_PAGES - FIRST_AFTER_NURSERY_PAGE) * 2) / 3) +#define HEAP_PAGES (((NB_PAGES - FIRST_AFTER_NURSERY_PAGE) * 3) / 4) diff --git a/duhton/demo/sort.duh b/duhton/demo/sort.duh --- a/duhton/demo/sort.duh +++ b/duhton/demo/sort.duh @@ -39,29 +39,39 @@ -(defun merge_lists (as bs res) - ;; empties the two lists and merges the result to res - (setq len_as (len as)) - (setq len_bs (len bs)) - (if (< 0 len_as) - (if (< 0 len_bs) - (if (> (get as 0) (get bs 0)) - (append res (pop bs 0)) - (append res (pop as 0)) - ) - (append res (pop as 0)) - ) - (if (< 0 len_bs) - (append res (pop bs 0)) +(defun merge_lists (as bs) + ;; merges the two lists and returns a new one + (setq res (list)) + (setq idxa 0) + (setq idxb 0) + (while (&& (< idxa (len as)) + (< idxb (len bs))) + (if (> (get as idxa) (get bs idxb)) + (progn + (append res (get bs idxb)) + (setq idxb (+ idxb 1)) + ) + (append res (get as idxa)) + (setq idxa (+ idxa 1)) ) ) - (if (|| (< 0 len_as) (< 0 len_bs)) - (merge_lists as bs res) - ) + + (if (< idxa (len as)) + (progn + (setq xs 
as) + (setq idxx idxa) + ) + (setq xs bs) + (setq idxx idxb)) + + (while (< idxx (len xs)) + (append res (get xs idxx)) + (setq idxx (+ idxx 1))) + + res ) - (defun split_list (xs) ;; empties xs and fills 2 new lists to be returned (setq half_len (/ (len xs) 2)) @@ -80,18 +90,47 @@ ) + (defun merge_sort (xs) (if (<= (len xs) 1) ; 1 elem xs (progn ; many elems (setq lists (split_list xs)) + (setq left (merge_sort (get lists 0))) (setq right (merge_sort (get lists 1))) ;; (print left right) - (setq merged (list)) - (merge_lists left right merged) - ;; (print (quote >) merged) - merged + (merge_lists left right) + ) + ) + ) + +(defun merge_sort_transaction (xs res-cont) + (set res-cont (merge_sort xs)) + ) + +(defun merge_sort_parallel (xs) + (if (<= (len xs) 1) ; 1 elem + xs + (progn ; many elems + (setq lists (split_list xs)) + (setq left-c (container None)) + (setq right-c (container None)) + + (transaction merge_sort_transaction + (get lists 0) left-c) + (transaction merge_sort_transaction + (get lists 1) right-c) + (print (quote start-transactions)) + (run-transactions) + (print (quote finished-transactions)) + + (setq left (get left-c)) + (setq right (get right-c)) + (assert (<= (len left) (+ (len right) 2))) + (assert (<= (len right) (+ (len left) 2))) + ;; (print left right) + (merge_lists left right) ) ) ) @@ -111,7 +150,16 @@ (print (quote len:) (len xs) (quote ->) xs) ) - +(defun is_sorted (xs) + (setq idx 0) + (while (< idx (- (len xs) 1)) + (assert (<= + (get xs idx) + (get xs (+ idx 1)))) + (setq idx (+ idx 1)) + ) + (quote true) + ) ;; (setq as (random_list 20)) @@ -120,10 +168,22 @@ ;; (print bs) ;; (print (split_list as)) -(setq cs (random_list 10000)) -(print_list cs) -(print_list (merge_sort (copy_list cs))) +(setq cs (random_list 50000)) +;; (print_list cs) +;; (setq res (container None)) +;; (transaction merge_sort_transaction cs res) +;; (run-transactions) +;; (print (is_sorted (get res))) +(setq current (time)) +(print (quote before-sorting:) current) +(setq sorted (merge_sort_parallel cs)) +(print (quote after-sorting:) (time)) +(print (quote difference:) (- (time) current)) +(print (quote sorted:) (is_sorted sorted)) + + + diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -1,5 +1,6 @@ #include "duhton.h" #include +#include pthread_t *all_threads; int all_threads_count; @@ -709,6 +710,18 @@ return Du_None; } +DuObject *du_time(DuObject *cons, DuObject *locals) +{ + struct timeval current; + long mtime; + + gettimeofday(¤t, NULL); + + mtime = ((current.tv_sec) * 1000 + current.tv_usec/1000.0) + 0.5; + return DuInt_FromInt(mtime & 0x7fffffff); /* make it always positive 32bit */ +} + + DuObject *du_defined(DuObject *cons, DuObject *locals) { /* _du_read1(cons); IMMUTABLE */ @@ -810,6 +823,7 @@ DuFrame_SetBuiltinMacro(Du_Globals, "transaction", du_transaction); DuFrame_SetBuiltinMacro(Du_Globals, "run-transactions", du_run_transactions); DuFrame_SetBuiltinMacro(Du_Globals, "sleepms", du_sleepms); + DuFrame_SetBuiltinMacro(Du_Globals, "time", du_time); DuFrame_SetBuiltinMacro(Du_Globals, "defined?", du_defined); DuFrame_SetBuiltinMacro(Du_Globals, "pair?", du_pair); DuFrame_SetBuiltinMacro(Du_Globals, "assert", du_assert); diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -85,7 +85,10 @@ while (__builtin_setjmp(here) == 1) { } restart: - stm_start_transaction(&here); + // stm_start_transaction(&here); + /* this code is critical enough so that we want it to + be serialized 
perfectly using inevitable transactions */ + stm_start_inevitable_transaction(); root = du_pending_transactions; /* _du_read1(root); IMMUTABLE */ From noreply at buildbot.pypy.org Wed Jan 29 15:07:26 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 29 Jan 2014 15:07:26 +0100 (CET) Subject: [pypy-commit] stmgc c7: try to implement more general safe-points. not sure if successful Message-ID: <20140129140726.7464D1D2476@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r689:4d91804e80ed Date: 2014-01-29 15:07 +0100 http://bitbucket.org/pypy/stmgc/changeset/4d91804e80ed/ Log: try to implement more general safe-points. not sure if successful diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -138,10 +138,10 @@ if (_STM_TL->active == 2) { /* we must succeed! */ _stm_dbg_get_tl(prev_owner - 1)->need_abort = 1; - _stm_start_no_collect_safe_point(); - /* XXX: not good */ + _stm_start_safe_point(0); + /* XXX: not good, maybe should be signalled by other thread */ usleep(1); - _stm_stop_no_collect_safe_point(); + _stm_stop_safe_point(0); goto retry; } /* XXXXXX */ @@ -353,27 +353,21 @@ uint8_t our_lock = _STM_TL->thread_num + 1; do { - _stm_start_safe_point(); - - stm_start_exclusive_lock(); - if (_STM_TL->need_abort) { - stm_stop_exclusive_lock(); - stm_start_shared_lock(); - stm_abort_transaction(); - } + _stm_start_safe_point(LOCK_COLLECT); + _stm_stop_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE); if (!inevitable_lock) break; - stm_stop_exclusive_lock(); - _stm_stop_safe_point(); + _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT); + _stm_stop_safe_point(LOCK_COLLECT); } while (1); inevitable_lock = our_lock; _STM_TL->active = 2; - stm_stop_exclusive_lock(); - _stm_stop_safe_point(); + _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT); + _stm_stop_safe_point(LOCK_COLLECT); } void stm_start_inevitable_transaction() @@ -385,8 +379,8 @@ void stm_start_transaction(jmpbufptr_t *jmpbufptr) { assert(!_STM_TL->active); - - stm_start_shared_lock(); + + _stm_stop_safe_point(LOCK_COLLECT); uint8_t old_rv = _STM_TL->transaction_read_version; _STM_TL->transaction_read_version = old_rv + 1; @@ -415,28 +409,23 @@ /* Some operations require us to have the EXCLUSIVE lock */ if (_STM_TL->active == 1) { while (1) { - _stm_start_safe_point(); + _stm_start_safe_point(LOCK_COLLECT); usleep(1); /* XXX: better algorithm that allows for waiting on a mutex */ - stm_start_exclusive_lock(); - if (_STM_TL->need_abort) { - stm_stop_exclusive_lock(); - stm_start_shared_lock(); - stm_abort_transaction(); - } + _stm_stop_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE); if (!inevitable_lock) break; - stm_stop_exclusive_lock(); - _stm_stop_safe_point(); + + _stm_start_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE); + _stm_stop_safe_point(LOCK_COLLECT); } /* we have the exclusive lock */ } else { /* inevitable! 
no other transaction could have committed or aborted us */ - stm_stop_shared_lock(); - stm_start_exclusive_lock(); - assert(!_STM_TL->need_abort); + _stm_start_safe_point(LOCK_COLLECT); + _stm_stop_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT); inevitable_lock = 0; } @@ -451,7 +440,7 @@ _STM_TL->active = 0; - stm_stop_exclusive_lock(); + _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT); fprintf(stderr, "%c", 'C'+_STM_TL->thread_num*32); } @@ -504,13 +493,15 @@ assert(_STM_TL->jmpbufptr != NULL); assert(_STM_TL->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ _STM_TL->active = 0; - stm_stop_shared_lock(); + /* _STM_TL->need_abort = 0; */ + + _stm_start_safe_point(LOCK_COLLECT); + fprintf(stderr, "%c", 'A'+_STM_TL->thread_num*32); /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_threads(); stm_list_clear(_STM_TL->modified_objects); - __builtin_longjmp(*_STM_TL->jmpbufptr, 1); } diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -176,9 +176,14 @@ localchar_t *collect_and_reserve(size_t size) { - _stm_start_safe_point(); + _stm_start_safe_point(0); /* don't release the COLLECT lock, + that needs to be done afterwards if + we want a major collection */ minor_collect(); - _stm_stop_safe_point(); + _stm_stop_safe_point(0); + + /* XXX: if we_want_major_collect: acquire EXCLUSIVE & COLLECT lock + and do it */ localchar_t *current = _STM_TL->nursery_current; _STM_TL->nursery_current = current + size; @@ -188,8 +193,10 @@ object_t *stm_allocate(size_t size) { - _stm_start_safe_point(); - _stm_stop_safe_point(); + _stm_start_safe_point(LOCK_COLLECT); + /* all collections may happen here */ + _stm_stop_safe_point(LOCK_COLLECT); + assert(_STM_TL->active); assert(size % 8 == 0); assert(16 <= size && size < NB_NURSERY_PAGES * 4096);//XXX diff --git a/c7/stmsync.c b/c7/stmsync.c --- a/c7/stmsync.c +++ b/c7/stmsync.c @@ -1,8 +1,11 @@ +#include +#include +#include + + #include "stmsync.h" #include "core.h" #include "reader_writer_lock.h" -#include -#include /* a multi-reader, single-writer lock: transactions normally take a reader @@ -10,6 +13,8 @@ we take a writer lock to "stop the world". */ rwticket rw_shared_lock; /* the "GIL" */ +rwticket rw_collection_lock; /* for major collections */ + void _stm_reset_shared_lock() { @@ -17,16 +22,45 @@ assert(!rwticket_wrunlock(&rw_shared_lock)); memset(&rw_shared_lock, 0, sizeof(rwticket)); + + assert(!rwticket_wrtrylock(&rw_collection_lock)); + assert(!rwticket_wrunlock(&rw_collection_lock)); + + memset(&rw_collection_lock, 0, sizeof(rwticket)); +} + +void stm_acquire_collection_lock() +{ + /* we must have the exclusive lock here and + not the colletion lock!! */ + while (1) { + if (!rwticket_wrtrylock(&rw_collection_lock)) + break; /* acquired! 
*/ + + stm_stop_exclusive_lock(); + usleep(1); + stm_start_exclusive_lock(); + if (_STM_TL->need_abort) { + stm_stop_exclusive_lock(); + stm_start_shared_lock(); + stm_abort_transaction(); + } + } } void stm_start_shared_lock(void) { - rwticket_rdlock(&rw_shared_lock); + rwticket_rdlock(&rw_shared_lock); } -void stm_stop_shared_lock(void) +void stm_stop_shared_lock() { - rwticket_rdunlock(&rw_shared_lock); + rwticket_rdunlock(&rw_shared_lock); +} + +void stm_start_exclusive_lock(void) +{ + rwticket_wrlock(&rw_shared_lock); } void stm_stop_exclusive_lock(void) @@ -34,20 +68,52 @@ rwticket_wrunlock(&rw_shared_lock); } -void stm_start_exclusive_lock(void) +/* _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT) + -> release the exclusive lock and also the collect-read-lock */ +void _stm_start_safe_point(uint8_t flags) { - rwticket_wrlock(&rw_shared_lock); + if (flags & LOCK_EXCLUSIVE) + stm_stop_exclusive_lock(); + else + stm_stop_shared_lock(); + + if (flags & LOCK_COLLECT) + rwticket_rdunlock(&rw_collection_lock); } -void _stm_start_safe_point(void) +/* + _stm_stop_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE); + -> reacquire the collect-read-lock and the exclusive lock + */ +void _stm_stop_safe_point(uint8_t flags) { - assert(!_STM_TL->need_abort); - stm_stop_shared_lock(); + if (flags & LOCK_EXCLUSIVE) + stm_start_exclusive_lock(); + else + stm_start_shared_lock(); + + if (!(flags & LOCK_COLLECT)) { /* if we released the collection lock */ + /* acquire read-collection. always succeeds because + if there was a write-collection holder we would + also not have gotten the shared_lock */ + rwticket_rdlock(&rw_collection_lock); + } + + if (_STM_TL->active && _STM_TL->need_abort) { + if (flags & LOCK_EXCLUSIVE) { + /* restore to shared-mode with the collection lock */ + stm_stop_exclusive_lock(); + stm_start_shared_lock(); + if (flags & LOCK_COLLECT) + rwticket_rdlock(&rw_collection_lock); + stm_abort_transaction(); + } else { + if (flags & LOCK_COLLECT) + rwticket_rdlock(&rw_collection_lock); + stm_abort_transaction(); + } + } } -void _stm_stop_safe_point(void) -{ - stm_start_shared_lock(); - if (_STM_TL->need_abort) - stm_abort_transaction(); -} + + diff --git a/c7/stmsync.h b/c7/stmsync.h --- a/c7/stmsync.h +++ b/c7/stmsync.h @@ -1,14 +1,16 @@ +#include void stm_start_shared_lock(void); void stm_stop_shared_lock(void); void stm_stop_exclusive_lock(void); void stm_start_exclusive_lock(void); -void _stm_start_safe_point(void); -void _stm_stop_safe_point(void); +void _stm_start_safe_point(uint8_t flags); +void _stm_stop_safe_point(uint8_t flags); void _stm_reset_shared_lock(void); -/* XXX: major collections must not be possible here: */ -#define _stm_start_no_collect_safe_point(void) _stm_start_safe_point() -#define _stm_stop_no_collect_safe_point(void) _stm_stop_safe_point() +enum { + LOCK_COLLECT = (1 << 0), + LOCK_EXCLUSIVE = (1 << 1), +}; diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -58,8 +58,8 @@ bool _stm_is_young(object_t *o); object_t *_stm_allocate_old(size_t size); -void _stm_start_safe_point(void); -void _stm_stop_safe_point(void); +void _stm_start_safe_point(uint8_t); +void _stm_stop_safe_point(uint8_t); bool _stm_check_stop_safe_point(void); void _set_type_id(object_t *obj, uint32_t h); @@ -91,7 +91,12 @@ enum { GCFLAG_WRITE_BARRIER = 1, GCFLAG_NOT_COMMITTED = 2, - GCFLAG_MOVED = 4 + GCFLAG_MOVED = 4, +}; + +enum { + LOCK_COLLECT = 1, + LOCK_EXCLUSIVE = 2, }; @@ -185,7 +190,7 @@ if (__builtin_setjmp(here) == 0) { // returned 
directly assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); _STM_TL->jmpbufptr = &here; - _stm_stop_safe_point(); + _stm_stop_safe_point(LOCK_COLLECT); _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 0; } @@ -360,7 +365,7 @@ def stm_start_safe_point(): - lib._stm_start_safe_point() + lib._stm_start_safe_point(lib.LOCK_COLLECT) def stm_stop_safe_point(): if lib._stm_check_stop_safe_point(): diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -314,7 +314,7 @@ def test_many_allocs(self): # assumes NB_NURSERY_PAGES 1024 obj_size = 1024 - num = 5000 # more than what fits in the nursery (4MB) + num = 9000 # more than what fits in the nursery (4MB) stm_start_transaction() for i in range(num): From noreply at buildbot.pypy.org Wed Jan 29 16:13:57 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 29 Jan 2014 16:13:57 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: start fighting with strings Message-ID: <20140129151357.35CAC1C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68991:07f41e9e2678 Date: 2014-01-29 14:52 +0100 http://bitbucket.org/pypy/pypy/changeset/07f41e9e2678/ Log: start fighting with strings diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -44,24 +44,20 @@ return Position(self.numbering[self.mapping(box)]) def process(self, op): - getattr(self, 'process_' + op.getopname())(op) + func = getattr(self, 'process_' + op.getopname(), None) + if func is not None: + func(op) ResumeBuilder.process(self, op) - def process_enter_frame(self, op): - pass - - def process_leave_frame(self, op): - pass - - def process_resume_set_pc(self, op): - pass - - def process_resume_new_with_vtable(self, op): - pass - def process_resume_setfield_gc(self, op): self._add_box_to_numbering(op.getarg(1)) + def process_resume_concatstr(self, op): + self._add_box_to_numbering(op.getarg(0)) + self._add_box_to_numbering(op.getarg(1)) + + process_resume_concatunicode = process_resume_concatstr + def _add_box_to_numbering(self, box): if isinstance(box, Const): return @@ -70,9 +66,6 @@ def process_resume_put(self, op): self._add_box_to_numbering(op.getarg(0)) - - def process_resume_clear(self, op): - pass class LLTrace(object): has_been_freed = False diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -503,6 +503,7 @@ return vvalue def make_varray(self, arraydescr, size, box, source_op=None): + xxx if arraydescr.is_array_of_structs(): vvalue = VArrayStructValue(arraydescr, size, box, source_op) else: @@ -519,12 +520,14 @@ return vvalue def make_virtual_raw_memory(self, size, box, source_op): + xxx logops = self.optimizer.loop.logops vvalue = VRawBufferValue(self.optimizer.cpu, logops, size, box, source_op) self.make_equal_to(box, vvalue) return vvalue def make_virtual_raw_slice(self, rawbuffer_value, offset, box, source_op): + xxx vvalue = VRawSliceValue(rawbuffer_value, offset, box, source_op) self.make_equal_to(box, vvalue) return vvalue diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -19,6 +19,7 @@ class StrOrUnicode(object): def 
__init__(self, LLTYPE, hlstr, emptystr, chr, NEWSTR, STRLEN, STRGETITEM, STRSETITEM, COPYSTRCONTENT, + RESUME_NEW, RESUME_CONCAT, OS_offset): self.LLTYPE = LLTYPE self.hlstr = hlstr @@ -29,6 +30,8 @@ self.STRGETITEM = STRGETITEM self.STRSETITEM = STRSETITEM self.COPYSTRCONTENT = COPYSTRCONTENT + self.RESUME_NEW = RESUME_NEW + self.RESUME_CONCAT = RESUME_CONCAT self.OS_offset = OS_offset def _freeze_(self): @@ -36,10 +39,12 @@ mode_string = StrOrUnicode(rstr.STR, annlowlevel.hlstr, '', chr, rop.NEWSTR, rop.STRLEN, rop.STRGETITEM, - rop.STRSETITEM, rop.COPYSTRCONTENT, 0) + rop.STRSETITEM, rop.COPYSTRCONTENT, + rop.RESUME_NEWSTR, rop.RESUME_CONCATSTR, 0) mode_unicode = StrOrUnicode(rstr.UNICODE, annlowlevel.hlunicode, u'', unichr, rop.NEWUNICODE, rop.UNICODELEN, rop.UNICODEGETITEM, rop.UNICODESETITEM, rop.COPYUNICODECONTENT, + rop.RESUME_NEWUNICODE, rop.RESUME_CONCATUNICODE, EffectInfo._OS_offset_uni) # ____________________________________________________________ @@ -390,17 +395,22 @@ def new(self): return OptString() - def make_vstring_plain(self, box, source_op, mode): + def make_vstring_plain(self, box, source_op, mode, length): vvalue = VStringPlainValue(box, source_op, mode) + vvalue.setup(length) + self.optimizer.resumebuilder.new_vstring(vvalue, length, mode) self.make_equal_to(box, vvalue) return vvalue - def make_vstring_concat(self, box, source_op, mode): + def make_vstring_concat(self, box, source_op, mode, left, right): vvalue = VStringConcatValue(box, source_op, mode) + vvalue.setup(left, right) + self.optimizer.resumebuilder.vstring_concat(vvalue, left, right, mode) self.make_equal_to(box, vvalue) return vvalue def make_vstring_slice(self, box, source_op, mode): + xxx vvalue = VStringSliceValue(box, source_op, mode) self.make_equal_to(box, vvalue) return vvalue @@ -417,8 +427,8 @@ # build a new one with the ConstInt argument if not isinstance(op.getarg(0), ConstInt): op = ResOperation(mode.NEWSTR, [length_box], op.result) - vvalue = self.make_vstring_plain(op.result, op, mode) - vvalue.setup(length_box.getint()) + self.make_vstring_plain(op.result, op, mode, + length_box.getint()) else: self.getvalue(op.result).ensure_nonnull() self.emit_operation(op) @@ -430,7 +440,10 @@ if value.is_virtual() and isinstance(value, VStringPlainValue): indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: - value.setitem(indexbox.getint(), self.getvalue(op.getarg(2))) + index = indexbox.getint() + varg = self.getvalue(op.getarg(2)) + value.setitem(index, varg) + self.optimizer.resumebuilder.strsetitem(value, varg) return value.ensure_nonnull() self.emit_operation(op) @@ -592,8 +605,7 @@ vright = self.getvalue(op.getarg(2)) vleft.ensure_nonnull() vright.ensure_nonnull() - value = self.make_vstring_concat(op.result, op, mode) - value.setup(vleft, vright) + self.make_vstring_concat(op.result, op, mode, vleft, vright) return True def opt_call_stroruni_STR_SLICE(self, op, mode): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -480,6 +480,9 @@ 'RESUME_NEW_ARRAY/1d', 'RESUME_NEWSTR/1', 'RESUME_NEWUNICODE/1', + 'RESUME_CONCATSTR/2', + 'RESUME_CONCATUNICODE/2', + 'RESUME_STRSETITEM/2', 'RESUME_SETFIELD_GC/2d', 'RESUME_SET_PC/1', 'RESUME_CLEAR/2', diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -1,10 +1,31 @@ from rpython.jit.metainterp.resoperation 
import rop -from rpython.jit.metainterp.history import Box, Const +from rpython.jit.metainterp.history import Box, Const, AbstractDescr from rpython.jit.resume.rescode import ResumeBytecodeBuilder, TAGBOX,\ ResumeBytecode, TAGVIRTUAL from rpython.jit.codewriter.jitcode import JitCode +class DescrForStr(AbstractDescr): + pass + +left_descr = DescrForStr() +right_descr = DescrForStr() + +class BaseDeps(object): + pass + +class DepsFields(BaseDeps): + def __init__(self): + self.fields = {} + +class DepsConcat(BaseDeps): + def __init__(self, left, right): + self.left = left + self.right = right + +class DepsArray(BaseDeps): + def __init__(self, size): + self.l = [None] * size class LivenessAnalyzer(object): def __init__(self): @@ -26,13 +47,22 @@ self.framestack[framepos][frontend_pos] = None def resume_new(self, result, descr): - self.deps[result] = {} + self.deps[result] = DepsFields() + + def resume_newunicode(self, result, length): + self.deps[result] = DepsArray(length) + + def resume_concatunicode(self, result, left, right): + self.deps[result] = DepsConcat(left, right) def resume_new_with_vtable(self, result, klass): - self.deps[result] = {} + self.deps[result] = DepsFields() def resume_setfield_gc(self, arg0, arg1, descr): - self.deps[arg0][descr] = arg1 + self.deps[arg0].fields[descr] = arg1 + + def resume_strsetitem(self, arg0, arg1): + xxx def resume_set_pc(self, pc): pass @@ -64,6 +94,16 @@ elif op.getopnum() == rop.RESUME_CLEAR: self.resume_clear(op.getarg(0).getint(), op.getarg(1).getint()) + elif op.getopnum() == rop.RESUME_NEWSTR: + xxx + elif op.getopnum() == rop.RESUME_NEWUNICODE: + self.resume_newunicode(op.result, op.getarg(0).getint()) + elif op.getopnum() == rop.RESUME_CONCATSTR: + xxx + elif op.getopnum() == rop.RESUME_CONCATUNICODE: + self.resume_concatunicode(op.result, op.getarg(0), op.getarg(1)) + elif op.getopnum() == rop.RESUME_STRSETITEM: + self.resume_strsetitem(op.getarg(0), op.getarg(1)) elif not op.is_resume(): pos += 1 continue @@ -150,6 +190,20 @@ elif op.getopnum() == rop.RESUME_CLEAR: self.builder.resume_clear(op.getarg(0).getint(), op.getarg(1).getint()) + elif op.getopnum() == rop.RESUME_NEWSTR: + xxx + elif op.getopnum() == rop.RESUME_NEWUNICODE: + v_pos = len(self.virtuals) + self.virtuals[op.result] = v_pos + self.builder.resume_newunicode(v_pos, op.getarg(0).getint()) + elif op.getopnum() == rop.RESUME_CONCATSTR: + xxx + elif op.getopnum() == rop.RESUME_CONCATUNICODE: + v_pos = len(self.virtuals) + self.virtuals[op.result] = v_pos + leftpos = self.get_box_pos(op.getarg(0)) + rightpos = self.get_box_pos(op.getarg(0)) + self.builder.resume_concatunicode(v_pos, leftpos, rightpos) else: raise Exception("strange operation") diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -76,11 +76,9 @@ return self.virtuals_cache[index] except KeyError: pass - val = self.virtuals[index].allocate_direct(self.cpu) + val = self.virtuals[index].allocate_direct(self, self.cpu) self.virtuals_cache[index] = val - fields = self.virtuals[index].fields - for fielddescr, encoded_field_pos in fields.iteritems(): - self.setfield_gc(val, encoded_field_pos, fielddescr) + self.virtuals[index].populate_fields(val, self) return val def setfield_gc(self, struct, encoded_field_pos, fielddescr): diff --git a/rpython/jit/resume/optimizer.py b/rpython/jit/resume/optimizer.py --- a/rpython/jit/resume/optimizer.py +++ b/rpython/jit/resume/optimizer.py @@ -51,10 +51,6 @@ else: 
self.opt.emit_operation(op) - def new_virtual(self, box): - xxx - self.optimizer.emit_operation(rop.RESUME_NEW) - def new_virtual_with_vtable(self, box, vtable, vvalue): virtualbox = BoxPtr() op = ResOperation(rop.RESUME_NEW_WITH_VTABLE, [vtable], virtualbox) @@ -67,6 +63,26 @@ op = ResOperation(rop.RESUME_NEW, [], newbox, descr=structdescr) self.opt._newoperations.append(op) + def new_vstring(self, vstring, lgt, mode): + newbox = BoxPtr() + vstring.resume_box = newbox + op = ResOperation(mode.RESUME_NEW, [ConstInt(lgt)], newbox) + self.opt._newoperations.append(op) + + def vstring_concat(self, vstring, left, right, mode): + leftbox = left.get_resume_box() + rightbox = right.get_resume_box() + newbox = BoxPtr() + vstring.resume_box = newbox + op = ResOperation(mode.RESUME_CONCAT, [leftbox, rightbox], newbox) + self.opt._newoperations.append(op) + + def strsetitem(self, vstring, varg): + argbox = varg.get_resume_box() + op = ResOperation(rop.RESUME_STRSETITEM, [vstring.get_resume_box(), + argbox], None) + self.opt._newoperations.append(op) + def setfield(self, box, fieldbox, descr): op = ResOperation(rop.RESUME_SETFIELD_GC, [box, fieldbox], None, descr=descr) diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -1,4 +1,7 @@ +from rpython.rtyper.lltypesystem import lltype, rstr, llmemory + +from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.resoperation import rop from rpython.jit.metainterp.history import ConstInt from rpython.jit.codewriter import heaptracker @@ -11,9 +14,16 @@ self.pc = -1 class BaseVirtual(object): - pass + def populate_fields(self, reader): + pass -class VirtualStruct(BaseVirtual): +class BaseVirtualStruct(BaseVirtual): + def populate_fields(self, val, reader): + fields = self.fields + for fielddescr, encoded_field_pos in fields.iteritems(): + reader.setfield_gc(val, encoded_field_pos, fielddescr) + +class VirtualStruct(BaseVirtualStruct): def __init__(self, pos, descr): self.pos = pos self.fields = {} @@ -22,10 +32,10 @@ def allocate_box(self, metainterp): return metainterp.execute_and_record(rop.NEW, self.descr) - def allocate_direct(self, cpu): + def allocate_direct(self, reader, cpu): return cpu.bh_new(self.descr) -class VirtualWithVtable(BaseVirtual): +class VirtualWithVtable(BaseVirtualStruct): def __init__(self, pos, const_class): self.pos = pos self.const_class = const_class @@ -35,10 +45,37 @@ return metainterp.execute_and_record(rop.NEW_WITH_VTABLE, None, ConstInt(self.const_class)) - def allocate_direct(self, cpu): + def allocate_direct(self, reader, cpu): descr = heaptracker.vtable2descr(cpu, self.const_class) return cpu.bh_new_with_vtable(self.const_class, descr) +class VirtualStr(BaseVirtual): + def __init__(self, pos, lgt, mode): + self.pos = pos + self.lgt = lgt + self.mode = mode + +class VirtualConcat(BaseVirtual): + def __init__(self, pos, left, right, mode): + self.pos = pos + self.left = left + self.right = right + self.mode = mode + + def allocate_direct(self, reader, cpu): + leftval = reader.getref(self.left) + rightval = reader.getref(self.right) + cic = reader.staticdata.callinfocollection + if self.mode == 'u': + funcptr = cic.funcptr_for_oopspec(EffectInfo.OS_UNI_CONCAT) + str1 = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), leftval) + str2 = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), rightval) + result = funcptr(str1, str2) + return lltype.cast_opaque_ptr(llmemory.GCREF, result) + else: + xxx + xxx + class 
AbstractResumeReader(object): """ A resume reader that can follow resume until given point. Consult the concrete classes for details @@ -73,6 +110,22 @@ v = VirtualStruct(v_pos, descr) self._add_to_virtuals(v, v_pos) + def resume_newstr(self, v_pos, lgt): + v = VirtualStr(v_pos, lgt, 's') + self._add_to_virtuals(v, v_pos) + + def resume_newunicode(self, v_pos, lgt): + v = VirtualStr(v_pos, lgt, 'u') + self._add_to_virtuals(v, v_pos) + + def resume_concatstr(self, v_pos, leftpos, rightpos): + v = VirtualConcat(v_pos, leftpos, rightpos, 's') + self._add_to_virtuals(v, v_pos) + + def resume_concatunicode(self, v_pos, leftpos, rightpos): + v = VirtualConcat(v_pos, leftpos, rightpos, 'u') + self._add_to_virtuals(v, v_pos) + def resume_new_with_vtable(self, v_pos, c_const_class): const_class = c_const_class.getint() v = VirtualWithVtable(v_pos, const_class) @@ -153,6 +206,21 @@ pc = self.read_short(pos + 1) self.resume_set_pc(pc) pos += 3 + elif op == rescode.RESUME_NEWSTR: + xxx + elif op == rescode.RESUME_NEWUNICODE: + v_pos = self.read_short(pos + 1) + lgt = self.read(pos + 3) + self.resume_newunicode(v_pos, lgt) + pos += 4 + elif op == rescode.RESUME_CONCATSTR: + xxx + elif op == rescode.RESUME_CONCATUNICODE: + v_pos = self.read_short(pos + 1) + left = self.read_short(pos + 3) + right = self.read_short(pos + 5) + self.resume_concatunicode(v_pos, left, right) + pos += 7 else: xxx self.bytecode = None @@ -179,6 +247,18 @@ def resume_clear(self, frame_pos, pos_in_frame): self.l.append("resume_clear %d %d" % (frame_pos, pos_in_frame)) + def resume_newstr(self, v_pos, lgt): + xxx + + def resume_newunicode(self, v_pos, lgt): + xxx + + def resume_concatstr(self, v_pos, leftpos, rightpos): + xxx + + def resume_concatunicode(self, v_pos, leftpos, rightpos): + xxx + def resume_new_with_vtable(self, v_pos, c_const_class): self.l.append("%d = resume_new_with_vtable %d" % (v_pos, c_const_class.getint())) diff --git a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -4,7 +4,8 @@ (UNUSED, ENTER_FRAME, LEAVE_FRAME, RESUME_PUT, RESUME_NEW, RESUME_NEW_WITH_VTABLE, RESUME_SETFIELD_GC, - RESUME_SET_PC, RESUME_CLEAR) = range(9) + RESUME_SET_PC, RESUME_CLEAR, RESUME_NEWSTR, RESUME_NEWUNICODE, + RESUME_CONCATSTR, RESUME_CONCATUNICODE) = range(13) TAGCONST = 0x0 TAGVIRTUAL = 0x2 @@ -88,6 +89,17 @@ self.write_short(v_pos) # XXX byte virtuals? self.write_short(descr.global_descr_index) + def resume_newunicode(self, v_pos, lgt): + self.write(RESUME_NEWUNICODE) + self.write_short(v_pos) # XXX byte virtuals? + self.write(lgt) + + def resume_concatunicode(self, v_pos, leftpos, rightpos): + self.write(RESUME_CONCATUNICODE) + self.write_short(v_pos) + self.write_short(leftpos) + self.write_short(rightpos) + def resume_new_with_vtable(self, v_pos, const_class): self.write(RESUME_NEW_WITH_VTABLE) self.write_short(v_pos) # XXX byte virtuals? 
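
The point of the new RESUME_NEWSTR/RESUME_NEWUNICODE, RESUME_STRSETITEM and RESUME_CONCATSTR/RESUME_CONCATUNICODE operations, and of the rescode writers above, is that a string built up inside the traced loop never has to be allocated while the loop runs: the optimizer only records a compact description of how to build it, and that description is replayed when a guard fails. Below is a self-contained toy version of the idea; the opcodes and encoding are invented for the illustration and are not the real rescode format.

    NEWSTR, STRSETITEM, CONCAT = range(3)

    def rebuild(description):
        # replay the recorded operations; a real string for each virtual
        # number exists only from this point on
        virtuals = {}
        for op in description:
            if op[0] == NEWSTR:
                _, v, length = op
                virtuals[v] = [u'\x00'] * length
            elif op[0] == STRSETITEM:
                _, v, index, char = op
                virtuals[v][index] = char
            else:                        # CONCAT
                _, v, left, right = op
                virtuals[v] = virtuals[left] + virtuals[right]
        return dict((v, u''.join(chars)) for v, chars in virtuals.items())

    # describes u'hi' + u'!' without ever allocating any of the three strings
    description = [
        (NEWSTR, 0, 2), (STRSETITEM, 0, 0, u'h'), (STRSETITEM, 0, 1, u'i'),
        (NEWSTR, 1, 1), (STRSETITEM, 1, 0, u'!'),
        (CONCAT, 2, 0, 1),
    ]
    assert rebuild(description)[2] == u'hi!'

Only the replay allocates anything, which is why keeping such strings virtual costs nothing on the fast path.
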
diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -2,7 +2,7 @@ from rpython.jit.tool.oparser import parse from rpython.jit.codewriter.jitcode import JitCode from rpython.jit.metainterp.history import AbstractDescr, Const, INT, Stats,\ - ConstInt, REF + ConstInt, REF, FLOAT from rpython.jit.resume.frontend import rebuild_from_resumedata,\ blackhole_from_resumedata from rpython.jit.resume.rescode import ResumeBytecode, TAGBOX,\ @@ -30,6 +30,9 @@ def is_field_signed(self): return self.kind == INT + def is_float_field(self): + return self.kind == FLOAT + class MockLoop(object): pass @@ -251,12 +254,12 @@ rebuild_from_resumedata(metainterp, "myframe", descr) expected = [(rop.NEW, descr), (rop.SETFIELD_GC, d2, AnyBox(), EqConstInt(1)), - (rop.NEW_WITH_VTABLE, EqConstInt(cls_as_int)), + (rop.NEW_WITH_VTABLE, None, EqConstInt(cls_as_int)), (rop.SETFIELD_GC, d3, AnyBox(), AnyBox()), (rop.RESUME_PUT, None, AnyBox(), EqConstInt(0), EqConstInt(0))] expected2 = [(rop.NEW, descr), - (rop.NEW_WITH_VTABLE, EqConstInt(cls_as_int)), + (rop.NEW_WITH_VTABLE, None, EqConstInt(cls_as_int)), (rop.SETFIELD_GC, d3, AnyBox(), AnyBox()), (rop.SETFIELD_GC, d2, AnyBox(), EqConstInt(1)), (rop.RESUME_PUT, None, AnyBox(), EqConstInt(0), From noreply at buildbot.pypy.org Wed Jan 29 16:13:58 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 29 Jan 2014 16:13:58 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: more string support Message-ID: <20140129151358.7226F1C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68992:f87bcc7fdc34 Date: 2014-01-29 15:27 +0100 http://bitbucket.org/pypy/pypy/changeset/f87bcc7fdc34/ Log: more string support diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -18,15 +18,28 @@ def __init__(self): self.fields = {} + def foreach(self, callback, arg): + for v in self.fields.itervalues(): + callback(arg, v) + class DepsConcat(BaseDeps): def __init__(self, left, right): self.left = left self.right = right + def foreach(self, callback, arg): + callback(arg, self.left) + callback(arg, self.right) + class DepsArray(BaseDeps): def __init__(self, size): self.l = [None] * size + def foreach(self, callback, arg): + for item in self.l: + if item is not None: + callback(arg, item) + class LivenessAnalyzer(object): def __init__(self): self.liveness = {} @@ -113,8 +126,7 @@ def _track(self, allboxes, box): if box in self.deps: - for dep in self.deps[box].values(): - self._track(allboxes, dep) + self.deps[box].foreach(self._track, allboxes) if not isinstance(box, Const) and box is not None: allboxes.append(box) diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -81,6 +81,12 @@ self.virtuals[index].populate_fields(val, self) return val + def strsetitem(self, str, index, char, mode): + if mode == 's': + self.cpu.bh_strsetitem(str, index, char) + else: + self.cpu.bh_unicodesetitem(str, index, char) + def setfield_gc(self, struct, encoded_field_pos, fielddescr): if fielddescr.is_field_signed(): intval = self.getint(encoded_field_pos) @@ -142,8 +148,7 @@ virtual = self.virtuals[pos] virtual_box = virtual.allocate_box(self.metainterp) self.cache[encoded_pos] = virtual_box - for fielddescr, encoded_field_pos in 
virtual.fields.iteritems(): - self.setfield_gc(virtual_box, encoded_field_pos, fielddescr) + virtual.populate_fields_boxes(virtual_box, self) if pos_in_frame != -1: self.metainterp.history.record(rop.RESUME_PUT, [virtual_box, @@ -167,6 +172,13 @@ self.metainterp.execute_and_record(rop.SETFIELD_GC, fielddescr, box, field_box) + def strsetitem(self, box, ibox, vbox, mode): + if mode == 's': + resop = rop.STRSETITEM + else: + resop = rop.UNICODESETITEM + self.metainterp.execute_and_record(resop, None, box, ibox, vbox) + def store_int_box(self, frame_pos, pos, miframe, i, jitframe_pos): box = self.get_box_value(frame_pos, pos, jitframe_pos, INT) if box is None: diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -3,7 +3,7 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.resoperation import rop -from rpython.jit.metainterp.history import ConstInt +from rpython.jit.metainterp.history import ConstInt, INT from rpython.jit.codewriter import heaptracker from rpython.jit.resume import rescode @@ -23,6 +23,10 @@ for fielddescr, encoded_field_pos in fields.iteritems(): reader.setfield_gc(val, encoded_field_pos, fielddescr) + def populate_fields_boxes(self, box, reader): + for fielddescr, encoded_field_pos in self.fields.iteritems(): + reader.setfield_gc(box, encoded_field_pos, fielddescr) + class VirtualStruct(BaseVirtualStruct): def __init__(self, pos, descr): self.pos = pos @@ -54,6 +58,33 @@ self.pos = pos self.lgt = lgt self.mode = mode + self.chars = [-1] * lgt + + def populate_fields_boxes(self, box, reader): + for i in range(len(self.chars)): + ch = self.chars[i] + if ch != -1: + vbox = reader.get_box_value(-1, -1, ch, INT) + reader.strsetitem(box, ConstInt(i), vbox, self.mode) + + def populate_fields(self, val, reader): + for i in range(len(self.chars)): + ch = self.chars[i] + if ch != -1: + itemval = reader.getint(ch) + reader.strsetitem(val, i, itemval, self.mode) + + def allocate_box(self, metainterp): + if self.mode == 's': + resop = rop.NEWSTR + else: + resop = rop.NEWUNICODE + return metainterp.execute_and_record(resop, [ConstInt(self.lgt)]) + + def allocate_direct(self, reader, cpu): + if self.mode == 's': + return cpu.bh_newstr(self.lgt) + return cpu.bh_newunicode(self.lgt) class VirtualConcat(BaseVirtual): def __init__(self, pos, left, right, mode): @@ -62,6 +93,12 @@ self.right = right self.mode = mode + def populate_fields_boxes(self, box, reader): + pass + + def populate_fields(self, val, reader): + pass + def allocate_direct(self, reader, cpu): leftval = reader.getref(self.left) rightval = reader.getref(self.right) @@ -74,7 +111,6 @@ return lltype.cast_opaque_ptr(llmemory.GCREF, result) else: xxx - xxx class AbstractResumeReader(object): """ A resume reader that can follow resume until given point. 
Consult @@ -126,6 +162,11 @@ v = VirtualConcat(v_pos, leftpos, rightpos, 'u') self._add_to_virtuals(v, v_pos) + def resume_strsetitem(self, v_pos, index, sourcepos): + v = self.virtuals[v_pos] + assert isinstance(v, VirtualStr) + v.chars[index] = sourcepos + def resume_new_with_vtable(self, v_pos, c_const_class): const_class = c_const_class.getint() v = VirtualWithVtable(v_pos, const_class) @@ -207,7 +248,10 @@ self.resume_set_pc(pc) pos += 3 elif op == rescode.RESUME_NEWSTR: - xxx + v_pos = self.read_short(pos + 1) + lgt = self.read(pos + 3) + self.resume_newstr(v_pos, lgt) + pos += 4 elif op == rescode.RESUME_NEWUNICODE: v_pos = self.read_short(pos + 1) lgt = self.read(pos + 3) @@ -221,6 +265,12 @@ right = self.read_short(pos + 5) self.resume_concatunicode(v_pos, left, right) pos += 7 + elif op == rescode.RESUME_STRSETITEM: + v_pos = self.read_short(pos + 1) + index = self.read(pos + 3) + source = self.read(pos + 4) + self.resume_strsetitem(v_pos, index, source) + pos += 6 else: xxx self.bytecode = None diff --git a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -5,7 +5,7 @@ (UNUSED, ENTER_FRAME, LEAVE_FRAME, RESUME_PUT, RESUME_NEW, RESUME_NEW_WITH_VTABLE, RESUME_SETFIELD_GC, RESUME_SET_PC, RESUME_CLEAR, RESUME_NEWSTR, RESUME_NEWUNICODE, - RESUME_CONCATSTR, RESUME_CONCATUNICODE) = range(13) + RESUME_CONCATSTR, RESUME_CONCATUNICODE, RESUME_STRSETITEM) = range(14) TAGCONST = 0x0 TAGVIRTUAL = 0x2 @@ -89,6 +89,17 @@ self.write_short(v_pos) # XXX byte virtuals? self.write_short(descr.global_descr_index) + def resume_newstr(self, v_pos, lgt): + self.write(RESUME_NEWSTR) + self.write_short(v_pos) # XXX byte virtuals? + self.write(lgt) + + def resume_strsetitem(self, v_pos, index, encoded_source): + self.write(RESUME_STRSETITEM) + self.write_short(v_pos) # XXX byte virtuals? + self.write(index) + self.write_short(encoded_source) + def resume_newunicode(self, v_pos, lgt): self.write(RESUME_NEWUNICODE) self.write_short(v_pos) # XXX byte virtuals? 
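
VirtualStr and VirtualConcat above are read back in two different ways: allocate_direct()/populate_fields() build the real low-level string immediately for the blackhole interpreter, while allocate_box()/populate_fields_boxes() make the tracing metainterp record NEWSTR/STRSETITEM (or the unicode equivalents) so the rebuilt value shows up as ordinary trace operations. The toy sketch below shows that "one description, two builders" split; the class names are invented and this is not the RPython code itself.

    class ToyVirtualStr(object):
        # plays the role of VirtualStr: a description of a string, not a string
        def __init__(self, length, chars):
            self.length = length
            self.chars = chars                   # {index: character}

        def build(self, builder):
            s = builder.newstr(self.length)
            for index, char in sorted(self.chars.items()):
                builder.strsetitem(s, index, char)
            return s

    class DirectBuilder(object):
        # blackhole flavour: produce the value itself, right now
        def newstr(self, length):
            return [u'\x00'] * length
        def strsetitem(self, s, index, char):
            s[index] = char

    class RecordingBuilder(object):
        # tracing flavour: only record the operations that would build it
        def __init__(self):
            self.operations = []
        def newstr(self, length):
            self.operations.append(('newstr', length))
            return len(self.operations) - 1      # a fake "box"
        def strsetitem(self, s, index, char):
            self.operations.append(('strsetitem', s, index, char))

    v = ToyVirtualStr(2, {0: u'o', 1: u'k'})
    assert u''.join(v.build(DirectBuilder())) == u'ok'
    recorder = RecordingBuilder()
    v.build(recorder)
    assert recorder.operations == [('newstr', 2),
                                   ('strsetitem', 0, 0, u'o'),
                                   ('strsetitem', 0, 1, u'k')]
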
diff --git a/rpython/jit/resume/test/test_frontend.py b/rpython/jit/resume/test/test_frontend.py --- a/rpython/jit/resume/test/test_frontend.py +++ b/rpython/jit/resume/test/test_frontend.py @@ -93,6 +93,14 @@ self.history.append(("new", descr)) return "new" + def bh_newstr(self, lgt): + self.history.append(("newstr", lgt)) + return "newstr" + + def bh_strsetitem(self, val, i, char): + assert val == "newstr" + self.history.append(("strsetitem", i, char)) + def bh_setfield_gc_i(self, struct, intval, fielddescr): self.history.append(("setfield_gc_i", struct, intval, fielddescr)) @@ -284,6 +292,43 @@ assert hist == dir_expected or hist == dir_expected2 assert ib.interp.registers_r[0] == "new" + def test_newstr_unicode(self): + jitcode1 = JitCode("jitcode") + jitcode1.global_index = 0 + jitcode1.setup(num_regs_i=0, num_regs_r=1, num_regs_f=0) + builder = ResumeBytecodeBuilder() + builder.enter_frame(-1, jitcode1) + builder.resume_newstr(0, 5) + builder.resume_strsetitem(0, 0, TAGBOX | (13 << 2)) + builder.resume_put(TAGVIRTUAL | (0 << 2), 0, 0) + + metainterp = MockMetaInterp() + metainterp.staticdata = MockStaticData([jitcode1], []) + metainterp.cpu = MockCPU() + metainterp.staticdata.cpu = metainterp.cpu + rd = builder.build() + descr = Descr() + descr.rd_resume_bytecode = ResumeBytecode(rd, []) + descr.rd_bytecode_position = len(rd) + + rebuild_from_resumedata(metainterp, "myframe", descr) + expected = [ + (rop.NEWSTR, [EqConstInt(5)]), + (rop.STRSETITEM, None, AnyBox(), EqConstInt(0), AnyBox()), + (rop.RESUME_PUT, None, AnyBox(), EqConstInt(0), EqConstInt(0)), + ] + assert expected == metainterp.history + ib = FakeInterpBuilder() + blackhole_from_resumedata(ib, metainterp.staticdata, + descr, "myframe") + hist = metainterp.cpu.history + dir_expected = [ + ("newstr", 5), + ("strsetitem", 0, 16) + ] + assert hist == dir_expected + assert ib.interp.registers_r[0] == "newstr" + def test_reconstructing_resume_reader(self): jitcode1 = JitCode("jitcode") jitcode1.global_index = 0 From noreply at buildbot.pypy.org Wed Jan 29 16:13:59 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 29 Jan 2014 16:13:59 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: finish strings enough for test_loop_unroll to pass Message-ID: <20140129151359.8E2BA1C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68993:9f414dbe4f5c Date: 2014-01-29 15:32 +0100 http://bitbucket.org/pypy/pypy/changeset/9f414dbe4f5c/ Log: finish strings enough for test_loop_unroll to pass diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -443,7 +443,8 @@ index = indexbox.getint() varg = self.getvalue(op.getarg(2)) value.setitem(index, varg) - self.optimizer.resumebuilder.strsetitem(value, varg) + self.optimizer.resumebuilder.strsetitem(value, ConstInt(index), + varg) return value.ensure_nonnull() self.emit_operation(op) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -482,7 +482,7 @@ 'RESUME_NEWUNICODE/1', 'RESUME_CONCATSTR/2', 'RESUME_CONCATUNICODE/2', - 'RESUME_STRSETITEM/2', + 'RESUME_STRSETITEM/3', 'RESUME_SETFIELD_GC/2d', 'RESUME_SET_PC/1', 'RESUME_CLEAR/2', diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ 
b/rpython/jit/resume/backend.py @@ -74,8 +74,8 @@ def resume_setfield_gc(self, arg0, arg1, descr): self.deps[arg0].fields[descr] = arg1 - def resume_strsetitem(self, arg0, arg1): - xxx + def resume_strsetitem(self, arg0, index, arg1): + self.deps[arg0].l[index] = arg1 def resume_set_pc(self, pc): pass @@ -116,7 +116,8 @@ elif op.getopnum() == rop.RESUME_CONCATUNICODE: self.resume_concatunicode(op.result, op.getarg(0), op.getarg(1)) elif op.getopnum() == rop.RESUME_STRSETITEM: - self.resume_strsetitem(op.getarg(0), op.getarg(1)) + self.resume_strsetitem(op.getarg(0), op.getarg(1).getint(), + op.getarg(2)) elif not op.is_resume(): pos += 1 continue @@ -214,8 +215,13 @@ v_pos = len(self.virtuals) self.virtuals[op.result] = v_pos leftpos = self.get_box_pos(op.getarg(0)) - rightpos = self.get_box_pos(op.getarg(0)) + rightpos = self.get_box_pos(op.getarg(1)) self.builder.resume_concatunicode(v_pos, leftpos, rightpos) + elif op.getopnum() == rop.RESUME_STRSETITEM: + v_pos = self.virtuals[op.getarg(0)] + index = op.getarg(1).getint() + valpos = self.get_box_pos(op.getarg(2)) + self.builder.resume_strsetitem(v_pos, index, valpos) else: raise Exception("strange operation") diff --git a/rpython/jit/resume/optimizer.py b/rpython/jit/resume/optimizer.py --- a/rpython/jit/resume/optimizer.py +++ b/rpython/jit/resume/optimizer.py @@ -77,10 +77,10 @@ op = ResOperation(mode.RESUME_CONCAT, [leftbox, rightbox], newbox) self.opt._newoperations.append(op) - def strsetitem(self, vstring, varg): + def strsetitem(self, vstring, indexbox, varg): argbox = varg.get_resume_box() op = ResOperation(rop.RESUME_STRSETITEM, [vstring.get_resume_box(), - argbox], None) + indexbox, argbox], None) self.opt._newoperations.append(op) def setfield(self, box, fieldbox, descr): From noreply at buildbot.pypy.org Wed Jan 29 16:14:00 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 29 Jan 2014 16:14:00 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: finish plumbing until we pass a few more tests Message-ID: <20140129151400.A864B1C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68994:4726582b17ae Date: 2014-01-29 16:06 +0100 http://bitbucket.org/pypy/pypy/changeset/4726582b17ae/ Log: finish plumbing until we pass a few more tests diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -5,5 +5,6 @@ * compress the resumedata in the backend -* do escape analysis in the resumeopt.py +* do escape analysis in the resume/optimizer.py +* make_a_counter_per_value got screwed, but a bit no clue what it does diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -499,24 +499,23 @@ def make_a_counter_per_value(self, guard_value_op): assert guard_value_op.getopnum() == rop.GUARD_VALUE + # XXX I have no clue what exactly it does, but we killed failargs + # so i is always 0 now box = guard_value_op.getarg(0) - try: - i = guard_value_op.getfailargs().index(box) - except ValueError: - return # xxx probably very rare + i = 0 + # used to be i = guard_value_op.getfailargs().index(box) + if i > self.CNT_BASE_MASK: + return # probably never, but better safe than sorry + if box.type == history.INT: + cnt = self.CNT_INT + elif box.type == history.REF: + cnt = self.CNT_REF + elif box.type == history.FLOAT: + cnt = self.CNT_FLOAT else: - if i > self.CNT_BASE_MASK: - return # probably never, but better safe than sorry - if box.type == history.INT: - cnt = self.CNT_INT - elif box.type == 
history.REF: - cnt = self.CNT_REF - elif box.type == history.FLOAT: - cnt = self.CNT_FLOAT - else: - assert 0, box.type - assert cnt > self.CNT_BASE_MASK - self._counter = cnt | i + assert 0, box.type + assert cnt > self.CNT_BASE_MASK + self._counter = cnt | i def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd): if self.must_compile(deadframe, metainterp_sd, jitdriver_sd): diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -415,7 +415,6 @@ op = short[i] if op.is_guard(): op = op.clone() - op.setfailargs(None) descr = target_token.resume_at_jump_descr.clone_if_mutable() op.setdescr(descr) short[i] = op @@ -583,7 +582,6 @@ for guard in extra_guards: if guard.is_guard(): descr = target.resume_at_jump_descr.clone_if_mutable() - inliner.inline_descr_inplace(descr) guard.setdescr(descr) self.optimizer.send_extra_operation(guard) diff --git a/rpython/jit/metainterp/test/test_loop_unroll.py b/rpython/jit/metainterp/test/test_loop_unroll.py --- a/rpython/jit/metainterp/test/test_loop_unroll.py +++ b/rpython/jit/metainterp/test/test_loop_unroll.py @@ -8,7 +8,7 @@ enable_opts = ALL_OPTS_NAMES automatic_promotion_result = { - 'int_gt': 1, 'guard_false': 1, 'jump': 1, 'int_add': 3, + 'int_gt': 2, 'guard_false': 2, 'jump': 1, 'int_add': 6, 'guard_value': 1 } diff --git a/rpython/jit/resume/backend.py b/rpython/jit/resume/backend.py --- a/rpython/jit/resume/backend.py +++ b/rpython/jit/resume/backend.py @@ -62,10 +62,10 @@ def resume_new(self, result, descr): self.deps[result] = DepsFields() - def resume_newunicode(self, result, length): + def resume_newstr(self, result, length): self.deps[result] = DepsArray(length) - def resume_concatunicode(self, result, left, right): + def resume_concatstr(self, result, left, right): self.deps[result] = DepsConcat(left, right) def resume_new_with_vtable(self, result, klass): @@ -108,13 +108,13 @@ self.resume_clear(op.getarg(0).getint(), op.getarg(1).getint()) elif op.getopnum() == rop.RESUME_NEWSTR: - xxx + self.resume_newstr(op.result, op.getarg(0).getint()) elif op.getopnum() == rop.RESUME_NEWUNICODE: - self.resume_newunicode(op.result, op.getarg(0).getint()) + self.resume_newstr(op.result, op.getarg(0).getint()) elif op.getopnum() == rop.RESUME_CONCATSTR: - xxx + self.resume_concatstr(op.result, op.getarg(0), op.getarg(1)) elif op.getopnum() == rop.RESUME_CONCATUNICODE: - self.resume_concatunicode(op.result, op.getarg(0), op.getarg(1)) + self.resume_concatstr(op.result, op.getarg(0), op.getarg(1)) elif op.getopnum() == rop.RESUME_STRSETITEM: self.resume_strsetitem(op.getarg(0), op.getarg(1).getint(), op.getarg(2)) @@ -204,13 +204,19 @@ self.builder.resume_clear(op.getarg(0).getint(), op.getarg(1).getint()) elif op.getopnum() == rop.RESUME_NEWSTR: - xxx + v_pos = len(self.virtuals) + self.virtuals[op.result] = v_pos + self.builder.resume_newstr(v_pos, op.getarg(0).getint()) elif op.getopnum() == rop.RESUME_NEWUNICODE: v_pos = len(self.virtuals) self.virtuals[op.result] = v_pos self.builder.resume_newunicode(v_pos, op.getarg(0).getint()) elif op.getopnum() == rop.RESUME_CONCATSTR: - xxx + v_pos = len(self.virtuals) + self.virtuals[op.result] = v_pos + leftpos = self.get_box_pos(op.getarg(0)) + rightpos = self.get_box_pos(op.getarg(1)) + self.builder.resume_concatstr(v_pos, leftpos, rightpos) elif op.getopnum() == rop.RESUME_CONCATUNICODE: v_pos = len(self.virtuals) self.virtuals[op.result] = v_pos 
diff --git a/rpython/jit/resume/frontend.py b/rpython/jit/resume/frontend.py --- a/rpython/jit/resume/frontend.py +++ b/rpython/jit/resume/frontend.py @@ -22,6 +22,7 @@ def finish(self): nextbh = None + curbh = None for frame in self.framestack: curbh = self.bhinterpbuilder.acquire_interp() curbh.nextblackholeinterp = nextbh diff --git a/rpython/jit/resume/reader.py b/rpython/jit/resume/reader.py --- a/rpython/jit/resume/reader.py +++ b/rpython/jit/resume/reader.py @@ -109,8 +109,11 @@ str2 = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), rightval) result = funcptr(str1, str2) return lltype.cast_opaque_ptr(llmemory.GCREF, result) - else: - xxx + funcptr = cic.funcptr_for_oopspec(EffectInfo.OS_STR_CONCAT) + str1 = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), leftval) + str2 = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), rightval) + result = funcptr(str1, str2) + return lltype.cast_opaque_ptr(llmemory.GCREF, result) class AbstractResumeReader(object): """ A resume reader that can follow resume until given point. Consult @@ -258,7 +261,11 @@ self.resume_newunicode(v_pos, lgt) pos += 4 elif op == rescode.RESUME_CONCATSTR: - xxx + v_pos = self.read_short(pos + 1) + left = self.read_short(pos + 3) + right = self.read_short(pos + 5) + self.resume_concatstr(v_pos, left, right) + pos += 7 elif op == rescode.RESUME_CONCATUNICODE: v_pos = self.read_short(pos + 1) left = self.read_short(pos + 3) diff --git a/rpython/jit/resume/rescode.py b/rpython/jit/resume/rescode.py --- a/rpython/jit/resume/rescode.py +++ b/rpython/jit/resume/rescode.py @@ -100,6 +100,12 @@ self.write(index) self.write_short(encoded_source) + def resume_concatstr(self, v_pos, leftpos, rightpos): + self.write(RESUME_CONCATSTR) + self.write_short(v_pos) + self.write_short(leftpos) + self.write_short(rightpos) + def resume_newunicode(self, v_pos, lgt): self.write(RESUME_NEWUNICODE) self.write_short(v_pos) # XXX byte virtuals? 
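
Taken together, the changes above let a string that stayed virtual during tracing be rebuilt from its resume recipe when a guard fails: replay NEWSTR to allocate it, then one STRSETITEM per recorded character. A toy model of that replay step, assuming a simple list-of-tuples trace instead of the real resume bytecode (the names trace, replay and the tag values are made up for illustration):

def replay(trace, runtime_values):
    # trace entries: ('newstr', length) or ('strsetitem', index, source_tag)
    # runtime_values maps source tags to the characters live at the guard
    chars = []
    for entry in trace:
        if entry[0] == 'newstr':            # corresponds to rop.NEWSTR
            chars = ['\x00'] * entry[1]
        elif entry[0] == 'strsetitem':      # corresponds to rop.STRSETITEM
            _, index, source_tag = entry
            chars[index] = runtime_values[source_tag]
    return ''.join(chars)

trace = [('newstr', 3),
         ('strsetitem', 0, 'box0'),
         ('strsetitem', 1, 'box1'),
         ('strsetitem', 2, 'box2')]
print(replay(trace, {'box0': 'h', 'box1': 'i', 'box2': '!'}))   # -> hi!
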
From noreply at buildbot.pypy.org Wed Jan 29 19:43:36 2014
From: noreply at buildbot.pypy.org (fijal)
Date: Wed, 29 Jan 2014 19:43:36 +0100 (CET)
Subject: [pypy-commit] pypy resume-refactor: hack enough at the unrolling to pass the next test
Message-ID: <20140129184336.385861C0962@cobra.cs.uni-duesseldorf.de>

Author: Maciej Fijalkowski
Branch: resume-refactor
Changeset: r68995:d975673661fd
Date: 2014-01-29 18:40 +0100
http://bitbucket.org/pypy/pypy/changeset/d975673661fd/

Log: hack enough at the unrolling to pass the next test

diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py
--- a/rpython/jit/metainterp/optimizeopt/unroll.py
+++ 
b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -527,19 +527,23 @@ def jump_to_already_compiled_trace(self, jumpop): assert jumpop.getopnum() == rop.JUMP - leave_frame_op = self.optimizer._newoperations.pop() - assert leave_frame_op.getopnum() == rop.LEAVE_FRAME + if self.optimizer._newoperations[-1].getopnum() == rop.LEAVE_FRAME: + leave_frame_op = self.optimizer._newoperations.pop() + else: + leave_frame_op = None cell_token = jumpop.getdescr() assert isinstance(cell_token, JitCellToken) if not cell_token.target_tokens: - self.optimizer._newoperations.append(leave_frame_op) + if leave_frame_op is not None: + self.optimizer._newoperations.append(leave_frame_op) return False if not self.inline_short_preamble: assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) - self.optimizer._newoperations.append(leave_frame_op) + if leave_frame_op is not None: + self.optimizer._newoperations.append(leave_frame_op) self.optimizer.send_extra_operation(jumpop) return True @@ -606,11 +610,14 @@ "jumping to preamble instead") assert cell_token.target_tokens[0].virtual_state is None jumpop.setdescr(cell_token.target_tokens[0]) - self.optimizer._newoperations.append(leave_frame_op) + if leave_frame_op is not None: + self.optimizer._newoperations.append(leave_frame_op) self.optimizer.send_extra_operation(jumpop) - self.optimizer._newoperations.append(leave_frame_op) - newop = inliner.inline_op(target.short_preamble[-1]) - self.optimizer.send_extra_operation(newop) + else: + if leave_frame_op is not None: + self.optimizer._newoperations.append(leave_frame_op) + newop = inliner.inline_op(target.short_preamble[-1]) + self.optimizer.send_extra_operation(newop) return True debug_stop('jit-log-virtualstate') self.optimizer._newoperations.append(leave_frame_op) From noreply at buildbot.pypy.org Wed Jan 29 19:43:38 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 29 Jan 2014 19:43:38 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: I think this is the right fix for a test (it still fails, but for unrelated reasons I think) Message-ID: <20140129184338.ADBFE1C0962@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r68997:aa12390e9b43 Date: 2014-01-29 19:42 +0100 http://bitbucket.org/pypy/pypy/changeset/aa12390e9b43/ Log: I think this is the right fix for a test (it still fails, but for unrelated reasons I think) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -842,7 +842,9 @@ self.emit_operation(op) def optimize_RESUME_PUT(self, op): - self.optimizer.resumebuilder.resume_put(op) + if op.getarg(0) in self.optimizer.producer: + self.optimizer.resumebuilder.resume_put(op) + # otherwise we did not emit the operation just yet dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_', diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -82,10 +82,6 @@ found = 0 for op in get_stats().get_all_loops()[0]._all_operations(): if op.getopname() == 'guard_true': - liveboxes = op.getfailargs() - assert len(liveboxes) == 3 - for box in liveboxes: - assert isinstance(box, history.BoxInt) found += 1 assert found == 2 From noreply at buildbot.pypy.org Wed Jan 29 20:38:10 2014 From: noreply at buildbot.pypy.org 
(pjenvey) Date: Wed, 29 Jan 2014 20:38:10 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to py3 Message-ID: <20140129193810.E82B71D2474@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68999:a5ea51eea081 Date: 2014-01-29 11:37 -0800 http://bitbucket.org/pypy/pypy/changeset/a5ea51eea081/ Log: adapt to py3 diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1010,11 +1010,7 @@ import sys d = {} exec('# -*- coding: utf-8 -*-\n\nu = "\xf0\x9f\x92\x8b"', d) - if sys.maxunicode > 65535 and self.maxunicode > 65535: - expected_length = 1 - else: - expected_length = 2 - assert len(d['u']) == expected_length + assert len(d['u']) == 4 class TestOptimizations: From noreply at buildbot.pypy.org Wed Jan 29 20:38:09 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 29 Jan 2014 20:38:09 +0100 (CET) Subject: [pypy-commit] pypy py3k: add maketrans' docs Message-ID: <20140129193809.B33CA1D2474@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r68998:ced0ef825679 Date: 2014-01-28 15:00 -0800 http://bitbucket.org/pypy/pypy/changeset/ced0ef825679/ Log: add maketrans' docs diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -841,6 +841,20 @@ If chars is a str, it will be converted to unicode before stripping """ + def maketrans(): + """str.maketrans(x[, y[, z]]) -> dict (static method) + + Return a translation table usable for str.translate(). If there + is only one argument, it must be a dictionary mapping Unicode + ordinals (integers) or characters to Unicode ordinals, strings + or None. Character keys will be then converted to ordinals. If + there are two arguments, they must be strings of equal length, + and in the resulting dictionary, each character in x will be + mapped to the character at the same position in y. If there is a + third argument, it must be a string, whose characters will be + mapped to None in the result. 
+ """ + def partition(): """S.partition(sep) -> (head, sep, tail) @@ -1114,7 +1128,8 @@ __getnewargs__ = interp2app(W_UnicodeObject.descr_getnewargs, doc=UnicodeDocstrings.__getnewargs__.__doc__), maketrans = interp2app(W_UnicodeObject.descr_maketrans, - as_classmethod=True), + as_classmethod=True, + doc=UnicodeDocstrings.maketrans.__doc__), ) From noreply at buildbot.pypy.org Wed Jan 29 22:50:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 29 Jan 2014 22:50:21 +0100 (CET) Subject: [pypy-commit] pypy default: allow setting dtype.names attribute Message-ID: <20140129215021.D44C61D2480@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69002:991a7649add9 Date: 2014-01-29 16:44 -0500 http://bitbucket.org/pypy/pypy/changeset/991a7649add9/ Log: allow setting dtype.names attribute diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -207,7 +207,7 @@ space.wrap(offset)])) return w_d - def set_fields(self, space, w_fields): + def descr_set_fields(self, space, w_fields): if w_fields == space.w_None: self.fields = None else: @@ -233,19 +233,26 @@ return space.w_None return space.newtuple([space.wrap(name) for name in self.fieldnames]) - def set_names(self, space, w_names): - self.fieldnames = [] - if w_names == space.w_None: - return - else: + def descr_set_names(self, space, w_names): + fieldnames = [] + if w_names != space.w_None: iter = space.iter(w_names) while True: try: - self.fieldnames.append(space.str_w(space.next(iter))) + name = space.str_w(space.next(iter)) except OperationError, e: if not e.match(space, space.w_StopIteration): raise break + if name in fieldnames: + raise OperationError(space.w_ValueError, space.wrap( + "Duplicate field names given.")) + fieldnames.append(name) + self.fieldnames = fieldnames + + def descr_del_names(self, space): + raise OperationError(space.w_AttributeError, space.wrap( + "Cannot delete dtype names attribute")) def descr_get_hasobject(self, space): return space.w_False @@ -321,10 +328,10 @@ self.byteorder = endian fieldnames = space.getitem(w_data, space.wrap(3)) - self.set_names(space, fieldnames) + self.descr_set_names(space, fieldnames) fields = space.getitem(w_data, space.wrap(4)) - self.set_fields(space, fields) + self.descr_set_fields(space, fields) @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY_SWAP): @@ -468,7 +475,9 @@ shape = GetSetProperty(W_Dtype.descr_get_shape), isnative = GetSetProperty(W_Dtype.descr_get_isnative), fields = GetSetProperty(W_Dtype.descr_get_fields), - names = GetSetProperty(W_Dtype.descr_get_names), + names = GetSetProperty(W_Dtype.descr_get_names, + W_Dtype.descr_set_names, + W_Dtype.descr_del_names), hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), descr = GetSetProperty(W_Dtype.descr_get_descr), ) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -948,6 +948,13 @@ assert d.type is void assert d.char == 'V' assert d.names == ("x", "y", "z", "value") + d.names = ('a', 'b', 'c', 'd') + assert d.names == ('a', 'b', 'c', 'd') + exc = raises(ValueError, "d.names = ('a', 'b', 'c', 'c')") + assert exc.value[0] == "Duplicate field names given." 
+ exc = raises(AttributeError, 'del d.names') + assert exc.value[0] == "Cannot delete dtype names attribute" + assert d.names == ('a', 'b', 'c', 'd') raises(KeyError, 'd["xyz"]') raises(KeyError, 'd.fields["xyz"]') From noreply at buildbot.pypy.org Wed Jan 29 22:50:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 29 Jan 2014 22:50:20 +0100 (CET) Subject: [pypy-commit] pypy default: allow setting array.dtype attribute Message-ID: <20140129215020.AD8C11D2480@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69001:99640221ab47 Date: 2014-01-29 16:29 -0500 http://bitbucket.org/pypy/pypy/changeset/99640221ab47/ Log: allow setting array.dtype attribute diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -367,6 +367,9 @@ return SliceArray(0, strides, backstrides, new_shape, self, orig_array) + def set_dtype(self, space, dtype): + self.dtype = dtype + def argsort(self, space, w_axis): from pypy.module.micronumpy.arrayimpl.sort import argsort_array return argsort_array(self, space, w_axis) diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -173,6 +173,10 @@ raise OperationError(space.w_ValueError, space.wrap( "total size of the array must be unchanged")) + def set_dtype(self, space, dtype): + self.value = self.value.convert_to(space, dtype) + self.dtype = dtype + def reshape(self, space, orig_array, new_shape): return self.set_shape(space, orig_array, new_shape) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -84,6 +84,19 @@ def descr_get_dtype(self, space): return self.implementation.dtype + def descr_set_dtype(self, space, w_dtype): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + if (dtype.get_size() != self.get_dtype().get_size() or + dtype.is_flexible_type() or self.get_dtype().is_flexible_type()): + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) + self.implementation.set_dtype(space, dtype) + + def descr_del_dtype(self, space): + raise OperationError(space.w_AttributeError, space.wrap( + "Cannot delete array dtype")) + def descr_get_ndim(self, space): return space.wrap(len(self.get_shape())) @@ -1290,7 +1303,9 @@ __gt__ = interp2app(W_NDimArray.descr_gt), __ge__ = interp2app(W_NDimArray.descr_ge), - dtype = GetSetProperty(W_NDimArray.descr_get_dtype), + dtype = GetSetProperty(W_NDimArray.descr_get_dtype, + W_NDimArray.descr_set_dtype, + W_NDimArray.descr_del_dtype), shape = GetSetProperty(W_NDimArray.descr_get_shape, W_NDimArray.descr_set_shape), strides = GetSetProperty(W_NDimArray.descr_get_strides), diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -291,7 +291,6 @@ def test_noop_ndmin(self): from numpypy import array - arr = array([1], ndmin=3) assert arr.shape == (1, 1, 1) @@ -321,6 +320,23 @@ e = d.repeat(3, 0) assert e.shape == (9, 4, 0) + def test_dtype_attribute(self): + import numpy as np + a = np.array(40000, 
dtype='uint16') + assert a.dtype == np.uint16 + a.dtype = np.int16 + assert a == -25536 + a = np.array([1, 2, 3, 4, 40000], dtype='uint16') + assert a.dtype == np.uint16 + a.dtype = np.int16 + assert a[4] == -25536 + exc = raises(ValueError, 'a.dtype = None') + assert exc.value[0] == 'new type not compatible with array.' + exc = raises(ValueError, 'a.dtype = np.int32') + assert exc.value[0] == 'new type not compatible with array.' + exc = raises(AttributeError, 'del a.dtype') + assert exc.value[0] == 'Cannot delete array dtype' + def test_buffer(self): import numpy as np a = np.array([1,2,3]) From noreply at buildbot.pypy.org Wed Jan 29 23:00:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Jan 2014 23:00:37 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: The right column should select by default "Donate towards xxx" Message-ID: <20140129220037.25E261D2480@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r472:e71408289534 Date: 2014-01-29 23:00 +0100 http://bitbucket.org/pypy/pypy.org/changeset/e71408289534/ Log: The right column should select by default "Donate towards xxx" if we are visiting the page "xxxdonate.html". Otherwise, we get confusing results when changing the right-column subpage by clicking on "Donate towards xxx": when we click "read proposal", it always resets the right-column back to STM. diff --git a/js/script2.js b/js/script2.js --- a/js/script2.js +++ b/js/script2.js @@ -18,4 +18,10 @@ $.get("don3.html", set_sidebar_html); } -$(document).ready(stm_donate); +if (location.href.indexOf("numpydonate.html") >= 0) + f = numpy_donate; +else if (location.href.indexOf("py3donate.html") >= 0) + f = py3k_donate; +else + f = stm_donate; +$(document).ready(f); From noreply at buildbot.pypy.org Wed Jan 29 23:24:43 2014 From: noreply at buildbot.pypy.org (Aquana) Date: Wed, 29 Jan 2014 23:24:43 +0100 (CET) Subject: [pypy-commit] pypy prepare-split: moved py to pypy._py Message-ID: <20140129222443.D1C921C0962@cobra.cs.uni-duesseldorf.de> Author: Alexander Hesse Branch: prepare-split Changeset: r69003:e56516e8649a Date: 2014-01-29 23:23 +0100 http://bitbucket.org/pypy/pypy/changeset/e56516e8649a/ Log: moved py to pypy._py diff --git a/py/__init__.py b/pypy/_py/__init__.py rename from py/__init__.py rename to pypy/_py/__init__.py diff --git a/py/__metainfo.py b/pypy/_py/__metainfo.py rename from py/__metainfo.py rename to pypy/_py/__metainfo.py diff --git a/py/_apipkg.py b/pypy/_py/_apipkg.py rename from py/_apipkg.py rename to pypy/_py/_apipkg.py diff --git a/py/_builtin.py b/pypy/_py/_builtin.py rename from py/_builtin.py rename to pypy/_py/_builtin.py diff --git a/py/_code/__init__.py b/pypy/_py/_code/__init__.py rename from py/_code/__init__.py rename to pypy/_py/_code/__init__.py diff --git a/py/_code/_assertionnew.py b/pypy/_py/_code/_assertionnew.py rename from py/_code/_assertionnew.py rename to pypy/_py/_code/_assertionnew.py diff --git a/py/_code/_assertionold.py b/pypy/_py/_code/_assertionold.py rename from py/_code/_assertionold.py rename to pypy/_py/_code/_assertionold.py diff --git a/py/_code/assertion.py b/pypy/_py/_code/assertion.py rename from py/_code/assertion.py rename to pypy/_py/_code/assertion.py diff --git a/py/_code/code.py b/pypy/_py/_code/code.py rename from py/_code/code.py rename to pypy/_py/_code/code.py diff --git a/py/_code/source.py b/pypy/_py/_code/source.py rename from py/_code/source.py rename to pypy/_py/_code/source.py diff --git a/py/_error.py b/pypy/_py/_error.py rename from py/_error.py rename to 
pypy/_py/_error.py diff --git a/py/_iniconfig.py b/pypy/_py/_iniconfig.py rename from py/_iniconfig.py rename to pypy/_py/_iniconfig.py diff --git a/py/_io/__init__.py b/pypy/_py/_io/__init__.py rename from py/_io/__init__.py rename to pypy/_py/_io/__init__.py diff --git a/py/_io/capture.py b/pypy/_py/_io/capture.py rename from py/_io/capture.py rename to pypy/_py/_io/capture.py diff --git a/py/_io/saferepr.py b/pypy/_py/_io/saferepr.py rename from py/_io/saferepr.py rename to pypy/_py/_io/saferepr.py diff --git a/py/_io/terminalwriter.py b/pypy/_py/_io/terminalwriter.py rename from py/_io/terminalwriter.py rename to pypy/_py/_io/terminalwriter.py diff --git a/py/_log/__init__.py b/pypy/_py/_log/__init__.py rename from py/_log/__init__.py rename to pypy/_py/_log/__init__.py diff --git a/py/_log/log.py b/pypy/_py/_log/log.py rename from py/_log/log.py rename to pypy/_py/_log/log.py diff --git a/py/_log/warning.py b/pypy/_py/_log/warning.py rename from py/_log/warning.py rename to pypy/_py/_log/warning.py diff --git a/py/_path/__init__.py b/pypy/_py/_path/__init__.py rename from py/_path/__init__.py rename to pypy/_py/_path/__init__.py diff --git a/py/_path/cacheutil.py b/pypy/_py/_path/cacheutil.py rename from py/_path/cacheutil.py rename to pypy/_py/_path/cacheutil.py diff --git a/py/_path/common.py b/pypy/_py/_path/common.py rename from py/_path/common.py rename to pypy/_py/_path/common.py diff --git a/py/_path/local.py b/pypy/_py/_path/local.py rename from py/_path/local.py rename to pypy/_py/_path/local.py diff --git a/py/_path/svnurl.py b/pypy/_py/_path/svnurl.py rename from py/_path/svnurl.py rename to pypy/_py/_path/svnurl.py diff --git a/py/_path/svnwc.py b/pypy/_py/_path/svnwc.py rename from py/_path/svnwc.py rename to pypy/_py/_path/svnwc.py diff --git a/py/_process/__init__.py b/pypy/_py/_process/__init__.py rename from py/_process/__init__.py rename to pypy/_py/_process/__init__.py diff --git a/py/_process/cmdexec.py b/pypy/_py/_process/cmdexec.py rename from py/_process/cmdexec.py rename to pypy/_py/_process/cmdexec.py diff --git a/py/_process/forkedfunc.py b/pypy/_py/_process/forkedfunc.py rename from py/_process/forkedfunc.py rename to pypy/_py/_process/forkedfunc.py diff --git a/py/_process/killproc.py b/pypy/_py/_process/killproc.py rename from py/_process/killproc.py rename to pypy/_py/_process/killproc.py diff --git a/py/_std.py b/pypy/_py/_std.py rename from py/_std.py rename to pypy/_py/_std.py diff --git a/py/_xmlgen.py b/pypy/_py/_xmlgen.py rename from py/_xmlgen.py rename to pypy/_py/_xmlgen.py diff --git a/py/bin/_findpy.py b/pypy/_py/bin/_findpy.py rename from py/bin/_findpy.py rename to pypy/_py/bin/_findpy.py diff --git a/py/bin/py.test b/pypy/_py/bin/py.test rename from py/bin/py.test rename to pypy/_py/bin/py.test diff --git a/py/test.py b/pypy/_py/test.py rename from py/test.py rename to pypy/_py/test.py From noreply at buildbot.pypy.org Wed Jan 29 23:34:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 29 Jan 2014 23:34:19 +0100 (CET) Subject: [pypy-commit] pypy default: support keepdims arg for array reduce operations Message-ID: <20140129223419.0A4371C0962@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69004:9371233f1468 Date: 2014-01-29 17:26 -0500 http://bitbucket.org/pypy/pypy/changeset/9371233f1468/ Log: support keepdims arg for array reduce operations diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ 
b/pypy/module/micronumpy/interp_numarray.py @@ -961,7 +961,8 @@ def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False, cumulative=False): - def impl(self, space, w_axis=None, w_dtype=None, w_out=None): + @unwrap_spec(keepdims=bool) + def impl(self, space, w_axis=None, w_dtype=None, w_out=None, keepdims=False): if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): @@ -971,7 +972,7 @@ out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce( space, self, promote_to_largest, w_axis, - False, out, w_dtype, cumulative=cumulative) + keepdims, out, w_dtype, cumulative=cumulative) return func_with_new_name(impl, "reduce_%s_impl_%d_%d" % (ufunc_name, promote_to_largest, cumulative)) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -252,6 +252,11 @@ if out: out.set_scalar_value(res) return out + if keepdims: + shape = [1] * len(obj_shape) + out = W_NDimArray.from_shape(space, [1] * len(obj_shape), dtype, w_instance=obj) + out.implementation.setitem(0, res) + return out return res def descr_outer(self, space, __args__): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1399,6 +1399,8 @@ from numpypy import arange, array a = arange(15).reshape(5, 3) assert a.sum() == 105 + assert a.sum(keepdims=True) == 105 + assert a.sum(keepdims=True).shape == (1, 1) assert a.max() == 14 assert array([]).sum() == 0.0 assert array([]).reshape(0, 2).sum() == 0. @@ -1431,6 +1433,8 @@ from numpypy import array, dtype a = array(range(1, 6)) assert a.prod() == 120.0 + assert a.prod(keepdims=True) == 120.0 + assert a.prod(keepdims=True).shape == (1,) assert a[:4].prod() == 24.0 for dt in ['bool', 'int8', 'uint8', 'int16', 'uint16']: a = array([True, False], dtype=dt) @@ -1445,6 +1449,8 @@ from numpypy import array, zeros a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) assert a.max() == 5.7 + assert a.max(keepdims=True) == 5.7 + assert a.max(keepdims=True).shape == (1,) b = array([]) raises(ValueError, "b.max()") assert list(zeros((0, 2)).max(axis=1)) == [] @@ -1458,6 +1464,8 @@ from numpypy import array, zeros a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) assert a.min() == -3.0 + assert a.min(keepdims=True) == -3.0 + assert a.min(keepdims=True).shape == (1,) b = array([]) raises(ValueError, "b.min()") assert list(zeros((0, 2)).min(axis=1)) == [] @@ -1508,6 +1516,8 @@ assert a.all() == False a[0] = 3.0 assert a.all() == True + assert a.all(keepdims=True) == True + assert a.all(keepdims=True).shape == (1,) b = array([]) assert b.all() == True @@ -1515,6 +1525,8 @@ from numpypy import array, zeros a = array(range(5)) assert a.any() == True + assert a.any(keepdims=True) == True + assert a.any(keepdims=True).shape == (1,) b = zeros(5) assert b.any() == False c = array([]) From noreply at buildbot.pypy.org Thu Jan 30 00:08:22 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 30 Jan 2014 00:08:22 +0100 (CET) Subject: [pypy-commit] pypy default: kill unnecessary (as of 23a61cfed4a5) bits Message-ID: <20140129230822.D43D71C00B3@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69005:8e51b6cb4481 Date: 2014-01-29 15:07 -0800 http://bitbucket.org/pypy/pypy/changeset/8e51b6cb4481/ Log: kill unnecessary (as of 23a61cfed4a5) bits diff --git a/rpython/rlib/rstruct/nativefmttable.py 
b/rpython/rlib/rstruct/nativefmttable.py --- a/rpython/rlib/rstruct/nativefmttable.py +++ b/rpython/rlib/rstruct/nativefmttable.py @@ -142,7 +142,7 @@ pack = std.pack_bool unpack = std.unpack_bool else: - pack = std.make_int_packer(size, signed, True) + pack = std.make_int_packer(size, signed) unpack = std.make_int_unpacker(size, signed) native_fmttable[fmtchar] = {'size': size, diff --git a/rpython/rlib/rstruct/standardfmttable.py b/rpython/rlib/rstruct/standardfmttable.py --- a/rpython/rlib/rstruct/standardfmttable.py +++ b/rpython/rlib/rstruct/standardfmttable.py @@ -13,13 +13,6 @@ from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.unroll import unrolling_iterable -# In the CPython struct module, pack() unconsistently accepts inputs -# that are out-of-range or floats instead of ints. Should we emulate -# this? Let's use a flag for now: - -PACK_ACCEPTS_BROKEN_INPUT = True - -# ____________________________________________________________ def pack_pad(fmtiter, count): fmtiter.result.append_multiple_char('\x00', count) @@ -99,12 +92,8 @@ max = r_ulonglong(max) return min, max, accept_method -def make_int_packer(size, signed, cpython_checks_range, _memo={}): - if cpython_checks_range: - check_range = True - else: - check_range = not PACK_ACCEPTS_BROKEN_INPUT - key = (size, signed, check_range) +def make_int_packer(size, signed, _memo={}): + key = size, signed try: return _memo[key] except KeyError: @@ -121,9 +110,8 @@ def pack_int(fmtiter): method = getattr(fmtiter, accept_method) value = method() - if check_range: - if value < min or value > max: - raise StructError(errormsg) + if not min <= value <= max: + raise StructError(errormsg) if fmtiter.bigendian: for i in unroll_revrange_size: x = (value >> (8*i)) & 0xff @@ -237,8 +225,8 @@ for c, size in [('b', 1), ('h', 2), ('i', 4), ('l', 4), ('q', 8)]: standard_fmttable[c] = {'size': size, - 'pack': make_int_packer(size, True, True), + 'pack': make_int_packer(size, True), 'unpack': make_int_unpacker(size, True)} standard_fmttable[c.upper()] = {'size': size, - 'pack': make_int_packer(size, False, True), + 'pack': make_int_packer(size, False), 'unpack': make_int_unpacker(size, False)} From noreply at buildbot.pypy.org Thu Jan 30 00:25:27 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 30 Jan 2014 00:25:27 +0100 (CET) Subject: [pypy-commit] pypy default: fix overflow in cast to float16 Message-ID: <20140129232528.001FA1D247F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69006:45c5e6b6f992 Date: 2014-01-29 17:58 -0500 http://bitbucket.org/pypy/pypy/changeset/45c5e6b6f992/ Log: fix overflow in cast to float16 diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -204,6 +204,9 @@ assert array([256], 'B')[0] == 0 assert array([32768], 'h')[0] == -32768 assert array([65536], 'H')[0] == 0 + a = array([65520], dtype='float64') + b = array(a, dtype='float16') + assert b == float('inf') if dtype('l').itemsize == 4: # 32-bit raises(OverflowError, "array([2**32/2], 'i')") raises(OverflowError, "array([2**32], 'I')") diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -404,6 +404,7 @@ if w_item is None: return self.box(0) return self.box(space.int_w(space.call_function(space.w_int, w_item))) + def _coerce(self, space, w_item): return 
self._base_coerce(space, w_item) @@ -979,7 +980,7 @@ def byteswap(self, w_v): value = self.unbox(w_v) - hbits = float_pack(value,2) + hbits = float_pack(value, 2) swapped = byteswap(rffi.cast(self._STORAGE_T, hbits)) return self.box(float_unpack(r_ulonglong(swapped), 2)) @@ -990,11 +991,14 @@ return float_unpack(r_ulonglong(hbits), 2) def _write(self, storage, i, offset, value): - hbits = rffi.cast(self._STORAGE_T, float_pack(value, 2)) + try: + hbits = float_pack(value, 2) + except OverflowError: + hbits = float_pack(rfloat.INFINITY, 2) if not self.native: hbits = byteswap(hbits) raw_storage_setitem(storage, i + offset, - rffi.cast(self._STORAGE_T, hbits)) + rffi.cast(self._STORAGE_T, hbits)) class Float32(BaseType, Float): T = rffi.FLOAT From noreply at buildbot.pypy.org Thu Jan 30 00:25:29 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 30 Jan 2014 00:25:29 +0100 (CET) Subject: [pypy-commit] pypy default: fix array init from array scalar Message-ID: <20140129232529.56E4E1D247F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69007:caa0da925fad Date: 2014-01-29 18:15 -0500 http://bitbucket.org/pypy/pypy/changeset/caa0da925fad/ Log: fix array init from array scalar diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -72,6 +72,8 @@ is_rec_type = dtype is not None and dtype.is_record_type() if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): return [], [w_iterable] + if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): + return [], [w_iterable] shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) while True: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -319,6 +319,9 @@ d = c.reshape(3, 4, 0) e = d.repeat(3, 0) assert e.shape == (9, 4, 0) + a = array(123) + b = array(a, dtype=float) + assert b == 123.0 def test_dtype_attribute(self): import numpy as np @@ -413,10 +416,6 @@ assert ten == 10 def test_empty(self): - """ - Test that empty() works. 
- """ - from numpypy import empty a = empty(2) a[1] = 1.0 From noreply at buildbot.pypy.org Thu Jan 30 00:25:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 30 Jan 2014 00:25:30 +0100 (CET) Subject: [pypy-commit] pypy default: fix float16 changes Message-ID: <20140129232530.864C81D247F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69008:d4d83c8d386a Date: 2014-01-29 18:22 -0500 http://bitbucket.org/pypy/pypy/changeset/d4d83c8d386a/ Log: fix float16 changes diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -995,10 +995,10 @@ hbits = float_pack(value, 2) except OverflowError: hbits = float_pack(rfloat.INFINITY, 2) + hbits = rffi.cast(self._STORAGE_T, hbits) if not self.native: hbits = byteswap(hbits) - raw_storage_setitem(storage, i + offset, - rffi.cast(self._STORAGE_T, hbits)) + raw_storage_setitem(storage, i + offset, hbits) class Float32(BaseType, Float): T = rffi.FLOAT From noreply at buildbot.pypy.org Thu Jan 30 00:25:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 30 Jan 2014 00:25:31 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140129232531.CE6451D247F@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69009:0ba3660ac64a Date: 2014-01-29 18:24 -0500 http://bitbucket.org/pypy/pypy/changeset/0ba3660ac64a/ Log: merge heads diff --git a/rpython/rlib/rstruct/nativefmttable.py b/rpython/rlib/rstruct/nativefmttable.py --- a/rpython/rlib/rstruct/nativefmttable.py +++ b/rpython/rlib/rstruct/nativefmttable.py @@ -142,7 +142,7 @@ pack = std.pack_bool unpack = std.unpack_bool else: - pack = std.make_int_packer(size, signed, True) + pack = std.make_int_packer(size, signed) unpack = std.make_int_unpacker(size, signed) native_fmttable[fmtchar] = {'size': size, diff --git a/rpython/rlib/rstruct/standardfmttable.py b/rpython/rlib/rstruct/standardfmttable.py --- a/rpython/rlib/rstruct/standardfmttable.py +++ b/rpython/rlib/rstruct/standardfmttable.py @@ -13,13 +13,6 @@ from rpython.rlib.rstruct.error import StructError, StructOverflowError from rpython.rlib.unroll import unrolling_iterable -# In the CPython struct module, pack() unconsistently accepts inputs -# that are out-of-range or floats instead of ints. Should we emulate -# this? 
Let's use a flag for now: - -PACK_ACCEPTS_BROKEN_INPUT = True - -# ____________________________________________________________ def pack_pad(fmtiter, count): fmtiter.result.append_multiple_char('\x00', count) @@ -99,12 +92,8 @@ max = r_ulonglong(max) return min, max, accept_method -def make_int_packer(size, signed, cpython_checks_range, _memo={}): - if cpython_checks_range: - check_range = True - else: - check_range = not PACK_ACCEPTS_BROKEN_INPUT - key = (size, signed, check_range) +def make_int_packer(size, signed, _memo={}): + key = size, signed try: return _memo[key] except KeyError: @@ -121,9 +110,8 @@ def pack_int(fmtiter): method = getattr(fmtiter, accept_method) value = method() - if check_range: - if value < min or value > max: - raise StructError(errormsg) + if not min <= value <= max: + raise StructError(errormsg) if fmtiter.bigendian: for i in unroll_revrange_size: x = (value >> (8*i)) & 0xff @@ -237,8 +225,8 @@ for c, size in [('b', 1), ('h', 2), ('i', 4), ('l', 4), ('q', 8)]: standard_fmttable[c] = {'size': size, - 'pack': make_int_packer(size, True, True), + 'pack': make_int_packer(size, True), 'unpack': make_int_unpacker(size, True)} standard_fmttable[c.upper()] = {'size': size, - 'pack': make_int_packer(size, False, True), + 'pack': make_int_packer(size, False), 'unpack': make_int_unpacker(size, False)} From noreply at buildbot.pypy.org Thu Jan 30 00:40:16 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 30 Jan 2014 00:40:16 +0100 (CET) Subject: [pypy-commit] pypy default: kill along with PACK_ACCEPTS_BROKEN_INPUT Message-ID: <20140129234016.811E91D2481@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69010:c1f256c2c8ad Date: 2014-01-29 15:37 -0800 http://bitbucket.org/pypy/pypy/changeset/c1f256c2c8ad/ Log: kill along with PACK_ACCEPTS_BROKEN_INPUT diff --git a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -3,7 +3,7 @@ from rpython.rlib.rstring import StringBuilder from rpython.rlib.rstruct.error import StructError from rpython.rlib.rstruct.formatiterator import FormatIterator -from rpython.rlib.rstruct.standardfmttable import PACK_ACCEPTS_BROKEN_INPUT + from pypy.interpreter.error import OperationError @@ -45,69 +45,47 @@ self.args_index += 1 return w_obj - if PACK_ACCEPTS_BROKEN_INPUT: - # permissive version - accepts float arguments too + def accept_int_arg(self): + return self._accept_integral("int_w") - def accept_int_arg(self): - return self._accept_integral("int_w") + def accept_uint_arg(self): + return self._accept_integral("uint_w") - def accept_uint_arg(self): - return self._accept_integral("uint_w") + def accept_longlong_arg(self): + return self._accept_integral("r_longlong_w") - def accept_longlong_arg(self): - return self._accept_integral("r_longlong_w") + def accept_ulonglong_arg(self): + return self._accept_integral("r_ulonglong_w") - def accept_ulonglong_arg(self): - return self._accept_integral("r_ulonglong_w") + @specialize.arg(1) + def _accept_integral(self, meth): + space = self.space + w_obj = self.accept_obj_arg() + if (space.isinstance_w(w_obj, space.w_int) or + space.isinstance_w(w_obj, space.w_long)): + w_index = w_obj + else: + w_index = None + w_index_method = space.lookup(w_obj, "__index__") + if w_index_method is not None: + try: + w_index = space.index(w_obj) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + pass + if w_index is None: + w_index = 
self._maybe_float(w_obj) + return getattr(space, meth)(w_index) - @specialize.arg(1) - def _accept_integral(self, meth): - space = self.space - w_obj = self.accept_obj_arg() - if (space.isinstance_w(w_obj, space.w_int) or - space.isinstance_w(w_obj, space.w_long)): - w_index = w_obj - else: - w_index = None - w_index_method = space.lookup(w_obj, "__index__") - if w_index_method is not None: - try: - w_index = space.index(w_obj) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - pass - if w_index is None: - w_index = self._maybe_float(w_obj) - return getattr(space, meth)(w_index) - - def _maybe_float(self, w_obj): - space = self.space - if space.isinstance_w(w_obj, space.w_float): - msg = "struct: integer argument expected, got float" - else: - msg = "integer argument expected, got non-integer" - space.warn(space.wrap(msg), space.w_DeprecationWarning) - return space.int(w_obj) # wrapped float -> wrapped int or long - - else: - # strict version - - def accept_int_arg(self): - w_obj = self.accept_obj_arg() - return self.space.int_w(w_obj) - - def accept_uint_arg(self): - w_obj = self.accept_obj_arg() - return self.space.uint_w(w_obj) - - def accept_longlong_arg(self): - w_obj = self.accept_obj_arg() - return self.space.r_longlong_w(w_obj) - - def accept_ulonglong_arg(self): - w_obj = self.accept_obj_arg() - return self.space.r_ulonglong_w(w_obj) + def _maybe_float(self, w_obj): + space = self.space + if space.isinstance_w(w_obj, space.w_float): + msg = "struct: integer argument expected, got float" + else: + msg = "integer argument expected, got non-integer" + space.warn(space.wrap(msg), space.w_DeprecationWarning) + return space.int(w_obj) # wrapped float -> wrapped int or long def accept_bool_arg(self): w_obj = self.accept_obj_arg() From noreply at buildbot.pypy.org Thu Jan 30 01:34:44 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 30 Jan 2014 01:34:44 +0100 (CET) Subject: [pypy-commit] pypy default: move some low-level stuff out of the annotator Message-ID: <20140130003444.6D2031C0962@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r69011:083be7f23e9b Date: 2014-01-30 00:33 +0000 http://bitbucket.org/pypy/pypy/changeset/083be7f23e9b/ Log: move some low-level stuff out of the annotator diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -14,7 +14,6 @@ SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) -from rpython.rtyper.llannotation import SomeAddress, SomeTypedAddressAccess from rpython.annotator.bookkeeper import getbookkeeper from rpython.flowspace.model import Variable, Constant from rpython.flowspace.operation import op @@ -827,50 +826,6 @@ raise AnnotatorError('add on %r' % pbc) return s_ImpossibleValue -# ____________________________________________________________ -# annotation of low-level types -from rpython.rtyper.llannotation import ( - SomePtr, ll_to_annotation, annotation_to_lltype, lltype_to_annotation) - -class __extend__(pairtype(SomePtr, SomePtr)): - def union((p1, p2)): - assert p1.ll_ptrtype == p2.ll_ptrtype,("mixing of incompatible pointer types: %r, %r" % - (p1.ll_ptrtype, p2.ll_ptrtype)) - return SomePtr(p1.ll_ptrtype) - -class __extend__(pairtype(SomePtr, SomeInteger)): - - def getitem((p, int1)): - example = p.ll_ptrtype._example() - try: - v = example[0] - except IndexError: - return None # impossible value, e.g. 
FixedSizeArray(0) - return ll_to_annotation(v) - getitem.can_only_throw = [] - - def setitem((p, int1), s_value): # just doing checking - example = p.ll_ptrtype._example() - if example[0] is not None: # ignore Void s_value - v_lltype = annotation_to_lltype(s_value) - example[0] = v_lltype._defl() - setitem.can_only_throw = [] - -class __extend__(pairtype(SomePtr, SomeObject)): - def union((p, obj)): - assert False, ("mixing pointer type %r with something else %r" % (p.ll_ptrtype, obj)) - - def getitem((p, obj)): - assert False,"ptr %r getitem index not an int: %r" % (p.ll_ptrtype, obj) - - def setitem((p, obj), s_value): - assert False,"ptr %r setitem index not an int: %r" % (p.ll_ptrtype, obj) - -class __extend__(pairtype(SomeObject, SomePtr)): - def union((obj, p2)): - return pair(p2, obj).union() - - #_________________________________________ # weakrefs @@ -885,60 +840,3 @@ if basedef is None: # no common base class! complain... return SomeObject() return SomeWeakRef(basedef) - -#_________________________________________ -# memory addresses - -class __extend__(pairtype(SomeAddress, SomeAddress)): - def union((s_addr1, s_addr2)): - return SomeAddress() - - def sub((s_addr1, s_addr2)): - if s_addr1.is_null_address() and s_addr2.is_null_address(): - return getbookkeeper().immutablevalue(0) - return SomeInteger() - - def is_((s_addr1, s_addr2)): - assert False, "comparisons with is not supported by addresses" - -class __extend__(pairtype(SomeTypedAddressAccess, SomeTypedAddressAccess)): - def union((s_taa1, s_taa2)): - assert s_taa1.type == s_taa2.type - return s_taa1 - -class __extend__(pairtype(SomeTypedAddressAccess, SomeInteger)): - def getitem((s_taa, s_int)): - return lltype_to_annotation(s_taa.type) - getitem.can_only_throw = [] - - def setitem((s_taa, s_int), s_value): - assert annotation_to_lltype(s_value) is s_taa.type - setitem.can_only_throw = [] - - -class __extend__(pairtype(SomeAddress, SomeInteger)): - def add((s_addr, s_int)): - return SomeAddress() - - def sub((s_addr, s_int)): - return SomeAddress() - -class __extend__(pairtype(SomeAddress, SomeImpossibleValue)): - # need to override this specifically to hide the 'raise UnionError' - # of pairtype(SomeAddress, SomeObject). - def union((s_addr, s_imp)): - return s_addr - -class __extend__(pairtype(SomeImpossibleValue, SomeAddress)): - # need to override this specifically to hide the 'raise UnionError' - # of pairtype(SomeObject, SomeAddress). 
- def union((s_imp, s_addr)): - return s_addr - -class __extend__(pairtype(SomeAddress, SomeObject)): - def union((s_addr, s_obj)): - raise UnionError(s_addr, s_obj) - -class __extend__(pairtype(SomeObject, SomeAddress)): - def union((s_obj, s_addr)): - raise UnionError(s_obj, s_addr) diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -1,11 +1,8 @@ import py from rpython.annotator.model import * -from rpython.rtyper.llannotation import ( - SomePtr, annotation_to_lltype, ll_to_annotation) from rpython.annotator.listdef import ListDef from rpython.translator.translator import TranslationContext -from rpython.rtyper.typesystem import lltype listdef1 = ListDef(None, SomeTuple([SomeInteger(nonneg=True), SomeString()])) @@ -102,73 +99,6 @@ assert not s1.contains(s2) assert s1 != s2 -def test_ll_to_annotation(): - s_z = ll_to_annotation(lltype.Signed._defl()) - s_s = SomeInteger() - s_u = SomeInteger(nonneg=True, unsigned=True) - assert s_z.contains(s_s) - assert not s_z.contains(s_u) - s_uz = ll_to_annotation(lltype.Unsigned._defl()) - assert s_uz.contains(s_u) - assert ll_to_annotation(lltype.Bool._defl()).contains(SomeBool()) - assert ll_to_annotation(lltype.Char._defl()).contains(SomeChar()) - S = lltype.GcStruct('s') - A = lltype.GcArray() - s_p = ll_to_annotation(lltype.malloc(S)) - assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(S) - s_p = ll_to_annotation(lltype.malloc(A, 0)) - assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(A) - -def test_annotation_to_lltype(): - from rpython.rlib.rarithmetic import r_uint, r_singlefloat - s_i = SomeInteger() - s_pos = SomeInteger(nonneg=True) - s_1 = SomeInteger(nonneg=True); s_1.const = 1 - s_m1 = SomeInteger(nonneg=False); s_m1.const = -1 - s_u = SomeInteger(nonneg=True, unsigned=True); - s_u1 = SomeInteger(nonneg=True, unsigned=True); - s_u1.const = r_uint(1) - assert annotation_to_lltype(s_i) == lltype.Signed - assert annotation_to_lltype(s_pos) == lltype.Signed - assert annotation_to_lltype(s_1) == lltype.Signed - assert annotation_to_lltype(s_m1) == lltype.Signed - assert annotation_to_lltype(s_u) == lltype.Unsigned - assert annotation_to_lltype(s_u1) == lltype.Unsigned - assert annotation_to_lltype(SomeBool()) == lltype.Bool - assert annotation_to_lltype(SomeChar()) == lltype.Char - PS = lltype.Ptr(lltype.GcStruct('s')) - s_p = SomePtr(ll_ptrtype=PS) - assert annotation_to_lltype(s_p) == PS - py.test.raises(ValueError, "annotation_to_lltype(si0)") - s_singlefloat = SomeSingleFloat() - s_singlefloat.const = r_singlefloat(0.0) - assert annotation_to_lltype(s_singlefloat) == lltype.SingleFloat - -def test_ll_union(): - PS1 = lltype.Ptr(lltype.GcStruct('s')) - PS2 = lltype.Ptr(lltype.GcStruct('s')) - PS3 = lltype.Ptr(lltype.GcStruct('s3')) - PA1 = lltype.Ptr(lltype.GcArray()) - PA2 = lltype.Ptr(lltype.GcArray()) - - assert unionof(SomePtr(PS1),SomePtr(PS1)) == SomePtr(PS1) - assert unionof(SomePtr(PS1),SomePtr(PS2)) == SomePtr(PS2) - assert unionof(SomePtr(PS1),SomePtr(PS2)) == SomePtr(PS1) - - assert unionof(SomePtr(PA1),SomePtr(PA1)) == SomePtr(PA1) - assert unionof(SomePtr(PA1),SomePtr(PA2)) == SomePtr(PA2) - assert unionof(SomePtr(PA1),SomePtr(PA2)) == SomePtr(PA1) - - assert unionof(SomePtr(PS1),SomeImpossibleValue()) == SomePtr(PS1) - assert unionof(SomeImpossibleValue(), SomePtr(PS1)) == SomePtr(PS1) - - py.test.raises(AssertionError, "unionof(SomePtr(PA1), SomePtr(PS1))") - 
py.test.raises(AssertionError, "unionof(SomePtr(PS1), SomePtr(PS3))") - py.test.raises(AssertionError, "unionof(SomePtr(PS1), SomeInteger())") - py.test.raises(AssertionError, "unionof(SomePtr(PS1), SomeObject())") - py.test.raises(AssertionError, "unionof(SomeInteger(), SomePtr(PS1))") - py.test.raises(AssertionError, "unionof(SomeObject(), SomePtr(PS1))") - def test_nan(): f1 = SomeFloat() f1.const = float("nan") diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -12,7 +12,6 @@ SomePBC, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) -from rpython.rtyper.llannotation import SomeAddress, SomeTypedAddressAccess from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import builtin from rpython.annotator.binaryop import _clone ## XXX where to put this? @@ -824,20 +823,3 @@ return s_None # known to be a dead weakref else: return SomeInstance(self.classdef, can_be_None=True) - -#_________________________________________ -# memory addresses - -from rpython.rtyper.lltypesystem import llmemory - -class __extend__(SomeAddress): - def getattr(self, s_attr): - assert s_attr.is_constant() - assert isinstance(s_attr, SomeString) - assert s_attr.const in llmemory.supported_access_types - return SomeTypedAddressAccess( - llmemory.supported_access_types[s_attr.const]) - getattr.can_only_throw = [] - - def bool(self): - return s_Bool diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -1,9 +1,11 @@ """ Code for annotating low-level thingies. """ +from rpython.tool.pairtype import pair, pairtype from rpython.annotator.model import ( SomeObject, SomeSingleFloat, SomeFloat, SomeLongFloat, SomeChar, - SomeUnicodeCodePoint, SomeInteger, s_None, s_Bool) + SomeUnicodeCodePoint, SomeInteger, SomeString, SomeImpossibleValue, + s_None, s_Bool, UnionError, AnnotatorError) from rpython.rtyper.lltypesystem import lltype, llmemory class SomeAddress(SomeObject): @@ -15,6 +17,17 @@ def is_null_address(self): return self.is_immutable_constant() and not self.const + def getattr(self, s_attr): + assert s_attr.is_constant() + assert isinstance(s_attr, SomeString) + assert s_attr.const in llmemory.supported_access_types + return SomeTypedAddressAccess( + llmemory.supported_access_types[s_attr.const]) + getattr.can_only_throw = [] + + def bool(self): + return s_Bool + class SomeTypedAddressAccess(SomeObject): """This class is used to annotate the intermediate value that appears in expressions of the form: @@ -27,6 +40,63 @@ def can_be_none(self): return False + +class __extend__(pairtype(SomeAddress, SomeAddress)): + def union((s_addr1, s_addr2)): + return SomeAddress() + + def sub((s_addr1, s_addr2)): + from rpython.annotator.bookkeeper import getbookkeeper + if s_addr1.is_null_address() and s_addr2.is_null_address(): + return getbookkeeper().immutablevalue(0) + return SomeInteger() + + def is_((s_addr1, s_addr2)): + assert False, "comparisons with is not supported by addresses" + +class __extend__(pairtype(SomeTypedAddressAccess, SomeTypedAddressAccess)): + def union((s_taa1, s_taa2)): + assert s_taa1.type == s_taa2.type + return s_taa1 + +class __extend__(pairtype(SomeTypedAddressAccess, SomeInteger)): + def getitem((s_taa, s_int)): + return lltype_to_annotation(s_taa.type) + getitem.can_only_throw = [] + + def 
setitem((s_taa, s_int), s_value): + assert annotation_to_lltype(s_value) is s_taa.type + setitem.can_only_throw = [] + + +class __extend__(pairtype(SomeAddress, SomeInteger)): + def add((s_addr, s_int)): + return SomeAddress() + + def sub((s_addr, s_int)): + return SomeAddress() + +class __extend__(pairtype(SomeAddress, SomeImpossibleValue)): + # need to override this specifically to hide the 'raise UnionError' + # of pairtype(SomeAddress, SomeObject). + def union((s_addr, s_imp)): + return s_addr + +class __extend__(pairtype(SomeImpossibleValue, SomeAddress)): + # need to override this specifically to hide the 'raise UnionError' + # of pairtype(SomeObject, SomeAddress). + def union((s_imp, s_addr)): + return s_addr + +class __extend__(pairtype(SomeAddress, SomeObject)): + def union((s_addr, s_obj)): + raise UnionError(s_addr, s_obj) + +class __extend__(pairtype(SomeObject, SomeAddress)): + def union((s_obj, s_addr)): + raise UnionError(s_obj, s_addr) + + class SomePtr(SomeObject): knowntype = lltype._ptr immutable = True @@ -55,6 +125,46 @@ def can_be_none(self): return False +class __extend__(pairtype(SomePtr, SomePtr)): + def union((p1, p2)): + if p1.ll_ptrtype != p2.ll_ptrtype: + raise UnionError(p1, p2) + return SomePtr(p1.ll_ptrtype) + +class __extend__(pairtype(SomePtr, SomeInteger)): + + def getitem((p, int1)): + example = p.ll_ptrtype._example() + try: + v = example[0] + except IndexError: + return None # impossible value, e.g. FixedSizeArray(0) + return ll_to_annotation(v) + getitem.can_only_throw = [] + + def setitem((p, int1), s_value): # just doing checking + example = p.ll_ptrtype._example() + if example[0] is not None: # ignore Void s_value + v_lltype = annotation_to_lltype(s_value) + example[0] = v_lltype._defl() + setitem.can_only_throw = [] + +class __extend__(pairtype(SomePtr, SomeObject)): + def union((p, obj)): + raise UnionError(p, obj) + + def getitem((p, obj)): + raise AnnotatorError("ptr %r getitem index not an int: %r" % + (p.ll_ptrtype, obj)) + + def setitem((p, obj), s_value): + raise AnnotatorError("ptr %r setitem index not an int: %r" % + (p.ll_ptrtype, obj)) + +class __extend__(pairtype(SomeObject, SomePtr)): + def union((obj, p2)): + return pair(p2, obj).union() + annotation_to_ll_map = [ (SomeSingleFloat(), lltype.SingleFloat), From noreply at buildbot.pypy.org Thu Jan 30 03:11:57 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 30 Jan 2014 03:11:57 +0100 (CET) Subject: [pypy-commit] pypy default: implement array.itemset Message-ID: <20140130021157.D115C1C00B3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69012:b23a46d3ff05 Date: 2014-01-29 21:02 -0500 http://bitbucket.org/pypy/pypy/changeset/b23a46d3ff05/ Log: implement array.itemset diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -502,6 +502,15 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) + def descr_itemset(self, space, args_w): + if len(args_w) == 0: + raise OperationError(space.w_ValueError, space.wrap( + "itemset must have at least one argument")) + if len(args_w) != len(self.get_shape()) + 1: + raise OperationError(space.w_ValueError, space.wrap( + "incorrect number of indices for array")) + self.descr_setitem(space, space.newtuple(args_w[:-1]), args_w[-1]) + def descr___array__(self, space, w_dtype=None): if not space.is_none(w_dtype): raise 
OperationError(space.w_NotImplementedError, space.wrap( @@ -642,10 +651,6 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "getfield not implemented yet")) - def descr_itemset(self, space, w_arg): - raise OperationError(space.w_NotImplementedError, space.wrap( - "itemset not implemented yet")) - @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY_SWAP): return self.descr_view(space, @@ -1354,6 +1359,7 @@ flat = GetSetProperty(W_NDimArray.descr_get_flatiter, W_NDimArray.descr_set_flatiter), item = interp2app(W_NDimArray.descr_item), + itemset = interp2app(W_NDimArray.descr_itemset), real = GetSetProperty(W_NDimArray.descr_get_real, W_NDimArray.descr_set_real), imag = GetSetProperty(W_NDimArray.descr_get_imag, diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2822,6 +2822,19 @@ assert b[0] == 1 assert b[1] == 'ab' + def test_itemset(self): + import numpy as np + a = np.array(range(5)) + exc = raises(ValueError, a.itemset) + assert exc.value[0] == 'itemset must have at least one argument' + exc = raises(ValueError, a.itemset, 1, 2, 3) + assert exc.value[0] == 'incorrect number of indices for array' + a.itemset(1, 5) + assert a[1] == 5 + a = np.array(range(6)).reshape(2, 3) + a.itemset(1, 2, 100) + assert a[1, 2] == 100 + def test_index(self): import numpy as np a = np.array([1], np.uint16) From noreply at buildbot.pypy.org Thu Jan 30 06:50:53 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 30 Jan 2014 06:50:53 +0100 (CET) Subject: [pypy-commit] pypy refactor-buffer-api: Fix. Message-ID: <20140130055053.339841C2FFE@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: refactor-buffer-api Changeset: r69013:2f4fc613cc99 Date: 2014-01-30 06:49 +0100 http://bitbucket.org/pypy/pypy/changeset/2f4fc613cc99/ Log: Fix. diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -48,7 +48,7 @@ unicode_to_decimal_w(space, w_value)) else: try: - w_buffer = space.buffer(w_value) + buf = space.buffer_w(w_value) except OperationError, e: if not e.match(space, space.w_TypeError): raise @@ -56,7 +56,6 @@ "long() argument must be a string or a number, not '%T'", w_value) else: - buf = space.interp_w(Buffer, w_buffer) return string_to_w_long(space, w_longtype, buf.as_str()) else: base = space.int_w(w_base) From noreply at buildbot.pypy.org Thu Jan 30 11:13:17 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 30 Jan 2014 11:13:17 +0100 (CET) Subject: [pypy-commit] stmgc c7: add a mode to duhton that removes read-barriers and makes write-barriers Message-ID: <20140130101317.B29B11C0134@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r690:4b722d20b830 Date: 2014-01-30 11:13 +0100 http://bitbucket.org/pypy/stmgc/changeset/4b722d20b830/ Log: add a mode to duhton that removes read-barriers and makes write- barriers like normal GC write-barriers. Meaning there is no conflict detection going on and all pages will be SHARED at all times. There are still the start/stop_transaction calls left but their influence can be reduced by the program. diff --git a/c7/stmsync.c b/c7/stmsync.c --- a/c7/stmsync.c +++ b/c7/stmsync.c @@ -33,6 +33,12 @@ { /* we must have the exclusive lock here and not the colletion lock!! 
*/ + /* XXX: for more than 2 threads, need a way + to signal other threads with need_major_collect + so that they don't leave COLLECT-safe-points + when this flag is set. Otherwise we simply + wait arbitrarily long until all threads reach + COLLECT-safe-points by chance at the same time. */ while (1) { if (!rwticket_wrtrylock(&rw_collection_lock)) break; /* acquired! */ diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -1,7 +1,6 @@ #include #include "duhton.h" -#define DEFAULT_NUM_THREADS 2 int main(int argc, char **argv) { diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -1,10 +1,16 @@ #ifndef _DUHTON_H_ #define _DUHTON_H_ -#include "../c7/core.h" #include #include #include +#include "../c7/core.h" +#include "../c7/list.h" + +#define STM 1 /* hackish removal of all read/write + barriers. synchronization is up to + the program */ +#define DEFAULT_NUM_THREADS 2 /* required by stm-c7 */ struct DuObject_s { @@ -173,8 +179,17 @@ p1 = (typeof(p1))_pop_root()) +#if STM #define _du_read1(p1) stm_read((object_t *)(p1)) #define _du_write1(p1) stm_write((object_t *)(p1)) +#else +#define _du_read1(p1) +#define _du_write1(p1) { \ + if (UNLIKELY(((object_t *)(p1))->stm_flags & GCFLAG_WRITE_BARRIER)) { \ + LIST_APPEND(_STM_TL->old_objects_to_trace, ((object_t *)(p1))); \ + ((object_t *)(p1))->stm_flags &= ~GCFLAG_WRITE_BARRIER; \ + }} +#endif #ifdef NDEBUG From noreply at buildbot.pypy.org Thu Jan 30 11:39:04 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 30 Jan 2014 11:39:04 +0100 (CET) Subject: [pypy-commit] pypy default: Don't add BytesIO to autoflusher Message-ID: <20140130103904.32E381C1059@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69014:40fa4f3a0740 Date: 2014-01-30 11:37 +0100 http://bitbucket.org/pypy/pypy/changeset/40fa4f3a0740/ Log: Don't add BytesIO to autoflusher diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -11,7 +11,7 @@ class W_BytesIO(RStringIO, W_BufferedIOBase): def __init__(self, space): - W_BufferedIOBase.__init__(self, space) + W_BufferedIOBase.__init__(self, space, add_to_autoflusher=False) self.init() def descr_init(self, space, w_initial_bytes=None): diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -44,15 +44,16 @@ class W_IOBase(W_Root): - def __init__(self, space): + def __init__(self, space, add_to_autoflusher=True): # XXX: IOBase thinks it has to maintain its own internal state in # `__IOBase_closed` and call flush() by itself, but it is redundant # with whatever behaviour a non-trivial derived class will implement. 
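The non-STM variant of the _du_write1 macro added to duhton.h in changeset r690 above follows the classic generational-GC write-barrier pattern. A minimal sketch of that pattern, assuming nothing beyond what the hunk shows (GCFLAG_WRITE_BARRIER and old_objects_to_trace are the names used there; the Obj class and write_barrier function are invented for illustration only):

    GCFLAG_WRITE_BARRIER = 0x01
    old_objects_to_trace = []

    class Obj(object):
        def __init__(self):
            self.flags = GCFLAG_WRITE_BARRIER   # set again by the GC after each collection
            self.value = 0

    def write_barrier(obj):
        # First write while the flag is set: remember the object so the GC
        # re-traces it, then clear the flag.  Every later write is only a
        # cheap flag check.
        if obj.flags & GCFLAG_WRITE_BARRIER:
            old_objects_to_trace.append(obj)
            obj.flags &= ~GCFLAG_WRITE_BARRIER

    x = Obj()
    write_barrier(x)        # remembered exactly once
    x.value = 42
    write_barrier(x)        # no-op from here on
    x.value = 43
    assert old_objects_to_trace == [x] and x.value == 43

With STM set to 1 the same macro expands to stm_write() instead, which is where the conflict detection mentioned in the log message comes back in; the sketch only mirrors the STM=0 fast path.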
self.space = space self.w_dict = space.newdict() self.__IOBase_closed = False - self.streamholder = None # needed by AutoFlusher - get_autoflusher(space).add(self) + if add_to_autoflusher: + self.streamholder = None # needed by AutoFlusher + get_autoflusher(space).add(self) def getdict(self, space): return self.w_dict From noreply at buildbot.pypy.org Thu Jan 30 11:39:07 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 30 Jan 2014 11:39:07 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20140130103907.65D821C1059@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69015:203f1a18b7ce Date: 2014-01-30 11:38 +0100 http://bitbucket.org/pypy/pypy/changeset/203f1a18b7ce/ Log: merge diff too long, truncating to 2000 out of 3269 lines diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -426,25 +426,12 @@ Could we use LLVM? ------------------ -In theory yes. But we tried to use it 5 or 6 times already, as a -translation backend or as a JIT backend --- and failed each time. +There is a (static) translation backend using LLVM in the branch +``llvm-translation-backend``. It can translate PyPy with or without the JIT on +Linux. -In more details: using LLVM as a (static) translation backend is -pointless nowadays because you can generate C code and compile it with -clang. (Note that compiling PyPy with clang gives a result that is not -faster than compiling it with gcc.) We might in theory get extra -benefits from LLVM's GC integration, but this requires more work on the -LLVM side before it would be remotely useful. Anyway, it could be -interfaced via a custom primitive in the C code. - -On the other hand, using LLVM as our JIT backend looks interesting as -well --- but again we made an attempt, and it failed: LLVM has no way to -patch the generated machine code. - -So the position of the core PyPy developers is that if anyone wants to -make an N+1'th attempt with LLVM, they are welcome, and will be happy to -provide help in the IRC channel, but they are left with the burden of proof -that (a) it works and (b) it gives important benefits. +Using LLVM as our JIT backend looks interesting as well -- we made an attempt, +but it failed: LLVM has no way to patch the generated machine code. ---------------------- How do I compile PyPy? diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -6,7 +6,7 @@ from errno import EINTR from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, specialize from pypy.interpreter import debug @@ -40,12 +40,11 @@ self.debug_excs = [] def clear(self, space): - # for sys.exc_clear() - self.w_type = space.w_None - self._w_value = space.w_None - self._application_traceback = None - if not we_are_translated(): - del self.debug_excs[:] + # XXX remove this method. The point is that we cannot always + # hack at 'self' to clear w_type and _w_value, because in some + # corner cases the OperationError will be used again: see + # test_interpreter.py:test_with_statement_and_sys_clear. + pass def match(self, space, w_check_class): "Check if this application-level exception matches 'w_check_class'." 
@@ -300,6 +299,10 @@ """ self._application_traceback = traceback + at specialize.memo() +def get_cleared_operation_error(space): + return OperationError(space.w_None, space.w_None) + # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string # formatting with '%' -- in the common case were we don't @@ -371,8 +374,8 @@ class OpErrFmtNoArgs(OperationError): def __init__(self, w_type, value): + self._value = value self.setup(w_type) - self._value = value def get_w_value(self, space): w_value = self._w_value diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -1,5 +1,5 @@ import sys -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from rpython.rlib.unroll import unrolling_iterable from rpython.rlib import jit @@ -217,6 +217,17 @@ if frame: # else, the exception goes nowhere and is lost frame.last_exception = operror + def clear_sys_exc_info(self): + # Find the frame out of which sys_exc_info() would return its result, + # and hack this frame's last_exception to become the cleared + # OperationError (which is different from None!). + frame = self.gettopframe_nohidden() + while frame: + if frame.last_exception is not None: + frame.last_exception = get_cleared_operation_error(self.space) + break + frame = self.getnextframe_nohidden(frame) + @jit.dont_look_inside def settrace(self, w_func): """Set the global trace function.""" diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -744,6 +744,9 @@ else: raise OperationError(space.w_TypeError, space.wrap("raise: no active exception to re-raise")) + if operror.w_type is space.w_None: + raise OperationError(space.w_TypeError, + space.wrap("raise: the exception to re-raise was cleared")) # re-raise, no new traceback obj will be attached self.last_exception = operror raise RaiseWithExplicitTraceback(operror) diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -311,3 +311,73 @@ assert str(e) == "maximum recursion depth exceeded" else: assert 0, "should have raised!" 
+ + def test_with_statement_and_sys_clear(self): + import sys + class CM(object): + def __enter__(self): + return self + def __exit__(self, exc_type, exc_value, tb): + sys.exc_clear() + try: + with CM(): + 1 / 0 + raise AssertionError("should not be reached") + except ZeroDivisionError: + pass + + def test_sys_clear_while_handling_exception(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + sys.exc_clear() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + f() + + def test_sys_clear_while_handling_exception_nested(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + h1() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + def h1(): + sys.exc_clear() + f() + + def test_sys_clear_reraise(self): + import sys + def f(): + try: + 1 / 0 + except ZeroDivisionError: + sys.exc_clear() + raise + raises(TypeError, f) diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -15,6 +15,8 @@ #define HAVE_UNICODE #define WITHOUT_COMPLEX #define HAVE_WCHAR_H 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_STAT_H 1 /* PyPy supposes Py_UNICODE == wchar_t */ #define HAVE_USABLE_WCHAR_T 1 diff --git a/pypy/module/cpyext/include/pyport.h b/pypy/module/cpyext/include/pyport.h --- a/pypy/module/cpyext/include/pyport.h +++ b/pypy/module/cpyext/include/pyport.h @@ -64,4 +64,45 @@ # error "Python needs a typedef for Py_uintptr_t in pyport.h." #endif /* HAVE_UINTPTR_T */ +/******************************* + * stat() and fstat() fiddling * + *******************************/ + +/* We expect that stat and fstat exist on most systems. + * It's confirmed on Unix, Mac and Windows. + * If you don't have them, add + * #define DONT_HAVE_STAT + * and/or + * #define DONT_HAVE_FSTAT + * to your pyconfig.h. Python code beyond this should check HAVE_STAT and + * HAVE_FSTAT instead. + * Also + * #define HAVE_SYS_STAT_H + * if exists on your platform, and + * #define HAVE_STAT_H + * if does. 
+ */ +#ifndef DONT_HAVE_STAT +#define HAVE_STAT +#endif + +#ifndef DONT_HAVE_FSTAT +#define HAVE_FSTAT +#endif + +#ifdef RISCOS +#include +#include "unixstuff.h" +#endif + +#ifdef HAVE_SYS_STAT_H +#if defined(PYOS_OS2) && defined(PYCC_GCC) +#include +#endif +#include +#elif defined(HAVE_STAT_H) +#include +#else +#endif + #endif /* Py_PYPORT_H */ diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -367,6 +367,9 @@ return SliceArray(0, strides, backstrides, new_shape, self, orig_array) + def set_dtype(self, space, dtype): + self.dtype = dtype + def argsort(self, space, w_axis): from pypy.module.micronumpy.arrayimpl.sort import argsort_array return argsort_array(self, space, w_axis) diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -173,6 +173,10 @@ raise OperationError(space.w_ValueError, space.wrap( "total size of the array must be unchanged")) + def set_dtype(self, space, dtype): + self.value = self.value.convert_to(space, dtype) + self.dtype = dtype + def reshape(self, space, orig_array, new_shape): return self.set_shape(space, orig_array, new_shape) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -207,7 +207,7 @@ space.wrap(offset)])) return w_d - def set_fields(self, space, w_fields): + def descr_set_fields(self, space, w_fields): if w_fields == space.w_None: self.fields = None else: @@ -233,19 +233,26 @@ return space.w_None return space.newtuple([space.wrap(name) for name in self.fieldnames]) - def set_names(self, space, w_names): - self.fieldnames = [] - if w_names == space.w_None: - return - else: + def descr_set_names(self, space, w_names): + fieldnames = [] + if w_names != space.w_None: iter = space.iter(w_names) while True: try: - self.fieldnames.append(space.str_w(space.next(iter))) + name = space.str_w(space.next(iter)) except OperationError, e: if not e.match(space, space.w_StopIteration): raise break + if name in fieldnames: + raise OperationError(space.w_ValueError, space.wrap( + "Duplicate field names given.")) + fieldnames.append(name) + self.fieldnames = fieldnames + + def descr_del_names(self, space): + raise OperationError(space.w_AttributeError, space.wrap( + "Cannot delete dtype names attribute")) def descr_get_hasobject(self, space): return space.w_False @@ -321,10 +328,10 @@ self.byteorder = endian fieldnames = space.getitem(w_data, space.wrap(3)) - self.set_names(space, fieldnames) + self.descr_set_names(space, fieldnames) fields = space.getitem(w_data, space.wrap(4)) - self.set_fields(space, fields) + self.descr_set_fields(space, fields) @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY_SWAP): @@ -468,7 +475,9 @@ shape = GetSetProperty(W_Dtype.descr_get_shape), isnative = GetSetProperty(W_Dtype.descr_get_isnative), fields = GetSetProperty(W_Dtype.descr_get_fields), - names = GetSetProperty(W_Dtype.descr_get_names), + names = GetSetProperty(W_Dtype.descr_get_names, + W_Dtype.descr_set_names, + W_Dtype.descr_del_names), hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), descr = GetSetProperty(W_Dtype.descr_get_descr), ) diff --git a/pypy/module/micronumpy/interp_numarray.py 
b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -84,6 +84,19 @@ def descr_get_dtype(self, space): return self.implementation.dtype + def descr_set_dtype(self, space, w_dtype): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + if (dtype.get_size() != self.get_dtype().get_size() or + dtype.is_flexible_type() or self.get_dtype().is_flexible_type()): + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) + self.implementation.set_dtype(space, dtype) + + def descr_del_dtype(self, space): + raise OperationError(space.w_AttributeError, space.wrap( + "Cannot delete array dtype")) + def descr_get_ndim(self, space): return space.wrap(len(self.get_shape())) @@ -489,6 +502,15 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) + def descr_itemset(self, space, args_w): + if len(args_w) == 0: + raise OperationError(space.w_ValueError, space.wrap( + "itemset must have at least one argument")) + if len(args_w) != len(self.get_shape()) + 1: + raise OperationError(space.w_ValueError, space.wrap( + "incorrect number of indices for array")) + self.descr_setitem(space, space.newtuple(args_w[:-1]), args_w[-1]) + def descr___array__(self, space, w_dtype=None): if not space.is_none(w_dtype): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -629,10 +651,6 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "getfield not implemented yet")) - def descr_itemset(self, space, w_arg): - raise OperationError(space.w_NotImplementedError, space.wrap( - "itemset not implemented yet")) - @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY_SWAP): return self.descr_view(space, @@ -903,8 +921,8 @@ w_res = self.descr_mul(space, other) assert isinstance(w_res, W_NDimArray) return w_res.descr_sum(space, space.wrap(-1), out) - dtype = interp_ufuncs.find_binop_result_dtype(space, - self.get_dtype(), other.get_dtype()) + dtype = interp_ufuncs.find_binop_result_dtype(space, self.get_dtype(), + other.get_dtype()) if self.get_size() < 1 and other.get_size() < 1: # numpy compatability return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) @@ -912,25 +930,27 @@ out_shape, other_critical_dim = _match_dot_shapes(space, self, other) if out: matches = True - if len(out.get_shape()) != len(out_shape): + if dtype != out.get_dtype(): + matches = False + elif not out.implementation.order == "C": + matches = False + elif len(out.get_shape()) != len(out_shape): matches = False else: for i in range(len(out_shape)): if out.get_shape()[i] != out_shape[i]: matches = False break - if dtype != out.get_dtype(): - matches = False - if not out.implementation.order == "C": - matches = False if not matches: raise OperationError(space.w_ValueError, space.wrap( - 'output array is not acceptable (must have the right type, nr dimensions, and be a C-Array)')) + 'output array is not acceptable (must have the right type, ' + 'nr dimensions, and be a C-Array)')) w_res = out + w_res.fill(space, self.get_dtype().coerce(space, None)) else: w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) # This is the place to add fpypy and blas - return loop.multidim_dot(space, self, other, w_res, dtype, + return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) def descr_mean(self, space, __args__): @@ -946,7 +966,8 @@ def 
_reduce_ufunc_impl(ufunc_name, promote_to_largest=False, cumulative=False): - def impl(self, space, w_axis=None, w_dtype=None, w_out=None): + @unwrap_spec(keepdims=bool) + def impl(self, space, w_axis=None, w_dtype=None, w_out=None, keepdims=False): if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): @@ -956,7 +977,7 @@ out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce( space, self, promote_to_largest, w_axis, - False, out, w_dtype, cumulative=cumulative) + keepdims, out, w_dtype, cumulative=cumulative) return func_with_new_name(impl, "reduce_%s_impl_%d_%d" % (ufunc_name, promote_to_largest, cumulative)) @@ -1288,7 +1309,9 @@ __gt__ = interp2app(W_NDimArray.descr_gt), __ge__ = interp2app(W_NDimArray.descr_ge), - dtype = GetSetProperty(W_NDimArray.descr_get_dtype), + dtype = GetSetProperty(W_NDimArray.descr_get_dtype, + W_NDimArray.descr_set_dtype, + W_NDimArray.descr_del_dtype), shape = GetSetProperty(W_NDimArray.descr_get_shape, W_NDimArray.descr_set_shape), strides = GetSetProperty(W_NDimArray.descr_get_strides), @@ -1336,6 +1359,7 @@ flat = GetSetProperty(W_NDimArray.descr_get_flatiter, W_NDimArray.descr_set_flatiter), item = interp2app(W_NDimArray.descr_item), + itemset = interp2app(W_NDimArray.descr_itemset), real = GetSetProperty(W_NDimArray.descr_get_real, W_NDimArray.descr_set_real), imag = GetSetProperty(W_NDimArray.descr_get_imag, diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -252,8 +252,20 @@ if out: out.set_scalar_value(res) return out + if keepdims: + shape = [1] * len(obj_shape) + out = W_NDimArray.from_shape(space, [1] * len(obj_shape), dtype, w_instance=obj) + out.implementation.setitem(0, res) + return out return res + def descr_outer(self, space, __args__): + return self._outer(space, __args__) + + def _outer(self, space, __args__): + raise OperationError(space.w_ValueError, + space.wrap("outer product only supported for binary functions")) + class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] argcount = 1 @@ -432,6 +444,7 @@ nin = interp_attrproperty("argcount", cls=W_Ufunc), reduce = interp2app(W_Ufunc.descr_reduce), + outer = interp2app(W_Ufunc.descr_outer), ) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -146,8 +146,7 @@ while not obj_iter.done(): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, - calc_dtype=calc_dtype, - ) + calc_dtype=calc_dtype) rval = obj_iter.getitem().convert_to(space, calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval @@ -172,8 +171,7 @@ shapelen = len(obj.get_shape()) while not obj_iter.done(): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, - dtype=calc_dtype, - ) + dtype=calc_dtype) rval = obj_iter.getitem().convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) out_iter.setitem(cur_value) @@ -271,8 +269,7 @@ iter.next() shapelen = len(arr.get_shape()) while not iter.done(): - arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, - ) + arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) w_val = iter.getitem() new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val) if dtype.itemtype.ne(new_best, cur_best): @@ -311,6 +308,7 @@ if i != right_critical_dim] right_skip = range(len(left_shape) - 1) result_skip = 
[len(result.get_shape()) - (len(right_shape) > 1)] + assert result.get_dtype() == dtype outi = result.create_dot_iter(broadcast_shape, result_skip) lefti = left.create_dot_iter(broadcast_shape, left_skip) righti = right.create_dot_iter(broadcast_shape, right_skip) @@ -318,10 +316,10 @@ dot_driver.jit_merge_point(dtype=dtype) lval = lefti.getitem().convert_to(space, dtype) rval = righti.getitem().convert_to(space, dtype) - outval = outi.getitem().convert_to(space, dtype) + outval = outi.getitem() v = dtype.itemtype.mul(lval, rval) - value = dtype.itemtype.add(v, outval).convert_to(space, dtype) - outi.setitem(value) + v = dtype.itemtype.add(v, outval) + outi.setitem(v) outi.next() righti.next() lefti.next() @@ -652,8 +650,8 @@ out_iter = out.create_iter(shape) while not arr_iter.done(): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = dtype.itemtype.round(arr_iter.getitem().convert_to(space, dtype), - decimals) + w_v = arr_iter.getitem().convert_to(space, dtype) + w_v = dtype.itemtype.round(w_v, decimals) out_iter.setitem(w_v) arr_iter.next() out_iter.next() diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -72,6 +72,8 @@ is_rec_type = dtype is not None and dtype.is_record_type() if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): return [], [w_iterable] + if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): + return [], [w_iterable] shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) while True: diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -56,6 +56,10 @@ b = arange(12).reshape(4, 3) c = a.dot(b) assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + c = a.dot(b.astype(float)) + assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + c = a.astype(float).dot(b) + assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() a = arange(24).reshape(2, 3, 4) raises(ValueError, "a.dot(a)") @@ -91,9 +95,11 @@ out = arange(9).reshape(3, 3) c = dot(a, b, out=out) assert (c == out).all() - out = arange(9,dtype=float).reshape(3, 3) + assert (c == [[42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + out = arange(9, dtype=float).reshape(3, 3) exc = raises(ValueError, dot, a, b, out) - assert exc.value[0].find('not acceptable') > 0 + assert exc.value[0] == ('output array is not acceptable (must have the ' + 'right type, nr dimensions, and be a C-Array)') def test_choose_basic(self): from numpypy import array diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -204,6 +204,9 @@ assert array([256], 'B')[0] == 0 assert array([32768], 'h')[0] == -32768 assert array([65536], 'H')[0] == 0 + a = array([65520], dtype='float64') + b = array(a, dtype='float16') + assert b == float('inf') if dtype('l').itemsize == 4: # 32-bit raises(OverflowError, "array([2**32/2], 'i')") raises(OverflowError, "array([2**32], 'I')") @@ -948,6 +951,13 @@ assert d.type is void assert d.char == 'V' assert d.names == ("x", "y", "z", "value") + d.names = ('a', 'b', 'c', 'd') + assert d.names == ('a', 'b', 'c', 'd') + exc = raises(ValueError, "d.names = ('a', 'b', 'c', 'c')") + assert exc.value[0] == 
"Duplicate field names given." + exc = raises(AttributeError, 'del d.names') + assert exc.value[0] == "Cannot delete dtype names attribute" + assert d.names == ('a', 'b', 'c', 'd') raises(KeyError, 'd["xyz"]') raises(KeyError, 'd.fields["xyz"]') diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -291,7 +291,6 @@ def test_noop_ndmin(self): from numpypy import array - arr = array([1], ndmin=3) assert arr.shape == (1, 1, 1) @@ -320,6 +319,26 @@ d = c.reshape(3, 4, 0) e = d.repeat(3, 0) assert e.shape == (9, 4, 0) + a = array(123) + b = array(a, dtype=float) + assert b == 123.0 + + def test_dtype_attribute(self): + import numpy as np + a = np.array(40000, dtype='uint16') + assert a.dtype == np.uint16 + a.dtype = np.int16 + assert a == -25536 + a = np.array([1, 2, 3, 4, 40000], dtype='uint16') + assert a.dtype == np.uint16 + a.dtype = np.int16 + assert a[4] == -25536 + exc = raises(ValueError, 'a.dtype = None') + assert exc.value[0] == 'new type not compatible with array.' + exc = raises(ValueError, 'a.dtype = np.int32') + assert exc.value[0] == 'new type not compatible with array.' + exc = raises(AttributeError, 'del a.dtype') + assert exc.value[0] == 'Cannot delete array dtype' def test_buffer(self): import numpy as np @@ -397,10 +416,6 @@ assert ten == 10 def test_empty(self): - """ - Test that empty() works. - """ - from numpypy import empty a = empty(2) a[1] = 1.0 @@ -1383,6 +1398,8 @@ from numpypy import arange, array a = arange(15).reshape(5, 3) assert a.sum() == 105 + assert a.sum(keepdims=True) == 105 + assert a.sum(keepdims=True).shape == (1, 1) assert a.max() == 14 assert array([]).sum() == 0.0 assert array([]).reshape(0, 2).sum() == 0. 
@@ -1415,6 +1432,8 @@ from numpypy import array, dtype a = array(range(1, 6)) assert a.prod() == 120.0 + assert a.prod(keepdims=True) == 120.0 + assert a.prod(keepdims=True).shape == (1,) assert a[:4].prod() == 24.0 for dt in ['bool', 'int8', 'uint8', 'int16', 'uint16']: a = array([True, False], dtype=dt) @@ -1429,6 +1448,8 @@ from numpypy import array, zeros a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) assert a.max() == 5.7 + assert a.max(keepdims=True) == 5.7 + assert a.max(keepdims=True).shape == (1,) b = array([]) raises(ValueError, "b.max()") assert list(zeros((0, 2)).max(axis=1)) == [] @@ -1442,6 +1463,8 @@ from numpypy import array, zeros a = array([-1.2, 3.4, 5.7, -3.0, 2.7]) assert a.min() == -3.0 + assert a.min(keepdims=True) == -3.0 + assert a.min(keepdims=True).shape == (1,) b = array([]) raises(ValueError, "b.min()") assert list(zeros((0, 2)).min(axis=1)) == [] @@ -1492,6 +1515,8 @@ assert a.all() == False a[0] = 3.0 assert a.all() == True + assert a.all(keepdims=True) == True + assert a.all(keepdims=True).shape == (1,) b = array([]) assert b.all() == True @@ -1499,6 +1524,8 @@ from numpypy import array, zeros a = array(range(5)) assert a.any() == True + assert a.any(keepdims=True) == True + assert a.any(keepdims=True).shape == (1,) b = zeros(5) assert b.any() == False c = array([]) @@ -2795,6 +2822,19 @@ assert b[0] == 1 assert b[1] == 'ab' + def test_itemset(self): + import numpy as np + a = np.array(range(5)) + exc = raises(ValueError, a.itemset) + assert exc.value[0] == 'itemset must have at least one argument' + exc = raises(ValueError, a.itemset, 1, 2, 3) + assert exc.value[0] == 'incorrect number of indices for array' + a.itemset(1, 5) + assert a[1] == 5 + a = np.array(range(6)).reshape(2, 3) + a.itemset(1, 2, 100) + assert a[1, 2] == 100 + def test_index(self): import numpy as np a = np.array([1], np.uint16) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -1052,3 +1052,9 @@ np.array([0, -1, -3, -6, -10])).all() assert (np.divide.accumulate(todivide) == np.array([2., 4., 16.])).all() + + def test_outer(self): + import numpy as np + from numpypy import absolute + exc = raises(ValueError, np.absolute.outer, [-1, -2]) + assert exc.value[0] == 'outer product only supported for binary functions' diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -404,6 +404,7 @@ if w_item is None: return self.box(0) return self.box(space.int_w(space.call_function(space.w_int, w_item))) + def _coerce(self, space, w_item): return self._base_coerce(space, w_item) @@ -979,7 +980,7 @@ def byteswap(self, w_v): value = self.unbox(w_v) - hbits = float_pack(value,2) + hbits = float_pack(value, 2) swapped = byteswap(rffi.cast(self._STORAGE_T, hbits)) return self.box(float_unpack(r_ulonglong(swapped), 2)) @@ -990,11 +991,14 @@ return float_unpack(r_ulonglong(hbits), 2) def _write(self, storage, i, offset, value): - hbits = rffi.cast(self._STORAGE_T, float_pack(value, 2)) + try: + hbits = float_pack(value, 2) + except OverflowError: + hbits = float_pack(rfloat.INFINITY, 2) + hbits = rffi.cast(self._STORAGE_T, hbits) if not self.native: hbits = byteswap(hbits) - raw_storage_setitem(storage, i + offset, - rffi.cast(self._STORAGE_T, hbits)) + raw_storage_setitem(storage, i + offset, hbits) class Float32(BaseType, Float): T = rffi.FLOAT diff --git 
a/pypy/module/struct/formatiterator.py b/pypy/module/struct/formatiterator.py --- a/pypy/module/struct/formatiterator.py +++ b/pypy/module/struct/formatiterator.py @@ -3,7 +3,7 @@ from rpython.rlib.rstring import StringBuilder from rpython.rlib.rstruct.error import StructError from rpython.rlib.rstruct.formatiterator import FormatIterator -from rpython.rlib.rstruct.standardfmttable import PACK_ACCEPTS_BROKEN_INPUT + from pypy.interpreter.error import OperationError @@ -45,69 +45,47 @@ self.args_index += 1 return w_obj - if PACK_ACCEPTS_BROKEN_INPUT: - # permissive version - accepts float arguments too + def accept_int_arg(self): + return self._accept_integral("int_w") - def accept_int_arg(self): - return self._accept_integral("int_w") + def accept_uint_arg(self): + return self._accept_integral("uint_w") - def accept_uint_arg(self): - return self._accept_integral("uint_w") + def accept_longlong_arg(self): + return self._accept_integral("r_longlong_w") - def accept_longlong_arg(self): - return self._accept_integral("r_longlong_w") + def accept_ulonglong_arg(self): + return self._accept_integral("r_ulonglong_w") - def accept_ulonglong_arg(self): - return self._accept_integral("r_ulonglong_w") + @specialize.arg(1) + def _accept_integral(self, meth): + space = self.space + w_obj = self.accept_obj_arg() + if (space.isinstance_w(w_obj, space.w_int) or + space.isinstance_w(w_obj, space.w_long)): + w_index = w_obj + else: + w_index = None + w_index_method = space.lookup(w_obj, "__index__") + if w_index_method is not None: + try: + w_index = space.index(w_obj) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + pass + if w_index is None: + w_index = self._maybe_float(w_obj) + return getattr(space, meth)(w_index) - @specialize.arg(1) - def _accept_integral(self, meth): - space = self.space - w_obj = self.accept_obj_arg() - if (space.isinstance_w(w_obj, space.w_int) or - space.isinstance_w(w_obj, space.w_long)): - w_index = w_obj - else: - w_index = None - w_index_method = space.lookup(w_obj, "__index__") - if w_index_method is not None: - try: - w_index = space.index(w_obj) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - pass - if w_index is None: - w_index = self._maybe_float(w_obj) - return getattr(space, meth)(w_index) - - def _maybe_float(self, w_obj): - space = self.space - if space.isinstance_w(w_obj, space.w_float): - msg = "struct: integer argument expected, got float" - else: - msg = "integer argument expected, got non-integer" - space.warn(space.wrap(msg), space.w_DeprecationWarning) - return space.int(w_obj) # wrapped float -> wrapped int or long - - else: - # strict version - - def accept_int_arg(self): - w_obj = self.accept_obj_arg() - return self.space.int_w(w_obj) - - def accept_uint_arg(self): - w_obj = self.accept_obj_arg() - return self.space.uint_w(w_obj) - - def accept_longlong_arg(self): - w_obj = self.accept_obj_arg() - return self.space.r_longlong_w(w_obj) - - def accept_ulonglong_arg(self): - w_obj = self.accept_obj_arg() - return self.space.r_ulonglong_w(w_obj) + def _maybe_float(self, w_obj): + space = self.space + if space.isinstance_w(w_obj, space.w_float): + msg = "struct: integer argument expected, got float" + else: + msg = "integer argument expected, got non-integer" + space.warn(space.wrap(msg), space.w_DeprecationWarning) + return space.int(w_obj) # wrapped float -> wrapped int or long def accept_bool_arg(self): w_obj = self.accept_obj_arg() diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- 
a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -155,9 +155,7 @@ to exc_info() will return (None,None,None) until another exception is raised and caught in the current thread or the execution stack returns to a frame where another exception is being handled.""" - operror = space.getexecutioncontext().sys_exc_info() - if operror is not None: - operror.clear(space) + space.getexecutioncontext().clear_sys_exc_info() def settrace(space, w_func): """Set the global debug tracing function. It will be called on each diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -10,7 +10,7 @@ SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, - SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, + SomeWeakRef, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) @@ -826,50 +826,6 @@ raise AnnotatorError('add on %r' % pbc) return s_ImpossibleValue -# ____________________________________________________________ -# annotation of low-level types -from rpython.annotator.model import SomePtr -from rpython.annotator.model import ll_to_annotation, annotation_to_lltype - -class __extend__(pairtype(SomePtr, SomePtr)): - def union((p1, p2)): - assert p1.ll_ptrtype == p2.ll_ptrtype,("mixing of incompatible pointer types: %r, %r" % - (p1.ll_ptrtype, p2.ll_ptrtype)) - return SomePtr(p1.ll_ptrtype) - -class __extend__(pairtype(SomePtr, SomeInteger)): - - def getitem((p, int1)): - example = p.ll_ptrtype._example() - try: - v = example[0] - except IndexError: - return None # impossible value, e.g. FixedSizeArray(0) - return ll_to_annotation(v) - getitem.can_only_throw = [] - - def setitem((p, int1), s_value): # just doing checking - example = p.ll_ptrtype._example() - if example[0] is not None: # ignore Void s_value - v_lltype = annotation_to_lltype(s_value) - example[0] = v_lltype._defl() - setitem.can_only_throw = [] - -class __extend__(pairtype(SomePtr, SomeObject)): - def union((p, obj)): - assert False, ("mixing pointer type %r with something else %r" % (p.ll_ptrtype, obj)) - - def getitem((p, obj)): - assert False,"ptr %r getitem index not an int: %r" % (p.ll_ptrtype, obj) - - def setitem((p, obj), s_value): - assert False,"ptr %r setitem index not an int: %r" % (p.ll_ptrtype, obj) - -class __extend__(pairtype(SomeObject, SomePtr)): - def union((obj, p2)): - return pair(p2, obj).union() - - #_________________________________________ # weakrefs @@ -884,62 +840,3 @@ if basedef is None: # no common base class! complain... 
return SomeObject() return SomeWeakRef(basedef) - -#_________________________________________ -# memory addresses - -class __extend__(pairtype(SomeAddress, SomeAddress)): - def union((s_addr1, s_addr2)): - return SomeAddress() - - def sub((s_addr1, s_addr2)): - if s_addr1.is_null_address() and s_addr2.is_null_address(): - return getbookkeeper().immutablevalue(0) - return SomeInteger() - - def is_((s_addr1, s_addr2)): - assert False, "comparisons with is not supported by addresses" - -class __extend__(pairtype(SomeTypedAddressAccess, SomeTypedAddressAccess)): - def union((s_taa1, s_taa2)): - assert s_taa1.type == s_taa2.type - return s_taa1 - -class __extend__(pairtype(SomeTypedAddressAccess, SomeInteger)): - def getitem((s_taa, s_int)): - from rpython.annotator.model import lltype_to_annotation - return lltype_to_annotation(s_taa.type) - getitem.can_only_throw = [] - - def setitem((s_taa, s_int), s_value): - from rpython.annotator.model import annotation_to_lltype - assert annotation_to_lltype(s_value) is s_taa.type - setitem.can_only_throw = [] - - -class __extend__(pairtype(SomeAddress, SomeInteger)): - def add((s_addr, s_int)): - return SomeAddress() - - def sub((s_addr, s_int)): - return SomeAddress() - -class __extend__(pairtype(SomeAddress, SomeImpossibleValue)): - # need to override this specifically to hide the 'raise UnionError' - # of pairtype(SomeAddress, SomeObject). - def union((s_addr, s_imp)): - return s_addr - -class __extend__(pairtype(SomeImpossibleValue, SomeAddress)): - # need to override this specifically to hide the 'raise UnionError' - # of pairtype(SomeObject, SomeAddress). - def union((s_imp, s_addr)): - return s_addr - -class __extend__(pairtype(SomeAddress, SomeObject)): - def union((s_addr, s_obj)): - raise UnionError(s_addr, s_obj) - -class __extend__(pairtype(SomeObject, SomeAddress)): - def union((s_obj, s_addr)): - raise UnionError(s_obj, s_addr) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -8,11 +8,13 @@ from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, - SomeString, SomeChar, SomeFloat, SomePtr, unionof, SomeInstance, SomeDict, - SomeBuiltin, SomePBC, SomeInteger, TLS, SomeAddress, SomeUnicodeCodePoint, - s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple, + SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, + SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, + s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray, SomeConstantType) + SomeWeakRef, SomeByteArray, SomeConstantType) +from rpython.rtyper.llannotation import ( + SomeAddress, SomePtr, SomeLLADTMeth, lltype_to_annotation) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -4,12 +4,12 @@ import sys from rpython.annotator.model import ( - SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, s_Bool, - SomeUnicodeCodePoint, SomeAddress, SomeFloat, unionof, SomeUnicodeString, + SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, + SomeUnicodeCodePoint, SomeFloat, unionof, SomeUnicodeString, 
SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, - SomeOrderedDict, - SomeByteArray, annotation_to_lltype, lltype_to_annotation, - ll_to_annotation, add_knowntypedata, s_ImpossibleValue,) + SomeOrderedDict, SomeByteArray, add_knowntypedata, s_ImpossibleValue,) +from rpython.rtyper.llannotation import ( + SomeAddress, annotation_to_lltype, lltype_to_annotation, ll_to_annotation) from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import description from rpython.flowspace.model import Constant @@ -356,7 +356,7 @@ @analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr) def llmemory_cast_ptr_to_adr(s): - from rpython.annotator.model import SomeInteriorPtr + from rpython.rtyper.llannotation import SomeInteriorPtr assert not isinstance(s, SomeInteriorPtr) return SomeAddress() @@ -389,7 +389,7 @@ # annotation of low-level types -from rpython.annotator.model import SomePtr +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import lltype @analyzer_for(lltype.malloc) diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -624,7 +624,7 @@ except ValueError: pass else: - from rpython.annotator.model import SomePtr + from rpython.rtyper.llannotation import SomePtr assert not isinstance(s_arg, SomePtr) else: # call the constructor diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -569,139 +569,6 @@ self.classdef = classdef # ____________________________________________________________ -# memory addresses - -from rpython.rtyper.lltypesystem import llmemory - - -class SomeAddress(SomeObject): - immutable = True - - def can_be_none(self): - return False - - def is_null_address(self): - return self.is_immutable_constant() and not self.const - - -# The following class is used to annotate the intermediate value that -# appears in expressions of the form: -# addr.signed[offset] and addr.signed[offset] = value - -class SomeTypedAddressAccess(SomeObject): - def __init__(self, type): - self.type = type - - def can_be_none(self): - return False - -#____________________________________________________________ -# annotation of low-level types - -from rpython.rtyper.lltypesystem import lltype - - -class SomePtr(SomeObject): - knowntype = lltype._ptr - immutable = True - - def __init__(self, ll_ptrtype): - assert isinstance(ll_ptrtype, lltype.Ptr) - self.ll_ptrtype = ll_ptrtype - - def can_be_none(self): - return False - - -class SomeInteriorPtr(SomePtr): - def __init__(self, ll_ptrtype): - assert isinstance(ll_ptrtype, lltype.InteriorPtr) - self.ll_ptrtype = ll_ptrtype - - -class SomeLLADTMeth(SomeObject): - immutable = True - - def __init__(self, ll_ptrtype, func): - self.ll_ptrtype = ll_ptrtype - self.func = func - - def can_be_none(self): - return False - - - -annotation_to_ll_map = [ - (SomeSingleFloat(), lltype.SingleFloat), - (s_None, lltype.Void), # also matches SomeImpossibleValue() - (s_Bool, lltype.Bool), - (SomeFloat(), lltype.Float), - (SomeLongFloat(), lltype.LongFloat), - (SomeChar(), lltype.Char), - (SomeUnicodeCodePoint(), lltype.UniChar), - (SomeAddress(), llmemory.Address), -] - - -def annotation_to_lltype(s_val, info=None): - if isinstance(s_val, SomeInteriorPtr): - p = s_val.ll_ptrtype - if 0 in p.offsets: - assert list(p.offsets).count(0) == 1 - return 
lltype.Ptr(lltype.Ptr(p.PARENTTYPE)._interior_ptr_type_with_index(p.TO)) - else: - return lltype.Ptr(p.PARENTTYPE) - if isinstance(s_val, SomePtr): - return s_val.ll_ptrtype - if type(s_val) is SomeInteger: - return lltype.build_number(None, s_val.knowntype) - - for witness, T in annotation_to_ll_map: - if witness.contains(s_val): - return T - if info is None: - info = '' - else: - info = '%s: ' % info - raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( - info, s_val)) - -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) - - -def lltype_to_annotation(T): - try: - s = ll_to_annotation_map.get(T) - except TypeError: - s = None # unhashable T, e.g. a Ptr(GcForwardReference()) - if s is None: - if isinstance(T, lltype.Typedef): - return lltype_to_annotation(T.OF) - if isinstance(T, lltype.Number): - return SomeInteger(knowntype=T._type) - elif isinstance(T, lltype.InteriorPtr): - return SomeInteriorPtr(T) - else: - return SomePtr(T) - else: - return s - - -def ll_to_annotation(v): - if v is None: - # i think we can only get here in the case of void-returning - # functions - return s_None - if isinstance(v, lltype._interior_ptr): - ob = v._parent - if ob is None: - raise RuntimeError - T = lltype.InteriorPtr(lltype.typeOf(ob), v._T, v._offsets) - return SomeInteriorPtr(T) - return lltype_to_annotation(lltype.typeOf(v)) - - -# ____________________________________________________________ class AnnotatorError(Exception): diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -2,10 +2,11 @@ from __future__ import absolute_import import types -from rpython.annotator.model import SomeBool, SomeInteger, SomeString,\ - SomeFloat, SomeList, SomeDict, s_None, \ - SomeObject, SomeInstance, SomeTuple, lltype_to_annotation,\ - unionof, SomeUnicodeString, SomeType, AnnotatorError +from rpython.annotator.model import ( + SomeBool, SomeInteger, SomeString, SomeFloat, SomeList, SomeDict, s_None, + SomeObject, SomeInstance, SomeTuple, unionof, SomeUnicodeString, SomeType, + AnnotatorError) +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator.listdef import ListDef from rpython.annotator.dictdef import DictDef diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -99,73 +99,6 @@ assert not s1.contains(s2) assert s1 != s2 -def test_ll_to_annotation(): - s_z = ll_to_annotation(lltype.Signed._defl()) - s_s = SomeInteger() - s_u = SomeInteger(nonneg=True, unsigned=True) - assert s_z.contains(s_s) - assert not s_z.contains(s_u) - s_uz = ll_to_annotation(lltype.Unsigned._defl()) - assert s_uz.contains(s_u) - assert ll_to_annotation(lltype.Bool._defl()).contains(SomeBool()) - assert ll_to_annotation(lltype.Char._defl()).contains(SomeChar()) - S = lltype.GcStruct('s') - A = lltype.GcArray() - s_p = ll_to_annotation(lltype.malloc(S)) - assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(S) - s_p = ll_to_annotation(lltype.malloc(A, 0)) - assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(A) - -def test_annotation_to_lltype(): - from rpython.rlib.rarithmetic import r_uint, r_singlefloat - s_i = SomeInteger() - s_pos = SomeInteger(nonneg=True) - s_1 = SomeInteger(nonneg=True); s_1.const = 1 - s_m1 = SomeInteger(nonneg=False); s_m1.const = -1 - s_u = SomeInteger(nonneg=True, unsigned=True); - s_u1 = 
SomeInteger(nonneg=True, unsigned=True); - s_u1.const = r_uint(1) - assert annotation_to_lltype(s_i) == lltype.Signed - assert annotation_to_lltype(s_pos) == lltype.Signed - assert annotation_to_lltype(s_1) == lltype.Signed - assert annotation_to_lltype(s_m1) == lltype.Signed - assert annotation_to_lltype(s_u) == lltype.Unsigned - assert annotation_to_lltype(s_u1) == lltype.Unsigned - assert annotation_to_lltype(SomeBool()) == lltype.Bool - assert annotation_to_lltype(SomeChar()) == lltype.Char - PS = lltype.Ptr(lltype.GcStruct('s')) - s_p = SomePtr(ll_ptrtype=PS) - assert annotation_to_lltype(s_p) == PS - py.test.raises(ValueError, "annotation_to_lltype(si0)") - s_singlefloat = SomeSingleFloat() - s_singlefloat.const = r_singlefloat(0.0) - assert annotation_to_lltype(s_singlefloat) == lltype.SingleFloat - -def test_ll_union(): - PS1 = lltype.Ptr(lltype.GcStruct('s')) - PS2 = lltype.Ptr(lltype.GcStruct('s')) - PS3 = lltype.Ptr(lltype.GcStruct('s3')) - PA1 = lltype.Ptr(lltype.GcArray()) - PA2 = lltype.Ptr(lltype.GcArray()) - - assert unionof(SomePtr(PS1),SomePtr(PS1)) == SomePtr(PS1) - assert unionof(SomePtr(PS1),SomePtr(PS2)) == SomePtr(PS2) - assert unionof(SomePtr(PS1),SomePtr(PS2)) == SomePtr(PS1) - - assert unionof(SomePtr(PA1),SomePtr(PA1)) == SomePtr(PA1) - assert unionof(SomePtr(PA1),SomePtr(PA2)) == SomePtr(PA2) - assert unionof(SomePtr(PA1),SomePtr(PA2)) == SomePtr(PA1) - - assert unionof(SomePtr(PS1),SomeImpossibleValue()) == SomePtr(PS1) - assert unionof(SomeImpossibleValue(), SomePtr(PS1)) == SomePtr(PS1) - - py.test.raises(AssertionError, "unionof(SomePtr(PA1), SomePtr(PS1))") - py.test.raises(AssertionError, "unionof(SomePtr(PS1), SomePtr(PS3))") - py.test.raises(AssertionError, "unionof(SomePtr(PS1), SomeInteger())") - py.test.raises(AssertionError, "unionof(SomePtr(PS1), SomeObject())") - py.test.raises(AssertionError, "unionof(SomeInteger(), SomePtr(PS1))") - py.test.raises(AssertionError, "unionof(SomeObject(), SomePtr(PS1))") - def test_nan(): f1 = SomeFloat() f1.const = float("nan") diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -9,7 +9,7 @@ from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, - SomePBC, SomeTypedAddressAccess, SomeAddress, SomeType, s_ImpossibleValue, + SomePBC, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper @@ -758,8 +758,9 @@ raise AnnotatorError("Cannot call len on a pbc") # annotation of low-level types -from rpython.annotator.model import SomePtr, SomeLLADTMeth -from rpython.annotator.model import ll_to_annotation, lltype_to_annotation, annotation_to_lltype +from rpython.rtyper.llannotation import ( + SomePtr, SomeLLADTMeth, ll_to_annotation, lltype_to_annotation, + annotation_to_lltype) class __extend__(SomePtr): @@ -822,20 +823,3 @@ return s_None # known to be a dead weakref else: return SomeInstance(self.classdef, can_be_None=True) - -#_________________________________________ -# memory addresses - -from rpython.rtyper.lltypesystem import llmemory - -class __extend__(SomeAddress): - def getattr(self, s_attr): - assert s_attr.is_constant() - assert isinstance(s_attr, SomeString) - assert s_attr.const in llmemory.supported_access_types - 
return SomeTypedAddressAccess( - llmemory.supported_access_types[s_attr.const]) - getattr.can_only_throw = [] - - def bool(self): - return s_Bool diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.annlowlevel import llhelper, MixLevelHelperAnnotator +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.jit.metainterp import history from rpython.jit.codewriter import heaptracker, longlong @@ -14,7 +15,6 @@ FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr, FLAG_POINTER, FLAG_FLOAT) from rpython.jit.backend.llsupport.asmmemmgr import AsmMemoryManager -from rpython.annotator import model as annmodel from rpython.rlib.unroll import unrolling_iterable @@ -111,8 +111,8 @@ fptr = llhelper(FUNC_TP, realloc_frame) else: FUNC = FUNC_TP.TO - args_s = [annmodel.lltype_to_annotation(ARG) for ARG in FUNC.ARGS] - s_result = annmodel.lltype_to_annotation(FUNC.RESULT) + args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS] + s_result = lltype_to_annotation(FUNC.RESULT) mixlevelann = MixLevelHelperAnnotator(self.rtyper) graph = mixlevelann.getgraph(realloc_frame, args_s, s_result) fptr = mixlevelann.graph2delayed(graph, FUNC) @@ -123,8 +123,8 @@ fptr = llhelper(FUNC_TP, realloc_frame_crash) else: FUNC = FUNC_TP.TO - args_s = [annmodel.lltype_to_annotation(ARG) for ARG in FUNC.ARGS] - s_result = annmodel.lltype_to_annotation(FUNC.RESULT) + args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS] + s_result = lltype_to_annotation(FUNC.RESULT) mixlevelann = MixLevelHelperAnnotator(self.rtyper) graph = mixlevelann.getgraph(realloc_frame_crash, args_s, s_result) fptr = mixlevelann.graph2delayed(graph, FUNC) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -1,6 +1,7 @@ import sys from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator.policy import AnnotatorPolicy from rpython.flowspace.model import Variable, Constant from rpython.jit.metainterp.typesystem import deref @@ -32,7 +33,7 @@ if T == lltype.Ptr(ll_rstr.STR): t = str else: - t = annmodel.lltype_to_annotation(T) + t = lltype_to_annotation(T) return a.typeannotation(t) def annotate(func, values, inline=None, backendoptimize=True, @@ -814,12 +815,12 @@ return rtyper._builtin_func_for_spec_cache[key] except (KeyError, AttributeError): pass - args_s = [annmodel.lltype_to_annotation(v) for v in ll_args] + args_s = [lltype_to_annotation(v) for v in ll_args] if '.' 
not in oopspec_name: # 'newxxx' operations LIST_OR_DICT = ll_res else: LIST_OR_DICT = ll_args[0] - s_result = annmodel.lltype_to_annotation(ll_res) + s_result = lltype_to_annotation(ll_res) impl = setup_extra_builtin(rtyper, oopspec_name, len(args_s), extra) if getattr(impl, 'need_result_type', False): bk = rtyper.annotator.bookkeeper diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -9,6 +9,7 @@ from rpython.rlib.jit import JitDriver, hint, dont_look_inside, promote, virtual_ref from rpython.rlib.rarithmetic import intmask from rpython.rtyper.annlowlevel import hlstr +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, lloperation, rclass, llmemory from rpython.rtyper.rclass import IR_IMMUTABLE, IR_IMMUTABLE_ARRAY, FieldListAccessor @@ -23,7 +24,6 @@ _about_ = promote_virtualizable def compute_result_annotation(self, *args): - from rpython.annotator.model import lltype_to_annotation return lltype_to_annotation(lltype.Void) def specialize_call(self, hop): diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -4,6 +4,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.annlowlevel import (llhelper, MixLevelHelperAnnotator, cast_base_ptr_to_instance, hlstr) +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator import model as annmodel from rpython.rtyper.llinterp import LLException from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache @@ -662,8 +663,8 @@ if not self.cpu.translate_support_code: return llhelper(FUNCPTR, func) FUNC = FUNCPTR.TO - args_s = [annmodel.lltype_to_annotation(ARG) for ARG in FUNC.ARGS] - s_result = annmodel.lltype_to_annotation(FUNC.RESULT) + args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS] + s_result = lltype_to_annotation(FUNC.RESULT) graph = self.annhelper.getgraph(func, args_s, s_result) return self.annhelper.graph2delayed(graph, FUNC) diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -7,6 +7,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.gctransform.framework import ( BaseFrameworkGCTransformer, BaseRootWalker) +from rpython.rtyper.llannotation import SomeAddress from rpython.rtyper.rbuiltin import gen_cast from rpython.translator.unsimplify import copyvar, varoftype from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -215,7 +216,7 @@ # update the global stack counter rffi.stackcounter.stacks_counter += 1 # - s_addr = annmodel.SomeAddress() + s_addr = SomeAddress() s_None = annmodel.s_None self.gc_detach_callback_pieces_ptr = getfn(gc_detach_callback_pieces, [], s_addr) @@ -327,10 +328,10 @@ inline=True) self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None) self.thread_before_fork_ptr = getfn(thread_before_fork, [], - annmodel.SomeAddress()) + SomeAddress()) self.thread_after_fork_ptr = getfn(thread_after_fork, [annmodel.SomeInteger(), - annmodel.SomeAddress()], + SomeAddress()], annmodel.s_None) # # check that the order of the need_*() is correct for us: if we @@ -496,7 
+497,7 @@ # location -- but we check for consistency that ebp points # to a JITFRAME object. from rpython.jit.backend.llsupport.jitframe import STACK_DEPTH_OFS - + tid = self.gc.get_possibly_forwarded_type_id(ebp_in_caller) ll_assert(rffi.cast(lltype.Signed, tid) == rffi.cast(lltype.Signed, self.frame_tid), diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1,4 +1,5 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomeAddress, SomePtr from rpython.rlib import rgc from rpython.rtyper import rmodel, annlowlevel from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup @@ -195,21 +196,11 @@ # the point of this little dance is to not annotate # self.gcdata.static_root_xyz as constants. XXX is it still needed?? data_classdef = bk.getuniqueclassdef(gctypelayout.GCData) - data_classdef.generalize_attr( - 'static_root_start', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'static_root_nongcend', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'static_root_end', - annmodel.SomeAddress()) - data_classdef.generalize_attr( - 'max_type_id', - annmodel.SomeInteger()) - data_classdef.generalize_attr( - 'typeids_z', - annmodel.SomeAddress()) + data_classdef.generalize_attr('static_root_start', SomeAddress()) + data_classdef.generalize_attr('static_root_nongcend', SomeAddress()) + data_classdef.generalize_attr('static_root_end', SomeAddress()) + data_classdef.generalize_attr('max_type_id', annmodel.SomeInteger()) + data_classdef.generalize_attr('typeids_z', SomeAddress()) annhelper = annlowlevel.MixLevelHelperAnnotator(self.translator.rtyper) @@ -277,7 +268,7 @@ from rpython.memory.gc.base import ARRAY_TYPEID_MAP from rpython.memory.gc import inspector - s_gcref = annmodel.SomePtr(llmemory.GCREF) + s_gcref = SomePtr(llmemory.GCREF) gcdata = self.gcdata translator = self.translator @@ -310,20 +301,20 @@ self.collect_ptr = getfn(GCClass.collect.im_func, [s_gc, annmodel.SomeInteger()], annmodel.s_None) self.can_move_ptr = getfn(GCClass.can_move.im_func, - [s_gc, annmodel.SomeAddress()], + [s_gc, SomeAddress()], annmodel.SomeBool()) if hasattr(GCClass, 'shrink_array'): self.shrink_array_ptr = getfn( GCClass.shrink_array.im_func, - [s_gc, annmodel.SomeAddress(), + [s_gc, SomeAddress(), annmodel.SomeInteger(nonneg=True)], annmodel.s_Bool) else: self.shrink_array_ptr = None if hasattr(GCClass, 'heap_stats'): self.heap_stats_ptr = getfn(GCClass.heap_stats.im_func, - [s_gc], annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)), + [s_gc], SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)), minimal_transform=False) self.get_member_index_ptr = getfn( GCClass.get_member_index.im_func, @@ -333,7 +324,7 @@ if hasattr(GCClass, 'writebarrier_before_copy'): self.wb_before_copy_ptr = \ getfn(GCClass.writebarrier_before_copy.im_func, - [s_gc] + [annmodel.SomeAddress()] * 2 + + [s_gc] + [SomeAddress()] * 2 + [annmodel.SomeInteger()] * 3, annmodel.SomeBool()) elif GCClass.needs_write_barrier: raise NotImplementedError("GC needs write barrier, but does not provide writebarrier_before_copy functionality") @@ -421,7 +412,7 @@ if getattr(GCClass, 'obtain_free_space', False): self.obtainfreespace_ptr = getfn(GCClass.obtain_free_space.im_func, [s_gc, annmodel.SomeInteger()], - annmodel.SomeAddress()) + SomeAddress()) if GCClass.moving_gc: self.id_ptr = getfn(GCClass.id.im_func, @@ -457,8 +448,7 @@ minimal_transform=False) 
self.get_typeids_z_ptr = getfn(inspector.get_typeids_z, [s_gc], - annmodel.SomePtr( - lltype.Ptr(rgc.ARRAY_OF_CHAR)), + SomePtr(lltype.Ptr(rgc.ARRAY_OF_CHAR)), minimal_transform=False) self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func, @@ -470,8 +460,7 @@ self.write_barrier_from_array_ptr = None if GCClass.needs_write_barrier: self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func, - [s_gc, - annmodel.SomeAddress()], + [s_gc, SomeAddress()], annmodel.s_None, inline=True) func = getattr(gcdata.gc, 'remember_young_pointer', None) @@ -479,13 +468,12 @@ # func should not be a bound method, but a real function assert isinstance(func, types.FunctionType) self.write_barrier_failing_case_ptr = getfn(func, - [annmodel.SomeAddress()], + [SomeAddress()], annmodel.s_None) func = getattr(GCClass, 'write_barrier_from_array', None) if func is not None: self.write_barrier_from_array_ptr = getfn(func.im_func, - [s_gc, - annmodel.SomeAddress(), + [s_gc, SomeAddress(), annmodel.SomeInteger()], annmodel.s_None, inline=True) @@ -497,7 +485,7 @@ assert isinstance(func, types.FunctionType) self.write_barrier_from_array_failing_case_ptr = \ getfn(func, - [annmodel.SomeAddress()], + [SomeAddress()], annmodel.s_None) diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -1,10 +1,12 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rlib.debug import ll_assert from rpython.rlib.nonconst import NonConstant from rpython.rlib import rgc from rpython.rtyper import rmodel from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.llannotation import SomeAddress from rpython.memory.gctransform.framework import ( BaseFrameworkGCTransformer, BaseRootWalker, sizeofaddr) from rpython.rtyper.rbuiltin import gen_cast @@ -14,11 +16,11 @@ def annotate_walker_functions(self, getfn): self.incr_stack_ptr = getfn(self.root_walker.incr_stack, [annmodel.SomeInteger()], - annmodel.SomeAddress(), + SomeAddress(), inline = True) self.decr_stack_ptr = getfn(self.root_walker.decr_stack, [annmodel.SomeInteger()], - annmodel.SomeAddress(), + SomeAddress(), inline = True) def build_root_walker(self): @@ -211,7 +213,7 @@ # no thread_before_fork_ptr here self.thread_after_fork_ptr = getfn(thread_after_fork, [annmodel.SomeInteger(), - annmodel.SomeAddress()], + SomeAddress()], annmodel.s_None, minimal_transform=False) @@ -241,8 +243,8 @@ def gc_start_fresh_new_state(): shadow_stack_pool.start_fresh_new_state() - s_gcref = annmodel.SomePtr(llmemory.GCREF) - s_addr = annmodel.SomeAddress() + s_gcref = SomePtr(llmemory.GCREF) + s_addr = SomeAddress() self.gc_shadowstackref_new_ptr = getfn(gc_shadowstackref_new, [], s_gcref, minimal_transform=False) diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -9,7 +9,7 @@ from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.ssa import DataFlowFamilyBuilder from rpython.translator.backendopt.constfold import constant_fold_graph -from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper import rmodel from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from 
rpython.rtyper.rtyper import LowLevelOpList @@ -259,8 +259,8 @@ def annotate_helper(self, ll_helper, ll_args, ll_result, inline=False): assert not self.finished_helpers - args_s = map(annmodel.lltype_to_annotation, ll_args) - s_result = annmodel.lltype_to_annotation(ll_result) + args_s = map(lltype_to_annotation, ll_args) + s_result = lltype_to_annotation(ll_result) graph = self.mixlevelannotator.getgraph(ll_helper, args_s, s_result) # the produced graphs does not need to be fully transformed self.need_minimal_transform(graph) diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -3,6 +3,7 @@ from rpython.translator.c import gc from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup from rpython.memory.gctransform import framework, shadowstack from rpython.rtyper.lltypesystem.lloperation import llop, void @@ -98,7 +99,7 @@ from rpython.translator.c.genc import CStandaloneBuilder - s_args = annmodel.SomePtr(lltype.Ptr(ARGS)) + s_args = SomePtr(lltype.Ptr(ARGS)) t = rtype(entrypoint, [s_args], gcname=cls.gcname, taggedpointers=cls.taggedpointers) @@ -827,7 +828,7 @@ from rpython.translator.translator import graphof from rpython.flowspace.model import Constant from rpython.rtyper.lltypesystem import rffi - layoutbuilder = cls.ensure_layoutbuilder(translator) + layoutbuilder = cls.ensure_layoutbuilder(translator) type_id = layoutbuilder.get_type_id(P) # # now fix the do_malloc_fixedsize_clear in the graph of g @@ -1116,7 +1117,7 @@ def test_adr_of_nursery(self): run = self.runner("adr_of_nursery") - res = run([]) + res = run([]) class TestGenerationalNoFullCollectGC(GCTest): # test that nursery is doing its job and that no full collection diff --git a/rpython/rlib/_stacklet_asmgcc.py b/rpython/rlib/_stacklet_asmgcc.py --- a/rpython/rlib/_stacklet_asmgcc.py +++ b/rpython/rlib/_stacklet_asmgcc.py @@ -3,6 +3,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import llhelper, MixLevelHelperAnnotator from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib import _rffi_stacklet as _c @@ -145,7 +146,7 @@ def complete_destrptr(gctransformer): translator = gctransformer.translator mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper) - args_s = [annmodel.lltype_to_annotation(lltype.Ptr(SUSPSTACK))] + args_s = [lltype_to_annotation(lltype.Ptr(SUSPSTACK))] s_result = annmodel.s_None destrptr = mixlevelannotator.delayedfunction(suspstack_destructor, args_s, s_result) diff --git a/rpython/rlib/debug.py b/rpython/rlib/debug.py --- a/rpython/rlib/debug.py +++ b/rpython/rlib/debug.py @@ -229,12 +229,13 @@ def compute_result_annotation(self, s_RESTYPE, s_pythonfunction, *args_s): from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.lltypesystem import lltype assert s_RESTYPE.is_constant() assert s_pythonfunction.is_constant() s_result = s_RESTYPE.const if isinstance(s_result, lltype.LowLevelType): - s_result = annmodel.lltype_to_annotation(s_result) + s_result = lltype_to_annotation(s_result) assert isinstance(s_result, annmodel.SomeObject) return s_result diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py 
+++ b/rpython/rlib/jit_hooks.py @@ -1,4 +1,5 @@ from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import SomePtr, lltype_to_annotation from rpython.rlib.objectmodel import specialize from rpython.rtyper.annlowlevel import (cast_instance_to_base_ptr, cast_base_ptr_to_instance, llstr) @@ -15,7 +16,7 @@ if (isinstance(s_result, annmodel.SomeObject) or s_result is None): return s_result - return annmodel.lltype_to_annotation(s_result) + return lltype_to_annotation(s_result) def specialize_call(self, hop): from rpython.rtyper.lltypesystem import lltype @@ -50,7 +51,7 @@ def emptyval(): return lltype.nullptr(llmemory.GCREF.TO) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def resop_new(no, llargs, llres): from rpython.jit.metainterp.history import ResOperation @@ -61,7 +62,7 @@ res = None return _cast_to_gcref(ResOperation(no, args, res)) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def boxint_new(no): from rpython.jit.metainterp.history import BoxInt return _cast_to_gcref(BoxInt(no)) @@ -74,7 +75,7 @@ def resop_getopname(llop): return llstr(_cast_to_resop(llop).getopname()) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def resop_getarg(llop, no): return _cast_to_gcref(_cast_to_resop(llop).getarg(no)) @@ -82,7 +83,7 @@ def resop_setarg(llop, no, llbox): _cast_to_resop(llop).setarg(no, _cast_to_box(llbox)) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def resop_getresult(llop): return _cast_to_gcref(_cast_to_resop(llop).result) @@ -94,15 +95,15 @@ def box_getint(llbox): return _cast_to_box(llbox).getint() - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def box_clone(llbox): return _cast_to_gcref(_cast_to_box(llbox).clonebox()) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def box_constbox(llbox): return _cast_to_gcref(_cast_to_box(llbox).constbox()) - at register_helper(annmodel.SomePtr(llmemory.GCREF)) + at register_helper(SomePtr(llmemory.GCREF)) def box_nonconstbox(llbox): return _cast_to_gcref(_cast_to_box(llbox).nonconstbox()) diff --git a/rpython/rlib/rawstorage.py b/rpython/rlib/rawstorage.py --- a/rpython/rlib/rawstorage.py +++ b/rpython/rlib/rawstorage.py @@ -2,6 +2,7 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib.rgc import lltype_is_gc from rpython.rlib.objectmodel import specialize @@ -33,7 +34,7 @@ def compute_result_annotation(self, s_TP, s_storage, s_index): assert s_TP.is_constant() - return annmodel.lltype_to_annotation(s_TP.const) + return lltype_to_annotation(s_TP.const) def specialize_call(self, hop): assert hop.args_r[1].lowleveltype == RAW_STORAGE_PTR diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -94,9 +94,9 @@ _about_ = _heap_stats def compute_result_annotation(self): - from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import SomePtr from rpython.memory.gc.base import ARRAY_TYPEID_MAP - return annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)) + return SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP)) def specialize_call(self, hop): 
hop.exception_is_here() @@ -114,8 +114,8 @@ def compute_result_annotation(self, s_TP, s_n=None, s_zero=None): # basically return the same as malloc - from rpython.annotator.builtin import malloc From noreply at buildbot.pypy.org Thu Jan 30 12:29:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Jan 2014 12:29:48 +0100 (CET) Subject: [pypy-commit] pypy default: Bah? Attribute never needed Message-ID: <20140130112948.E614A1C1059@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69016:e636fc70542a Date: 2014-01-30 11:29 +0100 http://bitbucket.org/pypy/pypy/changeset/e636fc70542a/ Log: Bah? Attribute never needed diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -341,7 +341,6 @@ class StreamHolder(object): def __init__(self, w_iobase): self.w_iobase_ref = rweakref.ref(w_iobase) - w_iobase.autoflusher = self def autoflush(self, space): w_iobase = self.w_iobase_ref() From noreply at buildbot.pypy.org Thu Jan 30 12:29:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Jan 2014 12:29:50 +0100 (CET) Subject: [pypy-commit] pypy default: Moved the "weak list" logic out of _cffi_backend/handle.py. Message-ID: <20140130112950.11F741C1059@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69017:1101ea526ac3 Date: 2014-01-30 12:18 +0100 http://bitbucket.org/pypy/pypy/changeset/1101ea526ac3/ Log: Moved the "weak list" logic out of _cffi_backend/handle.py. Use it in module/_io/interp_iobase.py. This should fix the issue #1683. diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -2,58 +2,13 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj -from pypy.module._weakref.interp__weakref import dead_ref from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib import rweaklist -def reduced_value(s): - while True: - divide = s & 1 - s >>= 1 - if not divide: - return s - -# ____________________________________________________________ - - -class CffiHandles: +class CffiHandles(rweaklist.RWeakListMixin): def __init__(self, space): - self.handles = [] - self.look_distance = 0 - - def reserve_next_handle_index(self): - # The reservation ordering done here is tweaked for pypy's - # memory allocator. We look from index 'look_distance'. - # Look_distance increases from 0. But we also look at - # "look_distance/2" or "/4" or "/8", etc. If we find that one - # of these secondary locations is free, we assume it's because - # there was recently a minor collection; so we reset - # look_distance to 0 and start again from the lowest locations. - length = len(self.handles) - for d in range(self.look_distance, length): - if self.handles[d]() is None: - self.look_distance = d + 1 - return d - s = reduced_value(d) - if self.handles[s]() is None: - break - # restart from the beginning - for d in range(0, length): - if self.handles[d]() is None: - self.look_distance = d + 1 - return d - # full! 
extend, but don't use '+=' here - self.handles = self.handles + [dead_ref] * (length // 3 + 5) - self.look_distance = length + 1 - return length - - def store_handle(self, index, content): - self.handles[index] = weakref.ref(content) - - def fetch_handle(self, index): - if 0 <= index < len(self.handles): - return self.handles[index]() - return None + self.initialize() def get(space): return space.fromcache(CffiHandles) diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.rstring import StringBuilder -from rpython.rlib import rweakref +from rpython.rlib import rweakref, rweaklist DEFAULT_BUFFER_SIZE = 8192 @@ -51,7 +51,6 @@ self.space = space self.w_dict = space.newdict() self.__IOBase_closed = False - self.streamholder = None # needed by AutoFlusher get_autoflusher(space).add(self) def getdict(self, space): @@ -114,7 +113,6 @@ space.call_method(self, "flush") finally: self.__IOBase_closed = True - get_autoflusher(space).remove(self) def flush_w(self, space): if self._CLOSED(): @@ -338,54 +336,35 @@ # functions to make sure that all streams are flushed on exit # ------------------------------------------------------------ -class StreamHolder(object): - def __init__(self, w_iobase): - self.w_iobase_ref = rweakref.ref(w_iobase) - def autoflush(self, space): - w_iobase = self.w_iobase_ref() - if w_iobase is not None: - try: - space.call_method(w_iobase, 'flush') - except OperationError: - # Silencing all errors is bad, but getting randomly - # interrupted here is equally as bad, and potentially - # more frequent (because of shutdown issues). - pass - - -class AutoFlusher(object): +class AutoFlusher(rweaklist.RWeakListMixin): def __init__(self, space): - self.streams = {} + self.initialize() def add(self, w_iobase): - assert w_iobase.streamholder is None if rweakref.has_weakref_support(): - holder = StreamHolder(w_iobase) - w_iobase.streamholder = holder - self.streams[holder] = None + self.add_handle(w_iobase) #else: # no support for weakrefs, so ignore and we # will not get autoflushing - def remove(self, w_iobase): - holder = w_iobase.streamholder - if holder is not None: - try: - del self.streams[holder] - except KeyError: - # this can happen in daemon threads - pass - def flush_all(self, space): - while self.streams: - for streamholder in self.streams.keys(): + while True: + handles = self.get_all_handles() + if len(handles) == 0: + break + self.initialize() # reset the state here + for wr in handles: + w_iobase = wr() + if w_iobase is None: + continue try: - del self.streams[streamholder] - except KeyError: - pass # key was removed in the meantime - else: - streamholder.autoflush(space) + space.call_method(w_iobase, 'flush') + except OperationError: + # Silencing all errors is bad, but getting randomly + # interrupted here is equally as bad, and potentially + # more frequent (because of shutdown issues). 
+ pass def get_autoflusher(space): return space.fromcache(AutoFlusher) diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -6,6 +6,7 @@ from rpython.rlib import jit from rpython.rlib.rshrinklist import AbstractShrinkList from rpython.rlib.objectmodel import specialize +from rpython.rlib.rweakref import dead_ref import weakref @@ -144,14 +145,6 @@ # ____________________________________________________________ -class Dummy: - pass -dead_ref = weakref.ref(Dummy()) -for i in range(5): - if dead_ref() is not None: - import gc; gc.collect() -assert dead_ref() is None - class W_WeakrefBase(W_Root): def __init__(w_self, space, w_obj, w_callable): diff --git a/rpython/rlib/rweaklist.py b/rpython/rlib/rweaklist.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rweaklist.py @@ -0,0 +1,60 @@ +import weakref +from rpython.rlib.rweakref import dead_ref + + +def _reduced_value(s): + while True: + divide = s & 1 + s >>= 1 + if not divide: + return s + + +class RWeakListMixin(object): + _mixin_ = True + + def initialize(self): + self.handles = [] + self.look_distance = 0 + + def get_all_handles(self): + return self.handles + + def reserve_next_handle_index(self): + # The reservation ordering done here is tweaked for pypy's + # memory allocator. We look from index 'look_distance'. + # Look_distance increases from 0. But we also look at + # "look_distance/2" or "/4" or "/8", etc. If we find that one + # of these secondary locations is free, we assume it's because + # there was recently a minor collection; so we reset + # look_distance to 0 and start again from the lowest locations. + length = len(self.handles) + for d in range(self.look_distance, length): + if self.handles[d]() is None: + self.look_distance = d + 1 + return d + s = _reduced_value(d) + if self.handles[s]() is None: + break + # restart from the beginning + for d in range(0, length): + if self.handles[d]() is None: + self.look_distance = d + 1 + return d + # full! 
extend, but don't use '+=' here + self.handles = self.handles + [dead_ref] * (length // 3 + 5) + self.look_distance = length + 1 + return length + + def add_handle(self, content): + index = self.reserve_next_handle_index() + self.store_handle(index, content) + return index + + def store_handle(self, index, content): + self.handles[index] = weakref.ref(content) + + def fetch_handle(self, index): + if 0 <= index < len(self.handles): + return self.handles[index]() + return None diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -12,6 +12,14 @@ def has_weakref_support(): return True # returns False if --no-translation-rweakref +class Dummy: + pass +dead_ref = weakref.ref(Dummy()) +for i in range(5): + if dead_ref() is not None: + import gc; gc.collect() +assert dead_ref() is None # a known-to-be-dead weakref object + class RWeakValueDictionary(object): """A dictionary containing weak values.""" diff --git a/rpython/rlib/test/test_rweaklist.py b/rpython/rlib/test/test_rweaklist.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_rweaklist.py @@ -0,0 +1,57 @@ +import gc +from rpython.rlib.rweaklist import RWeakListMixin + + +class A(object): + pass + + +def test_simple(): + a1 = A(); a2 = A() + wlist = RWeakListMixin(); wlist.initialize() + i = wlist.add_handle(a1) + assert i == 0 + i = wlist.reserve_next_handle_index() + assert i == 1 + wlist.store_handle(i, a2) + assert wlist.fetch_handle(0) is a1 + assert wlist.fetch_handle(1) is a2 + # + del a2 + for i in range(5): + gc.collect() + if wlist.fetch_handle(1) is None: + break + else: + raise AssertionError("handle(1) did not disappear") + assert wlist.fetch_handle(0) is a1 + +def test_reuse(): + alist = [A() for i in range(200)] + wlist = RWeakListMixin(); wlist.initialize() + for i in range(200): + j = wlist.reserve_next_handle_index() + assert j == i + wlist.store_handle(i, alist[i]) + # + del alist[1::2] + del alist[1::2] + del alist[1::2] + del alist[1::2] + del alist[1::2] + for i in range(5): + gc.collect() + # + for i in range(200): + a = wlist.fetch_handle(i) + if i % 32 == 0: + assert a is alist[i // 32] + else: + assert a is None + # + maximum = -1 + for i in range(200): + j = wlist.reserve_next_handle_index() + maximum = max(maximum, j) + wlist.store_handle(j, A()) + assert maximum <= 240 From noreply at buildbot.pypy.org Thu Jan 30 12:29:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Jan 2014 12:29:51 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140130112951.3A49C1C1059@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69018:c96af09f9fcd Date: 2014-01-30 12:29 +0100 http://bitbucket.org/pypy/pypy/changeset/c96af09f9fcd/ Log: merge heads diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -11,7 +11,7 @@ class W_BytesIO(RStringIO, W_BufferedIOBase): def __init__(self, space): - W_BufferedIOBase.__init__(self, space) + W_BufferedIOBase.__init__(self, space, add_to_autoflusher=False) self.init() def descr_init(self, space, w_initial_bytes=None): diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -44,14 +44,15 @@ class W_IOBase(W_Root): - def __init__(self, space): + def __init__(self, space, add_to_autoflusher=True): # XXX: IOBase thinks it has to maintain 
its own internal state in # `__IOBase_closed` and call flush() by itself, but it is redundant # with whatever behaviour a non-trivial derived class will implement. self.space = space self.w_dict = space.newdict() self.__IOBase_closed = False - get_autoflusher(space).add(self) + if add_to_autoflusher: + get_autoflusher(space).add(self) def getdict(self, space): return self.w_dict From noreply at buildbot.pypy.org Thu Jan 30 12:56:22 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 30 Jan 2014 12:56:22 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Move function around. Message-ID: <20140130115622.411401C1059@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r69019:7b0ecc2b0176 Date: 2014-01-30 09:28 +0100 http://bitbucket.org/pypy/pypy/changeset/7b0ecc2b0176/ Log: Move function around. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1021,6 +1021,9 @@ else: raise NotImplementedError(op) + def _tmp(self, type_=None): + return VariableRepr(type_, '%tmp{}'.format(next(self.tmp_counter))) + def op_llvm_gcmap(self, result): self.w('{result.V} = bitcast i8* @__gcmap to {result.T}' .format(**locals())) @@ -1037,9 +1040,6 @@ type_ = result.type_.to.repr_type() self.w('{result.V} = alloca {type_}'.format(**locals())) - def _tmp(self, type_=None): - return VariableRepr(type_, '%tmp{}'.format(next(self.tmp_counter))) - # TODO: implement def op_have_debug_prints(self, result): From noreply at buildbot.pypy.org Thu Jan 30 12:56:23 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 30 Jan 2014 12:56:23 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Rename simple_op to binary_op. Message-ID: <20140130115623.857831C1059@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r69020:f6ad0270171c Date: 2014-01-30 11:08 +0100 http://bitbucket.org/pypy/pypy/changeset/f6ad0270171c/ Log: Rename simple_op to binary_op. diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1006,8 +1006,8 @@ opres = get_repr(op.result) opargs = [get_repr(arg) for arg in op.args] if opname in OPS: - simple_op = OPS[opname] - self.w('{opres.V} = {simple_op} {opargs[0].TV}, {opargs[1].V}' + binary_op = OPS[opname] + self.w('{opres.V} = {binary_op} {opargs[0].TV}, {opargs[1].V}' .format(**locals())) elif opname.startswith('cast_') or opname.startswith('truncate_'): self._cast(opres, opargs[0]) From noreply at buildbot.pypy.org Thu Jan 30 12:56:24 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 30 Jan 2014 12:56:24 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Support lshift and rshift operations where the RHS has another type than the LHS. Message-ID: <20140130115624.B29161C1059@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r69021:4f6f9aee9418 Date: 2014-01-30 12:28 +0100 http://bitbucket.org/pypy/pypy/changeset/4f6f9aee9418/ Log: Support lshift and rshift operations where the RHS has another type than the LHS. 
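The background for this fix: LLVM's shl/lshr/ashr instructions require both operands to share one integer type, whereas RPython's lshift/rshift can pair, say, a 64-bit value with an int-typed shift count, so the emitter sign-extends the narrower right-hand side before shifting. Below is only a minimal sketch of that idea in plain Python; the Operand and emit_shift names (and the example SSA names) are invented for illustration and are not part of genllvm.py.

    from collections import namedtuple

    # Stand-in for genllvm's variable representation; illustrative only.
    Operand = namedtuple('Operand', ['name', 'type', 'bitwidth'])

    def emit_shift(op, lhs, rhs, result, emit):
        # shl/lshr/ashr need both operands in the same integer type, so a
        # narrower shift count is widened with sext first.
        assert op in ('shl', 'lshr', 'ashr')
        if rhs.bitwidth != lhs.bitwidth:
            emit('%%tmp = sext %s %s to %s' % (rhs.type, rhs.name, lhs.type))
            rhs = Operand('%tmp', lhs.type, lhs.bitwidth)
        emit('%s = %s %s %s, %s' % (result, op, lhs.type, lhs.name, rhs.name))

    lines = []
    emit_shift('shl', Operand('%x', 'i64', 64), Operand('%n', 'i32', 32),
               '%r', lines.append)
    print('\n'.join(lines))
    # %tmp = sext i32 %n to i64
    # %r = shl i64 %x, %tmp

Note that the actual change below applies the sext only when the right-hand side is a VariableRepr whose bit width differs from the left-hand side's.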
diff --git a/rpython/translator/llvm/genllvm.py b/rpython/translator/llvm/genllvm.py --- a/rpython/translator/llvm/genllvm.py +++ b/rpython/translator/llvm/genllvm.py @@ -1007,6 +1007,15 @@ opargs = [get_repr(arg) for arg in op.args] if opname in OPS: binary_op = OPS[opname] + assert len(opargs) == 2 + if ((opargs[0].type_ != opargs[1].type_) and + (opargs[0].type_.bitwidth != opargs[1].type_.bitwidth) and + isinstance(opargs[1], VariableRepr)): + assert binary_op in ('shl', 'lshr', 'ashr') + t = self._tmp() + self.w('{t.V} = sext {opargs[1].TV} to {opargs[0].T}' + .format(**locals())) + opargs[1] = t self.w('{opres.V} = {binary_op} {opargs[0].TV}, {opargs[1].V}' .format(**locals())) elif opname.startswith('cast_') or opname.startswith('truncate_'): From noreply at buildbot.pypy.org Thu Jan 30 15:24:28 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 30 Jan 2014 15:24:28 +0100 (CET) Subject: [pypy-commit] stmgc c7: over-allocate lists Message-ID: <20140130142428.EC5301D22C5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r691:0a4507399ac1 Date: 2014-01-30 15:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/0a4507399ac1/ Log: over-allocate lists diff --git a/duhton/listobject.c b/duhton/listobject.c --- a/duhton/listobject.c +++ b/duhton/listobject.c @@ -8,6 +8,7 @@ typedef TLPREFIX struct DuTupleObject_s { DuOBJECT_HEAD1 int ob_count; + int ob_capacity; DuObject *ob_items[1]; } DuTupleObject; @@ -27,7 +28,7 @@ size_t tuple_bytesize(struct DuTupleObject_s *ob) { - return sizeof(DuTupleObject) + (ob->ob_count - 1) * sizeof(DuObject *); + return sizeof(DuTupleObject) + (ob->ob_capacity - 1) * sizeof(DuObject *); } void list_trace(struct DuListObject_s *ob, void visit(object_t **)) @@ -35,6 +36,7 @@ visit((object_t **)&ob->ob_tuple); } + void list_print(DuListObject *ob) { int i; @@ -71,9 +73,15 @@ ob = (DuTupleObject *)stm_allocate(size); ob->ob_base.type_id = DUTYPE_TUPLE; ob->ob_count = length; + ob->ob_capacity = length; return ob; } +int overallocated_size(int size) +{ + return size + (size >> 3) + (size < 9 ? 
3 : 6); +} + void _list_append(DuListObject *ob, DuObject *x) { _du_read1(ob); @@ -82,17 +90,24 @@ _du_read1(olditems); int i, newcount = olditems->ob_count + 1; - _du_save3(ob, x, olditems); - DuTupleObject *newitems = DuTuple_New(newcount); - _du_restore3(ob, x, olditems); + if (newcount <= olditems->ob_capacity) { + _du_write1(olditems); + olditems->ob_items[newcount-1] = x; + olditems->ob_count = newcount; + } else { /* allocate new one */ + _du_save3(ob, x, olditems); + DuTupleObject *newitems = DuTuple_New(overallocated_size(newcount)); + newitems->ob_count = newcount; + _du_restore3(ob, x, olditems); + + _du_write1(ob); - _du_write1(ob); + for (i=0; iob_items[i] = olditems->ob_items[i]; + newitems->ob_items[newcount-1] = x; - for (i=0; iob_items[i] = olditems->ob_items[i]; - newitems->ob_items[newcount-1] = x; - - ob->ob_tuple = newitems; + ob->ob_tuple = newitems; + } } void DuList_Append(DuObject *ob, DuObject *item) @@ -194,6 +209,7 @@ stm_allocate_prebuilt(sizeof(DuTupleObject)); du_empty_tuple->ob_base.type_id = DUTYPE_TUPLE; du_empty_tuple->ob_count = 0; + du_empty_tuple->ob_capacity = 0; } DuObject *DuList_New() From noreply at buildbot.pypy.org Thu Jan 30 16:00:02 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 30 Jan 2014 16:00:02 +0100 (CET) Subject: [pypy-commit] stmgc c7: lower the nursery size again Message-ID: <20140130150002.74E221D22C5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r692:1e6f2b1810b2 Date: 2014-01-30 15:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/1e6f2b1810b2/ Log: lower the nursery size again diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -10,7 +10,7 @@ #define NB_THREADS 2 #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define LARGE_OBJECT_WORDS 36 -#define NB_NURSERY_PAGES 2048 // 8MB +#define NB_NURSERY_PAGES 1024 // 4MB #define LENGTH_SHADOW_STACK 163840 From noreply at buildbot.pypy.org Thu Jan 30 16:00:03 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 30 Jan 2014 16:00:03 +0100 (CET) Subject: [pypy-commit] stmgc c7: allocate big objects directly outside of the nursery Message-ID: <20140130150003.94C901D22C5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r693:5a99e1398932 Date: 2014-01-30 16:00 +0100 http://bitbucket.org/pypy/stmgc/changeset/5a99e1398932/ Log: allocate big objects directly outside of the nursery diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -13,6 +13,9 @@ #define NB_NURSERY_PAGES 1024 // 4MB #define LENGTH_SHADOW_STACK 163840 +#define NURSERY_SECTION (32*4096) +/* (NB_NURSERY_PAGE * 4096) % NURSERY_SECTION == 0 */ + #define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_THREADS) #define READMARKER_END ((NB_PAGES * 4096UL) >> 4) diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -193,24 +193,48 @@ object_t *stm_allocate(size_t size) { + object_t *result; + _stm_start_safe_point(LOCK_COLLECT); /* all collections may happen here */ _stm_stop_safe_point(LOCK_COLLECT); assert(_STM_TL->active); assert(size % 8 == 0); - assert(16 <= size && size < NB_NURSERY_PAGES * 4096);//XXX + assert(16 <= size); + /* XXX move out of fastpath */ + if (size >= NURSERY_SECTION) { + /* allocate large objects outside the nursery immediately, + otherwise they may trigger too many minor collections + and degrade performance */ + bool is_small; + result = stm_big_small_alloc_old(size, &is_small); + + memset((void*)real_address(result), 0, size); + + /* object is not committed yet */ 
+ result->stm_flags |= GCFLAG_NOT_COMMITTED; + if (is_small) /* means, not allocated by large-malloc */ + result->stm_flags |= GCFLAG_SMALL; + assert(size == _stm_data_size((struct object_s*)REAL_ADDRESS(get_thread_base(0), result))); + + LIST_APPEND(_STM_TL->uncommitted_objects, result); + LIST_APPEND(_STM_TL->old_objects_to_trace, result); + return result; + } + localchar_t *current = _STM_TL->nursery_current; localchar_t *new_current = current + size; _STM_TL->nursery_current = new_current; assert((uintptr_t)new_current < (1L << 32)); + if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { _STM_TL->nursery_current = current; /* reset for nursery-clearing in minor_collect!! */ current = collect_and_reserve(size); } - object_t *result = (object_t *)current; + result = (object_t *)current; return result; } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -36,6 +36,9 @@ typedef ... jmpbufptr_t; #define SIZEOF_MYOBJ ... +#define NB_NURSERY_PAGES ... +#define NURSERY_SECTION ... + void stm_setup(void); void stm_setup_thread(void); @@ -244,13 +247,13 @@ size_t stmcb_size(struct object_s *obj) { struct myobj_s *myobj = (struct myobj_s*)obj; - if (myobj->type_id < 42142) { + if (myobj->type_id < 421420) { /* basic case: tid equals 42 plus the size of the object */ assert(myobj->type_id >= 42 + sizeof(struct myobj_s)); return myobj->type_id - 42; } else { - int nrefs = myobj->type_id - 42142; + int nrefs = myobj->type_id - 421420; assert(nrefs < 100); if (nrefs == 0) /* weakrefs */ nrefs = 1; @@ -262,11 +265,11 @@ { int i; struct myobj_s *myobj = (struct myobj_s*)obj; - if (myobj->type_id < 42142) { + if (myobj->type_id < 421420) { /* basic case: no references */ return; } - for (i=0; i < myobj->type_id - 42142; i++) { + for (i=0; i < myobj->type_id - 421420; i++) { object_t **ref = ((object_t **)(myobj + 1)) + i; visit(ref); } @@ -308,7 +311,7 @@ def stm_allocate_refs(n): o = lib.stm_allocate(HDR + n * WORD) - tid = 42142 + n + tid = 421420 + n lib._set_type_id(o, tid) return o diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -312,9 +312,8 @@ assert stm_get_char(lp1) == 'a' def test_many_allocs(self): - # assumes NB_NURSERY_PAGES 1024 obj_size = 1024 - num = 9000 # more than what fits in the nursery (4MB) + num = (lib.NB_NURSERY_PAGES * 4096) / obj_size + 100 # more than what fits in the nursery stm_start_transaction() for i in range(num): @@ -333,6 +332,14 @@ assert old assert young + def test_larger_than_section(self): + obj_size = lib.NURSERY_SECTION + 16 + + stm_start_transaction() + new = stm_allocate(obj_size) + assert not is_in_nursery(new) + + def test_large_obj_alloc(self): # test obj which doesn't fit into the size_classes # for now, we will still allocate it in the nursery. From noreply at buildbot.pypy.org Thu Jan 30 16:31:36 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 30 Jan 2014 16:31:36 +0100 (CET) Subject: [pypy-commit] pypy llvm-translation-backend: Instead of setting the translation backend to llvm by default, set it only in translate.py. Message-ID: <20140130153136.42A7E1D22C5@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: llvm-translation-backend Changeset: r69022:06d5fdca63a4 Date: 2014-01-30 16:30 +0100 http://bitbucket.org/pypy/pypy/changeset/06d5fdca63a4/ Log: Instead of setting the translation backend to llvm by default, set it only in translate.py. 
diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -292,7 +292,6 @@ if child._name != newname] descr = OptionDescription("pypy", "all options", children) config = Config(descr, **overrides) - config.translation.backend = 'llvm' if translating: config.translating = True if existing_config is not None: diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -109,6 +109,7 @@ opt_parser.disable_interspersed_args() config = get_combined_translation_config(translating=True) + config.translation.backend = 'llvm' to_optparse(config, parser=opt_parser, useoptions=['translation.*']) translateconfig = Config(translate_optiondescr) to_optparse(translateconfig, parser=opt_parser) From noreply at buildbot.pypy.org Thu Jan 30 17:02:07 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 30 Jan 2014 17:02:07 +0100 (CET) Subject: [pypy-commit] stmgc c7: some more debug output Message-ID: <20140130160207.6882F1D22C5@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r694:692279668bde Date: 2014-01-30 17:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/692279668bde/ Log: some more debug output diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -394,6 +394,8 @@ _STM_TL->jmpbufptr = jmpbufptr; _STM_TL->active = 1; _STM_TL->need_abort = 0; + + fprintf(stderr, "%c", 'S'+_STM_TL->thread_num*32); } diff --git a/duhton/demo/sort.duh b/duhton/demo/sort.duh --- a/duhton/demo/sort.duh +++ b/duhton/demo/sort.duh @@ -121,9 +121,11 @@ (get lists 0) left-c) (transaction merge_sort_transaction (get lists 1) right-c) - (print (quote start-transactions)) + + (setq current (time)) + (print (quote before-parallel)) (run-transactions) - (print (quote finished-transactions)) + (print (quote time-parallel:) (- (time) current)) (setq left (get left-c)) (setq right (get right-c)) @@ -168,7 +170,11 @@ ;; (print bs) ;; (print (split_list as)) -(setq cs (random_list 50000)) +(setq current (time)) +(print (quote before-random)) +(setq cs (random_list 100000)) +(print (quote time-random:) (- (time) current)) + ;; (print_list cs) ;; (setq res (container None)) @@ -177,13 +183,14 @@ ;; (print (is_sorted (get res))) (setq current (time)) -(print (quote before-sorting:) current) +(print (quote before-sorting)) +(setq sorted (merge_sort_parallel cs)) +(print (quote time-sorting:) (- (time) current)) -(setq sorted (merge_sort_parallel cs)) -(print (quote after-sorting:) (time)) -(print (quote difference:) (- (time) current)) +(setq current (time)) +(print (quote before-check)) (print (quote sorted:) (is_sorted sorted)) +(print (quote time-check:) (- (time) current)) - From noreply at buildbot.pypy.org Thu Jan 30 23:36:37 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 30 Jan 2014 23:36:37 +0100 (CET) Subject: [pypy-commit] pypy default: provide byteswap for scalars Message-ID: <20140130223637.339E91D22C5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69024:e7e998e6b19f Date: 2014-01-30 17:35 -0500 http://bitbucket.org/pypy/pypy/changeset/e7e998e6b19f/ Log: provide byteswap for scalars diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -327,6 +327,9 @@ def descr_buffer(self, space): 
return self.descr_ravel(space).descr_get_data(space) + def descr_byteswap(self, space): + return self.get_dtype(space).itemtype.byteswap(self) + w_flags = None def descr_get_flags(self, space): if self.w_flags is None: @@ -592,6 +595,7 @@ view = interp2app(W_GenericBox.descr_view), squeeze = interp2app(W_GenericBox.descr_self), copy = interp2app(W_GenericBox.descr_copy), + byteswap = interp2app(W_GenericBox.descr_byteswap), dtype = GetSetProperty(W_GenericBox.descr_get_dtype), size = GetSetProperty(W_GenericBox.descr_get_size), diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -111,6 +111,13 @@ b = buffer(a) assert str(b) == a + def test_byteswap(self): + import numpy as np + assert np.int64(123).byteswap() == 8863084066665136128 + a = np.complex64(1+2j).byteswap() + assert repr(a.real).startswith('4.60060') + assert repr(a.imag).startswith('8.96831') + def test_squeeze(self): import numpy as np assert np.True_.squeeze() is np.True_ From noreply at buildbot.pypy.org Thu Jan 30 23:36:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 30 Jan 2014 23:36:36 +0100 (CET) Subject: [pypy-commit] pypy default: fix numpy dtype char aliases Message-ID: <20140130223636.089711D22C5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69023:eabb5dd86586 Date: 2014-01-30 17:24 -0500 http://bitbucket.org/pypy/pypy/changeset/eabb5dd86586/ Log: fix numpy dtype char aliases diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -803,29 +803,19 @@ for dtype in reversed(self.builtin_dtypes): self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.name] = dtype - can_name = dtype.kind + str(dtype.get_size()) - self.dtypes_by_name[can_name] = dtype - self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype - self.dtypes_by_name[NPY_NATIVE + can_name] = dtype - new_name = NPY_OPPBYTE + can_name - itemtype = type(dtype.itemtype)(False) - self.dtypes_by_name[new_name] = W_Dtype( - itemtype, dtype.num, dtype.kind, new_name, dtype.char, - dtype.w_box_type, byteorder=NPY_OPPBYTE, - float_type=dtype.float_type) - if dtype.kind != dtype.char: - can_name = dtype.char + for can_name in [dtype.kind + str(dtype.get_size()), + dtype.char]: + self.dtypes_by_name[can_name] = dtype self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype self.dtypes_by_name[NPY_NATIVE + can_name] = dtype new_name = NPY_OPPBYTE + can_name + itemtype = type(dtype.itemtype)(False) self.dtypes_by_name[new_name] = W_Dtype( itemtype, dtype.num, dtype.kind, new_name, dtype.char, dtype.w_box_type, byteorder=NPY_OPPBYTE, float_type=dtype.float_type) - for alias in dtype.aliases: self.dtypes_by_name[alias] = dtype - self.dtypes_by_name[dtype.char] = dtype typeinfo_full = { 'LONGLONG': self.w_int64dtype, diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -788,6 +788,14 @@ assert dtype('>i8').str == '>i8' assert dtype('int8').str == '|i1' assert dtype('float').str == byteorder + 'f8' + assert dtype('f').str == byteorder + 'f4' + assert dtype('=f').str == byteorder + 'f4' + assert dtype('>f').str == '>f4' + assert dtype('<f').str == '<f4' + assert dtype('d').str == byteorder + 'f8' + assert dtype('=d').str == byteorder + 'f8' + assert dtype('>d').str == '>f8' + assert dtype('<d').str == '<f8' From noreply at buildbot.pypy.org Fri Jan 31 2014 From: noreply at buildbot.pypy.org (Alexander Hesse) Subject: [pypy-commit] pypy prepare-split: updated py imports in pypy/ to pypy._py Author: Alexander Hesse Branch: prepare-split 
Changeset: r69025:6e6297ca0347 Date: 2014-01-31 00:06 +0100 http://bitbucket.org/pypy/pypy/changeset/6e6297ca0347/ Log: updated py imports in pypy/ to pypy._py diff too long, truncating to 2000 out of 3218 lines diff --git a/pypy/_py/__init__.py b/pypy/_py/__init__.py --- a/pypy/_py/__init__.py +++ b/pypy/_py/__init__.py @@ -10,7 +10,7 @@ """ __version__ = '1.4.7' -from py import _apipkg +from pypy._py import _apipkg # so that py.error.* instances are picklable import sys diff --git a/pypy/_py/__metainfo.py b/pypy/_py/__metainfo.py --- a/pypy/_py/__metainfo.py +++ b/pypy/_py/__metainfo.py @@ -1,2 +1,2 @@ -import py +import pypy._py as py pydir = py.path.local(py.__file__).dirpath() diff --git a/pypy/_py/_code/_assertionnew.py b/pypy/_py/_code/_assertionnew.py --- a/pypy/_py/_code/_assertionnew.py +++ b/pypy/_py/_code/_assertionnew.py @@ -6,7 +6,7 @@ import sys import ast -import py +import pypy._py as py from py._code.assertion import _format_explanation, BuiltinAssertionError diff --git a/pypy/_py/_code/_assertionold.py b/pypy/_py/_code/_assertionold.py --- a/pypy/_py/_code/_assertionold.py +++ b/pypy/_py/_code/_assertionold.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import sys, inspect from compiler import parse, ast, pycodegen from py._code.assertion import BuiltinAssertionError, _format_explanation diff --git a/pypy/_py/_code/assertion.py b/pypy/_py/_code/assertion.py --- a/pypy/_py/_code/assertion.py +++ b/pypy/_py/_code/assertion.py @@ -1,5 +1,5 @@ import sys -import py +import pypy._py as py BuiltinAssertionError = py.builtin.builtins.AssertionError diff --git a/pypy/_py/_code/code.py b/pypy/_py/_code/code.py --- a/pypy/_py/_code/code.py +++ b/pypy/_py/_code/code.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import sys, os.path builtin_repr = repr diff --git a/pypy/_py/_code/source.py b/pypy/_py/_code/source.py --- a/pypy/_py/_code/source.py +++ b/pypy/_py/_code/source.py @@ -1,7 +1,7 @@ from __future__ import generators import sys import inspect, tokenize -import py +import pypy._py as py from types import ModuleType cpy_compile = compile diff --git a/pypy/_py/_io/capture.py b/pypy/_py/_io/capture.py --- a/pypy/_py/_io/capture.py +++ b/pypy/_py/_io/capture.py @@ -1,6 +1,6 @@ import os import sys -import py +import pypy._py as py import tempfile try: diff --git a/pypy/_py/_io/saferepr.py b/pypy/_py/_io/saferepr.py --- a/pypy/_py/_io/saferepr.py +++ b/pypy/_py/_io/saferepr.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import sys, os.path builtin_repr = repr diff --git a/pypy/_py/_io/terminalwriter.py b/pypy/_py/_io/terminalwriter.py --- a/pypy/_py/_io/terminalwriter.py +++ b/pypy/_py/_io/terminalwriter.py @@ -6,7 +6,7 @@ import sys, os -import py +import pypy._py as py win32_and_ctypes = False if sys.platform == "win32": diff --git a/pypy/_py/_log/log.py b/pypy/_py/_log/log.py --- a/pypy/_py/_log/log.py +++ b/pypy/_py/_log/log.py @@ -14,7 +14,7 @@ debug=py.log.STDOUT, command=None) """ -import py, sys +import pypy._py as py, sys class Message(object): def __init__(self, keywords, args): diff --git a/pypy/_py/_log/warning.py b/pypy/_py/_log/warning.py --- a/pypy/_py/_log/warning.py +++ b/pypy/_py/_log/warning.py @@ -1,4 +1,4 @@ -import py, sys +import pypy._py as py, sys class DeprecationWarning(DeprecationWarning): def __init__(self, msg, path, lineno): diff --git a/pypy/_py/_path/common.py b/pypy/_py/_path/common.py --- a/pypy/_py/_path/common.py +++ b/pypy/_py/_path/common.py @@ -1,7 +1,7 @@ """ """ import os, sys -import py +import pypy._py as py class 
Checkers: _depend_on_existence = 'exists', 'link', 'dir', 'file' diff --git a/pypy/_py/_path/local.py b/pypy/_py/_path/local.py --- a/pypy/_py/_path/local.py +++ b/pypy/_py/_path/local.py @@ -2,7 +2,7 @@ local path implementation. """ import sys, os, stat, re, atexit -import py +import pypy._py as py from py._path import common iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt') diff --git a/pypy/_py/_path/svnurl.py b/pypy/_py/_path/svnurl.py --- a/pypy/_py/_path/svnurl.py +++ b/pypy/_py/_path/svnurl.py @@ -5,8 +5,8 @@ """ import os, sys, time, re -import py -from py import path, process +import pypy._py as py +from pypy._py import path, process from py._path import common from py._path import svnwc as svncommon from py._path.cacheutil import BuildcostAccessCache, AgingCache diff --git a/pypy/_py/_path/svnwc.py b/pypy/_py/_path/svnwc.py --- a/pypy/_py/_path/svnwc.py +++ b/pypy/_py/_path/svnwc.py @@ -6,7 +6,7 @@ """ import os, sys, time, re, calendar -import py +import pypy._py as py import subprocess from py._path import common diff --git a/pypy/_py/_process/cmdexec.py b/pypy/_py/_process/cmdexec.py --- a/pypy/_py/_process/cmdexec.py +++ b/pypy/_py/_process/cmdexec.py @@ -4,7 +4,7 @@ import os, sys import subprocess -import py +import pypy._py as py from subprocess import Popen, PIPE def cmdexec(cmd): diff --git a/pypy/_py/_process/forkedfunc.py b/pypy/_py/_process/forkedfunc.py --- a/pypy/_py/_process/forkedfunc.py +++ b/pypy/_py/_process/forkedfunc.py @@ -7,7 +7,7 @@ XXX see if tempdir handling is sane """ -import py +import pypy._py as py import os import sys import marshal diff --git a/pypy/_py/_process/killproc.py b/pypy/_py/_process/killproc.py --- a/pypy/_py/_process/killproc.py +++ b/pypy/_py/_process/killproc.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import os, sys if sys.platform == "win32" or getattr(os, '_name', '') == 'nt': diff --git a/pypy/_py/_xmlgen.py b/pypy/_py/_xmlgen.py --- a/pypy/_py/_xmlgen.py +++ b/pypy/_py/_xmlgen.py @@ -4,7 +4,7 @@ (c) holger krekel, holger at merlinux eu. 
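The mechanical rewrite in r69025 works because "import pypy._py as py" binds the vendored copy of the py library (the new pypy/_py package initialised through _apipkg) to the same local name the call sites already use, so expressions such as py.path.local() or py.code.Source() keep working unchanged. A rough sketch of the idea, assuming a checkout of the prepare-split branch with the pypy package on sys.path:

    # the local name "py" now points at the vendored package instead of the
    # standalone py library, but attribute access stays identical:
    import pypy._py as py

    p = py.path.local('.')                       # path API, as before
    src = py.code.Source("def f():\n    return 1\n")
    print(p, len(src.lines))                     # lazy submodules resolve via apipkg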
2009 """ -import py +import pypy._py as py import sys, re if sys.version_info >= (3,0): diff --git a/pypy/_py/bin/_findpy.py b/pypy/_py/bin/_findpy.py --- a/pypy/_py/bin/_findpy.py +++ b/pypy/_py/bin/_findpy.py @@ -31,7 +31,7 @@ if not searchpy(opd(__file__)): pass # let's hope it is just on sys.path -import py +import pypy._py as py import pytest if __name__ == '__main__': diff --git a/pypy/config/makerestdoc.py b/pypy/config/makerestdoc.py --- a/pypy/config/makerestdoc.py +++ b/pypy/config/makerestdoc.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.tool.rest.rst import Rest, Paragraph, Strong, ListItem, Title, Link from pypy.tool.rest.rst import Directive, Text diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -1,6 +1,6 @@ import sys -import py +import pypy._py as py from rpython.config.config import (OptionDescription, BoolOption, IntOption, ChoiceOption, StrOption, to_optparse, ConflictConfigError) diff --git a/pypy/config/test/test_makerestdoc.py b/pypy/config/test/test_makerestdoc.py --- a/pypy/config/test/test_makerestdoc.py +++ b/pypy/config/test/test_makerestdoc.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from rpython.config.config import * from pypy.config.makerestdoc import make_cmdline_overview diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.config.pypyoption import get_pypy_config, set_pypy_opt_level from rpython.config.config import Config, ConfigError from rpython.config.translationoption import set_opt_level diff --git a/pypy/conftest.py b/pypy/conftest.py --- a/pypy/conftest.py +++ b/pypy/conftest.py @@ -1,4 +1,4 @@ -import py, pytest, sys, os, textwrap +import pypy._py as py, pytest, sys, os, textwrap from inspect import isclass # pytest settings diff --git a/pypy/doc/config/generate.py b/pypy/doc/config/generate.py --- a/pypy/doc/config/generate.py +++ b/pypy/doc/config/generate.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.config import pypyoption, makerestdoc from pypy.doc.config.confrest import all_optiondescrs from rpython.config import translationoption, config diff --git a/pypy/doc/config/makemodules.py b/pypy/doc/config/makemodules.py --- a/pypy/doc/config/makemodules.py +++ b/pypy/doc/config/makemodules.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.config import pypyoption from rpython.config import translationoption, config diff --git a/pypy/doc/confrest.py b/pypy/doc/confrest.py --- a/pypy/doc/confrest.py +++ b/pypy/doc/confrest.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py # XXX importing/inheriting from an internal py lib class is hackish from confrest_oldpy import Project, Page, relpath diff --git a/pypy/doc/confrest_oldpy.py b/pypy/doc/confrest_oldpy.py --- a/pypy/doc/confrest_oldpy.py +++ b/pypy/doc/confrest_oldpy.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.tool.rest.rest import convert_rest_html, strip_html_header from pypy.tool.difftime import worded_time diff --git a/pypy/doc/pypyconfig.py b/pypy/doc/pypyconfig.py --- a/pypy/doc/pypyconfig.py +++ b/pypy/doc/pypyconfig.py @@ -8,6 +8,6 @@ from pypy.doc.config import generate from pypy.config import makerestdoc - import py + import pypy._py as py role = makerestdoc.register_config_role(py.path.local()) app.add_role("config", role) diff --git 
a/pypy/doc/statistic/confrest.py b/pypy/doc/statistic/confrest.py --- a/pypy/doc/statistic/confrest.py +++ b/pypy/doc/statistic/confrest.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.doc.confrest import * class PyPyPage(Page): diff --git a/pypy/doc/statistic/format.py b/pypy/doc/statistic/format.py --- a/pypy/doc/statistic/format.py +++ b/pypy/doc/statistic/format.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import datetime import dateutil from dateutil import parser diff --git a/pypy/doc/statistic/rebin.py b/pypy/doc/statistic/rebin.py --- a/pypy/doc/statistic/rebin.py +++ b/pypy/doc/statistic/rebin.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py chunks = 7 diff --git a/pypy/doc/test/test_whatsnew.py b/pypy/doc/test/test_whatsnew.py --- a/pypy/doc/test/test_whatsnew.py +++ b/pypy/doc/test/test_whatsnew.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import pypy from commands import getoutput, getstatusoutput ROOT = py.path.local(pypy.__file__).dirpath().dirpath() diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -1,6 +1,6 @@ # NOTE: run this script with LANG=en_US.UTF-8 -import py +import pypy._py as py import sys from collections import defaultdict import operator diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -1,5 +1,5 @@ -import py +import pypy._py as py import pypy pypydir = py.path.local(pypy.__file__).join('..') distdir = pypydir.dirpath() diff --git a/pypy/doc/tool/mydot.py b/pypy/doc/tool/mydot.py --- a/pypy/doc/tool/mydot.py +++ b/pypy/doc/tool/mydot.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -import py +import pypy._py as py import string import re diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -2,7 +2,7 @@ import sys import os -import py +import pypy._py as py if sys.platform.startswith('linux'): arch = 'linux' diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import os, sys diff --git a/pypy/interpreter/astcompiler/test/stdlib_testall.py b/pypy/interpreter/astcompiler/test/stdlib_testall.py --- a/pypy/interpreter/astcompiler/test/stdlib_testall.py +++ b/pypy/interpreter/astcompiler/test/stdlib_testall.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.interpreter.astcompiler.test.test_compiler import compile_with_astcompiler class TestStdlib: diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py --- a/pypy/interpreter/astcompiler/test/test_astbuilder.py +++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py @@ -2,7 +2,7 @@ import random import string import sys -import py +import pypy._py as py from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.pyparser import pyparse from pypy.interpreter.pyparser.error import SyntaxError diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1,4 +1,4 @@ -import py, sys +import pypy._py as py, sys from pypy.interpreter.astcompiler import codegen, astbuilder, symtable, optimize from 
pypy.interpreter.pyparser import pyparse from pypy.interpreter.pyparser.test import expressions diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -1,5 +1,5 @@ import string -import py +import pypy._py as py from pypy.interpreter.astcompiler import ast, astbuilder, symtable, consts from pypy.interpreter.pyparser import pyparse from pypy.interpreter.pyparser.error import SyntaxError diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1535,7 +1535,7 @@ """ NOT_RPYTHON """ space = cache.space # XXX will change once we have our own compiler - import py + import pypy._py as py source = source.lstrip() assert source.startswith('('), "incorrect header in:\n%s" % (source,) source = py.code.Source("def anonymous%s\n" % source) diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -14,7 +14,7 @@ import types import inspect -import py +import pypy._py as py from pypy.interpreter.eval import Code from pypy.interpreter.argument import Arguments diff --git a/pypy/interpreter/pyparser/test/test_future.py b/pypy/interpreter/pyparser/test/test_future.py --- a/pypy/interpreter/pyparser/test/test_future.py +++ b/pypy/interpreter/pyparser/test/test_future.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.interpreter.pyparser import future, pytokenizer from pypy.tool import stdlib___future__ as fut diff --git a/pypy/interpreter/pyparser/test/test_metaparser.py b/pypy/interpreter/pyparser/test/test_metaparser.py --- a/pypy/interpreter/pyparser/test/test_metaparser.py +++ b/pypy/interpreter/pyparser/test/test_metaparser.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import os import glob import tokenize diff --git a/pypy/interpreter/pyparser/test/test_parser.py b/pypy/interpreter/pyparser/test/test_parser.py --- a/pypy/interpreter/pyparser/test/test_parser.py +++ b/pypy/interpreter/pyparser/test/test_parser.py @@ -1,5 +1,5 @@ # New parser tests. -import py +import pypy._py as py import tokenize import token import StringIO diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -1,5 +1,5 @@ from pypy.interpreter.pyparser import parsestring -import py, sys +import pypy._py as py, sys class TestParsetring: def parse_and_compare(self, literal, value, encoding=None): diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py --- a/pypy/interpreter/pyparser/test/test_pyparse.py +++ b/pypy/interpreter/pyparser/test/test_pyparse.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -import py +import pypy._py as py from pypy.interpreter.pyparser import pyparse from pypy.interpreter.pyparser.pygram import syms, tokens from pypy.interpreter.pyparser.error import SyntaxError, IndentationError diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -2,7 +2,7 @@ Tests for the entry point of pypy-c, app_main.py. 
""" from __future__ import with_statement -import py +import pypy._py as py import sys, os, re, runpy, subprocess from rpython.tool.udir import udir from contextlib import contextmanager diff --git a/pypy/interpreter/test/test_appinterp.py b/pypy/interpreter/test/test_appinterp.py --- a/pypy/interpreter/test/test_appinterp.py +++ b/pypy/interpreter/test/test_appinterp.py @@ -1,5 +1,5 @@ -import py +import pypy._py as py from pypy.interpreter.gateway import appdef, ApplevelClass, applevel_temp from pypy.interpreter.error import OperationError diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -import py +import pypy._py as py from pypy.interpreter.argument import (Arguments, ArgErr, ArgErrUnknownKwds, ArgErrMultipleValues, ArgErrCount) from pypy.interpreter.signature import Signature diff --git a/pypy/interpreter/test/test_buffer.py b/pypy/interpreter/test/test_buffer.py --- a/pypy/interpreter/test/test_buffer.py +++ b/pypy/interpreter/test/test_buffer.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.interpreter.buffer import Buffer from rpython.tool.udir import udir diff --git a/pypy/interpreter/test/test_code.py b/pypy/interpreter/test/test_code.py --- a/pypy/interpreter/test/test_code.py +++ b/pypy/interpreter/test/test_code.py @@ -1,6 +1,6 @@ from pypy.interpreter import gateway from pypy.interpreter.astcompiler import consts -import py +import pypy._py as py class AppTestCodeIntrospection: def setup_class(cls): diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -1,5 +1,5 @@ import __future__ -import py, sys +import pypy._py as py, sys from pypy.interpreter.pycompiler import PythonAstCompiler from pypy.interpreter.pycode import PyCode from pypy.interpreter.error import OperationError diff --git a/pypy/interpreter/test/test_error.py b/pypy/interpreter/test/test_error.py --- a/pypy/interpreter/test/test_error.py +++ b/pypy/interpreter/test/test_error.py @@ -1,4 +1,4 @@ -import py, os, errno +import pypy._py as py, os, errno from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.error import decompose_valuefmt, get_operrcls2 from pypy.interpreter.error import wrap_oserror, new_exception_class diff --git a/pypy/interpreter/test/test_executioncontext.py b/pypy/interpreter/test/test_executioncontext.py --- a/pypy/interpreter/test/test_executioncontext.py +++ b/pypy/interpreter/test/test_executioncontext.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.interpreter import executioncontext class Finished(Exception): diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -4,7 +4,7 @@ from pypy.interpreter import gateway, argument from pypy.interpreter.gateway import ObjSpace, W_Root, WrappedDefault from pypy.interpreter.signature import Signature -import py +import pypy._py as py import sys class FakeFunc(object): diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import sys from pypy.interpreter import gateway, module, 
error diff --git a/pypy/interpreter/test/test_main.py b/pypy/interpreter/test/test_main.py --- a/pypy/interpreter/test/test_main.py +++ b/pypy/interpreter/test/test_main.py @@ -1,6 +1,6 @@ from cStringIO import StringIO -import py +import pypy._py as py from rpython.tool.udir import udir from pypy.interpreter.baseobjspace import OperationError from pypy.interpreter import main diff --git a/pypy/interpreter/test/test_mixedmodule.py b/pypy/interpreter/test/test_mixedmodule.py --- a/pypy/interpreter/test/test_mixedmodule.py +++ b/pypy/interpreter/test/test_mixedmodule.py @@ -1,5 +1,5 @@ from pypy.interpreter.mixedmodule import MixedModule -import py.test +import pypy._py as py class TestMixedModule(object): diff --git a/pypy/interpreter/test/test_raise.py b/pypy/interpreter/test/test_raise.py --- a/pypy/interpreter/test/test_raise.py +++ b/pypy/interpreter/test/test_raise.py @@ -1,4 +1,4 @@ -import py.test +import pypy._py as py class AppTestRaise: def test_arg_as_string(self): diff --git a/pypy/interpreter/test/test_syntax.py b/pypy/interpreter/test/test_syntax.py --- a/pypy/interpreter/test/test_syntax.py +++ b/pypy/interpreter/test/test_syntax.py @@ -1,5 +1,5 @@ from __future__ import with_statement -import py +import pypy._py as py def splitcases(s): lines = [line.rstrip() for line in s.split('\n')] diff --git a/pypy/interpreter/test/test_zpy.py b/pypy/interpreter/test/test_zpy.py --- a/pypy/interpreter/test/test_zpy.py +++ b/pypy/interpreter/test/test_zpy.py @@ -1,6 +1,6 @@ from rpython.tool.udir import udir -import py +import pypy._py as py import sys import pypy import subprocess diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py --- a/pypy/interpreter/test/test_zzpickle_and_slow.py +++ b/pypy/interpreter/test/test_zzpickle_and_slow.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy import conftest from pypy.interpreter import gateway from rpython.rlib.jit import non_virtual_ref, vref_None diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import W_Root, DescrMismatch diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1,5 +1,5 @@ from __future__ import with_statement -import py +import pypy._py as py from pypy.interpreter import gateway diff --git a/pypy/module/__pypy__/test/test_debug.py b/pypy/module/__pypy__/test/test_debug.py --- a/pypy/module/__pypy__/test/test_debug.py +++ b/pypy/module/__pypy__/test/test_debug.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.interpreter.gateway import interp2app from rpython.rlib import debug diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -105,7 +105,7 @@ def setup_class(cls): if (not cls.runappdirect or '__pypy__' not in sys.builtin_module_names): - import py + import pypy._py as py py.test.skip("this is only a test for -A runs on top of pypy") def test_enable_signals(self): diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ 
b/pypy/module/__pypy__/test/test_special.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py class AppTest(object): spaceconfig = {"objspace.usemodules.select": False, diff --git a/pypy/module/__pypy__/test/test_time.py b/pypy/module/__pypy__/test/test_time.py --- a/pypy/module/__pypy__/test/test_time.py +++ b/pypy/module/__pypy__/test/test_time.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.__pypy__.interp_time import HAS_CLOCK_GETTIME diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py --- a/pypy/module/_ast/test/test_ast.py +++ b/pypy/module/_ast/test/test_ast.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py class AppTestAST: diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -15,7 +15,7 @@ - if you could complete step 3, try running 'py.test test_file.py' here 5. make the test pass in pypy ('py.test test_c.py') """ -import py, sys, ctypes +import pypy._py as py, sys, ctypes if sys.version_info < (2, 6): py.test.skip("requires the b'' literal syntax") diff --git a/pypy/module/_cffi_backend/test/test_file.py b/pypy/module/_cffi_backend/test/test_file.py --- a/pypy/module/_cffi_backend/test/test_file.py +++ b/pypy/module/_cffi_backend/test/test_file.py @@ -1,4 +1,4 @@ -import urllib2, py +import urllib2, pypy._py as py def test_same_file(): diff --git a/pypy/module/_continuation/test/support.py b/pypy/module/_continuation/test/support.py --- a/pypy/module/_continuation/test/support.py +++ b/pypy/module/_continuation/test/support.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from rpython.rtyper.tool.rffi_platform import CompilationError diff --git a/pypy/module/_continuation/test/test_translated.py b/pypy/module/_continuation/test/test_translated.py --- a/pypy/module/_continuation/test/test_translated.py +++ b/pypy/module/_continuation/test/test_translated.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py try: import _continuation except ImportError: diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import os import stat import errno diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py --- a/pypy/module/_file/interp_stream.py +++ b/pypy/module/_file/interp_stream.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from rpython.rlib import streamio from rpython.rlib.streamio import StreamErrors diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -1,5 +1,5 @@ from __future__ import with_statement -import py, os, errno +import pypy._py as py, os, errno def getfile(space): return space.appexec([], """(): diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -1,6 +1,6 @@ import os, random, sys import rpython.tool.udir -import py +import pypy._py as py udir = rpython.tool.udir.udir.ensure('test_file_extra', dir=1) diff --git a/pypy/module/_file/test/test_large_file.py b/pypy/module/_file/test/test_large_file.py --- a/pypy/module/_file/test/test_large_file.py +++ b/pypy/module/_file/test/test_large_file.py @@ -1,4 +1,4 @@ -import py 
+import pypy._py as py from pypy.module._file.test.test_file import getfile diff --git a/pypy/module/_io/test/test_bufferedio.py b/pypy/module/_io/test/test_bufferedio.py --- a/pypy/module/_io/test/test_bufferedio.py +++ b/pypy/module/_io/test/test_bufferedio.py @@ -3,7 +3,7 @@ from rpython.tool.udir import udir from pypy.module._io import interp_bufferedio from pypy.interpreter.error import OperationError -import py.test +import pypy._py as py class AppTestBufferedReader: spaceconfig = dict(usemodules=['_io']) diff --git a/pypy/module/_locale/test/test_locale.py b/pypy/module/_locale/test/test_locale.py --- a/pypy/module/_locale/test/test_locale.py +++ b/pypy/module/_locale/test/test_locale.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import sys diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError diff --git a/pypy/module/_minimal_curses/__init__.py b/pypy/module/_minimal_curses/__init__.py --- a/pypy/module/_minimal_curses/__init__.py +++ b/pypy/module/_minimal_curses/__init__.py @@ -6,7 +6,7 @@ # we prefer _curses so any constants added make it into _minimal_curses import _minimal_curses as _curses except ImportError: - import py + import pypy._py as py py.test.skip("no _curses or _minimal_curses module") # no _curses at all from pypy.interpreter.mixedmodule import MixedModule diff --git a/pypy/module/_minimal_curses/test/test_curses.py b/pypy/module/_minimal_curses/test/test_curses.py --- a/pypy/module/_minimal_curses/test/test_curses.py +++ b/pypy/module/_minimal_curses/test/test_curses.py @@ -1,6 +1,6 @@ from pypy.conftest import pypydir from rpython.tool.udir import udir -import py +import pypy._py as py import sys # tests here are run as snippets through a pexpected python subprocess diff --git a/pypy/module/_multibytecodec/c_codecs.py b/pypy/module/_multibytecodec/c_codecs.py --- a/pypy/module/_multibytecodec/c_codecs.py +++ b/pypy/module/_multibytecodec/c_codecs.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.conftest import cdir diff --git a/pypy/module/_multibytecodec/test/test_c_codecs.py b/pypy/module/_multibytecodec/test/test_c_codecs.py --- a/pypy/module/_multibytecodec/test/test_c_codecs.py +++ b/pypy/module/_multibytecodec/test/test_c_codecs.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module._multibytecodec.c_codecs import getcodec, codecs from pypy.module._multibytecodec.c_codecs import decode, encode from pypy.module._multibytecodec.c_codecs import EncodeDecodeError diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py --- a/pypy/module/_multiprocessing/test/test_connection.py +++ b/pypy/module/_multiprocessing/test/test_connection.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import sys from pypy.interpreter.gateway import interp2app, W_Root diff --git a/pypy/module/_multiprocessing/test/test_win32.py b/pypy/module/_multiprocessing/test/test_win32.py --- a/pypy/module/_multiprocessing/test/test_win32.py +++ b/pypy/module/_multiprocessing/test/test_win32.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import sys class AppTestWin32: diff --git 
a/pypy/module/_pypyjson/targetjson.py b/pypy/module/_pypyjson/targetjson.py --- a/pypy/module/_pypyjson/targetjson.py +++ b/pypy/module/_pypyjson/targetjson.py @@ -1,5 +1,5 @@ import sys -import py +import pypy._py as py ROOT = py.path.local(__file__).dirpath('..', '..', '..') sys.path.insert(0, str(ROOT)) diff --git a/pypy/module/_rawffi/alt/test/test_funcptr.py b/pypy/module/_rawffi/alt/test/test_funcptr.py --- a/pypy/module/_rawffi/alt/test/test_funcptr.py +++ b/pypy/module/_rawffi/alt/test/test_funcptr.py @@ -4,7 +4,7 @@ from rpython.rlib.libffi import CDLL from rpython.rlib.test.test_clibffi import get_libm_name -import sys, py +import sys, pypy._py as py class BaseAppTestFFI(object): spaceconfig = dict(usemodules=('_rawffi',)) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -3,7 +3,7 @@ from pypy.module._rawffi.interp_rawffi import TYPEMAP, TYPEMAP_FLOAT_LETTERS from pypy.module._rawffi.tracker import Tracker -import os, sys, py +import os, sys, pypy._py as py class AppTestFfi: spaceconfig = dict(usemodules=['_rawffi', 'struct']) diff --git a/pypy/module/_rawffi/test/test_tracker.py b/pypy/module/_rawffi/test/test_tracker.py --- a/pypy/module/_rawffi/test/test_tracker.py +++ b/pypy/module/_rawffi/test/test_tracker.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.conftest import option from pypy.module._rawffi.tracker import Tracker diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -1,5 +1,5 @@ import sys -import py +import pypy._py as py from pypy.tool.pytest.objspace import gettestobjspace from rpython.tool.udir import udir from rpython.rlib import rsocket diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -1,6 +1,6 @@ """Regular expression tests specific to _sre.py and accumulated during TDD.""" import os -import py +import pypy._py as py from py.test import raises, skip from pypy.interpreter.gateway import app2interp_temp diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, ObjSpace diff --git a/pypy/module/_winreg/test/test_winreg.py b/pypy/module/_winreg/test/test_winreg.py --- a/pypy/module/_winreg/test/test_winreg.py +++ b/pypy/module/_winreg/test/test_winreg.py @@ -1,6 +1,6 @@ from rpython.tool.udir import udir -import os, sys, py +import os, sys, pypy._py as py if sys.platform != 'win32': py.test.skip("_winreg is a win32 module") diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1,6 +1,5 @@ import sys -import py -import py.test +import pypy._py as py ## class AppTestSimpleArray: diff --git a/pypy/module/array/test/test_array_old.py b/pypy/module/array/test/test_array_old.py --- a/pypy/module/array/test/test_array_old.py +++ b/pypy/module/array/test/test_array_old.py @@ -1,6 +1,6 @@ # minimal 
tests. See also lib-python/modified-2.4.1/test/test_array. -import py +import pypy._py as py from py.test import raises import struct diff --git a/pypy/module/bz2/test/test_bz2_compdecomp.py b/pypy/module/bz2/test/test_bz2_compdecomp.py --- a/pypy/module/bz2/test/test_bz2_compdecomp.py +++ b/pypy/module/bz2/test/test_bz2_compdecomp.py @@ -1,6 +1,6 @@ import os -import py +import pypy._py as py from pypy.interpreter.gateway import interp2app from pypy.module.bz2.test.support import CheckAllocation diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -3,7 +3,7 @@ import os import random -import py +import pypy._py as py from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.module.bz2.test.support import CheckAllocation diff --git a/pypy/module/bz2/test/test_large.py b/pypy/module/bz2/test/test_large.py --- a/pypy/module/bz2/test/test_large.py +++ b/pypy/module/bz2/test/test_large.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py class AppTestBZ2File: diff --git a/pypy/module/cStringIO/test/test_interp_stringio.py b/pypy/module/cStringIO/test/test_interp_stringio.py --- a/pypy/module/cStringIO/test/test_interp_stringio.py +++ b/pypy/module/cStringIO/test/test_interp_stringio.py @@ -1,4 +1,4 @@ -import os, sys, py +import os, sys, pypy._py as py class AppTestcStringIO: diff --git a/pypy/module/cppyy/capi/cint_capi.py b/pypy/module/cppyy/capi/cint_capi.py --- a/pypy/module/cppyy/capi/cint_capi.py +++ b/pypy/module/cppyy/capi/cint_capi.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import interp2app, unwrap_spec diff --git a/pypy/module/cppyy/capi/reflex_capi.py b/pypy/module/cppyy/capi/reflex_capi.py --- a/pypy/module/cppyy/capi/reflex_capi.py +++ b/pypy/module/cppyy/capi/reflex_capi.py @@ -1,4 +1,4 @@ -import py, os +import pypy._py as py, os from rpython.rlib import libffi from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/pypy/module/cppyy/test/conftest.py b/pypy/module/cppyy/test/conftest.py --- a/pypy/module/cppyy/test/conftest.py +++ b/pypy/module/cppyy/test/conftest.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py def pytest_runtest_setup(item): if py.path.local.sysfind('genreflex') is None: diff --git a/pypy/module/cppyy/test/test_aclassloader.py b/pypy/module/cppyy/test/test_aclassloader.py --- a/pypy/module/cppyy/test/test_aclassloader.py +++ b/pypy/module/cppyy/test/test_aclassloader.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys currpath = py.path.local(__file__).dirpath() diff --git a/pypy/module/cppyy/test/test_advancedcpp.py b/pypy/module/cppyy/test/test_advancedcpp.py --- a/pypy/module/cppyy/test/test_advancedcpp.py +++ b/pypy/module/cppyy/test/test_advancedcpp.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys from pypy.module.cppyy import capi diff --git a/pypy/module/cppyy/test/test_cint.py b/pypy/module/cppyy/test/test_cint.py --- a/pypy/module/cppyy/test/test_cint.py +++ b/pypy/module/cppyy/test/test_cint.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys # These tests are for the CINT backend only (they exercise ROOT features # and classes that are not loaded/available with the Reflex backend). 
At diff --git a/pypy/module/cppyy/test/test_cppyy.py b/pypy/module/cppyy/test/test_cppyy.py --- a/pypy/module/cppyy/test/test_cppyy.py +++ b/pypy/module/cppyy/test/test_cppyy.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys from pypy.module.cppyy import interp_cppyy, executor diff --git a/pypy/module/cppyy/test/test_crossing.py b/pypy/module/cppyy/test/test_crossing.py --- a/pypy/module/cppyy/test/test_crossing.py +++ b/pypy/module/cppyy/test/test_crossing.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys from pypy.interpreter.gateway import interp2app, unwrap_spec from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator import platform diff --git a/pypy/module/cppyy/test/test_datatypes.py b/pypy/module/cppyy/test/test_datatypes.py --- a/pypy/module/cppyy/test/test_datatypes.py +++ b/pypy/module/cppyy/test/test_datatypes.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys currpath = py.path.local(__file__).dirpath() diff --git a/pypy/module/cppyy/test/test_fragile.py b/pypy/module/cppyy/test/test_fragile.py --- a/pypy/module/cppyy/test/test_fragile.py +++ b/pypy/module/cppyy/test/test_fragile.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys from pypy.module.cppyy import capi currpath = py.path.local(__file__).dirpath() diff --git a/pypy/module/cppyy/test/test_operators.py b/pypy/module/cppyy/test/test_operators.py --- a/pypy/module/cppyy/test/test_operators.py +++ b/pypy/module/cppyy/test/test_operators.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys currpath = py.path.local(__file__).dirpath() diff --git a/pypy/module/cppyy/test/test_overloads.py b/pypy/module/cppyy/test/test_overloads.py --- a/pypy/module/cppyy/test/test_overloads.py +++ b/pypy/module/cppyy/test/test_overloads.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys currpath = py.path.local(__file__).dirpath() diff --git a/pypy/module/cppyy/test/test_pythonify.py b/pypy/module/cppyy/test/test_pythonify.py --- a/pypy/module/cppyy/test/test_pythonify.py +++ b/pypy/module/cppyy/test/test_pythonify.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys from pypy.module.cppyy import interp_cppyy, executor diff --git a/pypy/module/cppyy/test/test_stltypes.py b/pypy/module/cppyy/test/test_stltypes.py --- a/pypy/module/cppyy/test/test_stltypes.py +++ b/pypy/module/cppyy/test/test_stltypes.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys currpath = py.path.local(__file__).dirpath() diff --git a/pypy/module/cppyy/test/test_streams.py b/pypy/module/cppyy/test/test_streams.py --- a/pypy/module/cppyy/test/test_streams.py +++ b/pypy/module/cppyy/test/test_streams.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys currpath = py.path.local(__file__).dirpath() diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -1,4 +1,4 @@ -import py, os, sys +import pypy._py as py, os, sys from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.objectmodel import specialize, instantiate from rpython.rlib import rarithmetic, jit diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -2,7 +2,7 @@ import sys, os import atexit -import py +import pypy._py as py from pypy.conftest import pypydir from rpython.rtyper.lltypesystem 
import rffi, lltype diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py --- a/pypy/module/cpyext/test/conftest.py +++ b/pypy/module/cpyext/test/conftest.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import pytest def pytest_configure(config): diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.baseobjspace import W_Root from pypy.module.cpyext.state import State diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -1,6 +1,6 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase -import py +import pypy._py as py import sys class AppTestArrayModule(AppTestCpythonExtensionBase): diff --git a/pypy/module/cpyext/test/test_borrow.py b/pypy/module/cpyext/test/test_borrow.py --- a/pypy/module/cpyext/test/test_borrow.py +++ b/pypy/module/cpyext/test/test_borrow.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.pyobject import make_ref, borrow_from, RefcountState diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -2,7 +2,7 @@ import weakref import os -import py +import pypy._py as py from pypy.conftest import pypydir from pypy.interpreter.error import OperationError diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -1,4 +1,4 @@ -import sys, py +import sys, pypy._py as py from rpython.rtyper.lltypesystem import rffi, lltype from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.longobject import W_LongObject diff --git a/pypy/module/cpyext/test/test_memoryobject.py b/pypy/module/cpyext/test/test_memoryobject.py --- a/pypy/module/cpyext/test/test_memoryobject.py +++ b/pypy/module/cpyext/test/test_memoryobject.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.cpyext.test.test_api import BaseApiTest class TestMemoryViewObject(BaseApiTest): diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext import sequence -import py.test +import pypy._py as py class TestSequence(BaseApiTest): def test_sequence(self, space, api): diff --git a/pypy/module/cpyext/test/test_setobject.py b/pypy/module/cpyext/test/test_setobject.py --- 
a/pypy/module/cpyext/test/test_setobject.py +++ b/pypy/module/cpyext/test/test_setobject.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.cpyext.pyobject import PyObject, PyObjectP, make_ref, from_ref from pypy.module.cpyext.test.test_api import BaseApiTest diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -5,7 +5,7 @@ from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref -import py +import pypy._py as py import sys class AppTestStringObject(AppTestCpythonExtensionBase): diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py --- a/pypy/module/cpyext/test/test_thread.py +++ b/pypy/module/cpyext/test/test_thread.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.cpyext.pyobject import PyObject, PyObjectP, make_ref, from_ref from pypy.module.cpyext.test.test_api import BaseApiTest diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -4,7 +4,7 @@ from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.typeobject import PyTypeObjectPtr -import py +import pypy._py as py import sys class AppTestTypeObject(AppTestCpythonExtensionBase): diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -6,7 +6,7 @@ from pypy.module.cpyext.api import PyObjectP, PyObject from pypy.module.cpyext.pyobject import Py_DecRef, from_ref from rpython.rtyper.lltypesystem import rffi, lltype -import sys, py +import sys, pypy._py as py class AppTestUnicodeObject(AppTestCpythonExtensionBase): def test_unicodeobject(self): diff --git a/pypy/module/crypt/test/test_crypt.py b/pypy/module/crypt/test/test_crypt.py --- a/pypy/module/crypt/test/test_crypt.py +++ b/pypy/module/crypt/test/test_crypt.py @@ -1,5 +1,5 @@ import os -import py +import pypy._py as py if os.name != 'posix': py.test.skip('crypt module only available on unix') diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py --- a/pypy/module/fcntl/test/test_fcntl.py +++ b/pypy/module/fcntl/test/test_fcntl.py @@ -1,5 +1,5 @@ import os -import py +import pypy._py as py from rpython.tool.udir import udir if os.name != 'posix': diff --git a/pypy/module/gc/test/test_app_referents.py b/pypy/module/gc/test/test_app_referents.py --- a/pypy/module/gc/test/test_app_referents.py +++ b/pypy/module/gc/test/test_app_referents.py @@ -1,4 +1,4 @@ -import py, os +import pypy._py as py, os from rpython.tool.udir import udir diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py --- a/pypy/module/gc/test/test_gc.py +++ b/pypy/module/gc/test/test_gc.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py class AppTestGC(object): def test_collect(self): @@ 
-73,7 +73,7 @@ pytestmark = py.test.mark.xfail(run=False) def setup_class(cls): - import py + import pypy._py as py from rpython.tool.udir import udir from rpython.rlib import rgc class X(object): @@ -92,7 +92,7 @@ cls._fname = fname def teardown_class(cls): - import py + import pypy._py as py from rpython.rlib import rgc rgc._heap_stats = cls._heap_stats diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.interpreter.module import Module from pypy.interpreter import gateway from pypy.interpreter.error import OperationError diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py class AppTestItertools: diff --git a/pypy/module/math/test/test_direct.py b/pypy/module/math/test/test_direct.py --- a/pypy/module/math/test/test_direct.py +++ b/pypy/module/math/test/test_direct.py @@ -1,7 +1,7 @@ """ Try to test systematically all cases of the math module. """ -import py, sys, math +import pypy._py as py, sys, math from rpython.rlib import rfloat from rpython.rtyper.lltypesystem.module.test.math_cases import (MathTests, get_tester) diff --git a/pypy/module/math/test/test_factorial.py b/pypy/module/math/test/test_factorial.py --- a/pypy/module/math/test/test_factorial.py +++ b/pypy/module/math/test/test_factorial.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import math from pypy.module.math import app_math diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,5 +1,5 @@ -import py +import pypy._py as py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, FunctionCall, FakeSpace, W_NDimArray) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1,4 +1,4 @@ -import py, sys +import pypy._py as py, sys from pypy.conftest import option from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest from pypy.interpreter.gateway import interp2app diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import sys from pypy.conftest import option diff --git a/pypy/module/micronumpy/test/test_outarg.py b/pypy/module/micronumpy/test/test_outarg.py --- a/pypy/module/micronumpy/test/test_outarg.py +++ b/pypy/module/micronumpy/test/test_outarg.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest class AppTestOutArg(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest diff --git 
a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -3,7 +3,7 @@ good assembler """ -import py +import pypy._py as py from rpython.jit.metainterp import pyjitpl from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.warmspot import reset_jit, get_stats diff --git a/pypy/module/oracle/roci.py b/pypy/module/oracle/roci.py --- a/pypy/module/oracle/roci.py +++ b/pypy/module/oracle/roci.py @@ -3,7 +3,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.conftest import option import sys, os -import py +import pypy._py as py oracle_home = getattr(option, 'oracle_home', os.environ.get("ORACLE_HOME")) diff --git a/pypy/module/oracle/test/conftest.py b/pypy/module/oracle/test/conftest.py --- a/pypy/module/oracle/test/conftest.py +++ b/pypy/module/oracle/test/conftest.py @@ -1,3 +1,3 @@ -import py +import pypy._py as py def pytest_runtest_setup(): py.test.importorskip("pypy.module.oracle.roci") diff --git a/pypy/module/oracle/test/test_connect.py b/pypy/module/oracle/test/test_connect.py --- a/pypy/module/oracle/test/test_connect.py +++ b/pypy/module/oracle/test/test_connect.py @@ -1,6 +1,6 @@ from pypy.conftest import option from rpython.rtyper.tool.rffi_platform import CompilationError -import py +import pypy._py as py class OracleNotConnectedTestBase(object): spaceconfig = dict(usemodules=('oracle',)) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -9,7 +9,7 @@ from rpython.rtyper.module.ll_os import RegisterOs from rpython.translator.c.test.test_extfunc import need_sparse_files import os -import py +import pypy._py as py import sys import signal diff --git a/pypy/module/pwd/test/test_pwd.py b/pypy/module/pwd/test/test_pwd.py --- a/pypy/module/pwd/test/test_pwd.py +++ b/pypy/module/pwd/test/test_pwd.py @@ -1,5 +1,5 @@ import os -import py +import pypy._py as py if os.name != 'posix': py.test.skip('pwd module only available on unix') diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -10,7 +10,7 @@ import sys import weakref -import py +import pypy._py as py if sys.platform == "win32": libname = 'libexpat' diff --git a/pypy/module/pyexpat/test/test_build.py b/pypy/module/pyexpat/test/test_build.py --- a/pypy/module/pyexpat/test/test_build.py +++ b/pypy/module/pyexpat/test/test_build.py @@ -5,7 +5,7 @@ from rpython.rtyper.tool.rffi_platform import CompilationError import os -import py +import pypy._py as py try: import pyexpat except ImportError: diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -1,5 +1,5 @@ -import py +import pypy._py as py from pypy.interpreter.gateway import interp2app from pypy.interpreter.pycode import PyCode from rpython.jit.metainterp.history import JitCellToken, ConstInt, ConstPtr,\ diff --git a/pypy/module/pypyjit/test/test_pyframe.py b/pypy/module/pypyjit/test/test_pyframe.py --- a/pypy/module/pypyjit/test/test_pyframe.py +++ b/pypy/module/pypyjit/test/test_pyframe.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py class TestJitTraceInteraction(object): diff --git 
a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import sys import re import os.path diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -2,7 +2,7 @@ import sys, os import types import subprocess -import py +import pypy._py as py from lib_pypy import disassembler from rpython.tool.udir import udir from rpython.tool import logparser diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py --- a/pypy/module/pypyjit/test_pypy_c/test_alloc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py @@ -1,4 +1,4 @@ -import py, sys +import pypy._py as py, sys from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestAlloc(BaseTestPyPyC): diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -1,4 +1,4 @@ -import py, sys +import pypy._py as py, sys from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestArray(BaseTestPyPyC): diff --git a/pypy/module/pypyjit/test_pypy_c/test_boolrewrite.py b/pypy/module/pypyjit/test_pypy_c/test_boolrewrite.py --- a/pypy/module/pypyjit/test_pypy_c/test_boolrewrite.py +++ b/pypy/module/pypyjit/test_pypy_c/test_boolrewrite.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestBoolRewrite(BaseTestPyPyC): diff --git a/pypy/module/pypyjit/test_pypy_c/test_bug.py b/pypy/module/pypyjit/test_pypy_c/test_bug.py --- a/pypy/module/pypyjit/test_pypy_c/test_bug.py +++ b/pypy/module/pypyjit/test_pypy_c/test_bug.py @@ -1,4 +1,4 @@ -import os, sys, py, subprocess +import os, sys, pypy._py as py, subprocess localdir = os.path.dirname(os.path.abspath(__file__)) diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC from pypy.module.pypyjit.test_pypy_c.model import OpMatcher diff --git a/pypy/module/pypyjit/test_pypy_c/test_exception.py b/pypy/module/pypyjit/test_pypy_c/test_exception.py --- a/pypy/module/pypyjit/test_pypy_c/test_exception.py +++ b/pypy/module/pypyjit/test_pypy_c/test_exception.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestException(BaseTestPyPyC): diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -1,4 +1,4 @@ -import sys, py +import sys, pypy._py as py from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class Test__ffi(BaseTestPyPyC): diff --git a/pypy/module/pypyjit/test_pypy_c/test_import.py b/pypy/module/pypyjit/test_pypy_c/test_import.py --- a/pypy/module/pypyjit/test_pypy_c/test_import.py +++ b/pypy/module/pypyjit/test_pypy_c/test_import.py @@ -1,4 +1,4 @@ 
-import py +import pypy._py as py from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestImport(BaseTestPyPyC): diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestInstance(BaseTestPyPyC): diff --git a/pypy/module/pypyjit/test_pypy_c/test_intbound.py b/pypy/module/pypyjit/test_pypy_c/test_intbound.py --- a/pypy/module/pypyjit/test_pypy_c/test_intbound.py +++ b/pypy/module/pypyjit/test_pypy_c/test_intbound.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestIntbound(BaseTestPyPyC): diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import re from rpython.tool.logparser import extract_category diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -1,4 +1,4 @@ -import py, sys +import pypy._py as py, sys from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC diff --git a/pypy/module/pypyjit/test_pypy_c/test_shift.py b/pypy/module/pypyjit/test_pypy_c/test_shift.py --- a/pypy/module/pypyjit/test_pypy_c/test_shift.py +++ b/pypy/module/pypyjit/test_pypy_c/test_shift.py @@ -1,4 +1,4 @@ -import py, sys +import pypy._py as py, sys from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC class TestShift(BaseTestPyPyC): diff --git a/pypy/module/select/conftest.py b/pypy/module/select/conftest.py --- a/pypy/module/select/conftest.py +++ b/pypy/module/select/conftest.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py def pytest_collect_directory(): py.test.importorskip("ctypes") diff --git a/pypy/module/select/test/test_epoll.py b/pypy/module/select/test/test_epoll.py --- a/pypy/module/select/test/test_epoll.py +++ b/pypy/module/select/test/test_epoll.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import sys # add a larger timeout for slow ARM machines diff --git a/pypy/module/select/test/test_kqueue.py b/pypy/module/select/test/test_kqueue.py --- a/pypy/module/select/test/test_kqueue.py +++ b/pypy/module/select/test/test_kqueue.py @@ -1,6 +1,6 @@ # adapted from CPython: Lib/test/test_kqueue.py -import py +import pypy._py as py import sys class AppTestKqueue(object): diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -1,6 +1,6 @@ import sys -import py +import pypy._py as py from pypy.interpreter.error import OperationError diff --git a/pypy/module/signal/test/test_signal.py b/pypy/module/signal/test/test_signal.py --- 
a/pypy/module/signal/test/test_signal.py +++ b/pypy/module/signal/test/test_signal.py @@ -1,4 +1,4 @@ -import os, py, sys +import os, pypy._py as py, sys import signal as cpy_signal diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -2,7 +2,7 @@ Tests for the struct module implemented at interp-level in pypy/module/struct. """ -import py +import pypy._py as py from rpython.rlib.rstruct.nativefmttable import native_is_bigendian diff --git a/pypy/module/sys/test/test_encoding.py b/pypy/module/sys/test/test_encoding.py --- a/pypy/module/sys/test/test_encoding.py +++ b/pypy/module/sys/test/test_encoding.py @@ -1,4 +1,4 @@ -import os, py +import os, pypy._py as py from rpython.rlib import rlocale from pypy.module.sys.interp_encoding import _getfilesystemencoding from pypy.module.sys.interp_encoding import base_encoding diff --git a/pypy/module/sys/test/test_initpath.py b/pypy/module/sys/test/test_initpath.py --- a/pypy/module/sys/test/test_initpath.py +++ b/pypy/module/sys/test/test_initpath.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import os.path from pypy.module.sys.initpath import (compute_stdlib_path, find_executable, find_stdlib, resolvedirof) diff --git a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py --- a/pypy/module/termios/test/test_termios.py +++ b/pypy/module/termios/test/test_termios.py @@ -1,6 +1,6 @@ import os import sys -import py +import pypy._py as py from pypy.conftest import pypydir from rpython.tool.udir import udir diff --git a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py +import pypy._py as py import sys, ctypes from cffi import FFI, CDefError from pypy.module.test_lib_pypy.cffi_tests.support import * diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_cdata.py b/pypy/module/test_lib_pypy/cffi_tests/test_cdata.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_cdata.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_cdata.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py +import pypy._py as py from cffi import FFI class FakeBackend(object): diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_ctypes.py b/pypy/module/test_lib_pypy/cffi_tests/test_ctypes.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_ctypes.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_ctypes.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py, sys +import pypy._py as py, sys from pypy.module.test_lib_pypy.cffi_tests import backend_tests from cffi.backend_ctypes import CTypesBackend diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_ffi_backend.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py, sys, platform +import pypy._py as py, sys, platform import pytest from pypy.module.test_lib_pypy.cffi_tests import backend_tests, test_function, test_ownlib from cffi import FFI diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/test_function.py --- 
a/pypy/module/test_lib_pypy/cffi_tests/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_function.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py +import pypy._py as py from cffi import FFI import math, os, sys import ctypes.util diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_ownlib.py b/pypy/module/test_lib_pypy/cffi_tests/test_ownlib.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_ownlib.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_ownlib.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py, sys +import pypy._py as py, sys import subprocess, weakref from cffi import FFI from cffi.backend_ctypes import CTypesBackend diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_parsing.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py, sys, re +import pypy._py as py, sys, re from cffi import FFI, FFIError, CDefError, VerificationError class FakeBackend(object): diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_verify.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py +import pypy._py as py import sys, os, math, weakref from cffi import FFI, VerificationError, VerificationMissing, model from pypy.module.test_lib_pypy.cffi_tests.support import * diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_version.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_version.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py, os, sys +import pypy._py as py, os, sys import cffi, _cffi_backend def setup_module(mod): diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py @@ -1,6 +1,6 @@ # Generated by pypy/tool/import_cffi.py import sys, os, imp, math, shutil -import py +import pypy._py as py from cffi import FFI, FFIError from cffi.verifier import Verifier, _locate_engine_class, _get_so_suffixes from cffi.ffiplatform import maybe_relative_path diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_zintegration.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py, os, sys, shutil +import pypy._py as py, os, sys, shutil import imp import subprocess from pypy.module.test_lib_pypy.cffi_tests.udir import udir diff --git a/pypy/module/test_lib_pypy/cffi_tests/udir.py b/pypy/module/test_lib_pypy/cffi_tests/udir.py --- a/pypy/module/test_lib_pypy/cffi_tests/udir.py +++ b/pypy/module/test_lib_pypy/cffi_tests/udir.py @@ -1,4 +1,4 @@ # Generated by pypy/tool/import_cffi.py -import py +import pypy._py as py udir = py.path.local.make_numbered_dir(prefix = 'ffi-') diff --git a/pypy/module/test_lib_pypy/ctypes_tests/conftest.py b/pypy/module/test_lib_pypy/ctypes_tests/conftest.py --- a/pypy/module/test_lib_pypy/ctypes_tests/conftest.py +++ 
b/pypy/module/test_lib_pypy/ctypes_tests/conftest.py @@ -1,4 +1,4 @@ -import py, pytest +import pypy._py as py, pytest import sys def pytest_ignore_collect(path): From noreply at buildbot.pypy.org Fri Jan 31 00:24:33 2014 From: noreply at buildbot.pypy.org (Aquana) Date: Fri, 31 Jan 2014 00:24:33 +0100 (CET) Subject: [pypy-commit] pypy prepare-split: missed a few Message-ID: <20140130232433.5AB701C396F@cobra.cs.uni-duesseldorf.de> Author: Alexander Hesse Branch: prepare-split Changeset: r69026:16b33a5a5911 Date: 2014-01-31 00:23 +0100 http://bitbucket.org/pypy/pypy/changeset/16b33a5a5911/ Log: missed a few diff --git a/pypy/_py/_code/_assertionnew.py b/pypy/_py/_code/_assertionnew.py --- a/pypy/_py/_code/_assertionnew.py +++ b/pypy/_py/_code/_assertionnew.py @@ -7,7 +7,7 @@ import ast import pypy._py as py -from py._code.assertion import _format_explanation, BuiltinAssertionError +from pypy._py._code.assertion import _format_explanation, BuiltinAssertionError if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): diff --git a/pypy/_py/_code/_assertionold.py b/pypy/_py/_code/_assertionold.py --- a/pypy/_py/_code/_assertionold.py +++ b/pypy/_py/_code/_assertionold.py @@ -1,7 +1,7 @@ import pypy._py as py import sys, inspect from compiler import parse, ast, pycodegen -from py._code.assertion import BuiltinAssertionError, _format_explanation +from pypy._py._code.assertion import BuiltinAssertionError, _format_explanation passthroughex = py.builtin._sysex diff --git a/pypy/_py/_code/assertion.py b/pypy/_py/_code/assertion.py --- a/pypy/_py/_code/assertion.py +++ b/pypy/_py/_code/assertion.py @@ -86,9 +86,9 @@ AssertionError.__module__ = "builtins" reinterpret_old = "old reinterpretation not available for py3" else: - from py._code._assertionold import interpret as reinterpret_old + from pypy._py._code._assertionold import interpret as reinterpret_old if sys.version_info >= (2, 6) or (sys.platform.startswith("java")): - from py._code._assertionnew import interpret as reinterpret + from pypy._py._code._assertionnew import interpret as reinterpret else: reinterpret = reinterpret_old diff --git a/pypy/_py/_code/code.py b/pypy/_py/_code/code.py --- a/pypy/_py/_code/code.py +++ b/pypy/_py/_code/code.py @@ -37,7 +37,7 @@ def fullsource(self): """ return a py.code.Source object for the full source file of the code """ - from py._code import source + from pypy._py._code import source full, _ = source.findsource(self.raw) return full fullsource = property(fullsource, None, None, @@ -715,7 +715,7 @@ def patch_builtins(assertion=True, compile=True): """ put compile and AssertionError builtins to Python's builtins. 
""" if assertion: - from py._code import assertion + from pypy._py._code import assertion l = oldbuiltins.setdefault('AssertionError', []) l.append(py.builtin.builtins.AssertionError) py.builtin.builtins.AssertionError = assertion.AssertionError diff --git a/pypy/_py/_path/local.py b/pypy/_py/_path/local.py --- a/pypy/_py/_path/local.py +++ b/pypy/_py/_path/local.py @@ -3,7 +3,7 @@ """ import sys, os, stat, re, atexit import pypy._py as py -from py._path import common +from pypy._py._path import common iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt') diff --git a/pypy/_py/_path/svnurl.py b/pypy/_py/_path/svnurl.py --- a/pypy/_py/_path/svnurl.py +++ b/pypy/_py/_path/svnurl.py @@ -7,9 +7,9 @@ import os, sys, time, re import pypy._py as py from pypy._py import path, process -from py._path import common -from py._path import svnwc as svncommon -from py._path.cacheutil import BuildcostAccessCache, AgingCache +from pypy._py._path import common +from pypy._py._path import svnwc as svncommon +from pypy._py._path.cacheutil import BuildcostAccessCache, AgingCache DEBUG=False diff --git a/pypy/_py/_path/svnwc.py b/pypy/_py/_path/svnwc.py --- a/pypy/_py/_path/svnwc.py +++ b/pypy/_py/_path/svnwc.py @@ -8,7 +8,7 @@ import os, sys, time, re, calendar import pypy._py as py import subprocess -from py._path import common +from pypy._py._path import common #----------------------------------------------------------- # Caching latest repository revision and repo-paths diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -1,4 +1,4 @@ -from py.test import raises +from pypy._py.test import raises from pypy.interpreter.error import OperationError from pypy.interpreter.function import Function from pypy.interpreter.pycode import PyCode diff --git a/pypy/interpreter/test/test_syntax.py b/pypy/interpreter/test/test_syntax.py --- a/pypy/interpreter/test/test_syntax.py +++ b/pypy/interpreter/test/test_syntax.py @@ -765,7 +765,7 @@ if __name__ == '__main__': # only to check on top of CPython (you need 2.4) - from py.test import raises + from pypy._py.test import raises for s in VALID: try: compile(s, '?', 'exec') diff --git a/pypy/module/_sre/test/test_app_sre.py b/pypy/module/_sre/test/test_app_sre.py --- a/pypy/module/_sre/test/test_app_sre.py +++ b/pypy/module/_sre/test/test_app_sre.py @@ -1,7 +1,7 @@ """Regular expression tests specific to _sre.py and accumulated during TDD.""" import os import pypy._py as py -from py.test import raises, skip +from pypy._py.test import raises, skip from pypy.interpreter.gateway import app2interp_temp def init_app_test(cls, space): diff --git a/pypy/module/array/test/test_array_old.py b/pypy/module/array/test/test_array_old.py --- a/pypy/module/array/test/test_array_old.py +++ b/pypy/module/array/test/test_array_old.py @@ -1,7 +1,7 @@ # minimal tests. See also lib-python/modified-2.4.1/test/test_array. 
import pypy._py as py -from py.test import raises +from pypy._py.test import raises import struct diff --git a/pypy/module/bz2/test/test_bz2_compdecomp.py b/pypy/module/bz2/test/test_bz2_compdecomp.py --- a/pypy/module/bz2/test/test_bz2_compdecomp.py +++ b/pypy/module/bz2/test/test_bz2_compdecomp.py @@ -10,7 +10,7 @@ HUGE_OK = False if os.name == "nt": - from py.test import skip + from pypy._py.test import skip skip("bz2 module is not available on Windows") def setup_module(mod): diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -10,7 +10,7 @@ if os.name == "nt": - from py.test import skip + from pypy._py.test import skip skip("bz2 module is not available on Windows") def setup_module(mod): diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -35,7 +35,7 @@ from pypy.module import exceptions from pypy.module.exceptions import interp_exceptions # CPython 2.4 compatibility -from py.builtin import BaseException +from pypy._py.builtin import BaseException from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop diff --git a/pypy/module/test_lib_pypy/test_coroutine.py b/pypy/module/test_lib_pypy/test_coroutine.py --- a/pypy/module/test_lib_pypy/test_coroutine.py +++ b/pypy/module/test_lib_pypy/test_coroutine.py @@ -1,5 +1,5 @@ from __future__ import absolute_import -from py.test import skip, raises +from pypy._py.test import skip, raises skip('test needs to be updated') diff --git a/pypy/module/test_lib_pypy/test_stackless.py b/pypy/module/test_lib_pypy/test_stackless.py --- a/pypy/module/test_lib_pypy/test_stackless.py +++ b/pypy/module/test_lib_pypy/test_stackless.py @@ -5,7 +5,7 @@ 3. 
pypy-c """ from __future__ import absolute_import -from py.test import skip +from pypy._py.test import skip try: import stackless except ImportError: diff --git a/pypy/module/test_lib_pypy/test_stackless_pickling.py b/pypy/module/test_lib_pypy/test_stackless_pickling.py --- a/pypy/module/test_lib_pypy/test_stackless_pickling.py +++ b/pypy/module/test_lib_pypy/test_stackless_pickling.py @@ -1,5 +1,5 @@ from __future__ import absolute_import -from py.test import skip +from pypy._py.test import skip try: import stackless except ImportError: diff --git a/pypy/module/zlib/test/test_zlib.py b/pypy/module/zlib/test/test_zlib.py --- a/pypy/module/zlib/test/test_zlib.py +++ b/pypy/module/zlib/test/test_zlib.py @@ -6,12 +6,12 @@ try: import zlib except ImportError: - import py; py.test.skip("no zlib module on this host Python") + import pypy._py as py; py.test.skip("no zlib module on this host Python") try: from pypy.module.zlib import interp_zlib except ImportError: - import py; py.test.skip("no zlib C library on this machine") + import pypy._py as py; py.test.skip("no zlib C library on this machine") def test_unsigned_to_signed_32bit(): assert interp_zlib.unsigned_to_signed_32bit(123) == 123 diff --git a/pypy/objspace/std/test/test_index.py b/pypy/objspace/std/test/test_index.py --- a/pypy/objspace/std/test/test_index.py +++ b/pypy/objspace/std/test/test_index.py @@ -1,4 +1,4 @@ -from py.test import raises +from pypy._py.test import raises class AppTest_IndexProtocol: def setup_class(self): diff --git a/pypy/objspace/std/test/test_multimethod.py b/pypy/objspace/std/test/test_multimethod.py --- a/pypy/objspace/std/test/test_multimethod.py +++ b/pypy/objspace/std/test/test_multimethod.py @@ -1,4 +1,4 @@ -from py.test import raises +from pypy._py.test import raises from pypy.objspace.std import multimethod from pypy.objspace.std.multimethod import FailedToImplement diff --git a/pypy/objspace/std/test/test_stdobjspace.py b/pypy/objspace/std/test/test_stdobjspace.py --- a/pypy/objspace/std/test/test_stdobjspace.py +++ b/pypy/objspace/std/test/test_stdobjspace.py @@ -1,5 +1,5 @@ import pypy._py as py -from py.test import raises +from pypy._py.test import raises from pypy.interpreter.error import OperationError from pypy.tool.pytest.objspace import gettestobjspace diff --git a/pypy/tool/genstatistic.py b/pypy/tool/genstatistic.py --- a/pypy/tool/genstatistic.py +++ b/pypy/tool/genstatistic.py @@ -1,7 +1,7 @@ import pypy._py as py -from py._cmdline import pycountloc as countloc -from py.xml import raw +from pypy._py._cmdline import pycountloc as countloc +from pypy._py.xml import raw from pypy import conftest pypydir = py.path.local(conftest.pypydir) diff --git a/pypy/tool/importfun.py b/pypy/tool/importfun.py --- a/pypy/tool/importfun.py +++ b/pypy/tool/importfun.py @@ -495,7 +495,7 @@ return link def html_for_module(module): - from py.xml import html + from pypy._py.xml import html out = file_for_module(module) ourlink = link_for_module('', module) head = [html.title(module.name)] From noreply at buildbot.pypy.org Fri Jan 31 00:24:39 2014 From: noreply at buildbot.pypy.org (Aquana) Date: Fri, 31 Jan 2014 00:24:39 +0100 (CET) Subject: [pypy-commit] pypy prepare-split: updated py imports from _pytest/ to pypy._py Message-ID: <20140130232439.A937C1C396F@cobra.cs.uni-duesseldorf.de> Author: Alexander Hesse Branch: prepare-split Changeset: r69027:6e7d0e9dc336 Date: 2014-01-31 00:23 +0100 http://bitbucket.org/pypy/pypy/changeset/6e7d0e9dc336/ Log: updated py imports from _pytest/ to pypy._py diff --git 
a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -1,7 +1,7 @@ """ support for presenting detailed information in failing assertions. """ -import py +import pypy._py as py import sys import pytest from _pytest.monkeypatch import monkeypatch diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py --- a/_pytest/assertion/newinterpret.py +++ b/_pytest/assertion/newinterpret.py @@ -6,7 +6,7 @@ import sys import ast -import py +import pypy._py as py from _pytest.assertion import util from _pytest.assertion.reinterpret import BuiltinAssertionError diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -1,4 +1,4 @@ -import py +import pypy._py as py import sys, inspect from compiler import parse, ast, pycodegen from _pytest.assertion.util import format_explanation, BuiltinAssertionError diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py --- a/_pytest/assertion/reinterpret.py +++ b/_pytest/assertion/reinterpret.py @@ -1,5 +1,5 @@ import sys -import py +import pypy._py as py from _pytest.assertion.util import BuiltinAssertionError class AssertionError(BuiltinAssertionError): diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -10,7 +10,7 @@ import sys import types -import py +import pypy._py as py from _pytest.assertion import util diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,6 +1,6 @@ """Utilities for assertion debugging""" -import py +import pypy._py as py BuiltinAssertionError = py.builtin.builtins.AssertionError diff --git a/_pytest/capture.py b/_pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -1,6 +1,6 @@ """ per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """ -import pytest, py +import pytest, pypy._py as py import os def pytest_addoption(parser): diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,6 +1,6 @@ """ command line options, ini-file and conftest.py processing. """ -import py +import pypy._py as py import sys, os from _pytest.core import PluginManager import pytest @@ -125,7 +125,7 @@ class Conftest(object): """ the single place for accessing values and interacting - towards conftest modules from py.test objects. + towards conftest modules from pypy._py.test objects. 
""" def __init__(self, onimport=None, confcutdir=None): self._path2confmods = {} diff --git a/_pytest/core.py b/_pytest/core.py --- a/_pytest/core.py +++ b/_pytest/core.py @@ -4,7 +4,7 @@ """ import sys, os import inspect -import py +import pypy._py as py from _pytest import hookspec # the extension point definitions assert py.__version__.split(".")[:2] >= ['1', '4'], ("installation problem: " diff --git a/_pytest/doctest.py b/_pytest/doctest.py --- a/_pytest/doctest.py +++ b/_pytest/doctest.py @@ -1,7 +1,7 @@ """ discover and run doctests in modules and test files.""" -import pytest, py -from py._code.code import TerminalRepr, ReprFileLocation +import pytest, pypy._py as py +from pypy._py._code.code import TerminalRepr, ReprFileLocation def pytest_addoption(parser): group = parser.getgroup("collect") diff --git a/_pytest/genscript.py b/_pytest/genscript.py --- a/_pytest/genscript.py +++ b/_pytest/genscript.py @@ -1,5 +1,5 @@ """ generate a single-file self-contained version of py.test """ -import py +import pypy._py as py def find_toplevel(name): for syspath in py.std.sys.path: diff --git a/_pytest/helpconfig.py b/_pytest/helpconfig.py --- a/_pytest/helpconfig.py +++ b/_pytest/helpconfig.py @@ -1,5 +1,5 @@ """ version info, help messages, tracing configuration. """ -import py +import pypy._py as py import pytest import os, inspect, sys from _pytest.core import varnames diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py --- a/_pytest/junitxml.py +++ b/_pytest/junitxml.py @@ -3,7 +3,7 @@ Based on initial code from Ross Lawley. """ -import py +import pypy._py as py import os import re import sys diff --git a/_pytest/main.py b/_pytest/main.py --- a/_pytest/main.py +++ b/_pytest/main.py @@ -1,6 +1,6 @@ """ core implementation of testing process: init, session, runtest loop. """ -import py +import pypy._py as py import pytest, _pytest import os, sys, imp tracebackcutdir = py.path.local(_pytest.__file__).dirpath() diff --git a/_pytest/mark.py b/_pytest/mark.py --- a/_pytest/mark.py +++ b/_pytest/mark.py @@ -1,5 +1,5 @@ """ generic mechanism for marking and selecting python functions. """ -import pytest, py +import pytest, pypy._py as py def pytest_namespace(): return {'mark': MarkGenerator()} @@ -115,7 +115,7 @@ """ Factory for :class:`MarkDecorator` objects - exposed as a ``py.test.mark`` singleton instance. Example:: - import py + import pypy._py as py @py.test.mark.slowtest def test_function(): pass diff --git a/_pytest/nose.py b/_pytest/nose.py --- a/_pytest/nose.py +++ b/_pytest/nose.py @@ -1,6 +1,6 @@ """ run test suites written for nose. """ -import pytest, py +import pytest, pypy._py as py import inspect import sys diff --git a/_pytest/pastebin.py b/_pytest/pastebin.py --- a/_pytest/pastebin.py +++ b/_pytest/pastebin.py @@ -1,5 +1,5 @@ """ submit failure or test session information to a pastebin service. """ -import py, sys +import pypy._py as py, sys class url: base = "http://paste.pocoo.org" diff --git a/_pytest/pdb.py b/_pytest/pdb.py --- a/_pytest/pdb.py +++ b/_pytest/pdb.py @@ -1,6 +1,6 @@ """ interactive debugging with PDB, the Python Debugger. """ -import pytest, py +import pytest, pypy._py as py import sys def pytest_addoption(parser): diff --git a/_pytest/pytester.py b/_pytest/pytester.py --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -1,13 +1,13 @@ """ (disabled by default) support for testing py.test and py.test plugins. 
""" -import py, pytest +import pypy._py as py, pytest import sys, os import re import inspect import time from fnmatch import fnmatch from _pytest.main import Session, EXIT_OK -from py.builtin import print_ +from pypy._py.builtin import print_ from _pytest.core import HookRelay def pytest_addoption(parser): @@ -114,7 +114,7 @@ def contains(self, entries): __tracebackhide__ = True - from py.builtin import print_ + from pypy._py.builtin import print_ i = 0 entries = list(entries) backlocals = py.std.sys._getframe(1).f_locals diff --git a/_pytest/python.py b/_pytest/python.py --- a/_pytest/python.py +++ b/_pytest/python.py @@ -1,9 +1,9 @@ """ Python test discovery, setup and run of test functions. """ -import py +import pypy._py as py import inspect import sys import pytest -from py._code.code import TerminalRepr +from pypy._py._code.code import TerminalRepr from _pytest.monkeypatch import monkeypatch import _pytest diff --git a/_pytest/recwarn.py b/_pytest/recwarn.py --- a/_pytest/recwarn.py +++ b/_pytest/recwarn.py @@ -1,6 +1,6 @@ """ recording warnings during test function execution. """ -import py +import pypy._py as py import sys, os def pytest_funcarg__recwarn(request): diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -1,6 +1,6 @@ """ (disabled by default) create result information in a plain text file. """ -import py +import pypy._py as py def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "resultlog plugin options") diff --git a/_pytest/runner.py b/_pytest/runner.py --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -1,7 +1,7 @@ """ basic collect and runtest protocol implementations """ -import py, sys, time -from py._code.code import TerminalRepr +import pypy._py as py, sys, time +from pypy._py._code.code import TerminalRepr def pytest_namespace(): return { diff --git a/_pytest/skipping.py b/_pytest/skipping.py --- a/_pytest/skipping.py +++ b/_pytest/skipping.py @@ -1,6 +1,6 @@ """ support for skip/xfail functions and markers. """ -import py, pytest +import pypy._py as py, pytest import sys def pytest_addoption(parser): diff --git a/_pytest/terminal.py b/_pytest/terminal.py --- a/_pytest/terminal.py +++ b/_pytest/terminal.py @@ -2,7 +2,7 @@ This is a good source for looking at the various reporting hooks. """ -import pytest, py +import pytest, pypy._py as py import sys import os diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py --- a/_pytest/tmpdir.py +++ b/_pytest/tmpdir.py @@ -1,5 +1,5 @@ """ support for providing temporary directories to test functions. """ -import pytest, py +import pytest, pypy._py as py from _pytest.monkeypatch import monkeypatch class TempdirHandler: diff --git a/_pytest/unittest.py b/_pytest/unittest.py --- a/_pytest/unittest.py +++ b/_pytest/unittest.py @@ -1,5 +1,5 @@ """ discovery and running of std-library "unittest" style tests. 
""" -import pytest, py +import pytest, pypy._py as py import sys, pdb # for transfering markers From noreply at buildbot.pypy.org Fri Jan 31 01:15:38 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 31 Jan 2014 01:15:38 +0100 (CET) Subject: [pypy-commit] pypy default: provide additional scalar methods Message-ID: <20140131001538.08E9F1C1059@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69028:1ae08cdaa8dc Date: 2014-01-30 19:14 -0500 http://bitbucket.org/pypy/pypy/changeset/1ae08cdaa8dc/ Log: provide additional scalar methods diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -586,6 +586,10 @@ __hash__ = interp2app(W_GenericBox.descr_hash), tolist = interp2app(W_GenericBox.item), + min = interp2app(W_GenericBox.descr_self), + max = interp2app(W_GenericBox.descr_self), + sum = interp2app(W_GenericBox.descr_self), + prod = interp2app(W_GenericBox.descr_self), any = interp2app(W_GenericBox.descr_any), all = interp2app(W_GenericBox.descr_all), ravel = interp2app(W_GenericBox.descr_ravel), diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -102,6 +102,12 @@ assert b == a assert b is not a + def test_methods(self): + import numpy as np + for a in [np.int32(2), np.float64(2.0), np.complex64(42)]: + for op in ['min', 'max', 'sum', 'prod']: + assert getattr(a, op)() == a + def test_buffer(self): import numpy as np a = np.int32(123) From noreply at buildbot.pypy.org Fri Jan 31 01:21:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 31 Jan 2014 01:21:16 +0100 (CET) Subject: [pypy-commit] pypy default: a couple more scalar methods Message-ID: <20140131002116.C32B11C0134@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69029:2ee84c0996da Date: 2014-01-30 19:20 -0500 http://bitbucket.org/pypy/pypy/changeset/2ee84c0996da/ Log: a couple more scalar methods diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -256,6 +256,10 @@ value = space.is_true(self) return get_dtype_cache(space).w_booldtype.box(value) + def descr_zero(self, space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return get_dtype_cache(space).w_longdtype.box(0) + def descr_ravel(self, space): from pypy.module.micronumpy.base import convert_to_array w_values = space.newtuple([self]) @@ -588,6 +592,8 @@ tolist = interp2app(W_GenericBox.item), min = interp2app(W_GenericBox.descr_self), max = interp2app(W_GenericBox.descr_self), + argmin = interp2app(W_GenericBox.descr_zero), + argmax = interp2app(W_GenericBox.descr_zero), sum = interp2app(W_GenericBox.descr_self), prod = interp2app(W_GenericBox.descr_self), any = interp2app(W_GenericBox.descr_any), diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -107,6 +107,10 @@ for a in [np.int32(2), np.float64(2.0), np.complex64(42)]: for op in ['min', 'max', 'sum', 'prod']: assert getattr(a, op)() == a + for op in ['argmin', 'argmax']: + b = getattr(a, op)() + assert type(b) is np.int_ + assert b == 0 def test_buffer(self): import numpy as np From 
noreply at buildbot.pypy.org Fri Jan 31 02:30:27 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 31 Jan 2014 02:30:27 +0100 (CET) Subject: [pypy-commit] pypy default: sync/port the cpython issue12802 fix from py3k to rpython proper Message-ID: <20140131013027.0DA601D22C6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69030:724698bcb8ce Date: 2014-01-30 15:39 -0800 http://bitbucket.org/pypy/pypy/changeset/724698bcb8ce/ Log: sync/port the cpython issue12802 fix from py3k to rpython proper diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -446,6 +446,9 @@ if hasattr(rwin32, 'build_winerror_to_errno'): _winerror_to_errno, _default_errno = rwin32.build_winerror_to_errno() + # Python 2 doesn't map ERROR_DIRECTORY (267) to ENOTDIR but + # Python 3 (CPython issue #12802) and build_winerror_to_errno do + del _winerror_to_errno[267] else: _winerror_to_errno, _default_errno = {}, 22 # EINVAL diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -178,8 +178,13 @@ int i; for(i=1; i < 65000; i++) { _dosmaperr(i); - if (errno == EINVAL) - continue; + if (errno == EINVAL) { + /* CPython issue #12802 */ + if (i == ERROR_DIRECTORY) + errno = ENOTDIR; + else + continue; + } printf("%d\t%d\n", i, errno); } return 0; @@ -201,7 +206,7 @@ 132: 13, 145: 41, 158: 13, 161: 2, 164: 11, 167: 13, 183: 17, 188: 8, 189: 8, 190: 8, 191: 8, 192: 8, 193: 8, 194: 8, 195: 8, 196: 8, 197: 8, 198: 8, 199: 8, 200: 8, 201: 8, - 202: 8, 206: 2, 215: 11, 1816: 12, + 202: 8, 206: 2, 215: 11, 267: 20, 1816: 12, } else: output = os.popen(str(exename)) From noreply at buildbot.pypy.org Fri Jan 31 02:30:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 31 Jan 2014 02:30:33 +0100 (CET) Subject: [pypy-commit] pypy default: bring over the ability to ignore the L suffix from py3k Message-ID: <20140131013033.4401E1D22C6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69031:307818c61207 Date: 2014-01-30 15:39 -0800 http://bitbucket.org/pypy/pypy/changeset/307818c61207/ Log: bring over the ability to ignore the L suffix from py3k diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -254,16 +254,18 @@ @staticmethod @jit.elidable - def fromstr(s, base=0): - """As string_to_int(), but ignores an optional 'l' or 'L' suffix - and returns an rbigint.""" + def fromstr(s, base=0, ignore_l_suffix=False, fname='long'): + """As string_to_int(), but optionally ignores an optional 'l' or + 'L' suffix and returns an rbigint. + """ from rpython.rlib.rstring import NumberStringParser, \ strip_spaces s = literal = strip_spaces(s) - if (s.endswith('l') or s.endswith('L')) and base < 22: + if (not ignore_l_suffix and (s.endswith('l') or s.endswith('L')) and + base < 22): # in base 22 and above, 'L' is a valid digit! 
try: long('L',22) s = s[:-1] - parser = NumberStringParser(s, literal, base, 'long') + parser = NumberStringParser(s, literal, base, fname) return rbigint._from_numberstring_parser(parser) @staticmethod diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -214,8 +214,13 @@ from rpython.rlib.rstring import ParseStringError assert rbigint.fromstr('123L').tolong() == 123 assert rbigint.fromstr('123L ').tolong() == 123 + py.test.raises(ParseStringError, rbigint.fromstr, '123L ', + ignore_l_suffix=True) py.test.raises(ParseStringError, rbigint.fromstr, 'L') py.test.raises(ParseStringError, rbigint.fromstr, 'L ') + e = py.test.raises(ParseStringError, rbigint.fromstr, 'L ', + fname='int') + assert 'int()' in e.value.msg assert rbigint.fromstr('123L', 4).tolong() == 27 assert rbigint.fromstr('123L', 30).tolong() == 27000 + 1800 + 90 + 21 assert rbigint.fromstr('123L', 22).tolong() == 10648 + 968 + 66 + 21 From noreply at buildbot.pypy.org Fri Jan 31 02:30:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 31 Jan 2014 02:30:40 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140131013040.979B91D22C6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69032:3b27c6417427 Date: 2014-01-30 17:27 -0800 http://bitbucket.org/pypy/pypy/changeset/3b27c6417427/ Log: merge default diff too long, truncating to 2000 out of 2315 lines diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -2,58 +2,13 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj -from pypy.module._weakref.interp__weakref import dead_ref from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib import rweaklist -def reduced_value(s): - while True: - divide = s & 1 - s >>= 1 - if not divide: - return s - -# ____________________________________________________________ - - -class CffiHandles: +class CffiHandles(rweaklist.RWeakListMixin): def __init__(self, space): - self.handles = [] - self.look_distance = 0 - - def reserve_next_handle_index(self): - # The reservation ordering done here is tweaked for pypy's - # memory allocator. We look from index 'look_distance'. - # Look_distance increases from 0. But we also look at - # "look_distance/2" or "/4" or "/8", etc. If we find that one - # of these secondary locations is free, we assume it's because - # there was recently a minor collection; so we reset - # look_distance to 0 and start again from the lowest locations. - length = len(self.handles) - for d in range(self.look_distance, length): - if self.handles[d]() is None: - self.look_distance = d + 1 - return d - s = reduced_value(d) - if self.handles[s]() is None: - break - # restart from the beginning - for d in range(0, length): - if self.handles[d]() is None: - self.look_distance = d + 1 - return d - # full! 
extend, but don't use '+=' here - self.handles = self.handles + [dead_ref] * (length // 3 + 5) - self.look_distance = length + 1 - return length - - def store_handle(self, index, content): - self.handles[index] = weakref.ref(content) - - def fetch_handle(self, index): - if 0 <= index < len(self.handles): - return self.handles[index]() - return None + self.initialize() def get(space): return space.fromcache(CffiHandles) diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -46,7 +46,7 @@ class W_BytesIO(RStringIO, W_BufferedIOBase): def __init__(self, space): - W_BufferedIOBase.__init__(self, space) + W_BufferedIOBase.__init__(self, space, add_to_autoflusher=False) self.init() def descr_new(space, w_subtype, __args__): diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.rstring import StringBuilder -from rpython.rlib import rweakref +from rpython.rlib import rweakref, rweaklist DEFAULT_BUFFER_SIZE = 8192 @@ -39,15 +39,15 @@ class W_IOBase(W_Root): cffi_fileobj = None # pypy/module/_cffi_backend - def __init__(self, space): + def __init__(self, space, add_to_autoflusher=True): # XXX: IOBase thinks it has to maintain its own internal state in # `__IOBase_closed` and call flush() by itself, but it is redundant # with whatever behaviour a non-trivial derived class will implement. self.space = space self.w_dict = space.newdict() self.__IOBase_closed = False - self.streamholder = None # needed by AutoFlusher - get_autoflusher(space).add(self) + if add_to_autoflusher: + get_autoflusher(space).add(self) def getdict(self, space): return self.w_dict @@ -118,7 +118,6 @@ space.call_method(self, "flush") finally: self.__IOBase_closed = True - get_autoflusher(space).remove(self) def _dealloc_warn_w(self, space, w_source): """Called when the io is implicitly closed via the deconstructor""" @@ -353,55 +352,35 @@ # functions to make sure that all streams are flushed on exit # ------------------------------------------------------------ -class StreamHolder(object): - def __init__(self, w_iobase): - self.w_iobase_ref = rweakref.ref(w_iobase) - w_iobase.autoflusher = self - def autoflush(self, space): - w_iobase = self.w_iobase_ref() - if w_iobase is not None: - try: - space.call_method(w_iobase, 'flush') - except OperationError: - # Silencing all errors is bad, but getting randomly - # interrupted here is equally as bad, and potentially - # more frequent (because of shutdown issues). 
- pass - - -class AutoFlusher(object): +class AutoFlusher(rweaklist.RWeakListMixin): def __init__(self, space): - self.streams = {} + self.initialize() def add(self, w_iobase): - assert w_iobase.streamholder is None if rweakref.has_weakref_support(): - holder = StreamHolder(w_iobase) - w_iobase.streamholder = holder - self.streams[holder] = None + self.add_handle(w_iobase) #else: # no support for weakrefs, so ignore and we # will not get autoflushing - def remove(self, w_iobase): - holder = w_iobase.streamholder - if holder is not None: - try: - del self.streams[holder] - except KeyError: - # this can happen in daemon threads - pass - def flush_all(self, space): - while self.streams: - for streamholder in self.streams.keys(): + while True: + handles = self.get_all_handles() + if len(handles) == 0: + break + self.initialize() # reset the state here + for wr in handles: + w_iobase = wr() + if w_iobase is None: + continue try: - del self.streams[streamholder] - except KeyError: - pass # key was removed in the meantime - else: - streamholder.autoflush(space) + space.call_method(w_iobase, 'flush') + except OperationError: + # Silencing all errors is bad, but getting randomly + # interrupted here is equally as bad, and potentially + # more frequent (because of shutdown issues). + pass def get_autoflusher(space): return space.fromcache(AutoFlusher) diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -6,6 +6,7 @@ from rpython.rlib import jit from rpython.rlib.rshrinklist import AbstractShrinkList from rpython.rlib.objectmodel import specialize +from rpython.rlib.rweakref import dead_ref import weakref @@ -144,14 +145,6 @@ # ____________________________________________________________ -class Dummy: - pass -dead_ref = weakref.ref(Dummy()) -for i in range(5): - if dead_ref() is not None: - import gc; gc.collect() -assert dead_ref() is None - class W_WeakrefBase(W_Root): def __init__(w_self, space, w_obj, w_callable): diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -454,6 +454,9 @@ if hasattr(rwin32, 'build_winerror_to_errno'): _winerror_to_errno, _default_errno = rwin32.build_winerror_to_errno() + # Python 2 doesn't map ERROR_DIRECTORY (267) to ENOTDIR but + # Python 3 (CPython issue #12802) and build_winerror_to_errno do + del _winerror_to_errno[267] else: _winerror_to_errno, _default_errno = {}, 22 # EINVAL diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -367,6 +367,9 @@ return SliceArray(0, strides, backstrides, new_shape, self, orig_array) + def set_dtype(self, space, dtype): + self.dtype = dtype + def argsort(self, space, w_axis): from pypy.module.micronumpy.arrayimpl.sort import argsort_array return argsort_array(self, space, w_axis) diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -173,6 +173,10 @@ raise OperationError(space.w_ValueError, space.wrap( "total size of the array must be unchanged")) + def set_dtype(self, space, dtype): + self.value = self.value.convert_to(space, dtype) + self.dtype 
= dtype + def reshape(self, space, orig_array, new_shape): return self.set_shape(space, orig_array, new_shape) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -322,6 +322,9 @@ def descr_buffer(self, space): return self.descr_ravel(space).descr_get_data(space) + def descr_byteswap(self, space): + return self.get_dtype(space).itemtype.byteswap(self) + w_flags = None def descr_get_flags(self, space): if self.w_flags is None: @@ -588,6 +591,7 @@ view = interp2app(W_GenericBox.descr_view), squeeze = interp2app(W_GenericBox.descr_self), copy = interp2app(W_GenericBox.descr_copy), + byteswap = interp2app(W_GenericBox.descr_byteswap), dtype = GetSetProperty(W_GenericBox.descr_get_dtype), size = GetSetProperty(W_GenericBox.descr_get_size), diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -207,7 +207,7 @@ space.wrap(offset)])) return w_d - def set_fields(self, space, w_fields): + def descr_set_fields(self, space, w_fields): if w_fields == space.w_None: self.fields = None else: @@ -233,19 +233,26 @@ return space.w_None return space.newtuple([space.wrap(name) for name in self.fieldnames]) - def set_names(self, space, w_names): - self.fieldnames = [] - if w_names == space.w_None: - return - else: + def descr_set_names(self, space, w_names): + fieldnames = [] + if w_names != space.w_None: iter = space.iter(w_names) while True: try: - self.fieldnames.append(space.str_w(space.next(iter))) + name = space.str_w(space.next(iter)) except OperationError, e: if not e.match(space, space.w_StopIteration): raise break + if name in fieldnames: + raise OperationError(space.w_ValueError, space.wrap( + "Duplicate field names given.")) + fieldnames.append(name) + self.fieldnames = fieldnames + + def descr_del_names(self, space): + raise OperationError(space.w_AttributeError, space.wrap( + "Cannot delete dtype names attribute")) def descr_get_hasobject(self, space): return space.w_False @@ -322,10 +329,10 @@ self.byteorder = endian fieldnames = space.getitem(w_data, space.wrap(3)) - self.set_names(space, fieldnames) + self.descr_set_names(space, fieldnames) fields = space.getitem(w_data, space.wrap(4)) - self.set_fields(space, fields) + self.descr_set_fields(space, fields) @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY_SWAP): @@ -469,7 +476,9 @@ shape = GetSetProperty(W_Dtype.descr_get_shape), isnative = GetSetProperty(W_Dtype.descr_get_isnative), fields = GetSetProperty(W_Dtype.descr_get_fields), - names = GetSetProperty(W_Dtype.descr_get_names), + names = GetSetProperty(W_Dtype.descr_get_names, + W_Dtype.descr_set_names, + W_Dtype.descr_del_names), hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), descr = GetSetProperty(W_Dtype.descr_get_descr), ) @@ -795,29 +804,19 @@ for dtype in reversed(self.builtin_dtypes): self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.name] = dtype - can_name = dtype.kind + str(dtype.get_size()) - self.dtypes_by_name[can_name] = dtype - self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype - self.dtypes_by_name[NPY_NATIVE + can_name] = dtype - new_name = NPY_OPPBYTE + can_name - itemtype = type(dtype.itemtype)(False) - self.dtypes_by_name[new_name] = W_Dtype( - itemtype, dtype.num, dtype.kind, new_name, dtype.char, - dtype.w_box_type, byteorder=NPY_OPPBYTE, - 
float_type=dtype.float_type) - if dtype.kind != dtype.char: - can_name = dtype.char + for can_name in [dtype.kind + str(dtype.get_size()), + dtype.char]: + self.dtypes_by_name[can_name] = dtype self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype self.dtypes_by_name[NPY_NATIVE + can_name] = dtype new_name = NPY_OPPBYTE + can_name + itemtype = type(dtype.itemtype)(False) self.dtypes_by_name[new_name] = W_Dtype( itemtype, dtype.num, dtype.kind, new_name, dtype.char, dtype.w_box_type, byteorder=NPY_OPPBYTE, float_type=dtype.float_type) - for alias in dtype.aliases: self.dtypes_by_name[alias] = dtype - self.dtypes_by_name[dtype.char] = dtype typeinfo_full = { 'LONGLONG': self.w_int64dtype, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -84,6 +84,19 @@ def descr_get_dtype(self, space): return self.implementation.dtype + def descr_set_dtype(self, space, w_dtype): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + if (dtype.get_size() != self.get_dtype().get_size() or + dtype.is_flexible_type() or self.get_dtype().is_flexible_type()): + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) + self.implementation.set_dtype(space, dtype) + + def descr_del_dtype(self, space): + raise OperationError(space.w_AttributeError, space.wrap( + "Cannot delete array dtype")) + def descr_get_ndim(self, space): return space.wrap(len(self.get_shape())) @@ -489,6 +502,15 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) + def descr_itemset(self, space, args_w): + if len(args_w) == 0: + raise OperationError(space.w_ValueError, space.wrap( + "itemset must have at least one argument")) + if len(args_w) != len(self.get_shape()) + 1: + raise OperationError(space.w_ValueError, space.wrap( + "incorrect number of indices for array")) + self.descr_setitem(space, space.newtuple(args_w[:-1]), args_w[-1]) + def descr___array__(self, space, w_dtype=None): if not space.is_none(w_dtype): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -629,10 +651,6 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "getfield not implemented yet")) - def descr_itemset(self, space, w_arg): - raise OperationError(space.w_NotImplementedError, space.wrap( - "itemset not implemented yet")) - @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY_SWAP): return self.descr_view(space, @@ -948,7 +966,8 @@ def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False, cumulative=False): - def impl(self, space, w_axis=None, w_dtype=None, w_out=None): + @unwrap_spec(keepdims=bool) + def impl(self, space, w_axis=None, w_dtype=None, w_out=None, keepdims=False): if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): @@ -958,7 +977,7 @@ out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce( space, self, promote_to_largest, w_axis, - False, out, w_dtype, cumulative=cumulative) + keepdims, out, w_dtype, cumulative=cumulative) return func_with_new_name(impl, "reduce_%s_impl_%d_%d" % (ufunc_name, promote_to_largest, cumulative)) @@ -1274,7 +1293,9 @@ __gt__ = interp2app(W_NDimArray.descr_gt), __ge__ = interp2app(W_NDimArray.descr_ge), - dtype = GetSetProperty(W_NDimArray.descr_get_dtype), + dtype = GetSetProperty(W_NDimArray.descr_get_dtype, + W_NDimArray.descr_set_dtype, + 
W_NDimArray.descr_del_dtype), shape = GetSetProperty(W_NDimArray.descr_get_shape, W_NDimArray.descr_set_shape), strides = GetSetProperty(W_NDimArray.descr_get_strides), @@ -1322,6 +1343,7 @@ flat = GetSetProperty(W_NDimArray.descr_get_flatiter, W_NDimArray.descr_set_flatiter), item = interp2app(W_NDimArray.descr_item), + itemset = interp2app(W_NDimArray.descr_itemset), real = GetSetProperty(W_NDimArray.descr_get_real, W_NDimArray.descr_set_real), imag = GetSetProperty(W_NDimArray.descr_get_imag, diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -252,6 +252,11 @@ if out: out.set_scalar_value(res) return out + if keepdims: + shape = [1] * len(obj_shape) + out = W_NDimArray.from_shape(space, [1] * len(obj_shape), dtype, w_instance=obj) + out.implementation.setitem(0, res) + return out return res def descr_outer(self, space, __args__): diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -72,6 +72,8 @@ is_rec_type = dtype is not None and dtype.is_record_type() if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): return [], [w_iterable] + if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): + return [], [w_iterable] shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) while True: diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -203,6 +203,9 @@ assert array([256], 'B')[0] == 0 assert array([32768], 'h')[0] == -32768 assert array([65536], 'H')[0] == 0 + a = array([65520], dtype='float64') + b = array(a, dtype='float16') + assert b == float('inf') if dtype('l').itemsize == 4: # 32-bit raises(OverflowError, "array([2**32/2], 'i')") raises(OverflowError, "array([2**32], 'I')") @@ -784,6 +787,14 @@ assert dtype('>i8').str == '>i8' assert dtype('int8').str == '|i1' assert dtype('float').str == byteorder + 'f8' + assert dtype('f').str == byteorder + 'f4' + assert dtype('=f').str == byteorder + 'f4' + assert dtype('>f').str == '>f4' + assert dtype('d').str == '>f8' + assert dtype(' max: - raise StructError(errormsg) + if not min <= value <= max: + raise StructError(errormsg) if fmtiter.bigendian: for i in unroll_revrange_size: x = (value >> (8*i)) & 0xff @@ -228,8 +225,8 @@ for c, size in [('b', 1), ('h', 2), ('i', 4), ('l', 4), ('q', 8)]: standard_fmttable[c] = {'size': size, - 'pack': make_int_packer(size, True, True), + 'pack': make_int_packer(size, True), 'unpack': make_int_unpacker(size, True)} standard_fmttable[c.upper()] = {'size': size, - 'pack': make_int_packer(size, False, True), + 'pack': make_int_packer(size, False), 'unpack': make_int_unpacker(size, False)} diff --git a/rpython/rlib/rweaklist.py b/rpython/rlib/rweaklist.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rweaklist.py @@ -0,0 +1,60 @@ +import weakref +from rpython.rlib.rweakref import dead_ref + + +def _reduced_value(s): + while True: + divide = s & 1 + s >>= 1 + if not divide: + return s + + +class RWeakListMixin(object): + _mixin_ = True + + def initialize(self): + self.handles = [] + self.look_distance = 0 + + def get_all_handles(self): + return self.handles + + def reserve_next_handle_index(self): + # The reservation ordering done here is tweaked for pypy's + # memory 
allocator. We look from index 'look_distance'. + # Look_distance increases from 0. But we also look at + # "look_distance/2" or "/4" or "/8", etc. If we find that one + # of these secondary locations is free, we assume it's because + # there was recently a minor collection; so we reset + # look_distance to 0 and start again from the lowest locations. + length = len(self.handles) + for d in range(self.look_distance, length): + if self.handles[d]() is None: + self.look_distance = d + 1 + return d + s = _reduced_value(d) + if self.handles[s]() is None: + break + # restart from the beginning + for d in range(0, length): + if self.handles[d]() is None: + self.look_distance = d + 1 + return d + # full! extend, but don't use '+=' here + self.handles = self.handles + [dead_ref] * (length // 3 + 5) + self.look_distance = length + 1 + return length + + def add_handle(self, content): + index = self.reserve_next_handle_index() + self.store_handle(index, content) + return index + + def store_handle(self, index, content): + self.handles[index] = weakref.ref(content) + + def fetch_handle(self, index): + if 0 <= index < len(self.handles): + return self.handles[index]() + return None diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -12,6 +12,14 @@ def has_weakref_support(): return True # returns False if --no-translation-rweakref +class Dummy: + pass +dead_ref = weakref.ref(Dummy()) +for i in range(5): + if dead_ref() is not None: + import gc; gc.collect() +assert dead_ref() is None # a known-to-be-dead weakref object + class RWeakValueDictionary(object): """A dictionary containing weak values.""" diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -212,16 +212,19 @@ def test_fromstr(self): from rpython.rlib.rstring import ParseStringError - assert rbigint.fromstr(u'123').tolong() == 123 - assert rbigint.fromstr(u'123 ').tolong() == 123 - py.test.raises(ParseStringError, rbigint.fromstr, u'123L') - py.test.raises(ParseStringError, rbigint.fromstr, u'123L ') + assert rbigint.fromstr(u'123L').tolong() == 123 + assert rbigint.fromstr(u'123L ').tolong() == 123 + py.test.raises(ParseStringError, rbigint.fromstr, u'123L ', + ignore_l_suffix=True) py.test.raises(ParseStringError, rbigint.fromstr, u'L') py.test.raises(ParseStringError, rbigint.fromstr, u'L ') - assert rbigint.fromstr(u'123', 4).tolong() == 27 + e = py.test.raises(ParseStringError, rbigint.fromstr, u'L ', + fname=u'int') + assert u'int()' in e.value.msg + assert rbigint.fromstr(u'123L', 4).tolong() == 27 assert rbigint.fromstr(u'123L', 30).tolong() == 27000 + 1800 + 90 + 21 assert rbigint.fromstr(u'123L', 22).tolong() == 10648 + 968 + 66 + 21 - py.test.raises(ParseStringError, rbigint.fromstr, u'123L', 21) + assert rbigint.fromstr(u'123L', 21).tolong() == 441 + 42 + 3 assert rbigint.fromstr(u'1891234174197319').tolong() == 1891234174197319 def test_from_numberstring_parser(self): diff --git a/rpython/rlib/test/test_rweaklist.py b/rpython/rlib/test/test_rweaklist.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_rweaklist.py @@ -0,0 +1,57 @@ +import gc +from rpython.rlib.rweaklist import RWeakListMixin + + +class A(object): + pass + + +def test_simple(): + a1 = A(); a2 = A() + wlist = RWeakListMixin(); wlist.initialize() + i = wlist.add_handle(a1) + assert i == 0 + i = wlist.reserve_next_handle_index() + assert i == 1 + wlist.store_handle(i, 
a2) + assert wlist.fetch_handle(0) is a1 + assert wlist.fetch_handle(1) is a2 + # + del a2 + for i in range(5): + gc.collect() + if wlist.fetch_handle(1) is None: + break + else: + raise AssertionError("handle(1) did not disappear") + assert wlist.fetch_handle(0) is a1 + +def test_reuse(): + alist = [A() for i in range(200)] + wlist = RWeakListMixin(); wlist.initialize() + for i in range(200): + j = wlist.reserve_next_handle_index() + assert j == i + wlist.store_handle(i, alist[i]) + # + del alist[1::2] + del alist[1::2] + del alist[1::2] + del alist[1::2] + del alist[1::2] + for i in range(5): + gc.collect() + # + for i in range(200): + a = wlist.fetch_handle(i) + if i % 32 == 0: + assert a is alist[i // 32] + else: + assert a is None + # + maximum = -1 + for i in range(200): + j = wlist.reserve_next_handle_index() + maximum = max(maximum, j) + wlist.store_handle(j, A()) + assert maximum <= 240 diff --git a/rpython/rlib/types.py b/rpython/rlib/types.py --- a/rpython/rlib/types.py +++ b/rpython/rlib/types.py @@ -57,7 +57,8 @@ def ptr(ll_type): from rpython.rtyper.lltypesystem.lltype import Ptr - return model.SomePtr(Ptr(ll_type)) + from rpython.rtyper.llannotation import SomePtr + return SomePtr(Ptr(ll_type)) def list(element): diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -7,7 +7,8 @@ from rpython.annotator.policy import AnnotatorPolicy from rpython.annotator.signature import Sig from rpython.annotator.specialize import flatten_star_args -from rpython.rtyper.llannotation import SomePtr +from rpython.rtyper.llannotation import ( + SomePtr, annotation_to_lltype, lltype_to_annotation) from rpython.rtyper.normalizecalls import perform_normalizations from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.flowspace.model import Constant @@ -58,7 +59,7 @@ else: new_args_s.append(annmodel.not_const(s_obj)) try: - key.append(annmodel.annotation_to_lltype(s_obj)) + key.append(annotation_to_lltype(s_obj)) except ValueError: # passing non-low-level types to a ll_* function is allowed # for module/ll_* @@ -76,8 +77,8 @@ default_specialize = staticmethod(default_specialize) def specialize__semierased(funcdesc, args_s): - a2l = annmodel.annotation_to_lltype - l2a = annmodel.lltype_to_annotation + a2l = annotation_to_lltype + l2a = lltype_to_annotation args_s[:] = [l2a(a2l(s)) for s in args_s] return LowLevelAnnotatorPolicy.default_specialize(funcdesc, args_s) specialize__semierased = staticmethod(specialize__semierased) @@ -121,8 +122,8 @@ def specialize__genconst(pol, funcdesc, args_s, i): # XXX this is specific to the JIT - TYPE = annmodel.annotation_to_lltype(args_s[i], 'genconst') - args_s[i] = annmodel.lltype_to_annotation(TYPE) + TYPE = annotation_to_lltype(args_s[i], 'genconst') + args_s[i] = lltype_to_annotation(TYPE) alt_name = funcdesc.name + "__%s" % (TYPE._short_name(),) return funcdesc.cachedgraph(TYPE, alt_name=valid_identifier(alt_name)) @@ -356,10 +357,10 @@ assert s_callable.is_constant() F = s_F.const FUNC = F.TO - args_s = [annmodel.lltype_to_annotation(T) for T in FUNC.ARGS] + args_s = [lltype_to_annotation(T) for T in FUNC.ARGS] key = (llhelper, s_callable.const) s_res = self.bookkeeper.emulate_pbc_call(key, s_callable, args_s) - assert annmodel.lltype_to_annotation(FUNC.RESULT).contains(s_res) + assert lltype_to_annotation(FUNC.RESULT).contains(s_res) return SomePtr(F) def specialize_call(self, hop): @@ -419,9 +420,9 @@ def compute_result_annotation(self, s_str): 
from rpython.rtyper.lltypesystem.rstr import STR, UNICODE if strtype is str: - return annmodel.lltype_to_annotation(lltype.Ptr(STR)) + return lltype_to_annotation(lltype.Ptr(STR)) else: - return annmodel.lltype_to_annotation(lltype.Ptr(UNICODE)) + return lltype_to_annotation(lltype.Ptr(UNICODE)) def specialize_call(self, hop): hop.exception_cannot_occur() diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -1,8 +1,12 @@ """ Code for annotating low-level thingies. """ -from rpython.annotator.model import SomeObject -from rpython.rtyper.lltypesystem import lltype +from rpython.tool.pairtype import pair, pairtype +from rpython.annotator.model import ( + SomeObject, SomeSingleFloat, SomeFloat, SomeLongFloat, SomeChar, + SomeUnicodeCodePoint, SomeInteger, SomeString, SomeImpossibleValue, + s_None, s_Bool, UnionError, AnnotatorError) +from rpython.rtyper.lltypesystem import lltype, llmemory class SomeAddress(SomeObject): immutable = True @@ -13,6 +17,17 @@ def is_null_address(self): return self.is_immutable_constant() and not self.const + def getattr(self, s_attr): + assert s_attr.is_constant() + assert isinstance(s_attr, SomeString) + assert s_attr.const in llmemory.supported_access_types + return SomeTypedAddressAccess( + llmemory.supported_access_types[s_attr.const]) + getattr.can_only_throw = [] + + def bool(self): + return s_Bool + class SomeTypedAddressAccess(SomeObject): """This class is used to annotate the intermediate value that appears in expressions of the form: @@ -25,6 +40,63 @@ def can_be_none(self): return False + +class __extend__(pairtype(SomeAddress, SomeAddress)): + def union((s_addr1, s_addr2)): + return SomeAddress() + + def sub((s_addr1, s_addr2)): + from rpython.annotator.bookkeeper import getbookkeeper + if s_addr1.is_null_address() and s_addr2.is_null_address(): + return getbookkeeper().immutablevalue(0) + return SomeInteger() + + def is_((s_addr1, s_addr2)): + assert False, "comparisons with is not supported by addresses" + +class __extend__(pairtype(SomeTypedAddressAccess, SomeTypedAddressAccess)): + def union((s_taa1, s_taa2)): + assert s_taa1.type == s_taa2.type + return s_taa1 + +class __extend__(pairtype(SomeTypedAddressAccess, SomeInteger)): + def getitem((s_taa, s_int)): + return lltype_to_annotation(s_taa.type) + getitem.can_only_throw = [] + + def setitem((s_taa, s_int), s_value): + assert annotation_to_lltype(s_value) is s_taa.type + setitem.can_only_throw = [] + + +class __extend__(pairtype(SomeAddress, SomeInteger)): + def add((s_addr, s_int)): + return SomeAddress() + + def sub((s_addr, s_int)): + return SomeAddress() + +class __extend__(pairtype(SomeAddress, SomeImpossibleValue)): + # need to override this specifically to hide the 'raise UnionError' + # of pairtype(SomeAddress, SomeObject). + def union((s_addr, s_imp)): + return s_addr + +class __extend__(pairtype(SomeImpossibleValue, SomeAddress)): + # need to override this specifically to hide the 'raise UnionError' + # of pairtype(SomeObject, SomeAddress). 
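
For readers following the llannotation refactoring in this changeset: the pairtype rules added above give address arithmetic the obvious annotations (an address plus or minus an integer stays an address, and the difference of two addresses is a plain integer). A minimal sketch of what that means for the annotator, assuming the post-refactoring import locations shown in this patch and the usual pair() calling convention that the patch itself uses:

    from rpython.tool.pairtype import pair
    from rpython.annotator.model import SomeInteger
    from rpython.rtyper.llannotation import SomeAddress

    s_addr = SomeAddress()
    s_int = SomeInteger()
    # addr + int and addr - int keep the address annotation
    assert isinstance(pair(s_addr, s_int).add(), SomeAddress)
    assert isinstance(pair(s_addr, s_int).sub(), SomeAddress)
    # addr - addr is annotated as an integer
    assert isinstance(pair(s_addr, s_addr).sub(), SomeInteger)
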
+ def union((s_imp, s_addr)): + return s_addr + +class __extend__(pairtype(SomeAddress, SomeObject)): + def union((s_addr, s_obj)): + raise UnionError(s_addr, s_obj) + +class __extend__(pairtype(SomeObject, SomeAddress)): + def union((s_obj, s_addr)): + raise UnionError(s_obj, s_addr) + + class SomePtr(SomeObject): knowntype = lltype._ptr immutable = True @@ -52,3 +124,112 @@ def can_be_none(self): return False + +class __extend__(pairtype(SomePtr, SomePtr)): + def union((p1, p2)): + if p1.ll_ptrtype != p2.ll_ptrtype: + raise UnionError(p1, p2) + return SomePtr(p1.ll_ptrtype) + +class __extend__(pairtype(SomePtr, SomeInteger)): + + def getitem((p, int1)): + example = p.ll_ptrtype._example() + try: + v = example[0] + except IndexError: + return None # impossible value, e.g. FixedSizeArray(0) + return ll_to_annotation(v) + getitem.can_only_throw = [] + + def setitem((p, int1), s_value): # just doing checking + example = p.ll_ptrtype._example() + if example[0] is not None: # ignore Void s_value + v_lltype = annotation_to_lltype(s_value) + example[0] = v_lltype._defl() + setitem.can_only_throw = [] + +class __extend__(pairtype(SomePtr, SomeObject)): + def union((p, obj)): + raise UnionError(p, obj) + + def getitem((p, obj)): + raise AnnotatorError("ptr %r getitem index not an int: %r" % + (p.ll_ptrtype, obj)) + + def setitem((p, obj), s_value): + raise AnnotatorError("ptr %r setitem index not an int: %r" % + (p.ll_ptrtype, obj)) + +class __extend__(pairtype(SomeObject, SomePtr)): + def union((obj, p2)): + return pair(p2, obj).union() + + +annotation_to_ll_map = [ + (SomeSingleFloat(), lltype.SingleFloat), + (s_None, lltype.Void), # also matches SomeImpossibleValue() + (s_Bool, lltype.Bool), + (SomeFloat(), lltype.Float), + (SomeLongFloat(), lltype.LongFloat), + (SomeChar(), lltype.Char), + (SomeUnicodeCodePoint(), lltype.UniChar), + (SomeAddress(), llmemory.Address), +] + + +def annotation_to_lltype(s_val, info=None): + if isinstance(s_val, SomeInteriorPtr): + p = s_val.ll_ptrtype + if 0 in p.offsets: + assert list(p.offsets).count(0) == 1 + return lltype.Ptr(lltype.Ptr(p.PARENTTYPE)._interior_ptr_type_with_index(p.TO)) + else: + return lltype.Ptr(p.PARENTTYPE) + if isinstance(s_val, SomePtr): + return s_val.ll_ptrtype + if type(s_val) is SomeInteger: + return lltype.build_number(None, s_val.knowntype) + + for witness, T in annotation_to_ll_map: + if witness.contains(s_val): + return T + if info is None: + info = '' + else: + info = '%s: ' % info + raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( + info, s_val)) + +ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) + +def lltype_to_annotation(T): + try: + s = ll_to_annotation_map.get(T) + except TypeError: + s = None # unhashable T, e.g. 
a Ptr(GcForwardReference()) + if s is None: + if isinstance(T, lltype.Typedef): + return lltype_to_annotation(T.OF) + if isinstance(T, lltype.Number): + return SomeInteger(knowntype=T._type) + elif isinstance(T, lltype.InteriorPtr): + return SomeInteriorPtr(T) + else: + return SomePtr(T) + else: + return s + + +def ll_to_annotation(v): + if v is None: + # i think we can only get here in the case of void-returning + # functions + return s_None + if isinstance(v, lltype._interior_ptr): + ob = v._parent + if ob is None: + raise RuntimeError + T = lltype.InteriorPtr(lltype.typeOf(ob), v._T, v._offsets) + return SomeInteriorPtr(T) + return lltype_to_annotation(lltype.typeOf(v)) diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -22,6 +22,7 @@ from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat, base_int, intmask from rpython.rlib.rarithmetic import is_emulated_long, maxint from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.llinterp import LLInterpreter, LLException from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE @@ -1303,7 +1304,7 @@ def compute_result_annotation(self, s_RESTYPE, s_value): assert s_RESTYPE.is_constant() RESTYPE = s_RESTYPE.const - return annmodel.lltype_to_annotation(RESTYPE) + return lltype_to_annotation(RESTYPE) def specialize_call(self, hop): hop.exception_cannot_occur() @@ -1342,7 +1343,7 @@ assert isinstance(s_n, annmodel.SomeInteger) assert isinstance(s_ptr, SomePtr) typecheck_ptradd(s_ptr.ll_ptrtype) - return annmodel.lltype_to_annotation(s_ptr.ll_ptrtype) + return lltype_to_annotation(s_ptr.ll_ptrtype) def specialize_call(self, hop): hop.exception_cannot_occur() diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -141,7 +141,7 @@ _type_ = LLOp def compute_result_annotation(self, RESULTTYPE, *args): - from rpython.annotator.model import lltype_to_annotation + from rpython.rtyper.llannotation import lltype_to_annotation assert RESULTTYPE.is_constant() return lltype_to_annotation(RESULTTYPE.const) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -5,7 +5,7 @@ from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr from rpython.rtyper.lltypesystem.llmemory import itemoffsetof, raw_memcopy -from rpython.annotator.model import lltype_to_annotation +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.objectmodel import Symbolic from rpython.rlib.objectmodel import keepalive_until_here, enforceargs diff --git a/rpython/rtyper/module/ll_os_stat.py b/rpython/rtyper/module/ll_os_stat.py --- a/rpython/rtyper/module/ll_os_stat.py +++ b/rpython/rtyper/module/ll_os_stat.py @@ -7,6 +7,7 @@ import sys from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib import rposix from rpython.rlib.rarithmetic import intmask from rpython.rtyper import extregistry @@ -87,7 +88,7 @@ assert s_attr.is_constant(), 
"non-constant attr name in getattr()" attrname = s_attr.const TYPE = STAT_FIELD_TYPES[attrname] - return annmodel.lltype_to_annotation(TYPE) + return lltype_to_annotation(TYPE) def _get_rmarshall_support_(self): # for rlib.rmarshal # reduce and recreate stat_result objects from 10-tuples @@ -98,7 +99,7 @@ def stat_result_recreate(tup): return make_stat_result(tup + extra_zeroes) From noreply at buildbot.pypy.org Fri Jan 31 02:30:46 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 31 Jan 2014 02:30:46 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to new API Message-ID: <20140131013046.CAC9A1D22C6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69033:32009d039283 Date: 2014-01-30 17:29 -0800 http://bitbucket.org/pypy/pypy/changeset/32009d039283/ Log: adapt to new API diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -85,7 +85,7 @@ def string_to_w_long(space, w_longtype, s, base=10): try: - bigint = rbigint.fromstr(s, base) + bigint = rbigint.fromstr(s, base, ignore_l_suffix=True, fname=u'int') except ParseStringError, e: raise OperationError(space.w_ValueError, space.wrap(e.msg)) From noreply at buildbot.pypy.org Fri Jan 31 02:30:51 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 31 Jan 2014 02:30:51 +0100 (CET) Subject: [pypy-commit] pypy default: merge upstream Message-ID: <20140131013051.C99CB1D22C6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69034:50e4bd9b64b1 Date: 2014-01-30 17:29 -0800 http://bitbucket.org/pypy/pypy/changeset/50e4bd9b64b1/ Log: merge upstream diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -256,6 +256,10 @@ value = space.is_true(self) return get_dtype_cache(space).w_booldtype.box(value) + def descr_zero(self, space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return get_dtype_cache(space).w_longdtype.box(0) + def descr_ravel(self, space): from pypy.module.micronumpy.base import convert_to_array w_values = space.newtuple([self]) @@ -586,6 +590,12 @@ __hash__ = interp2app(W_GenericBox.descr_hash), tolist = interp2app(W_GenericBox.item), + min = interp2app(W_GenericBox.descr_self), + max = interp2app(W_GenericBox.descr_self), + argmin = interp2app(W_GenericBox.descr_zero), + argmax = interp2app(W_GenericBox.descr_zero), + sum = interp2app(W_GenericBox.descr_self), + prod = interp2app(W_GenericBox.descr_self), any = interp2app(W_GenericBox.descr_any), all = interp2app(W_GenericBox.descr_all), ravel = interp2app(W_GenericBox.descr_ravel), diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -102,6 +102,16 @@ assert b == a assert b is not a + def test_methods(self): + import numpy as np + for a in [np.int32(2), np.float64(2.0), np.complex64(42)]: + for op in ['min', 'max', 'sum', 'prod']: + assert getattr(a, op)() == a + for op in ['argmin', 'argmax']: + b = getattr(a, op)() + assert type(b) is np.int_ + assert b == 0 + def test_buffer(self): import numpy as np a = np.int32(123) From noreply at buildbot.pypy.org Fri Jan 31 13:28:51 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 31 Jan 2014 13:28:51 +0100 (CET) Subject: [pypy-commit] stmgc c7: WIP: allow for arbitrary 
number of pthreads using fixed Message-ID: <20140131122851.D070C1C396F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r695:59a2204a4c1b Date: 2014-01-31 13:29 +0100 http://bitbucket.org/pypy/stmgc/changeset/59a2204a4c1b/ Log: WIP: allow for arbitrary number of pthreads using fixed number of thread segments. first test with duhton works diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -5,9 +5,7 @@ #include #include #include -#include -#include -#include + #include #include "core.h" @@ -165,8 +163,27 @@ } } +void _stm_setup_static_thread(void) +{ + int thread_num = __sync_fetch_and_add(&num_threads_started, 1); + assert(thread_num < 2); /* only 2 threads for now */ + _stm_restore_local_state(thread_num); + _STM_TL->nursery_current = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); + memset((void*)real_address((object_t*)_STM_TL->nursery_current), 0x0, + (FIRST_AFTER_NURSERY_PAGE - FIRST_NURSERY_PAGE) * 4096); /* clear nursery */ + + _STM_TL->shadow_stack = NULL; + _STM_TL->shadow_stack_base = NULL; + + _STM_TL->old_objects_to_trace = stm_list_create(); + + _STM_TL->modified_objects = stm_list_create(); + _STM_TL->uncommitted_objects = stm_list_create(); + assert(!_STM_TL->active); + _stm_assert_clean_tl(); +} void stm_setup(void) { @@ -244,44 +261,19 @@ char *heap = REAL_ADDRESS(get_thread_base(0), first_heap * 4096UL); assert(memset(heap, 0xcd, HEAP_PAGES * 4096)); // testing stm_largemalloc_init(heap, HEAP_PAGES * 4096UL); + + for (i = 0; i < NB_THREADS; i++) { + _stm_setup_static_thread(); + } } -#define INVALID_GS_VALUE 0x6D6D6D6D -static void set_gs_register(uint64_t value) + +void _stm_teardown_static_thread(int thread_num) { - int result = syscall(SYS_arch_prctl, ARCH_SET_GS, value); - assert(result == 0); -} - -void stm_setup_thread(void) -{ - int thread_num = __sync_fetch_and_add(&num_threads_started, 1); - assert(thread_num < 2); /* only 2 threads for now */ - _stm_restore_local_state(thread_num); - - _STM_TL->nursery_current = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); - memset((void*)real_address((object_t*)_STM_TL->nursery_current), 0x0, - (FIRST_AFTER_NURSERY_PAGE - FIRST_NURSERY_PAGE) * 4096); /* clear nursery */ - _STM_TL->shadow_stack = (object_t**)malloc(LENGTH_SHADOW_STACK * sizeof(void*)); - _STM_TL->shadow_stack_base = _STM_TL->shadow_stack; - - _STM_TL->old_objects_to_trace = stm_list_create(); - - _STM_TL->modified_objects = stm_list_create(); - _STM_TL->uncommitted_objects = stm_list_create(); - assert(!_STM_TL->active); -} - -bool _stm_is_in_transaction(void) -{ - return _STM_TL->active; -} - -void _stm_teardown_thread(void) -{ + _stm_assert_clean_tl(); _stm_reset_shared_lock(); stm_list_free(_STM_TL->modified_objects); @@ -295,12 +287,16 @@ assert(_STM_TL->old_objects_to_trace->count == 0); stm_list_free(_STM_TL->old_objects_to_trace); - - set_gs_register(INVALID_GS_VALUE); + + _stm_restore_local_state(-1); // invalid } -void _stm_teardown(void) +void stm_teardown(void) { + for (; num_threads_started > 0; num_threads_started--) { + _stm_teardown_static_thread(num_threads_started - 1); + } + assert(inevitable_lock == 0); munmap(object_pages, TOTAL_MEMORY); _stm_reset_pages(); @@ -308,14 +304,7 @@ object_pages = NULL; } -void _stm_restore_local_state(int thread_num) -{ - char *thread_base = get_thread_base(thread_num); - set_gs_register((uintptr_t)thread_base); - assert(_STM_TL->thread_num == thread_num); - assert(_STM_TL->thread_base == thread_base); -} static void reset_transaction_read_version(void) { @@ -378,10 +367,11 @@ void 
stm_start_transaction(jmpbufptr_t *jmpbufptr) { + /* GS invalid before this point! */ + _stm_stop_safe_point(LOCK_COLLECT|THREAD_YIELD); + assert(!_STM_TL->active); - _stm_stop_safe_point(LOCK_COLLECT); - uint8_t old_rv = _STM_TL->transaction_read_version; _STM_TL->transaction_read_version = old_rv + 1; if (UNLIKELY(old_rv == 0xff)) @@ -442,8 +432,11 @@ _STM_TL->active = 0; - _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT); + fprintf(stderr, "%c", 'C'+_STM_TL->thread_num*32); + + _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT|THREAD_YIELD); + /* GS invalid after this point! */ } @@ -495,15 +488,17 @@ assert(_STM_TL->jmpbufptr != NULL); assert(_STM_TL->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ _STM_TL->active = 0; - /* _STM_TL->need_abort = 0; */ - - _stm_start_safe_point(LOCK_COLLECT); - - fprintf(stderr, "%c", 'A'+_STM_TL->thread_num*32); + _STM_TL->need_abort = 0; /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_threads(); stm_list_clear(_STM_TL->modified_objects); - __builtin_longjmp(*_STM_TL->jmpbufptr, 1); + jmpbufptr_t *buf = _STM_TL->jmpbufptr; /* _STM_TL not valid during safe-point */ + fprintf(stderr, "%c", 'A'+_STM_TL->thread_num*32); + + _stm_start_safe_point(LOCK_COLLECT|THREAD_YIELD); + /* GS invalid after this point! */ + + __builtin_longjmp(*buf, 1); } diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -93,21 +93,25 @@ struct _thread_local1_s { jmpbufptr_t *jmpbufptr; uint8_t transaction_read_version; + + /* static threads, not pthreads */ + int thread_num; + char *thread_base; - int thread_num; uint8_t active; /* 1 normal, 2 inevitable, 0 no trans. */ bool need_abort; - char *thread_base; - struct stm_list_s *modified_objects; - + object_t **old_shadow_stack; object_t **shadow_stack; object_t **shadow_stack_base; + localchar_t *nursery_current; + + struct stm_list_s *modified_objects; + struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; struct stm_list_s *uncommitted_objects; - localchar_t *nursery_current; struct stm_list_s *old_objects_to_trace; }; #define _STM_TL ((_thread_local1_t *)4352) @@ -126,6 +130,7 @@ #define LIKELY(x) __builtin_expect(x, true) #define UNLIKELY(x) __builtin_expect(x, false) +#define IMPLY(a, b) (!(a) || (b)) #define REAL_ADDRESS(object_pages, src) ((object_pages) + (uintptr_t)(src)) @@ -177,6 +182,7 @@ } + /* ==================== API ==================== */ static inline void stm_read(object_t *obj) @@ -206,16 +212,17 @@ extern void stmcb_trace(struct object_s *, void (object_t **)); void _stm_restore_local_state(int thread_num); -void _stm_teardown(void); -void _stm_teardown_thread(void); +void stm_teardown(void); bool _stm_is_in_transaction(void); +void _stm_assert_clean_tl(void); bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); object_t *stm_allocate(size_t size); void stm_setup(void); -void stm_setup_thread(void); +void stm_setup_pthread(void); + void stm_start_transaction(jmpbufptr_t *jmpbufptr); void stm_stop_transaction(void); diff --git a/c7/list.h b/c7/list.h --- a/c7/list.h +++ b/c7/list.h @@ -2,7 +2,7 @@ #define _STM_LIST_H #include "core.h" - +#include struct stm_list_s { uintptr_t count; diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -41,7 +41,8 @@ object_t *stm_allocate_prebuilt(size_t size) { - return _stm_allocate_old(size); /* XXX */ + object_t* res = _stm_allocate_old(size); /* XXX */ + return res; } localchar_t *_stm_alloc_next_page(size_t size_class) diff --git a/c7/stmsync.c 
b/c7/stmsync.c --- a/c7/stmsync.c +++ b/c7/stmsync.c @@ -1,12 +1,18 @@ #include #include #include - +#include +#include +#include +#include +#include #include "stmsync.h" #include "core.h" #include "reader_writer_lock.h" +#include "list.h" +#define INVALID_GS_VALUE 0x6D6D6D6D /* a multi-reader, single-writer lock: transactions normally take a reader lock, so don't conflict with each other; when we need to do a global GC, @@ -15,6 +21,141 @@ rwticket rw_shared_lock; /* the "GIL" */ rwticket rw_collection_lock; /* for major collections */ +sem_t static_thread_semaphore; +uint8_t static_threads[NB_THREADS]; /* 1 if running a pthread */ +__thread struct _thread_local1_s *pthread_tl = NULL; + + + + +void _stm_acquire_tl_segment(); +void _stm_release_tl_segment(); + +static void set_gs_register(uint64_t value) +{ + int result = syscall(SYS_arch_prctl, ARCH_SET_GS, value); + assert(result == 0); +} + +bool _stm_is_in_transaction(void) +{ + return pthread_tl->active; +} + + +void _stm_restore_local_state(int thread_num) +{ + if (thread_num == -1) { /* mostly for debugging */ + set_gs_register(INVALID_GS_VALUE); + return; + } + + char *thread_base = get_thread_base(thread_num); + set_gs_register((uintptr_t)thread_base); + + assert(_STM_TL->thread_num == thread_num); + assert(_STM_TL->thread_base == thread_base); +} + + +void _stm_yield_thread_segment() +{ + _stm_release_tl_segment(); + + /* release our static thread: */ + static_threads[_STM_TL->thread_num] = 0; + sem_post(&static_thread_semaphore); + + _stm_restore_local_state(-1); /* invalid */ +} + +void _stm_grab_thread_segment() +{ + /* acquire a static thread: */ + sem_wait(&static_thread_semaphore); + int thread_num = 0; + while (1) { + if (!__sync_lock_test_and_set(&static_threads[thread_num], 1)) + break; + thread_num = (thread_num + 1) % NB_THREADS; + } + + _stm_restore_local_state(thread_num); + _stm_acquire_tl_segment(); +} + + +void _stm_assert_clean_tl() +{ + /* between a pthread switch, these are the things + that must be guaranteed */ + + /* already set are + thread_num, thread_base: to the current static thread + nursery_current: nursery should be cleared + active, need_abort: no transaction running + modified_objects: empty + alloc: re-usable by this thread + uncommitted_objects: empty + old_objects_to_trace: empty + !!shadow_stack...: still belongs to previous thread + */ + assert(stm_list_is_empty(_STM_TL->modified_objects)); + assert(stm_list_is_empty(_STM_TL->uncommitted_objects)); + assert(stm_list_is_empty(_STM_TL->old_objects_to_trace)); + + assert(!_STM_TL->active); + /* assert(!_STM_TL->need_abort); may happen, but will be cleared by + start_transaction() */ + assert(_STM_TL->nursery_current == (localchar_t*)(FIRST_NURSERY_PAGE * 4096)); +} + +void _stm_acquire_tl_segment() +{ + /* makes tl-segment ours! */ + _stm_assert_clean_tl(); + + _STM_TL->shadow_stack = pthread_tl->shadow_stack; + _STM_TL->shadow_stack_base = pthread_tl->shadow_stack_base; + _STM_TL->old_shadow_stack = pthread_tl->old_shadow_stack; +} + +void _stm_release_tl_segment() +{ + /* makes tl-segment ours! 
*/ + _stm_assert_clean_tl(); + + pthread_tl->shadow_stack = _STM_TL->shadow_stack; + pthread_tl->shadow_stack_base = _STM_TL->shadow_stack_base; + pthread_tl->old_shadow_stack = _STM_TL->old_shadow_stack; +} + +void stm_setup_pthread(void) +{ + struct _thread_local1_s* tl = malloc(sizeof(struct _thread_local1_s)); + assert(!pthread_tl); + pthread_tl = tl; + + /* get us a clean thread segment */ + _stm_grab_thread_segment(); + _stm_assert_clean_tl(); + + /* allocate shadow stack for this thread */ + _STM_TL->shadow_stack = (object_t**)malloc(LENGTH_SHADOW_STACK * sizeof(void*)); + _STM_TL->shadow_stack_base = _STM_TL->shadow_stack; + + /* copy everything from _STM_TL */ + memcpy(tl, REAL_ADDRESS(get_thread_base(_STM_TL->thread_num), _STM_TL), + sizeof(struct _thread_local1_s)); + + /* go into safe-point again: */ + _stm_yield_thread_segment(); +} + + + + + void _stm_reset_shared_lock() { @@ -27,32 +168,40 @@ assert(!rwticket_wrunlock(&rw_collection_lock)); memset(&rw_collection_lock, 0, sizeof(rwticket)); + + int i; + for (i = 0; i < NB_THREADS; i++) + assert(static_threads[i] == 0); + memset(static_threads, 0, sizeof(static_threads)); + sem_init(&static_thread_semaphore, 0, NB_THREADS); + sem_getvalue(&static_thread_semaphore, &i); + assert(i == NB_THREADS); } -void stm_acquire_collection_lock() -{ - /* we must have the exclusive lock here and - not the colletion lock!! */ - /* XXX: for more than 2 threads, need a way - to signal other threads with need_major_collect - so that they don't leave COLLECT-safe-points - when this flag is set. Otherwise we simply - wait arbitrarily long until all threads reach - COLLECT-safe-points by chance at the same time. */ - while (1) { - if (!rwticket_wrtrylock(&rw_collection_lock)) - break; /* acquired! */ +/* void stm_acquire_collection_lock() */ +/* { */ +/* /\* we must have the exclusive lock here and */ +/* not the colletion lock!! *\/ */ +/* /\* XXX: for more than 2 threads, need a way */ +/* to signal other threads with need_major_collect */ +/* so that they don't leave COLLECT-safe-points */ +/* when this flag is set. Otherwise we simply */ +/* wait arbitrarily long until all threads reach */ +/* COLLECT-safe-points by chance at the same time. *\/ */ +/* while (1) { */ +/* if (!rwticket_wrtrylock(&rw_collection_lock)) */ +/* break; /\* acquired! *\/ */ - stm_stop_exclusive_lock(); - usleep(1); - stm_start_exclusive_lock(); - if (_STM_TL->need_abort) { - stm_stop_exclusive_lock(); - stm_start_shared_lock(); - stm_abort_transaction(); - } - } -} +/* stm_stop_exclusive_lock(); */ +/* usleep(1); */ +/* stm_start_exclusive_lock(); */ +/* if (_STM_TL->need_abort) { */ +/* stm_stop_exclusive_lock(); */ +/* stm_start_shared_lock(); */ +/* stm_abort_transaction(); */ +/* } */ +/* } */ +/* } */ void stm_start_shared_lock(void) { @@ -75,30 +224,51 @@ } /* _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT) - -> release the exclusive lock and also the collect-read-lock */ + -> release the exclusive lock and also the collect-read-lock + + THREAD_YIELD: gives up its (current thread's) GS segment + so that other threads can grab it and run. This will + make _STM_TL and all thread-local addresses unusable + for the current thread. 
(requires LOCK_COLLECT) +*/ void _stm_start_safe_point(uint8_t flags) { + assert(IMPLY(flags & THREAD_YIELD, flags & LOCK_COLLECT)); + if (flags & LOCK_EXCLUSIVE) stm_stop_exclusive_lock(); else stm_stop_shared_lock(); - if (flags & LOCK_COLLECT) + if (flags & LOCK_COLLECT) { rwticket_rdunlock(&rw_collection_lock); + + if (flags & THREAD_YIELD) { + _stm_yield_thread_segment(); + } + } } /* _stm_stop_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE); -> reacquire the collect-read-lock and the exclusive lock + + THREAD_YIELD: wait until we get a GS segment assigned + and then continue (requires LOCK_COLLECT) */ void _stm_stop_safe_point(uint8_t flags) { + assert(IMPLY(flags & THREAD_YIELD, flags & LOCK_COLLECT)); + if (flags & THREAD_YIELD) { + _stm_grab_thread_segment(); + } + if (flags & LOCK_EXCLUSIVE) stm_start_exclusive_lock(); else stm_start_shared_lock(); - if (!(flags & LOCK_COLLECT)) { /* if we released the collection lock */ + if (flags & LOCK_COLLECT) { /* if we released the collection lock */ /* acquire read-collection. always succeeds because if there was a write-collection holder we would also not have gotten the shared_lock */ @@ -110,12 +280,8 @@ /* restore to shared-mode with the collection lock */ stm_stop_exclusive_lock(); stm_start_shared_lock(); - if (flags & LOCK_COLLECT) - rwticket_rdlock(&rw_collection_lock); stm_abort_transaction(); } else { - if (flags & LOCK_COLLECT) - rwticket_rdlock(&rw_collection_lock); stm_abort_transaction(); } } diff --git a/c7/stmsync.h b/c7/stmsync.h --- a/c7/stmsync.h +++ b/c7/stmsync.h @@ -8,9 +8,12 @@ void _stm_start_safe_point(uint8_t flags); void _stm_stop_safe_point(uint8_t flags); void _stm_reset_shared_lock(void); +void _stm_grab_thread_segment(void); +void _stm_yield_thread_segment(void); enum { LOCK_COLLECT = (1 << 0), LOCK_EXCLUSIVE = (1 << 1), + THREAD_YIELD = (1 << 2), }; diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -151,6 +151,7 @@ bool _checked_stm_become_inevitable() { jmpbufptr_t here; + int tn = _STM_TL->thread_num; if (__builtin_setjmp(here) == 0) { // returned directly assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); _STM_TL->jmpbufptr = &here; @@ -158,12 +159,13 @@ _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 0; } - _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; + _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; return 1; } bool _checked_stm_write(object_t *object) { jmpbufptr_t here; + int tn = _STM_TL->thread_num; if (__builtin_setjmp(here) == 0) { // returned directly assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); _STM_TL->jmpbufptr = &here; @@ -171,25 +173,27 @@ _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 0; } - _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; + _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; return 1; } bool _stm_stop_transaction(void) { jmpbufptr_t here; + int tn = _STM_TL->thread_num; if (__builtin_setjmp(here) == 0) { // returned directly assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); _STM_TL->jmpbufptr = &here; stm_stop_transaction(); - _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; + _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; return 0; } - _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; + _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; return 1; } bool _stm_check_stop_safe_point(void) { jmpbufptr_t here; + int tn = _STM_TL->thread_num; if (__builtin_setjmp(here) == 0) { // returned directly assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); _STM_TL->jmpbufptr = &here; @@ -197,20 +201,21 @@ _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; return 0; } - _STM_TL->jmpbufptr 
= (jmpbufptr_t*)-1; + _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; return 1; } bool _stm_check_abort_transaction(void) { jmpbufptr_t here; + int tn = _STM_TL->thread_num; if (__builtin_setjmp(here) == 0) { // returned directly assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); _STM_TL->jmpbufptr = &here; stm_abort_transaction(); - _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; + _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; return 0; } - _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; + _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; return 1; } diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -43,23 +43,22 @@ } stm_start_inevitable_transaction(); DuObject *code = Du_Compile(filename, interactive); - _du_save1(code); - stm_stop_transaction(); - _du_restore1(code); + if (code == NULL) { printf("\n"); break; } - /*Du_Print(code, 1); - printf("\n");*/ - stm_start_inevitable_transaction(); + DuObject *res = Du_Eval(code, Du_Globals); if (interactive) { Du_Print(res, 1); } + _du_save1(stm_thread_local_obj); + _stm_minor_collect(); /* hack... */ + _du_restore1(stm_thread_local_obj); + stm_stop_transaction(); - _du_restore1(stm_thread_local_obj); Du_TransactionRun(); if (!interactive) diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -686,8 +686,10 @@ Du_FatalError("run-transactions: expected no argument"); _du_save1(stm_thread_local_obj); + _stm_minor_collect(); /* hack... */ + _du_restore1(stm_thread_local_obj); + stm_stop_transaction(); - _du_restore1(stm_thread_local_obj); Du_TransactionRun(); @@ -771,9 +773,9 @@ assert(num_threads == 2); stm_setup(); - stm_setup_thread(); - stm_setup_thread(); - _stm_restore_local_state(0); + stm_setup_pthread(); + + stm_start_inevitable_transaction(); init_prebuilt_object_objects(); init_prebuilt_symbol_objects(); @@ -784,7 +786,6 @@ all_threads_count = num_threads; all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads); - stm_start_inevitable_transaction(); DuFrame_SetBuiltinMacro(Du_Globals, "progn", Du_Progn); DuFrame_SetBuiltinMacro(Du_Globals, "setq", du_setq); DuFrame_SetBuiltinMacro(Du_Globals, "print", du_print); @@ -833,11 +834,5 @@ void Du_Finalize(void) { - _stm_restore_local_state(1); - _stm_teardown_thread(); - - _stm_restore_local_state(0); - _stm_teardown_thread(); - - _stm_teardown(); + stm_teardown(); } diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -62,9 +62,11 @@ return; stm_start_inevitable_transaction(); + DuConsObject *root = du_pending_transactions; _du_write1(root); root->cdr = stm_thread_local_obj; + stm_stop_transaction(); stm_thread_local_obj = NULL; @@ -173,8 +175,8 @@ void *run_thread(void *thread_id) { jmpbufptr_t here; - int thread_num = (uintptr_t)thread_id; - _stm_restore_local_state(thread_num); + stm_setup_pthread(); + stm_thread_local_obj = NULL; while (1) { @@ -185,10 +187,14 @@ while (__builtin_setjmp(here) == 1) { } stm_start_transaction(&here); + run_transaction(cell); + _du_save1(stm_thread_local_obj); + _stm_minor_collect(); /* hack.. 
*/ + _du_restore1(stm_thread_local_obj); + stm_stop_transaction(); - _du_restore1(stm_thread_local_obj); } From noreply at buildbot.pypy.org Fri Jan 31 13:59:27 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 31 Jan 2014 13:59:27 +0100 (CET) Subject: [pypy-commit] pypy default: (fijal, arigo, cfbolz) Fix (hopefully) the case where deleting and Message-ID: <20140131125927.301341C13DF@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69035:74b0b82fd9e2 Date: 2014-01-31 13:45 +0100 http://bitbucket.org/pypy/pypy/changeset/74b0b82fd9e2/ Log: (fijal, arigo, cfbolz) Fix (hopefully) the case where deleting and reinstantiating the attribute does not mark it as mutated diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -57,7 +57,7 @@ return True def delete(self, obj, selector): - return None + pass def find_map_attr(self, selector): if jit.we_are_jitted(): @@ -291,6 +291,7 @@ def delete(self, obj, selector): if selector == self.selector: # ok, attribute is deleted + self.ever_mutated = True return self.back.copy(obj) new_obj = self.back.delete(obj, selector) if new_obj is not None: diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -144,7 +144,17 @@ assert obj2.map.back.ever_mutated == True assert obj2.map is obj.map - +def test_attr_immutability_delete(monkeypatch): + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", 10) + map1 = obj.map + import pdb + pdb.set_trace() + obj.deldictvalue(space, "a") + obj.setdictvalue(space, "a", 20) + assert obj.map.ever_mutated == True + assert obj.map is map1 def test_delete(): for i, dattr in enumerate(["a", "b", "c"]): From noreply at buildbot.pypy.org Fri Jan 31 13:59:29 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 31 Jan 2014 13:59:29 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20140131125929.43C931C13DF@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69036:22ee335c3d51 Date: 2014-01-31 13:58 +0100 http://bitbucket.org/pypy/pypy/changeset/22ee335c3d51/ Log: merge diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -2,58 +2,13 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj -from pypy.module._weakref.interp__weakref import dead_ref from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib import rweaklist -def reduced_value(s): - while True: - divide = s & 1 - s >>= 1 - if not divide: - return s - -# ____________________________________________________________ - - -class CffiHandles: +class CffiHandles(rweaklist.RWeakListMixin): def __init__(self, space): - self.handles = [] - self.look_distance = 0 - - def reserve_next_handle_index(self): - # The reservation ordering done here is tweaked for pypy's - # memory allocator. We look from index 'look_distance'. - # Look_distance increases from 0. But we also look at - # "look_distance/2" or "/4" or "/8", etc. 
If we find that one - # of these secondary locations is free, we assume it's because - # there was recently a minor collection; so we reset - # look_distance to 0 and start again from the lowest locations. - length = len(self.handles) - for d in range(self.look_distance, length): - if self.handles[d]() is None: - self.look_distance = d + 1 - return d - s = reduced_value(d) - if self.handles[s]() is None: - break - # restart from the beginning - for d in range(0, length): - if self.handles[d]() is None: - self.look_distance = d + 1 - return d - # full! extend, but don't use '+=' here - self.handles = self.handles + [dead_ref] * (length // 3 + 5) - self.look_distance = length + 1 - return length - - def store_handle(self, index, content): - self.handles[index] = weakref.ref(content) - - def fetch_handle(self, index): - if 0 <= index < len(self.handles): - return self.handles[index]() - return None + self.initialize() def get(space): return space.fromcache(CffiHandles) diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.rstring import StringBuilder -from rpython.rlib import rweakref +from rpython.rlib import rweakref, rweaklist DEFAULT_BUFFER_SIZE = 8192 @@ -52,7 +52,6 @@ self.w_dict = space.newdict() self.__IOBase_closed = False if add_to_autoflusher: - self.streamholder = None # needed by AutoFlusher get_autoflusher(space).add(self) def getdict(self, space): @@ -115,7 +114,6 @@ space.call_method(self, "flush") finally: self.__IOBase_closed = True - get_autoflusher(space).remove(self) def flush_w(self, space): if self._CLOSED(): @@ -339,55 +337,35 @@ # functions to make sure that all streams are flushed on exit # ------------------------------------------------------------ -class StreamHolder(object): - def __init__(self, w_iobase): - self.w_iobase_ref = rweakref.ref(w_iobase) - w_iobase.autoflusher = self - def autoflush(self, space): - w_iobase = self.w_iobase_ref() - if w_iobase is not None: - try: - space.call_method(w_iobase, 'flush') - except OperationError: - # Silencing all errors is bad, but getting randomly - # interrupted here is equally as bad, and potentially - # more frequent (because of shutdown issues). 
- pass - - -class AutoFlusher(object): +class AutoFlusher(rweaklist.RWeakListMixin): def __init__(self, space): - self.streams = {} + self.initialize() def add(self, w_iobase): - assert w_iobase.streamholder is None if rweakref.has_weakref_support(): - holder = StreamHolder(w_iobase) - w_iobase.streamholder = holder - self.streams[holder] = None + self.add_handle(w_iobase) #else: # no support for weakrefs, so ignore and we # will not get autoflushing - def remove(self, w_iobase): - holder = w_iobase.streamholder - if holder is not None: - try: - del self.streams[holder] - except KeyError: - # this can happen in daemon threads - pass - def flush_all(self, space): - while self.streams: - for streamholder in self.streams.keys(): + while True: + handles = self.get_all_handles() + if len(handles) == 0: + break + self.initialize() # reset the state here + for wr in handles: + w_iobase = wr() + if w_iobase is None: + continue try: - del self.streams[streamholder] - except KeyError: - pass # key was removed in the meantime - else: - streamholder.autoflush(space) + space.call_method(w_iobase, 'flush') + except OperationError: + # Silencing all errors is bad, but getting randomly + # interrupted here is equally as bad, and potentially + # more frequent (because of shutdown issues). + pass def get_autoflusher(space): return space.fromcache(AutoFlusher) diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -6,6 +6,7 @@ from rpython.rlib import jit from rpython.rlib.rshrinklist import AbstractShrinkList from rpython.rlib.objectmodel import specialize +from rpython.rlib.rweakref import dead_ref import weakref @@ -144,14 +145,6 @@ # ____________________________________________________________ -class Dummy: - pass -dead_ref = weakref.ref(Dummy()) -for i in range(5): - if dead_ref() is not None: - import gc; gc.collect() -assert dead_ref() is None - class W_WeakrefBase(W_Root): def __init__(w_self, space, w_obj, w_callable): diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -446,6 +446,9 @@ if hasattr(rwin32, 'build_winerror_to_errno'): _winerror_to_errno, _default_errno = rwin32.build_winerror_to_errno() + # Python 2 doesn't map ERROR_DIRECTORY (267) to ENOTDIR but + # Python 3 (CPython issue #12802) and build_winerror_to_errno do + del _winerror_to_errno[267] else: _winerror_to_errno, _default_errno = {}, 22 # EINVAL diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -256,6 +256,10 @@ value = space.is_true(self) return get_dtype_cache(space).w_booldtype.box(value) + def descr_zero(self, space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return get_dtype_cache(space).w_longdtype.box(0) + def descr_ravel(self, space): from pypy.module.micronumpy.base import convert_to_array w_values = space.newtuple([self]) @@ -327,6 +331,9 @@ def descr_buffer(self, space): return self.descr_ravel(space).descr_get_data(space) + def descr_byteswap(self, space): + return self.get_dtype(space).itemtype.byteswap(self) + w_flags = None def descr_get_flags(self, space): if self.w_flags is None: @@ -583,6 +590,12 @@ __hash__ = interp2app(W_GenericBox.descr_hash), tolist = 
interp2app(W_GenericBox.item), + min = interp2app(W_GenericBox.descr_self), + max = interp2app(W_GenericBox.descr_self), + argmin = interp2app(W_GenericBox.descr_zero), + argmax = interp2app(W_GenericBox.descr_zero), + sum = interp2app(W_GenericBox.descr_self), + prod = interp2app(W_GenericBox.descr_self), any = interp2app(W_GenericBox.descr_any), all = interp2app(W_GenericBox.descr_all), ravel = interp2app(W_GenericBox.descr_ravel), @@ -592,6 +605,7 @@ view = interp2app(W_GenericBox.descr_view), squeeze = interp2app(W_GenericBox.descr_self), copy = interp2app(W_GenericBox.descr_copy), + byteswap = interp2app(W_GenericBox.descr_byteswap), dtype = GetSetProperty(W_GenericBox.descr_get_dtype), size = GetSetProperty(W_GenericBox.descr_get_size), diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -803,29 +803,19 @@ for dtype in reversed(self.builtin_dtypes): self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.name] = dtype - can_name = dtype.kind + str(dtype.get_size()) - self.dtypes_by_name[can_name] = dtype - self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype - self.dtypes_by_name[NPY_NATIVE + can_name] = dtype - new_name = NPY_OPPBYTE + can_name - itemtype = type(dtype.itemtype)(False) - self.dtypes_by_name[new_name] = W_Dtype( - itemtype, dtype.num, dtype.kind, new_name, dtype.char, - dtype.w_box_type, byteorder=NPY_OPPBYTE, - float_type=dtype.float_type) - if dtype.kind != dtype.char: - can_name = dtype.char + for can_name in [dtype.kind + str(dtype.get_size()), + dtype.char]: + self.dtypes_by_name[can_name] = dtype self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype self.dtypes_by_name[NPY_NATIVE + can_name] = dtype new_name = NPY_OPPBYTE + can_name + itemtype = type(dtype.itemtype)(False) self.dtypes_by_name[new_name] = W_Dtype( itemtype, dtype.num, dtype.kind, new_name, dtype.char, dtype.w_box_type, byteorder=NPY_OPPBYTE, float_type=dtype.float_type) - for alias in dtype.aliases: self.dtypes_by_name[alias] = dtype - self.dtypes_by_name[dtype.char] = dtype typeinfo_full = { 'LONGLONG': self.w_int64dtype, diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -788,6 +788,14 @@ assert dtype('>i8').str == '>i8' assert dtype('int8').str == '|i1' assert dtype('float').str == byteorder + 'f8' + assert dtype('f').str == byteorder + 'f4' + assert dtype('=f').str == byteorder + 'f4' + assert dtype('>f').str == '>f4' + assert dtype('d').str == '>f8' + assert dtype('>= 1 + if not divide: + return s + + +class RWeakListMixin(object): + _mixin_ = True + + def initialize(self): + self.handles = [] + self.look_distance = 0 + + def get_all_handles(self): + return self.handles + + def reserve_next_handle_index(self): + # The reservation ordering done here is tweaked for pypy's + # memory allocator. We look from index 'look_distance'. + # Look_distance increases from 0. But we also look at + # "look_distance/2" or "/4" or "/8", etc. If we find that one + # of these secondary locations is free, we assume it's because + # there was recently a minor collection; so we reset + # look_distance to 0 and start again from the lowest locations. 
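
To make the probing order described in the comment above concrete: besides scanning forward from look_distance, each probe at index d also checks index _reduced_value(d), which shifts d right once plus once more for every trailing 1-bit, so the secondary probe lands near d/2, d/4, d/8, and so on. A few concrete values, easily checked against the definition of _reduced_value at the top of this file:

    assert _reduced_value(10) == 5   # 0b1010 -> 0b101, about d/2
    assert _reduced_value(13) == 3   # 0b1101 -> 0b11,  about d/4
    assert _reduced_value(11) == 1   # 0b1011 -> 0b1,   about d/8
    assert _reduced_value(7)  == 0   # 0b111  -> 0,     back to the lowest slots
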
+ length = len(self.handles) + for d in range(self.look_distance, length): + if self.handles[d]() is None: + self.look_distance = d + 1 + return d + s = _reduced_value(d) + if self.handles[s]() is None: + break + # restart from the beginning + for d in range(0, length): + if self.handles[d]() is None: + self.look_distance = d + 1 + return d + # full! extend, but don't use '+=' here + self.handles = self.handles + [dead_ref] * (length // 3 + 5) + self.look_distance = length + 1 + return length + + def add_handle(self, content): + index = self.reserve_next_handle_index() + self.store_handle(index, content) + return index + + def store_handle(self, index, content): + self.handles[index] = weakref.ref(content) + + def fetch_handle(self, index): + if 0 <= index < len(self.handles): + return self.handles[index]() + return None diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -12,6 +12,14 @@ def has_weakref_support(): return True # returns False if --no-translation-rweakref +class Dummy: + pass +dead_ref = weakref.ref(Dummy()) +for i in range(5): + if dead_ref() is not None: + import gc; gc.collect() +assert dead_ref() is None # a known-to-be-dead weakref object + class RWeakValueDictionary(object): """A dictionary containing weak values.""" diff --git a/rpython/rlib/rwin32.py b/rpython/rlib/rwin32.py --- a/rpython/rlib/rwin32.py +++ b/rpython/rlib/rwin32.py @@ -178,8 +178,13 @@ int i; for(i=1; i < 65000; i++) { _dosmaperr(i); - if (errno == EINVAL) - continue; + if (errno == EINVAL) { + /* CPython issue #12802 */ + if (i == ERROR_DIRECTORY) + errno = ENOTDIR; + else + continue; + } printf("%d\t%d\n", i, errno); } return 0; @@ -201,7 +206,7 @@ 132: 13, 145: 41, 158: 13, 161: 2, 164: 11, 167: 13, 183: 17, 188: 8, 189: 8, 190: 8, 191: 8, 192: 8, 193: 8, 194: 8, 195: 8, 196: 8, 197: 8, 198: 8, 199: 8, 200: 8, 201: 8, - 202: 8, 206: 2, 215: 11, 1816: 12, + 202: 8, 206: 2, 215: 11, 267: 20, 1816: 12, } else: output = os.popen(str(exename)) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -214,8 +214,13 @@ from rpython.rlib.rstring import ParseStringError assert rbigint.fromstr('123L').tolong() == 123 assert rbigint.fromstr('123L ').tolong() == 123 + py.test.raises(ParseStringError, rbigint.fromstr, '123L ', + ignore_l_suffix=True) py.test.raises(ParseStringError, rbigint.fromstr, 'L') py.test.raises(ParseStringError, rbigint.fromstr, 'L ') + e = py.test.raises(ParseStringError, rbigint.fromstr, 'L ', + fname='int') + assert 'int()' in e.value.msg assert rbigint.fromstr('123L', 4).tolong() == 27 assert rbigint.fromstr('123L', 30).tolong() == 27000 + 1800 + 90 + 21 assert rbigint.fromstr('123L', 22).tolong() == 10648 + 968 + 66 + 21 diff --git a/rpython/rlib/test/test_rweaklist.py b/rpython/rlib/test/test_rweaklist.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_rweaklist.py @@ -0,0 +1,57 @@ +import gc +from rpython.rlib.rweaklist import RWeakListMixin + + +class A(object): + pass + + +def test_simple(): + a1 = A(); a2 = A() + wlist = RWeakListMixin(); wlist.initialize() + i = wlist.add_handle(a1) + assert i == 0 + i = wlist.reserve_next_handle_index() + assert i == 1 + wlist.store_handle(i, a2) + assert wlist.fetch_handle(0) is a1 + assert wlist.fetch_handle(1) is a2 + # + del a2 + for i in range(5): + gc.collect() + if wlist.fetch_handle(1) is None: + break + else: + raise 
AssertionError("handle(1) did not disappear") + assert wlist.fetch_handle(0) is a1 + +def test_reuse(): + alist = [A() for i in range(200)] + wlist = RWeakListMixin(); wlist.initialize() + for i in range(200): + j = wlist.reserve_next_handle_index() + assert j == i + wlist.store_handle(i, alist[i]) + # + del alist[1::2] + del alist[1::2] + del alist[1::2] + del alist[1::2] + del alist[1::2] + for i in range(5): + gc.collect() + # + for i in range(200): + a = wlist.fetch_handle(i) + if i % 32 == 0: + assert a is alist[i // 32] + else: + assert a is None + # + maximum = -1 + for i in range(200): + j = wlist.reserve_next_handle_index() + maximum = max(maximum, j) + wlist.store_handle(j, A()) + assert maximum <= 240 From noreply at buildbot.pypy.org Fri Jan 31 14:18:42 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 31 Jan 2014 14:18:42 +0100 (CET) Subject: [pypy-commit] stmgc c7: add a teardown_pthread function to free the shadow stack Message-ID: <20140131131842.BD3791C0153@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r696:2a59456815d8 Date: 2014-01-31 13:41 +0100 http://bitbucket.org/pypy/stmgc/changeset/2a59456815d8/ Log: add a teardown_pthread function to free the shadow stack diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -282,9 +282,6 @@ assert(stm_list_is_empty(_STM_TL->uncommitted_objects)); stm_list_free(_STM_TL->uncommitted_objects); - assert(_STM_TL->shadow_stack == _STM_TL->shadow_stack_base); - free(_STM_TL->shadow_stack); - assert(_STM_TL->old_objects_to_trace->count == 0); stm_list_free(_STM_TL->old_objects_to_trace); diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -213,6 +213,7 @@ void _stm_restore_local_state(int thread_num); void stm_teardown(void); +void stm_teardown_pthread(void); bool _stm_is_in_transaction(void); void _stm_assert_clean_tl(void); diff --git a/c7/stmsync.c b/c7/stmsync.c --- a/c7/stmsync.c +++ b/c7/stmsync.c @@ -153,6 +153,14 @@ } +void stm_teardown_pthread(void) +{ + free(pthread_tl->shadow_stack_base); + + free(pthread_tl); + pthread_tl = NULL; +} + diff --git a/duhton/demo/container_transaction.duh b/duhton/demo/container_transaction.duh --- a/duhton/demo/container_transaction.duh +++ b/duhton/demo/container_transaction.duh @@ -3,11 +3,11 @@ (defun g (thread n) (set c (+ (get c) 1)) - (if (> (get c) 200000) + (if (> (get c) 20000) (print (quote overflow) (get c)) - (if (< n 100000) + (if (< n 10000) (transaction f thread (+ n 1)) - (if (< (get c) 200000) + (if (< (get c) 20000) (print (quote not-enough)) (print (quote ok)))))) @@ -17,3 +17,4 @@ (transaction f (quote t1) 1) (transaction f (quote t2) 1) +(transaction f (quote t3) 1) diff --git a/duhton/demo/run_transactions.duh b/duhton/demo/run_transactions.duh --- a/duhton/demo/run_transactions.duh +++ b/duhton/demo/run_transactions.duh @@ -15,9 +15,9 @@ (transaction f (quote t1) 10000) (transaction f (quote t2) 20000) -(transaction f (quote t2) 10002) +(transaction f (quote t3) 10002) (run-transactions) -(transaction f (quote t2) 15) +(transaction f (quote t1) 15) (transaction f (quote t2) 15) (run-transactions) (print (quote result) (get c)) diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -10,7 +10,7 @@ #define STM 1 /* hackish removal of all read/write barriers. 
synchronization is up to the program */ -#define DEFAULT_NUM_THREADS 2 /* required by stm-c7 */ +#define DEFAULT_NUM_THREADS 2 struct DuObject_s { diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -770,8 +770,6 @@ void Du_Initialize(int num_threads) { - assert(num_threads == 2); - stm_setup(); stm_setup_pthread(); @@ -834,5 +832,6 @@ void Du_Finalize(void) { + stm_teardown_pthread(); stm_teardown(); } diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -198,5 +198,7 @@ } + stm_teardown_pthread(); + return NULL; } From noreply at buildbot.pypy.org Fri Jan 31 14:18:43 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 31 Jan 2014 14:18:43 +0100 (CET) Subject: [pypy-commit] stmgc c7: little things Message-ID: <20140131131843.E07481C0153@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r697:7bc72d2248b8 Date: 2014-01-31 13:59 +0100 http://bitbucket.org/pypy/stmgc/changeset/7bc72d2248b8/ Log: little things diff --git a/c7/stmsync.c b/c7/stmsync.c --- a/c7/stmsync.c +++ b/c7/stmsync.c @@ -182,8 +182,6 @@ assert(static_threads[i] == 0); memset(static_threads, 0, sizeof(static_threads)); sem_init(&static_thread_semaphore, 0, NB_THREADS); - sem_getvalue(&static_thread_semaphore, &i); - assert(i == NB_THREADS); } /* void stm_acquire_collection_lock() */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -40,7 +40,7 @@ #define NURSERY_SECTION ... void stm_setup(void); -void stm_setup_thread(void); +void stm_setup_pthread(void); void stm_start_transaction(jmpbufptr_t *); bool _stm_stop_transaction(void); @@ -53,8 +53,8 @@ _Bool _stm_was_written(object_t *object); void _stm_restore_local_state(int thread_num); -void _stm_teardown(void); -void _stm_teardown_thread(void); +void stm_teardown(void); +void stm_teardown_pthread(void); char *_stm_real_address(object_t *o); object_t *_stm_tl_address(char *ptr); @@ -100,6 +100,7 @@ enum { LOCK_COLLECT = 1, LOCK_EXCLUSIVE = 2, + THREAD_YIELD = 4, }; From noreply at buildbot.pypy.org Fri Jan 31 14:18:44 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 31 Jan 2014 14:18:44 +0100 (CET) Subject: [pypy-commit] stmgc c7: update demo2 Message-ID: <20140131131844.EF1CF1C0153@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r698:738662da75b3 Date: 2014-01-31 14:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/738662da75b3/ Log: update demo2 diff --git a/c7/Makefile b/c7/Makefile --- a/c7/Makefile +++ b/c7/Makefile @@ -14,9 +14,9 @@ rm -f $(BUILD_EXE) $(DEBUG_EXE) $(RELEASE_EXE) -H_FILES = core.h list.h pagecopy.h reader_writer_lock.h stmsync.h pages.h nursery.h +H_FILES = core.h list.h pagecopy.h reader_writer_lock.h stmsync.h pages.h nursery.h largemalloc.h -C_FILES = core.c list.c pagecopy.c reader_writer_lock.c stmsync.c pages.c nursery.c +C_FILES = core.c list.c pagecopy.c reader_writer_lock.c stmsync.c pages.c nursery.c largemalloc.c DEBUG = -g diff --git a/c7/demo2.c b/c7/demo2.c --- a/c7/demo2.c +++ b/c7/demo2.c @@ -58,8 +58,8 @@ stm_read((objptr_t)r_n); sum += r_n->value; - _stm_start_safe_point(); - _stm_stop_safe_point(); + _stm_start_safe_point(0); + _stm_stop_safe_point(0); if (prev >= r_n->value) { stm_stop_transaction(); return -1; @@ -109,8 +109,8 @@ current->next = next->next; next->next = current; - _stm_start_safe_point(); - _stm_stop_safe_point(); + _stm_start_safe_point(0); + _stm_stop_safe_point(0); } prev = current; } @@ -163,9 
+163,12 @@ w_prev = w_newnode; } + _stm_minor_collect(); /* hack.. */ + global_chained_list = (nodeptr_t)stm_pop_root(); + stm_stop_transaction(); - global_chained_list = (nodeptr_t)stm_pop_root(); + printf("setup ok\n"); } @@ -182,7 +185,7 @@ int status; if (arg != NULL) { /* we still need to initialize */ - stm_setup_thread(); + stm_setup_pthread(); sem_post(&initialized); status = sem_wait(&go); assert(status == 0); @@ -195,7 +198,9 @@ if (arg != NULL) { status = sem_post(&done); assert(status == 0); + stm_teardown_pthread(); } + return NULL; } @@ -208,9 +213,10 @@ sum = check_sorted(); // little Gauss: - assert(sum == (1 + LIST_LENGTH) * (LIST_LENGTH / 2)); - - printf("check ok\n"); + if (sum == (1 + LIST_LENGTH) * (LIST_LENGTH / 2)) + printf("check ok\n"); + else + printf("check ERROR\n"); } @@ -236,7 +242,7 @@ assert(status == 0); stm_setup(); - stm_setup_thread(); + stm_setup_pthread(); newthread(demo2, (void*)1); @@ -252,8 +258,11 @@ status = sem_wait(&done); assert(status == 0); - + final_check(); + stm_teardown_pthread(); + stm_teardown(); + return 0; } From noreply at buildbot.pypy.org Fri Jan 31 15:56:31 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 31 Jan 2014 15:56:31 +0100 (CET) Subject: [pypy-commit] pypy resume-refactor: shuffle stuff around, one more test pass Message-ID: <20140131145631.B46661D24AC@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: resume-refactor Changeset: r69037:44ef0203c38b Date: 2014-01-31 15:55 +0100 http://bitbucket.org/pypy/pypy/changeset/44ef0203c38b/ Log: shuffle stuff around, one more test pass diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -4,6 +4,7 @@ from rpython.jit.metainterp.optimizeopt.virtualize import OptVirtualize from rpython.jit.metainterp.optimizeopt.heap import OptHeap from rpython.jit.metainterp.optimizeopt.vstring import OptString +from rpython.jit.metainterp.optimizeopt.resume import OptResume from rpython.jit.metainterp.optimizeopt.unroll import optimize_unroll from rpython.jit.metainterp.optimizeopt.simplify import OptSimplify from rpython.jit.metainterp.optimizeopt.pure import OptPure @@ -13,7 +14,8 @@ from rpython.rlib.debug import debug_start, debug_stop, debug_print -ALL_OPTS = [('intbounds', OptIntBounds), +ALL_OPTS = [('resume', OptResume), + ('intbounds', OptIntBounds), ('rewrite', OptRewrite), ('virtualize', OptVirtualize), ('string', OptString), diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -347,6 +347,7 @@ def __init__(self, metainterp_sd, loop, optimizations=None): self.metainterp_sd = metainterp_sd self.cpu = metainterp_sd.cpu + self.delayed_resume_put = None self.loop = loop self.values = {} self.interned_refs = self.cpu.ts.new_ref_dict() @@ -567,6 +568,10 @@ raise ValueError, "invalid optimization" self.seen_results[op.result] = None self._newoperations.append(op) + if (self.delayed_resume_put is not None and + self.delayed_resume_put.getarg(0) is op.result): + self._newoperations.append(self.delayed_resume_put) + self.delayed_resume_put = None def replace_op(self, old_op, new_op): # XXX: Do we want to cache indexes to prevent search? 
@@ -671,17 +676,6 @@ value = self.getvalue(op.getarg(0)) self.optimizer.opaque_pointers[value] = True - # the following stuff should go to the default Optimization thing, - # pending refactor - - def optimize_ENTER_FRAME(self, op): - self.resumebuilder.enter_frame(op.getarg(0).getint(), op.getdescr()) - self.optimize_default(op) - - def optimize_LEAVE_FRAME(self, op): - self.resumebuilder.leave_frame(op) - self.optimize_default(op) - dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_', default=Optimizer.optimize_default) diff --git a/rpython/jit/metainterp/optimizeopt/resume.py b/rpython/jit/metainterp/optimizeopt/resume.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/resume.py @@ -0,0 +1,32 @@ + +from rpython.jit.metainterp.optimizeopt import optimizer +from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method + +""" All of this directly emit the ops, without calling emit_operation +(they also don't have boxes except a resume_put) +""" + +class OptResume(optimizer.Optimization): + def optimize_RESUME_PUT(self, op): + if op.getarg(0) in self.optimizer.producer: + self.optimizer.resumebuilder.resume_put(op) + else: + self.optimizer.delayed_resume_put = op + # otherwise we did not emit the operation just yet + + def optimize_ENTER_FRAME(self, op): + rb = self.optimizer.resumebuilder + rb.enter_frame(op.getarg(0).getint(), op.getdescr()) + self.optimizer._newoperations.append(op) + + def optimize_LEAVE_FRAME(self, op): + self.optimizer.resumebuilder.leave_frame(op) + self.optimizer._newoperations.append(op) + + def optimize_RESUME_SET_PC(self, op): + self.optimizer._newoperations.append(op) + +dispatch_opt = make_dispatcher_method(OptResume, 'optimize_', + default=OptResume.emit_operation) + +OptResume.propagate_forward = dispatch_opt diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -57,17 +57,6 @@ self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations) self.boxes_created_this_iteration = None - def fix_snapshot(self, jump_args, snapshot): - if snapshot is None: - return None - snapshot_args = snapshot.boxes - new_snapshot_args = [] - for a in snapshot_args: - a = self.getvalue(a).get_key_box() - new_snapshot_args.append(a) - prev = self.fix_snapshot(jump_args, snapshot.prev) - return Snapshot(prev, new_snapshot_args) - def propagate_all_forward(self): loop = self.optimizer.loop self.optimizer.clear_newoperations() diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -841,11 +841,6 @@ value.ensure_nonnull() self.emit_operation(op) - def optimize_RESUME_PUT(self, op): - if op.getarg(0) in self.optimizer.producer: - self.optimizer.resumebuilder.resume_put(op) - # otherwise we did not emit the operation just yet - dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_', default=OptVirtualize.emit_operation) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -437,7 +437,7 @@ """Inconsistency in the JIT hints.""" ENABLE_ALL_OPTS = ( - 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll') + 'resume:intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll') PARAMETER_DOCS = { 'threshold': 'number of times a loop 
has to run for it to become hot', From noreply at buildbot.pypy.org Fri Jan 31 16:17:19 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 31 Jan 2014 16:17:19 +0100 (CET) Subject: [pypy-commit] pypy default: Remove leftover pdb Message-ID: <20140131151719.42F621C0153@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r69038:691e7bfae05a Date: 2014-01-31 07:16 -0800 http://bitbucket.org/pypy/pypy/changeset/691e7bfae05a/ Log: Remove leftover pdb diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -149,8 +149,6 @@ obj = cls.instantiate() obj.setdictvalue(space, "a", 10) map1 = obj.map - import pdb - pdb.set_trace() obj.deldictvalue(space, "a") obj.setdictvalue(space, "a", 20) assert obj.map.ever_mutated == True From noreply at buildbot.pypy.org Fri Jan 31 16:32:08 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 31 Jan 2014 16:32:08 +0100 (CET) Subject: [pypy-commit] stmgc c7: minor changes Message-ID: <20140131153208.5533A1C3058@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r699:d3750e653c90 Date: 2014-01-31 16:05 +0100 http://bitbucket.org/pypy/stmgc/changeset/d3750e653c90/ Log: minor changes diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -211,7 +211,7 @@ extern size_t stmcb_size(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); -void _stm_restore_local_state(int thread_num); +char* _stm_restore_local_state(int thread_num); void stm_teardown(void); void stm_teardown_pthread(void); bool _stm_is_in_transaction(void); diff --git a/c7/stmsync.c b/c7/stmsync.c --- a/c7/stmsync.c +++ b/c7/stmsync.c @@ -43,11 +43,11 @@ } -void _stm_restore_local_state(int thread_num) +char* _stm_restore_local_state(int thread_num) { if (thread_num == -1) { /* mostly for debugging */ set_gs_register(INVALID_GS_VALUE); - return; + return (char*)1; } char *thread_base = get_thread_base(thread_num); @@ -55,6 +55,7 @@ assert(_STM_TL->thread_num == thread_num); assert(_STM_TL->thread_base == thread_base); + return thread_base; } @@ -66,7 +67,7 @@ static_threads[_STM_TL->thread_num] = 0; sem_post(&static_thread_semaphore); - _stm_restore_local_state(-1); /* invalid */ + assert(_stm_restore_local_state(-1)); /* invalid */ } void _stm_grab_thread_segment() From noreply at buildbot.pypy.org Fri Jan 31 16:32:09 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 31 Jan 2014 16:32:09 +0100 (CET) Subject: [pypy-commit] stmgc c7: small performance improvements thanks to cache-line alignment of locks (thanks kirma) Message-ID: <20140131153209.802F21C3058@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r700:04bbddecdb33 Date: 2014-01-31 16:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/04bbddecdb33/ Log: small performance improvements thanks to cache-line alignment of locks (thanks kirma) diff --git a/c7/stmsync.c b/c7/stmsync.c --- a/c7/stmsync.c +++ b/c7/stmsync.c @@ -18,10 +18,10 @@ lock, so don't conflict with each other; when we need to do a global GC, we take a writer lock to "stop the world". 
*/ -rwticket rw_shared_lock; /* the "GIL" */ -rwticket rw_collection_lock; /* for major collections */ +rwticket rw_shared_lock __attribute__((aligned(64))); /* the "GIL" */ +rwticket rw_collection_lock __attribute__((aligned(64))); /* for major collections */ -sem_t static_thread_semaphore; +sem_t static_thread_semaphore __attribute__((aligned(64))); uint8_t static_threads[NB_THREADS]; /* 1 if running a pthread */ __thread struct _thread_local1_s *pthread_tl = NULL; diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -11,11 +11,15 @@ C7HEADERS = ../c7/*.h -all: duhton_debug duhton +all: duhton_debug duhton duhton: *.c *.h $(C7SOURCES) $(C7HEADERS) clang -pthread -g -O2 -o duhton *.c $(C7SOURCES) -Wall +duhton_release: *.c *.h $(C7SOURCES) $(C7HEADERS) + clang -pthread -g -DNDEBUG -O2 -o duhton_release *.c $(C7SOURCES) -Wall + + duhton_debug: *.c *.h $(C7SOURCES) $(C7HEADERS) clang -pthread -g -DDu_DEBUG -o duhton_debug *.c $(C7SOURCES) -Wall diff --git a/duhton/demo/sort.duh b/duhton/demo/sort.duh --- a/duhton/demo/sort.duh +++ b/duhton/demo/sort.duh @@ -77,13 +77,14 @@ (setq half_len (/ (len xs) 2)) (setq first (list)) (setq second (list)) - - (while (< 0 (len xs)) - (if (< 0 half_len) - (append first (pop xs 0)) - (append second (pop xs 0)) + (setq xidx 0) + + (while (< xidx (len xs)) + (if (< xidx half_len) + (append first (get xs xidx)) + (append second (get xs xidx)) ) - (setq half_len (- half_len 1)) + (setq xidx (+ xidx 1)) ) (list first second) @@ -172,7 +173,7 @@ (setq current (time)) (print (quote before-random)) -(setq cs (random_list 100000)) +(setq cs (random_list 200000)) (print (quote time-random:) (- (time) current)) ;; (print_list cs) From noreply at buildbot.pypy.org Fri Jan 31 18:46:13 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 31 Jan 2014 18:46:13 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_handle after 1101ea526ac3 Message-ID: <20140131174613.EA8561C13DF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69039:001e11396293 Date: 2014-01-31 12:45 -0500 http://bitbucket.org/pypy/pypy/changeset/001e11396293/ Log: fix test_handle after 1101ea526ac3 diff --git a/pypy/module/_cffi_backend/test/test_handle.py b/pypy/module/_cffi_backend/test/test_handle.py --- a/pypy/module/_cffi_backend/test/test_handle.py +++ b/pypy/module/_cffi_backend/test/test_handle.py @@ -1,20 +1,5 @@ import random -from pypy.module._cffi_backend.handle import CffiHandles, reduced_value - - -def test_reduced_value(): - assert reduced_value(0) == 0 - assert reduced_value(1) == 0 - assert reduced_value(2) == 1 - assert reduced_value(3) == 0 - assert reduced_value(4) == 2 - assert reduced_value(5) == 1 - assert reduced_value(6) == 3 - assert reduced_value(7) == 0 - assert reduced_value(8) == 4 - assert reduced_value(9) == 2 - assert reduced_value(10) == 5 - assert reduced_value(11) == 1 +from pypy.module._cffi_backend.handle import CffiHandles class PseudoWeakRef(object): diff --git a/rpython/rlib/test/test_rweaklist.py b/rpython/rlib/test/test_rweaklist.py --- a/rpython/rlib/test/test_rweaklist.py +++ b/rpython/rlib/test/test_rweaklist.py @@ -1,5 +1,20 @@ import gc -from rpython.rlib.rweaklist import RWeakListMixin +from rpython.rlib.rweaklist import RWeakListMixin, _reduced_value as reduced_value + + +def test_reduced_value(): + assert reduced_value(0) == 0 + assert reduced_value(1) == 0 + assert reduced_value(2) == 1 + assert reduced_value(3) == 0 + assert reduced_value(4) == 2 + assert reduced_value(5) == 1 
+ assert reduced_value(6) == 3 + assert reduced_value(7) == 0 + assert reduced_value(8) == 4 + assert reduced_value(9) == 2 + assert reduced_value(10) == 5 + assert reduced_value(11) == 1 class A(object):