From noreply at buildbot.pypy.org Wed Apr 1 09:52:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 1 Apr 2015 09:52:15 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20150401075215.5BB8E1C09A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r588:e821e6946471 Date: 2015-04-01 09:52 +0200 http://bitbucket.org/pypy/pypy.org/changeset/e821e6946471/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $59097 of $105000 (56.3%) + $59106 of $105000 (56.3%)
diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $22603 of $80000 (28.3%) + $22663 of $80000 (28.3%)
From noreply at buildbot.pypy.org Wed Apr 1 11:08:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 1 Apr 2015 11:08:47 +0200 (CEST) Subject: [pypy-commit] pypy object-dtype2: Fix Message-ID: <20150401090847.838091C1169@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: object-dtype2 Changeset: r76667:662d2c806a12 Date: 2015-04-01 11:08 +0200 http://bitbucket.org/pypy/pypy/changeset/662d2c806a12/ Log: Fix diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -348,7 +348,7 @@ #debug_print('in customtrace w/obj', obj) length = rffi.cast(rffi.SIGNEDP, obj + offset_of_length)[0] step = rffi.cast(rffi.SIGNEDP, obj + offset_of_step)[0] - storage = obj + offset_of_storage + storage = (obj + offset_of_storage).address[0] debug_print('tracing', length, 'objects in ndarray.storage') i = 0 while i < length: From noreply at buildbot.pypy.org Wed Apr 1 11:12:59 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 1 Apr 2015 11:12:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c8: add become_inevitable to make sure that a piece of raw memory is only given to Message-ID: <20150401091259.AADD61C0207@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c8 Changeset: r76668:61dcdb6f958a Date: 2015-04-01 11:14 +0200 http://bitbucket.org/pypy/pypy/changeset/61dcdb6f958a/ Log: add become_inevitable to make sure that a piece of raw memory is only given to one transaction at a time (JITFRAMEINFO is stm_dont_track_raw_access and hence may have been given to and modified by two transactions doing an assemble_loop) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -71,6 +71,12 @@ # the address of the function called by 'new' gc_ll_descr = self.cpu.gc_ll_descr gc_ll_descr.initialize() + if 
self.cpu.gc_ll_descr.stm: + from rpython.rlib import rstm + # become inevitable so that the raw-memory used later + # will only be modified by one thread at a time + rstm.become_inevitable() + if hasattr(gc_ll_descr, 'minimal_size_in_nursery'): self.gc_minimal_size_in_nursery = gc_ll_descr.minimal_size_in_nursery else: diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -86,6 +86,12 @@ if WORD == 8: self.pending_memoryerror_trampoline_from = [] self.error_trampoline_64 = 0 + + if self.cpu.gc_ll_descr.stm: + # become inevitable so that the raw-memory used later + # will only be modified by one thread at a time + rstm.become_inevitable() + self.mc = codebuf.MachineCodeBlockWrapper() #assert self.datablockwrapper is None --- but obscure case # possible, e.g. getting MemoryError and continuing From noreply at buildbot.pypy.org Wed Apr 1 11:13:17 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 1 Apr 2015 11:13:17 +0200 (CEST) Subject: [pypy-commit] stmgc default: add an assert for the stm_write_card fastpath Message-ID: <20150401091317.D706D1C0207@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1744:575d288a77d3 Date: 2015-04-01 10:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/575d288a77d3/ Log: add an assert for the stm_write_card fastpath diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c --- a/c8/stm/nursery.c +++ b/c8/stm/nursery.c @@ -106,7 +106,6 @@ /* case "small enough" */ nobj = (object_t *)allocate_outside_nursery_small(size); } - //dprintf(("move %p -> %p\n", obj, nobj)); /* copy the object */ @@ -143,6 +142,9 @@ /* Must trace the object later */ LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj_sync_now); _cards_cleared_in_object(get_priv_segment(STM_SEGMENT->segment_num), nobj, true); + + assert(IMPLY(obj_should_use_cards(STM_SEGMENT->segment_base, nobj), + (((uintptr_t)nobj) & 15) == 0)); 
} static void _cards_cleared_in_object(struct stm_priv_segment_info_s *pseg, object_t *obj, From noreply at buildbot.pypy.org Wed Apr 1 11:40:33 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 1 Apr 2015 11:40:33 +0200 (CEST) Subject: [pypy-commit] pypy vmprof: sync vmprof.c Message-ID: <20150401094033.35ECA1C00F7@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r76669:9316e03f8cdd Date: 2015-04-01 11:40 +0200 http://bitbucket.org/pypy/pypy/changeset/9316e03f8cdd/ Log: sync vmprof.c diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c --- a/pypy/module/_vmprof/src/vmprof.c +++ b/pypy/module/_vmprof/src/vmprof.c @@ -70,6 +70,8 @@ prof_word(0); prof_word(period_usec); prof_word(0); + write(profile_file, profile_write_buffer, profile_buffer_position); + profile_buffer_position = 0; } static void prof_write_stacktrace(void** stack, int depth, int count) { @@ -298,9 +300,11 @@ static int remove_sigprof_timer(void) { static struct itimerval timer; + last_period_usec = 0; timer.it_interval.tv_sec = 0; timer.it_interval.tv_usec = 0; - timer.it_value = timer.it_interval; + timer.it_value.tv_sec = 0; + timer.it_value.tv_usec = 0; if (setitimer(ITIMER_PROF, &timer, NULL) != 0) { return -1; } From noreply at buildbot.pypy.org Wed Apr 1 12:06:01 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 1 Apr 2015 12:06:01 +0200 (CEST) Subject: [pypy-commit] pypy default: Expand the docs Message-ID: <20150401100601.E84F31C09A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76670:2485283de005 Date: 2015-04-01 12:06 +0200 http://bitbucket.org/pypy/pypy/changeset/2485283de005/ Log: Expand the docs diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -678,7 +678,12 @@ """ This function does not do anything, but called from any annotated place, will tell that "func" is used to trace GC roots inside any instance of the type TP. 
The func must be specified as "lambda: func" in this - call, for internal reasons. + call, for internal reasons. Note that the func will be automatically + specialized on the 'callback' argument value. Example: + + def customtrace(gc, obj, callback, arg): + gc._trace_callback(callback, arg, obj + offset_of_x) + lambda_customtrace = lambda: customtrace """ class RegisterGcTraceEntry(ExtRegistryEntry): From noreply at buildbot.pypy.org Wed Apr 1 12:36:30 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 1 Apr 2015 12:36:30 +0200 (CEST) Subject: [pypy-commit] pypy vmprof: tweaks and sync with upstream Message-ID: <20150401103630.D9E491C0FEE@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r76671:2e6cd706e475 Date: 2015-04-01 12:36 +0200 http://bitbucket.org/pypy/pypy/changeset/2e6cd706e475/ Log: tweaks and sync with upstream diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c --- a/pypy/module/_vmprof/src/vmprof.c +++ b/pypy/module/_vmprof/src/vmprof.c @@ -84,6 +84,7 @@ for(i=0; i Author: Maciej Fijalkowski Branch: vmprof Changeset: r76672:a3d990e5c7d4 Date: 2015-04-01 14:16 +0200 http://bitbucket.org/pypy/pypy/changeset/a3d990e5c7d4/ Log: Rewrite addresses to point to the start of JIT code diff --git a/pypy/module/_vmprof/src/get_custom_offset.c b/pypy/module/_vmprof/src/get_custom_offset.c --- a/pypy/module/_vmprof/src/get_custom_offset.c +++ b/pypy/module/_vmprof/src/get_custom_offset.c @@ -13,31 +13,37 @@ int custom_sanity_check() { - return !pypy_codemap_currently_invalid; + return !pypy_codemap_currently_invalid; } static ptrdiff_t vmprof_unw_get_custom_offset(void* ip, unw_cursor_t *cp) { - intptr_t ip_l = (intptr_t)ip; - return pypy_jit_stack_depth_at_loc(ip_l); + intptr_t ip_l = (intptr_t)ip; + return pypy_jit_stack_depth_at_loc(ip_l); } static long vmprof_write_header_for_jit_addr(void **result, long n, - void *ip, int max_depth) + void *ip, int max_depth) { - void *codemap; - long 
current_pos = 0; - intptr_t id; - intptr_t addr = (intptr_t)ip; + void *codemap; + long current_pos = 0; + intptr_t id; + long start_addr = 0; + intptr_t addr = (intptr_t)ip; - codemap = pypy_find_codemap_at_addr(addr); - if (codemap == NULL) - return n; + codemap = pypy_find_codemap_at_addr(addr, &start_addr); + if (codemap == NULL) + // not a jit code at all + return n; - while (n < max_depth) { - id = pypy_yield_codemap_at_addr(codemap, addr, ¤t_pos); - if (id == 0) - break; - result[n++] = (void *)id; - } - return n; + // modify the last entry to point to start address and not the random one + // in the middle + result[n - 1] = (void*)start_addr; + while (n < max_depth) { + id = pypy_yield_codemap_at_addr(codemap, addr, ¤t_pos); + if (id == 0) + // finish + break; + result[n++] = (void *)id; + } + return n; } diff --git a/rpython/jit/backend/llsupport/src/codemap.c b/rpython/jit/backend/llsupport/src/codemap.c --- a/rpython/jit/backend/llsupport/src/codemap.c +++ b/rpython/jit/backend/llsupport/src/codemap.c @@ -76,12 +76,13 @@ /*** interface used from pypy/module/_vmprof ***/ RPY_EXTERN -void *pypy_find_codemap_at_addr(long addr) +void *pypy_find_codemap_at_addr(long addr, long* start_addr) { skipnode_t *codemap = skiplist_search(&jit_codemap_head, addr); codemap_data_t *data; uintptr_t rel_addr; + *start_addr = 0; if (codemap == &jit_codemap_head) return NULL; @@ -90,6 +91,7 @@ if (rel_addr >= data->machine_code_size) return NULL; + *start_addr = (long)codemap->key; return (void *)codemap; } From noreply at buildbot.pypy.org Wed Apr 1 14:18:47 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 1 Apr 2015 14:18:47 +0200 (CEST) Subject: [pypy-commit] pypy vmprof: fix the signature Message-ID: <20150401121847.B98361C00F7@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r76673:8186806d9d7a Date: 2015-04-01 14:18 +0200 http://bitbucket.org/pypy/pypy/changeset/8186806d9d7a/ Log: fix the signature diff --git 
a/pypy/module/_vmprof/src/get_custom_offset.c b/pypy/module/_vmprof/src/get_custom_offset.c --- a/pypy/module/_vmprof/src/get_custom_offset.c +++ b/pypy/module/_vmprof/src/get_custom_offset.c @@ -1,7 +1,7 @@ extern volatile int pypy_codemap_currently_invalid; -void *pypy_find_codemap_at_addr(long addr); +void *pypy_find_codemap_at_addr(long addr, long *start_addr); long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, long *current_pos_addr); long pypy_jit_stack_depth_at_loc(long loc); From noreply at buildbot.pypy.org Wed Apr 1 14:30:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 1 Apr 2015 14:30:41 +0200 (CEST) Subject: [pypy-commit] pypy default: detail Message-ID: <20150401123041.BF5061C116D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76674:b030fc0e88cf Date: 2015-04-01 14:30 +0200 http://bitbucket.org/pypy/pypy/changeset/b030fc0e88cf/ Log: detail diff --git a/rpython/doc/rpython.rst b/rpython/doc/rpython.rst --- a/rpython/doc/rpython.rst +++ b/rpython/doc/rpython.rst @@ -149,7 +149,7 @@ and you can pass a ``*args`` argument (it must be a tuple). + as explained above, tuples are not of a variable length. If you need - to call a function with a variable number of arguments, refactor the + to call a function with a dynamic number of arguments, refactor the function itself to accept a single argument which is a regular list. 
+ dynamic dispatch enforces the use of signatures that are equal for all From noreply at buildbot.pypy.org Wed Apr 1 14:48:12 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 1 Apr 2015 14:48:12 +0200 (CEST) Subject: [pypy-commit] pypy vmprof: make tests to run Message-ID: <20150401124812.7EEE81C00F7@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r76676:983fff622638 Date: 2015-04-01 14:48 +0200 http://bitbucket.org/pypy/pypy/changeset/983fff622638/ Log: make tests to run diff --git a/rpython/jit/backend/llsupport/test/test_codemap.py b/rpython/jit/backend/llsupport/test/test_codemap.py --- a/rpython/jit/backend/llsupport/test/test_codemap.py +++ b/rpython/jit/backend/llsupport/test/test_codemap.py @@ -1,8 +1,11 @@ +from rpython.rtyper.lltypesystem import rffi, lltype from rpython.jit.backend.llsupport.codemap import stack_depth_at_loc from rpython.jit.backend.llsupport.codemap import CodemapStorage, \ CodemapBuilder, unpack_traceback, find_codemap_at_addr +NULL = lltype.nullptr(rffi.CArray(lltype.Signed)) + def test_register_codemap(): codemap = CodemapStorage() codemap.setup() @@ -10,17 +13,17 @@ codemap.register_codemap((300, 30, [16, 17, 18])) codemap.register_codemap((200, 100, [19, 20, 21, 22, 23])) # - raw100 = find_codemap_at_addr(100) - assert find_codemap_at_addr(119) == raw100 - assert not find_codemap_at_addr(120) + raw100 = find_codemap_at_addr(100, NULL) + assert find_codemap_at_addr(119, NULL) == raw100 + assert not find_codemap_at_addr(120, NULL) # - raw200 = find_codemap_at_addr(200) + raw200 = find_codemap_at_addr(200, NULL) assert raw200 != raw100 - assert find_codemap_at_addr(299) == raw200 + assert find_codemap_at_addr(299, NULL) == raw200 # - raw300 = find_codemap_at_addr(329) + raw300 = find_codemap_at_addr(329, NULL) assert raw300 != raw100 and raw300 != raw200 - assert find_codemap_at_addr(300) == raw300 + assert find_codemap_at_addr(300, NULL) == raw300 # codemap.free() From noreply at 
buildbot.pypy.org Wed Apr 1 14:48:11 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 1 Apr 2015 14:48:11 +0200 (CEST) Subject: [pypy-commit] pypy vmprof: fix another caller of this function Message-ID: <20150401124811.499EE1C00F7@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof Changeset: r76675:a04dc8013c67 Date: 2015-04-01 14:44 +0200 http://bitbucket.org/pypy/pypy/changeset/a04dc8013c67/ Log: fix another caller of this function diff --git a/rpython/jit/backend/llsupport/codemap.py b/rpython/jit/backend/llsupport/codemap.py --- a/rpython/jit/backend/llsupport/codemap.py +++ b/rpython/jit/backend/llsupport/codemap.py @@ -33,7 +33,7 @@ unsigned int bytecode_info_size); RPY_EXTERN long *pypy_jit_codemap_del(uintptr_t addr); RPY_EXTERN uintptr_t pypy_jit_codemap_firstkey(void); -RPY_EXTERN void *pypy_find_codemap_at_addr(long addr); +RPY_EXTERN void *pypy_find_codemap_at_addr(long addr, long* start_addr); RPY_EXTERN long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, long *current_pos_addr); @@ -69,7 +69,7 @@ stack_depth_at_loc = llexternal('pypy_jit_stack_depth_at_loc', [lltype.Signed], lltype.Signed) find_codemap_at_addr = llexternal('pypy_find_codemap_at_addr', - [lltype.Signed], lltype.Signed) + [lltype.Signed, rffi.CArrayPtr(lltype.Signed)], lltype.Signed) yield_bytecode_at_addr = llexternal('pypy_yield_codemap_at_addr', [lltype.Signed, lltype.Signed, rffi.CArrayPtr(lltype.Signed)], @@ -123,7 +123,8 @@ self.free() def unpack_traceback(addr): - codemap_raw = find_codemap_at_addr(addr) + codemap_raw = find_codemap_at_addr(addr, + lltype.nullptr(rffi.CArray(lltype.Signed))) if not codemap_raw: return [] # no codemap for that position storage = lltype.malloc(rffi.CArray(lltype.Signed), 1, flavor='raw') diff --git a/rpython/jit/backend/llsupport/src/codemap.c b/rpython/jit/backend/llsupport/src/codemap.c --- a/rpython/jit/backend/llsupport/src/codemap.c +++ b/rpython/jit/backend/llsupport/src/codemap.c @@ -82,16 +82,20 @@ 
codemap_data_t *data; uintptr_t rel_addr; - *start_addr = 0; if (codemap == &jit_codemap_head) + if (start_addr) + *start_addr = 0; return NULL; rel_addr = (uintptr_t)addr - codemap->key; data = (codemap_data_t *)codemap->data; if (rel_addr >= data->machine_code_size) + if (start_addr) + *start_addr = 0; return NULL; - *start_addr = (long)codemap->key; + if (start_addr) + *start_addr = (long)codemap->key; return (void *)codemap; } From noreply at buildbot.pypy.org Wed Apr 1 15:25:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 1 Apr 2015 15:25:26 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix for issue #2006 Message-ID: <20150401132526.E090F1C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76677:ba063d80a04f Date: 2015-04-01 15:25 +0200 http://bitbucket.org/pypy/pypy/changeset/ba063d80a04f/ Log: Test and fix for issue #2006 diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -454,6 +454,7 @@ self.__cursors_counter = 0 self.__statements = [] self.__statements_counter = 0 + self.__rawstatements = set() self._statement_cache = _StatementCache(self, cached_statements) self.__func_cache = {} @@ -483,6 +484,14 @@ self.__do_all_statements(Statement._finalize, True) + # depending on when this close() is called, the statements' weakrefs + # may be already dead, even though Statement.__del__() was not called + # yet. In this case, self.__rawstatements is not empty. 
+ if self.__rawstatements is not None: + for stmt in list(self.__rawstatements): + self._finalize_raw_statement(stmt) + self.__rawstatements = None + if self._db: ret = _lib.sqlite3_close(self._db) if ret != _lib.SQLITE_OK: @@ -562,6 +571,7 @@ self.__cursors = [r for r in self.__cursors if r() is not None] def _remember_statement(self, statement): + self.__rawstatements.add(statement._statement) self.__statements.append(weakref.ref(statement)) self.__statements_counter += 1 if self.__statements_counter < 200: @@ -569,6 +579,11 @@ self.__statements_counter = 0 self.__statements = [r for r in self.__statements if r() is not None] + def _finalize_raw_statement(self, _statement): + if self.__rawstatements is not None: + self.__rawstatements.remove(_statement) + _lib.sqlite3_finalize(_statement) + def __do_all_statements(self, action, reset_cursors): for weakref in self.__statements: statement = weakref() @@ -1199,7 +1214,6 @@ def __init__(self, connection, sql): self.__con = connection - self.__con._remember_statement(self) self._in_use = False @@ -1232,6 +1246,7 @@ ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) self._statement = statement_star[0] + self.__con._remember_statement(self) if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble @@ -1250,11 +1265,11 @@ def __del__(self): if self._statement: - _lib.sqlite3_finalize(self._statement) + self.__con._finalize_raw_statement(self._statement) def _finalize(self): if self._statement: - _lib.sqlite3_finalize(self._statement) + self.__con._finalize_raw_statement(self._statement) self._statement = None self._in_use = False diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -276,6 +276,32 @@ exc = raises(ValueError, cur.execute, "select 2\0") assert str(exc.value) == "the query 
contains a null character" + import sqlite3 + + def test_close_in_del_ordering(self): + import gc + class SQLiteBackend(object): + success = False + def __init__(self): + self.connection = _sqlite3.connect(":memory:") + def close(self): + self.connection.close() + def __del__(self): + self.close() + SQLiteBackend.success = True + def create_db_if_needed(self): + conn = self.connection + cursor = conn.cursor() + cursor.execute(""" + create table if not exists nameoftable(value text) + """) + cursor.close() + conn.commit() + SQLiteBackend().create_db_if_needed() + gc.collect() + gc.collect() + assert SQLiteBackend.success + class TestSQLiteHost(BaseTestSQLite): def setup_class(cls): From noreply at buildbot.pypy.org Wed Apr 1 15:28:39 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 1 Apr 2015 15:28:39 +0200 (CEST) Subject: [pypy-commit] pypy default: oups Message-ID: <20150401132839.946BD1C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76678:a6ac391b4bd7 Date: 2015-04-01 15:28 +0200 http://bitbucket.org/pypy/pypy/changeset/a6ac391b4bd7/ Log: oups diff --git a/pypy/module/test_lib_pypy/test_sqlite3.py b/pypy/module/test_lib_pypy/test_sqlite3.py --- a/pypy/module/test_lib_pypy/test_sqlite3.py +++ b/pypy/module/test_lib_pypy/test_sqlite3.py @@ -276,8 +276,6 @@ exc = raises(ValueError, cur.execute, "select 2\0") assert str(exc.value) == "the query contains a null character" - import sqlite3 - def test_close_in_del_ordering(self): import gc class SQLiteBackend(object): From noreply at buildbot.pypy.org Wed Apr 1 15:57:43 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 1 Apr 2015 15:57:43 +0200 (CEST) Subject: [pypy-commit] stmgc default: stm_wait_for_current_inevitable_transaction is actually used from outside Message-ID: <20150401135743.C5F7D1C00F7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1745:1062987da64f Date: 2015-04-01 15:58 +0200 http://bitbucket.org/pypy/stmgc/changeset/1062987da64f/ Log: 
stm_wait_for_current_inevitable_transaction is actually used from outside transactions, make sure nobody messes with the commit log during that time. diff --git a/c8/stm/sync.c b/c8/stm/sync.c --- a/c8/stm/sync.c +++ b/c8/stm/sync.c @@ -105,18 +105,25 @@ void stm_wait_for_current_inevitable_transaction(void) { + restart: + /* make sure there is no major collection happening, which + could free some commit log entries */ + s_mutex_lock(); + struct stm_commit_log_entry_s *current = STM_PSEGMENT->last_commit_log_entry; /* XXX: don't do busy-waiting */ - while (1) { - if (current->next == NULL) { - break; - } else if (current->next == INEV_RUNNING) { + while (current->next != NULL) { + if (current->next == INEV_RUNNING) { + s_mutex_unlock(); usleep(10); - continue; + /* some major collection could have freed "current", so + restart from the beginning */ + goto restart; } current = current->next; } + s_mutex_unlock(); } From noreply at buildbot.pypy.org Wed Apr 1 16:23:35 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 1 Apr 2015 16:23:35 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c8: import stmgc-c8 1062987da64f Message-ID: <20150401142335.8739F1C0207@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c8 Changeset: r76679:cd4d873b68c0 Date: 2015-04-01 16:04 +0200 http://bitbucket.org/pypy/pypy/changeset/cd4d873b68c0/ Log: import stmgc-c8 1062987da64f diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -f6788cf5fb73 +1062987da64f diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -106,7 +106,6 @@ /* case "small enough" */ nobj = (object_t *)allocate_outside_nursery_small(size); } - //dprintf(("move %p -> %p\n", obj, nobj)); /* copy the 
object */ @@ -143,6 +142,9 @@ /* Must trace the object later */ LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj_sync_now); _cards_cleared_in_object(get_priv_segment(STM_SEGMENT->segment_num), nobj, true); + + assert(IMPLY(obj_should_use_cards(STM_SEGMENT->segment_base, nobj), + (((uintptr_t)nobj) & 15) == 0)); } static void _cards_cleared_in_object(struct stm_priv_segment_info_s *pseg, object_t *obj, diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c --- a/rpython/translator/stm/src_stm/stm/sync.c +++ b/rpython/translator/stm/src_stm/stm/sync.c @@ -105,18 +105,25 @@ void stm_wait_for_current_inevitable_transaction(void) { + restart: + /* make sure there is no major collection happening, which + could free some commit log entries */ + s_mutex_lock(); + struct stm_commit_log_entry_s *current = STM_PSEGMENT->last_commit_log_entry; /* XXX: don't do busy-waiting */ - while (1) { - if (current->next == NULL) { - break; - } else if (current->next == INEV_RUNNING) { + while (current->next != NULL) { + if (current->next == INEV_RUNNING) { + s_mutex_unlock(); usleep(10); - continue; + /* some major collection could have freed "current", so + restart from the beginning */ + goto restart; } current = current->next; } + s_mutex_unlock(); } From noreply at buildbot.pypy.org Wed Apr 1 16:23:36 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 1 Apr 2015 16:23:36 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c8: increase the default of LOW_FILL_MARK, improving single thread performance Message-ID: <20150401142336.CAA561C0207@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c8 Changeset: r76680:20cadf94b7e9 Date: 2015-04-01 16:25 +0200 http://bitbucket.org/pypy/pypy/changeset/20cadf94b7e9/ Log: increase the default of LOW_FILL_MARK, improving single thread performance significantly. 
however, it may mean more conflicts on multiple threads diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -64,7 +64,7 @@ /************************************************************/ -#define LOW_FILL_MARK 400000 +#define LOW_FILL_MARK (NURSERY_SIZE / 2) static long pypy_transaction_length; From noreply at buildbot.pypy.org Wed Apr 1 17:02:11 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 1 Apr 2015 17:02:11 +0200 (CEST) Subject: [pypy-commit] pypy default: update whatsnew-head for release-2.5.1 Message-ID: <20150401150211.87ECF1C0399@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r76681:7ccce6f1e74f Date: 2015-04-01 18:00 +0300 http://bitbucket.org/pypy/pypy/changeset/7ccce6f1e74f/ Log: update whatsnew-head for release-2.5.1 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,7 +3,7 @@ ======================= .. this is a revision shortly after release-2.5.1 -.. startrev: 397b96217b85 +.. startrev: cb01edcb59414d9d93056e54ed060673d24e67c1 .. 
branch: gc-incminimark-pinning-improve Object Pinning is now used in `bz2` and `rzlib` (therefore also affects From noreply at buildbot.pypy.org Wed Apr 1 17:24:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 1 Apr 2015 17:24:45 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a "Packaging" section and mention it's needed to run it Message-ID: <20150401152445.A51C21C02BB@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76682:72b016aec157 Date: 2015-04-01 17:24 +0200 http://bitbucket.org/pypy/pypy/changeset/72b016aec157/ Log: Add a "Packaging" section and mention it's needed to run it diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -146,6 +146,26 @@ :doc:`objspace proxies ` document. +Packaging (preparing for installation) +-------------------------------------- + +Packaging is required if you want to install PyPy system-wide, even to +install on the same machine. The reason is that doing so prepares a +number of extra features that cannot be done lazily on a root-installed +PyPy, because the normal users don't have write access. This concerns +mostly libraries that would normally be compiled if and when they are +imported the first time. + +:: + + cd pypy/tool/release + ./package.py pypy-VER-PLATFORM + +This creates a clean and prepared hierarchy, as well as a ``.tar.bz2`` +with the same content; both are found by default in +``/tmp/usession-YOURNAME/build/``. You can then either move the file +hierarchy or unpack the ``.tar.bz2`` at the correct place. + Installation ------------ From noreply at buildbot.pypy.org Wed Apr 1 21:00:40 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 1 Apr 2015 21:00:40 +0200 (CEST) Subject: [pypy-commit] pypy refactor-pycall: Clone desc.pycall() and inline its parameters for the get_call_parameters() call site. 
Message-ID: <20150401190040.46CFE1C0207@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: refactor-pycall Changeset: r76683:9de382ca4b9b Date: 2015-04-01 19:58 +0100 http://bitbucket.org/pypy/pypy/changeset/9de382ca4b9b/ Log: Clone desc.pycall() and inline its parameters for the get_call_parameters() call site. diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -91,22 +91,17 @@ def get_call_parameters(self, function, args_s, policy): desc = self.bookkeeper.getdesc(function) + from rpython.annotator.description import FunctionDesc + assert isinstance(desc, FunctionDesc) args = simple_args(args_s) - result = [] - def schedule(graph, inputcells): - result.append((graph, inputcells)) - return annmodel.s_ImpossibleValue - prevpolicy = self.policy self.policy = policy self.bookkeeper.enter(None) try: - desc.pycall(schedule, args, annmodel.s_ImpossibleValue) + return desc.pycall_2(args) finally: self.bookkeeper.leave() self.policy = prevpolicy - [(graph, inputcells)] = result - return graph, inputcells def annotate_helper(self, function, args_s, policy=None): if policy is None: diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -7,7 +7,8 @@ from rpython.annotator.argument import rawshape, ArgErr from rpython.tool.sourcetools import valid_identifier, func_with_new_name from rpython.tool.pairtype import extendabletype -from rpython.annotator.model import AnnotatorError, SomeInteger, SomeString +from rpython.annotator.model import ( + AnnotatorError, SomeInteger, SomeString, s_ImpossibleValue) class CallFamily(object): """A family of Desc objects that could be called from common call sites. 
@@ -75,7 +76,6 @@ try: return self.attrs[attrname] except KeyError: - from rpython.annotator.model import s_ImpossibleValue return s_ImpossibleValue def set_s_value(self, attrname, s_value): @@ -97,7 +97,6 @@ # ClassAttrFamily is more precise: it is only about one attribut name. def __init__(self, desc): - from rpython.annotator.model import s_ImpossibleValue self.descs = {desc: True} self.read_locations = {} # set of position_keys self.s_value = s_ImpossibleValue # union of possible values @@ -321,6 +320,26 @@ result = unionof(result, s_previous_result) return result + def pycall_2(self, args): + inputcells = self.parse_arguments(args) + graph = self.specialize(inputcells) + assert isinstance(graph, FunctionGraph) + # if that graph has a different signature, we need to re-parse + # the arguments. + # recreate the args object because inputcells may have been changed + new_args = args.unmatch_signature(self.signature, inputcells) + inputcells = self.parse_arguments(new_args, graph) + res = graph, inputcells + result = s_ImpossibleValue + signature = getattr(self.pyobj, '_signature_', None) + if signature: + sigresult = enforce_signature_return(self, signature[1], result) + if sigresult is not None: + self.bookkeeper.annotator.addpendingblock( + graph, graph.returnblock, [sigresult]) + result = sigresult + return res + def bind_under(self, classdef, name): # XXX static methods return self.bookkeeper.getmethoddesc(self, @@ -352,7 +371,6 @@ @staticmethod def row_to_consider(descs, args, op): # see comments in CallFamily - from rpython.annotator.model import s_ImpossibleValue row = {} for desc in descs: def enlist(graph, ignore): @@ -685,7 +703,6 @@ # look up an attribute in the class cdesc = self.lookup(name) if cdesc is None: - from rpython.annotator.model import s_ImpossibleValue return s_ImpossibleValue else: # delegate to s_get_value to turn it into an annotation @@ -999,7 +1016,6 @@ try: value = self.read_attribute(attr) except AttributeError: - from 
rpython.annotator.model import s_ImpossibleValue return s_ImpossibleValue else: return self.bookkeeper.immutablevalue(value) From noreply at buildbot.pypy.org Thu Apr 2 10:52:25 2015 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 2 Apr 2015 10:52:25 +0200 (CEST) Subject: [pypy-commit] stmgc default: minor improvement by using allocate_old_small for small prebuilt objs Message-ID: <20150402085225.D5F8C1C0684@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1746:9ac6dfa3ca3b Date: 2015-04-02 10:37 +0200 http://bitbucket.org/pypy/stmgc/changeset/9ac6dfa3ca3b/ Log: minor improvement by using allocate_old_small for small prebuilt objs diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -106,6 +106,9 @@ object_t *_stm_allocate_old(ssize_t size_rounded_up) { /* only for tests xxx but stm_setup_prebuilt() uses this now too */ + if (size_rounded_up <= GC_LAST_SMALL_SIZE) + return _stm_allocate_old_small(size_rounded_up); + stm_char *p = allocate_outside_nursery_large(size_rounded_up); object_t *o = (object_t *)p; diff --git a/c8/test/test_card_marking.py b/c8/test/test_card_marking.py --- a/c8/test/test_card_marking.py +++ b/c8/test/test_card_marking.py @@ -374,3 +374,28 @@ self.start_transaction() assert stm_get_char(o, HDR) == '\0' + + def test_some_sizes(self): + sizes = range(16, GC_LAST_SMALL_SIZE*2, 16) + [FAST_ALLOC, FAST_ALLOC+16] + old = stm_allocate_old_refs(1) + for size in sizes: + self.start_transaction() + p = stm_allocate(size) + stm_set_char(p, 'a', use_cards=True) + stm_set_ref(old, 0, p) + self.commit_transaction() + + self.start_transaction() + p = stm_get_ref(old, 0) + assert stm_get_char(p) == 'a' + stm_set_char(p, 'b', use_cards=True) + self.commit_transaction() + + self.switch(1) + + self.start_transaction() + p = stm_get_ref(old, 0) + assert stm_get_char(p) == 'b' + self.commit_transaction() + + self.switch(0) diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py --- 
a/c8/test/test_gcpage.py +++ b/c8/test/test_gcpage.py @@ -318,10 +318,16 @@ def test_keepalive_prebuilt(self): stm_allocate_old(64) + big = GC_LAST_SMALL_SIZE+64 + stm_allocate_old(big) + + # see allocate_outside_nursery_large: + actual_big = (big + 15 ) & ~15 + self.start_transaction() - assert lib._stm_total_allocated() == 64 + LMO # large malloc'd + assert lib._stm_total_allocated() == 64 + (actual_big + LMO) # large malloc'd stm_major_collect() - assert lib._stm_total_allocated() == 64 + LMO # large malloc'd + assert lib._stm_total_allocated() == 64 + (actual_big + LMO) # large malloc'd self.commit_transaction() def test_bug(self): From noreply at buildbot.pypy.org Thu Apr 2 11:14:24 2015 From: noreply at buildbot.pypy.org (mgedmin) Date: Thu, 2 Apr 2015 11:14:24 +0200 (CEST) Subject: [pypy-commit] pypy.org mgedmin/point-directly-to-the-bitbucket-issue-tr-1427956834221: Point directly to the BitBucket issue tracker Message-ID: <20150402091424.2D5D91C0351@cobra.cs.uni-duesseldorf.de> Author: Marius Gedminas Branch: mgedmin/point-directly-to-the-bitbucket-issue-tr-1427956834221 Changeset: r589:58cc5c924e26 Date: 2015-04-02 06:40 +0000 http://bitbucket.org/pypy/pypy.org/changeset/58cc5c924e26/ Log: Point directly to the BitBucket issue tracker bugs.pypy.org redirects there, after trying to scare people away with an invalid SSL certificate. diff --git a/source/contact.txt b/source/contact.txt --- a/source/contact.txt +++ b/source/contact.txt @@ -19,6 +19,6 @@ * code on `bitbucket`_. .. __: http://mail.python.org/mailman/listinfo/pypy-dev -.. _`bug tracker`: https://bugs.pypy.org +.. _`bug tracker`: https://bitbucket.org/pypy/pypy/issues .. _`dev site`: http://doc.pypy.org .. 
_`bitbucket`: https://bitbucket.org/pypy/pypy/overview From noreply at buildbot.pypy.org Thu Apr 2 11:14:25 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Apr 2015 11:14:25 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Merged in mgedmin/pypy.org/mgedmin/point-directly-to-the-bitbucket-issue-tr-1427956834221 (pull request #10) Message-ID: <20150402091425.386C41C0351@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r590:586bc091084a Date: 2015-04-02 11:15 +0200 http://bitbucket.org/pypy/pypy.org/changeset/586bc091084a/ Log: Merged in mgedmin/pypy.org/mgedmin/point-directly-to-the-bitbucket- issue-tr-1427956834221 (pull request #10) Point directly to the BitBucket issue tracker diff --git a/source/contact.txt b/source/contact.txt --- a/source/contact.txt +++ b/source/contact.txt @@ -19,6 +19,6 @@ * code on `bitbucket`_. .. __: http://mail.python.org/mailman/listinfo/pypy-dev -.. _`bug tracker`: https://bugs.pypy.org +.. _`bug tracker`: https://bitbucket.org/pypy/pypy/issues .. _`dev site`: http://doc.pypy.org .. _`bitbucket`: https://bitbucket.org/pypy/pypy/overview From noreply at buildbot.pypy.org Thu Apr 2 11:15:34 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Apr 2015 11:15:34 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Use the same url as the redirection from bugs.pypy.org. Regenerate. Message-ID: <20150402091534.9154C1C0351@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r591:7d623a341b93 Date: 2015-04-02 11:16 +0200 http://bitbucket.org/pypy/pypy.org/changeset/7d623a341b93/ Log: Use the same url as the redirection from bugs.pypy.org. Regenerate. diff --git a/contact.html b/contact.html --- a/contact.html +++ b/contact.html @@ -73,7 +73,7 @@
  • irc: #pypy on irc.freenode.net
  • mailing list: pypy-dev at python.org
  • for security related issues, non-public funding enquiries etc. please contact pypy@sfconservancy.org
  • -
  • the bug tracker
  • +
  • the bug tracker
  • more on our dev site.
  • code on bitbucket.
  • diff --git a/source/contact.txt b/source/contact.txt --- a/source/contact.txt +++ b/source/contact.txt @@ -19,6 +19,6 @@ * code on `bitbucket`_. .. __: http://mail.python.org/mailman/listinfo/pypy-dev -.. _`bug tracker`: https://bitbucket.org/pypy/pypy/issues +.. _`bug tracker`: https://bitbucket.org/pypy/pypy/issues?status=new&status=open .. _`dev site`: http://doc.pypy.org .. _`bitbucket`: https://bitbucket.org/pypy/pypy/overview From noreply at buildbot.pypy.org Thu Apr 2 11:20:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 2 Apr 2015 11:20:40 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Another place Message-ID: <20150402092040.959B31C0684@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r592:9a60e0a350fd Date: 2015-04-02 11:21 +0200 http://bitbucket.org/pypy/pypy.org/changeset/9a60e0a350fd/ Log: Another place diff --git a/performance.html b/performance.html --- a/performance.html +++ b/performance.html @@ -347,7 +347,7 @@

    We generally consider things that are slower on PyPy than CPython to be bugs of PyPy. If you find some issue that is not documented here, -please report it to our bug tracker for investigation.

    +please report it to our bug tracker for investigation.

    - - - - - diff --git a/rpython/rlib/rstrategies/htmlcov1/rstrategies.html b/rpython/rlib/rstrategies/htmlcov1/rstrategies.html deleted file mode 100644 --- a/rpython/rlib/rstrategies/htmlcov1/rstrategies.html +++ /dev/null @@ -1,1226 +0,0 @@ - - - - - - - - Coverage for rstrategies: 95% - - - - - - - - - - - - -
    - -

    Hot-keys on this page

    -
    -

    - r - m - x - p   toggle line displays -

    -

    - j - k   next/prev highlighted chunk -

    -

    - 0   (zero) top of page -

    -

    - 1   (one) first highlighted chunk -

    -
    -
    - -
    - - - -
    -

    1

    -

    2

    -

    3

    -

    4

    -

    5

    -

    6

    -

    7

    -

    8

    -

    9

    -

    10

    -

    11

    -

    12

    -

    13

    -

    14

    -

    15

    -

    16

    -

    17

    -

    18

    -

    19

    -

    20

    -

    21

    -

    22

    -

    23

    -

    24

    -

    25

    -

    26

    -

    27

    -

    28

    -

    29

    -

    30

    -

    31

    -

    32

    -

    33

    -

    34

    -

    35

    -

    36

    -

    37

    -

    38

    -

    39

    -

    40

    -

    41

    -

    42

    -

    43

    -

    44

    -

    45

    -

    46

    -

    47

    -

    48

    -

    49

    -

    50

    -

    51

    -

    52

    -

    53

    -

    54

    -

    55

    -

    56

    -

    57

    -

    58

    -

    59

    -

    60

    -

    61

    -

    62

    -

    63

    -

    64

    -

    65

    -

    66

    -

    67

    -

    68

    -

    69

    -

    70

    -

    71

    -

    72

    -

    73

    -

    74

    -

    75

    -

    76

    -

    77

    -

    78

    -

    79

    -

    80

    -

    81

    -

    82

    -

    83

    -

    84

    -

    85

    -

    86

    -

    87

    -

    88

    -

    89

    -

    90

    -

    91

    -

    92

    -

    93

    -

    94

    -

    95

    -

    96

    -

    97

    -

    98

    -

    99

    -

    100

    -

    101

    -

    102

    -

    103

    -

    104

    -

    105

    -

    106

    -

    107

    -

    108

    -

    109

    -

    110

    -

    111

    -

    112

    -

    113

    -

    114

    -

    115

    -

    116

    -

    117

    -

    118

    -

    119

    -

    120

    -

    121

    -

    122

    -

    123

    -

    124

    -

    125

    -

    126

    -

    127

    -

    128

    -

    129

    -

    130

    -

    131

    -

    132

    -

    133

    -

    134

    -

    135

    -

    136

    -

    137

    -

    138

    -

    139

    -

    140

    -

    141

    -

    142

    -

    143

    -

    144

    -

    145

    -

    146

    -

    147

    -

    148

    -

    149

    -

    150

    -

    151

    -

    152

    -

    153

    -

    154

    -

    155

    -

    156

    -

    157

    -

    158

    -

    159

    -

    160

    -

    161

    -

    162

    -

    163

    -

    164

    -

    165

    -

    166

    -

    167

    -

    168

    -

    169

    -

    170

    -

    171

    -

    172

    -

    173

    -

    174

    -

    175

    -

    176

    -

    177

    -

    178

    -

    179

    -

    180

    -

    181

    -

    182

    -

    183

    -

    184

    -

    185

    -

    186

    -

    187

    -

    188

    -

    189

    -

    190

    -

    191

    -

    192

    -

    193

    -

    194

    -

    195

    -

    196

    -

    197

    -

    198

    -

    199

    -

    200

    -

    201

    -

    202

    -

    203

    -

    204

    -

    205

    -

    206

    -

    207

    -

    208

    -

    209

    -

    210

    -

    211

    -

    212

    -

    213

    -

    214

    -

    215

    -

    216

    -

    217

    -

    218

    -

    219

    -

    220

    -

    221

    -

    222

    -

    223

    -

    224

    -

    225

    -

    226

    -

    227

    -

    228

    -

    229

    -

    230

    -

    231

    -

    232

    -

    233

    -

    234

    -

    235

    -

    236

    -

    237

    -

    238

    -

    239

    -

    240

    -

    241

    -

    242

    -

    243

    -

    244

    -

    245

    -

    246

    -

    247

    -

    248

    -

    249

    -

    250

    -

    251

    -

    252

    -

    253

    -

    254

    -

    255

    -

    256

    -

    257

    -

    258

    -

    259

    -

    260

    -

    261

    -

    262

    -

    263

    -

    264

    -

    265

    -

    266

    -

    267

    -

    268

    -

    269

    -

    270

    -

    271

    -

    272

    -

    273

    -

    274

    -

    275

    -

    276

    -

    277

    -

    278

    -

    279

    -

    280

    -

    281

    -

    282

    -

    283

    -

    284

    -

    285

    -

    286

    -

    287

    -

    288

    -

    289

    -

    290

    -

    291

    -

    292

    -

    293

    -

    294

    -

    295

    -

    296

    -

    297

    -

    298

    -

    299

    -

    300

    -

    301

    -

    302

    -

    303

    -

    304

    -

    305

    -

    306

    -

    307

    -

    308

    -

    309

    -

    310

    -

    311

    -

    312

    -

    313

    -

    314

    -

    315

    -

    316

    -

    317

    -

    318

    -

    319

    -

    320

    -

    321

    -

    322

    -

    323

    -

    324

    -

    325

    -

    326

    -

    327

    -

    328

    -

    329

    -

    330

    -

    331

    -

    332

    -

    333

    -

    334

    -

    335

    -

    336

    -

    337

    -

    338

    -

    339

    -

    340

    -

    341

    -

    342

    -

    343

    -

    344

    -

    345

    -

    346

    -

    347

    -

    348

    -

    349

    -

    350

    -

    351

    -

    352

    -

    353

    -

    354

    -

    355

    -

    356

    -

    357

    -

    358

    -

    359

    -

    360

    -

    361

    -

    362

    -

    363

    -

    364

    -

    365

    -

    366

    -

    367

    -

    368

    -

    369

    -

    370

    -

    371

    -

    372

    -

    373

    -

    374

    -

    375

    -

    376

    -

    377

    -

    378

    -

    379

    -

    380

    -

    381

    -

    382

    -

    383

    -

    384

    -

    385

    -

    386

    -

    387

    -

    388

    -

    389

    -

    390

    -

    391

    -

    392

    -

    393

    -

    394

    -

    395

    -

    396

    -

    397

    -

    398

    -

    399

    -

    400

    -

    401

    -

    402

    -

    403

    -

    404

    -

    405

    -

    406

    -

    407

    -

    408

    -

    409

    -

    410

    -

    411

    -

    412

    -

    413

    -

    414

    -

    415

    -

    416

    -

    417

    -

    418

    -

    419

    -

    420

    -

    421

    -

    422

    -

    423

    -

    424

    -

    425

    -

    426

    -

    427

    -

    428

    -

    429

    -

    430

    -

    431

    -

    432

    -

    433

    -

    434

    -

    435

    -

    436

    -

    437

    -

    438

    -

    439

    -

    440

    -

    441

    -

    442

    -

    443

    -

    444

    -

    445

    -

    446

    -

    447

    -

    448

    -

    449

    -

    450

    -

    451

    -

    452

    -

    453

    -

    454

    -

    455

    -

    456

    -

    457

    -

    458

    -

    459

    -

    460

    -

    461

    -

    462

    -

    463

    -

    464

    -

    465

    -

    466

    -

    467

    -

    468

    -

    469

    -

    470

    -

    471

    -

    472

    -

    473

    -

    474

    -

    475

    -

    476

    -

    477

    -

    478

    -

    479

    -

    480

    -

    481

    -

    482

    -

    483

    -

    484

    -

    485

    -

    486

    -

    487

    -

    488

    -

    489

    -

    490

    -

    491

    -

    492

    -

    493

    -

    494

    -

    495

    -

    496

    -

    497

    -

    498

    -

    499

    -

    500

    -

    501

    -

    502

    -

    503

    -

    504

    -

    505

    -

    506

    -

    507

    -

    508

    -

    509

    -

    510

    -

    511

    -

    512

    -

    513

    -

    514

    -

    515

    -

    516

    -

    517

    -

    518

    -

    519

    -

    520

    -

    521

    -

    522

    -

    523

    -

    524

    -

    525

    -

    526

    -

    527

    -

    528

    -

    529

    -

    530

    -

    531

    -

    532

    -

    533

    -

    534

    -

    535

    -

    536

    -

    537

    -

    538

    -

    539

    -

    540

    -

    541

    -

    542

    -

    543

    -

    544

    -

    545

    -

    546

    -

    547

    -

    548

    -

    549

    -

    550

    -

    551

    -

    552

    -

    553

    -

    554

    -

    555

    -

    556

    -

    557

    -

    558

    -

    559

    -

    560

    -

    561

    -

    562

    -

    563

    -

    564

    -

    565

    -

    566

    -

    567

    -

    568

    -

    569

    -

    570

    -

    571

    -

    572

    - -
    -

     

    -

    import weakref, sys 

    -

    from rpython.rlib.rstrategies import logger 

    -

    from rpython.rlib import jit, objectmodel, rerased 

    -

    from rpython.rlib.objectmodel import specialize 

    -

     

    -

    def make_accessors(strategy='strategy', storage='storage'): 

    -

        """ 

    -

        Instead of using this generator, the methods can be implemented manually. 

    -

        A third way is to overwrite the getter/setter methods in StrategyFactory. 

    -

        """ 

    -

        def make_getter(attr): 

    -

            def getter(self): return getattr(self, attr) 

    -

            return getter 

    -

        def make_setter(attr): 

    -

            def setter(self, val): setattr(self, attr, val) 

    -

            return setter 

    -

        classdef = sys._getframe(1).f_locals 

    -

        classdef['_get_strategy'] = make_getter(strategy) 

    -

        classdef['_set_strategy'] = make_setter(strategy) 

    -

        classdef['_get_storage'] = make_getter(storage) 

    -

        classdef['_set_storage'] = make_setter(storage) 

    -

     

    -

    class StrategyMetaclass(type): 

    -

        """ 

    -

        A metaclass is required, because we need certain attributes to be special 

    -

        for every single strategy class. 

    -

        """ 

    -

        def __new__(self, name, bases, attrs): 

    -

            attrs['_is_strategy'] = False 

    -

            attrs['_is_singleton'] = False 

    -

            attrs['_specializations'] = [] 

    -

            # Not every strategy uses rerased-pairs, but they won't hurt 

    -

            erase, unerase = rerased.new_erasing_pair(name) 

    -

            def get_storage(self, w_self): 

    -

                erased = self.strategy_factory().get_storage(w_self) 

    -

                return unerase(erased) 

    -

            def set_storage(self, w_self, storage): 

    -

                erased = erase(storage) 

    -

                self.strategy_factory().set_storage(w_self, erased) 

    -

            attrs['get_storage'] = get_storage 

    -

            attrs['set_storage'] = set_storage 

    -

            return type.__new__(self, name, bases, attrs) 

    -

     

    -

    def strategy(generalize=None, singleton=True): 

    -

        """ 

    -

        Strategy classes must be decorated with this. 

    -

        generalize is a list of other strategies, that can be switched to from the decorated strategy. 

    -

        If the singleton flag is set to False, new strategy instances will be created, 

    -

        instead of always reusing the singleton object. 

    -

        """ 

    -

        def decorator(strategy_class): 

    -

            # Patch strategy class: Add generalized_strategy_for and mark as strategy class. 

    -

            if generalize: 

    -

                @jit.unroll_safe 

    -

                def generalized_strategy_for(self, value): 

    -

                    # TODO - optimize this method 

    -

                    for strategy in generalize: 

    -

                        if self.strategy_factory().strategy_singleton_instance(strategy)._check_can_handle(value): 

    -

                            return strategy 

    -

                    raise Exception("Could not find generalized strategy for %s coming from %s" % (value, self)) 

    -

                strategy_class.generalized_strategy_for = generalized_strategy_for 

    -

                for generalized in generalize: 

    -

                    generalized._specializations.append(strategy_class) 

    -

            strategy_class._is_strategy = True 

    -

            strategy_class._generalizations = generalize 

    -

            strategy_class._is_singleton = singleton 

    -

            return strategy_class 

    -

        return decorator 

    -

     

    -

    class StrategyFactory(object): 

    -

        _immutable_fields_ = ["strategies[*]", "logger", "strategy_singleton_field"] 

    -

        factory_instance_counter = 0 

    -

     

    -

        def __init__(self, root_class, all_strategy_classes=None): 

    -

            if all_strategy_classes is None: 

    -

                all_strategy_classes = self._collect_subclasses(root_class) 

    -

            self.strategies = [] 

    -

            self.logger = logger.Logger() 

    -

     

    -

            # This is to avoid confusion between multiple factories existing simultaneously (e.g. in tests) 

    -

            self.strategy_singleton_field = "__singleton_%i" % StrategyFactory.factory_instance_counter 

    -

            StrategyFactory.factory_instance_counter += 1 

    -

     

    -

            self._create_strategy_instances(root_class, all_strategy_classes) 

    -

     

    -

        def _create_strategy_instances(self, root_class, all_strategy_classes): 

    -

            for strategy_class in all_strategy_classes: 

    -

                if strategy_class._is_strategy: 

    -

                    setattr(strategy_class, self.strategy_singleton_field, self.instantiate_strategy(strategy_class)) 

    -

                    self.strategies.append(strategy_class) 

    -

                self._patch_strategy_class(strategy_class, root_class) 

    -

            self._order_strategies() 

    -

     

    -

        # ============================= 

    -

        # API methods 

    -

        # ============================= 

    -

     

    -

        def switch_strategy(self, w_self, new_strategy_type, new_element=None): 

    -

            """ 

    -

            Switch the strategy of w_self to the new type. 

    -

            new_element can be given as as hint, purely for logging purposes. 

    -

            It should be the object that was added to w_self, causing the strategy switch. 

    -

            """ 

    -

            old_strategy = self.get_strategy(w_self) 

    -

            if new_strategy_type._is_singleton: 

    -

                new_strategy = self.strategy_singleton_instance(new_strategy_type) 

    -

            else: 

    -

                size = old_strategy.size(w_self) 

    -

                new_strategy = self.instantiate_strategy(new_strategy_type, w_self, size) 

    -

            self.set_strategy(w_self, new_strategy) 

    -

            old_strategy._convert_storage_to(w_self, new_strategy) 

    -

            new_strategy.strategy_switched(w_self) 

    -

            self.log(w_self, new_strategy, old_strategy, new_element) 

    -

            return new_strategy 

    -

     

    -

        def set_initial_strategy(self, w_self, strategy_type, size, elements=None): 

    -

            """ 

    -

            Initialize the strategy and storage fields of w_self. 

    -

            This must be called before switch_strategy or any strategy method can be used. 

    -

            elements is an optional list of values initially stored in w_self. 

    -

            If given, then len(elements) == size must hold. 

    -

            """ 

    -

            assert self.get_strategy(w_self) is None, "Strategy should not be initialized yet!" 

    -

            if strategy_type._is_singleton: 

    -

                strategy = self.strategy_singleton_instance(strategy_type) 

    -

            else: 

    -

                strategy = self.instantiate_strategy(strategy_type, w_self, size) 

    -

            self.set_strategy(w_self, strategy) 

    -

            strategy._initialize_storage(w_self, size) 

    -

            element = None 

    -

            if elements: 

    -

                strategy.store_all(w_self, elements) 

    -

                if len(elements) > 0: element = elements[0] 

    -

            strategy.strategy_switched(w_self) 

    -

            self.log(w_self, strategy, None, element) 

    -

            return strategy 

    -

     

    -

        @jit.unroll_safe 

    -

        def strategy_type_for(self, objects): 

    -

            """ 

    -

            Return the best-fitting strategy to hold all given objects. 

    -

            """ 

    -

            specialized_strategies = len(self.strategies) 

    -

            can_handle = [True] * specialized_strategies 

    -

            for obj in objects: 

    -

                if specialized_strategies <= 1: 

    -

                    break 

    -

                for i, strategy in enumerate(self.strategies): 

    -

                    if can_handle[i] and not self.strategy_singleton_instance(strategy)._check_can_handle(obj): 

    -

                        can_handle[i] = False 

    -

                        specialized_strategies -= 1 

    -

            for i, strategy_type in enumerate(self.strategies): 

    -

                if can_handle[i]: 

    -

                    return strategy_type 

    -

            raise Exception("Could not find strategy to handle: %s" % objects) 

    -

     

    -

        def decorate_strategies(self, transitions): 

    -

            """ 

    -

            As an alternative to decorating all strategies with @strategy, 

    -

            invoke this in the constructor of your StrategyFactory subclass, before 

    -

            calling __init__. transitions is a dict mapping all strategy classes to 

    -

            their 'generalize' list parameter (see @strategy decorator). 

    -

            """ 

    -

            "NOT_RPYTHON" 

    -

            for strategy_class, generalized in transitions.items(): 

    -

                strategy(generalized)(strategy_class) 

    -

     

    -

        # ============================= 

    -

        # The following methods can be overwritten to customize certain aspects of the factory. 

    -

        # ============================= 

    -

     

    -

        def instantiate_strategy(self, strategy_type, w_self=None, initial_size=0): 

    -

            """ 

    -

            Return a functional instance of strategy_type. 

    -

            Overwrite this if you need a non-default constructor. 

    -

            The two additional parameters should be ignored for singleton-strategies. 

    -

            """ 

    -

            return strategy_type() 

    -

     

    -

        def log(self, w_self, new_strategy, old_strategy=None, new_element=None): 

    -

            """ 

    -

            This can be overwritten into a more appropriate call to self.logger.log 

    -

            """ 

    -

            if not self.logger.active: return 

    -

            new_strategy_str = self.log_string_for_object(new_strategy) 

    -

            old_strategy_str = self.log_string_for_object(old_strategy) 

    -

            element_typename = self.log_string_for_object(new_element) 

    -

            size = new_strategy.size(w_self) 

    -

            typename = "" 

    -

            cause = "Switched" if old_strategy else "Created" 

    -

            self.logger.log(new_strategy_str, size, cause, old_strategy_str, typename, element_typename) 

    -

     

    -

        @specialize.call_location() 

    -

        def log_string_for_object(self, obj): 

    -

            """ 

    -

            This can be overwritten instead of the entire log() method. 

    -

            Keep the specialize-annotation in order to handle different kinds of objects here. 

    -

            """ 

    -

            return obj.__class__.__name__ if obj else "" 

    -

     

    -

        # These storage accessors are specialized because the storage field is  

    -

        # populated by erased-objects which seem to be incompatible sometimes. 

    -

        @specialize.call_location() 

    -

        def get_storage(self, obj): 

    -

            return obj._get_storage() 

    -

        @specialize.call_location() 

    -

        def set_storage(self, obj, val): 

    -

            return obj._set_storage(val) 

    -

     

    -

        def get_strategy(self, obj): 

    -

            return obj._get_strategy() 

    -

        def set_strategy(self, obj, val): 

    -

            return obj._set_strategy(val) 

    -

     

    -

        # ============================= 

    -

        # Internal methods 

    -

        # ============================= 

    -

     

    -

        def _patch_strategy_class(self, strategy_class, root_class): 

    -

            "NOT_RPYTHON" 

    -

            # Patch root class: Add default handler for visitor 

    -

            def _convert_storage_from_OTHER(self, w_self, previous_strategy): 

    -

                self._convert_storage_from(w_self, previous_strategy) 

    -

            funcname = "_convert_storage_from_" + strategy_class.__name__ 

    -

            _convert_storage_from_OTHER.func_name = funcname 

    -

            setattr(root_class, funcname, _convert_storage_from_OTHER) 

    -

     

    -

            # Patch strategy class: Add polymorphic visitor function 

    -

            def _convert_storage_to(self, w_self, new_strategy): 

    -

                getattr(new_strategy, funcname)(w_self, self) 

    -

            strategy_class._convert_storage_to = _convert_storage_to 

    -

     

    -

        def _collect_subclasses(self, cls): 

    -

            "NOT_RPYTHON" 

    -

            subclasses = [] 

    -

            for subcls in cls.__subclasses__(): 

    -

                subclasses.append(subcls) 

    -

                subclasses.extend(self._collect_subclasses(subcls)) 

    -

            return subclasses 

    -

     

    -

        def _order_strategies(self): 

    -

            "NOT_RPYTHON" 

    -

            def get_generalization_depth(strategy, visited=None): 

    -

                if visited is None: 

    -

                    visited = set() 

    -

                if strategy._generalizations: 

    -

                    if strategy in visited: 

    -

                        raise Exception("Cycle in generalization-tree of %s" % strategy) 

    -

                    visited.add(strategy) 

    -

                    depth = 0 

    -

                    for generalization in strategy._generalizations: 

    -

                        other_depth = get_generalization_depth(generalization, set(visited)) 

    -

                        depth = max(depth, other_depth) 

    -

                    return depth + 1 

    -

                else: 

    -

                    return 0 

    -

            self.strategies.sort(key=get_generalization_depth, reverse=True) 

    -

     

    -

        @jit.elidable 

    -

        def strategy_singleton_instance(self, strategy_class): 

    -

            return getattr(strategy_class, self.strategy_singleton_field) 

    -

     

    -

        def _freeze_(self): 

    -

            # Instance will be frozen at compile time, making accesses constant. 

    -

            # The constructor does meta stuff which is not possible after translation. 

    -

            return True 

    -

     

    -

    class AbstractStrategy(object): 

    -

        """ 

    -

        == Required: 

    -

        strategy_factory(self) - Access to StorageFactory 

    -

        """ 

    -

     

    -

        def strategy_switched(self, w_self): 

    -

            # Overwrite this method for a hook whenever the strategy 

    -

            # of w_self was switched to self. 

    -

            pass 

    -

     

    -

        # Main Fixedsize API 

    -

     

    -

        def store(self, w_self, index0, value): 

    -

            raise NotImplementedError("Abstract method") 

    -

     

    -

        def fetch(self, w_self, index0): 

    -

            raise NotImplementedError("Abstract method") 

    -

     

    -

        def size(self, w_self): 

    -

            raise NotImplementedError("Abstract method") 

    -

     

    -

        # Fixedsize utility methods 

    -

     

    -

        def slice(self, w_self, start, end): 

    -

            return [ self.fetch(w_self, i) for i in range(start, end)] 

    -

     

    -

        def fetch_all(self, w_self): 

    -

            return self.slice(w_self, 0, self.size(w_self)) 

    -

     

    -

        def store_all(self, w_self, elements): 

    -

            for i, e in enumerate(elements): 

    -

                self.store(w_self, i, e) 

    -

     

    -

        # Main Varsize API 

    -

     

    -

        def insert(self, w_self, index0, list_w): 

    -

            raise NotImplementedError("Abstract method") 

    -

     

    -

        def delete(self, w_self, start, end): 

    -

            raise NotImplementedError("Abstract method") 

    -

     

    -

        # Varsize utility methods 

    -

     

    -

        def append(self, w_self, list_w): 

    -

            self.insert(w_self, self.size(w_self), list_w) 

    -

     

    -

        def pop(self, w_self, index0): 

    -

            e = self.fetch(w_self, index0) 

    -

            self.delete(w_self, index0, index0+1) 

    -

            return e 

    From noreply at buildbot.pypy.org Wed Apr 22 12:21:02 2015 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 22 Apr 2015 12:21:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged file edited online. Message-ID: <20150422102102.291C41C06CD@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: Changeset: r76882:f1984a4d5583 Date: 2015-03-12 15:04 +0100 http://bitbucket.org/pypy/pypy/changeset/f1984a4d5583/ Log: Merged file edited online. diff --git a/rpython/doc/rstrategies.rst b/rpython/doc/rstrategies.rst --- a/rpython/doc/rstrategies.rst +++ b/rpython/doc/rstrategies.rst @@ -98,18 +98,18 @@ :: -class AbstractStrategy(AbstractStrategy): - _attrs_ = ['space'] - _immutable_fields_ = ['space'] - __metaclass__ = rstrat.StrategyMetaclass - import_from_mixin(rstrat.AbstractStrategy) - import_from_mixin(rstrategies.SafeIndexingMixin) - - def __init__(self, space): - self.space = space - - def strategy_factory(self): - return self.space.strategy_factory + class AbstractStrategy(AbstractStrategy): + _attrs_ = ['space'] + _immutable_fields_ = ['space'] + __metaclass__ = rstrat.StrategyMetaclass + import_from_mixin(rstrat.AbstractStrategy) + import_from_mixin(rstrategies.SafeIndexingMixin) + + def __init__(self, space): + self.space = space + + def strategy_factory(self): + return self.space.strategy_factory Strategy classes @@ -144,14 +144,14 @@ :: - at rstrat.strategy(generalize=[GenericStrategy]) -class IntegerOrNilStrategy(AbstractStrategy): - import_from_mixin(rstrat.TaggingStrategy) - contained_type = model.W_Integer - def wrap(self, val): return self.space.wrap_int(val) - def unwrap(self, w_val): return self.space.unwrap_int(w_val) - def wrapped_tagged_value(self): return self.space.w_nil - def unwrapped_tagged_value(self): return constants.MAXINT + @rstrat.strategy(generalize=[GenericStrategy]) + class IntegerOrNilStrategy(AbstractStrategy): + import_from_mixin(rstrat.TaggingStrategy) + contained_type = model.W_Integer + def wrap(self, 
val): return self.space.wrap_int(val) + def unwrap(self, w_val): return self.space.unwrap_int(w_val) + def wrapped_tagged_value(self): return self.space.w_nil + def unwrapped_tagged_value(self): return constants.MAXINT Strategy Factory ---------------- @@ -188,22 +188,22 @@ :: -class StrategyFactory(rstrategies.StrategyFactory): - _attrs_ = ['space'] - _immutable_fields_ = ['space'] - - def __init__(self, space): - self.space = space - rstrat.StrategyFactory.__init__(self, AbstractStrategy) - - def instantiate_strategy(self, strategy_type): - return strategy_type(self.space) - - def strategy_type_for(self, list_w, weak=False): - """ - Helper method for handling weak objects specially - """ - if weak: - return WeakListStrategy + class StrategyFactory(rstrategies.StrategyFactory): + _attrs_ = ['space'] + _immutable_fields_ = ['space'] + + def __init__(self, space): + self.space = space + rstrat.StrategyFactory.__init__(self, AbstractStrategy) + + def instantiate_strategy(self, strategy_type): + return strategy_type(self.space) + + def strategy_type_for(self, list_w, weak=False): + """ + Helper method for handling weak objects specially + """ + if weak: + return WeakListStrategy return rstrategies.StrategyFactory.strategy_type_for(self, list_w) \ No newline at end of file From noreply at buildbot.pypy.org Wed Apr 22 12:21:03 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 22 Apr 2015 12:21:03 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in anton_gulenko/pypy (pull request #306) Message-ID: <20150422102103.4FB351C06CD@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r76883:62106262ee6c Date: 2015-04-22 12:20 +0200 http://bitbucket.org/pypy/pypy/changeset/62106262ee6c/ Log: Merged in anton_gulenko/pypy (pull request #306) rstrategies - A library for Storage Strategies diff too long, truncating to 2000 out of 2151 lines diff --git a/rpython/doc/rlib.rst b/rpython/doc/rlib.rst --- a/rpython/doc/rlib.rst +++ 
b/rpython/doc/rlib.rst @@ -128,6 +128,14 @@ a hierarchy of Address classes, in a typical static-OO-programming style. +rstrategies +----------- + +The :source:`rpython/rlib/rstrategies` module contains a library to implement storage strategies in +RPython VMs. The library is language-independent and extensible. +More details and examples can be found in the :doc:`rstrategies documentation `. + + streamio -------- diff --git a/rpython/doc/rstrategies.rst b/rpython/doc/rstrategies.rst new file mode 100644 --- /dev/null +++ b/rpython/doc/rstrategies.rst @@ -0,0 +1,209 @@ +rstrategies +=========== + +A library to implement storage strategies in VMs based on the RPython +toolchain. rstrategies can be used in VMs for any language or language +family. + +This library has been developed as part of a Masters Thesis by `Anton +Gulenko `__. + +The original paper describing the optimization "Storage Strategies for +collections in dynamically typed languages" by C.F. Bolz, L. Diekmann +and L. Tratt can be found +`here `__. + +So far, this library has been adpoted by 3 VMs: +`RSqueak `__, +`Topaz `__ (`Forked +here `__) and +`Pycket `__ (`Forked +here `__). + +Concept +------- + +Collections are often used homogeneously, i.e. they contain only objects +of the same type. Primitive numeric types like ints or floats are +especially interesting for optimization. These cases can be optimized by +storing the unboxed data of these objects in consecutive memory. This is +done by letting a special "strategy" object handle the entire storage of +a collection. The collection object holds two separate references: one +to its strategy and one to its storage. Every operation on the +collection is delegated to the strategy, which accesses the storage when +needed. The strategy can be switched to a more suitable one, which might +require converting the storage array. + +Usage +~~~~~ + +The following are the steps needed to integrated rstrategies in an +RPython VM. 
Because of the special nature of this library it is not +enough to simply call some API methods; the library must be integrated +within existing VM classes using a metaclass, mixins and other +meta-programming techniques. + +The sequence of steps described here is something like a "setup +walkthrough", and might be a bit abstract. To see a concrete example, +look at +`SingletonStorageStrategy `__, +`StrategyFactory `__ +and +`W\_PointersObject `__ +from the `RSqueak VM `__. The +code is also well commented. + +Basics +------- + +Currently the rstrategies library supports fixed sized and variable +sized collections. This can be used to optimize a wide range of +primitive data structures like arrays, lists or regular objects. Any of +these are called 'collections' in this context. The VM should have a +central class or class hierarchy for collections. In order to extend +these classes and use strategies, the library needs accessor methods for +two attributes of collection objects: strategy and storage. The easiest +way is adding the following line to the body of the root collection +class: + +:: + + rstrategies.make_accessors(strategy='strategy', storage='storage') + +This will generate the 4 accessor methods +``_[get/set]_[storage/strategy]()`` for the respective attributes. +Alternatively, implement these methods manually or overwrite the +getters/setters in ``StrategyFactory``. + +Next, the strategy classes must be defined. This requires a small class +hierarchy with a dedicated root class. In the definition of this root +class, include the following lines: + +:: + + __metaclass__ = rstrategies.StrategyMetaclass + import_from_mixin(rstrategies.AbstractStrategy) + import_from_mixin(rstrategies.SafeIndexingMixin) + +``import_from_mixin`` can be found in ``rpython.rlib.objectmodel``. If +index-checking is performed safely at other places in the VM, you can +use ``rstrategies.UnsafeIndexingMixin`` instead. 
If you need your own +metaclass, you can combine yours with the rstrategies one using multiple +inheritance `like +here `__. +Also implement a ``storage_factory()`` method, which returns an instance +of ``rstrategies.StorageFactory``, which is described below. + +An example ``AbstractStrategy`` class, which also stores an additional ``space`` parameter could looks like this: + +:: + + class AbstractStrategy(AbstractStrategy): + _attrs_ = ['space'] + _immutable_fields_ = ['space'] + __metaclass__ = rstrat.StrategyMetaclass + import_from_mixin(rstrat.AbstractStrategy) + import_from_mixin(rstrategies.SafeIndexingMixin) + + def __init__(self, space): + self.space = space + + def strategy_factory(self): + return self.space.strategy_factory + + +Strategy classes +---------------- + +Now you can create the actual strategy classes, subclassing them from +the single root class. The following list summarizes the basic +strategies available. + +- ``EmptyStrategy`` A strategy for empty collections; very efficient, but limited. Does not allocate anything. +- ``SingleValueStrategy`` A strategy for collections containing the same object ``n`` times. Only allocates memory to store the size of the collection. +- ``GenericStrategy`` A non-optimized strategy backed by a generic python list. This is the fallback strategy, since it can store everything, but is not optimized. +- ``WeakGenericStrategy`` Like ``GenericStrategy``, but uses ``weakref`` to hold on weakly to its elements. +- ``SingleTypeStrategy`` Can store a single unboxed type like int or float. This is the main optimizing strategy +- ``TaggingStrategy`` Extension of SingleTypeStrategy. Uses a specific value in the value range of the unboxed type to represent one additional, arbitrary object. For example, one of ``float``'s ``NaN`` representations can be used to represent special value like ``nil``. + +There are also intermediate classes, which allow creating new, more +customized strategies. 
For this, you should get familiar with the code. + +Include one of these mixin classes using ``import_from_mixin``. The +mixin classes contain comments describing methods or fields which are +also required in the strategy class in order to use them. Additionally, +add the ``@rstrategies.strategy(generalize=alist)`` decorator to all +strategy classes. The ``alist`` parameter must contain all strategies, +which the decorated strategy can switch to, if it can not represent a +new element anymore. +`Example `__ +for an implemented strategy. See the other strategy classes behind this +link for more examples. + +An example strategy class for optimized ``int`` storage could look like this: + +:: + + @rstrat.strategy(generalize=[GenericStrategy]) + class IntegerOrNilStrategy(AbstractStrategy): + import_from_mixin(rstrat.TaggingStrategy) + contained_type = model.W_Integer + def wrap(self, val): return self.space.wrap_int(val) + def unwrap(self, w_val): return self.space.unwrap_int(w_val) + def wrapped_tagged_value(self): return self.space.w_nil + def unwrapped_tagged_value(self): return constants.MAXINT + +Strategy Factory +---------------- + +The last part is subclassing ``rstrategies.StrategyFactory``, +overwriting the method ``instantiate_strategy`` if necessary and passing +the strategies root class to the constructor. The factory provides the +methods ``switch_strategy``, ``set_initial_strategy``, +``strategy_type_for`` which can be used by the VM code to use the +mechanism behind strategies. See the comments in the source code. + +The strategy mixins offer the following methods to manipulate the +contents of the collection: + +- basic API + + - ``size`` + +- fixed size API + + - ``store``, ``fetch``, ``slice``, ``store_all``, ``fetch_all`` + +- variable size API + + - ``insert``, ``delete``, ``append``, ``pop`` + +If the collection has a fixed size, simply never use any of the variable +size methods in the VM code. 
Since the strategies are singletons, these +methods need the collection object as first parameter. For convenience, +more fitting accessor methods should be implemented on the collection +class itself. + +An example strategy factory for the ``AbstractStrategy`` class above could look like this: + +:: + + class StrategyFactory(rstrategies.StrategyFactory): + _attrs_ = ['space'] + _immutable_fields_ = ['space'] + + def __init__(self, space): + self.space = space + rstrat.StrategyFactory.__init__(self, AbstractStrategy) + + def instantiate_strategy(self, strategy_type): + return strategy_type(self.space) + + def strategy_type_for(self, list_w, weak=False): + """ + Helper method for handling weak objects specially + """ + if weak: + return WeakListStrategy + return rstrategies.StrategyFactory.strategy_type_for(self, list_w) + \ No newline at end of file diff --git a/rpython/rlib/rstrategies/.coveragerc b/rpython/rlib/rstrategies/.coveragerc new file mode 100644 --- /dev/null +++ b/rpython/rlib/rstrategies/.coveragerc @@ -0,0 +1,10 @@ +# .coveragerc file to control coverage.py (code coverage plugin for pytest) +# Get it here: https://pypi.python.org/pypi/pytest-cov +# Examples: +# $ python -m pytest test --cov rpython.rlib.rstrategies --cov-report html --cov-config .coveragerc + +[run] +omit = + test/* + */__init__.py + logparser.py diff --git a/rpython/rlib/rstrategies/__init__.py b/rpython/rlib/rstrategies/__init__.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rstrategies/__init__.py @@ -0,0 +1,1 @@ +# Empy diff --git a/rpython/rlib/rstrategies/logger.py b/rpython/rlib/rstrategies/logger.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rstrategies/logger.py @@ -0,0 +1,58 @@ + +class LogEntry(object): + def __init__(self): + self.slots = 0 + self.objects = 0 + self.element_typenames = {} + + def add(self, size, element_typename): + self.slots += size + self.objects += 1 + if element_typename: + self.element_typenames[element_typename] = None + + def 
classnames(self): + return self.element_typenames.keys() + +class Logger(object): + _attrs_ = ["active", "aggregate", "logs"] + _immutable_fields_ = ["active?", "aggregate?", "logs"] + + def __init__(self): + self.active = False + self.aggregate = False + self.logs = {} + + def activate(self, aggregate=False): + self.active = True + self.aggregate = self.aggregate or aggregate + + def log(self, new_strategy, size, cause="", old_strategy="", typename="", element_typename=""): + if self.aggregate: + key = (cause, old_strategy, new_strategy, typename) + if key not in self.logs: + self.logs[key] = LogEntry() + entry = self.logs[key] + entry.add(size, element_typename) + else: + element_typenames = [ element_typename ] if element_typename else [] + self.output(cause, old_strategy, new_strategy, typename, size, 1, element_typenames) + + def print_aggregated_log(self): + if not self.aggregate: + return + for key, entry in self.logs.items(): + cause, old_strategy, new_strategy, typename = key + slots, objects, element_typenames = entry.slots, entry.objects, entry.classnames() + self.output(cause, old_strategy, new_strategy, typename, slots, objects, element_typenames) + + def output(self, cause, old_strategy, new_strategy, typename, slots, objects, element_typenames): + old_strategy_string = "%s -> " % old_strategy if old_strategy else "" + classname_string = " of %s" % typename if typename else "" + element_string = (" elements: " + " ".join(element_typenames)) if element_typenames else "" + format = (cause, old_strategy_string, new_strategy, classname_string, slots, objects, element_string) + self.do_print("%s (%s%s)%s size %d objects %d%s" % format) + + def do_print(self, str): + # Hook to increase testability + print str diff --git a/rpython/rlib/rstrategies/logparser.py b/rpython/rlib/rstrategies/logparser.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rstrategies/logparser.py @@ -0,0 +1,694 @@ + +import re, os, sys, operator + +""" +This script parses a log 
produced by rstrategies_logger.py into a graph and converts it to various outputs. +The most useful outputs are the dot* commands producing a visualization of the log using the dot-command of graphviz. +Every strategy is a node in the graph, and the edges are collections or objects that transition between +two strategies at some point during the log. +Artificial nodes are created for log entries without an explicit source node. These are the events when a +collection is created. +The input to this script is a logfile, a command and optional flags. +If the name of the logfile includes one of the AVAILABLE_VMS as a substring, the first three global variables +are automatically configured. +The script should work without these configurations, but the output will probably not be that pretty. +To avoid errors, the -a flag is implied when running without proper configuration. +""" + +# This should contain a full list of storage nodes (strategies). +# All strategies not included here will be combined into a single "Other"-node, if the -a flag is not given. +STORAGE_NODES = [] + +# This allows arbitrary renamings of storage strategy nodes +NODE_RENAMINGS = {} + +# Artificial storage-source nodes are automatically named like the associated operation. +# This dict allows customizing the names of these nodes. 
+STORAGE_SOURCES = {} + +def SET_VM(vm_name): + global STORAGE_NODES + global NODE_RENAMINGS + global STORAGE_SOURCES + if vm_name == 'RSqueak': + STORAGE_NODES = ['List', 'WeakList', 'SmallIntegerOrNil', 'FloatOrNil', 'AllNil'] + NODE_RENAMINGS = dict((x+'Strategy', x) for x in STORAGE_NODES) + STORAGE_SOURCES = {'Filledin': 'Image Loading', 'Initialized': 'Object Creation'} + elif vm_name == 'Pycket': + STORAGE_SOURCES = {'Created': 'Array Creation'} + # TODO + elif vm_name == 'Topaz': + # TODO + pass + else: + raise Exception("Unhandled vm name %s" % vm_name) + +AVAILABLE_VMS = ['RSqueak', 'Pycket', 'Topaz'] + +def configure_vm(logfile, flags): + vm_config_name = None + for vm_name in AVAILABLE_VMS: + if vm_name in logfile: + vm_config_name = vm_name + break + if vm_config_name is not None: + print "Using VM configuration %s" % vm_name + SET_VM(vm_name) + else: + print "No VM configuration found in filename '%s'. Available configurations: %s" % \ + (logfile, AVAILABLE_VMS) + print "Please add new VM configuration or rename logfile. Turning on -a flag to avoid errors." + flags.allstorage = True + +# ==================================================================== +# ======== Logfile parsing +# ==================================================================== + +def percent(part, total): + if total == 0: + return 0 + return float(part)*100 / total + +def parse(filename, flags, callback): + parsed_entries = 0 + if filename == "-": + opener = lambda: sys.stdin + else: + opener = lambda: open(filename, 'r', 1) + with opener() as file: + while True: + line = file.readline() + if len(line) == 0: + break + entry = parse_line(line, flags) + if entry: + parsed_entries += 1 + callback(entry) + return parsed_entries + +line_pattern = re.compile("^(?P\w+) \(((?P\w+) -> )?(?P\w+)\)( of (?P.+))? 
size (?P[0-9]+)( objects (?P[0-9]+))?( elements: (?P.+( .+)*))?$") + +def parse_line(line, flags): + result = line_pattern.match(line) + if result is None: + if flags.verbose: + print "Could not parse line: %s" % line[:-1] + return None + operation = str(result.group('operation')) + old_storage = result.group('old') + new_storage = str(result.group('new')) + classname = str(result.group('classname')) + size = int(result.group('size')) + objects = result.group('objects') + objects = int(objects) if objects else 1 + classnames = result.group('classnames') + if classnames is not None: + classnames = classnames.split(' ') + classnames = set(classnames) + else: + classnames = set() + + is_storage_source = old_storage is None + if is_storage_source: + if operation in STORAGE_SOURCES: + old_storage = STORAGE_SOURCES[operation] + else: + print "Using operation %s as storage source." % operation + old_storage = str(old_storage) + + if new_storage in NODE_RENAMINGS: + new_storage = NODE_RENAMINGS[new_storage] + if old_storage in NODE_RENAMINGS: + old_storage = NODE_RENAMINGS[old_storage] + + return LogEntry(operation, old_storage, new_storage, classname, size, objects, classnames, is_storage_source) + +class LogEntry(object): + + def __init__(self, operation, old_storage, new_storage, classname, size, objects, classnames, is_storage_source): + self.operation = operation + self.old_storage = old_storage + self.new_storage = new_storage + self.classname = classname + self.size = size + self.objects = objects + self.classnames = classnames + self.is_storage_source = is_storage_source + assert old_storage != new_storage, "old and new storage identical in log entry: %s" % self + + def full_key(self): + return (self.operation, self.old_storage, self.new_storage) + + def __lt__(self, other): + return self.classname < other.classname + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def __str__(self): + old_storage_string = "%s -> " % 
self.old_storage if self.old_storage else "" + classname_string = " of %s" % self.classname if self.classname else "" + objects_string = " objects %d" % self.objects if self.objects > 1 else "" + return "%s (%s%s)%s size %d%s" % (self.operation, old_storage_string, self.new_storage, classname_string, self.size, objects_string) + +# ==================================================================== +# ======== Graph parsing +# ==================================================================== + +class Operations(object): + + def __init__(self, objects=0, slots=0, element_classnames=[]): + self.objects = objects + self.slots = slots + self.element_classnames = set(element_classnames) + + def __str__(self, total=None): + if self.objects == 0: + avg_slots = 0 + else: + avg_slots = float(self.slots) / self.objects + if total is not None and total.slots != 0: + percent_slots = " (%.1f%%)" % percent(self.slots, total.slots) + else: + percent_slots = "" + if total is not None and total.objects != 0: + percent_objects = " (%.1f%%)" % percent(self.objects, total.objects) + else: + percent_objects = "" + slots = format(self.slots, ",d") + objects = format(self.objects, ",d") + classnames = (" [ elements: %s ]" % ' '.join([str(x) for x in self.element_classnames])) \ + if len(self.element_classnames) else "" + return "%s%s slots in %s%s objects (avg size: %.1f)%s" % (slots, percent_slots, objects, percent_objects, avg_slots, classnames) + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def add_log_entry(self, entry): + self.slots = self.slots + entry.size + self.objects = self.objects + entry.objects + self.element_classnames |= entry.classnames + + def __sub__(self, other): + return Operations(self.objects - other.objects, self.slots - other.slots) + + def __add__(self, other): + return Operations(self.objects + other.objects, self.slots + other.slots) + + def __lt__(self, other): + return self.slots < other.slots + + def empty(self): 
+ return self.objects == 0 and self.slots == 0 + + def prefixprint(self, key="", total=None): + if not self.empty(): + print "%s%s" % (key, self.__str__(total)) + +class ClassOperations(object): + + def __init__(self): + self.classes = {} + + def cls(self, name): + if name not in self.classes: + self.classes[name] = Operations() + return self.classes[name] + + def total(self): + return reduce(operator.add, self.classes.values(), Operations()) + + def __str__(self): + return "ClassOperations(%s)" % self.classes + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def __add__(self, other): + result = ClassOperations() + result.classes = dict(self.classes) + for classname, other_class in other.classes.items(): + result.cls(classname) # Make sure exists. + result.classes[classname] += other_class + return result + + def __sub__(self, other): + result = ClassOperations() + result.classes = dict(self.classes) + for classname, other_class in other.classes.items(): + result.cls(classname) # Make sure exists. 
+ result.classes[classname] -= other_class + return result + +class StorageEdge(object): + + def __init__(self, operation="None", origin=None, target=None): + self.operation = operation + self.classes = ClassOperations() + self.origin = origin + self.target = target + self.is_storage_source = False + + def full_key(self): + return (self.operation, self.origin.name, self.target.name) + + def cls(self, classname): + return self.classes.cls(classname) + + def total(self): + return self.classes.total() + + def notify_nodes(self): + self.origin.note_outgoing(self) + self.target.note_incoming(self) + + def add_log_entry(self, entry): + self.cls(entry.classname).add_log_entry(entry) + if entry.is_storage_source: + self.is_storage_source = True + + def as_log_entries(self): + entries = [] + for classname, ops in self.classes.classes.items(): + origin = None if self.is_storage_source else self.origin.name + entry = LogEntry(self.operation, origin, self.target.name, classname, + ops.slots, ops.objects, ops.element_classnames, self.is_storage_source) + entries.append(entry) + return entries + + def __lt__(self, other): + return self.full_key() < other.full_key() + + def __str__(self): + return "[%s %s -> %s]" % (self.operation, self.origin, self.target) + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def __add__(self, other): + origin = self.origin if self.origin is not None else other.origin + target = self.target if self.target is not None else other.target + result = StorageEdge(self.operation, origin, target) + result.classes += self.classes + other.classes + return result + + def __sub__(self, other): + origin = self.origin if self.origin is not None else other.origin + target = self.target if self.target is not None else other.target + result = StorageEdge(self.operation, origin, target) + result.classes += self.classes - other.classes + return result + +class StorageNode(object): + + def __init__(self, name): + self.name = name + 
self.incoming = set() + self.outgoing = set() + + def note_incoming(self, edge): + assert edge.target is self + if edge not in self.incoming: + self.incoming.add(edge) + + def note_outgoing(self, edge): + assert edge.origin is self + if edge not in self.outgoing: + self.outgoing.add(edge) + + def incoming_edges(self, operation): + return filter(lambda x: x.operation == operation, self.incoming) + + def outgoing_edges(self, operation): + return filter(lambda x: x.operation == operation, self.outgoing) + + def sum_incoming(self, operation): + return reduce(operator.add, self.incoming_edges(operation), StorageEdge(operation)) + + def sum_outgoing(self, operation): + return reduce(operator.add, self.outgoing_edges(operation), StorageEdge(operation)) + + def sum_all_incoming(self): + return reduce(operator.add, self.incoming, StorageEdge()) + + def sum_all_outgoing(self): + return reduce(operator.add, self.outgoing, StorageEdge()) + + def __str__(self): + return self.name + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def merge_edge_sets(self, set1, set2, key_slot): + getter = lambda edge: edge.__dict__[key_slot] + set_dict = dict([(getter(edge), edge) for edge in set1]) + for edge in set2: + key = getter(edge) + if key not in set_dict: + set_dict[key] = edge + else: + set_dict[key] += edge + return set(set_dict.values()) + + def __add__(self, other): + result = StorageNode("%s %s" % (self.name, other.name)) + result.incoming = self.merge_edge_sets(self.incoming, other.incoming, "origin") + # TODO bad code + for edge in result.incoming: + edge.target = result + result.outgoing = self.merge_edge_sets(self.outgoing, other.outgoing, "target") + for edge in result.outgoing: + edge.origin = result + return result + + def __lt__(self, other): + return self.name < other.name + + def is_artificial(self): + for outgoing in self.outgoing: + if outgoing.is_storage_source: + return True + return False + + def is_storage_node(self): + return 
self.is_artificial() or self.name in STORAGE_NODES + + def dot_name(self): + return self.name.replace(" ", "_") + +class StorageGraph(object): + + def __init__(self): + self.nodes = {} + self.edges = {} + self.operations = set() + + def node(self, name): + if name not in self.nodes: + self.nodes[name] = StorageNode(name) + return self.nodes[name] + + def assert_sanity(self): + visited_edges = set() + for node in self.nodes.values(): + for edge in node.incoming: + assert edge in self.edges.values(), "Edge not in graph's edges: %s" % edge + visited_edges.add(edge) + if not edge.target is node: + print "Wrong edge target: %s\nIncoming edge: %s\nIn node: %s" % (edge.target, edge, node) + assert False + if not edge in edge.origin.outgoing: + print "Edge not in origin's outgoing: %s\nIncoming edge: %s\nIn node: %s" % (edge.origin.outgoing, edge, node) + assert False + for edge in node.outgoing: + assert edge in self.edges.values(), "Edge not in graph's edges: %s" % edge + visited_edges.add(edge) + if not edge.origin is node: + print "Wrong edge origin: %s\nOutgoing edge: %s\nIn node: %s" % (edge.origin, edge, node) + assert False + if not edge in edge.target.incoming: + print "Edge not in origin's incoming: %s\nOutgoing edge: %s\nIn node: %s" % (edge.target.incoming, edge, node) + assert False + assert len(visited_edges) == len(self.edges.values()), "Not all of graph's edges visited." 
+ + def add_log_entry(self, log_entry): + self.operations.add(log_entry.operation) + key = log_entry.full_key() + if key not in self.edges: + edge = StorageEdge(log_entry.operation, self.node(log_entry.old_storage), self.node(log_entry.new_storage)) + self.edges[key] = edge + edge.notify_nodes() + self.edges[key].add_log_entry(log_entry) + + def collapse_nodes(self, collapsed_nodes, new_name=None): + if len(collapsed_nodes) == 0: + return + for node in collapsed_nodes: + del self.nodes[node.name] + for edge in node.incoming: + del self.edges[edge.full_key()] + for edge in node.outgoing: + del self.edges[edge.full_key()] + new_node = reduce(operator.add, collapsed_nodes) + if new_name is not None: + new_node.name = new_name + self.nodes[new_node.name] = new_node + # TODO bad code + for node in collapsed_nodes: + for edge in node.incoming: + edge.origin.outgoing.remove(edge) + new_edges = filter(lambda filtered: filtered.origin == edge.origin, new_node.incoming) + assert len(new_edges) == 1 + edge.origin.outgoing.add(new_edges[0]) + for edge in node.outgoing: + edge.target.incoming.remove(edge) + new_edges = filter(lambda filtered: filtered.target == edge.target, new_node.outgoing) + assert len(new_edges) == 1 + edge.target.incoming.add(new_edges[0]) + for edge in new_node.incoming: + self.edges[edge.full_key()] = edge + for edge in new_node.outgoing: + self.edges[edge.full_key()] = edge + self.assert_sanity() + + def collapse_nonstorage_nodes(self, new_name=None): + nodes = filter(lambda x: not x.is_storage_node(), self.nodes.values()) + self.collapse_nodes(nodes, new_name) + + def sorted_nodes(self): + nodes = self.nodes.values() + nodes.sort() + return nodes + +def make_graph(logfile, flags): + graph = StorageGraph() + def callback(entry): + graph.add_log_entry(entry) + parse(logfile, flags, callback) + graph.assert_sanity() + return graph + +# ==================================================================== +# ======== Command - Summarize log content +# 
==================================================================== + +def command_summarize(logfile, flags): + graph = make_graph(logfile, flags) + if not flags.allstorage: + graph.collapse_nonstorage_nodes() + for node in graph.sorted_nodes(): + node.print_summary(flags, graph.operations) + +def StorageNode_print_summary(self, flags, all_operations): + print "\n%s:" % self.name + sum = StorageEdge() + total_incoming = self.sum_all_incoming().total() if flags.percent else None + + print "\tIncoming:" + for operation in all_operations: + if flags.detailed: + edges = [ (edge.origin.name, edge) for edge in self.incoming_edges(operation) ] + else: + edges = [ (operation, self.sum_incoming(operation)) ] + for edgename, edge in edges: + edge.print_with_name("\t\t\t", edgename, total_incoming, flags) + sum += edge + + print "\tOutgoing:" + for operation in all_operations: + if flags.detailed: + edges = [ (edge.target.name, edge) for edge in self.outgoing_edges(operation) ] + else: + edges = [ (operation, self.sum_outgoing(operation)) ] + for edgename, edge in edges: + edge.print_with_name("\t\t\t", edgename, total_incoming, flags) + sum -= edge + + sum.print_with_name("\t", "Remaining", total_incoming, flags) + +StorageNode.print_summary = StorageNode_print_summary + +def StorageEdge_print_with_name(self, prefix, edgename, total_reference, flags): + if flags.classes: + print "%s%s:" % (prefix, edgename) + prefix += "\t\t" + operations = self.classes.classes.items() + operations.sort(reverse=True, key=operator.itemgetter(1)) + else: + operations = [ (edgename, self.total()) ] + for classname, classops in operations: + classops.prefixprint("%s%s: " % (prefix, classname), total_reference) + +StorageEdge.print_with_name = StorageEdge_print_with_name + +# ==================================================================== +# ======== Command - DOT output +# ==================================================================== + +# Output is valid dot code and can be parsed 
by the graphviz dot utility. +def command_print_dot(logfile, flags): + graph = make_graph(logfile, flags) + print "/*" + print "Storage Statistics (dot format):" + print "================================" + print "*/" + print dot_string(graph, flags) + +def run_dot(logfile, flags, output_type): + import subprocess + dot = dot_string(make_graph(logfile, flags), flags) + command = ["dot", "-T%s" % output_type, "-o%s.%s" % (flags.logfile, output_type)] + print "Running:\n%s" % " ".join(command) + p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + output = p.communicate(input=dot)[0] + print output + +def command_dot(logfile, flags): + run_dot(logfile, flags, "jpg") +def command_dot_ps(logfile, flags): + run_dot(logfile, flags, "ps") +def command_dot_pdf(logfile, flags): + run_dot(logfile, flags, "pdf") +def command_dot_svg(logfile, flags): + run_dot(logfile, flags, "svg") + +def dot_string(graph, flags): + result = "digraph G {" + incoming_cache = {} + if not flags.allstorage: + graph.collapse_nonstorage_nodes("Other") + + def make_label(edge, prefix="", total_edge=None, slots_per_object=False): + object_suffix = " objects" + slots_suffix = " slots" + if not flags.objects or not flags.slots: + object_suffix = slots_suffix = "" + if total_edge and flags.percent and total_edge.objects != 0: + percent_objects = " (%.1f%%)" % percent(edge.objects, total_edge.objects) + percent_slots = " (%.1f%%)" % percent(edge.slots, total_edge.slots) + else: + percent_objects = percent_slots = "" + label = "" + if flags.objects: + label += "%s%s%s%s
    " % (prefix, format(edge.objects, ",.0f"), object_suffix, percent_objects) + if flags.slots: + label += "%s%s%s%s
    " % (prefix, format(edge.slots, ",.0f"), slots_suffix, percent_slots) + if slots_per_object and flags.slotsPerObject: + label += "%.1f slots/object
    " % (float(total.slots) / total.objects) + return label + + for node in graph.nodes.values(): + incoming = node.sum_all_incoming().total() + outgoing = node.sum_all_outgoing().total() + remaining = incoming - outgoing + if node.is_artificial(): + incoming_cache[node.name] = outgoing + shape = ",shape=box" + label = make_label(outgoing) + else: + incoming_cache[node.name] = incoming + shape = "" + label = make_label(incoming, "Incoming: ") + if remaining.objects != incoming.objects: + label += make_label(remaining, "Remaining: ", incoming) + result += "%s [label=<%s
    %s>%s];" % (node.dot_name(), node.name, label, shape) + + for edge in graph.edges.values(): + total = edge.total() + incoming = incoming_cache[edge.origin.name] + label = make_label(total, "", incoming, slots_per_object=True) + target_node = edge.target.dot_name() + source_node = edge.origin.dot_name() + result += "%s -> %s [label=<%s>];" % (source_node, target_node, label) + + result += "}" + return result + +# ==================================================================== +# ======== Other commands +# ==================================================================== + +def command_aggregate(logfile, flags): + graph = make_graph(logfile, flags) + edges = graph.edges.values() + edges.sort() + for edge in edges: + logentries = edge.as_log_entries() + logentries.sort() + for entry in logentries: + print entry + +def command_print_entries(logfile, flags): + def callback(entry): + print entry + parse(logfile, flags, callback) + +# ==================================================================== +# ======== Main +# ==================================================================== + +class Flags(object): + + def __init__(self, flags): + self.flags = {} + for name, short in flags: + self.__dict__[name] = False + self.flags[short] = name + + def handle(self, arg): + if arg in self.flags: + self.__dict__[self.flags[arg]] = True + return True + else: + return False + + def __str__(self): + descriptions = [ ("%s (%s)" % description) for description in self.flags.items() ] + return "[%s]" % " | ".join(descriptions) + +def usage(flags, commands): + print "Arguments: logfile command %s" % flags + print "Available commands: %s" % commands + exit(1) + +def main(argv): + flags = Flags([ + # General + ('verbose', '-v'), + + # All outputs + ('percent', '-p'), + ('allstorage', '-a'), + + # Text outputs + ('detailed', '-d'), + ('classes', '-c'), + + # dot outputs + ('slots', '-s'), + ('objects', '-o'), + ('slotsPerObject', '-S'), + ]) + + command_prefix = "command_" 
+ module = sys.modules[__name__].__dict__ + commands = [ a[len(command_prefix):] for a in module.keys() if a.startswith(command_prefix) ] + + if len(argv) < 2: + usage(flags, commands) + logfile = argv[0] + flags.logfile = logfile + configure_vm(logfile, flags) + command = argv[1] + for flag in argv[2:]: + if not flags.handle(flag): + usage(flags, commands) + if command not in commands: + usage(flags, commands) + + func = module[command_prefix + command] + func(logfile, flags) + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/rpython/rlib/rstrategies/rstrategies.py b/rpython/rlib/rstrategies/rstrategies.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rstrategies/rstrategies.py @@ -0,0 +1,572 @@ + +import weakref, sys +from rpython.rlib.rstrategies import logger +from rpython.rlib import jit, objectmodel, rerased +from rpython.rlib.objectmodel import specialize + +def make_accessors(strategy='strategy', storage='storage'): + """ + Instead of using this generator, the methods can be implemented manually. + A third way is to overwrite the getter/setter methods in StrategyFactory. + """ + def make_getter(attr): + def getter(self): return getattr(self, attr) + return getter + def make_setter(attr): + def setter(self, val): setattr(self, attr, val) + return setter + classdef = sys._getframe(1).f_locals + classdef['_get_strategy'] = make_getter(strategy) + classdef['_set_strategy'] = make_setter(strategy) + classdef['_get_storage'] = make_getter(storage) + classdef['_set_storage'] = make_setter(storage) + +class StrategyMetaclass(type): + """ + A metaclass is required, because we need certain attributes to be special + for every single strategy class. 
+ """ + def __new__(self, name, bases, attrs): + attrs['_is_strategy'] = False + attrs['_is_singleton'] = False + attrs['_specializations'] = [] + # Not every strategy uses rerased-pairs, but they won't hurt + erase, unerase = rerased.new_erasing_pair(name) + def get_storage(self, w_self): + erased = self.strategy_factory().get_storage(w_self) + return unerase(erased) + def set_storage(self, w_self, storage): + erased = erase(storage) + self.strategy_factory().set_storage(w_self, erased) + attrs['get_storage'] = get_storage + attrs['set_storage'] = set_storage + return type.__new__(self, name, bases, attrs) + +def strategy(generalize=None, singleton=True): + """ + Strategy classes must be decorated with this. + generalize is a list of other strategies, that can be switched to from the decorated strategy. + If the singleton flag is set to False, new strategy instances will be created, + instead of always reusing the singleton object. + """ + def decorator(strategy_class): + # Patch strategy class: Add generalized_strategy_for and mark as strategy class. 
+ if generalize: + @jit.unroll_safe + def generalized_strategy_for(self, value): + # TODO - optimize this method + for strategy in generalize: + if self.strategy_factory().strategy_singleton_instance(strategy)._check_can_handle(value): + return strategy + raise Exception("Could not find generalized strategy for %s coming from %s" % (value, self)) + strategy_class.generalized_strategy_for = generalized_strategy_for + for generalized in generalize: + generalized._specializations.append(strategy_class) + strategy_class._is_strategy = True + strategy_class._generalizations = generalize + strategy_class._is_singleton = singleton + return strategy_class + return decorator + +class StrategyFactory(object): + _immutable_fields_ = ["strategies[*]", "logger", "strategy_singleton_field"] + factory_instance_counter = 0 + + def __init__(self, root_class, all_strategy_classes=None): + if all_strategy_classes is None: + all_strategy_classes = self._collect_subclasses(root_class) + self.strategies = [] + self.logger = logger.Logger() + + # This is to avoid confusion between multiple factories existing simultaneously (e.g. in tests) + self.strategy_singleton_field = "__singleton_%i" % StrategyFactory.factory_instance_counter + StrategyFactory.factory_instance_counter += 1 + + self._create_strategy_instances(root_class, all_strategy_classes) + + def _create_strategy_instances(self, root_class, all_strategy_classes): + for strategy_class in all_strategy_classes: + if strategy_class._is_strategy: + setattr(strategy_class, self.strategy_singleton_field, self.instantiate_strategy(strategy_class)) + self.strategies.append(strategy_class) + self._patch_strategy_class(strategy_class, root_class) + self._order_strategies() + + # ============================= + # API methods + # ============================= + + def switch_strategy(self, w_self, new_strategy_type, new_element=None): + """ + Switch the strategy of w_self to the new type. 
+ new_element can be given as as hint, purely for logging purposes. + It should be the object that was added to w_self, causing the strategy switch. + """ + old_strategy = self.get_strategy(w_self) + if new_strategy_type._is_singleton: + new_strategy = self.strategy_singleton_instance(new_strategy_type) + else: + size = old_strategy.size(w_self) + new_strategy = self.instantiate_strategy(new_strategy_type, w_self, size) + self.set_strategy(w_self, new_strategy) + old_strategy._convert_storage_to(w_self, new_strategy) + new_strategy.strategy_switched(w_self) + self.log(w_self, new_strategy, old_strategy, new_element) + return new_strategy + + def set_initial_strategy(self, w_self, strategy_type, size, elements=None): + """ + Initialize the strategy and storage fields of w_self. + This must be called before switch_strategy or any strategy method can be used. + elements is an optional list of values initially stored in w_self. + If given, then len(elements) == size must hold. + """ + assert self.get_strategy(w_self) is None, "Strategy should not be initialized yet!" + if strategy_type._is_singleton: + strategy = self.strategy_singleton_instance(strategy_type) + else: + strategy = self.instantiate_strategy(strategy_type, w_self, size) + self.set_strategy(w_self, strategy) + strategy._initialize_storage(w_self, size) + element = None + if elements: + strategy.store_all(w_self, elements) + if len(elements) > 0: element = elements[0] + strategy.strategy_switched(w_self) + self.log(w_self, strategy, None, element) + return strategy + + @jit.unroll_safe + def strategy_type_for(self, objects): + """ + Return the best-fitting strategy to hold all given objects. 
+ """ + specialized_strategies = len(self.strategies) + can_handle = [True] * specialized_strategies + for obj in objects: + if specialized_strategies <= 1: + break + for i, strategy in enumerate(self.strategies): + if can_handle[i] and not self.strategy_singleton_instance(strategy)._check_can_handle(obj): + can_handle[i] = False + specialized_strategies -= 1 + for i, strategy_type in enumerate(self.strategies): + if can_handle[i]: + return strategy_type + raise Exception("Could not find strategy to handle: %s" % objects) + + def decorate_strategies(self, transitions): + """ + As an alternative to decorating all strategies with @strategy, + invoke this in the constructor of your StrategyFactory subclass, before + calling __init__. transitions is a dict mapping all strategy classes to + their 'generalize' list parameter (see @strategy decorator). + """ + "NOT_RPYTHON" + for strategy_class, generalized in transitions.items(): + strategy(generalized)(strategy_class) + + # ============================= + # The following methods can be overwritten to customize certain aspects of the factory. + # ============================= + + def instantiate_strategy(self, strategy_type, w_self=None, initial_size=0): + """ + Return a functional instance of strategy_type. + Overwrite this if you need a non-default constructor. + The two additional parameters should be ignored for singleton-strategies. 
+ """ + return strategy_type() + + def log(self, w_self, new_strategy, old_strategy=None, new_element=None): + """ + This can be overwritten into a more appropriate call to self.logger.log + """ + if not self.logger.active: return + new_strategy_str = self.log_string_for_object(new_strategy) + old_strategy_str = self.log_string_for_object(old_strategy) + element_typename = self.log_string_for_object(new_element) + size = new_strategy.size(w_self) + typename = "" + cause = "Switched" if old_strategy else "Created" + self.logger.log(new_strategy_str, size, cause, old_strategy_str, typename, element_typename) + + @specialize.call_location() + def log_string_for_object(self, obj): + """ + This can be overwritten instead of the entire log() method. + Keep the specialize-annotation in order to handle different kinds of objects here. + """ + return obj.__class__.__name__ if obj else "" + + # These storage accessors are specialized because the storage field is + # populated by erased-objects which seem to be incompatible sometimes. 
+ @specialize.call_location() + def get_storage(self, obj): + return obj._get_storage() + @specialize.call_location() + def set_storage(self, obj, val): + return obj._set_storage(val) + + def get_strategy(self, obj): + return obj._get_strategy() + def set_strategy(self, obj, val): + return obj._set_strategy(val) + + # ============================= + # Internal methods + # ============================= + + def _patch_strategy_class(self, strategy_class, root_class): + "NOT_RPYTHON" + # Patch root class: Add default handler for visitor + def _convert_storage_from_OTHER(self, w_self, previous_strategy): + self._convert_storage_from(w_self, previous_strategy) + funcname = "_convert_storage_from_" + strategy_class.__name__ + _convert_storage_from_OTHER.func_name = funcname + setattr(root_class, funcname, _convert_storage_from_OTHER) + + # Patch strategy class: Add polymorphic visitor function + def _convert_storage_to(self, w_self, new_strategy): + getattr(new_strategy, funcname)(w_self, self) + strategy_class._convert_storage_to = _convert_storage_to + + def _collect_subclasses(self, cls): + "NOT_RPYTHON" + subclasses = [] + for subcls in cls.__subclasses__(): + subclasses.append(subcls) + subclasses.extend(self._collect_subclasses(subcls)) + return subclasses + + def _order_strategies(self): + "NOT_RPYTHON" + def get_generalization_depth(strategy, visited=None): + if visited is None: + visited = set() + if strategy._generalizations: + if strategy in visited: + raise Exception("Cycle in generalization-tree of %s" % strategy) + visited.add(strategy) + depth = 0 + for generalization in strategy._generalizations: + other_depth = get_generalization_depth(generalization, set(visited)) + depth = max(depth, other_depth) + return depth + 1 + else: + return 0 + self.strategies.sort(key=get_generalization_depth, reverse=True) + + @jit.elidable + def strategy_singleton_instance(self, strategy_class): + return getattr(strategy_class, self.strategy_singleton_field) + + def 
class AbstractStrategy(object):
    """
    == Required:
    strategy_factory(self) - Access to StorageFactory
    """

    def strategy_switched(self, w_self):
        # Overwrite this method for a hook whenever the strategy
        # of w_self was switched to self.
        pass

    # Main Fixedsize API

    def store(self, w_self, index0, value):
        raise NotImplementedError("Abstract method")

    def fetch(self, w_self, index0):
        raise NotImplementedError("Abstract method")

    def size(self, w_self):
        raise NotImplementedError("Abstract method")

    # Fixedsize utility methods

    def slice(self, w_self, start, end):
        # Returns a plain list of the elements in [start, end).
        return [ self.fetch(w_self, i) for i in range(start, end)]

    def fetch_all(self, w_self):
        return self.slice(w_self, 0, self.size(w_self))

    def store_all(self, w_self, elements):
        for i, e in enumerate(elements):
            self.store(w_self, i, e)

    # Main Varsize API

    def insert(self, w_self, index0, list_w):
        raise NotImplementedError("Abstract method")

    def delete(self, w_self, start, end):
        raise NotImplementedError("Abstract method")

    # Varsize utility methods

    def append(self, w_self, list_w):
        self.insert(w_self, self.size(w_self), list_w)

    def pop(self, w_self, index0):
        e = self.fetch(w_self, index0)
        self.delete(w_self, index0, index0+1)
        return e

    # Internal methods

    def _initialize_storage(self, w_self, initial_size):
        raise NotImplementedError("Abstract method")

    def _check_can_handle(self, value):
        raise NotImplementedError("Abstract method")

    def _convert_storage_to(self, w_self, new_strategy):
        # This will be overwritten in _patch_strategy_class
        new_strategy._convert_storage_from(w_self, self)

    @jit.unroll_safe
    def _convert_storage_from(self, w_self, previous_strategy):
        # This is a very inefficient (but most generic) way to do this.
        # Subclasses should specialize.
        storage = previous_strategy.fetch_all(w_self)
        self._initialize_storage(w_self, previous_strategy.size(w_self))
        for i, field in enumerate(storage):
            self.store(w_self, i, field)

    def _generalize_for_value(self, w_self, value):
        # Switch to the first generalization that can handle *value*.
        strategy_type = self.generalized_strategy_for(value)
        new_instance = self.strategy_factory().switch_strategy(w_self, strategy_type, new_element=value)
        return new_instance

    def _cannot_handle_store(self, w_self, index0, value):
        new_instance = self._generalize_for_value(w_self, value)
        new_instance.store(w_self, index0, value)

    def _cannot_handle_insert(self, w_self, index0, list_w):
        # TODO - optimize. Prevent multiple generalizations and slicing done by callers.
        new_strategy = self._generalize_for_value(w_self, list_w[0])
        new_strategy.insert(w_self, index0, list_w)

# ============== Special Strategies with no storage array ==============

class EmptyStrategy(AbstractStrategy):
    # == Required:
    # See AbstractStrategy

    def _initialize_storage(self, w_self, initial_size):
        assert initial_size == 0
        self.set_storage(w_self, None)
    def _convert_storage_from(self, w_self, previous_strategy):
        self.set_storage(w_self, None)
    def _check_can_handle(self, value):
        return False

    def fetch(self, w_self, index0):
        raise IndexError
    def store(self, w_self, index0, value):
        # BUGFIX: pass the bare value, not [value]. _cannot_handle_store
        # expects a single element (cf. SingleValueStrategy.store and
        # StrategyWithStorage.store); wrapping it in a list would make the
        # generalized strategy store the list itself as the element.
        self._cannot_handle_store(w_self, index0, value)
    def insert(self, w_self, index0, list_w):
        self._cannot_handle_insert(w_self, index0, list_w)
    def delete(self, w_self, start, end):
        self.check_index_range(w_self, start, end)
    def size(self, w_self):
        return 0

class SingleValueStrategyStorage(object):
    """Small container object for a size value."""
    _attrs_ = ['size']
    def __init__(self, size=0):
        self.size = size

class SingleValueStrategy(AbstractStrategy):
    # == Required:
    # See AbstractStrategy
    # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin
    # value(self) - the single value contained in this strategy. Should be constant.

    def _initialize_storage(self, w_self, initial_size):
        storage_obj = SingleValueStrategyStorage(initial_size)
        self.set_storage(w_self, storage_obj)
    def _convert_storage_from(self, w_self, previous_strategy):
        self._initialize_storage(w_self, previous_strategy.size(w_self))
    def _check_can_handle(self, value):
        return value is self.value()

    def fetch(self, w_self, index0):
        self.check_index_fetch(w_self, index0)
        return self.value()
    def store(self, w_self, index0, value):
        self.check_index_store(w_self, index0)
        if self._check_can_handle(value):
            return
        self._cannot_handle_store(w_self, index0, value)
    def delete(self, w_self, start, end):
        self.check_index_range(w_self, start, end)
        self.get_storage(w_self).size -= (end - start)
    def size(self, w_self):
        return self.get_storage(w_self).size

    @jit.unroll_safe
    def insert(self, w_self, index0, list_w):
        storage_obj = self.get_storage(w_self)
        for i in range(len(list_w)):
            if self._check_can_handle(list_w[i]):
                storage_obj.size += 1
            else:
                # Generalize once and hand the remaining tail to the new strategy.
                self._cannot_handle_insert(w_self, index0 + i, list_w[i:])
                return

# ============== Basic strategies with storage ==============

class StrategyWithStorage(AbstractStrategy):
    # == Required:
    # See AbstractStrategy
    # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin
    # default_value(self) - The value to be initially contained in this strategy

    def _initialize_storage(self, w_self, initial_size):
        default = self._unwrap(self.default_value())
        self.set_storage(w_self, [default] * initial_size)

    @jit.unroll_safe
    def _convert_storage_from(self, w_self, previous_strategy):
        size = previous_strategy.size(w_self)
        new_storage = [ self._unwrap(previous_strategy.fetch(w_self, i))
                        for i in range(size) ]
        self.set_storage(w_self, new_storage)

    def store(self, w_self, index0, wrapped_value):
        self.check_index_store(w_self, index0)
        if self._check_can_handle(wrapped_value):
            unwrapped = self._unwrap(wrapped_value)
            self.get_storage(w_self)[index0] = unwrapped
        else:
            self._cannot_handle_store(w_self, index0, wrapped_value)

    def fetch(self, w_self, index0):
        self.check_index_fetch(w_self, index0)
        unwrapped = self.get_storage(w_self)[index0]
        return self._wrap(unwrapped)

    def _wrap(self, value):
        raise NotImplementedError("Abstract method")

    def _unwrap(self, value):
        raise NotImplementedError("Abstract method")

    def size(self, w_self):
        return len(self.get_storage(w_self))

    @jit.unroll_safe
    def insert(self, w_self, start, list_w):
        # This is following Python's behaviour - insert automatically
        # happens at the beginning of an array, even if index is larger
        if start > self.size(w_self):
            start = self.size(w_self)
        for i in range(len(list_w)):
            if self._check_can_handle(list_w[i]):
                self.get_storage(w_self).insert(start + i, self._unwrap(list_w[i]))
            else:
                self._cannot_handle_insert(w_self, start + i, list_w[i:])
                return

    def delete(self, w_self, start, end):
        self.check_index_range(w_self, start, end)
        assert start >= 0 and end >= 0
        del self.get_storage(w_self)[start : end]

class GenericStrategy(StrategyWithStorage):
    # == Required:
    # See StrategyWithStorage

    def _wrap(self, value):
        return value
    def _unwrap(self, value):
        return value
    def _check_can_handle(self, wrapped_value):
        return True

class WeakGenericStrategy(StrategyWithStorage):
    # == Required:
    # See StrategyWithStorage

    def _wrap(self, value):
        # A dead weakref is surfaced as the default value.
        return value() or self.default_value()
    def _unwrap(self, value):
        assert value is not None
        return weakref.ref(value)
    def _check_can_handle(self, wrapped_value):
        return True

# ============== Mixins for index checking operations ==============

class SafeIndexingMixin(object):
    def check_index_store(self, w_self, index0):
        self.check_index(w_self, index0)
    def check_index_fetch(self, w_self, index0):
        self.check_index(w_self, index0)
    def check_index_range(self, w_self, start, end):
        if end < start:
            raise IndexError
        self.check_index(w_self, start)
        self.check_index(w_self, end)
    def check_index(self, w_self, index0):
        if index0 < 0 or index0 >= self.size(w_self):
            raise IndexError

class UnsafeIndexingMixin(object):
    def check_index_store(self, w_self, index0):
        pass
    def check_index_fetch(self, w_self, index0):
        pass
    def check_index_range(self, w_self, start, end):
        pass

# ============== Specialized Storage Strategies ==============

class SpecializedStrategy(StrategyWithStorage):
    # == Required:
    # See StrategyWithStorage
    # wrap(self, value) - Return a boxed object for the primitive value
    # unwrap(self, value) - Return the unboxed primitive value of value

    def _unwrap(self, value):
        return self.unwrap(value)
    def _wrap(self, value):
        return self.wrap(value)

class SingleTypeStrategy(SpecializedStrategy):
    # == Required Functions:
    # See SpecializedStrategy
    # contained_type - The wrapped type that can be stored in this strategy

    def _check_can_handle(self, value):
        return isinstance(value, self.contained_type)

class TaggingStrategy(SingleTypeStrategy):
    """This strategy uses a special tag value to represent a single additional object."""
    # == Required:
    # See SingleTypeStrategy
    # wrapped_tagged_value(self) - The tagged object
    # unwrapped_tagged_value(self) - The unwrapped tag value representing the tagged object

    def _check_can_handle(self, value):
        return value is self.wrapped_tagged_value() or \
                (isinstance(value, self.contained_type) and \
                self.unwrap(value) != self.unwrapped_tagged_value())

    def _unwrap(self, value):
        if value is self.wrapped_tagged_value():
            return self.unwrapped_tagged_value()
        return self.unwrap(value)

    def _wrap(self, value):
        if value == self.unwrapped_tagged_value():
            return self.wrapped_tagged_value()
        return self.wrap(value)
# --- new file in this diff: rpython/rlib/rstrategies/test/test_rstrategies.py ---

import py
from rpython.rlib.rstrategies import rstrategies as rs
from rpython.rlib.objectmodel import import_from_mixin

# === Define small model tree

class W_AbstractObject(object):
    pass

class W_Object(W_AbstractObject):
    pass

class W_Integer(W_AbstractObject):
    def __init__(self, value):
        self.value = value
    def __eq__(self, other):
        return isinstance(other, W_Integer) and self.value == other.value

class W_List(W_AbstractObject):
    """Model object that delegates every operation to its current strategy."""
    rs.make_accessors()
    def __init__(self, strategy=None, size=0, elements=None):
        self.strategy = None
        if strategy:
            factory.set_initial_strategy(self, strategy, size, elements)
    def fetch(self, i):
        assert self.strategy
        return self.strategy.fetch(self, i)
    def store(self, i, value):
        assert self.strategy
        return self.strategy.store(self, i, value)
    def size(self):
        assert self.strategy
        return self.strategy.size(self)
    def insert(self, index0, list_w):
        assert self.strategy
        return self.strategy.insert(self, index0, list_w)
    def delete(self, start, end):
        assert self.strategy
        return self.strategy.delete(self, start, end)
    def append(self, list_w):
        assert self.strategy
        return self.strategy.append(self, list_w)
    def pop(self, index0):
        assert self.strategy
        return self.strategy.pop(self, index0)
    def slice(self, start, end):
        assert self.strategy
        return self.strategy.slice(self, start, end)
    def fetch_all(self):
        assert self.strategy
        return self.strategy.fetch_all(self)
    def store_all(self, elements):
        assert self.strategy
        return self.strategy.store_all(self, elements)

w_nil = W_Object()

# === Define concrete strategy classes

class AbstractStrategy(object):
    __metaclass__ = rs.StrategyMetaclass
    import_from_mixin(rs.AbstractStrategy)
    import_from_mixin(rs.SafeIndexingMixin)
    def __init__(self, factory, w_self=None, size=0):
        self.factory = factory
    def strategy_factory(self):
        return self.factory

class Factory(rs.StrategyFactory):
    switching_log = []

    def __init__(self, root_class):
        self.decorate_strategies({
            EmptyStrategy: [NilStrategy, IntegerStrategy, IntegerOrNilStrategy, GenericStrategy],
            NilStrategy: [IntegerOrNilStrategy, GenericStrategy],
            GenericStrategy: [],
            IntegerStrategy: [IntegerOrNilStrategy, GenericStrategy],
            IntegerOrNilStrategy: [GenericStrategy],
        })
        rs.StrategyFactory.__init__(self, root_class)

    def instantiate_strategy(self, strategy_type, w_self=None, size=0):
        return strategy_type(self, w_self, size)

    def set_strategy(self, w_list, strategy):
        # Record every transition so tests can inspect the switching history.
        old_strategy = self.get_strategy(w_list)
        self.switching_log.append((old_strategy, strategy))
        super(Factory, self).set_strategy(w_list, strategy)

    def clear_log(self):
        del self.switching_log[:]

class EmptyStrategy(AbstractStrategy):
    import_from_mixin(rs.EmptyStrategy)
    # TODO - implement and test transition from Generic back to Empty

class NilStrategy(AbstractStrategy):
    import_from_mixin(rs.SingleValueStrategy)
    def value(self): return w_nil

class GenericStrategy(AbstractStrategy):
    import_from_mixin(rs.GenericStrategy)
    import_from_mixin(rs.UnsafeIndexingMixin)
    def default_value(self): return w_nil

class WeakGenericStrategy(AbstractStrategy):
    import_from_mixin(rs.WeakGenericStrategy)
    def default_value(self): return w_nil

class IntegerStrategy(AbstractStrategy):
    import_from_mixin(rs.SingleTypeStrategy)
    contained_type = W_Integer
    def wrap(self, value): return W_Integer(value)
    def unwrap(self, value): return value.value
    def default_value(self): return W_Integer(0)

class IntegerOrNilStrategy(AbstractStrategy):
    import_from_mixin(rs.TaggingStrategy)
    contained_type = W_Integer
    def wrap(self, value): return W_Integer(value)
    def unwrap(self, value): return value.value
    def default_value(self): return w_nil
    def wrapped_tagged_value(self): return w_nil
    def unwrapped_tagged_value(self): import sys; return sys.maxint

@rs.strategy(generalize=[], singleton=False)
class NonSingletonStrategy(GenericStrategy):
    def __init__(self, factory, w_list=None, size=0):
        super(NonSingletonStrategy, self).__init__(factory, w_list, size)
        self.w_list = w_list
        self.the_size = size

class NonStrategy(NonSingletonStrategy):
    pass

@rs.strategy(generalize=[])
class InefficientStrategy(GenericStrategy):
    def _convert_storage_from(self, w_self, previous_strategy):
        return AbstractStrategy._convert_storage_from(self, w_self, previous_strategy)

factory = Factory(AbstractStrategy)

def check_contents(list, expected):
    assert list.size() == len(expected)
    for i, val in enumerate(expected):
        assert list.fetch(i) == val

def teardown():
    factory.clear_log()

# === Test Initialization and fetch

def test_setup():
    pass

def test_factory_setup():
    expected_strategies = 7
    assert len(factory.strategies) == expected_strategies
    assert len(set(factory.strategies)) == len(factory.strategies)
    for strategy in factory.strategies:
        assert isinstance(factory.strategy_singleton_instance(strategy), strategy)

def test_factory_setup_singleton_instances():
    new_factory = Factory(AbstractStrategy)
    # NOTE(review): this test continues past the end of the visible source
    # chunk; only its first statement is reproduced here.
+ s1 = factory.strategy_singleton_instance(GenericStrategy) + s2 = new_factory.strategy_singleton_instance(GenericStrategy) + assert s1 is not s2 + assert s1.strategy_factory() is factory + assert s2.strategy_factory() is new_factory + +def test_metaclass(): + assert NonStrategy._is_strategy == False + assert IntegerOrNilStrategy._is_strategy == True + assert IntegerOrNilStrategy._is_singleton == True + assert NonSingletonStrategy._is_singleton == False + assert NonStrategy._is_singleton == False + assert NonStrategy.get_storage is not NonSingletonStrategy.get_storage + +def test_singletons(): + def do_test_singletons(cls, expected_true): + l1 = W_List(cls, 0) + l2 = W_List(cls, 0) + if expected_true: + assert l1.strategy is l2.strategy + else: + assert l1.strategy is not l2.strategy + do_test_singletons(EmptyStrategy, True) + do_test_singletons(NonSingletonStrategy, False) + do_test_singletons(NonStrategy, False) + do_test_singletons(GenericStrategy, True) + +def do_test_initialization(cls, default_value=w_nil, is_safe=True): + size = 10 + l = W_List(cls, size) + s = l.strategy + assert s.size(l) == size + assert s.fetch(l,0) == default_value + assert s.fetch(l,size/2) == default_value + assert s.fetch(l,size-1) == default_value + py.test.raises(IndexError, s.fetch, l, size) + py.test.raises(IndexError, s.fetch, l, size+1) + py.test.raises(IndexError, s.fetch, l, size+5) + if is_safe: + py.test.raises(IndexError, s.fetch, l, -1) + else: + assert s.fetch(l, -1) == s.fetch(l, size - 1) + +def test_init_Empty(): + l = W_List(EmptyStrategy, 0) + s = l.strategy + assert s.size(l) == 0 + py.test.raises(IndexError, s.fetch, l, 0) + py.test.raises(IndexError, s.fetch, l, 10) + py.test.raises(IndexError, s.delete, l, 0, 1) + py.test.raises(AssertionError, W_List, EmptyStrategy, 2) # Only size 0 possible. 
+ +def test_init_Nil(): + do_test_initialization(NilStrategy) + +def test_init_Generic(): + do_test_initialization(GenericStrategy, is_safe=False) + +def test_init_WeakGeneric(): + do_test_initialization(WeakGenericStrategy) + +def test_init_Integer(): + do_test_initialization(IntegerStrategy, default_value=W_Integer(0)) + +def test_init_IntegerOrNil(): + do_test_initialization(IntegerOrNilStrategy) + +# === Test Simple store + +def do_test_store(cls, stored_value=W_Object(), is_safe=True, is_varsize=False): + size = 10 + l = W_List(cls, size) + s = l.strategy + def store_test(index): + s.store(l, index, stored_value) + assert s.fetch(l, index) == stored_value + store_test(0) + store_test(size/2) + store_test(size-1) + if not is_varsize: + py.test.raises(IndexError, s.store, l, size, stored_value) + py.test.raises(IndexError, s.store, l, size+1, stored_value) + py.test.raises(IndexError, s.store, l, size+5, stored_value) + if is_safe: + py.test.raises(IndexError, s.store, l, -1, stored_value) + else: + store_test(-1) + +def test_store_Nil(): + do_test_store(NilStrategy, stored_value=w_nil) + +def test_store_Generic(): + do_test_store(GenericStrategy, is_safe=False) + +def test_store_WeakGeneric(): + do_test_store(WeakGenericStrategy, stored_value=w_nil) + +def test_store_Integer(): + do_test_store(IntegerStrategy, stored_value=W_Integer(100)) + +def test_store_IntegerOrNil(): + do_test_store(IntegerOrNilStrategy, stored_value=W_Integer(100)) + do_test_store(IntegerOrNilStrategy, stored_value=w_nil) + +# === Test Insert + +def do_test_insert(cls, values): + l = W_List(cls, 0) + assert len(values) >= 6 + values0 = values[0:1] + values1 = values[1:2] + values2 = values[2:4] + values3 = values[4:6] + l.insert(3, values0) # Will still be inserted at the very beginning + check_contents(l, values0) + l.insert(1, values1+values3) + check_contents(l, values0+values1+values3) + l.insert(2, values2) + check_contents(l, values) + return l + +def test_insert_Nil(): + 
do_test_insert(NilStrategy, [w_nil]*6) + +def test_insert_Generic(): + do_test_insert(GenericStrategy, [W_Object() for _ in range(6)]) + +def test_insert_WeakGeneric(): + do_test_insert(WeakGenericStrategy, [W_Object() for _ in range(6)]) + +def test_insert_Integer(): + do_test_insert(IntegerStrategy, [W_Integer(x) for x in range(6)]) + +def test_insert_IntegerOrNil(): + do_test_insert(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) + do_test_insert(IntegerOrNilStrategy, [w_nil]*6) + +# === Test Delete + +def do_test_delete(cls, values, indexing_unsafe=False): + assert len(values) >= 6 + l = W_List(cls, len(values), values) + if not indexing_unsafe: + py.test.raises(IndexError, l.delete, 2, 1) + l.delete(2, 4) + del values[2: 4] + check_contents(l, values) + l.delete(1, 2) + del values[1: 2] + check_contents(l, values) + +def test_delete_Nil(): + do_test_delete(NilStrategy, [w_nil]*6) + +def test_delete_Generic(): + do_test_delete(GenericStrategy, [W_Object() for _ in range(6)], indexing_unsafe=True) + +def test_delete_WeakGeneric(): + do_test_delete(WeakGenericStrategy, [W_Object() for _ in range(6)]) + +def test_delete_Integer(): + do_test_delete(IntegerStrategy, [W_Integer(x) for x in range(6)]) + +def test_delete_IntegerOrNil(): + do_test_delete(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) + do_test_delete(IntegerOrNilStrategy, [w_nil]*6) + +# === Test Transitions + +def test_CheckCanHandle(): + def assert_handles(cls, good, bad): + s = cls(0) + for val in good: + assert s._check_can_handle(val) + for val in bad: + assert not s._check_can_handle(val) + obj = W_Object() + i = W_Integer(0) + nil = w_nil + + assert_handles(EmptyStrategy, [], [nil, obj, i]) + assert_handles(NilStrategy, [nil], [obj, i]) + assert_handles(GenericStrategy, [nil, obj, i], []) + assert_handles(WeakGenericStrategy, [nil, obj, i], []) + assert_handles(IntegerStrategy, [i], [nil, obj]) + assert_handles(IntegerOrNilStrategy, [nil, i], 
[obj]) + +def do_test_transition(OldStrategy, value, NewStrategy, initial_size=10): + w = W_List(OldStrategy, initial_size) + old = w.strategy + w.store(0, value) + assert isinstance(w.strategy, NewStrategy) + assert factory.switching_log == [(None, old), (old, w.strategy)] + +def test_AllNil_to_Generic(): + do_test_transition(NilStrategy, W_Object(), GenericStrategy) + +def test_AllNil_to_IntegerOrNil(): + do_test_transition(NilStrategy, W_Integer(0), IntegerOrNilStrategy) + +def test_IntegerOrNil_to_Generic(): + do_test_transition(IntegerOrNilStrategy, W_Object(), GenericStrategy) + +def test_Integer_to_IntegerOrNil(): + do_test_transition(IntegerStrategy, w_nil, IntegerOrNilStrategy) + +def test_Generic_to_AllNil(): + w = W_List(GenericStrategy, 5) + old = w.strategy + factory.switch_strategy(w, NilStrategy) + assert isinstance(w.strategy, NilStrategy) + assert factory.switching_log == [(None, old), (old, w.strategy)] + +def test_Integer_Generic(): + do_test_transition(IntegerStrategy, W_Object(), GenericStrategy) + +def test_TaggingValue_not_storable(): + tag = IntegerOrNilStrategy(10).unwrapped_tagged_value() # sys.maxint + do_test_transition(IntegerOrNilStrategy, W_Integer(tag), GenericStrategy) + +def test_insert_StrategySwitch_IntOrNil(): + o = W_Object() + l = do_test_insert(IntegerOrNilStrategy, [W_Integer(1), w_nil, o, o, w_nil, W_Integer(3)]) + assert isinstance(l.strategy, GenericStrategy) + +def test_insert_StrategySwitch_AllNil(): + o = W_Object() + l = do_test_insert(NilStrategy, [w_nil, w_nil, o, o, w_nil, w_nil]) + assert isinstance(l.strategy, GenericStrategy) + +def test_transition_to_nonSingleton(): + l = W_List(NilStrategy, 5) + factory.switch_strategy(l, NonSingletonStrategy) + strategy1 = l.strategy + assert isinstance(strategy1, NonSingletonStrategy) + factory.switch_strategy(l, NonSingletonStrategy) + assert strategy1 != l.strategy From noreply at buildbot.pypy.org Wed Apr 22 15:09:53 2015 From: noreply at buildbot.pypy.org (fijal) Date: 
Wed, 22 Apr 2015 15:09:53 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: fix test_compile Message-ID: <20150422130953.015A61C02A3@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76884:a8d35ae5a9a4 Date: 2015-04-21 20:05 +0200 http://bitbucket.org/pypy/pypy/changeset/a8d35ae5a9a4/ Log: fix test_compile diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py --- a/rpython/jit/metainterp/test/test_compile.py +++ b/rpython/jit/metainterp/test/test_compile.py @@ -19,7 +19,8 @@ ts = typesystem.llhelper def __init__(self): self.seen = [] - def compile_loop(self, inputargs, operations, token, log=True, name='', + def compile_loop(self, inputargs, operations, token, jd_id=0, + unique_id=0, log=True, name='', logger=None): token.compiled_loop_token = self.Storage() self.seen.append((inputargs, operations, token)) @@ -38,6 +39,9 @@ def attach_unoptimized_bridge_from_interp(*args): pass + def get_unique_id(*args): + return 0 + def get_location_str(self, args): return 'location' @@ -59,6 +63,7 @@ class FakeMetaInterp: call_pure_results = {} class jitdriver_sd: + index = 0 warmstate = FakeState() virtualizable_info = None From noreply at buildbot.pypy.org Wed Apr 22 15:09:54 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Apr 2015 15:09:54 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: fix test_pypyjit Message-ID: <20150422130954.317B51C02A3@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76885:9629a3afd70c Date: 2015-04-22 15:09 +0200 http://bitbucket.org/pypy/pypy/changeset/9629a3afd70c/ Log: fix test_pypyjit diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -105,7 +105,7 @@ ofs = ops_offset.get(op, 0) if op.opnum == rop.DEBUG_MERGE_POINT: jd_sd = jitdrivers_sd[op.getarg(0).getint()] - greenkey = op.getarglist()[4:] + 
greenkey = op.getarglist()[3:] repr = jd_sd.warmstate.get_location_str(greenkey) w_greenkey = wrap_greenkey(space, jd_sd.jitdriver, greenkey, repr) l_w.append(DebugMergePoint(space, jit_hooks._cast_to_gcref(op), diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -55,7 +55,7 @@ oplist = parse(""" [i1, i2, p2] i3 = int_add(i1, i2) - debug_merge_point(0, 0, 0, 0, 0, 0, ConstPtr(ptr0)) + debug_merge_point(0, 0, 0, 0, 0, ConstPtr(ptr0)) guard_nonnull(p2) [] guard_true(i3) [] """, namespace={'ptr0': code_gcref}).operations From noreply at buildbot.pypy.org Wed Apr 22 15:10:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Apr 2015 15:10:24 +0200 (CEST) Subject: [pypy-commit] pypy default: A test and fix for rewrite.py. Message-ID: <20150422131024.956901C02A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76886:70a22aaa3a7c Date: 2015-04-22 15:10 +0200 http://bitbucket.org/pypy/pypy/changeset/70a22aaa3a7c/ Log: A test and fix for rewrite.py. diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -62,6 +62,10 @@ op = operations[i] if op.getopnum() == rop.DEBUG_MERGE_POINT: continue + # ---------- GETFIELD_GC ---------- + if op.getopnum() == rop.GETFIELD_GC: + self.handle_getfield_gc(op) + continue # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- if op.is_malloc(): self.handle_malloc_operation(op) @@ -122,6 +126,18 @@ # ---------- + def handle_getfield_gc(self, op): + """See test_zero_ptr_field_before_getfield(). We hope there is + no getfield_gc in the middle of initialization code, but there + shouldn't be, given that a 'new' is already delayed by previous + optimization steps. 
In practice it should immediately be + followed by a bunch of 'setfields', and the 'pending_zeros' + optimization we do here is meant for this case.""" + self.emit_pending_zeros() + self.newops.append(op) + + # ---------- + def handle_malloc_operation(self, op): opnum = op.getopnum() if opnum == rop.NEW: diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -1031,3 +1031,21 @@ guard_false(i1, descr=guarddescr) [] jump() """) + + def test_zero_ptr_field_before_getfield(self): + # This case may need to be fixed in the metainterp/optimizeopt + # already so that it no longer occurs for rewrite.py. But anyway + # it's a good idea to make sure rewrite.py is correct on its own. + self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = getfield_gc(p0, descr=tdescr) + jump(p1) + """, """ + [] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s) + p1 = getfield_gc(p0, descr=tdescr) + jump(p1) + """) From noreply at buildbot.pypy.org Wed Apr 22 15:26:08 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Apr 2015 15:26:08 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: fix this test Message-ID: <20150422132608.F2AC31C069E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76887:647fe5a61ba2 Date: 2015-04-22 15:25 +0200 http://bitbucket.org/pypy/pypy/changeset/647fe5a61ba2/ Log: fix this test diff --git a/pypy/module/_vmprof/test/test_direct.py b/pypy/module/_vmprof/test/test_direct.py --- a/pypy/module/_vmprof/test/test_direct.py +++ b/pypy/module/_vmprof/test/test_direct.py @@ -29,7 +29,7 @@ { long c = *current_pos_addr; if (c >= 5) - return 0; + return -1; *current_pos_addr = c + 1; return *((long*)codemap_raw + c); } @@ -57,10 +57,11 @@ lib.buffer[2] = 12 
lib.buffer[3] = 16 lib.buffer[4] = 0 - buf = ffi.new("long[5]", [0] * 5) + buf = ffi.new("long[10]", [0] * 10) result = ffi.cast("void**", buf) res = lib.vmprof_write_header_for_jit_addr(result, 0, ffi.NULL, 100) - assert res == 3 - assert buf[0] == 16 - assert buf[1] == 12 - assert buf[2] == 8 + assert res == 6 + assert buf[0] == 2 + assert buf[1] == 16 + assert buf[2] == 12 + assert buf[3] == 8 From noreply at buildbot.pypy.org Wed Apr 22 15:41:06 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Apr 2015 15:41:06 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: Move _code_unique_id out of ExecutionContext into its own small Message-ID: <20150422134106.E40571C140B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: vmprof2 Changeset: r76888:cc4843b3d8b2 Date: 2015-04-22 15:41 +0200 http://bitbucket.org/pypy/pypy/changeset/cc4843b3d8b2/ Log: Move _code_unique_id out of ExecutionContext into its own small singleton: it is a global counter, not a per-thread one. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -11,7 +11,7 @@ INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, - UserDelAction) + UserDelAction, CodeUniqueIds) from pypy.interpreter.error import OperationError, new_exception_class, oefmt from pypy.interpreter.argument import Arguments from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary @@ -388,6 +388,7 @@ self.actionflag = ActionFlag() # changed by the signal module self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) + self.code_unique_ids = CodeUniqueIds() self._code_of_sys_exc_info = None # can be overridden to a subclass @@ -667,15 +668,15 @@ return ec def register_code_callback(self, callback): - ec = self.getexecutioncontext() - ec._code_callback = callback + cui = 
self.code_unique_ids + cui.code_callback = callback def register_code_object(self, pycode): - ec = self.getexecutioncontext() - if ec._code_callback is None: + cui = self.code_unique_ids + if cui.code_callback is None: return - ec._code_callback(self, pycode) - + cui.code_callback(self, pycode) + def _freeze_(self): return True diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -2,7 +2,6 @@ from pypy.interpreter.error import OperationError, get_cleared_operation_error from rpython.rlib.unroll import unrolling_iterable from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated TICK_COUNTER_STEP = 100 @@ -35,16 +34,6 @@ self.w_profilefuncarg = None self.thread_disappeared = False # might be set to True after os.fork() - if sys.maxint == 2147483647: - self._code_unique_id = 0 # XXX this is wrong, it won't work on 32bit - else: - if we_are_translated(): - self._code_unique_id = 0x7000000000000000 - else: - self._code_unique_id = 0x7700000000000000 - # should be enough code objects - self._code_callback = None - @staticmethod def _mark_thread_disappeared(space): # Called in the child process after os.fork() by interp_posix.py. @@ -590,3 +579,11 @@ # there is no list of length n: if n is large, then the GC # will run several times while walking the list, but it will # see lower and lower memory usage, with no lower bound of n. 
+ +class CodeUniqueIds(object): + def __init__(self): + if sys.maxint == 2147483647: + self.code_unique_id = 0 # XXX this is wrong, it won't work on 32bit + else: + self.code_unique_id = 0x7000000000000000 + self.code_callback = None diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -127,9 +127,9 @@ from pypy.objspace.std.mapdict import init_mapdict_cache init_mapdict_cache(self) - ec = self.space.getexecutioncontext() - self._unique_id = ec._code_unique_id - ec._code_unique_id += 4 # so we have two bits that we can mark stuff + cui = self.space.code_unique_ids + self._unique_id = cui.code_unique_id + cui.code_unique_id += 4 # so we have two bits that we can mark stuff # with def _get_full_name(self): From noreply at buildbot.pypy.org Wed Apr 22 15:51:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Apr 2015 15:51:43 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: in-progress: declare structs on FFIs after a cdef() Message-ID: <20150422135143.11D201C1468@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1775:8acbc3c22b22 Date: 2015-04-22 15:52 +0200 http://bitbucket.org/cffi/cffi/changeset/8acbc3c22b22/ Log: in-progress: declare structs on FFIs after a cdef() diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -97,6 +97,7 @@ if override: for cache in self._function_caches: cache.clear() + _set_cdef_types(self) def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. 
@@ -565,3 +566,27 @@ else: with ffi._lock: return ffi._get_cached_btype(tp) + +def _set_cdef_types(ffi): + struct_unions = [] + pending_completion = [] + for name, tp in sorted(ffi._parser._declarations.items()): + if name.startswith('struct '): + tp.check_not_partial() + basename = name[7:] + BType = _cffi1_backend.new_struct_type(basename) + struct_unions.append(basename) + struct_unions.append(BType) + if tp.fldtypes is not None: + pending_completion.append((tp, BType)) + # + ffi.__set_types(struct_unions) + # + for tp, BType in pending_completion: + fldtypes = [ffi.typeof(ftp._get_c_name()) for ftp in tp.fldtypes] + lst = list(zip(tp.fldnames, fldtypes, tp.fldbitsize)) + sflags = 0 + if tp.packed: + sflags = 8 # SF_PACKED + _cffi1_backend.complete_struct_or_union(BType, lst, ffi, + -1, -1, sflags) diff --git a/new/cffi1_module.c b/new/cffi1_module.c --- a/new/cffi1_module.c +++ b/new/cffi1_module.c @@ -51,7 +51,7 @@ if (m == NULL) return -1; - FFIObject *ffi = ffi_internal_new(&FFI_Type, ctx, 1); + FFIObject *ffi = ffi_internal_new(&FFI_Type, ctx); Py_XINCREF(ffi); /* make the ffi object really immortal */ if (ffi == NULL || PyModule_AddObject(m, "ffi", (PyObject *)ffi) < 0) return -1; diff --git a/new/ffi_obj.c b/new/ffi_obj.c --- a/new/ffi_obj.c +++ b/new/ffi_obj.c @@ -24,15 +24,15 @@ struct _cffi_parse_info_s info; int ctx_is_static; builder_c_t *types_builder; + PyObject *dynamic_types; _cffi_opcode_t internal_output[FFI_COMPLEXITY_OUTPUT]; }; static FFIObject *ffi_internal_new(PyTypeObject *ffitype, - const struct _cffi_type_context_s *ctx, - int ctx_is_static) + const struct _cffi_type_context_s *static_ctx) { FFIObject *ffi; - if (ctx_is_static) { + if (static_ctx != NULL) { ffi = (FFIObject *)PyObject_GC_New(FFIObject, ffitype); /* we don't call PyObject_GC_Track() here: from _cffi_init_module() it is not needed, because in this case the ffi object is immortal */ @@ -43,16 +43,17 @@ if (ffi == NULL) return NULL; - ffi->types_builder = 
new_builder_c(ctx); + ffi->types_builder = new_builder_c(static_ctx); if (ffi->types_builder == NULL) { Py_DECREF(ffi); return NULL; } ffi->gc_wrefs = NULL; - ffi->info.ctx = ctx; + ffi->info.ctx = &ffi->types_builder->ctx; ffi->info.output = ffi->internal_output; ffi->info.output_size = FFI_COMPLEXITY_OUTPUT; - ffi->ctx_is_static = ctx_is_static; + ffi->ctx_is_static = (static_ctx != NULL); + ffi->dynamic_types = NULL; return ffi; } @@ -60,6 +61,7 @@ { PyObject_GC_UnTrack(ffi); Py_XDECREF(ffi->gc_wrefs); + Py_XDECREF(ffi->dynamic_types); if (!ffi->ctx_is_static) free_builder_c(ffi->types_builder); @@ -77,22 +79,7 @@ static PyObject *ffiobj_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { /* user-facing initialization code, for explicit FFI() calls */ - struct _cffi_type_context_s *ctx; - PyObject *result; - - ctx = PyMem_Malloc(sizeof(struct _cffi_type_context_s)); - if (ctx == NULL) { - PyErr_NoMemory(); - return NULL; - } - memset(ctx, 0, sizeof(struct _cffi_type_context_s)); - - result = (PyObject *)ffi_internal_new(type, ctx, 0); - if (result == NULL) { - PyMem_Free(ctx); - return NULL; - } - return result; + return (PyObject *)ffi_internal_new(type, NULL); } static int ffiobj_init(PyObject *self, PyObject *args, PyObject *kwds) @@ -548,7 +535,72 @@ return 0; } +static PyObject *ffi__set_types(FFIObject *self, PyObject *args) +{ + PyObject *lst1; + _cffi_opcode_t *types = NULL; + struct _cffi_struct_union_s *struct_unions = NULL; + + if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &lst1)) + return NULL; + + if (self->ctx_is_static) { + bad_usage: + PyMem_Free(struct_unions); + PyMem_Free(types); + if (!PyErr_Occurred()) + PyErr_SetString(PyExc_RuntimeError, "internal error"); + return NULL; + } + + cleanup_builder_c(self->types_builder); + + int i, lst_length = PyList_GET_SIZE(lst1) / 2; + Py_ssize_t new_size_1 = sizeof(_cffi_opcode_t) * lst_length; + Py_ssize_t new_size_2 = sizeof(struct _cffi_struct_union_s) * lst_length; + types = 
PyMem_Malloc(new_size_1); + struct_unions = PyMem_Malloc(new_size_2); + if (!types || !struct_unions) { + PyErr_NoMemory(); + goto bad_usage; + } + memset(types, 0, new_size_1); + memset(struct_unions, 0, new_size_2); + + for (i = 0; i < lst_length; i++) { + PyObject *x = PyList_GET_ITEM(lst1, i * 2); + if (!PyString_Check(x)) + goto bad_usage; + struct_unions[i].name = PyString_AS_STRING(x); + struct_unions[i].type_index = i; + //struct_unions[i].flags = ...; + struct_unions[i].size = (size_t)-2; + struct_unions[i].alignment = -2; + + x = PyList_GET_ITEM(lst1, i * 2 + 1); + if (!CTypeDescr_Check(x)) + goto bad_usage; + types[i] = x; + } + for (i = 0; i < lst_length; i++) { + PyObject *x = (PyObject *)types[i]; + Py_INCREF(x); + } + + Py_INCREF(lst1); /* to keep alive the strings in '.name' */ + Py_XDECREF(self->dynamic_types); + self->dynamic_types = lst1; + self->types_builder->ctx.types = types; + self->types_builder->num_types_imported = lst_length; + self->types_builder->ctx.struct_unions = struct_unions; + self->types_builder->ctx.num_struct_unions = lst_length; + + Py_INCREF(Py_None); + return Py_None; +} + static PyMethodDef ffi_methods[] = { + {"__set_types", (PyCFunction)ffi__set_types,METH_VARARGS}, #if 0 {"addressof", (PyCFunction)ffi_addressof, METH_VARARGS}, #endif diff --git a/new/realize_c_type.c b/new/realize_c_type.c --- a/new/realize_c_type.c +++ b/new/realize_c_type.c @@ -2,6 +2,7 @@ typedef struct { struct _cffi_type_context_s ctx; /* inlined substructure */ PyObject *types_dict; + int num_types_imported; } builder_c_t; @@ -44,9 +45,15 @@ return err; } -static void free_builder_c(builder_c_t *builder) +static void cleanup_builder_c(builder_c_t *builder) { - Py_XDECREF(builder->types_dict); + int i; + for (i = builder->num_types_imported; (--i) >= 0; ) { + _cffi_opcode_t x = builder->ctx.types[i]; + if ((((uintptr_t)x) & 1) == 0) { + Py_XDECREF((PyObject *)x); + } + } const void *mem[] = {builder->ctx.types, builder->ctx.globals, @@ -54,11 
+61,16 @@ builder->ctx.fields, builder->ctx.enums, builder->ctx.typenames}; - int i; for (i = 0; i < sizeof(mem) / sizeof(*mem); i++) { if (mem[i] != NULL) PyMem_Free((void *)mem[i]); } +} + +static void free_builder_c(builder_c_t *builder) +{ + Py_XDECREF(builder->types_dict); + cleanup_builder_c(builder); PyMem_Free(builder); } @@ -74,8 +86,13 @@ PyErr_NoMemory(); return NULL; } - builder->ctx = *ctx; + if (ctx) + builder->ctx = *ctx; + else + memset(&builder->ctx, 0, sizeof(builder->ctx)); + builder->types_dict = ldict; + builder->num_types_imported = 0; return builder; } diff --git a/new/test_dlopen.py b/new/test_dlopen.py --- a/new/test_dlopen.py +++ b/new/test_dlopen.py @@ -3,6 +3,11 @@ import math +def test_cdef_struct(): + ffi = FFI() + ffi.cdef("struct foo_s { int a, b; };") + assert ffi.sizeof("struct foo_s") == 8 + def test_math_sin(): py.test.skip("XXX redo!") ffi = FFI() From noreply at buildbot.pypy.org Wed Apr 22 16:10:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Apr 2015 16:10:02 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: fix Message-ID: <20150422141002.2B36D1C02A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1776:9972564d7f2c Date: 2015-04-22 16:10 +0200 http://bitbucket.org/cffi/cffi/changeset/9972564d7f2c/ Log: fix diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -572,12 +572,11 @@ pending_completion = [] for name, tp in sorted(ffi._parser._declarations.items()): if name.startswith('struct '): - tp.check_not_partial() basename = name[7:] BType = _cffi1_backend.new_struct_type(basename) struct_unions.append(basename) struct_unions.append(BType) - if tp.fldtypes is not None: + if not tp.partial and tp.fldtypes is not None: pending_completion.append((tp, BType)) # ffi.__set_types(struct_unions) From noreply at buildbot.pypy.org Wed Apr 22 16:13:12 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Apr 2015 16:13:12 +0200 (CEST) Subject: [pypy-commit] 
cffi cffi-1.0: cdef() unions Message-ID: <20150422141312.7C17A1C0661@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1777:f19eb734b709 Date: 2015-04-22 16:13 +0200 http://bitbucket.org/cffi/cffi/changeset/f19eb734b709/ Log: cdef() unions diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -571,9 +571,12 @@ struct_unions = [] pending_completion = [] for name, tp in sorted(ffi._parser._declarations.items()): - if name.startswith('struct '): - basename = name[7:] - BType = _cffi1_backend.new_struct_type(basename) + kind, basename = name.split(' ', 1) + if kind == 'struct' or kind == 'union': + if kind == 'struct': + BType = _cffi1_backend.new_struct_type(basename) + else: + BType = _cffi1_backend.new_union_type(basename) struct_unions.append(basename) struct_unions.append(BType) if not tp.partial and tp.fldtypes is not None: diff --git a/new/ffi_obj.c b/new/ffi_obj.c --- a/new/ffi_obj.c +++ b/new/ffi_obj.c @@ -573,14 +573,14 @@ goto bad_usage; struct_unions[i].name = PyString_AS_STRING(x); struct_unions[i].type_index = i; - //struct_unions[i].flags = ...; - struct_unions[i].size = (size_t)-2; - struct_unions[i].alignment = -2; x = PyList_GET_ITEM(lst1, i * 2 + 1); if (!CTypeDescr_Check(x)) goto bad_usage; types[i] = x; + struct_unions[i].flags = ((CTypeDescrObject *)x)->ct_flags & CT_UNION; + struct_unions[i].size = (size_t)-2; + struct_unions[i].alignment = -2; } for (i = 0; i < lst_length; i++) { PyObject *x = (PyObject *)types[i]; diff --git a/new/test_dlopen.py b/new/test_dlopen.py --- a/new/test_dlopen.py +++ b/new/test_dlopen.py @@ -8,6 +8,11 @@ ffi.cdef("struct foo_s { int a, b; };") assert ffi.sizeof("struct foo_s") == 8 +def test_cdef_union(): + ffi = FFI() + ffi.cdef("union foo_s { int a, b; };") + assert ffi.sizeof("union foo_s") == 4 + def test_math_sin(): py.test.skip("XXX redo!") ffi = FFI() From noreply at buildbot.pypy.org Wed Apr 22 16:14:31 2015 From: noreply at buildbot.pypy.org (arigo) Date: 
Wed, 22 Apr 2015 16:14:31 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: pass one more line of the next test Message-ID: <20150422141431.39D8A1C0661@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1778:e3d0c9fc52cb Date: 2015-04-22 16:15 +0200 http://bitbucket.org/cffi/cffi/changeset/e3d0c9fc52cb/ Log: pass one more line of the next test diff --git a/new/test_verify1.py b/new/test_verify1.py --- a/new/test_verify1.py +++ b/new/test_verify1.py @@ -1,5 +1,5 @@ import sys, math, py -from cffi1 import FFI, VerificationError, VerificationMissing, model +from cffi1 import FFI, VerificationError, model lib_m = ['m'] if sys.platform == 'win32': @@ -450,9 +450,9 @@ ...; }; """) - py.test.raises(VerificationMissing, ffi.sizeof, 'struct foo_s') - py.test.raises(VerificationMissing, ffi.offsetof, 'struct foo_s', 'x') - py.test.raises(VerificationMissing, ffi.new, 'struct foo_s *') + py.test.raises(ffi.error, ffi.sizeof, 'struct foo_s') + py.test.raises(ffi.error, ffi.offsetof, 'struct foo_s', 'x') + py.test.raises(ffi.error, ffi.new, 'struct foo_s *') ffi.verify(""" struct foo_s { int a, b, x, c, d, e; From noreply at buildbot.pypy.org Wed Apr 22 16:27:11 2015 From: noreply at buildbot.pypy.org (mjacob) Date: Wed, 22 Apr 2015 16:27:11 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Change some whitespace to make a new revision to test something on the new build slave. Message-ID: <20150422142711.C3AE51C06CD@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3.3 Changeset: r76889:d27cff3ec7ad Date: 2015-04-22 16:26 +0200 http://bitbucket.org/pypy/pypy/changeset/d27cff3ec7ad/ Log: Change some whitespace to make a new revision to test something on the new build slave. 
diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -13,7 +13,7 @@ class CConfig: _compilation_info_ = ExternalCompilationInfo( - includes=['sys/stat.h', + includes=['sys/stat.h', 'unistd.h', 'fcntl.h'], ) From noreply at buildbot.pypy.org Wed Apr 22 16:39:33 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Apr 2015 16:39:33 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: ffi.offsetof() Message-ID: <20150422143933.471721C02A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1779:3f61ed84336c Date: 2015-04-22 16:40 +0200 http://bitbucket.org/cffi/cffi/changeset/3f61ed84336c/ Log: ffi.offsetof() diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4952,27 +4952,26 @@ return res; } -static PyObject *b_typeoffsetof(PyObject *self, PyObject *args) -{ - PyObject *res, *fieldname; - CTypeDescrObject *ct; +static CTypeDescrObject *direct_typeoffsetof(CTypeDescrObject *ct, + PyObject *fieldname, + int following, Py_ssize_t *offset) +{ + /* Does not return a new reference! 
*/ + CTypeDescrObject *res; CFieldObject *cf; - Py_ssize_t offset; - int following = 0; - - if (!PyArg_ParseTuple(args, "O!O|i:typeoffsetof", - &CTypeDescr_Type, &ct, &fieldname, &following)) - return NULL; if (PyTextAny_Check(fieldname)) { if (!following && (ct->ct_flags & CT_POINTER)) ct = ct->ct_itemdescr; - if (!(ct->ct_flags & (CT_STRUCT|CT_UNION)) || - force_lazy_struct(ct) <= 0) { + if (!(ct->ct_flags & (CT_STRUCT|CT_UNION))) { + PyErr_SetString(PyExc_TypeError, + "with a field name argument, expected a " + "struct or union ctype"); + return NULL; + } + if (force_lazy_struct(ct) <= 0) { if (!PyErr_Occurred()) - PyErr_SetString(PyExc_TypeError, - "with a field name argument, expected an " - "initialized struct or union ctype"); + PyErr_SetString(PyExc_TypeError, "struct/union is opaque"); return NULL; } cf = (CFieldObject *)PyDict_GetItem(ct->ct_stuff, fieldname); @@ -4984,8 +4983,8 @@ PyErr_SetString(PyExc_TypeError, "not supported for bitfields"); return NULL; } - res = (PyObject *)cf->cf_type; - offset = cf->cf_offset; + res = cf->cf_type; + *offset = cf->cf_offset; } else { ssize_t index = PyInt_AsSsize_t(fieldname); @@ -5002,14 +5001,32 @@ "pointer to non-opaque"); return NULL; } - res = (PyObject *)ct->ct_itemdescr; - offset = index * ct->ct_itemdescr->ct_size; - if ((offset / ct->ct_itemdescr->ct_size) != index) { + res = ct->ct_itemdescr; + *offset = index * ct->ct_itemdescr->ct_size; + if ((*offset / ct->ct_itemdescr->ct_size) != index) { PyErr_SetString(PyExc_OverflowError, "array offset would overflow a Py_ssize_t"); return NULL; } } + return res; +} + +static PyObject *b_typeoffsetof(PyObject *self, PyObject *args) +{ + PyObject *res, *fieldname; + CTypeDescrObject *ct; + Py_ssize_t offset; + int following = 0; + + if (!PyArg_ParseTuple(args, "O!O|i:typeoffsetof", + &CTypeDescr_Type, &ct, &fieldname, &following)) + return NULL; + + res = (PyObject *)direct_typeoffsetof(ct, fieldname, following, &offset); + if (res == NULL) + return NULL; + return 
Py_BuildValue("(On)", res, offset); } diff --git a/new/ffi_obj.c b/new/ffi_obj.c --- a/new/ffi_obj.c +++ b/new/ffi_obj.c @@ -290,58 +290,46 @@ "string or unicode string.\n" "\n" "If 'cdata' is an enum, returns the value of the enumerator as a\n" -"string, or 'NUMBER' if the value is out of range.\n"); +"string, or 'NUMBER' if the value is out of range."); #define ffi_string b_string /* ffi_string() => b_string() from _cffi_backend.c */ -#if 0 -static CFieldObject *_ffi_field(CTypeDescrObject *ct, const char *fieldname) +PyDoc_STRVAR(ffi_offsetof_doc, +"Return the offset of the named field inside the given structure or\n" +"array, which must be given as a C type name. You can give several\n" +"field names in case of nested structures. You can also give numeric\n" +"values which correspond to array items, in case of an array type."); + +static PyObject *ffi_offsetof(FFIObject *self, PyObject *args) { - CFieldObject *cf; - if (force_lazy_struct(ct) == NULL) { - PyErr_Format(PyExc_TypeError, "'%s' is incomplete", ct->ct_name); + PyObject *arg; + CTypeDescrObject *ct; + Py_ssize_t i, offset; + + if (PyTuple_Size(args) < 2) { + PyErr_SetString(PyExc_TypeError, + "offsetof() expects at least 2 arguments"); return NULL; } - cf = (CFieldObject *)PyDict_GetItemString(ct->ct_stuff, fieldname); - if (cf == NULL) { - PyErr_Format(PyExc_KeyError, "'%s' has got no field '%s'", - ct->ct_name, fieldname); - return NULL; - } - if (cf->cf_bitshift >= 0) { - PyErr_SetString(PyExc_TypeError, "not supported for bitfields"); - return NULL; - } - return cf; -} -static PyObject *ffi_offsetof(ZefFFIObject *self, PyObject *args) -{ - PyObject *arg; - char *fieldname; - CTypeDescrObject *ct; - CFieldObject *cf; - - if (!PyArg_ParseTuple(args, "Os:offsetof", &arg, &fieldname)) - return NULL; - + arg = PyTuple_GET_ITEM(args, 0); ct = _ffi_type(self, arg, ACCEPT_STRING|ACCEPT_CTYPE); if (ct == NULL) return NULL; - if (!(ct->ct_flags & (CT_STRUCT|CT_UNION))) { - PyErr_Format(PyExc_TypeError, - 
"expected a struct or union ctype, got '%s'", - ct->ct_name); - return NULL; + offset = 0; + for (i = 1; i < PyTuple_GET_SIZE(args); i++) { + Py_ssize_t ofs1; + ct = direct_typeoffsetof(ct, PyTuple_GET_ITEM(args, i), i > 1, &ofs1); + if (ct == NULL) + return NULL; + offset += ofs1; } - cf = _ffi_field(ct, fieldname); - if (cf == NULL) - return NULL; - return PyInt_FromSsize_t(cf->cf_offset); + return PyInt_FromSsize_t(offset); } +#if 0 static PyObject *ffi_addressof(ZefFFIObject *self, PyObject *args) { PyObject *obj; @@ -612,8 +600,8 @@ {"gc", (PyCFunction)ffi_gc, METH_VARARGS}, {"getctype", (PyCFunction)ffi_getctype, METH_VARARGS}, {"load_library", (PyCFunction)ffi_load_library,METH_VARARGS|METH_KEYWORDS}, - {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS}, #endif + {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS,ffi_offsetof_doc}, {"new", (PyCFunction)ffi_new, METH_VARARGS, ffi_new_doc}, #if 0 {"new_handle", (PyCFunction)ffi_new_handle,METH_O}, diff --git a/new/test_recompiler.py b/new/test_recompiler.py --- a/new/test_recompiler.py +++ b/new/test_recompiler.py @@ -216,6 +216,9 @@ py.test.raises(OverflowError, "p.b -= 1") q = ffi.new("struct bar_s *", {'f': p}) assert q.f == p + # + assert ffi.offsetof("struct foo_s", "a") == 0 + assert ffi.offsetof("struct foo_s", "b") == 4 def test_verify_exact_field_offset(): ffi = FFI() diff --git a/new/test_verify1.py b/new/test_verify1.py --- a/new/test_verify1.py +++ b/new/test_verify1.py @@ -451,8 +451,8 @@ }; """) py.test.raises(ffi.error, ffi.sizeof, 'struct foo_s') - py.test.raises(ffi.error, ffi.offsetof, 'struct foo_s', 'x') - py.test.raises(ffi.error, ffi.new, 'struct foo_s *') + py.test.raises(TypeError, ffi.offsetof, 'struct foo_s', 'x') + py.test.raises(TypeError, ffi.new, 'struct foo_s *') ffi.verify(""" struct foo_s { int a, b, x, c, d, e; From noreply at buildbot.pypy.org Wed Apr 22 17:38:27 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Apr 2015 17:38:27 +0200 (CEST) Subject: 
[pypy-commit] cffi cffi-1.0: in-progress: bitfield support Message-ID: <20150422153827.E97501C0D78@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1780:ded8861f09fd Date: 2015-04-22 17:39 +0200 http://bitbucket.org/cffi/cffi/changeset/ded8861f09fd/ Log: in-progress: bitfield support diff --git a/new/realize_c_type.c b/new/realize_c_type.c --- a/new/realize_c_type.c +++ b/new/realize_c_type.c @@ -466,11 +466,16 @@ int i; for (i = 0; i < s->num_fields; i++, fld++) { _cffi_opcode_t op = fld->field_type_op; + int fbitsize = -1; PyObject *f; CTypeDescrObject *ctf; switch (_CFFI_GETOP(op)) { + case _CFFI_OP_BITFIELD: + assert(fld->field_size >= 0); + fbitsize = (int)fld->field_size; + /* fall-through */ case _CFFI_OP_NOOP: ctf = realize_c_type(builder, builder->ctx.types, _CFFI_GETARG(op)); @@ -484,8 +489,9 @@ if (fld->field_offset == (size_t)-1) { /* unnamed struct, with field positions and sizes entirely - determined by complete_struct_or_union() and not checked */ - assert(fld->field_size == -1); + determined by complete_struct_or_union() and not checked. + Or, bitfields (field_size >= 0), similarly not checked. 
*/ + assert(fld->field_size == (size_t)-1 || fbitsize >= 0); } else if (detect_custom_layout(ct, SF_STD_FIELD_POS, ctf->ct_size, fld->field_size, @@ -494,7 +500,7 @@ return -1; f = Py_BuildValue("(sOin)", fld->name, ctf, - (int)-1, (Py_ssize_t)fld->field_offset); + fbitsize, (Py_ssize_t)fld->field_offset); if (f == NULL) { Py_DECREF(fields); return -1; diff --git a/new/recompiler.py b/new/recompiler.py --- a/new/recompiler.py +++ b/new/recompiler.py @@ -439,25 +439,29 @@ flags = ('|'.join(flags)) or '0' if tp.fldtypes is not None: c_field = [approxname] - for fldname, fldtype in zip(tp.fldnames, tp.fldtypes): + for fldname, fldtype, fbitsize in tp.enumfields(): fldtype = self._field_type(tp, fldname, fldtype) spaces = " " * len(fldname) # cname is None for _add_missing_struct_unions() only - if cname is None or ( + op = '_CFFI_OP_NOOP' + if fbitsize >= 0: + op = '_CFFI_OP_BITFIELD' + size = '%d /* bits */' % fbitsize + elif cname is None or ( isinstance(fldtype, model.ArrayType) and fldtype.length is None): size = '(size_t)-1' else: size = 'sizeof(((%s)0)->%s)' % (tp.get_c_name('*'), fldname) - if cname is None: + if cname is None or fbitsize >= 0: offset = '(size_t)-1' else: offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname) c_field.append( ' { "%s", %s,\n' % (fldname, offset) + ' %s %s,\n' % (spaces, size) + - ' %s _CFFI_OP(_CFFI_OP_NOOP, %s) },' % ( - spaces, self._typesdict[fldtype])) + ' %s _CFFI_OP(%s, %s) },' % ( + spaces, op, self._typesdict[fldtype])) self._lsts["field"].append('\n'.join(c_field)) # if cname is None: # unknown name, for _add_missing_struct_unions From noreply at buildbot.pypy.org Wed Apr 22 17:46:01 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 22 Apr 2015 17:46:01 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Fix Message-ID: <20150422154601.2E4771C02A3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1781:538f36a9670b Date: 2015-04-22 17:46 +0200 
http://bitbucket.org/cffi/cffi/changeset/538f36a9670b/ Log: Fix diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4084,8 +4084,9 @@ as 1 instead. But for ctypes support, we allow the manually- specified totalsize to be zero in this case. */ boffsetmax = (boffsetmax + 7) / 8; /* bits -> bytes */ + boffsetmax = (boffsetmax + alignment - 1) & ~(alignment-1); if (totalsize < 0) { - totalsize = (boffsetmax + alignment - 1) & ~(alignment-1); + totalsize = boffsetmax; if (totalsize == 0) totalsize = 1; } From noreply at buildbot.pypy.org Wed Apr 22 18:21:15 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 22 Apr 2015 18:21:15 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: try to fix those tests Message-ID: <20150422162115.345BA1C1334@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76890:233ac16e55d6 Date: 2015-04-22 18:21 +0200 http://bitbucket.org/pypy/pypy/changeset/233ac16e55d6/ Log: try to fix those tests diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -134,7 +134,8 @@ def _ops_for_chunk(self, chunk, include_guard_not_invalidated): for op in chunk.operations: - if op.name != 'debug_merge_point' and \ + if op.name not in ('debug_merge_point', 'enter_portal_frame', + 'leave_portal_frame') and \ (op.name != 'guard_not_invalidated' or include_guard_not_invalidated): yield op From noreply at buildbot.pypy.org Wed Apr 22 21:53:14 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 22 Apr 2015 21:53:14 +0200 (CEST) Subject: [pypy-commit] pypy object-dtype2: test, fix for numpy object scalar actually becoming the contained object Message-ID: <20150422195314.8B0591C1334@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: object-dtype2 Changeset: r76891:e91a14676362 Date: 2015-04-22 22:07 +0300 
http://bitbucket.org/pypy/pypy/changeset/e91a14676362/ Log: test, fix for numpy object scalar actually becoming the contained object diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -202,12 +202,16 @@ return self elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and w_idx.ndims() > 0: - return self.getitem_filter(space, w_idx) + w_ret = self.getitem_filter(space, w_idx) else: try: - return self.implementation.descr_getitem(space, self, w_idx) + w_ret = self.implementation.descr_getitem(space, self, w_idx) except ArrayArgumentException: - return self.getitem_array_int(space, w_idx) + w_ret = self.getitem_array_int(space, w_idx) + if isinstance(w_ret, boxes.W_ObjectBox): + #return the W_Root object, not a scalar + w_ret = w_ret.w_obj + return w_ret def getitem(self, space, index_list): return self.implementation.getitem_index(space, index_list) diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -117,14 +117,15 @@ b = np.object_(3) b2 = np.object_(3.0) c = np.object_([4, 5]) - d = np.object_([None, {}, []]) - print type(a) + d = np.array([None])[0] assert a is None assert type(b) is int assert type(b2) is float assert type(c) is np.ndarray assert c.dtype == object - assert d.dtype == object + assert type(d) is type(None) + e = np.object_([None, {}, []]) + assert e.dtype == object def test_mem_array_creation_invalid_specification(self): # while not specifically testing object dtype, this From noreply at buildbot.pypy.org Wed Apr 22 21:53:15 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 22 Apr 2015 21:53:15 +0200 (CEST) Subject: [pypy-commit] pypy object-dtype2: skip test_zjit pending moving the tests to test_micronumpy in test_pypy_c Message-ID: 
<20150422195315.B55D01C1334@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: object-dtype2 Changeset: r76892:ec7e1a7a0847 Date: 2015-04-22 22:53 +0300 http://bitbucket.org/pypy/pypy/changeset/ec7e1a7a0847/ Log: skip test_zjit pending moving the tests to test_micronumpy in test_pypy_c diff --git a/pypy/goal/targetnumpystandalone.py b/pypy/goal/targetnumpystandalone.py deleted file mode 100644 --- a/pypy/goal/targetnumpystandalone.py +++ /dev/null @@ -1,43 +0,0 @@ - -""" Usage: - -./targetnumpystandalone-c array_size - -Will execute a give numpy bytecode. Arrays will be ranges (in float) modulo 10, -constants would be consecutive starting from one. - -Bytecode should contain letters 'a' 'l' and 'f' so far and be correct -""" - -import time -from pypy.module.micronumpy.compile import numpy_compile -from rpython.jit.codewriter.policy import JitPolicy -from rpython.rtyper.annlowlevel import hlstr - -def entry_point(argv): - if len(argv) != 3: - print __doc__ - return 1 - try: - size = int(argv[2]) - except ValueError: - print "INVALID LITERAL FOR INT:", argv[2] - print __doc__ - return 3 - t0 = time.time() - main(argv[0], size) - print "bytecode:", argv[0], "size:", size - print "took:", time.time() - t0 - return 0 - -def main(bc, size): - if not isinstance(bc, str): - bc = hlstr(bc) # for tests - a = numpy_compile(bc, size) - a = a.compute() - -def target(*args): - return entry_point, None - -def jitpolicy(driver): - return JitPolicy() diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -9,6 +9,7 @@ from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray +py.test.skip('move these to pypyjit/test_pypy_c/test_micronumpy') class TestNumpyJit(LLJitMixin): graph = None From noreply at buildbot.pypy.org Wed Apr 22 23:38:28 2015 From: noreply at 
buildbot.pypy.org (mattip) Date: Wed, 22 Apr 2015 23:38:28 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: try a fix for msvc on win32 Message-ID: <20150422213828.6919E1C0F93@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: vmprof2 Changeset: r76893:9591d90505b9 Date: 2015-04-23 00:38 +0300 http://bitbucket.org/pypy/pypy/changeset/9591d90505b9/ Log: try a fix for msvc on win32 diff --git a/rpython/jit/backend/llsupport/codemap.py b/rpython/jit/backend/llsupport/codemap.py --- a/rpython/jit/backend/llsupport/codemap.py +++ b/rpython/jit/backend/llsupport/codemap.py @@ -26,7 +26,16 @@ srcdir = os.path.join(os.path.dirname(__file__), 'src') eci = ExternalCompilationInfo(post_include_bits=[""" +#if defined _MSC_VER && _MSC_VER < 1600 +#ifdef _WIN32 +typedef unsigned int uintptr_t; +#else +typedef usigned long uintptr_t; +#endif +#else #include +#endif + RPY_EXTERN long pypy_jit_codemap_add(uintptr_t addr, unsigned int machine_code_size, long *bytecode_info, diff --git a/rpython/jit/backend/llsupport/src/skiplist.c b/rpython/jit/backend/llsupport/src/skiplist.c --- a/rpython/jit/backend/llsupport/src/skiplist.c +++ b/rpython/jit/backend/llsupport/src/skiplist.c @@ -1,5 +1,25 @@ #include +#if defined _MSC_VER + #if _MSC_VER < 1600 + #include + int __sync_lock_test_and_set(int * i, int j) + { + return _interlockedbittestandreset(i, j); + } + int __sync_lock_release(int *i) + { + return _interlockedbittestandreset(i, 0); + } + #ifdef _WIN32 + typedef unsigned int uintptr_t; + #else + typedef usigned long uintptr_t; + #endif + #endif +#else #include +#endif + #define HAS_SKIPLIST #define SKIPLIST_HEIGHT 8 From noreply at buildbot.pypy.org Wed Apr 22 23:44:54 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 22 Apr 2015 23:44:54 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: fix bad logic Message-ID: <20150422214454.634821C1370@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: vmprof2 Changeset: r76894:df219e810ccb Date: 2015-04-23 00:45 +0300 
http://bitbucket.org/pypy/pypy/changeset/df219e810ccb/ Log: fix bad logic diff --git a/rpython/jit/backend/llsupport/src/skiplist.c b/rpython/jit/backend/llsupport/src/skiplist.c --- a/rpython/jit/backend/llsupport/src/skiplist.c +++ b/rpython/jit/backend/llsupport/src/skiplist.c @@ -1,15 +1,15 @@ #include #if defined _MSC_VER + #include + int __sync_lock_test_and_set(int * i, int j) + { + return _interlockedbittestandreset(i, j); + } + int __sync_lock_release(int *i) + { + return _interlockedbittestandreset(i, 0); + } #if _MSC_VER < 1600 - #include - int __sync_lock_test_and_set(int * i, int j) - { - return _interlockedbittestandreset(i, j); - } - int __sync_lock_release(int *i) - { - return _interlockedbittestandreset(i, 0); - } #ifdef _WIN32 typedef unsigned int uintptr_t; #else From noreply at buildbot.pypy.org Thu Apr 23 00:26:45 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 23 Apr 2015 00:26:45 +0200 (CEST) Subject: [pypy-commit] pypy default: encode unicode dtype spec to ascii Message-ID: <20150422222645.458DD1C0F93@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r76895:2301f0bf4a68 Date: 2015-04-23 01:24 +0300 http://bitbucket.org/pypy/pypy/changeset/2301f0bf4a68/ Log: encode unicode dtype spec to ascii diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -545,6 +545,8 @@ space.gettypefor(boxes.W_VoidBox), shape=shape, subdtype=subdtype, elsize=size) + if space.isinstance_w(w_dtype, space.w_unicode): + w_dtype = space.wrap(space.str_w(w_dtype)) # may raise if invalid if space.is_none(w_dtype): return cache.w_float64dtype elif space.isinstance_w(w_dtype, w_subtype): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -58,6 +58,7 @@ assert exc.value[0] == 
"there are no fields defined" assert dtype('int8').num == 1 + assert dtype(u'int8').num == 1 assert dtype('int8').name == 'int8' assert dtype('void').name == 'void' assert dtype(int).fields is None From noreply at buildbot.pypy.org Thu Apr 23 04:25:05 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 23 Apr 2015 04:25:05 +0200 (CEST) Subject: [pypy-commit] pypy default: fix 2301f0bf4a68 Message-ID: <20150423022505.E15531C140B@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r76896:a007e985b031 Date: 2015-04-23 03:24 +0100 http://bitbucket.org/pypy/pypy/changeset/a007e985b031/ Log: fix 2301f0bf4a68 diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -545,13 +545,13 @@ space.gettypefor(boxes.W_VoidBox), shape=shape, subdtype=subdtype, elsize=size) - if space.isinstance_w(w_dtype, space.w_unicode): - w_dtype = space.wrap(space.str_w(w_dtype)) # may raise if invalid if space.is_none(w_dtype): return cache.w_float64dtype - elif space.isinstance_w(w_dtype, w_subtype): + if space.isinstance_w(w_dtype, w_subtype): return w_dtype - elif space.isinstance_w(w_dtype, space.w_str): + if space.isinstance_w(w_dtype, space.w_unicode): + w_dtype = space.wrap(space.str_w(w_dtype)) # may raise if invalid + if space.isinstance_w(w_dtype, space.w_str): name = space.str_w(w_dtype) if _check_for_commastring(name): return dtype_from_spec(space, w_dtype) From noreply at buildbot.pypy.org Thu Apr 23 05:56:43 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 23 Apr 2015 05:56:43 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: create stub for np.can_cast() and copy test from numpy Message-ID: <20150423035643.769351C0661@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76897:32155f8c346e Date: 2015-04-23 02:03 +0100 http://bitbucket.org/pypy/pypy/changeset/32155f8c346e/ Log: create stub for np.can_cast() 
and copy test from numpy diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -21,6 +21,7 @@ 'count_nonzero': 'arrayops.count_nonzero', 'dot': 'arrayops.dot', 'result_type': 'arrayops.result_type', + 'can_cast': 'arrayops.can_cast', 'where': 'arrayops.where', 'set_string_function': 'appbridge.set_string_function', diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -310,3 +310,7 @@ space.call_function(space.gettypefor(descriptor.W_Dtype), w_arg)) result = ufuncs.find_binop_result_dtype(space, result, dtype) return result + + at unwrap_spec(casting=str) +def can_cast(space, w_from, w_totype, casting='safe'): + return space.w_True diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1817,7 +1817,7 @@ s[...] 
= 2 v = s.view(x.__class__) assert (v == 2).all() - + def test_tolist_scalar(self): from numpy import dtype int32 = dtype('int32').type @@ -3973,3 +3973,80 @@ a = ndarray._from_shape_and_storage((2,), addr, int, sz, strides=[2 * base.strides[0]]) assert a[1] == 3 + + def test_can_cast(self): + import numpy as np + + assert np.can_cast(np.int32, np.int64) + assert np.can_cast(np.float64, complex) + assert not np.can_cast(np.complex64, float) + + assert np.can_cast('i8', 'f8') + assert not np.can_cast('i8', 'f4') + assert np.can_cast('i4', 'S11') + + assert np.can_cast('i8', 'i8', 'no') + assert not np.can_cast('i8', 'no') + + assert np.can_cast('i8', 'equiv') + assert not np.can_cast('i8', 'equiv') + + assert np.can_cast('i8', 'safe') + assert not np.can_cast('i4', 'safe') + + assert np.can_cast('i4', 'same_kind') + assert not np.can_cast('u4', 'same_kind') + + assert np.can_cast('u4', 'unsafe') + + assert np.can_cast('bool', 'S5') + assert not np.can_cast('bool', 'S4') + + assert np.can_cast('b', 'S4') + assert not np.can_cast('b', 'S3') + + assert np.can_cast('u1', 'S3') + assert not np.can_cast('u1', 'S2') + assert np.can_cast('u2', 'S5') + assert not np.can_cast('u2', 'S4') + assert np.can_cast('u4', 'S10') + assert not np.can_cast('u4', 'S9') + assert np.can_cast('u8', 'S20') + assert not np.can_cast('u8', 'S19') + + assert np.can_cast('i1', 'S4') + assert not np.can_cast('i1', 'S3') + assert np.can_cast('i2', 'S6') + assert not np.can_cast('i2', 'S5') + assert np.can_cast('i4', 'S11') + assert not np.can_cast('i4', 'S10') + assert np.can_cast('i8', 'S21') + assert not np.can_cast('i8', 'S20') + + assert np.can_cast('bool', 'S5') + assert not np.can_cast('bool', 'S4') + + assert np.can_cast('b', 'U4') + assert not np.can_cast('b', 'U3') + + assert np.can_cast('u1', 'U3') + assert not np.can_cast('u1', 'U2') + assert np.can_cast('u2', 'U5') + assert not np.can_cast('u2', 'U4') + assert np.can_cast('u4', 'U10') + assert not np.can_cast('u4', 'U9') + assert 
np.can_cast('u8', 'U20') + assert not np.can_cast('u8', 'U19') + + assert np.can_cast('i1', 'U4') + assert not np.can_cast('i1', 'U3') + assert np.can_cast('i2', 'U6') + assert not np.can_cast('i2', 'U5') + assert np.can_cast('i4', 'U11') + assert not np.can_cast('i4', 'U10') + assert np.can_cast('i8', 'U21') + assert not np.can_cast('i8', 'U20') + + raises(TypeError, np.can_cast, 'i4', None) + raises(TypeError, np.can_cast, None, 'i4') + From noreply at buildbot.pypy.org Thu Apr 23 09:57:21 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 23 Apr 2015 09:57:21 +0200 (CEST) Subject: [pypy-commit] pypy object-dtype2: add upstream compatible logic to allow True for 'foo' != np.arange(3)[1] in a shorcut hack Message-ID: <20150423075721.64E951C12C8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: object-dtype2 Changeset: r76898:cbba3abb9864 Date: 2015-04-23 10:52 +0300 http://bitbucket.org/pypy/pypy/changeset/cbba3abb9864/ Log: add upstream compatible logic to allow True for 'foo' != np.arange(3)[1] in a shorcut hack diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -37,6 +37,11 @@ assert (b == a).all() c = np.array([1, 2, 3]) assert (a[0] != c[0]) + assert (c[0] != a[0]) + assert (a[0] > c[0]) + assert (not a[0] < c[0]) + assert (c[0] < a[0]) + assert (not c[0] > a[0]) def test_logical_ufunc(self): import numpy as np diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -441,8 +441,15 @@ elif w_ldtype.is_str() and w_rdtype.is_str() and \ self.comparison_func: pass - elif (w_ldtype.is_str() or w_rdtype.is_str()) and \ + elif (w_ldtype.is_str()) and \ self.comparison_func and w_out is None: + if self.name in ('equal', 'less_equal', 'less'): + return space.wrap(False) + 
return space.wrap(True) + elif (w_rdtype.is_str()) and \ + self.comparison_func and w_out is None: + if self.name in ('not_equal','less', 'less_equal'): + return space.wrap(True) return space.wrap(False) elif w_ldtype.is_flexible() or w_rdtype.is_flexible(): if self.comparison_func: From noreply at buildbot.pypy.org Thu Apr 23 18:14:04 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 23 Apr 2015 18:14:04 +0200 (CEST) Subject: [pypy-commit] pypy default: hack to support pytest 2.7 Message-ID: <20150423161404.4ECF11C0755@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r76899:a24deef350ce Date: 2015-04-23 17:13 +0100 http://bitbucket.org/pypy/pypy/changeset/a24deef350ce/ Log: hack to support pytest 2.7 diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -99,6 +99,7 @@ debug_excs = getattr(operr, 'debug_excs', []) if debug_excs: self._excinfo = debug_excs[0] + self.value = self.operr.errorstr(self.space) # XXX def __repr__(self): return "" % self.operr.errorstr(self.space) From noreply at buildbot.pypy.org Thu Apr 23 19:03:08 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 23 Apr 2015 19:03:08 +0200 (CEST) Subject: [pypy-commit] pypy object-dtype2: adjust tests to pass -A, leave fixing pypy to after branch merge Message-ID: <20150423170308.F04511C12C8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: object-dtype2 Changeset: r76900:7f4e8ce39e3a Date: 2015-04-23 18:41 +0300 http://bitbucket.org/pypy/pypy/changeset/7f4e8ce39e3a/ Log: adjust tests to pass -A, leave fixing pypy to after branch merge diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -45,6 +45,10 @@ def test_logical_ufunc(self): import numpy as np + import sys + + if '__pypy__' in 
sys.builtin_module_names: + skip('need to refactor use of raw_xxx_op in types to make this work') a = np.array(["foo"], dtype=object) b = np.array([1], dtype=object) d = np.array([complex(1, 10)], dtype=object) @@ -75,12 +79,15 @@ def test_complex_op(self): import numpy as np + import sys a = np.array(['abc', 'def'], dtype=object) b = np.array([1, 2, 3], dtype=object) c = np.array([complex(1, 1), complex(1, -1)], dtype=object) for arg in (a,b,c): assert (arg == np.real(arg)).all() assert (0 == np.imag(arg)).all() + if '__pypy__' in sys.builtin_module_names: + skip('not implemented yet') raises(AttributeError, np.conj, a) res = np.conj(b) assert (res == b).all() @@ -102,6 +109,7 @@ def test_array_interface(self): import numpy as np + import sys class DummyArray(object): def __init__(self, interface, base=None): self.__array_interface__ = interface @@ -111,6 +119,8 @@ interface = dict(a.__array_interface__) interface['shape'] = tuple([3]) interface['strides'] = tuple([0]) + if '__pypy__' in sys.builtin_module_names: + skip('not implemented yet') c = np.array(DummyArray(interface, base=a)) c.dtype = a.dtype #print c @@ -118,6 +128,7 @@ def test_for_object_scalar_creation(self): import numpy as np + import sys a = np.object_() b = np.object_(3) b2 = np.object_(3.0) @@ -129,6 +140,8 @@ assert type(c) is np.ndarray assert c.dtype == object assert type(d) is type(None) + if '__pypy__' in sys.builtin_module_names: + skip('not implemented yet') e = np.object_([None, {}, []]) assert e.dtype == object @@ -137,36 +150,13 @@ # test segfaulted during ObjectType.store due to # missing gc hooks import numpy as np - dt = np.dtype([('x', int), ('y', np.object_)]) + import sys + ytype = np.object_ + if '__pypy__' in sys.builtin_module_names: + ytype = str + dt = np.dtype([('x', int), ('y', ytype)]) # Correct way a = np.array([(1, 'object')], dt) # Wrong way - should complain about writing buffer to object dtype - raises(np.array, [1, 'object'], dt) + raises(ValueError, np.array, [1, 
'object'], dt) - def test_zeros(self): - skip('move this to unicode test when we have them') - import numpy as np - def sixu(s): - return unicode(s, 'unicode_escape') - - def buffer_len(arr): - if isinstance(arr, np.ndarray): - return len(arr.data) - return len(buffer(arr)) - - def content_check(ua, ua_scalar, nbtyes): - assert int(ua.dtype.str[2:]) == ulen - assert buffer_len(ua) == 4*ulen - assert ua_scalar -- sixu('') - assert ua_scalar.encode('ascii') == '' - assert buffer_len(ua_scalar) == 0 - - for ulen in [1, 2, 1099]: - ua = np.zeros((), dtype='U%s' % ulen) - content_check(ua, ua[()], 4 * ulen) - ua = zeros((2,), dtype='U%s' % ulen) - content_check(ua, ua[0], 4 * ulen*2) - content_check(ua, ua[1], 4 * ulen*2) - ua = zeros((2, 3, 4), dtype='U%s' % ulen) - content_check(ua, ua[0, 0, 0], 4 * ulen * 2 * 3 * 4) - content_check(ua, ua[-1, -1, -1], 4 * ulen * 2 * 3 * 4) From noreply at buildbot.pypy.org Thu Apr 23 19:03:10 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 23 Apr 2015 19:03:10 +0200 (CEST) Subject: [pypy-commit] pypy default: fix/update whatsnew-head with an eye towards PyPy 2.6 Message-ID: <20150423170310.298D01C12C8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r76901:3d4aa967c95c Date: 2015-04-23 19:57 +0300 http://bitbucket.org/pypy/pypy/changeset/3d4aa967c95c/ Log: fix/update whatsnew-head with an eye towards PyPy 2.6 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,12 +5,46 @@ .. this is a revision shortly after release-2.5.1 .. startrev: cb01edcb59414d9d93056e54ed060673d24e67c1 -Issue #2017: on non-Linux-x86 platforms, reduced the memory impact of +issue2005: +ignore errors on closing random file handles while importing a module (cpython compatibility) + +issue2013: +added constants to _ssl for TLS 1.1 and 1.2 + +issue2014: +Add PyLong_FromUnicode to cpyext. 
+ +issue2017: +On non-Linux-x86 platforms, reduced the memory impact of creating a lot of greenlets/tasklets. Particularly useful on Win32 and on ARM, where you used to get a MemoryError after only 2500-5000 greenlets (the 32-bit address space is exhausted). +Update gdb_pypy for python3 (gdb comatability) + +Merged rstrategies into rpython which provides a library for Storage Strategies + +Support unicode strings in numpy.dtype creation i.e. np.dtype(u'int64') + +Various rpython cleanups for vmprof support + +issue2019: +Fix isspace as called by rpython unicode.strip() + +issue2023: +In the cpyext 'Concrete Object Layer' API, +don't call methods on the object (which can be overriden), +but directly on the concrete base type. + +issue2029: +Hide the default_factory attribute in a dict + +issue2027: +Better document pyinteractive and add --withmod-time + .. branch: gc-incminimark-pinning-improve + +branch gc-incminimark-pinning-improve: Object Pinning is now used in `bz2` and `rzlib` (therefore also affects Python's `zlib`). In case the data to compress/decompress is inside the nursery (incminimark) it no longer needs to create a non-moving copy of it. This saves @@ -18,8 +52,15 @@ is introduced (`PYPY_GC_MAX_PINNED`) primarily for debugging purposes. .. branch: refactor-pycall + +branch refactor-pycall: Make `*`-unpacking in RPython function calls completely equivalent to passing the tuple's elements as arguments. In other words, `f(*(a, b))` now behaves exactly like `f(a, b)`. .. branch: issue2018 +branch issue2018: +Allow prebuilt rpython dict with function values + +.. branch: vmprof +.. 
Merged but then backed out, hopefully it will return as vmprof2 From noreply at buildbot.pypy.org Thu Apr 23 19:03:11 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 23 Apr 2015 19:03:11 +0200 (CEST) Subject: [pypy-commit] pypy object-dtype2: merge default into branch Message-ID: <20150423170311.9B1021C12C8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: object-dtype2 Changeset: r76902:fcc3a72a167e Date: 2015-04-23 19:58 +0300 http://bitbucket.org/pypy/pypy/changeset/fcc3a72a167e/ Log: merge default into branch diff too long, truncating to 2000 out of 2315 lines diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,12 +5,46 @@ .. this is a revision shortly after release-2.5.1 .. startrev: cb01edcb59414d9d93056e54ed060673d24e67c1 -Issue #2017: on non-Linux-x86 platforms, reduced the memory impact of +issue2005: +ignore errors on closing random file handles while importing a module (cpython compatibility) + +issue2013: +added constants to _ssl for TLS 1.1 and 1.2 + +issue2014: +Add PyLong_FromUnicode to cpyext. + +issue2017: +On non-Linux-x86 platforms, reduced the memory impact of creating a lot of greenlets/tasklets. Particularly useful on Win32 and on ARM, where you used to get a MemoryError after only 2500-5000 greenlets (the 32-bit address space is exhausted). +Update gdb_pypy for python3 (gdb comatability) + +Merged rstrategies into rpython which provides a library for Storage Strategies + +Support unicode strings in numpy.dtype creation i.e. np.dtype(u'int64') + +Various rpython cleanups for vmprof support + +issue2019: +Fix isspace as called by rpython unicode.strip() + +issue2023: +In the cpyext 'Concrete Object Layer' API, +don't call methods on the object (which can be overriden), +but directly on the concrete base type. + +issue2029: +Hide the default_factory attribute in a dict + +issue2027: +Better document pyinteractive and add --withmod-time + .. 
branch: gc-incminimark-pinning-improve + +branch gc-incminimark-pinning-improve: Object Pinning is now used in `bz2` and `rzlib` (therefore also affects Python's `zlib`). In case the data to compress/decompress is inside the nursery (incminimark) it no longer needs to create a non-moving copy of it. This saves @@ -18,8 +52,15 @@ is introduced (`PYPY_GC_MAX_PINNED`) primarily for debugging purposes. .. branch: refactor-pycall + +branch refactor-pycall: Make `*`-unpacking in RPython function calls completely equivalent to passing the tuple's elements as arguments. In other words, `f(*(a, b))` now behaves exactly like `f(a, b)`. .. branch: issue2018 +branch issue2018: +Allow prebuilt rpython dict with function values + +.. branch: vmprof +.. Merged but then backed out, hopefully it will return as vmprof2 diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -559,9 +559,11 @@ if space.is_none(w_dtype): return cache.w_float64dtype - elif space.isinstance_w(w_dtype, w_subtype): + if space.isinstance_w(w_dtype, w_subtype): return w_dtype - elif space.isinstance_w(w_dtype, space.w_str): + if space.isinstance_w(w_dtype, space.w_unicode): + w_dtype = space.wrap(space.str_w(w_dtype)) # may raise if invalid + if space.isinstance_w(w_dtype, space.w_str): name = space.str_w(w_dtype) if _check_for_commastring(name): return dtype_from_spec(space, w_dtype) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -58,6 +58,7 @@ assert exc.value[0] == "there are no fields defined" assert dtype('int8').num == 1 + assert dtype(u'int8').num == 1 assert dtype('int8').name == 'int8' assert dtype('void').name == 'void' assert dtype(int).fields is None diff --git a/pypy/tool/pytest/appsupport.py 
b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -99,6 +99,7 @@ debug_excs = getattr(operr, 'debug_excs', []) if debug_excs: self._excinfo = debug_excs[0] + self.value = self.operr.errorstr(self.space) # XXX def __repr__(self): return "" % self.operr.errorstr(self.space) diff --git a/rpython/doc/rlib.rst b/rpython/doc/rlib.rst --- a/rpython/doc/rlib.rst +++ b/rpython/doc/rlib.rst @@ -128,6 +128,14 @@ a hierarchy of Address classes, in a typical static-OO-programming style. +rstrategies +----------- + +The :source:`rpython/rlib/rstrategies` module contains a library to implement storage strategies in +RPython VMs. The library is language-independent and extensible. +More details and examples can be found in the :doc:`rstrategies documentation `. + + streamio -------- diff --git a/rpython/doc/rstrategies.rst b/rpython/doc/rstrategies.rst new file mode 100644 --- /dev/null +++ b/rpython/doc/rstrategies.rst @@ -0,0 +1,209 @@ +rstrategies +=========== + +A library to implement storage strategies in VMs based on the RPython +toolchain. rstrategies can be used in VMs for any language or language +family. + +This library has been developed as part of a Masters Thesis by `Anton +Gulenko `__. + +The original paper describing the optimization "Storage Strategies for +collections in dynamically typed languages" by C.F. Bolz, L. Diekmann +and L. Tratt can be found +`here `__. + +So far, this library has been adpoted by 3 VMs: +`RSqueak `__, +`Topaz `__ (`Forked +here `__) and +`Pycket `__ (`Forked +here `__). + +Concept +------- + +Collections are often used homogeneously, i.e. they contain only objects +of the same type. Primitive numeric types like ints or floats are +especially interesting for optimization. These cases can be optimized by +storing the unboxed data of these objects in consecutive memory. This is +done by letting a special "strategy" object handle the entire storage of +a collection. 
The collection object holds two separate references: one +to its strategy and one to its storage. Every operation on the +collection is delegated to the strategy, which accesses the storage when +needed. The strategy can be switched to a more suitable one, which might +require converting the storage array. + +Usage +~~~~~ + +The following are the steps needed to integrated rstrategies in an +RPython VM. Because of the special nature of this library it is not +enough to simply call some API methods; the library must be integrated +within existing VM classes using a metaclass, mixins and other +meta-programming techniques. + +The sequence of steps described here is something like a "setup +walkthrough", and might be a bit abstract. To see a concrete example, +look at +`SingletonStorageStrategy `__, +`StrategyFactory `__ +and +`W\_PointersObject `__ +from the `RSqueak VM `__. The +code is also well commented. + +Basics +------- + +Currently the rstrategies library supports fixed sized and variable +sized collections. This can be used to optimize a wide range of +primitive data structures like arrays, lists or regular objects. Any of +these are called 'collections' in this context. The VM should have a +central class or class hierarchy for collections. In order to extend +these classes and use strategies, the library needs accessor methods for +two attributes of collection objects: strategy and storage. The easiest +way is adding the following line to the body of the root collection +class: + +:: + + rstrategies.make_accessors(strategy='strategy', storage='storage') + +This will generate the 4 accessor methods +``_[get/set]_[storage/strategy]()`` for the respective attributes. +Alternatively, implement these methods manually or overwrite the +getters/setters in ``StrategyFactory``. + +Next, the strategy classes must be defined. This requires a small class +hierarchy with a dedicated root class. 
In the definition of this root +class, include the following lines: + +:: + + __metaclass__ = rstrategies.StrategyMetaclass + import_from_mixin(rstrategies.AbstractStrategy) + import_from_mixin(rstrategies.SafeIndexingMixin) + +``import_from_mixin`` can be found in ``rpython.rlib.objectmodel``. If +index-checking is performed safely at other places in the VM, you can +use ``rstrategies.UnsafeIndexingMixin`` instead. If you need your own +metaclass, you can combine yours with the rstrategies one using multiple +inheritance `like +here `__. +Also implement a ``storage_factory()`` method, which returns an instance +of ``rstrategies.StorageFactory``, which is described below. + +An example ``AbstractStrategy`` class, which also stores an additional ``space`` parameter could looks like this: + +:: + + class AbstractStrategy(AbstractStrategy): + _attrs_ = ['space'] + _immutable_fields_ = ['space'] + __metaclass__ = rstrat.StrategyMetaclass + import_from_mixin(rstrat.AbstractStrategy) + import_from_mixin(rstrategies.SafeIndexingMixin) + + def __init__(self, space): + self.space = space + + def strategy_factory(self): + return self.space.strategy_factory + + +Strategy classes +---------------- + +Now you can create the actual strategy classes, subclassing them from +the single root class. The following list summarizes the basic +strategies available. + +- ``EmptyStrategy`` A strategy for empty collections; very efficient, but limited. Does not allocate anything. +- ``SingleValueStrategy`` A strategy for collections containing the same object ``n`` times. Only allocates memory to store the size of the collection. +- ``GenericStrategy`` A non-optimized strategy backed by a generic python list. This is the fallback strategy, since it can store everything, but is not optimized. +- ``WeakGenericStrategy`` Like ``GenericStrategy``, but uses ``weakref`` to hold on weakly to its elements. +- ``SingleTypeStrategy`` Can store a single unboxed type like int or float. 
This is the main optimizing strategy +- ``TaggingStrategy`` Extension of SingleTypeStrategy. Uses a specific value in the value range of the unboxed type to represent one additional, arbitrary object. For example, one of ``float``'s ``NaN`` representations can be used to represent special value like ``nil``. + +There are also intermediate classes, which allow creating new, more +customized strategies. For this, you should get familiar with the code. + +Include one of these mixin classes using ``import_from_mixin``. The +mixin classes contain comments describing methods or fields which are +also required in the strategy class in order to use them. Additionally, +add the ``@rstrategies.strategy(generalize=alist)`` decorator to all +strategy classes. The ``alist`` parameter must contain all strategies, +which the decorated strategy can switch to, if it can not represent a +new element anymore. +`Example `__ +for an implemented strategy. See the other strategy classes behind this +link for more examples. + +An example strategy class for optimized ``int`` storage could look like this: + +:: + + @rstrat.strategy(generalize=[GenericStrategy]) + class IntegerOrNilStrategy(AbstractStrategy): + import_from_mixin(rstrat.TaggingStrategy) + contained_type = model.W_Integer + def wrap(self, val): return self.space.wrap_int(val) + def unwrap(self, w_val): return self.space.unwrap_int(w_val) + def wrapped_tagged_value(self): return self.space.w_nil + def unwrapped_tagged_value(self): return constants.MAXINT + +Strategy Factory +---------------- + +The last part is subclassing ``rstrategies.StrategyFactory``, +overwriting the method ``instantiate_strategy`` if necessary and passing +the strategies root class to the constructor. The factory provides the +methods ``switch_strategy``, ``set_initial_strategy``, +``strategy_type_for`` which can be used by the VM code to use the +mechanism behind strategies. See the comments in the source code. 
+ +The strategy mixins offer the following methods to manipulate the +contents of the collection: + +- basic API + + - ``size`` + +- fixed size API + + - ``store``, ``fetch``, ``slice``, ``store_all``, ``fetch_all`` + +- variable size API + + - ``insert``, ``delete``, ``append``, ``pop`` + +If the collection has a fixed size, simply never use any of the variable +size methods in the VM code. Since the strategies are singletons, these +methods need the collection object as first parameter. For convenience, +more fitting accessor methods should be implemented on the collection +class itself. + +An example strategy factory for the ``AbstractStrategy`` class above could look like this: + +:: + + class StrategyFactory(rstrategies.StrategyFactory): + _attrs_ = ['space'] + _immutable_fields_ = ['space'] + + def __init__(self, space): + self.space = space + rstrat.StrategyFactory.__init__(self, AbstractStrategy) + + def instantiate_strategy(self, strategy_type): + return strategy_type(self.space) + + def strategy_type_for(self, list_w, weak=False): + """ + Helper method for handling weak objects specially + """ + if weak: + return WeakListStrategy + return rstrategies.StrategyFactory.strategy_type_for(self, list_w) + \ No newline at end of file diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -62,6 +62,10 @@ op = operations[i] if op.getopnum() == rop.DEBUG_MERGE_POINT: continue + # ---------- GETFIELD_GC ---------- + if op.getopnum() == rop.GETFIELD_GC: + self.handle_getfield_gc(op) + continue # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- if op.is_malloc(): self.handle_malloc_operation(op) @@ -122,6 +126,18 @@ # ---------- + def handle_getfield_gc(self, op): + """See test_zero_ptr_field_before_getfield(). 
We hope there is + no getfield_gc in the middle of initialization code, but there + shouldn't be, given that a 'new' is already delayed by previous + optimization steps. In practice it should immediately be + followed by a bunch of 'setfields', and the 'pending_zeros' + optimization we do here is meant for this case.""" + self.emit_pending_zeros() + self.newops.append(op) + + # ---------- + def handle_malloc_operation(self, op): opnum = op.getopnum() if opnum == rop.NEW: diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -1031,3 +1031,21 @@ guard_false(i1, descr=guarddescr) [] jump() """) + + def test_zero_ptr_field_before_getfield(self): + # This case may need to be fixed in the metainterp/optimizeopt + # already so that it no longer occurs for rewrite.py. But anyway + # it's a good idea to make sure rewrite.py is correct on its own. 
+ self.check_rewrite(""" + [] + p0 = new(descr=tdescr) + p1 = getfield_gc(p0, descr=tdescr) + jump(p1) + """, """ + [] + p0 = call_malloc_nursery(%(tdescr.size)d) + setfield_gc(p0, 5678, descr=tiddescr) + zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s) + p1 = getfield_gc(p0, descr=tdescr) + jump(p1) + """) diff --git a/rpython/rlib/rstrategies/.coveragerc b/rpython/rlib/rstrategies/.coveragerc new file mode 100644 --- /dev/null +++ b/rpython/rlib/rstrategies/.coveragerc @@ -0,0 +1,10 @@ +# .coveragerc file to control coverage.py (code coverage plugin for pytest) +# Get it here: https://pypi.python.org/pypi/pytest-cov +# Examples: +# $ python -m pytest test --cov rpython.rlib.rstrategies --cov-report html --cov-config .coveragerc + +[run] +omit = + test/* + */__init__.py + logparser.py diff --git a/rpython/rlib/rstrategies/__init__.py b/rpython/rlib/rstrategies/__init__.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rstrategies/__init__.py @@ -0,0 +1,1 @@ +# Empy diff --git a/rpython/rlib/rstrategies/logger.py b/rpython/rlib/rstrategies/logger.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rstrategies/logger.py @@ -0,0 +1,58 @@ + +class LogEntry(object): + def __init__(self): + self.slots = 0 + self.objects = 0 + self.element_typenames = {} + + def add(self, size, element_typename): + self.slots += size + self.objects += 1 + if element_typename: + self.element_typenames[element_typename] = None + + def classnames(self): + return self.element_typenames.keys() + +class Logger(object): + _attrs_ = ["active", "aggregate", "logs"] + _immutable_fields_ = ["active?", "aggregate?", "logs"] + + def __init__(self): + self.active = False + self.aggregate = False + self.logs = {} + + def activate(self, aggregate=False): + self.active = True + self.aggregate = self.aggregate or aggregate + + def log(self, new_strategy, size, cause="", old_strategy="", typename="", element_typename=""): + if self.aggregate: + key = (cause, old_strategy, new_strategy, 
typename) + if key not in self.logs: + self.logs[key] = LogEntry() + entry = self.logs[key] + entry.add(size, element_typename) + else: + element_typenames = [ element_typename ] if element_typename else [] + self.output(cause, old_strategy, new_strategy, typename, size, 1, element_typenames) + + def print_aggregated_log(self): + if not self.aggregate: + return + for key, entry in self.logs.items(): + cause, old_strategy, new_strategy, typename = key + slots, objects, element_typenames = entry.slots, entry.objects, entry.classnames() + self.output(cause, old_strategy, new_strategy, typename, slots, objects, element_typenames) + + def output(self, cause, old_strategy, new_strategy, typename, slots, objects, element_typenames): + old_strategy_string = "%s -> " % old_strategy if old_strategy else "" + classname_string = " of %s" % typename if typename else "" + element_string = (" elements: " + " ".join(element_typenames)) if element_typenames else "" + format = (cause, old_strategy_string, new_strategy, classname_string, slots, objects, element_string) + self.do_print("%s (%s%s)%s size %d objects %d%s" % format) + + def do_print(self, str): + # Hook to increase testability + print str diff --git a/rpython/rlib/rstrategies/logparser.py b/rpython/rlib/rstrategies/logparser.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rstrategies/logparser.py @@ -0,0 +1,694 @@ + +import re, os, sys, operator + +""" +This script parses a log produced by rstrategies_logger.py into a graph and converts it to various outputs. +The most useful outputs are the dot* commands producing a visualization of the log using the dot-command of graphviz. +Every strategy is a node in the graph, and the edges are collections or objects that transition between +two strategies at some point during the log. +Artificial nodes are created for log entries without an explicit source node. These are the events when a +collection is created. 
+The input to this script is a logfile, a command and optional flags. +If the name of the logfile includes one of the AVAILABLE_VMS as a substring, the first three global variables +are automatically configured. +The script should work without these configurations, but the output will probably not be that pretty. +To avoid errors, the -a flag is implied when running without proper configuration. +""" + +# This should contain a full list of storage nodes (strategies). +# All strategies not included here will be combined into a single "Other"-node, if the -a flag is not given. +STORAGE_NODES = [] + +# This allows arbitrary renamings of storage strategy nodes +NODE_RENAMINGS = {} + +# Artificial storage-source nodes are automatically named like the associated operation. +# This dict allows customizing the names of these nodes. +STORAGE_SOURCES = {} + +def SET_VM(vm_name): + global STORAGE_NODES + global NODE_RENAMINGS + global STORAGE_SOURCES + if vm_name == 'RSqueak': + STORAGE_NODES = ['List', 'WeakList', 'SmallIntegerOrNil', 'FloatOrNil', 'AllNil'] + NODE_RENAMINGS = dict((x+'Strategy', x) for x in STORAGE_NODES) + STORAGE_SOURCES = {'Filledin': 'Image Loading', 'Initialized': 'Object Creation'} + elif vm_name == 'Pycket': + STORAGE_SOURCES = {'Created': 'Array Creation'} + # TODO + elif vm_name == 'Topaz': + # TODO + pass + else: + raise Exception("Unhandled vm name %s" % vm_name) + +AVAILABLE_VMS = ['RSqueak', 'Pycket', 'Topaz'] + +def configure_vm(logfile, flags): + vm_config_name = None + for vm_name in AVAILABLE_VMS: + if vm_name in logfile: + vm_config_name = vm_name + break + if vm_config_name is not None: + print "Using VM configuration %s" % vm_name + SET_VM(vm_name) + else: + print "No VM configuration found in filename '%s'. Available configurations: %s" % \ + (logfile, AVAILABLE_VMS) + print "Please add new VM configuration or rename logfile. Turning on -a flag to avoid errors." 
+ flags.allstorage = True + +# ==================================================================== +# ======== Logfile parsing +# ==================================================================== + +def percent(part, total): + if total == 0: + return 0 + return float(part)*100 / total + +def parse(filename, flags, callback): + parsed_entries = 0 + if filename == "-": + opener = lambda: sys.stdin + else: + opener = lambda: open(filename, 'r', 1) + with opener() as file: + while True: + line = file.readline() + if len(line) == 0: + break + entry = parse_line(line, flags) + if entry: + parsed_entries += 1 + callback(entry) + return parsed_entries + +line_pattern = re.compile("^(?P\w+) \(((?P\w+) -> )?(?P\w+)\)( of (?P.+))? size (?P[0-9]+)( objects (?P[0-9]+))?( elements: (?P.+( .+)*))?$") + +def parse_line(line, flags): + result = line_pattern.match(line) + if result is None: + if flags.verbose: + print "Could not parse line: %s" % line[:-1] + return None + operation = str(result.group('operation')) + old_storage = result.group('old') + new_storage = str(result.group('new')) + classname = str(result.group('classname')) + size = int(result.group('size')) + objects = result.group('objects') + objects = int(objects) if objects else 1 + classnames = result.group('classnames') + if classnames is not None: + classnames = classnames.split(' ') + classnames = set(classnames) + else: + classnames = set() + + is_storage_source = old_storage is None + if is_storage_source: + if operation in STORAGE_SOURCES: + old_storage = STORAGE_SOURCES[operation] + else: + print "Using operation %s as storage source." 
% operation + old_storage = str(old_storage) + + if new_storage in NODE_RENAMINGS: + new_storage = NODE_RENAMINGS[new_storage] + if old_storage in NODE_RENAMINGS: + old_storage = NODE_RENAMINGS[old_storage] + + return LogEntry(operation, old_storage, new_storage, classname, size, objects, classnames, is_storage_source) + +class LogEntry(object): + + def __init__(self, operation, old_storage, new_storage, classname, size, objects, classnames, is_storage_source): + self.operation = operation + self.old_storage = old_storage + self.new_storage = new_storage + self.classname = classname + self.size = size + self.objects = objects + self.classnames = classnames + self.is_storage_source = is_storage_source + assert old_storage != new_storage, "old and new storage identical in log entry: %s" % self + + def full_key(self): + return (self.operation, self.old_storage, self.new_storage) + + def __lt__(self, other): + return self.classname < other.classname + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def __str__(self): + old_storage_string = "%s -> " % self.old_storage if self.old_storage else "" + classname_string = " of %s" % self.classname if self.classname else "" + objects_string = " objects %d" % self.objects if self.objects > 1 else "" + return "%s (%s%s)%s size %d%s" % (self.operation, old_storage_string, self.new_storage, classname_string, self.size, objects_string) + +# ==================================================================== +# ======== Graph parsing +# ==================================================================== + +class Operations(object): + + def __init__(self, objects=0, slots=0, element_classnames=[]): + self.objects = objects + self.slots = slots + self.element_classnames = set(element_classnames) + + def __str__(self, total=None): + if self.objects == 0: + avg_slots = 0 + else: + avg_slots = float(self.slots) / self.objects + if total is not None and total.slots != 0: + percent_slots = " 
(%.1f%%)" % percent(self.slots, total.slots) + else: + percent_slots = "" + if total is not None and total.objects != 0: + percent_objects = " (%.1f%%)" % percent(self.objects, total.objects) + else: + percent_objects = "" + slots = format(self.slots, ",d") + objects = format(self.objects, ",d") + classnames = (" [ elements: %s ]" % ' '.join([str(x) for x in self.element_classnames])) \ + if len(self.element_classnames) else "" + return "%s%s slots in %s%s objects (avg size: %.1f)%s" % (slots, percent_slots, objects, percent_objects, avg_slots, classnames) + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def add_log_entry(self, entry): + self.slots = self.slots + entry.size + self.objects = self.objects + entry.objects + self.element_classnames |= entry.classnames + + def __sub__(self, other): + return Operations(self.objects - other.objects, self.slots - other.slots) + + def __add__(self, other): + return Operations(self.objects + other.objects, self.slots + other.slots) + + def __lt__(self, other): + return self.slots < other.slots + + def empty(self): + return self.objects == 0 and self.slots == 0 + + def prefixprint(self, key="", total=None): + if not self.empty(): + print "%s%s" % (key, self.__str__(total)) + +class ClassOperations(object): + + def __init__(self): + self.classes = {} + + def cls(self, name): + if name not in self.classes: + self.classes[name] = Operations() + return self.classes[name] + + def total(self): + return reduce(operator.add, self.classes.values(), Operations()) + + def __str__(self): + return "ClassOperations(%s)" % self.classes + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def __add__(self, other): + result = ClassOperations() + result.classes = dict(self.classes) + for classname, other_class in other.classes.items(): + result.cls(classname) # Make sure exists. 
+ result.classes[classname] += other_class + return result + + def __sub__(self, other): + result = ClassOperations() + result.classes = dict(self.classes) + for classname, other_class in other.classes.items(): + result.cls(classname) # Make sure exists. + result.classes[classname] -= other_class + return result + +class StorageEdge(object): + + def __init__(self, operation="None", origin=None, target=None): + self.operation = operation + self.classes = ClassOperations() + self.origin = origin + self.target = target + self.is_storage_source = False + + def full_key(self): + return (self.operation, self.origin.name, self.target.name) + + def cls(self, classname): + return self.classes.cls(classname) + + def total(self): + return self.classes.total() + + def notify_nodes(self): + self.origin.note_outgoing(self) + self.target.note_incoming(self) + + def add_log_entry(self, entry): + self.cls(entry.classname).add_log_entry(entry) + if entry.is_storage_source: + self.is_storage_source = True + + def as_log_entries(self): + entries = [] + for classname, ops in self.classes.classes.items(): + origin = None if self.is_storage_source else self.origin.name + entry = LogEntry(self.operation, origin, self.target.name, classname, + ops.slots, ops.objects, ops.element_classnames, self.is_storage_source) + entries.append(entry) + return entries + + def __lt__(self, other): + return self.full_key() < other.full_key() + + def __str__(self): + return "[%s %s -> %s]" % (self.operation, self.origin, self.target) + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def __add__(self, other): + origin = self.origin if self.origin is not None else other.origin + target = self.target if self.target is not None else other.target + result = StorageEdge(self.operation, origin, target) + result.classes += self.classes + other.classes + return result + + def __sub__(self, other): + origin = self.origin if self.origin is not None else other.origin + target = 
self.target if self.target is not None else other.target + result = StorageEdge(self.operation, origin, target) + result.classes += self.classes - other.classes + return result + +class StorageNode(object): + + def __init__(self, name): + self.name = name + self.incoming = set() + self.outgoing = set() + + def note_incoming(self, edge): + assert edge.target is self + if edge not in self.incoming: + self.incoming.add(edge) + + def note_outgoing(self, edge): + assert edge.origin is self + if edge not in self.outgoing: + self.outgoing.add(edge) + + def incoming_edges(self, operation): + return filter(lambda x: x.operation == operation, self.incoming) + + def outgoing_edges(self, operation): + return filter(lambda x: x.operation == operation, self.outgoing) + + def sum_incoming(self, operation): + return reduce(operator.add, self.incoming_edges(operation), StorageEdge(operation)) + + def sum_outgoing(self, operation): + return reduce(operator.add, self.outgoing_edges(operation), StorageEdge(operation)) + + def sum_all_incoming(self): + return reduce(operator.add, self.incoming, StorageEdge()) + + def sum_all_outgoing(self): + return reduce(operator.add, self.outgoing, StorageEdge()) + + def __str__(self): + return self.name + + def __repr__(self): + return "%s(%s)" % (self.__str__(), object.__repr__(self)) + + def merge_edge_sets(self, set1, set2, key_slot): + getter = lambda edge: edge.__dict__[key_slot] + set_dict = dict([(getter(edge), edge) for edge in set1]) + for edge in set2: + key = getter(edge) + if key not in set_dict: + set_dict[key] = edge + else: + set_dict[key] += edge + return set(set_dict.values()) + + def __add__(self, other): + result = StorageNode("%s %s" % (self.name, other.name)) + result.incoming = self.merge_edge_sets(self.incoming, other.incoming, "origin") + # TODO bad code + for edge in result.incoming: + edge.target = result + result.outgoing = self.merge_edge_sets(self.outgoing, other.outgoing, "target") + for edge in result.outgoing: + 
edge.origin = result + return result + + def __lt__(self, other): + return self.name < other.name + + def is_artificial(self): + for outgoing in self.outgoing: + if outgoing.is_storage_source: + return True + return False + + def is_storage_node(self): + return self.is_artificial() or self.name in STORAGE_NODES + + def dot_name(self): + return self.name.replace(" ", "_") + +class StorageGraph(object): + + def __init__(self): + self.nodes = {} + self.edges = {} + self.operations = set() + + def node(self, name): + if name not in self.nodes: + self.nodes[name] = StorageNode(name) + return self.nodes[name] + + def assert_sanity(self): + visited_edges = set() + for node in self.nodes.values(): + for edge in node.incoming: + assert edge in self.edges.values(), "Edge not in graph's edges: %s" % edge + visited_edges.add(edge) + if not edge.target is node: + print "Wrong edge target: %s\nIncoming edge: %s\nIn node: %s" % (edge.target, edge, node) + assert False + if not edge in edge.origin.outgoing: + print "Edge not in origin's outgoing: %s\nIncoming edge: %s\nIn node: %s" % (edge.origin.outgoing, edge, node) + assert False + for edge in node.outgoing: + assert edge in self.edges.values(), "Edge not in graph's edges: %s" % edge + visited_edges.add(edge) + if not edge.origin is node: + print "Wrong edge origin: %s\nOutgoing edge: %s\nIn node: %s" % (edge.origin, edge, node) + assert False + if not edge in edge.target.incoming: + print "Edge not in origin's incoming: %s\nOutgoing edge: %s\nIn node: %s" % (edge.target.incoming, edge, node) + assert False + assert len(visited_edges) == len(self.edges.values()), "Not all of graph's edges visited." 
+ + def add_log_entry(self, log_entry): + self.operations.add(log_entry.operation) + key = log_entry.full_key() + if key not in self.edges: + edge = StorageEdge(log_entry.operation, self.node(log_entry.old_storage), self.node(log_entry.new_storage)) + self.edges[key] = edge + edge.notify_nodes() + self.edges[key].add_log_entry(log_entry) + + def collapse_nodes(self, collapsed_nodes, new_name=None): + if len(collapsed_nodes) == 0: + return + for node in collapsed_nodes: + del self.nodes[node.name] + for edge in node.incoming: + del self.edges[edge.full_key()] + for edge in node.outgoing: + del self.edges[edge.full_key()] + new_node = reduce(operator.add, collapsed_nodes) + if new_name is not None: + new_node.name = new_name + self.nodes[new_node.name] = new_node + # TODO bad code + for node in collapsed_nodes: + for edge in node.incoming: + edge.origin.outgoing.remove(edge) + new_edges = filter(lambda filtered: filtered.origin == edge.origin, new_node.incoming) + assert len(new_edges) == 1 + edge.origin.outgoing.add(new_edges[0]) + for edge in node.outgoing: + edge.target.incoming.remove(edge) + new_edges = filter(lambda filtered: filtered.target == edge.target, new_node.outgoing) + assert len(new_edges) == 1 + edge.target.incoming.add(new_edges[0]) + for edge in new_node.incoming: + self.edges[edge.full_key()] = edge + for edge in new_node.outgoing: + self.edges[edge.full_key()] = edge + self.assert_sanity() + + def collapse_nonstorage_nodes(self, new_name=None): + nodes = filter(lambda x: not x.is_storage_node(), self.nodes.values()) + self.collapse_nodes(nodes, new_name) + + def sorted_nodes(self): + nodes = self.nodes.values() + nodes.sort() + return nodes + +def make_graph(logfile, flags): + graph = StorageGraph() + def callback(entry): + graph.add_log_entry(entry) + parse(logfile, flags, callback) + graph.assert_sanity() + return graph + +# ==================================================================== +# ======== Command - Summarize log content +# 
==================================================================== + +def command_summarize(logfile, flags): + graph = make_graph(logfile, flags) + if not flags.allstorage: + graph.collapse_nonstorage_nodes() + for node in graph.sorted_nodes(): + node.print_summary(flags, graph.operations) + +def StorageNode_print_summary(self, flags, all_operations): + print "\n%s:" % self.name + sum = StorageEdge() + total_incoming = self.sum_all_incoming().total() if flags.percent else None + + print "\tIncoming:" + for operation in all_operations: + if flags.detailed: + edges = [ (edge.origin.name, edge) for edge in self.incoming_edges(operation) ] + else: + edges = [ (operation, self.sum_incoming(operation)) ] + for edgename, edge in edges: + edge.print_with_name("\t\t\t", edgename, total_incoming, flags) + sum += edge + + print "\tOutgoing:" + for operation in all_operations: + if flags.detailed: + edges = [ (edge.target.name, edge) for edge in self.outgoing_edges(operation) ] + else: + edges = [ (operation, self.sum_outgoing(operation)) ] + for edgename, edge in edges: + edge.print_with_name("\t\t\t", edgename, total_incoming, flags) + sum -= edge + + sum.print_with_name("\t", "Remaining", total_incoming, flags) + +StorageNode.print_summary = StorageNode_print_summary + +def StorageEdge_print_with_name(self, prefix, edgename, total_reference, flags): + if flags.classes: + print "%s%s:" % (prefix, edgename) + prefix += "\t\t" + operations = self.classes.classes.items() + operations.sort(reverse=True, key=operator.itemgetter(1)) + else: + operations = [ (edgename, self.total()) ] + for classname, classops in operations: + classops.prefixprint("%s%s: " % (prefix, classname), total_reference) + +StorageEdge.print_with_name = StorageEdge_print_with_name + +# ==================================================================== +# ======== Command - DOT output +# ==================================================================== + +# Output is valid dot code and can be parsed 
by the graphviz dot utility. +def command_print_dot(logfile, flags): + graph = make_graph(logfile, flags) + print "/*" + print "Storage Statistics (dot format):" + print "================================" + print "*/" + print dot_string(graph, flags) + +def run_dot(logfile, flags, output_type): + import subprocess + dot = dot_string(make_graph(logfile, flags), flags) + command = ["dot", "-T%s" % output_type, "-o%s.%s" % (flags.logfile, output_type)] + print "Running:\n%s" % " ".join(command) + p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + output = p.communicate(input=dot)[0] + print output + +def command_dot(logfile, flags): + run_dot(logfile, flags, "jpg") +def command_dot_ps(logfile, flags): + run_dot(logfile, flags, "ps") +def command_dot_pdf(logfile, flags): + run_dot(logfile, flags, "pdf") +def command_dot_svg(logfile, flags): + run_dot(logfile, flags, "svg") + +def dot_string(graph, flags): + result = "digraph G {" + incoming_cache = {} + if not flags.allstorage: + graph.collapse_nonstorage_nodes("Other") + + def make_label(edge, prefix="", total_edge=None, slots_per_object=False): + object_suffix = " objects" + slots_suffix = " slots" + if not flags.objects or not flags.slots: + object_suffix = slots_suffix = "" + if total_edge and flags.percent and total_edge.objects != 0: + percent_objects = " (%.1f%%)" % percent(edge.objects, total_edge.objects) + percent_slots = " (%.1f%%)" % percent(edge.slots, total_edge.slots) + else: + percent_objects = percent_slots = "" + label = "" + if flags.objects: + label += "%s%s%s%s
    " % (prefix, format(edge.objects, ",.0f"), object_suffix, percent_objects) + if flags.slots: + label += "%s%s%s%s
    " % (prefix, format(edge.slots, ",.0f"), slots_suffix, percent_slots) + if slots_per_object and flags.slotsPerObject: + label += "%.1f slots/object
    " % (float(total.slots) / total.objects) + return label + + for node in graph.nodes.values(): + incoming = node.sum_all_incoming().total() + outgoing = node.sum_all_outgoing().total() + remaining = incoming - outgoing + if node.is_artificial(): + incoming_cache[node.name] = outgoing + shape = ",shape=box" + label = make_label(outgoing) + else: + incoming_cache[node.name] = incoming + shape = "" + label = make_label(incoming, "Incoming: ") + if remaining.objects != incoming.objects: + label += make_label(remaining, "Remaining: ", incoming) + result += "%s [label=<%s
    %s>%s];" % (node.dot_name(), node.name, label, shape) + + for edge in graph.edges.values(): + total = edge.total() + incoming = incoming_cache[edge.origin.name] + label = make_label(total, "", incoming, slots_per_object=True) + target_node = edge.target.dot_name() + source_node = edge.origin.dot_name() + result += "%s -> %s [label=<%s>];" % (source_node, target_node, label) + + result += "}" + return result + +# ==================================================================== +# ======== Other commands +# ==================================================================== + +def command_aggregate(logfile, flags): + graph = make_graph(logfile, flags) + edges = graph.edges.values() + edges.sort() + for edge in edges: + logentries = edge.as_log_entries() + logentries.sort() + for entry in logentries: + print entry + +def command_print_entries(logfile, flags): + def callback(entry): + print entry + parse(logfile, flags, callback) + +# ==================================================================== +# ======== Main +# ==================================================================== + +class Flags(object): + + def __init__(self, flags): + self.flags = {} + for name, short in flags: + self.__dict__[name] = False + self.flags[short] = name + + def handle(self, arg): + if arg in self.flags: + self.__dict__[self.flags[arg]] = True + return True + else: + return False + + def __str__(self): + descriptions = [ ("%s (%s)" % description) for description in self.flags.items() ] + return "[%s]" % " | ".join(descriptions) + +def usage(flags, commands): + print "Arguments: logfile command %s" % flags + print "Available commands: %s" % commands + exit(1) + +def main(argv): + flags = Flags([ + # General + ('verbose', '-v'), + + # All outputs + ('percent', '-p'), + ('allstorage', '-a'), + + # Text outputs + ('detailed', '-d'), + ('classes', '-c'), + + # dot outputs + ('slots', '-s'), + ('objects', '-o'), + ('slotsPerObject', '-S'), + ]) + + command_prefix = "command_" 
+ module = sys.modules[__name__].__dict__ + commands = [ a[len(command_prefix):] for a in module.keys() if a.startswith(command_prefix) ] + + if len(argv) < 2: + usage(flags, commands) + logfile = argv[0] + flags.logfile = logfile + configure_vm(logfile, flags) + command = argv[1] + for flag in argv[2:]: + if not flags.handle(flag): + usage(flags, commands) + if command not in commands: + usage(flags, commands) + + func = module[command_prefix + command] + func(logfile, flags) + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/rpython/rlib/rstrategies/rstrategies.py b/rpython/rlib/rstrategies/rstrategies.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rstrategies/rstrategies.py @@ -0,0 +1,572 @@ + +import weakref, sys +from rpython.rlib.rstrategies import logger +from rpython.rlib import jit, objectmodel, rerased +from rpython.rlib.objectmodel import specialize + +def make_accessors(strategy='strategy', storage='storage'): + """ + Instead of using this generator, the methods can be implemented manually. + A third way is to overwrite the getter/setter methods in StrategyFactory. + """ + def make_getter(attr): + def getter(self): return getattr(self, attr) + return getter + def make_setter(attr): + def setter(self, val): setattr(self, attr, val) + return setter + classdef = sys._getframe(1).f_locals + classdef['_get_strategy'] = make_getter(strategy) + classdef['_set_strategy'] = make_setter(strategy) + classdef['_get_storage'] = make_getter(storage) + classdef['_set_storage'] = make_setter(storage) + +class StrategyMetaclass(type): + """ + A metaclass is required, because we need certain attributes to be special + for every single strategy class. 
+ """ + def __new__(self, name, bases, attrs): + attrs['_is_strategy'] = False + attrs['_is_singleton'] = False + attrs['_specializations'] = [] + # Not every strategy uses rerased-pairs, but they won't hurt + erase, unerase = rerased.new_erasing_pair(name) + def get_storage(self, w_self): + erased = self.strategy_factory().get_storage(w_self) + return unerase(erased) + def set_storage(self, w_self, storage): + erased = erase(storage) + self.strategy_factory().set_storage(w_self, erased) + attrs['get_storage'] = get_storage + attrs['set_storage'] = set_storage + return type.__new__(self, name, bases, attrs) + +def strategy(generalize=None, singleton=True): + """ + Strategy classes must be decorated with this. + generalize is a list of other strategies, that can be switched to from the decorated strategy. + If the singleton flag is set to False, new strategy instances will be created, + instead of always reusing the singleton object. + """ + def decorator(strategy_class): + # Patch strategy class: Add generalized_strategy_for and mark as strategy class. 
+ if generalize: + @jit.unroll_safe + def generalized_strategy_for(self, value): + # TODO - optimize this method + for strategy in generalize: + if self.strategy_factory().strategy_singleton_instance(strategy)._check_can_handle(value): + return strategy + raise Exception("Could not find generalized strategy for %s coming from %s" % (value, self)) + strategy_class.generalized_strategy_for = generalized_strategy_for + for generalized in generalize: + generalized._specializations.append(strategy_class) + strategy_class._is_strategy = True + strategy_class._generalizations = generalize + strategy_class._is_singleton = singleton + return strategy_class + return decorator + +class StrategyFactory(object): + _immutable_fields_ = ["strategies[*]", "logger", "strategy_singleton_field"] + factory_instance_counter = 0 + + def __init__(self, root_class, all_strategy_classes=None): + if all_strategy_classes is None: + all_strategy_classes = self._collect_subclasses(root_class) + self.strategies = [] + self.logger = logger.Logger() + + # This is to avoid confusion between multiple factories existing simultaneously (e.g. in tests) + self.strategy_singleton_field = "__singleton_%i" % StrategyFactory.factory_instance_counter + StrategyFactory.factory_instance_counter += 1 + + self._create_strategy_instances(root_class, all_strategy_classes) + + def _create_strategy_instances(self, root_class, all_strategy_classes): + for strategy_class in all_strategy_classes: + if strategy_class._is_strategy: + setattr(strategy_class, self.strategy_singleton_field, self.instantiate_strategy(strategy_class)) + self.strategies.append(strategy_class) + self._patch_strategy_class(strategy_class, root_class) + self._order_strategies() + + # ============================= + # API methods + # ============================= + + def switch_strategy(self, w_self, new_strategy_type, new_element=None): + """ + Switch the strategy of w_self to the new type. 
+ new_element can be given as as hint, purely for logging purposes. + It should be the object that was added to w_self, causing the strategy switch. + """ + old_strategy = self.get_strategy(w_self) + if new_strategy_type._is_singleton: + new_strategy = self.strategy_singleton_instance(new_strategy_type) + else: + size = old_strategy.size(w_self) + new_strategy = self.instantiate_strategy(new_strategy_type, w_self, size) + self.set_strategy(w_self, new_strategy) + old_strategy._convert_storage_to(w_self, new_strategy) + new_strategy.strategy_switched(w_self) + self.log(w_self, new_strategy, old_strategy, new_element) + return new_strategy + + def set_initial_strategy(self, w_self, strategy_type, size, elements=None): + """ + Initialize the strategy and storage fields of w_self. + This must be called before switch_strategy or any strategy method can be used. + elements is an optional list of values initially stored in w_self. + If given, then len(elements) == size must hold. + """ + assert self.get_strategy(w_self) is None, "Strategy should not be initialized yet!" + if strategy_type._is_singleton: + strategy = self.strategy_singleton_instance(strategy_type) + else: + strategy = self.instantiate_strategy(strategy_type, w_self, size) + self.set_strategy(w_self, strategy) + strategy._initialize_storage(w_self, size) + element = None + if elements: + strategy.store_all(w_self, elements) + if len(elements) > 0: element = elements[0] + strategy.strategy_switched(w_self) + self.log(w_self, strategy, None, element) + return strategy + + @jit.unroll_safe + def strategy_type_for(self, objects): + """ + Return the best-fitting strategy to hold all given objects. 
+ """ + specialized_strategies = len(self.strategies) + can_handle = [True] * specialized_strategies + for obj in objects: + if specialized_strategies <= 1: + break + for i, strategy in enumerate(self.strategies): + if can_handle[i] and not self.strategy_singleton_instance(strategy)._check_can_handle(obj): + can_handle[i] = False + specialized_strategies -= 1 + for i, strategy_type in enumerate(self.strategies): + if can_handle[i]: + return strategy_type + raise Exception("Could not find strategy to handle: %s" % objects) + + def decorate_strategies(self, transitions): + """ + As an alternative to decorating all strategies with @strategy, + invoke this in the constructor of your StrategyFactory subclass, before + calling __init__. transitions is a dict mapping all strategy classes to + their 'generalize' list parameter (see @strategy decorator). + """ + "NOT_RPYTHON" + for strategy_class, generalized in transitions.items(): + strategy(generalized)(strategy_class) + + # ============================= + # The following methods can be overwritten to customize certain aspects of the factory. + # ============================= + + def instantiate_strategy(self, strategy_type, w_self=None, initial_size=0): + """ + Return a functional instance of strategy_type. + Overwrite this if you need a non-default constructor. + The two additional parameters should be ignored for singleton-strategies. 
+ """ + return strategy_type() + + def log(self, w_self, new_strategy, old_strategy=None, new_element=None): + """ + This can be overwritten into a more appropriate call to self.logger.log + """ + if not self.logger.active: return + new_strategy_str = self.log_string_for_object(new_strategy) + old_strategy_str = self.log_string_for_object(old_strategy) + element_typename = self.log_string_for_object(new_element) + size = new_strategy.size(w_self) + typename = "" + cause = "Switched" if old_strategy else "Created" + self.logger.log(new_strategy_str, size, cause, old_strategy_str, typename, element_typename) + + @specialize.call_location() + def log_string_for_object(self, obj): + """ + This can be overwritten instead of the entire log() method. + Keep the specialize-annotation in order to handle different kinds of objects here. + """ + return obj.__class__.__name__ if obj else "" + + # These storage accessors are specialized because the storage field is + # populated by erased-objects which seem to be incompatible sometimes. 
+ @specialize.call_location() + def get_storage(self, obj): + return obj._get_storage() + @specialize.call_location() + def set_storage(self, obj, val): + return obj._set_storage(val) + + def get_strategy(self, obj): + return obj._get_strategy() + def set_strategy(self, obj, val): + return obj._set_strategy(val) + + # ============================= + # Internal methods + # ============================= + + def _patch_strategy_class(self, strategy_class, root_class): + "NOT_RPYTHON" + # Patch root class: Add default handler for visitor + def _convert_storage_from_OTHER(self, w_self, previous_strategy): + self._convert_storage_from(w_self, previous_strategy) + funcname = "_convert_storage_from_" + strategy_class.__name__ + _convert_storage_from_OTHER.func_name = funcname + setattr(root_class, funcname, _convert_storage_from_OTHER) + + # Patch strategy class: Add polymorphic visitor function + def _convert_storage_to(self, w_self, new_strategy): + getattr(new_strategy, funcname)(w_self, self) + strategy_class._convert_storage_to = _convert_storage_to + + def _collect_subclasses(self, cls): + "NOT_RPYTHON" + subclasses = [] + for subcls in cls.__subclasses__(): + subclasses.append(subcls) + subclasses.extend(self._collect_subclasses(subcls)) + return subclasses + + def _order_strategies(self): + "NOT_RPYTHON" + def get_generalization_depth(strategy, visited=None): + if visited is None: + visited = set() + if strategy._generalizations: + if strategy in visited: + raise Exception("Cycle in generalization-tree of %s" % strategy) + visited.add(strategy) + depth = 0 + for generalization in strategy._generalizations: + other_depth = get_generalization_depth(generalization, set(visited)) + depth = max(depth, other_depth) + return depth + 1 + else: + return 0 + self.strategies.sort(key=get_generalization_depth, reverse=True) + + @jit.elidable + def strategy_singleton_instance(self, strategy_class): + return getattr(strategy_class, self.strategy_singleton_field) + + def 
_freeze_(self): + # Instance will be frozen at compile time, making accesses constant. + # The constructor does meta stuff which is not possible after translation. + return True + +class AbstractStrategy(object): + """ + == Required: + strategy_factory(self) - Access to StorageFactory + """ + + def strategy_switched(self, w_self): + # Overwrite this method for a hook whenever the strategy + # of w_self was switched to self. + pass + + # Main Fixedsize API + + def store(self, w_self, index0, value): + raise NotImplementedError("Abstract method") + + def fetch(self, w_self, index0): + raise NotImplementedError("Abstract method") + + def size(self, w_self): + raise NotImplementedError("Abstract method") + + # Fixedsize utility methods + + def slice(self, w_self, start, end): + return [ self.fetch(w_self, i) for i in range(start, end)] + + def fetch_all(self, w_self): + return self.slice(w_self, 0, self.size(w_self)) + + def store_all(self, w_self, elements): + for i, e in enumerate(elements): + self.store(w_self, i, e) + + # Main Varsize API + + def insert(self, w_self, index0, list_w): + raise NotImplementedError("Abstract method") + + def delete(self, w_self, start, end): + raise NotImplementedError("Abstract method") + + # Varsize utility methods + + def append(self, w_self, list_w): + self.insert(w_self, self.size(w_self), list_w) + + def pop(self, w_self, index0): + e = self.fetch(w_self, index0) + self.delete(w_self, index0, index0+1) + return e + + # Internal methods + + def _initialize_storage(self, w_self, initial_size): + raise NotImplementedError("Abstract method") + + def _check_can_handle(self, value): + raise NotImplementedError("Abstract method") + + def _convert_storage_to(self, w_self, new_strategy): + # This will be overwritten in _patch_strategy_class + new_strategy._convert_storage_from(w_self, self) + + @jit.unroll_safe + def _convert_storage_from(self, w_self, previous_strategy): + # This is a very unefficient (but most generic) way to do this. 
+ # Subclasses should specialize. + storage = previous_strategy.fetch_all(w_self) + self._initialize_storage(w_self, previous_strategy.size(w_self)) + for i, field in enumerate(storage): + self.store(w_self, i, field) + + def _generalize_for_value(self, w_self, value): + strategy_type = self.generalized_strategy_for(value) + new_instance = self.strategy_factory().switch_strategy(w_self, strategy_type, new_element=value) + return new_instance + + def _cannot_handle_store(self, w_self, index0, value): + new_instance = self._generalize_for_value(w_self, value) + new_instance.store(w_self, index0, value) + + def _cannot_handle_insert(self, w_self, index0, list_w): + # TODO - optimize. Prevent multiple generalizations and slicing done by callers. + new_strategy = self._generalize_for_value(w_self, list_w[0]) + new_strategy.insert(w_self, index0, list_w) + +# ============== Special Strategies with no storage array ============== + +class EmptyStrategy(AbstractStrategy): + # == Required: + # See AbstractStrategy + + def _initialize_storage(self, w_self, initial_size): + assert initial_size == 0 + self.set_storage(w_self, None) + def _convert_storage_from(self, w_self, previous_strategy): + self.set_storage(w_self, None) + def _check_can_handle(self, value): + return False + + def fetch(self, w_self, index0): + raise IndexError + def store(self, w_self, index0, value): + self._cannot_handle_store(w_self, index0, [value]) + def insert(self, w_self, index0, list_w): + self._cannot_handle_insert(w_self, index0, list_w) + def delete(self, w_self, start, end): + self.check_index_range(w_self, start, end) + def size(self, w_self): + return 0 + +class SingleValueStrategyStorage(object): + """Small container object for a size value.""" + _attrs_ = ['size'] + def __init__(self, size=0): + self.size = size + +class SingleValueStrategy(AbstractStrategy): + # == Required: + # See AbstractStrategy + # check_index_*(...) 
- use mixin SafeIndexingMixin or UnsafeIndexingMixin + # value(self) - the single value contained in this strategy. Should be constant. + + def _initialize_storage(self, w_self, initial_size): + storage_obj = SingleValueStrategyStorage(initial_size) + self.set_storage(w_self, storage_obj) + def _convert_storage_from(self, w_self, previous_strategy): + self._initialize_storage(w_self, previous_strategy.size(w_self)) + def _check_can_handle(self, value): + return value is self.value() + + def fetch(self, w_self, index0): + self.check_index_fetch(w_self, index0) + return self.value() + def store(self, w_self, index0, value): + self.check_index_store(w_self, index0) + if self._check_can_handle(value): + return + self._cannot_handle_store(w_self, index0, value) + def delete(self, w_self, start, end): + self.check_index_range(w_self, start, end) + self.get_storage(w_self).size -= (end - start) + def size(self, w_self): + return self.get_storage(w_self).size + + @jit.unroll_safe + def insert(self, w_self, index0, list_w): + storage_obj = self.get_storage(w_self) + for i in range(len(list_w)): + if self._check_can_handle(list_w[i]): + storage_obj.size += 1 + else: + self._cannot_handle_insert(w_self, index0 + i, list_w[i:]) + return + +# ============== Basic strategies with storage ============== + +class StrategyWithStorage(AbstractStrategy): + # == Required: + # See AbstractStrategy + # check_index_*(...) 
- use mixin SafeIndexingMixin or UnsafeIndexingMixin + # default_value(self) - The value to be initially contained in this strategy + + def _initialize_storage(self, w_self, initial_size): + default = self._unwrap(self.default_value()) + self.set_storage(w_self, [default] * initial_size) + + @jit.unroll_safe + def _convert_storage_from(self, w_self, previous_strategy): + size = previous_strategy.size(w_self) + new_storage = [ self._unwrap(previous_strategy.fetch(w_self, i)) + for i in range(size) ] + self.set_storage(w_self, new_storage) + + def store(self, w_self, index0, wrapped_value): + self.check_index_store(w_self, index0) + if self._check_can_handle(wrapped_value): + unwrapped = self._unwrap(wrapped_value) + self.get_storage(w_self)[index0] = unwrapped + else: + self._cannot_handle_store(w_self, index0, wrapped_value) + + def fetch(self, w_self, index0): + self.check_index_fetch(w_self, index0) + unwrapped = self.get_storage(w_self)[index0] + return self._wrap(unwrapped) + + def _wrap(self, value): + raise NotImplementedError("Abstract method") + + def _unwrap(self, value): + raise NotImplementedError("Abstract method") + + def size(self, w_self): + return len(self.get_storage(w_self)) + + @jit.unroll_safe + def insert(self, w_self, start, list_w): + # This is following Python's behaviour - insert automatically + # happens at the beginning of an array, even if index is larger + if start > self.size(w_self): + start = self.size(w_self) + for i in range(len(list_w)): + if self._check_can_handle(list_w[i]): + self.get_storage(w_self).insert(start + i, self._unwrap(list_w[i])) + else: + self._cannot_handle_insert(w_self, start + i, list_w[i:]) + return + + def delete(self, w_self, start, end): + self.check_index_range(w_self, start, end) + assert start >= 0 and end >= 0 + del self.get_storage(w_self)[start : end] + +class GenericStrategy(StrategyWithStorage): + # == Required: + # See StrategyWithStorage + + def _wrap(self, value): + return value + def 
_unwrap(self, value): + return value + def _check_can_handle(self, wrapped_value): + return True + +class WeakGenericStrategy(StrategyWithStorage): + # == Required: + # See StrategyWithStorage + + def _wrap(self, value): + return value() or self.default_value() + def _unwrap(self, value): + assert value is not None + return weakref.ref(value) + def _check_can_handle(self, wrapped_value): + return True + +# ============== Mixins for index checking operations ============== + +class SafeIndexingMixin(object): + def check_index_store(self, w_self, index0): + self.check_index(w_self, index0) + def check_index_fetch(self, w_self, index0): + self.check_index(w_self, index0) + def check_index_range(self, w_self, start, end): + if end < start: + raise IndexError + self.check_index(w_self, start) + self.check_index(w_self, end) + def check_index(self, w_self, index0): + if index0 < 0 or index0 >= self.size(w_self): + raise IndexError + +class UnsafeIndexingMixin(object): + def check_index_store(self, w_self, index0): + pass + def check_index_fetch(self, w_self, index0): + pass + def check_index_range(self, w_self, start, end): + pass + +# ============== Specialized Storage Strategies ============== + +class SpecializedStrategy(StrategyWithStorage): + # == Required: + # See StrategyWithStorage + # wrap(self, value) - Return a boxed object for the primitive value + # unwrap(self, value) - Return the unboxed primitive value of value + + def _unwrap(self, value): + return self.unwrap(value) + def _wrap(self, value): + return self.wrap(value) + +class SingleTypeStrategy(SpecializedStrategy): + # == Required Functions: + # See SpecializedStrategy + # contained_type - The wrapped type that can be stored in this strategy + + def _check_can_handle(self, value): + return isinstance(value, self.contained_type) + +class TaggingStrategy(SingleTypeStrategy): + """This strategy uses a special tag value to represent a single additional object.""" + # == Required: + # See SingleTypeStrategy 
+ # wrapped_tagged_value(self) - The tagged object + # unwrapped_tagged_value(self) - The unwrapped tag value representing the tagged object + + def _check_can_handle(self, value): + return value is self.wrapped_tagged_value() or \ + (isinstance(value, self.contained_type) and \ + self.unwrap(value) != self.unwrapped_tagged_value()) + + def _unwrap(self, value): + if value is self.wrapped_tagged_value(): + return self.unwrapped_tagged_value() + return self.unwrap(value) + + def _wrap(self, value): + if value == self.unwrapped_tagged_value(): + return self.wrapped_tagged_value() + return self.wrap(value) diff --git a/rpython/rlib/rstrategies/test/test_rstrategies.py b/rpython/rlib/rstrategies/test/test_rstrategies.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rstrategies/test/test_rstrategies.py @@ -0,0 +1,552 @@ + +import py +from rpython.rlib.rstrategies import rstrategies as rs +from rpython.rlib.objectmodel import import_from_mixin + +# === Define small model tree + +class W_AbstractObject(object): + pass + +class W_Object(W_AbstractObject): + pass + +class W_Integer(W_AbstractObject): + def __init__(self, value): + self.value = value + def __eq__(self, other): + return isinstance(other, W_Integer) and self.value == other.value + +class W_List(W_AbstractObject): + rs.make_accessors() + def __init__(self, strategy=None, size=0, elements=None): + self.strategy = None + if strategy: + factory.set_initial_strategy(self, strategy, size, elements) + def fetch(self, i): + assert self.strategy + return self.strategy.fetch(self, i) + def store(self, i, value): + assert self.strategy + return self.strategy.store(self, i, value) + def size(self): + assert self.strategy + return self.strategy.size(self) + def insert(self, index0, list_w): + assert self.strategy + return self.strategy.insert(self, index0, list_w) + def delete(self, start, end): + assert self.strategy + return self.strategy.delete(self, start, end) + def append(self, list_w): + assert self.strategy 
+ return self.strategy.append(self, list_w) + def pop(self, index0): + assert self.strategy + return self.strategy.pop(self, index0) + def slice(self, start, end): + assert self.strategy + return self.strategy.slice(self, start, end) + def fetch_all(self): + assert self.strategy + return self.strategy.fetch_all(self) + def store_all(self, elements): + assert self.strategy + return self.strategy.store_all(self, elements) + +w_nil = W_Object() + +# === Define concrete strategy classes + +class AbstractStrategy(object): + __metaclass__ = rs.StrategyMetaclass + import_from_mixin(rs.AbstractStrategy) + import_from_mixin(rs.SafeIndexingMixin) + def __init__(self, factory, w_self=None, size=0): + self.factory = factory + def strategy_factory(self): + return self.factory + +class Factory(rs.StrategyFactory): + switching_log = [] + + def __init__(self, root_class): + self.decorate_strategies({ + EmptyStrategy: [NilStrategy, IntegerStrategy, IntegerOrNilStrategy, GenericStrategy], + NilStrategy: [IntegerOrNilStrategy, GenericStrategy], + GenericStrategy: [], + IntegerStrategy: [IntegerOrNilStrategy, GenericStrategy], + IntegerOrNilStrategy: [GenericStrategy], + }) + rs.StrategyFactory.__init__(self, root_class) + + def instantiate_strategy(self, strategy_type, w_self=None, size=0): + return strategy_type(self, w_self, size) + + def set_strategy(self, w_list, strategy): + old_strategy = self.get_strategy(w_list) + self.switching_log.append((old_strategy, strategy)) + super(Factory, self).set_strategy(w_list, strategy) + + def clear_log(self): + del self.switching_log[:] + +class EmptyStrategy(AbstractStrategy): + import_from_mixin(rs.EmptyStrategy) + # TODO - implement and test transition from Generic back to Empty + +class NilStrategy(AbstractStrategy): + import_from_mixin(rs.SingleValueStrategy) + def value(self): return w_nil + +class GenericStrategy(AbstractStrategy): + import_from_mixin(rs.GenericStrategy) + import_from_mixin(rs.UnsafeIndexingMixin) + def 
default_value(self): return w_nil + +class WeakGenericStrategy(AbstractStrategy): + import_from_mixin(rs.WeakGenericStrategy) + def default_value(self): return w_nil + +class IntegerStrategy(AbstractStrategy): + import_from_mixin(rs.SingleTypeStrategy) + contained_type = W_Integer + def wrap(self, value): return W_Integer(value) + def unwrap(self, value): return value.value + def default_value(self): return W_Integer(0) + +class IntegerOrNilStrategy(AbstractStrategy): + import_from_mixin(rs.TaggingStrategy) + contained_type = W_Integer + def wrap(self, value): return W_Integer(value) + def unwrap(self, value): return value.value + def default_value(self): return w_nil + def wrapped_tagged_value(self): return w_nil + def unwrapped_tagged_value(self): import sys; return sys.maxint + + at rs.strategy(generalize=[], singleton=False) +class NonSingletonStrategy(GenericStrategy): + def __init__(self, factory, w_list=None, size=0): + super(NonSingletonStrategy, self).__init__(factory, w_list, size) + self.w_list = w_list + self.the_size = size + +class NonStrategy(NonSingletonStrategy): + pass + + at rs.strategy(generalize=[]) +class InefficientStrategy(GenericStrategy): + def _convert_storage_from(self, w_self, previous_strategy): + return AbstractStrategy._convert_storage_from(self, w_self, previous_strategy) + +factory = Factory(AbstractStrategy) + +def check_contents(list, expected): + assert list.size() == len(expected) + for i, val in enumerate(expected): + assert list.fetch(i) == val + +def teardown(): + factory.clear_log() + +# === Test Initialization and fetch + +def test_setup(): + pass + +def test_factory_setup(): + expected_strategies = 7 + assert len(factory.strategies) == expected_strategies + assert len(set(factory.strategies)) == len(factory.strategies) + for strategy in factory.strategies: + assert isinstance(factory.strategy_singleton_instance(strategy), strategy) + +def test_factory_setup_singleton_instances(): + new_factory = Factory(AbstractStrategy) 
+ s1 = factory.strategy_singleton_instance(GenericStrategy) + s2 = new_factory.strategy_singleton_instance(GenericStrategy) + assert s1 is not s2 + assert s1.strategy_factory() is factory + assert s2.strategy_factory() is new_factory + +def test_metaclass(): + assert NonStrategy._is_strategy == False + assert IntegerOrNilStrategy._is_strategy == True + assert IntegerOrNilStrategy._is_singleton == True + assert NonSingletonStrategy._is_singleton == False + assert NonStrategy._is_singleton == False + assert NonStrategy.get_storage is not NonSingletonStrategy.get_storage + +def test_singletons(): + def do_test_singletons(cls, expected_true): + l1 = W_List(cls, 0) + l2 = W_List(cls, 0) + if expected_true: + assert l1.strategy is l2.strategy + else: + assert l1.strategy is not l2.strategy + do_test_singletons(EmptyStrategy, True) + do_test_singletons(NonSingletonStrategy, False) + do_test_singletons(NonStrategy, False) + do_test_singletons(GenericStrategy, True) + +def do_test_initialization(cls, default_value=w_nil, is_safe=True): + size = 10 + l = W_List(cls, size) + s = l.strategy + assert s.size(l) == size + assert s.fetch(l,0) == default_value + assert s.fetch(l,size/2) == default_value + assert s.fetch(l,size-1) == default_value + py.test.raises(IndexError, s.fetch, l, size) + py.test.raises(IndexError, s.fetch, l, size+1) + py.test.raises(IndexError, s.fetch, l, size+5) + if is_safe: + py.test.raises(IndexError, s.fetch, l, -1) + else: + assert s.fetch(l, -1) == s.fetch(l, size - 1) + +def test_init_Empty(): + l = W_List(EmptyStrategy, 0) + s = l.strategy + assert s.size(l) == 0 + py.test.raises(IndexError, s.fetch, l, 0) + py.test.raises(IndexError, s.fetch, l, 10) + py.test.raises(IndexError, s.delete, l, 0, 1) + py.test.raises(AssertionError, W_List, EmptyStrategy, 2) # Only size 0 possible. 
+ +def test_init_Nil(): + do_test_initialization(NilStrategy) + +def test_init_Generic(): + do_test_initialization(GenericStrategy, is_safe=False) + +def test_init_WeakGeneric(): + do_test_initialization(WeakGenericStrategy) + +def test_init_Integer(): + do_test_initialization(IntegerStrategy, default_value=W_Integer(0)) + +def test_init_IntegerOrNil(): + do_test_initialization(IntegerOrNilStrategy) + +# === Test Simple store + +def do_test_store(cls, stored_value=W_Object(), is_safe=True, is_varsize=False): + size = 10 + l = W_List(cls, size) + s = l.strategy From noreply at buildbot.pypy.org Thu Apr 23 19:03:12 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 23 Apr 2015 19:03:12 +0200 (CEST) Subject: [pypy-commit] pypy object-dtype2: document branch to be closed Message-ID: <20150423170312.C21D51C12C8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: object-dtype2 Changeset: r76903:cc8c8b4aba30 Date: 2015-04-23 20:00 +0300 http://bitbucket.org/pypy/pypy/changeset/cc8c8b4aba30/ Log: document branch to be closed diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -64,3 +64,6 @@ .. branch: vmprof .. Merged but then backed out, hopefully it will return as vmprof2 + +.. 
branch: object-dtype2 +Extend numpy dtypes to allow using objects with associated garbage collection hook From noreply at buildbot.pypy.org Thu Apr 23 19:03:13 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 23 Apr 2015 19:03:13 +0200 (CEST) Subject: [pypy-commit] pypy object-dtype2: close branch to be merged Message-ID: <20150423170313.D9AC71C12C8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: object-dtype2 Changeset: r76904:03977f2520fe Date: 2015-04-23 20:00 +0300 http://bitbucket.org/pypy/pypy/changeset/03977f2520fe/ Log: close branch to be merged From noreply at buildbot.pypy.org Thu Apr 23 19:03:15 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 23 Apr 2015 19:03:15 +0200 (CEST) Subject: [pypy-commit] pypy default: merge in object-dtype2, which provides an object dtype for numpy Message-ID: <20150423170315.7E3411C12C8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r76905:bc2c76a447dc Date: 2015-04-23 20:01 +0300 http://bitbucket.org/pypy/pypy/changeset/bc2c76a447dc/ Log: merge in object-dtype2, which provides an object dtype for numpy diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -64,3 +64,6 @@ .. branch: vmprof .. Merged but then backed out, hopefully it will return as vmprof2 + +.. branch: object-dtype2 +Extend numpy dtypes to allow using objects with associated garbage collection hook diff --git a/pypy/goal/targetnumpystandalone.py b/pypy/goal/targetnumpystandalone.py deleted file mode 100644 --- a/pypy/goal/targetnumpystandalone.py +++ /dev/null @@ -1,43 +0,0 @@ - -""" Usage: - -./targetnumpystandalone-c array_size - -Will execute a give numpy bytecode. Arrays will be ranges (in float) modulo 10, -constants would be consecutive starting from one. 
- -Bytecode should contain letters 'a' 'l' and 'f' so far and be correct -""" - -import time -from pypy.module.micronumpy.compile import numpy_compile -from rpython.jit.codewriter.policy import JitPolicy -from rpython.rtyper.annlowlevel import hlstr - -def entry_point(argv): - if len(argv) != 3: - print __doc__ - return 1 - try: - size = int(argv[2]) - except ValueError: - print "INVALID LITERAL FOR INT:", argv[2] - print __doc__ - return 3 - t0 = time.time() - main(argv[0], size) - print "bytecode:", argv[0], "size:", size - print "took:", time.time() - t0 - return 0 - -def main(bc, size): - if not isinstance(bc, str): - bc = hlstr(bc) # for tests - a = numpy_compile(bc, size) - a = a.compute() - -def target(*args): - return entry_point, None - -def jitpolicy(driver): - return JitPolicy() diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -30,6 +30,9 @@ for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c + def startup(self, space): + from pypy.module.micronumpy.concrete import _setup + _setup() class UMathModule(MixedModule): appleveldefs = {} diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -34,11 +34,13 @@ @staticmethod def from_shape(space, shape, dtype, order='C', w_instance=None, zero=True): - from pypy.module.micronumpy import concrete + from pypy.module.micronumpy import concrete, descriptor, boxes from pypy.module.micronumpy.strides import calc_strides strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides, zero=zero) + if dtype == descriptor.get_dtype_cache(space).w_objectdtype: + impl.fill(space, boxes.W_ObjectBox(space.w_None)) if w_instance: return wrap_impl(space, space.type(w_instance), w_instance, 
impl) return W_NDimArray(impl) @@ -123,7 +125,7 @@ def get_shape(self): return self.implementation.get_shape() - def get_dtype(self): + def get_dtype(self, space=None): return self.implementation.dtype def get_order(self): diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -607,6 +607,19 @@ # arr.storage[i] = arg[i] return W_UnicodeBox(arr, 0, arr.dtype) +class W_ObjectBox(W_GenericBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.OBJECT) + + def __init__(self, w_obj): + self.w_obj = w_obj + + def convert_to(self, space, dtype): + if dtype.is_bool(): + return W_BoolBox(space.bool_w(self.w_obj)) + return self # XXX + + def descr__getattr__(self, space, w_key): + return space.getattr(self.w_obj, w_key) W_GenericBox.typedef = TypeDef("numpy.generic", __new__ = interp2app(W_GenericBox.descr__new__.im_func), @@ -856,3 +869,9 @@ __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), __len__ = interp2app(W_UnicodeBox.descr_len), ) + +W_ObjectBox.typedef = TypeDef("numpy.object_", W_ObjectBox.typedef, + __new__ = interp2app(W_ObjectBox.descr__new__.im_func), + __getattr__ = interp2app(W_ObjectBox.descr__getattr__), +) + diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -3,7 +3,7 @@ """ import re from pypy.interpreter import special -from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root +from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root, ObjSpace from pypy.interpreter.error import OperationError from rpython.rlib.objectmodel import specialize, instantiate from rpython.rlib.nonconst import NonConstant @@ -47,7 +47,7 @@ def lookup(self, name): return self.getdictvalue(self, name) -class FakeSpace(object): +class FakeSpace(ObjSpace): w_ValueError = W_TypeObject("ValueError") w_TypeError = 
W_TypeObject("TypeError") w_IndexError = W_TypeObject("IndexError") @@ -67,6 +67,7 @@ w_unicode = W_TypeObject("unicode") w_complex = W_TypeObject("complex") w_dict = W_TypeObject("dict") + w_object = W_TypeObject("object") def __init__(self): """NOT_RPYTHON""" @@ -88,7 +89,8 @@ return self.wrap(len(w_obj.items)) def getattr(self, w_obj, w_attr): - return StringObject(NonConstant('foo')) + assert isinstance(w_attr, StringObject) + return w_obj.getdictvalue(self, w_attr.v) def isinstance_w(self, w_obj, w_tp): try: diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,11 +1,11 @@ from pypy.interpreter.error import OperationError, oefmt -from rpython.rlib import jit +from rpython.rlib import jit, rgc from rpython.rlib.buffer import Buffer -from rpython.rlib.debug import make_sure_not_resized +from rpython.rlib.debug import make_sure_not_resized, debug_print from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.micronumpy import support, loop +from rpython.rtyper.lltypesystem import rffi, lltype, llmemory +from pypy.module.micronumpy import support, loop, constants as NPY from pypy.module.micronumpy.base import convert_to_array, W_NDimArray, \ ArrayArgumentException from pypy.module.micronumpy.iterators import ArrayIter @@ -13,11 +13,13 @@ RecordChunk, calc_strides, calc_new_strides, shape_agreement, calculate_broadcast_strides, calc_backstrides) from rpython.rlib.objectmodel import keepalive_until_here +from rpython.rtyper.annlowlevel import cast_gcref_to_instance +from pypy.interpreter.baseobjspace import W_Root class BaseConcreteArray(object): _immutable_fields_ = ['dtype?', 'storage', 'start', 'size', 'shape[*]', - 'strides[*]', 'backstrides[*]', 'order'] + 'strides[*]', 'backstrides[*]', 'order', 
'gcstruct'] start = 0 parent = None flags = 0 @@ -333,6 +335,44 @@ loop.setslice(space, impl.get_shape(), impl, self) return impl +OBJECTSTORE = lltype.GcStruct('ObjectStore', + ('length', lltype.Signed), + ('step', lltype.Signed), + ('storage', llmemory.Address), + rtti=True) +offset_of_storage = llmemory.offsetof(OBJECTSTORE, 'storage') +offset_of_length = llmemory.offsetof(OBJECTSTORE, 'length') +offset_of_step = llmemory.offsetof(OBJECTSTORE, 'step') + +V_OBJECTSTORE = lltype.nullptr(OBJECTSTORE) + +def customtrace(gc, obj, callback, arg): + #debug_print('in customtrace w/obj', obj) + length = (obj + offset_of_length).signed[0] + step = (obj + offset_of_step).signed[0] + storage = (obj + offset_of_storage).address[0] + #debug_print('tracing', length, 'objects in ndarray.storage') + i = 0 + while i < length: + gc._trace_callback(callback, arg, storage) + storage += step + i += 1 + +lambda_customtrace = lambda: customtrace + +def _setup(): + rgc.register_custom_trace_hook(OBJECTSTORE, lambda_customtrace) + + at jit.dont_look_inside +def _create_objectstore(storage, length, elsize): + gcstruct = lltype.malloc(OBJECTSTORE) + # JIT does not support cast_ptr_to_adr + gcstruct.storage = llmemory.cast_ptr_to_adr(storage) + #print 'create gcstruct',gcstruct,'with storage',storage,'as',gcstruct.storage + gcstruct.length = length + gcstruct.step = elsize + return gcstruct + class ConcreteArrayNotOwning(BaseConcreteArray): def __init__(self, shape, dtype, order, strides, backstrides, storage, start=0): @@ -347,10 +387,11 @@ self.backstrides = backstrides self.storage = storage self.start = start + self.gcstruct = V_OBJECTSTORE def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.elsize, - box, 0, self.size, 0) + box, 0, self.size, 0, self.gcstruct) def set_shape(self, space, orig_array, new_shape): strides, backstrides = calc_strides(new_shape, self.dtype, @@ -374,17 +415,24 @@ def base(self): return None - class 
ConcreteArray(ConcreteArrayNotOwning): def __init__(self, shape, dtype, order, strides, backstrides, storage=lltype.nullptr(RAW_STORAGE), zero=True): + gcstruct = V_OBJECTSTORE if storage == lltype.nullptr(RAW_STORAGE): - storage = dtype.itemtype.malloc(support.product(shape) * - dtype.elsize, zero=zero) + length = support.product(shape) + if dtype.num == NPY.OBJECT: + storage = dtype.itemtype.malloc(length * dtype.elsize, zero=True) + gcstruct = _create_objectstore(storage, length, dtype.elsize) + else: + storage = dtype.itemtype.malloc(length * dtype.elsize, zero=zero) ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides, storage) + self.gcstruct = gcstruct def __del__(self): + if self.gcstruct: + self.gcstruct.length = 0 free_raw_storage(self.storage, track_allocation=False) @@ -423,6 +471,7 @@ parent = parent.parent # one level only self.parent = parent self.storage = parent.storage + self.gcstruct = parent.gcstruct self.order = parent.order self.dtype = dtype self.size = support.product(shape) * self.dtype.elsize @@ -480,6 +529,7 @@ class VoidBoxStorage(BaseConcreteArray): def __init__(self, size, dtype): self.storage = alloc_raw_storage(size) + self.gcstruct = V_OBJECTSTORE self.dtype = dtype self.size = size diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -38,6 +38,34 @@ raise oefmt(space.w_ValueError, "object __array__ method not producing an array") +def try_interface_method(space, w_object): + try: + w_interface = space.getattr(w_object, space.wrap("__array_interface__")) + except OperationError, e: + if e.match(space, space.w_AttributeError): + return None + raise + if w_interface is None: + # happens from compile.py + return None + version = space.int_w(space.finditem(w_interface, space.wrap("version"))) + if version < 3: + raise oefmt(space.w_NotImplementedError, + "__array_interface__ version %d not supported", 
version) + # make a view into the data + w_shape = space.finditem(w_interface, space.wrap('shape')) + w_dtype = space.finditem(w_interface, space.wrap('typestr')) + w_descr = space.finditem(w_interface, space.wrap('descr')) + data_w = space.listview(space.finditem(w_interface, space.wrap('data'))) + w_strides = space.finditem(w_interface, space.wrap('strides')) + shape = [space.int_w(i) for i in space.listview(w_shape)] + dtype = descriptor.decode_w_dtype(space, w_dtype) + rw = space.is_true(data_w[1]) + #print 'create view from shape',shape,'dtype',dtype,'descr',w_descr,'data',data_w[0],'rw',rw + raise oefmt(space.w_NotImplementedError, + "creating array from __array_interface__ not supported yet") + return + @unwrap_spec(ndmin=int, copy=bool, subok=bool) def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, @@ -63,7 +91,11 @@ # continue with w_array, but do further operations in place w_object = w_array copy = False - + if not isinstance(w_object, W_NDimArray): + w_array = try_interface_method(space, w_object) + if w_array is not None: + w_object = w_array + copy = False dtype = descriptor.decode_w_dtype(space, w_dtype) if space.is_none(w_order): diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -6,7 +6,7 @@ from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) from rpython.rlib import jit -from rpython.rlib.objectmodel import specialize, compute_hash +from rpython.rlib.objectmodel import specialize, compute_hash, we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from pypy.module.micronumpy import types, boxes, base, support, constants as NPY from pypy.module.micronumpy.appbridge import get_appbridge_cache @@ -56,7 +56,7 @@ self.char = char self.w_box_type = w_box_type if byteorder is None: - if itemtype.get_element_size() == 1: + 
if itemtype.get_element_size() == 1 or isinstance(itemtype, types.ObjectType): byteorder = NPY.IGNORE else: byteorder = NPY.NATIVE @@ -112,6 +112,9 @@ def is_str(self): return self.num == NPY.STRING + def is_object(self): + return self.num == NPY.OBJECT + def is_str_or_unicode(self): return self.num == NPY.STRING or self.num == NPY.UNICODE @@ -428,7 +431,7 @@ self.names.append(name) self.fields[name] = offset, dtype - self.itemtype = types.RecordType() + self.itemtype = types.RecordType(space) if self.is_flexible(): self.elsize = size @@ -443,7 +446,7 @@ endian = NPY.OPPBYTE if self.is_native() else NPY.NATBYTE elif newendian != NPY.IGNORE: endian = newendian - itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) + itemtype = self.itemtype.__class__(space, endian in (NPY.NATIVE, NPY.NATBYTE)) fields = self.fields if fields is None: fields = {} @@ -482,7 +485,7 @@ fields[fldname] = (offset, subdtype) offset += subdtype.elsize names.append(fldname) - return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + return W_Dtype(types.RecordType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, space.gettypefor(boxes.W_VoidBox), names=names, fields=fields, elsize=offset) @@ -493,8 +496,17 @@ def dtype_from_spec(space, w_spec): - w_lst = get_appbridge_cache(space).call_method(space, - 'numpy.core._internal', '_commastring', Arguments(space, [w_spec])) + + if we_are_translated(): + w_lst = get_appbridge_cache(space).call_method(space, + 'numpy.core._internal', '_commastring', Arguments(space, [w_spec])) + else: + # testing, handle manually + if space.eq_w(w_spec, space.wrap('u4,u4,u4')): + w_lst = space.newlist([space.wrap('u4')]*3) + else: + raise oefmt(space.w_RuntimeError, + "cannot parse w_spec") if not space.isinstance_w(w_lst, space.w_list) or space.len_w(w_lst) < 1: raise oefmt(space.w_RuntimeError, "_commastring is not returning a list with len >= 1") @@ -541,7 +553,7 @@ if size == 1: return subdtype size *= subdtype.elsize - return 
W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + return W_Dtype(types.VoidType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, space.gettypefor(boxes.W_VoidBox), shape=shape, subdtype=subdtype, elsize=size) @@ -587,8 +599,7 @@ if w_dtype is dtype.w_box_type: return dtype if space.isinstance_w(w_dtype, space.w_type): - raise oefmt(space.w_NotImplementedError, - "cannot create dtype with type '%N'", w_dtype) + return cache.w_objectdtype raise oefmt(space.w_TypeError, "data type not understood") @@ -655,7 +666,7 @@ def new_string_dtype(space, size, char=NPY.STRINGLTR): return W_Dtype( - types.StringType(), + types.StringType(space), elsize=size, num=NPY.STRING, kind=NPY.STRINGLTR, @@ -665,7 +676,7 @@ def new_unicode_dtype(space, size): - itemtype = types.UnicodeType() + itemtype = types.UnicodeType(space) return W_Dtype( itemtype, elsize=size * itemtype.get_element_size(), @@ -678,7 +689,7 @@ def new_void_dtype(space, size): return W_Dtype( - types.VoidType(), + types.VoidType(space), elsize=size, num=NPY.VOID, kind=NPY.VOIDLTR, @@ -690,126 +701,126 @@ class DtypeCache(object): def __init__(self, space): self.w_booldtype = W_Dtype( - types.Bool(), + types.Bool(space), num=NPY.BOOL, kind=NPY.GENBOOLLTR, char=NPY.BOOLLTR, w_box_type=space.gettypefor(boxes.W_BoolBox), ) self.w_int8dtype = W_Dtype( - types.Int8(), + types.Int8(space), num=NPY.BYTE, kind=NPY.SIGNEDLTR, char=NPY.BYTELTR, w_box_type=space.gettypefor(boxes.W_Int8Box), ) self.w_uint8dtype = W_Dtype( - types.UInt8(), + types.UInt8(space), num=NPY.UBYTE, kind=NPY.UNSIGNEDLTR, char=NPY.UBYTELTR, w_box_type=space.gettypefor(boxes.W_UInt8Box), ) self.w_int16dtype = W_Dtype( - types.Int16(), + types.Int16(space), num=NPY.SHORT, kind=NPY.SIGNEDLTR, char=NPY.SHORTLTR, w_box_type=space.gettypefor(boxes.W_Int16Box), ) self.w_uint16dtype = W_Dtype( - types.UInt16(), + types.UInt16(space), num=NPY.USHORT, kind=NPY.UNSIGNEDLTR, char=NPY.USHORTLTR, w_box_type=space.gettypefor(boxes.W_UInt16Box), ) 
self.w_int32dtype = W_Dtype( - types.Int32(), + types.Int32(space), num=NPY.INT, kind=NPY.SIGNEDLTR, char=NPY.INTLTR, w_box_type=space.gettypefor(boxes.W_Int32Box), ) self.w_uint32dtype = W_Dtype( - types.UInt32(), + types.UInt32(space), num=NPY.UINT, kind=NPY.UNSIGNEDLTR, char=NPY.UINTLTR, w_box_type=space.gettypefor(boxes.W_UInt32Box), ) self.w_longdtype = W_Dtype( - types.Long(), + types.Long(space), num=NPY.LONG, kind=NPY.SIGNEDLTR, char=NPY.LONGLTR, w_box_type=space.gettypefor(boxes.W_LongBox), ) self.w_ulongdtype = W_Dtype( - types.ULong(), + types.ULong(space), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLTR, w_box_type=space.gettypefor(boxes.W_ULongBox), ) self.w_int64dtype = W_Dtype( - types.Int64(), + types.Int64(space), num=NPY.LONGLONG, kind=NPY.SIGNEDLTR, char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(boxes.W_Int64Box), ) self.w_uint64dtype = W_Dtype( - types.UInt64(), + types.UInt64(space), num=NPY.ULONGLONG, kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLONGLTR, w_box_type=space.gettypefor(boxes.W_UInt64Box), ) self.w_float32dtype = W_Dtype( - types.Float32(), + types.Float32(space), num=NPY.FLOAT, kind=NPY.FLOATINGLTR, char=NPY.FLOATLTR, w_box_type=space.gettypefor(boxes.W_Float32Box), ) self.w_float64dtype = W_Dtype( - types.Float64(), + types.Float64(space), num=NPY.DOUBLE, kind=NPY.FLOATINGLTR, char=NPY.DOUBLELTR, w_box_type=space.gettypefor(boxes.W_Float64Box), ) self.w_floatlongdtype = W_Dtype( - types.FloatLong(), + types.FloatLong(space), num=NPY.LONGDOUBLE, kind=NPY.FLOATINGLTR, char=NPY.LONGDOUBLELTR, w_box_type=space.gettypefor(boxes.W_FloatLongBox), ) self.w_complex64dtype = W_Dtype( - types.Complex64(), + types.Complex64(space), num=NPY.CFLOAT, kind=NPY.COMPLEXLTR, char=NPY.CFLOATLTR, w_box_type=space.gettypefor(boxes.W_Complex64Box), ) self.w_complex128dtype = W_Dtype( - types.Complex128(), + types.Complex128(space), num=NPY.CDOUBLE, kind=NPY.COMPLEXLTR, char=NPY.CDOUBLELTR, w_box_type=space.gettypefor(boxes.W_Complex128Box), ) 
self.w_complexlongdtype = W_Dtype( - types.ComplexLong(), + types.ComplexLong(space), num=NPY.CLONGDOUBLE, kind=NPY.COMPLEXLTR, char=NPY.CLONGDOUBLELTR, w_box_type=space.gettypefor(boxes.W_ComplexLongBox), ) self.w_stringdtype = W_Dtype( - types.StringType(), + types.StringType(space), elsize=0, num=NPY.STRING, kind=NPY.STRINGLTR, @@ -817,7 +828,7 @@ w_box_type=space.gettypefor(boxes.W_StringBox), ) self.w_unicodedtype = W_Dtype( - types.UnicodeType(), + types.UnicodeType(space), elsize=0, num=NPY.UNICODE, kind=NPY.UNICODELTR, @@ -825,7 +836,7 @@ w_box_type=space.gettypefor(boxes.W_UnicodeBox), ) self.w_voiddtype = W_Dtype( - types.VoidType(), + types.VoidType(space), elsize=0, num=NPY.VOID, kind=NPY.VOIDLTR, @@ -833,26 +844,33 @@ w_box_type=space.gettypefor(boxes.W_VoidBox), ) self.w_float16dtype = W_Dtype( - types.Float16(), + types.Float16(space), num=NPY.HALF, kind=NPY.FLOATINGLTR, char=NPY.HALFLTR, w_box_type=space.gettypefor(boxes.W_Float16Box), ) self.w_intpdtype = W_Dtype( - types.Long(), + types.Long(space), num=NPY.LONG, kind=NPY.SIGNEDLTR, char=NPY.INTPLTR, w_box_type=space.gettypefor(boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( - types.ULong(), + types.ULong(space), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, char=NPY.UINTPLTR, w_box_type=space.gettypefor(boxes.W_ULongBox), ) + self.w_objectdtype = W_Dtype( + types.ObjectType(space), + num=NPY.OBJECT, + kind=NPY.OBJECTLTR, + char=NPY.OBJECTLTR, + w_box_type=space.gettypefor(boxes.W_ObjectBox), + ) aliases = { NPY.BOOL: ['bool_', 'bool8'], NPY.BYTE: ['byte'], @@ -871,6 +889,7 @@ NPY.CLONGDOUBLE: ['clongdouble', 'clongfloat'], NPY.STRING: ['string_', 'str'], NPY.UNICODE: ['unicode_'], + NPY.OBJECT: ['object_'], } self.alternate_constructors = { NPY.BOOL: [space.w_bool], @@ -889,6 +908,8 @@ NPY.UNICODE: [space.w_unicode], NPY.VOID: [space.gettypefor(boxes.W_GenericBox)], #space.w_buffer, # XXX no buffer in space + NPY.OBJECT: [space.gettypefor(boxes.W_ObjectBox), + space.w_object], } float_dtypes = 
[self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] @@ -908,7 +929,7 @@ self.w_int64dtype, self.w_uint64dtype, ] + float_dtypes + complex_dtypes + [ self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype, - self.w_intpdtype, self.w_uintpdtype, + self.w_intpdtype, self.w_uintpdtype, self.w_objectdtype, ] self.float_dtypes_by_num_bytes = sorted( (dtype.elsize, dtype) @@ -960,6 +981,7 @@ 'USHORT': self.w_uint16dtype, 'FLOAT': self.w_float32dtype, 'BOOL': self.w_booldtype, + 'OBJECT': self.w_objectdtype, } typeinfo_partial = { diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -202,11 +202,16 @@ return self elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and w_idx.ndims() > 0: - return self.getitem_filter(space, w_idx) - try: - return self.implementation.descr_getitem(space, self, w_idx) - except ArrayArgumentException: - return self.getitem_array_int(space, w_idx) + w_ret = self.getitem_filter(space, w_idx) + else: + try: + w_ret = self.implementation.descr_getitem(space, self, w_idx) + except ArrayArgumentException: + w_ret = self.getitem_array_int(space, w_idx) + if isinstance(w_ret, boxes.W_ObjectBox): + #return the W_Root object, not a scalar + w_ret = w_ret.w_obj + return w_ret def getitem(self, space, index_list): return self.implementation.getitem_index(space, index_list) @@ -550,6 +555,7 @@ else: strides = self.descr_get_strides(space) space.setitem_str(w_d, 'strides', strides) + space.setitem_str(w_d, 'version', space.wrap(3)) return w_d w_pypy_data = None @@ -845,7 +851,7 @@ "new type not compatible with array.")) # Strides, shape does not change v = impl.astype(space, dtype) - return wrap_impl(space, w_type, self, v) + return wrap_impl(space, w_type, self, v) strides = impl.get_strides() if dims == 1 or strides[0] = 0 - else: - a = array(Polynomial()) - assert a.shape == () + diff 
--git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -17,6 +17,7 @@ def __init__(self): self.base = self self.elsize = 1 + self.num = 0 def create_slice(space, a, chunks): @@ -3150,11 +3151,7 @@ assert b[35] == 200 b[[slice(25, 30)]] = range(5) assert all(a[:5] == range(5)) - import sys - if '__pypy__' not in sys.builtin_module_names: - raises(TypeError, 'b[[[slice(25, 125)]]]') - else: - raises(NotImplementedError, 'b[[[slice(25, 125)]]]') + raises(IndexError, 'b[[[slice(25, 125)]]]') def test_cumsum(self): from numpy import arange diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -0,0 +1,162 @@ +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestObjectDtypes(BaseNumpyAppTest): + def test_scalar_from_object(self): + from numpy import array + import sys + class Polynomial(object): + def whatami(self): + return 'an object' + a = array(Polynomial()) + assert a.shape == () + assert a.sum().whatami() == 'an object' + + def test_uninitialized_object_array_is_filled_by_None(self): + import numpy as np + + a = np.ndarray([5], dtype="O") + + assert a[0] == None + + def test_object_arrays_add(self): + import numpy as np + + a = np.array(["foo"], dtype=object) + b = np.array(["bar"], dtype=object) + raises(TypeError, np.add, a, 1) + res = a + b + assert res[0] == "foobar" + + def test_bool_func(self): + import numpy as np + a = np.array(["foo"], dtype=object) + b = a and complex(1, -1) + assert b == complex(1, -1) + b = np.array(complex(1, -1)) and a + assert (b == a).all() + c = np.array([1, 2, 3]) + assert (a[0] != c[0]) + assert (c[0] != a[0]) + assert (a[0] > c[0]) + assert (not a[0] < c[0]) + assert (c[0] < a[0]) + assert (not c[0] > 
a[0]) + + def test_logical_ufunc(self): + import numpy as np + import sys + + if '__pypy__' in sys.builtin_module_names: + skip('need to refactor use of raw_xxx_op in types to make this work') + a = np.array(["foo"], dtype=object) + b = np.array([1], dtype=object) + d = np.array([complex(1, 10)], dtype=object) + c = np.logical_and(a, 1) + assert c.dtype == np.dtype('object') + assert c == 1 + c = np.logical_and(b, complex(1, -1)) + assert c.dtype == np.dtype('object') + assert c == complex(1, -1) + c = np.logical_and(d, b) + assert c == 1 + c = b & 1 + assert c.dtype == np.dtype('object') + assert (c == 1).all() + c = np.array(1) & b + assert (c == b).all() + + def test_reduce(self): + import numpy as np + class O(object): + def whatami(self): + return 'an object' + fiveOs = [O()] * 5 + a = np.array(fiveOs, dtype=object) + print np.maximum + b = np.maximum.reduce(a) + assert b is not None + + def test_complex_op(self): + import numpy as np + import sys + a = np.array(['abc', 'def'], dtype=object) + b = np.array([1, 2, 3], dtype=object) + c = np.array([complex(1, 1), complex(1, -1)], dtype=object) + for arg in (a,b,c): + assert (arg == np.real(arg)).all() + assert (0 == np.imag(arg)).all() + if '__pypy__' in sys.builtin_module_names: + skip('not implemented yet') + raises(AttributeError, np.conj, a) + res = np.conj(b) + assert (res == b).all() + res = np.conj(c) + assert res[0] == c[1] and res[1] == c[0] + + def test_keep_object_alive(self): + # only translated does it really test the gc + import numpy as np + import gc + class O(object): + def whatami(self): + return 'an object' + fiveOs = [O()] * 5 + a = np.array(fiveOs, dtype=object) + del fiveOs + gc.collect() + assert a[2].whatami() == 'an object' + + def test_array_interface(self): + import numpy as np + import sys + class DummyArray(object): + def __init__(self, interface, base=None): + self.__array_interface__ = interface + self.base = base + a = np.array([(1, 2, 3)], dtype='u4,u4,u4') + b = np.array([(1, 2, 
3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') + interface = dict(a.__array_interface__) + interface['shape'] = tuple([3]) + interface['strides'] = tuple([0]) + if '__pypy__' in sys.builtin_module_names: + skip('not implemented yet') + c = np.array(DummyArray(interface, base=a)) + c.dtype = a.dtype + #print c + assert (c == np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4') ).all() + + def test_for_object_scalar_creation(self): + import numpy as np + import sys + a = np.object_() + b = np.object_(3) + b2 = np.object_(3.0) + c = np.object_([4, 5]) + d = np.array([None])[0] + assert a is None + assert type(b) is int + assert type(b2) is float + assert type(c) is np.ndarray + assert c.dtype == object + assert type(d) is type(None) + if '__pypy__' in sys.builtin_module_names: + skip('not implemented yet') + e = np.object_([None, {}, []]) + assert e.dtype == object + + def test_mem_array_creation_invalid_specification(self): + # while not specifically testing object dtype, this + # test segfaulted during ObjectType.store due to + # missing gc hooks + import numpy as np + import sys + ytype = np.object_ + if '__pypy__' in sys.builtin_module_names: + ytype = str + dt = np.dtype([('x', int), ('y', ytype)]) + # Correct way + a = np.array([(1, 'object')], dt) + # Wrong way - should complain about writing buffer to object dtype + raises(ValueError, np.array, [1, 'object'], dt) + diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ b/pypy/module/micronumpy/test/test_selection.py @@ -12,14 +12,11 @@ exp = sorted(range(len(exp)), key=exp.__getitem__) c = a.copy() res = a.argsort() - assert (res == exp).all(), '%r\n%r\n%r' % (a,res,exp) + assert (res == exp).all(), 'Failed sortng %r\na=%r\nres=%r\nexp=%r' % (dtype,a,res,exp) assert (a == c).all() # not modified a = arange(100, dtype=dtype) assert (a.argsort() == a).all() - import sys - if '__pypy__' in 
sys.builtin_module_names: - raises(NotImplementedError, 'arange(10,dtype="float16").argsort()') def test_argsort_ndim(self): from numpy import array @@ -63,14 +60,13 @@ 'i2', complex]: a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype) exp = sorted(list(a)) - res = a.copy() - res.sort() - assert (res == exp).all(), '%r\n%r\n%r' % (a,res,exp) + a.sort() + assert (a == exp).all(), 'Failed sorting %r\n%r\n%r' % (dtype, a, exp) a = arange(100, dtype=dtype) c = a.copy() a.sort() - assert (a == c).all() + assert (a == c).all(), 'Failed sortng %r\na=%r\nc=%r' % (dtype,a,c) def test_sort_nonnative(self): from numpy import array diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -397,11 +397,11 @@ for i in range(3): assert min_c_b[i] == min(b[i], c) - def test_scalar(self): + def test_all_available(self): # tests that by calling all available ufuncs on scalars, none will # raise uncaught interp-level exceptions, (and crash the test) # and those that are uncallable can be accounted for. - # test on the four base-class dtypes: int, bool, float, complex + # test on the base-class dtypes: int, bool, float, complex, object # We need this test since they have no common base class. 
import numpy as np def find_uncallable_ufuncs(dtype): @@ -412,6 +412,11 @@ if isinstance(u, np.ufunc): try: u(* [array] * u.nin) + except AttributeError: + pass + except NotImplementedError: + print s + uncallable.add(s) except TypeError: assert s not in uncallable uncallable.add(s) @@ -427,6 +432,9 @@ 'fabs', 'fmod', 'invert', 'mod', 'logaddexp', 'logaddexp2', 'left_shift', 'right_shift', 'copysign', 'signbit', 'ceil', 'floor', 'trunc']) + assert find_uncallable_ufuncs('object') == set( + ['isnan', 'logaddexp2', 'copysign', 'isfinite', 'signbit', + 'isinf', 'logaddexp']) def test_int_only(self): from numpy import bitwise_and, array diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -9,6 +9,7 @@ from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray +py.test.skip('move these to pypyjit/test_pypy_c/test_micronumpy') class TestNumpyJit(LLJitMixin): graph = None diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -3,8 +3,9 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.objspace.std.floatobject import float2string from pypy.objspace.std.complexobject import str_format +from pypy.interpreter.baseobjspace import W_Root, ObjSpace from rpython.rlib import clibffi, jit, rfloat, rcomplex -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, \ most_neg_value_of, LONG_BIT from rpython.rlib.rawstorage import (alloc_raw_storage, @@ -14,10 +15,12 @@ pack_float80, unpack_float80) from rpython.rlib.rstruct.nativefmttable import native_is_bigendian from rpython.rlib.rstruct.runpack import runpack -from 
rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.annlowlevel import cast_instance_to_gcref,\ + cast_gcref_to_instance +from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy import boxes -from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage +from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage, V_OBJECTSTORE from pypy.module.micronumpy.strides import calc_strides degToRad = math.pi / 180.0 @@ -109,10 +112,12 @@ return dispatcher class BaseType(object): - _immutable_fields_ = ['native'] + _immutable_fields_ = ['native', 'space'] - def __init__(self, native=True): + def __init__(self, space, native=True): + assert isinstance(space, ObjSpace) self.native = native + self.space = space def __repr__(self): return self.__class__.__name__ @@ -191,7 +196,7 @@ with arr as storage: self._write(storage, i, offset, self.unbox(box)) - def fill(self, storage, width, box, start, stop, offset): + def fill(self, storage, width, box, start, stop, offset, gcstruct): value = self.unbox(box) for i in xrange(start, stop, width): self._write(storage, i, offset, value) @@ -306,7 +311,7 @@ @raw_unary_op def rint(self, v): - float64 = Float64() + float64 = Float64(self.space) return float64.rint(float64.box(v)) class Bool(BaseType, Primitive): @@ -399,7 +404,7 @@ def round(self, v, decimals=0): if decimals != 0: return v - return Float64().box(self.unbox(v)) + return Float64(self.space).box(self.unbox(v)) class Integer(Primitive): _mixin_ = True @@ -444,7 +449,7 @@ self.T is rffi.LONG or self.T is rffi.LONGLONG): if v2 == -1 and v1 == self.for_computation(most_neg_value_of(self.T)): return self.box(0) - return self.box(v1 // v2) + return self.box(v1 / v2) @simple_binary_op def mod(self, v1, v2): @@ -1152,7 +1157,7 @@ with arr as storage: self._write(storage, i, offset, self.unbox(box)) - def fill(self, storage, width, box, start, stop, offset): + 
def fill(self, storage, width, box, start, stop, offset, gcstruct): value = self.unbox(box) for i in xrange(start, stop, width): self._write(storage, i, offset, value) @@ -1253,25 +1258,25 @@ def ge(self, v1, v2): return self._lt(v2, v1) or self._eq(v2, v1) - def _bool(self, v): + def _cbool(self, v): return bool(v[0]) or bool(v[1]) @raw_binary_op def logical_and(self, v1, v2): - return self._bool(v1) and self._bool(v2) + return self._cbool(v1) and self._cbool(v2) @raw_binary_op def logical_or(self, v1, v2): - return self._bool(v1) or self._bool(v2) + return self._cbool(v1) or self._cbool(v2) @raw_unary_op def logical_not(self, v): - return not self._bool(v) + return not self._cbool(v) @raw_binary_op def logical_xor(self, v1, v2): - a = self._bool(v1) - b = self._bool(v2) + a = self._cbool(v1) + b = self._cbool(v2) return (not b and a) or (not a and b) def min(self, v1, v2): @@ -1629,6 +1634,283 @@ BoxType = boxes.W_ComplexLongBox ComponentBoxType = boxes.W_FloatLongBox +_all_objs_for_tests = [] # for tests + +class ObjectType(Primitive, BaseType): + T = lltype.Signed + BoxType = boxes.W_ObjectBox + + def get_element_size(self): + return rffi.sizeof(lltype.Signed) + + def coerce(self, space, dtype, w_item): + if isinstance(w_item, boxes.W_ObjectBox): + return w_item + return boxes.W_ObjectBox(w_item) + + def coerce_subtype(self, space, w_subtype, w_item): + # return the item itself + return self.unbox(self.box(w_item)) + + def store(self, arr, i, offset, box): + if arr.gcstruct is V_OBJECTSTORE: + raise oefmt(self.space.w_NotImplementedError, + "cannot store object in array with no gc hook") + self._write(arr.storage, i, offset, self.unbox(box), + arr.gcstruct) + + def read(self, arr, i, offset, dtype=None): + return self.box(self._read(arr.storage, i, offset)) + + def byteswap(self, w_v): + return w_v + + @jit.dont_look_inside + def _write(self, storage, i, offset, w_obj, gcstruct): + # no GC anywhere in this function! 
+ if we_are_translated(): + from rpython.rlib import rgc + rgc.ll_writebarrier(gcstruct) + value = rffi.cast(lltype.Signed, cast_instance_to_gcref(w_obj)) + else: + value = len(_all_objs_for_tests) + _all_objs_for_tests.append(w_obj) + raw_storage_setitem_unaligned(storage, i + offset, value) + + @jit.dont_look_inside + def _read(self, storage, i, offset): + res = raw_storage_getitem_unaligned(self.T, storage, i + offset) + if we_are_translated(): + gcref = rffi.cast(llmemory.GCREF, res) + w_obj = cast_gcref_to_instance(W_Root, gcref) + else: + w_obj = _all_objs_for_tests[res] + return w_obj + + def fill(self, storage, width, box, start, stop, offset, gcstruct): + value = self.unbox(box) + for i in xrange(start, stop, width): + self._write(storage, i, offset, value, gcstruct) + + def unbox(self, box): + if isinstance(box, self.BoxType): + return box.w_obj + else: + raise oefmt(self.space.w_NotImplementedError, + "object dtype cannot unbox %s", str(box)) + + @specialize.argtype(1) + def box(self, w_obj): + if isinstance(w_obj, W_Root): + pass + elif isinstance(w_obj, bool): + w_obj = self.space.newbool(w_obj) + elif isinstance(w_obj, int): + w_obj = self.space.newint(w_obj) + elif isinstance(w_obj, lltype.Number): + w_obj = self.space.newint(w_obj) + elif isinstance(w_obj, float): + w_obj = self.space.newfloat(w_obj) + elif w_obj is None: + w_obj = self.space.w_None + else: + raise oefmt(self.space.w_NotImplementedError, + "cannot create object array/scalar from lltype") + return self.BoxType(w_obj) + + @specialize.argtype(1, 2) + def box_complex(self, real, imag): + if isinstance(real, rffi.r_singlefloat): + real = rffi.cast(rffi.DOUBLE, real) + if isinstance(imag, rffi.r_singlefloat): + imag = rffi.cast(rffi.DOUBLE, imag) + w_obj = self.space.newcomplex(real, imag) + return self.BoxType(w_obj) + + def str_format(self, box): + return self.space.str_w(self.space.repr(self.unbox(box))) + + def runpack_str(self, space, s): + raise oefmt(space.w_NotImplementedError, + 
"fromstring not implemented for object type") + + def to_builtin_type(self, space, box): + assert isinstance(box, self.BoxType) + return box.w_obj + + @staticmethod + def for_computation(v): + return v + + @raw_binary_op + def eq(self, v1, v2): + return self.space.eq_w(v1, v2) + + @simple_binary_op + def max(self, v1, v2): + if self.space.is_true(self.space.ge(v1, v2)): + return v1 + return v2 + + @simple_binary_op + def min(self, v1, v2): + if self.space.is_true(self.space.le(v1, v2)): + return v1 + return v2 + + @raw_unary_op + def bool(self,v): + return self._obool(v) + + def _obool(self, v): + if self.space.is_true(v): + return True + return False + + @raw_binary_op + def logical_and(self, v1, v2): + if self._obool(v1): + return self.space.bool_w(v2) + return self.space.bool_w(v1) + + @raw_binary_op + def logical_or(self, v1, v2): + if self._obool(v1): + return self.space.bool_w(v1) + return self.space.bool_w(v2) + + @raw_unary_op + def logical_not(self, v): + return not self._obool(v) + + @raw_binary_op + def logical_xor(self, v1, v2): + a = self._obool(v1) + b = self._obool(v2) + return (not b and a) or (not a and b) + + @simple_binary_op + def bitwise_and(self, v1, v2): + return self.space.and_(v1, v2) + + @simple_binary_op + def bitwise_or(self, v1, v2): + return self.space.or_(v1, v2) + + @simple_binary_op + def bitwise_xor(self, v1, v2): + return self.space.xor(v1, v2) + + @simple_binary_op + def pow(self, v1, v2): + return self.space.pow(v1, v2, self.space.wrap(1)) + + @simple_unary_op + def reciprocal(self, v1): + return self.space.div(self.space.wrap(1.0), v1) + + @simple_unary_op + def sign(self, v): + zero = self.space.wrap(0) + one = self.space.wrap(1) + m_one = self.space.wrap(-1) + if self.space.is_true(self.space.gt(v, zero)): + return one + elif self.space.is_true(self.space.lt(v, zero)): + return m_one + else: + return zero + + @simple_unary_op + def real(self, v): + return v + + @simple_unary_op + def imag(self, v): + return 0 + + 
@simple_unary_op + def square(self, v): + return self.space.mul(v, v) + + @raw_binary_op + def le(self, v1, v2): + return self.space.bool_w(self.space.le(v1, v2)) + + @raw_binary_op + def ge(self, v1, v2): + return self.space.bool_w(self.space.ge(v1, v2)) + + @raw_binary_op + def lt(self, v1, v2): + return self.space.bool_w(self.space.lt(v1, v2)) + + @raw_binary_op + def gt(self, v1, v2): + return self.space.bool_w(self.space.gt(v1, v2)) + + @raw_binary_op + def ne(self, v1, v2): + return self.space.bool_w(self.space.ne(v1, v2)) + +def add_attributeerr_op(cls, op): + def func(self, *args): + raise oefmt(self.space.w_AttributeError, + "%s", op) + func.__name__ = 'object_' + op + setattr(cls, op, func) + +def add_unsupported_op(cls, op): + def func(self, *args): + raise oefmt(self.space.w_TypeError, + "ufunc '%s' not supported for input types", op) + func.__name__ = 'object_' + op + setattr(cls, op, func) + +def add_unary_op(cls, op, method): + @simple_unary_op + def func(self, w_v): + space = self.space + w_impl = space.lookup(w_v, method) + if w_impl is None: + raise oefmt(space.w_AttributeError, 'unknown op "%s" on object' % op) + return space.get_and_call_function(w_impl, w_v) + func.__name__ = 'object_' + op + setattr(cls, op, func) + +def add_space_unary_op(cls, op): + @simple_unary_op + def func(self, v): + return getattr(self.space, op)(v) + func.__name__ = 'object_' + op + setattr(cls, op, func) + +def add_space_binary_op(cls, op): + @simple_binary_op + def func(self, v1, v2): + return getattr(self.space, op)(v1, v2) + func.__name__ = 'object_' + op + setattr(cls, op, func) + +for op in ('copysign', 'isfinite', 'isinf', 'isnan', 'logaddexp', 'logaddexp2', + 'signbit'): + add_unsupported_op(ObjectType, op) +for op in ('arctan2', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', + 'arctanh', 'ceil', 'floor', 'cos', 'sin', 'tan', 'cosh', 'sinh', + 'tanh', 'radians', 'degrees', 'exp','exp2', 'expm1', 'fabs', + 'log', 'log10', 'log1p', 'log2', 'sqrt', 'trunc'): 
+ add_attributeerr_op(ObjectType, op) +for op in ('abs', 'neg', 'pos', 'invert'): + add_space_unary_op(ObjectType, op) +for op, method in (('conj', 'descr_conjugate'), ('rint', 'descr_rint')): + add_unary_op(ObjectType, op, method) +for op in ('add', 'floordiv', 'div', 'mod', 'mul', 'sub', 'lshift', 'rshift'): + add_space_binary_op(ObjectType, op) + +ObjectType.fmax = ObjectType.max +ObjectType.fmin = ObjectType.min +ObjectType.fmod = ObjectType.mod + class FlexibleType(BaseType): def get_element_size(self): return rffi.sizeof(self.T) @@ -1758,7 +2040,7 @@ def bool(self, v): return bool(self.to_str(v)) - def fill(self, storage, width, box, start, stop, offset): + def fill(self, storage, width, box, start, stop, offset, gcstruct): for i in xrange(start, stop, width): self._store(storage, i, offset, box, width) @@ -1775,6 +2057,57 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "coerce (probably from set_item) not implemented for unicode type")) + def store(self, arr, i, offset, box): + assert isinstance(box, boxes.W_UnicodeBox) + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def read(self, arr, i, offset, dtype=None): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def str_format(self, item): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def to_builtin_type(self, space, box): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def eq(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def ne(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def lt(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def le(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def gt(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type 
not completed") + + def ge(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def logical_and(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def logical_or(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def logical_not(self, v): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + @str_binary_op + def logical_xor(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def bool(self, v): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def fill(self, storage, width, box, start, stop, offset, gcstruct): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + class VoidType(FlexibleType): T = lltype.Char @@ -1882,6 +2215,9 @@ items_w = space.fixedview(w_item) elif isinstance(w_item, W_NDimArray) and w_item.is_scalar(): items_w = space.fixedview(w_item.get_scalar_value()) + elif space.isinstance_w(w_item, space.w_list): + raise oefmt(space.w_TypeError, + "expected a readable buffer object") else: # XXX support initializing from readable buffers items_w = [w_item] * len(dtype.fields) @@ -1913,7 +2249,7 @@ for k in range(size): storage[k + i + ofs] = box_storage[k + box.ofs] - def fill(self, storage, width, box, start, stop, offset): + def fill(self, storage, width, box, start, stop, offset, gcstruct): assert isinstance(box, boxes.W_VoidBox) assert width == box.dtype.elsize for i in xrange(start, stop, width): diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -349,7 +349,7 @@ if dtype.is_flexible(): raise OperationError(space.w_TypeError, space.wrap('Not implemented for this type')) - if (self.int_only and not dtype.is_int() or + if (self.int_only and not (dtype.is_int() or 
dtype.is_object()) or not self.allow_bool and dtype.is_bool() or not self.allow_complex and dtype.is_complex()): raise oefmt(space.w_TypeError, @@ -378,6 +378,8 @@ w_val = self.func(calc_dtype, w_obj.get_scalar_value().convert_to(space, calc_dtype)) if out is None: + if res_dtype.is_object(): + w_val = w_obj.get_scalar_value() return w_val w_val = res_dtype.coerce(space, w_val) if out.is_scalar(): @@ -434,11 +436,20 @@ w_rhs = numpify(space, w_rhs) w_ldtype = _get_dtype(space, w_lhs) w_rdtype = _get_dtype(space, w_rhs) - if w_ldtype.is_str() and w_rdtype.is_str() and \ + if w_ldtype.is_object() or w_rdtype.is_object(): + pass + elif w_ldtype.is_str() and w_rdtype.is_str() and \ self.comparison_func: pass - elif (w_ldtype.is_str() or w_rdtype.is_str()) and \ + elif (w_ldtype.is_str()) and \ self.comparison_func and w_out is None: + if self.name in ('equal', 'less_equal', 'less'): + return space.wrap(False) + return space.wrap(True) + elif (w_rdtype.is_str()) and \ + self.comparison_func and w_out is None: + if self.name in ('not_equal','less', 'less_equal'): + return space.wrap(True) return space.wrap(False) elif w_ldtype.is_flexible() or w_rdtype.is_flexible(): if self.comparison_func: @@ -463,9 +474,9 @@ w_ldtype, w_rdtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if (self.int_only and (not w_ldtype.is_int() or - not w_rdtype.is_int() or - not calc_dtype.is_int()) or + if (self.int_only and (not (w_ldtype.is_int() or w_ldtype.is_object()) or + not (w_rdtype.is_int() or w_rdtype.is_object()) or + not (calc_dtype.is_int() or calc_dtype.is_object())) or not self.allow_bool and (w_ldtype.is_bool() or w_rdtype.is_bool()) or not self.allow_complex and (w_ldtype.is_complex() or @@ -643,7 +654,7 @@ # from frompyfunc pass # mimic NpyIter_AdvancedNew with a nditer - w_itershape = space.newlist([space.wrap(i) for i in iter_shape]) + w_itershape = space.newlist([space.wrap(i) for i in iter_shape]) nd_it = W_NDIter(space, space.newlist(inargs 
+ outargs), w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, w_itershape) @@ -749,7 +760,7 @@ else: raise oefmt(space.w_TypeError, "a type-string for %s " \ "requires 1 typecode or %d typecode(s) before and %d" \ - " after the -> sign, not '%s'", self.name, self.nin, + " after the -> sign, not '%s'", self.name, self.nin, self.nout, type_tup) except KeyError: raise oefmt(space.w_ValueError, "unknown typecode in" \ @@ -773,11 +784,11 @@ for j in range(self.nargs): if dtypes[j] is not None and dtypes[j] != _dtypes[i+j]: allok = False - if allok: + if allok: break else: if len(self.funcs) > 1: - + dtypesstr = '' for d in dtypes: if d is None: @@ -787,7 +798,7 @@ _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ for d in _dtypes]) raise oefmt(space.w_TypeError, - "input dtype [%s] did not match any known dtypes [%s] ", + "input dtype [%s] did not match any known dtypes [%s] ", dtypesstr,_dtypesstr) i = 0 # Fill in empty dtypes @@ -807,7 +818,7 @@ assert isinstance(curarg, W_NDimArray) if len(arg_shapes[i]) != curarg.ndims(): # reshape - + sz = product(curarg.get_shape()) * curarg.get_dtype().elsize with curarg.implementation as storage: inargs[i] = W_NDimArray.from_shape_and_storage( @@ -865,7 +876,7 @@ "%s of gufunc was not specified", self.name, name, _i, core_dim_index, self.signature) target_dims.append(v) - arg_shapes.append(iter_shape + target_dims) + arg_shapes.append(iter_shape + target_dims) continue n = len(curarg.get_shape()) - num_dims if n < 0: @@ -907,7 +918,7 @@ raise oefmt(space.w_ValueError, "%s: %s operand %d has a " "mismatch in its core dimension %d, with gufunc " "signature %s (expected %d, got %d)", - self.name, name, _i, j, + self.name, name, _i, j, self.signature, matched_dims[core_dim_index], dims_to_match[core_dim_index]) #print 'adding',iter_shape,'+',dims_to_match,'to arg_shapes' @@ -950,6 +961,10 @@ return dt1 if dt1 is None: return dt2 + + if dt1.num == NPY.OBJECT or dt2.num == NPY.OBJECT: + return 
get_dtype_cache(space).w_objectdtype + # dt1.num should be <= dt2.num if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 @@ -1032,6 +1047,8 @@ @jit.unroll_safe def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): + if dt.is_object(): + return dt if promote_to_largest: if dt.kind == NPY.GENBOOLLTR or dt.kind == NPY.SIGNEDLTR: if dt.elsize * 8 < LONG_BIT: @@ -1064,6 +1081,7 @@ uint64_dtype = get_dtype_cache(space).w_uint64dtype complex_dtype = get_dtype_cache(space).w_complex128dtype float_dtype = get_dtype_cache(space).w_float64dtype + object_dtype = get_dtype_cache(space).w_objectdtype if isinstance(w_obj, boxes.W_GenericBox): dtype = w_obj.get_dtype(space) return find_binop_result_dtype(space, dtype, current_guess) @@ -1097,9 +1115,10 @@ return variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess - raise oefmt(space.w_NotImplementedError, - 'unable to create dtype from objects, "%T" instance not ' - 'supported', w_obj) + return object_dtype + #raise oefmt(space.w_NotImplementedError, + # 'unable to create dtype from objects, "%T" instance not ' + # 'supported', w_obj) def ufunc_dtype_caller(space, ufunc_name, op_name, nin, comparison_func, @@ -1263,7 +1282,7 @@ w_identity=None, name='', doc='', stack_inputs=False): ''' frompyfunc(func, nin, nout) #cpython numpy compatible frompyfunc(func, nin, nout, dtypes=None, signature='', - identity=None, name='', doc='', + identity=None, name='', doc='', stack_inputs=False) Takes an arbitrary Python function and returns a ufunc. @@ -1282,7 +1301,7 @@ dtypes: None or [dtype, ...] of the input, output args for each function, or 'match' to force output to exactly match input dtype Note that 'match' is a pypy-only extension to allow non-object - return dtypes + return dtypes signature*: str, default='' The mapping of input args to output args, defining the inner-loop indexing. 
If it is empty, the func operates on scalars @@ -1293,7 +1312,7 @@ stack_inputs*: boolean, whether the function is of the form out = func(*in) False or - func(*[in + out]) True + func(*[in + out]) True only one of out_dtype or signature may be specified diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -686,6 +686,15 @@ lambda_customtrace = lambda: customtrace """ + at specialize.ll() +def ll_writebarrier(gc_obj): + """Use together with custom tracers. When you update some object pointer + stored in raw memory, you must call this function on 'gc_obj', which must + be the object of type TP with the custom tracer (*not* the value stored!). + This makes sure that the custom hook will be called again.""" + from rpython.rtyper.lltypesystem.lloperation import llop + llop.gc_writebarrier(lltype.Void, gc_obj) + class RegisterGcTraceEntry(ExtRegistryEntry): _about_ = register_custom_trace_hook From noreply at buildbot.pypy.org Thu Apr 23 21:25:10 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 23 Apr 2015 21:25:10 +0200 (CEST) Subject: [pypy-commit] pypy default: skip one more object non-implemented feature Message-ID: <20150423192510.8982D1C1229@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r76906:86b0eb2de56f Date: 2015-04-23 22:24 +0300 http://bitbucket.org/pypy/pypy/changeset/86b0eb2de56f/ Log: skip one more object non-implemented feature diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ b/pypy/module/micronumpy/test/test_selection.py @@ -218,6 +218,7 @@ def test_sort_objects(self): # test object array sorts. 
+ skip('object type not supported yet') from numpy import empty try: a = empty((101,), dtype=object) From noreply at buildbot.pypy.org Thu Apr 23 21:25:11 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 23 Apr 2015 21:25:11 +0200 (CEST) Subject: [pypy-commit] pypy default: fix astype creation to match memory layout of source array Message-ID: <20150423192511.C17731C1229@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r76907:d198d926afb8 Date: 2015-04-23 22:24 +0300 http://bitbucket.org/pypy/pypy/changeset/d198d926afb8/ Log: fix astype creation to match memory layout of source array diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -328,8 +328,11 @@ return ArrayBuffer(self, readonly) def astype(self, space, dtype): - strides, backstrides = calc_strides(self.get_shape(), dtype, - self.order) + # we want to create a new array, but must respect the strides + # in self. 
So find a factor of the itemtype.elsize, and use this + factor = float(dtype.elsize) / self.dtype.elsize + strides = [int(factor*s) for s in self.get_strides()] + backstrides = [int(factor*s) for s in self.get_backstrides()] impl = ConcreteArray(self.get_shape(), dtype, self.order, strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2183,7 +2183,8 @@ assert b.dtype == 'bool' a = arange(6, dtype='f4').reshape(2,3) - b = a.astype('i4') + b = a.T.astype('i4') + assert (a.T.strides == b.strides) a = array('x').astype('S3').dtype assert a.itemsize == 3 From noreply at buildbot.pypy.org Thu Apr 23 22:35:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 23 Apr 2015 22:35:51 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20150423203551.A9CFC1C12C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r595:fe9e99ca741b Date: 2015-04-23 22:36 +0200 http://bitbucket.org/pypy/pypy.org/changeset/fe9e99ca741b/ Log: update the values diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $22674 of $80000 (28.3%) + $28642 of $80000 (35.8%)
    From noreply at buildbot.pypy.org Thu Apr 23 23:26:55 2015 From: noreply at buildbot.pypy.org (amauryfa) Date: Thu, 23 Apr 2015 23:26:55 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Fix the _curses module to work with Python3. Message-ID: <20150423212655.2DB7F1C0EB2@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3k Changeset: r76908:105de1f4b6b3 Date: 2015-04-23 23:24 +0200 http://bitbucket.org/pypy/pypy/changeset/105de1f4b6b3/ Log: Fix the _curses module to work with Python3. Also fix test_curses which is still in CPython3.2... Apply fix from CPython issue20358. diff --git a/lib-python/3/test/test_curses.py b/lib-python/3/test/test_curses.py --- a/lib-python/3/test/test_curses.py +++ b/lib-python/3/test/test_curses.py @@ -115,8 +115,8 @@ stdscr.notimeout(1) win2.overlay(win) win2.overwrite(win) - win2.overlay(win, 1, 2, 3, 3, 2, 1) - win2.overwrite(win, 1, 2, 3, 3, 2, 1) + win2.overlay(win, 1, 2, 2, 1, 3, 3) + win2.overwrite(win, 1, 2, 2, 1, 3, 3) stdscr.redrawln(1,2) stdscr.scrollok(1) diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -484,13 +484,13 @@ def _chtype(ch): return int(ffi.cast("chtype", ch)) -def _texttype(text): - if isinstance(text, str): +def _bytestype(text): + if isinstance(text, bytes): return text - elif isinstance(text, unicode): - return str(text) # default encoding + elif isinstance(text, str): + return text.encode() else: - raise TypeError("str or unicode expected, got a '%s' object" + raise TypeError("bytes or str expected, got a '%s' object" % (type(text).__name__,)) @@ -606,7 +606,7 @@ @_argspec(1, 1, 2) def addstr(self, y, x, text, attr=None): - text = _texttype(text) + text = _bytestype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -620,7 +620,7 @@ @_argspec(2, 1, 2) def addnstr(self, y, x, text, n, attr=None): - text = _texttype(text) + text = _bytestype(text) if attr is not None: attr_old = 
lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -799,7 +799,7 @@ @_argspec(1, 1, 2) def insstr(self, y, x, text, attr=None): - text = _texttype(text) + text = _bytestype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -813,7 +813,7 @@ @_argspec(2, 1, 2) def insnstr(self, y, x, text, n, attr=None): - text = _texttype(text) + text = _bytestype(text) if attr is not None: attr_old = lib.getattrs(self._win) lib.wattrset(self._win, attr) @@ -1221,7 +1221,7 @@ def putp(text): - text = _texttype(text) + text = _bytestype(text) return _check_ERR(lib.putp(text), "putp") @@ -1347,17 +1347,17 @@ def tigetflag(capname): _ensure_initialised_setupterm() - return lib.tigetflag(capname) + return lib.tigetflag(capname.encode()) def tigetnum(capname): _ensure_initialised_setupterm() - return lib.tigetnum(capname) + return lib.tigetnum(capname.encode()) def tigetstr(capname): _ensure_initialised_setupterm() - val = lib.tigetstr(capname) + val = lib.tigetstr(capname.encode()) if int(ffi.cast("intptr_t", val)) in (0, -1): return None return ffi.string(val) From noreply at buildbot.pypy.org Fri Apr 24 01:39:48 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 24 Apr 2015 01:39:48 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: extract as_dtype() from result_type() so it can be used in can_cast() as well Message-ID: <20150423233948.4227C1C1229@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76909:6048923edd3e Date: 2015-04-23 14:37 +0100 http://bitbucket.org/pypy/pypy/changeset/6048923edd3e/ Log: extract as_dtype() from result_type() so it can be used in can_cast() as well diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -296,21 +296,25 @@ raise oefmt(space.w_ValueError, "at least one array or dtype is required") result = None for w_arg in args_w: - if isinstance(w_arg, 
W_NDimArray): - dtype = w_arg.get_dtype() - elif isinstance(w_arg, W_GenericBox) or ( - space.isinstance_w(w_arg, space.w_int) or - space.isinstance_w(w_arg, space.w_float) or - space.isinstance_w(w_arg, space.w_complex) or - space.isinstance_w(w_arg, space.w_long) or - space.isinstance_w(w_arg, space.w_bool)): - dtype = ufuncs.find_dtype_for_scalar(space, w_arg) - else: - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), w_arg)) + dtype = as_dtype(space, w_arg) result = ufuncs.find_binop_result_dtype(space, result, dtype) return result @unwrap_spec(casting=str) def can_cast(space, w_from, w_totype, casting='safe'): return space.w_True + +def as_dtype(space, w_arg): + # roughly equivalent to CNumPy's PyArray_DescrConverter2 + if isinstance(w_arg, W_NDimArray): + return w_arg.get_dtype() + elif isinstance(w_arg, W_GenericBox) or ( + space.isinstance_w(w_arg, space.w_int) or + space.isinstance_w(w_arg, space.w_float) or + space.isinstance_w(w_arg, space.w_complex) or + space.isinstance_w(w_arg, space.w_long) or + space.isinstance_w(w_arg, space.w_bool)): + return ufuncs.find_dtype_for_scalar(space, w_arg) + else: + return space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_arg)) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -4049,4 +4049,3 @@ raises(TypeError, np.can_cast, 'i4', None) raises(TypeError, np.can_cast, None, 'i4') - From noreply at buildbot.pypy.org Fri Apr 24 01:39:49 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 24 Apr 2015 01:39:49 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: create W_Dtype.can_cast_to() Message-ID: <20150423233949.60F791C1229@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76910:9e8922d9dce5 Date: 2015-04-23 20:32 +0100 
http://bitbucket.org/pypy/pypy/changeset/9e8922d9dce5/ Log: create W_Dtype.can_cast_to() diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -302,7 +302,10 @@ @unwrap_spec(casting=str) def can_cast(space, w_from, w_totype, casting='safe'): - return space.w_True + target = as_dtype(space, w_totype) + origin = as_dtype(space, w_from) # XXX + return space.wrap(origin.can_cast_to(target)) + def as_dtype(space, w_arg): # roughly equivalent to CNumPy's PyArray_DescrConverter2 diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -87,6 +87,9 @@ def box_complex(self, real, imag): return self.itemtype.box_complex(real, imag) + def can_cast_to(self, other): + return True + def coerce(self, space, w_item): return self.itemtype.coerce(space, self, w_item) From noreply at buildbot.pypy.org Fri Apr 24 01:39:51 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 24 Apr 2015 01:39:51 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: hg merge default Message-ID: <20150423233951.368DB1C1229@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76911:1894fda8b437 Date: 2015-04-23 21:57 +0100 http://bitbucket.org/pypy/pypy/changeset/1894fda8b437/ Log: hg merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,12 +5,46 @@ .. this is a revision shortly after release-2.5.1 .. startrev: cb01edcb59414d9d93056e54ed060673d24e67c1 -Issue #2017: on non-Linux-x86 platforms, reduced the memory impact of +issue2005: +ignore errors on closing random file handles while importing a module (cpython compatibility) + +issue2013: +added constants to _ssl for TLS 1.1 and 1.2 + +issue2014: +Add PyLong_FromUnicode to cpyext. 
+ +issue2017: +On non-Linux-x86 platforms, reduced the memory impact of creating a lot of greenlets/tasklets. Particularly useful on Win32 and on ARM, where you used to get a MemoryError after only 2500-5000 greenlets (the 32-bit address space is exhausted). +Update gdb_pypy for python3 (gdb comatability) + +Merged rstrategies into rpython which provides a library for Storage Strategies + +Support unicode strings in numpy.dtype creation i.e. np.dtype(u'int64') + +Various rpython cleanups for vmprof support + +issue2019: +Fix isspace as called by rpython unicode.strip() + +issue2023: +In the cpyext 'Concrete Object Layer' API, +don't call methods on the object (which can be overriden), +but directly on the concrete base type. + +issue2029: +Hide the default_factory attribute in a dict + +issue2027: +Better document pyinteractive and add --withmod-time + .. branch: gc-incminimark-pinning-improve + +branch gc-incminimark-pinning-improve: Object Pinning is now used in `bz2` and `rzlib` (therefore also affects Python's `zlib`). In case the data to compress/decompress is inside the nursery (incminimark) it no longer needs to create a non-moving copy of it. This saves @@ -18,8 +52,18 @@ is introduced (`PYPY_GC_MAX_PINNED`) primarily for debugging purposes. .. branch: refactor-pycall + +branch refactor-pycall: Make `*`-unpacking in RPython function calls completely equivalent to passing the tuple's elements as arguments. In other words, `f(*(a, b))` now behaves exactly like `f(a, b)`. .. branch: issue2018 +branch issue2018: +Allow prebuilt rpython dict with function values + +.. branch: vmprof +.. Merged but then backed out, hopefully it will return as vmprof2 + +.. 
branch: object-dtype2 +Extend numpy dtypes to allow using objects with associated garbage collection hook diff --git a/pypy/goal/targetnumpystandalone.py b/pypy/goal/targetnumpystandalone.py deleted file mode 100644 --- a/pypy/goal/targetnumpystandalone.py +++ /dev/null @@ -1,43 +0,0 @@ - -""" Usage: - -./targetnumpystandalone-c array_size - -Will execute a give numpy bytecode. Arrays will be ranges (in float) modulo 10, -constants would be consecutive starting from one. - -Bytecode should contain letters 'a' 'l' and 'f' so far and be correct -""" - -import time -from pypy.module.micronumpy.compile import numpy_compile -from rpython.jit.codewriter.policy import JitPolicy -from rpython.rtyper.annlowlevel import hlstr - -def entry_point(argv): - if len(argv) != 3: - print __doc__ - return 1 - try: - size = int(argv[2]) - except ValueError: - print "INVALID LITERAL FOR INT:", argv[2] - print __doc__ - return 3 - t0 = time.time() - main(argv[0], size) - print "bytecode:", argv[0], "size:", size - print "took:", time.time() - t0 - return 0 - -def main(bc, size): - if not isinstance(bc, str): - bc = hlstr(bc) # for tests - a = numpy_compile(bc, size) - a = a.compute() - -def target(*args): - return entry_point, None - -def jitpolicy(driver): - return JitPolicy() diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -31,6 +31,9 @@ for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']: interpleveldefs[c] = 'space.wrap(constants.%s)' % c + def startup(self, space): + from pypy.module.micronumpy.concrete import _setup + _setup() class UMathModule(MixedModule): appleveldefs = {} diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -34,11 +34,13 @@ @staticmethod def from_shape(space, shape, dtype, order='C', w_instance=None, zero=True): - from pypy.module.micronumpy 
import concrete + from pypy.module.micronumpy import concrete, descriptor, boxes from pypy.module.micronumpy.strides import calc_strides strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides, zero=zero) + if dtype == descriptor.get_dtype_cache(space).w_objectdtype: + impl.fill(space, boxes.W_ObjectBox(space.w_None)) if w_instance: return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @@ -123,7 +125,7 @@ def get_shape(self): return self.implementation.get_shape() - def get_dtype(self): + def get_dtype(self, space=None): return self.implementation.dtype def get_order(self): diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -607,6 +607,19 @@ # arr.storage[i] = arg[i] return W_UnicodeBox(arr, 0, arr.dtype) +class W_ObjectBox(W_GenericBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.OBJECT) + + def __init__(self, w_obj): + self.w_obj = w_obj + + def convert_to(self, space, dtype): + if dtype.is_bool(): + return W_BoolBox(space.bool_w(self.w_obj)) + return self # XXX + + def descr__getattr__(self, space, w_key): + return space.getattr(self.w_obj, w_key) W_GenericBox.typedef = TypeDef("numpy.generic", __new__ = interp2app(W_GenericBox.descr__new__.im_func), @@ -856,3 +869,9 @@ __new__ = interp2app(W_UnicodeBox.descr__new__unicode_box.im_func), __len__ = interp2app(W_UnicodeBox.descr_len), ) + +W_ObjectBox.typedef = TypeDef("numpy.object_", W_ObjectBox.typedef, + __new__ = interp2app(W_ObjectBox.descr__new__.im_func), + __getattr__ = interp2app(W_ObjectBox.descr__getattr__), +) + diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -3,7 +3,7 @@ """ import re from pypy.interpreter import special -from 
pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root +from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root, ObjSpace from pypy.interpreter.error import OperationError from rpython.rlib.objectmodel import specialize, instantiate from rpython.rlib.nonconst import NonConstant @@ -47,7 +47,7 @@ def lookup(self, name): return self.getdictvalue(self, name) -class FakeSpace(object): +class FakeSpace(ObjSpace): w_ValueError = W_TypeObject("ValueError") w_TypeError = W_TypeObject("TypeError") w_IndexError = W_TypeObject("IndexError") @@ -67,6 +67,7 @@ w_unicode = W_TypeObject("unicode") w_complex = W_TypeObject("complex") w_dict = W_TypeObject("dict") + w_object = W_TypeObject("object") def __init__(self): """NOT_RPYTHON""" @@ -88,7 +89,8 @@ return self.wrap(len(w_obj.items)) def getattr(self, w_obj, w_attr): - return StringObject(NonConstant('foo')) + assert isinstance(w_attr, StringObject) + return w_obj.getdictvalue(self, w_attr.v) def isinstance_w(self, w_obj, w_tp): try: diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,11 +1,11 @@ from pypy.interpreter.error import OperationError, oefmt -from rpython.rlib import jit +from rpython.rlib import jit, rgc from rpython.rlib.buffer import Buffer -from rpython.rlib.debug import make_sure_not_resized +from rpython.rlib.debug import make_sure_not_resized, debug_print from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE -from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.micronumpy import support, loop +from rpython.rtyper.lltypesystem import rffi, lltype, llmemory +from pypy.module.micronumpy import support, loop, constants as NPY from pypy.module.micronumpy.base import convert_to_array, W_NDimArray, \ ArrayArgumentException from pypy.module.micronumpy.iterators import ArrayIter @@ 
-13,11 +13,13 @@ RecordChunk, calc_strides, calc_new_strides, shape_agreement, calculate_broadcast_strides, calc_backstrides) from rpython.rlib.objectmodel import keepalive_until_here +from rpython.rtyper.annlowlevel import cast_gcref_to_instance +from pypy.interpreter.baseobjspace import W_Root class BaseConcreteArray(object): _immutable_fields_ = ['dtype?', 'storage', 'start', 'size', 'shape[*]', - 'strides[*]', 'backstrides[*]', 'order'] + 'strides[*]', 'backstrides[*]', 'order', 'gcstruct'] start = 0 parent = None flags = 0 @@ -326,13 +328,54 @@ return ArrayBuffer(self, readonly) def astype(self, space, dtype): - strides, backstrides = calc_strides(self.get_shape(), dtype, - self.order) + # we want to create a new array, but must respect the strides + # in self. So find a factor of the itemtype.elsize, and use this + factor = float(dtype.elsize) / self.dtype.elsize + strides = [int(factor*s) for s in self.get_strides()] + backstrides = [int(factor*s) for s in self.get_backstrides()] impl = ConcreteArray(self.get_shape(), dtype, self.order, strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) return impl +OBJECTSTORE = lltype.GcStruct('ObjectStore', + ('length', lltype.Signed), + ('step', lltype.Signed), + ('storage', llmemory.Address), + rtti=True) +offset_of_storage = llmemory.offsetof(OBJECTSTORE, 'storage') +offset_of_length = llmemory.offsetof(OBJECTSTORE, 'length') +offset_of_step = llmemory.offsetof(OBJECTSTORE, 'step') + +V_OBJECTSTORE = lltype.nullptr(OBJECTSTORE) + +def customtrace(gc, obj, callback, arg): + #debug_print('in customtrace w/obj', obj) + length = (obj + offset_of_length).signed[0] + step = (obj + offset_of_step).signed[0] + storage = (obj + offset_of_storage).address[0] + #debug_print('tracing', length, 'objects in ndarray.storage') + i = 0 + while i < length: + gc._trace_callback(callback, arg, storage) + storage += step + i += 1 + +lambda_customtrace = lambda: customtrace + +def _setup(): + 
rgc.register_custom_trace_hook(OBJECTSTORE, lambda_customtrace) + + at jit.dont_look_inside +def _create_objectstore(storage, length, elsize): + gcstruct = lltype.malloc(OBJECTSTORE) + # JIT does not support cast_ptr_to_adr + gcstruct.storage = llmemory.cast_ptr_to_adr(storage) + #print 'create gcstruct',gcstruct,'with storage',storage,'as',gcstruct.storage + gcstruct.length = length + gcstruct.step = elsize + return gcstruct + class ConcreteArrayNotOwning(BaseConcreteArray): def __init__(self, shape, dtype, order, strides, backstrides, storage, start=0): @@ -347,10 +390,11 @@ self.backstrides = backstrides self.storage = storage self.start = start + self.gcstruct = V_OBJECTSTORE def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.elsize, - box, 0, self.size, 0) + box, 0, self.size, 0, self.gcstruct) def set_shape(self, space, orig_array, new_shape): strides, backstrides = calc_strides(new_shape, self.dtype, @@ -374,17 +418,24 @@ def base(self): return None - class ConcreteArray(ConcreteArrayNotOwning): def __init__(self, shape, dtype, order, strides, backstrides, storage=lltype.nullptr(RAW_STORAGE), zero=True): + gcstruct = V_OBJECTSTORE if storage == lltype.nullptr(RAW_STORAGE): - storage = dtype.itemtype.malloc(support.product(shape) * - dtype.elsize, zero=zero) + length = support.product(shape) + if dtype.num == NPY.OBJECT: + storage = dtype.itemtype.malloc(length * dtype.elsize, zero=True) + gcstruct = _create_objectstore(storage, length, dtype.elsize) + else: + storage = dtype.itemtype.malloc(length * dtype.elsize, zero=zero) ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides, storage) + self.gcstruct = gcstruct def __del__(self): + if self.gcstruct: + self.gcstruct.length = 0 free_raw_storage(self.storage, track_allocation=False) @@ -423,6 +474,7 @@ parent = parent.parent # one level only self.parent = parent self.storage = parent.storage + self.gcstruct = parent.gcstruct self.order = parent.order 
self.dtype = dtype self.size = support.product(shape) * self.dtype.elsize @@ -480,6 +532,7 @@ class VoidBoxStorage(BaseConcreteArray): def __init__(self, size, dtype): self.storage = alloc_raw_storage(size) + self.gcstruct = V_OBJECTSTORE self.dtype = dtype self.size = size diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -38,6 +38,34 @@ raise oefmt(space.w_ValueError, "object __array__ method not producing an array") +def try_interface_method(space, w_object): + try: + w_interface = space.getattr(w_object, space.wrap("__array_interface__")) + except OperationError, e: + if e.match(space, space.w_AttributeError): + return None + raise + if w_interface is None: + # happens from compile.py + return None + version = space.int_w(space.finditem(w_interface, space.wrap("version"))) + if version < 3: + raise oefmt(space.w_NotImplementedError, + "__array_interface__ version %d not supported", version) + # make a view into the data + w_shape = space.finditem(w_interface, space.wrap('shape')) + w_dtype = space.finditem(w_interface, space.wrap('typestr')) + w_descr = space.finditem(w_interface, space.wrap('descr')) + data_w = space.listview(space.finditem(w_interface, space.wrap('data'))) + w_strides = space.finditem(w_interface, space.wrap('strides')) + shape = [space.int_w(i) for i in space.listview(w_shape)] + dtype = descriptor.decode_w_dtype(space, w_dtype) + rw = space.is_true(data_w[1]) + #print 'create view from shape',shape,'dtype',dtype,'descr',w_descr,'data',data_w[0],'rw',rw + raise oefmt(space.w_NotImplementedError, + "creating array from __array_interface__ not supported yet") + return + @unwrap_spec(ndmin=int, copy=bool, subok=bool) def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, @@ -63,7 +91,11 @@ # continue with w_array, but do further operations in place w_object = w_array copy = False - + if not isinstance(w_object, 
W_NDimArray): + w_array = try_interface_method(space, w_object) + if w_array is not None: + w_object = w_array + copy = False dtype = descriptor.decode_w_dtype(space, w_dtype) if space.is_none(w_order): diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -6,7 +6,7 @@ from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) from rpython.rlib import jit -from rpython.rlib.objectmodel import specialize, compute_hash +from rpython.rlib.objectmodel import specialize, compute_hash, we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from pypy.module.micronumpy import types, boxes, base, support, constants as NPY from pypy.module.micronumpy.appbridge import get_appbridge_cache @@ -56,7 +56,7 @@ self.char = char self.w_box_type = w_box_type if byteorder is None: - if itemtype.get_element_size() == 1: + if itemtype.get_element_size() == 1 or isinstance(itemtype, types.ObjectType): byteorder = NPY.IGNORE else: byteorder = NPY.NATIVE @@ -115,6 +115,9 @@ def is_str(self): return self.num == NPY.STRING + def is_object(self): + return self.num == NPY.OBJECT + def is_str_or_unicode(self): return self.num == NPY.STRING or self.num == NPY.UNICODE @@ -431,7 +434,7 @@ self.names.append(name) self.fields[name] = offset, dtype - self.itemtype = types.RecordType() + self.itemtype = types.RecordType(space) if self.is_flexible(): self.elsize = size @@ -446,7 +449,7 @@ endian = NPY.OPPBYTE if self.is_native() else NPY.NATBYTE elif newendian != NPY.IGNORE: endian = newendian - itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) + itemtype = self.itemtype.__class__(space, endian in (NPY.NATIVE, NPY.NATBYTE)) fields = self.fields if fields is None: fields = {} @@ -485,7 +488,7 @@ fields[fldname] = (offset, subdtype) offset += subdtype.elsize names.append(fldname) - return 
W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + return W_Dtype(types.RecordType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, space.gettypefor(boxes.W_VoidBox), names=names, fields=fields, elsize=offset) @@ -496,8 +499,17 @@ def dtype_from_spec(space, w_spec): - w_lst = get_appbridge_cache(space).call_method(space, - 'numpy.core._internal', '_commastring', Arguments(space, [w_spec])) + + if we_are_translated(): + w_lst = get_appbridge_cache(space).call_method(space, + 'numpy.core._internal', '_commastring', Arguments(space, [w_spec])) + else: + # testing, handle manually + if space.eq_w(w_spec, space.wrap('u4,u4,u4')): + w_lst = space.newlist([space.wrap('u4')]*3) + else: + raise oefmt(space.w_RuntimeError, + "cannot parse w_spec") if not space.isinstance_w(w_lst, space.w_list) or space.len_w(w_lst) < 1: raise oefmt(space.w_RuntimeError, "_commastring is not returning a list with len >= 1") @@ -544,7 +556,7 @@ if size == 1: return subdtype size *= subdtype.elsize - return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + return W_Dtype(types.VoidType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, space.gettypefor(boxes.W_VoidBox), shape=shape, subdtype=subdtype, elsize=size) @@ -590,8 +602,7 @@ if w_dtype is dtype.w_box_type: return dtype if space.isinstance_w(w_dtype, space.w_type): - raise oefmt(space.w_NotImplementedError, - "cannot create dtype with type '%N'", w_dtype) + return cache.w_objectdtype raise oefmt(space.w_TypeError, "data type not understood") @@ -658,7 +669,7 @@ def new_string_dtype(space, size, char=NPY.STRINGLTR): return W_Dtype( - types.StringType(), + types.StringType(space), elsize=size, num=NPY.STRING, kind=NPY.STRINGLTR, @@ -668,7 +679,7 @@ def new_unicode_dtype(space, size): - itemtype = types.UnicodeType() + itemtype = types.UnicodeType(space) return W_Dtype( itemtype, elsize=size * itemtype.get_element_size(), @@ -681,7 +692,7 @@ def new_void_dtype(space, size): return W_Dtype( - types.VoidType(), + 
types.VoidType(space), elsize=size, num=NPY.VOID, kind=NPY.VOIDLTR, @@ -693,126 +704,126 @@ class DtypeCache(object): def __init__(self, space): self.w_booldtype = W_Dtype( - types.Bool(), + types.Bool(space), num=NPY.BOOL, kind=NPY.GENBOOLLTR, char=NPY.BOOLLTR, w_box_type=space.gettypefor(boxes.W_BoolBox), ) self.w_int8dtype = W_Dtype( - types.Int8(), + types.Int8(space), num=NPY.BYTE, kind=NPY.SIGNEDLTR, char=NPY.BYTELTR, w_box_type=space.gettypefor(boxes.W_Int8Box), ) self.w_uint8dtype = W_Dtype( - types.UInt8(), + types.UInt8(space), num=NPY.UBYTE, kind=NPY.UNSIGNEDLTR, char=NPY.UBYTELTR, w_box_type=space.gettypefor(boxes.W_UInt8Box), ) self.w_int16dtype = W_Dtype( - types.Int16(), + types.Int16(space), num=NPY.SHORT, kind=NPY.SIGNEDLTR, char=NPY.SHORTLTR, w_box_type=space.gettypefor(boxes.W_Int16Box), ) self.w_uint16dtype = W_Dtype( - types.UInt16(), + types.UInt16(space), num=NPY.USHORT, kind=NPY.UNSIGNEDLTR, char=NPY.USHORTLTR, w_box_type=space.gettypefor(boxes.W_UInt16Box), ) self.w_int32dtype = W_Dtype( - types.Int32(), + types.Int32(space), num=NPY.INT, kind=NPY.SIGNEDLTR, char=NPY.INTLTR, w_box_type=space.gettypefor(boxes.W_Int32Box), ) self.w_uint32dtype = W_Dtype( - types.UInt32(), + types.UInt32(space), num=NPY.UINT, kind=NPY.UNSIGNEDLTR, char=NPY.UINTLTR, w_box_type=space.gettypefor(boxes.W_UInt32Box), ) self.w_longdtype = W_Dtype( - types.Long(), + types.Long(space), num=NPY.LONG, kind=NPY.SIGNEDLTR, char=NPY.LONGLTR, w_box_type=space.gettypefor(boxes.W_LongBox), ) self.w_ulongdtype = W_Dtype( - types.ULong(), + types.ULong(space), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLTR, w_box_type=space.gettypefor(boxes.W_ULongBox), ) self.w_int64dtype = W_Dtype( - types.Int64(), + types.Int64(space), num=NPY.LONGLONG, kind=NPY.SIGNEDLTR, char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(boxes.W_Int64Box), ) self.w_uint64dtype = W_Dtype( - types.UInt64(), + types.UInt64(space), num=NPY.ULONGLONG, kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLONGLTR, 
w_box_type=space.gettypefor(boxes.W_UInt64Box), ) self.w_float32dtype = W_Dtype( - types.Float32(), + types.Float32(space), num=NPY.FLOAT, kind=NPY.FLOATINGLTR, char=NPY.FLOATLTR, w_box_type=space.gettypefor(boxes.W_Float32Box), ) self.w_float64dtype = W_Dtype( - types.Float64(), + types.Float64(space), num=NPY.DOUBLE, kind=NPY.FLOATINGLTR, char=NPY.DOUBLELTR, w_box_type=space.gettypefor(boxes.W_Float64Box), ) self.w_floatlongdtype = W_Dtype( - types.FloatLong(), + types.FloatLong(space), num=NPY.LONGDOUBLE, kind=NPY.FLOATINGLTR, char=NPY.LONGDOUBLELTR, w_box_type=space.gettypefor(boxes.W_FloatLongBox), ) self.w_complex64dtype = W_Dtype( - types.Complex64(), + types.Complex64(space), num=NPY.CFLOAT, kind=NPY.COMPLEXLTR, char=NPY.CFLOATLTR, w_box_type=space.gettypefor(boxes.W_Complex64Box), ) self.w_complex128dtype = W_Dtype( - types.Complex128(), + types.Complex128(space), num=NPY.CDOUBLE, kind=NPY.COMPLEXLTR, char=NPY.CDOUBLELTR, w_box_type=space.gettypefor(boxes.W_Complex128Box), ) self.w_complexlongdtype = W_Dtype( - types.ComplexLong(), + types.ComplexLong(space), num=NPY.CLONGDOUBLE, kind=NPY.COMPLEXLTR, char=NPY.CLONGDOUBLELTR, w_box_type=space.gettypefor(boxes.W_ComplexLongBox), ) self.w_stringdtype = W_Dtype( - types.StringType(), + types.StringType(space), elsize=0, num=NPY.STRING, kind=NPY.STRINGLTR, @@ -820,7 +831,7 @@ w_box_type=space.gettypefor(boxes.W_StringBox), ) self.w_unicodedtype = W_Dtype( - types.UnicodeType(), + types.UnicodeType(space), elsize=0, num=NPY.UNICODE, kind=NPY.UNICODELTR, @@ -828,7 +839,7 @@ w_box_type=space.gettypefor(boxes.W_UnicodeBox), ) self.w_voiddtype = W_Dtype( - types.VoidType(), + types.VoidType(space), elsize=0, num=NPY.VOID, kind=NPY.VOIDLTR, @@ -836,26 +847,33 @@ w_box_type=space.gettypefor(boxes.W_VoidBox), ) self.w_float16dtype = W_Dtype( - types.Float16(), + types.Float16(space), num=NPY.HALF, kind=NPY.FLOATINGLTR, char=NPY.HALFLTR, w_box_type=space.gettypefor(boxes.W_Float16Box), ) self.w_intpdtype = W_Dtype( - 
types.Long(), + types.Long(space), num=NPY.LONG, kind=NPY.SIGNEDLTR, char=NPY.INTPLTR, w_box_type=space.gettypefor(boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( - types.ULong(), + types.ULong(space), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, char=NPY.UINTPLTR, w_box_type=space.gettypefor(boxes.W_ULongBox), ) + self.w_objectdtype = W_Dtype( + types.ObjectType(space), + num=NPY.OBJECT, + kind=NPY.OBJECTLTR, + char=NPY.OBJECTLTR, + w_box_type=space.gettypefor(boxes.W_ObjectBox), + ) aliases = { NPY.BOOL: ['bool_', 'bool8'], NPY.BYTE: ['byte'], @@ -874,6 +892,7 @@ NPY.CLONGDOUBLE: ['clongdouble', 'clongfloat'], NPY.STRING: ['string_', 'str'], NPY.UNICODE: ['unicode_'], + NPY.OBJECT: ['object_'], } self.alternate_constructors = { NPY.BOOL: [space.w_bool], @@ -892,6 +911,8 @@ NPY.UNICODE: [space.w_unicode], NPY.VOID: [space.gettypefor(boxes.W_GenericBox)], #space.w_buffer, # XXX no buffer in space + NPY.OBJECT: [space.gettypefor(boxes.W_ObjectBox), + space.w_object], } float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] @@ -911,7 +932,7 @@ self.w_int64dtype, self.w_uint64dtype, ] + float_dtypes + complex_dtypes + [ self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype, - self.w_intpdtype, self.w_uintpdtype, + self.w_intpdtype, self.w_uintpdtype, self.w_objectdtype, ] self.float_dtypes_by_num_bytes = sorted( (dtype.elsize, dtype) @@ -963,6 +984,7 @@ 'USHORT': self.w_uint16dtype, 'FLOAT': self.w_float32dtype, 'BOOL': self.w_booldtype, + 'OBJECT': self.w_objectdtype, } typeinfo_partial = { diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -202,11 +202,16 @@ return self elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and w_idx.ndims() > 0: - return self.getitem_filter(space, w_idx) - try: - return self.implementation.descr_getitem(space, self, w_idx) - except ArrayArgumentException: - 
return self.getitem_array_int(space, w_idx) + w_ret = self.getitem_filter(space, w_idx) + else: + try: + w_ret = self.implementation.descr_getitem(space, self, w_idx) + except ArrayArgumentException: + w_ret = self.getitem_array_int(space, w_idx) + if isinstance(w_ret, boxes.W_ObjectBox): + #return the W_Root object, not a scalar + w_ret = w_ret.w_obj + return w_ret def getitem(self, space, index_list): return self.implementation.getitem_index(space, index_list) @@ -550,6 +555,7 @@ else: strides = self.descr_get_strides(space) space.setitem_str(w_d, 'strides', strides) + space.setitem_str(w_d, 'version', space.wrap(3)) return w_d w_pypy_data = None @@ -845,7 +851,7 @@ "new type not compatible with array.")) # Strides, shape does not change v = impl.astype(space, dtype) - return wrap_impl(space, w_type, self, v) + return wrap_impl(space, w_type, self, v) strides = impl.get_strides() if dims == 1 or strides[0] = 0 - else: - a = array(Polynomial()) - assert a.shape == () + diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -17,6 +17,7 @@ def __init__(self): self.base = self self.elsize = 1 + self.num = 0 def create_slice(space, a, chunks): @@ -2182,7 +2183,8 @@ assert b.dtype == 'bool' a = arange(6, dtype='f4').reshape(2,3) - b = a.astype('i4') + b = a.T.astype('i4') + assert (a.T.strides == b.strides) a = array('x').astype('S3').dtype assert a.itemsize == 3 @@ -3150,11 +3152,7 @@ assert b[35] == 200 b[[slice(25, 30)]] = range(5) assert all(a[:5] == range(5)) - import sys - if '__pypy__' not in sys.builtin_module_names: - raises(TypeError, 'b[[[slice(25, 125)]]]') - else: - raises(NotImplementedError, 'b[[[slice(25, 125)]]]') + raises(IndexError, 'b[[[slice(25, 125)]]]') def test_cumsum(self): from numpy import arange diff --git a/pypy/module/micronumpy/test/test_object_arrays.py 
b/pypy/module/micronumpy/test/test_object_arrays.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -0,0 +1,162 @@ +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestObjectDtypes(BaseNumpyAppTest): + def test_scalar_from_object(self): + from numpy import array + import sys + class Polynomial(object): + def whatami(self): + return 'an object' + a = array(Polynomial()) + assert a.shape == () + assert a.sum().whatami() == 'an object' + + def test_uninitialized_object_array_is_filled_by_None(self): + import numpy as np + + a = np.ndarray([5], dtype="O") + + assert a[0] == None + + def test_object_arrays_add(self): + import numpy as np + + a = np.array(["foo"], dtype=object) + b = np.array(["bar"], dtype=object) + raises(TypeError, np.add, a, 1) + res = a + b + assert res[0] == "foobar" + + def test_bool_func(self): + import numpy as np + a = np.array(["foo"], dtype=object) + b = a and complex(1, -1) + assert b == complex(1, -1) + b = np.array(complex(1, -1)) and a + assert (b == a).all() + c = np.array([1, 2, 3]) + assert (a[0] != c[0]) + assert (c[0] != a[0]) + assert (a[0] > c[0]) + assert (not a[0] < c[0]) + assert (c[0] < a[0]) + assert (not c[0] > a[0]) + + def test_logical_ufunc(self): + import numpy as np + import sys + + if '__pypy__' in sys.builtin_module_names: + skip('need to refactor use of raw_xxx_op in types to make this work') + a = np.array(["foo"], dtype=object) + b = np.array([1], dtype=object) + d = np.array([complex(1, 10)], dtype=object) + c = np.logical_and(a, 1) + assert c.dtype == np.dtype('object') + assert c == 1 + c = np.logical_and(b, complex(1, -1)) + assert c.dtype == np.dtype('object') + assert c == complex(1, -1) + c = np.logical_and(d, b) + assert c == 1 + c = b & 1 + assert c.dtype == np.dtype('object') + assert (c == 1).all() + c = np.array(1) & b + assert (c == b).all() + + def test_reduce(self): + import numpy as np + class O(object): + def 
whatami(self): + return 'an object' + fiveOs = [O()] * 5 + a = np.array(fiveOs, dtype=object) + print np.maximum + b = np.maximum.reduce(a) + assert b is not None + + def test_complex_op(self): + import numpy as np + import sys + a = np.array(['abc', 'def'], dtype=object) + b = np.array([1, 2, 3], dtype=object) + c = np.array([complex(1, 1), complex(1, -1)], dtype=object) + for arg in (a,b,c): + assert (arg == np.real(arg)).all() + assert (0 == np.imag(arg)).all() + if '__pypy__' in sys.builtin_module_names: + skip('not implemented yet') + raises(AttributeError, np.conj, a) + res = np.conj(b) + assert (res == b).all() + res = np.conj(c) + assert res[0] == c[1] and res[1] == c[0] + + def test_keep_object_alive(self): + # only translated does it really test the gc + import numpy as np + import gc + class O(object): + def whatami(self): + return 'an object' + fiveOs = [O()] * 5 + a = np.array(fiveOs, dtype=object) + del fiveOs + gc.collect() + assert a[2].whatami() == 'an object' + + def test_array_interface(self): + import numpy as np + import sys + class DummyArray(object): + def __init__(self, interface, base=None): + self.__array_interface__ = interface + self.base = base + a = np.array([(1, 2, 3)], dtype='u4,u4,u4') + b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4') + interface = dict(a.__array_interface__) + interface['shape'] = tuple([3]) + interface['strides'] = tuple([0]) + if '__pypy__' in sys.builtin_module_names: + skip('not implemented yet') + c = np.array(DummyArray(interface, base=a)) + c.dtype = a.dtype + #print c + assert (c == np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4') ).all() + + def test_for_object_scalar_creation(self): + import numpy as np + import sys + a = np.object_() + b = np.object_(3) + b2 = np.object_(3.0) + c = np.object_([4, 5]) + d = np.array([None])[0] + assert a is None + assert type(b) is int + assert type(b2) is float + assert type(c) is np.ndarray + assert c.dtype == object + assert type(d) is 
type(None) + if '__pypy__' in sys.builtin_module_names: + skip('not implemented yet') + e = np.object_([None, {}, []]) + assert e.dtype == object + + def test_mem_array_creation_invalid_specification(self): + # while not specifically testing object dtype, this + # test segfaulted during ObjectType.store due to + # missing gc hooks + import numpy as np + import sys + ytype = np.object_ + if '__pypy__' in sys.builtin_module_names: + ytype = str + dt = np.dtype([('x', int), ('y', ytype)]) + # Correct way + a = np.array([(1, 'object')], dt) + # Wrong way - should complain about writing buffer to object dtype + raises(ValueError, np.array, [1, 'object'], dt) + diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ b/pypy/module/micronumpy/test/test_selection.py @@ -12,14 +12,11 @@ exp = sorted(range(len(exp)), key=exp.__getitem__) c = a.copy() res = a.argsort() - assert (res == exp).all(), '%r\n%r\n%r' % (a,res,exp) + assert (res == exp).all(), 'Failed sortng %r\na=%r\nres=%r\nexp=%r' % (dtype,a,res,exp) assert (a == c).all() # not modified a = arange(100, dtype=dtype) assert (a.argsort() == a).all() - import sys - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, 'arange(10,dtype="float16").argsort()') def test_argsort_ndim(self): from numpy import array @@ -63,14 +60,13 @@ 'i2', complex]: a = array([6, 4, -1, 3, 8, 3, 256+20, 100, 101], dtype=dtype) exp = sorted(list(a)) - res = a.copy() - res.sort() - assert (res == exp).all(), '%r\n%r\n%r' % (a,res,exp) + a.sort() + assert (a == exp).all(), 'Failed sorting %r\n%r\n%r' % (dtype, a, exp) a = arange(100, dtype=dtype) c = a.copy() a.sort() - assert (a == c).all() + assert (a == c).all(), 'Failed sortng %r\na=%r\nc=%r' % (dtype,a,c) def test_sort_nonnative(self): from numpy import array @@ -222,6 +218,7 @@ def test_sort_objects(self): # test object array sorts. 
+ skip('object type not supported yet') from numpy import empty try: a = empty((101,), dtype=object) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -397,11 +397,11 @@ for i in range(3): assert min_c_b[i] == min(b[i], c) - def test_scalar(self): + def test_all_available(self): # tests that by calling all available ufuncs on scalars, none will # raise uncaught interp-level exceptions, (and crash the test) # and those that are uncallable can be accounted for. - # test on the four base-class dtypes: int, bool, float, complex + # test on the base-class dtypes: int, bool, float, complex, object # We need this test since they have no common base class. import numpy as np def find_uncallable_ufuncs(dtype): @@ -412,6 +412,11 @@ if isinstance(u, np.ufunc): try: u(* [array] * u.nin) + except AttributeError: + pass + except NotImplementedError: + print s + uncallable.add(s) except TypeError: assert s not in uncallable uncallable.add(s) @@ -427,6 +432,9 @@ 'fabs', 'fmod', 'invert', 'mod', 'logaddexp', 'logaddexp2', 'left_shift', 'right_shift', 'copysign', 'signbit', 'ceil', 'floor', 'trunc']) + assert find_uncallable_ufuncs('object') == set( + ['isnan', 'logaddexp2', 'copysign', 'isfinite', 'signbit', + 'isinf', 'logaddexp']) def test_int_only(self): from numpy import bitwise_and, array diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -9,6 +9,7 @@ from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray +py.test.skip('move these to pypyjit/test_pypy_c/test_micronumpy') class TestNumpyJit(LLJitMixin): graph = None diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- 
a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -3,8 +3,9 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.objspace.std.floatobject import float2string from pypy.objspace.std.complexobject import str_format +from pypy.interpreter.baseobjspace import W_Root, ObjSpace from rpython.rlib import clibffi, jit, rfloat, rcomplex -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, \ most_neg_value_of, LONG_BIT from rpython.rlib.rawstorage import (alloc_raw_storage, @@ -14,10 +15,12 @@ pack_float80, unpack_float80) from rpython.rlib.rstruct.nativefmttable import native_is_bigendian from rpython.rlib.rstruct.runpack import runpack -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.annlowlevel import cast_instance_to_gcref,\ + cast_gcref_to_instance +from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy import boxes -from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage +from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage, V_OBJECTSTORE from pypy.module.micronumpy.strides import calc_strides degToRad = math.pi / 180.0 @@ -109,10 +112,12 @@ return dispatcher class BaseType(object): - _immutable_fields_ = ['native'] + _immutable_fields_ = ['native', 'space'] - def __init__(self, native=True): + def __init__(self, space, native=True): + assert isinstance(space, ObjSpace) self.native = native + self.space = space def __repr__(self): return self.__class__.__name__ @@ -191,7 +196,7 @@ with arr as storage: self._write(storage, i, offset, self.unbox(box)) - def fill(self, storage, width, box, start, stop, offset): + def fill(self, storage, width, box, start, stop, offset, gcstruct): value = self.unbox(box) for i in xrange(start, stop, width): 
self._write(storage, i, offset, value) @@ -306,7 +311,7 @@ @raw_unary_op def rint(self, v): - float64 = Float64() + float64 = Float64(self.space) return float64.rint(float64.box(v)) class Bool(BaseType, Primitive): @@ -399,7 +404,7 @@ def round(self, v, decimals=0): if decimals != 0: return v - return Float64().box(self.unbox(v)) + return Float64(self.space).box(self.unbox(v)) class Integer(Primitive): _mixin_ = True @@ -444,7 +449,7 @@ self.T is rffi.LONG or self.T is rffi.LONGLONG): if v2 == -1 and v1 == self.for_computation(most_neg_value_of(self.T)): return self.box(0) - return self.box(v1 // v2) + return self.box(v1 / v2) @simple_binary_op def mod(self, v1, v2): @@ -1152,7 +1157,7 @@ with arr as storage: self._write(storage, i, offset, self.unbox(box)) - def fill(self, storage, width, box, start, stop, offset): + def fill(self, storage, width, box, start, stop, offset, gcstruct): value = self.unbox(box) for i in xrange(start, stop, width): self._write(storage, i, offset, value) @@ -1253,25 +1258,25 @@ def ge(self, v1, v2): return self._lt(v2, v1) or self._eq(v2, v1) - def _bool(self, v): + def _cbool(self, v): return bool(v[0]) or bool(v[1]) @raw_binary_op def logical_and(self, v1, v2): - return self._bool(v1) and self._bool(v2) + return self._cbool(v1) and self._cbool(v2) @raw_binary_op def logical_or(self, v1, v2): - return self._bool(v1) or self._bool(v2) + return self._cbool(v1) or self._cbool(v2) @raw_unary_op def logical_not(self, v): - return not self._bool(v) + return not self._cbool(v) @raw_binary_op def logical_xor(self, v1, v2): - a = self._bool(v1) - b = self._bool(v2) + a = self._cbool(v1) + b = self._cbool(v2) return (not b and a) or (not a and b) def min(self, v1, v2): @@ -1629,6 +1634,283 @@ BoxType = boxes.W_ComplexLongBox ComponentBoxType = boxes.W_FloatLongBox +_all_objs_for_tests = [] # for tests + +class ObjectType(Primitive, BaseType): + T = lltype.Signed + BoxType = boxes.W_ObjectBox + + def get_element_size(self): + return 
rffi.sizeof(lltype.Signed) + + def coerce(self, space, dtype, w_item): + if isinstance(w_item, boxes.W_ObjectBox): + return w_item + return boxes.W_ObjectBox(w_item) + + def coerce_subtype(self, space, w_subtype, w_item): + # return the item itself + return self.unbox(self.box(w_item)) + + def store(self, arr, i, offset, box): + if arr.gcstruct is V_OBJECTSTORE: + raise oefmt(self.space.w_NotImplementedError, + "cannot store object in array with no gc hook") + self._write(arr.storage, i, offset, self.unbox(box), + arr.gcstruct) + + def read(self, arr, i, offset, dtype=None): + return self.box(self._read(arr.storage, i, offset)) + + def byteswap(self, w_v): + return w_v + + @jit.dont_look_inside + def _write(self, storage, i, offset, w_obj, gcstruct): + # no GC anywhere in this function! + if we_are_translated(): + from rpython.rlib import rgc + rgc.ll_writebarrier(gcstruct) + value = rffi.cast(lltype.Signed, cast_instance_to_gcref(w_obj)) + else: + value = len(_all_objs_for_tests) + _all_objs_for_tests.append(w_obj) + raw_storage_setitem_unaligned(storage, i + offset, value) + + @jit.dont_look_inside + def _read(self, storage, i, offset): + res = raw_storage_getitem_unaligned(self.T, storage, i + offset) + if we_are_translated(): + gcref = rffi.cast(llmemory.GCREF, res) + w_obj = cast_gcref_to_instance(W_Root, gcref) + else: + w_obj = _all_objs_for_tests[res] + return w_obj + + def fill(self, storage, width, box, start, stop, offset, gcstruct): + value = self.unbox(box) + for i in xrange(start, stop, width): + self._write(storage, i, offset, value, gcstruct) + + def unbox(self, box): + if isinstance(box, self.BoxType): + return box.w_obj + else: + raise oefmt(self.space.w_NotImplementedError, + "object dtype cannot unbox %s", str(box)) + + @specialize.argtype(1) + def box(self, w_obj): + if isinstance(w_obj, W_Root): + pass + elif isinstance(w_obj, bool): + w_obj = self.space.newbool(w_obj) + elif isinstance(w_obj, int): + w_obj = self.space.newint(w_obj) + elif 
isinstance(w_obj, lltype.Number): + w_obj = self.space.newint(w_obj) + elif isinstance(w_obj, float): + w_obj = self.space.newfloat(w_obj) + elif w_obj is None: + w_obj = self.space.w_None + else: + raise oefmt(self.space.w_NotImplementedError, + "cannot create object array/scalar from lltype") + return self.BoxType(w_obj) + + @specialize.argtype(1, 2) + def box_complex(self, real, imag): + if isinstance(real, rffi.r_singlefloat): + real = rffi.cast(rffi.DOUBLE, real) + if isinstance(imag, rffi.r_singlefloat): + imag = rffi.cast(rffi.DOUBLE, imag) + w_obj = self.space.newcomplex(real, imag) + return self.BoxType(w_obj) + + def str_format(self, box): + return self.space.str_w(self.space.repr(self.unbox(box))) + + def runpack_str(self, space, s): + raise oefmt(space.w_NotImplementedError, + "fromstring not implemented for object type") + + def to_builtin_type(self, space, box): + assert isinstance(box, self.BoxType) + return box.w_obj + + @staticmethod + def for_computation(v): + return v + + @raw_binary_op + def eq(self, v1, v2): + return self.space.eq_w(v1, v2) + + @simple_binary_op + def max(self, v1, v2): + if self.space.is_true(self.space.ge(v1, v2)): + return v1 + return v2 + + @simple_binary_op + def min(self, v1, v2): + if self.space.is_true(self.space.le(v1, v2)): + return v1 + return v2 + + @raw_unary_op + def bool(self,v): + return self._obool(v) + + def _obool(self, v): + if self.space.is_true(v): + return True + return False + + @raw_binary_op + def logical_and(self, v1, v2): + if self._obool(v1): + return self.space.bool_w(v2) + return self.space.bool_w(v1) + + @raw_binary_op + def logical_or(self, v1, v2): + if self._obool(v1): + return self.space.bool_w(v1) + return self.space.bool_w(v2) + + @raw_unary_op + def logical_not(self, v): + return not self._obool(v) + + @raw_binary_op + def logical_xor(self, v1, v2): + a = self._obool(v1) + b = self._obool(v2) + return (not b and a) or (not a and b) + + @simple_binary_op + def bitwise_and(self, v1, v2): + 
return self.space.and_(v1, v2) + + @simple_binary_op + def bitwise_or(self, v1, v2): + return self.space.or_(v1, v2) + + @simple_binary_op + def bitwise_xor(self, v1, v2): + return self.space.xor(v1, v2) + + @simple_binary_op + def pow(self, v1, v2): + return self.space.pow(v1, v2, self.space.wrap(1)) + + @simple_unary_op + def reciprocal(self, v1): + return self.space.div(self.space.wrap(1.0), v1) + + @simple_unary_op + def sign(self, v): + zero = self.space.wrap(0) + one = self.space.wrap(1) + m_one = self.space.wrap(-1) + if self.space.is_true(self.space.gt(v, zero)): + return one + elif self.space.is_true(self.space.lt(v, zero)): + return m_one + else: + return zero + + @simple_unary_op + def real(self, v): + return v + + @simple_unary_op + def imag(self, v): + return 0 + + @simple_unary_op + def square(self, v): + return self.space.mul(v, v) + + @raw_binary_op + def le(self, v1, v2): + return self.space.bool_w(self.space.le(v1, v2)) + + @raw_binary_op + def ge(self, v1, v2): + return self.space.bool_w(self.space.ge(v1, v2)) + + @raw_binary_op + def lt(self, v1, v2): + return self.space.bool_w(self.space.lt(v1, v2)) + + @raw_binary_op + def gt(self, v1, v2): + return self.space.bool_w(self.space.gt(v1, v2)) + + @raw_binary_op + def ne(self, v1, v2): + return self.space.bool_w(self.space.ne(v1, v2)) + +def add_attributeerr_op(cls, op): + def func(self, *args): + raise oefmt(self.space.w_AttributeError, + "%s", op) + func.__name__ = 'object_' + op + setattr(cls, op, func) + +def add_unsupported_op(cls, op): + def func(self, *args): + raise oefmt(self.space.w_TypeError, + "ufunc '%s' not supported for input types", op) + func.__name__ = 'object_' + op + setattr(cls, op, func) + +def add_unary_op(cls, op, method): + @simple_unary_op + def func(self, w_v): + space = self.space + w_impl = space.lookup(w_v, method) + if w_impl is None: + raise oefmt(space.w_AttributeError, 'unknown op "%s" on object' % op) + return space.get_and_call_function(w_impl, w_v) + 
func.__name__ = 'object_' + op + setattr(cls, op, func) + +def add_space_unary_op(cls, op): + @simple_unary_op + def func(self, v): + return getattr(self.space, op)(v) + func.__name__ = 'object_' + op + setattr(cls, op, func) + +def add_space_binary_op(cls, op): + @simple_binary_op + def func(self, v1, v2): + return getattr(self.space, op)(v1, v2) + func.__name__ = 'object_' + op + setattr(cls, op, func) + +for op in ('copysign', 'isfinite', 'isinf', 'isnan', 'logaddexp', 'logaddexp2', + 'signbit'): + add_unsupported_op(ObjectType, op) +for op in ('arctan2', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', + 'arctanh', 'ceil', 'floor', 'cos', 'sin', 'tan', 'cosh', 'sinh', + 'tanh', 'radians', 'degrees', 'exp','exp2', 'expm1', 'fabs', + 'log', 'log10', 'log1p', 'log2', 'sqrt', 'trunc'): + add_attributeerr_op(ObjectType, op) +for op in ('abs', 'neg', 'pos', 'invert'): + add_space_unary_op(ObjectType, op) +for op, method in (('conj', 'descr_conjugate'), ('rint', 'descr_rint')): + add_unary_op(ObjectType, op, method) +for op in ('add', 'floordiv', 'div', 'mod', 'mul', 'sub', 'lshift', 'rshift'): + add_space_binary_op(ObjectType, op) + +ObjectType.fmax = ObjectType.max +ObjectType.fmin = ObjectType.min +ObjectType.fmod = ObjectType.mod + class FlexibleType(BaseType): def get_element_size(self): return rffi.sizeof(self.T) @@ -1758,7 +2040,7 @@ def bool(self, v): return bool(self.to_str(v)) - def fill(self, storage, width, box, start, stop, offset): + def fill(self, storage, width, box, start, stop, offset, gcstruct): for i in xrange(start, stop, width): self._store(storage, i, offset, box, width) @@ -1775,6 +2057,57 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "coerce (probably from set_item) not implemented for unicode type")) + def store(self, arr, i, offset, box): + assert isinstance(box, boxes.W_UnicodeBox) + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def read(self, arr, i, offset, dtype=None): + raise 
oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def str_format(self, item): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def to_builtin_type(self, space, box): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def eq(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def ne(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def lt(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def le(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def gt(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def ge(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def logical_and(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def logical_or(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def logical_not(self, v): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + @str_binary_op + def logical_xor(self, v1, v2): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def bool(self, v): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + def fill(self, storage, width, box, start, stop, offset, gcstruct): + raise oefmt(self.space.w_NotImplementedError, "unicode type not completed") + + class VoidType(FlexibleType): T = lltype.Char @@ -1882,6 +2215,9 @@ items_w = space.fixedview(w_item) elif isinstance(w_item, W_NDimArray) and w_item.is_scalar(): items_w = space.fixedview(w_item.get_scalar_value()) + elif space.isinstance_w(w_item, space.w_list): + raise oefmt(space.w_TypeError, + "expected a readable buffer object") else: # XXX support 
initializing from readable buffers items_w = [w_item] * len(dtype.fields) @@ -1913,7 +2249,7 @@ for k in range(size): storage[k + i + ofs] = box_storage[k + box.ofs] - def fill(self, storage, width, box, start, stop, offset): + def fill(self, storage, width, box, start, stop, offset, gcstruct): assert isinstance(box, boxes.W_VoidBox) assert width == box.dtype.elsize for i in xrange(start, stop, width): diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -349,7 +349,7 @@ if dtype.is_flexible(): raise OperationError(space.w_TypeError, space.wrap('Not implemented for this type')) - if (self.int_only and not dtype.is_int() or + if (self.int_only and not (dtype.is_int() or dtype.is_object()) or not self.allow_bool and dtype.is_bool() or not self.allow_complex and dtype.is_complex()): raise oefmt(space.w_TypeError, @@ -378,6 +378,8 @@ w_val = self.func(calc_dtype, w_obj.get_scalar_value().convert_to(space, calc_dtype)) if out is None: + if res_dtype.is_object(): + w_val = w_obj.get_scalar_value() return w_val w_val = res_dtype.coerce(space, w_val) if out.is_scalar(): @@ -434,11 +436,20 @@ w_rhs = numpify(space, w_rhs) w_ldtype = _get_dtype(space, w_lhs) w_rdtype = _get_dtype(space, w_rhs) - if w_ldtype.is_str() and w_rdtype.is_str() and \ + if w_ldtype.is_object() or w_rdtype.is_object(): + pass + elif w_ldtype.is_str() and w_rdtype.is_str() and \ self.comparison_func: pass - elif (w_ldtype.is_str() or w_rdtype.is_str()) and \ + elif (w_ldtype.is_str()) and \ self.comparison_func and w_out is None: + if self.name in ('equal', 'less_equal', 'less'): + return space.wrap(False) + return space.wrap(True) + elif (w_rdtype.is_str()) and \ + self.comparison_func and w_out is None: + if self.name in ('not_equal','less', 'less_equal'): + return space.wrap(True) return space.wrap(False) elif w_ldtype.is_flexible() or w_rdtype.is_flexible(): if self.comparison_func: @@ 
-463,9 +474,9 @@ w_ldtype, w_rdtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if (self.int_only and (not w_ldtype.is_int() or - not w_rdtype.is_int() or - not calc_dtype.is_int()) or + if (self.int_only and (not (w_ldtype.is_int() or w_ldtype.is_object()) or + not (w_rdtype.is_int() or w_rdtype.is_object()) or + not (calc_dtype.is_int() or calc_dtype.is_object())) or not self.allow_bool and (w_ldtype.is_bool() or w_rdtype.is_bool()) or not self.allow_complex and (w_ldtype.is_complex() or @@ -643,7 +654,7 @@ # from frompyfunc pass # mimic NpyIter_AdvancedNew with a nditer - w_itershape = space.newlist([space.wrap(i) for i in iter_shape]) + w_itershape = space.newlist([space.wrap(i) for i in iter_shape]) nd_it = W_NDIter(space, space.newlist(inargs + outargs), w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, w_itershape) @@ -749,7 +760,7 @@ else: raise oefmt(space.w_TypeError, "a type-string for %s " \ "requires 1 typecode or %d typecode(s) before and %d" \ - " after the -> sign, not '%s'", self.name, self.nin, + " after the -> sign, not '%s'", self.name, self.nin, self.nout, type_tup) except KeyError: raise oefmt(space.w_ValueError, "unknown typecode in" \ @@ -773,11 +784,11 @@ for j in range(self.nargs): if dtypes[j] is not None and dtypes[j] != _dtypes[i+j]: allok = False - if allok: + if allok: break else: if len(self.funcs) > 1: - + dtypesstr = '' for d in dtypes: if d is None: @@ -787,7 +798,7 @@ _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ for d in _dtypes]) raise oefmt(space.w_TypeError, - "input dtype [%s] did not match any known dtypes [%s] ", + "input dtype [%s] did not match any known dtypes [%s] ", dtypesstr,_dtypesstr) i = 0 # Fill in empty dtypes @@ -807,7 +818,7 @@ assert isinstance(curarg, W_NDimArray) if len(arg_shapes[i]) != curarg.ndims(): # reshape - + sz = product(curarg.get_shape()) * curarg.get_dtype().elsize with curarg.implementation as storage: inargs[i] = 
W_NDimArray.from_shape_and_storage( @@ -865,7 +876,7 @@ "%s of gufunc was not specified", self.name, name, _i, core_dim_index, self.signature) target_dims.append(v) - arg_shapes.append(iter_shape + target_dims) + arg_shapes.append(iter_shape + target_dims) continue n = len(curarg.get_shape()) - num_dims if n < 0: @@ -907,7 +918,7 @@ raise oefmt(space.w_ValueError, "%s: %s operand %d has a " "mismatch in its core dimension %d, with gufunc " "signature %s (expected %d, got %d)", - self.name, name, _i, j, + self.name, name, _i, j, self.signature, matched_dims[core_dim_index], dims_to_match[core_dim_index]) #print 'adding',iter_shape,'+',dims_to_match,'to arg_shapes' @@ -950,6 +961,10 @@ return dt1 if dt1 is None: return dt2 + + if dt1.num == NPY.OBJECT or dt2.num == NPY.OBJECT: + return get_dtype_cache(space).w_objectdtype + # dt1.num should be <= dt2.num if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 @@ -1032,6 +1047,8 @@ @jit.unroll_safe def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): + if dt.is_object(): + return dt if promote_to_largest: if dt.kind == NPY.GENBOOLLTR or dt.kind == NPY.SIGNEDLTR: if dt.elsize * 8 < LONG_BIT: @@ -1064,6 +1081,7 @@ uint64_dtype = get_dtype_cache(space).w_uint64dtype complex_dtype = get_dtype_cache(space).w_complex128dtype float_dtype = get_dtype_cache(space).w_float64dtype + object_dtype = get_dtype_cache(space).w_objectdtype if isinstance(w_obj, boxes.W_GenericBox): dtype = w_obj.get_dtype(space) return find_binop_result_dtype(space, dtype, current_guess) @@ -1097,9 +1115,10 @@ return variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess - raise oefmt(space.w_NotImplementedError, - 'unable to create dtype from objects, "%T" instance not ' - 'supported', w_obj) + return object_dtype + #raise oefmt(space.w_NotImplementedError, + # 'unable to create dtype from objects, "%T" instance not ' + # 'supported', w_obj) def ufunc_dtype_caller(space, ufunc_name, 
op_name, nin, comparison_func, @@ -1263,7 +1282,7 @@ w_identity=None, name='', doc='', stack_inputs=False): ''' frompyfunc(func, nin, nout) #cpython numpy compatible frompyfunc(func, nin, nout, dtypes=None, signature='', - identity=None, name='', doc='', + identity=None, name='', doc='', stack_inputs=False) Takes an arbitrary Python function and returns a ufunc. @@ -1282,7 +1301,7 @@ dtypes: None or [dtype, ...] of the input, output args for each function, or 'match' to force output to exactly match input dtype Note that 'match' is a pypy-only extension to allow non-object - return dtypes + return dtypes signature*: str, default='' The mapping of input args to output args, defining the inner-loop indexing. If it is empty, the func operates on scalars @@ -1293,7 +1312,7 @@ stack_inputs*: boolean, whether the function is of the form out = func(*in) False or - func(*[in + out]) True + func(*[in + out]) True only one of out_dtype or signature may be specified diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -99,6 +99,7 @@ debug_excs = getattr(operr, 'debug_excs', []) if debug_excs: self._excinfo = debug_excs[0] + self.value = self.operr.errorstr(self.space) # XXX def __repr__(self): return "" % self.operr.errorstr(self.space) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -686,6 +686,15 @@ lambda_customtrace = lambda: customtrace """ + at specialize.ll() +def ll_writebarrier(gc_obj): + """Use together with custom tracers. When you update some object pointer + stored in raw memory, you must call this function on 'gc_obj', which must + be the object of type TP with the custom tracer (*not* the value stored!). 
+ This makes sure that the custom hook will be called again.""" + from rpython.rtyper.lltypesystem.lloperation import llop + llop.gc_writebarrier(lltype.Void, gc_obj) + class RegisterGcTraceEntry(ExtRegistryEntry): _about_ = register_custom_trace_hook From noreply at buildbot.pypy.org Fri Apr 24 07:00:45 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 24 Apr 2015 07:00:45 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: remove (almost) unused w_intpdtype and w_uintpdtype Message-ID: <20150424050045.52A231C069E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76912:c009ce07c90f Date: 2015-04-24 04:17 +0100 http://bitbucket.org/pypy/pypy/changeset/c009ce07c90f/ Log: remove (almost) unused w_intpdtype and w_uintpdtype diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -853,20 +853,6 @@ char=NPY.HALFLTR, w_box_type=space.gettypefor(boxes.W_Float16Box), ) - self.w_intpdtype = W_Dtype( - types.Long(space), - num=NPY.LONG, - kind=NPY.SIGNEDLTR, - char=NPY.INTPLTR, - w_box_type=space.gettypefor(boxes.W_LongBox), - ) - self.w_uintpdtype = W_Dtype( - types.ULong(space), - num=NPY.ULONG, - kind=NPY.UNSIGNEDLTR, - char=NPY.UINTPLTR, - w_box_type=space.gettypefor(boxes.W_ULongBox), - ) self.w_objectdtype = W_Dtype( types.ObjectType(space), num=NPY.OBJECT, @@ -932,7 +918,7 @@ self.w_int64dtype, self.w_uint64dtype, ] + float_dtypes + complex_dtypes + [ self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype, - self.w_intpdtype, self.w_uintpdtype, self.w_objectdtype, + self.w_objectdtype, ] self.float_dtypes_by_num_bytes = sorted( (dtype.elsize, dtype) @@ -973,8 +959,7 @@ 'CLONGDOUBLE': self.w_complexlongdtype, #'DATETIME', 'UINT': self.w_uint32dtype, - 'INTP': self.w_intpdtype, - 'UINTP': self.w_uintpdtype, + 'INTP': self.w_longdtype, 'HALF': self.w_float16dtype, 'BYTE': self.w_int8dtype, #'TIMEDELTA', From 
noreply at buildbot.pypy.org Fri Apr 24 07:00:46 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 24 Apr 2015 07:00:46 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: Add num, kind and char information to the itemtypes Message-ID: <20150424050046.A6A011C069E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76913:0efb8027fc48 Date: 2015-04-24 04:18 +0100 http://bitbucket.org/pypy/pypy/changeset/0efb8027fc48/ Log: Add num, kind and char information to the itemtypes diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -22,6 +22,7 @@ from pypy.module.micronumpy import boxes from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage, V_OBJECTSTORE from pypy.module.micronumpy.strides import calc_strides +from . import constants as NPY degToRad = math.pi / 180.0 log2 = math.log(2) @@ -316,6 +317,9 @@ class Bool(BaseType, Primitive): T = lltype.Bool + num = NPY.BOOL + kind = NPY.GENBOOLLTR + char = NPY.BOOLLTR BoxType = boxes.W_BoolBox format_code = "?" 
@@ -551,31 +555,49 @@ class Int8(BaseType, Integer): T = rffi.SIGNEDCHAR + num = NPY.BYTE + kind = NPY.SIGNEDLTR + char = NPY.BYTELTR BoxType = boxes.W_Int8Box format_code = "b" class UInt8(BaseType, Integer): T = rffi.UCHAR + num = NPY.UBYTE + kind = NPY.UNSIGNEDLTR + char = NPY.UBYTELTR BoxType = boxes.W_UInt8Box format_code = "B" class Int16(BaseType, Integer): T = rffi.SHORT + num = NPY.SHORT + kind = NPY.SIGNEDLTR + char = NPY.SHORTLTR BoxType = boxes.W_Int16Box format_code = "h" class UInt16(BaseType, Integer): T = rffi.USHORT + num = NPY.USHORT + kind = NPY.UNSIGNEDLTR + char = NPY.USHORTLTR BoxType = boxes.W_UInt16Box format_code = "H" class Int32(BaseType, Integer): T = rffi.INT + num = NPY.INT + kind = NPY.SIGNEDLTR + char = NPY.INTLTR BoxType = boxes.W_Int32Box format_code = "i" class UInt32(BaseType, Integer): T = rffi.UINT + num = NPY.UINT + kind = NPY.UNSIGNEDLTR + char = NPY.UINTLTR BoxType = boxes.W_UInt32Box format_code = "I" @@ -594,6 +616,9 @@ class Int64(BaseType, Integer): T = rffi.LONGLONG + num = NPY.LONGLONG + kind = NPY.SIGNEDLTR + char = NPY.LONGLONGLTR BoxType = boxes.W_Int64Box format_code = "q" @@ -615,6 +640,9 @@ class UInt64(BaseType, Integer): T = rffi.ULONGLONG + num = NPY.ULONGLONG + kind = NPY.UNSIGNEDLTR + char = NPY.ULONGLONGLTR BoxType = boxes.W_UInt64Box format_code = "Q" @@ -622,6 +650,9 @@ class Long(BaseType, Integer): T = rffi.LONG + num = NPY.LONG + kind = NPY.SIGNEDLTR + char = NPY.LONGLTR BoxType = boxes.W_LongBox format_code = "l" @@ -640,6 +671,9 @@ class ULong(BaseType, Integer): T = rffi.ULONG + num = NPY.ULONG + kind = NPY.UNSIGNEDLTR + char = NPY.ULONGLTR BoxType = boxes.W_ULongBox format_code = "L" @@ -974,6 +1008,9 @@ class Float16(BaseType, Float): _STORAGE_T = rffi.USHORT T = rffi.SHORT + num = NPY.HALF + kind = NPY.FLOATINGLTR + char = NPY.HALFLTR BoxType = boxes.W_Float16Box @specialize.argtype(1) @@ -1014,11 +1051,17 @@ class Float32(BaseType, Float): T = rffi.FLOAT + num = NPY.FLOAT + kind = 
NPY.FLOATINGLTR + char = NPY.FLOATLTR BoxType = boxes.W_Float32Box format_code = "f" class Float64(BaseType, Float): T = rffi.DOUBLE + num = NPY.DOUBLE + kind = NPY.FLOATINGLTR + char = NPY.DOUBLELTR BoxType = boxes.W_Float64Box format_code = "d" @@ -1592,28 +1635,43 @@ class Complex64(ComplexFloating, BaseType): T = rffi.FLOAT + num = NPY.CFLOAT + kind = NPY.COMPLEXLTR + char = NPY.CFLOATLTR BoxType = boxes.W_Complex64Box ComponentBoxType = boxes.W_Float32Box class Complex128(ComplexFloating, BaseType): T = rffi.DOUBLE + num = NPY.CDOUBLE + kind = NPY.COMPLEXLTR + char = NPY.CDOUBLELTR BoxType = boxes.W_Complex128Box ComponentBoxType = boxes.W_Float64Box if boxes.long_double_size == 8: class FloatLong(BaseType, Float): T = rffi.DOUBLE + num = NPY.LONGDOUBLE + kind = NPY.FLOATINGLTR + char = NPY.LONGDOUBLELTR BoxType = boxes.W_FloatLongBox format_code = "d" class ComplexLong(ComplexFloating, BaseType): T = rffi.DOUBLE + num = NPY.CLONGDOUBLE + kind = NPY.COMPLEXLTR + char = NPY.CLONGDOUBLELTR BoxType = boxes.W_ComplexLongBox ComponentBoxType = boxes.W_FloatLongBox elif boxes.long_double_size in (12, 16): class FloatLong(BaseType, Float): T = rffi.LONGDOUBLE + num = NPY.LONGDOUBLE + kind = NPY.FLOATINGLTR + char = NPY.LONGDOUBLELTR BoxType = boxes.W_FloatLongBox def runpack_str(self, space, s): @@ -1631,6 +1689,9 @@ class ComplexLong(ComplexFloating, BaseType): T = rffi.LONGDOUBLE + num = NPY.CLONGDOUBLE + kind = NPY.COMPLEXLTR + char = NPY.CLONGDOUBLELTR BoxType = boxes.W_ComplexLongBox ComponentBoxType = boxes.W_FloatLongBox @@ -1638,6 +1699,9 @@ class ObjectType(Primitive, BaseType): T = lltype.Signed + num = NPY.OBJECT + kind = NPY.OBJECTLTR + char = NPY.OBJECTLTR BoxType = boxes.W_ObjectBox def get_element_size(self): @@ -1698,7 +1762,7 @@ else: raise oefmt(self.space.w_NotImplementedError, "object dtype cannot unbox %s", str(box)) - + @specialize.argtype(1) def box(self, w_obj): if isinstance(w_obj, W_Root): @@ -1949,6 +2013,9 @@ class 
StringType(FlexibleType): T = lltype.Char + num = NPY.STRING + kind = NPY.STRINGLTR + char = NPY.STRINGLTR @jit.unroll_safe def coerce(self, space, dtype, w_item): @@ -2046,6 +2113,9 @@ class UnicodeType(FlexibleType): T = lltype.Char + num = NPY.UNICODE + kind = NPY.UNICODELTR + char = NPY.UNICODELTR def get_element_size(self): return 4 # always UTF-32 @@ -2110,6 +2180,9 @@ class VoidType(FlexibleType): T = lltype.Char + num = NPY.VOID + kind = NPY.VOIDLTR + char = NPY.VOIDLTR def _coerce(self, space, arr, ofs, dtype, w_items, shape): # TODO: Make sure the shape and the array match @@ -2196,6 +2269,9 @@ class RecordType(FlexibleType): T = lltype.Char + num = NPY.VOID + kind = NPY.VOIDLTR + char = NPY.VOIDLTR def read(self, arr, i, offset, dtype=None): if dtype is None: From noreply at buildbot.pypy.org Fri Apr 24 07:00:47 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 24 Apr 2015 07:00:47 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: remove num, kind, char from W_Dtype Message-ID: <20150424050047.D2AEC1C069E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76914:05d75330bba0 Date: 2015-04-24 06:00 +0100 http://bitbucket.org/pypy/pypy/changeset/05d75330bba0/ Log: remove num, kind, char from W_Dtype diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -42,18 +42,12 @@ class W_Dtype(W_Root): _immutable_fields_ = [ - "itemtype?", "num", "kind", "char", "w_box_type", - "byteorder?", "names?", "fields?", "elsize?", "alignment?", - "shape?", "subdtype?", "base?", - ] + "itemtype?", "w_box_type", "byteorder?", "names?", "fields?", + "elsize?", "alignment?", "shape?", "subdtype?", "base?"] - def __init__(self, itemtype, num, kind, char, w_box_type, - byteorder=None, names=[], fields={}, - elsize=None, shape=[], subdtype=None): + def __init__(self, itemtype, w_box_type, byteorder=None, names=[], + 
fields={}, elsize=None, shape=[], subdtype=None): self.itemtype = itemtype - self.num = num - self.kind = kind - self.char = char self.w_box_type = w_box_type if byteorder is None: if itemtype.get_element_size() == 1 or isinstance(itemtype, types.ObjectType): @@ -74,6 +68,18 @@ else: self.base = subdtype.base + @property + def num(self): + return self.itemtype.num + + @property + def kind(self): + return self.itemtype.kind + + @property + def char(self): + return self.itemtype.char + def __repr__(self): if self.fields: return '' % self.fields @@ -453,7 +459,7 @@ fields = self.fields if fields is None: fields = {} - return W_Dtype(itemtype, self.num, self.kind, self.char, + return W_Dtype(itemtype, self.w_box_type, byteorder=endian, elsize=self.elsize, names=self.names, fields=fields, shape=self.shape, subdtype=self.subdtype) @@ -488,8 +494,7 @@ fields[fldname] = (offset, subdtype) offset += subdtype.elsize names.append(fldname) - return W_Dtype(types.RecordType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, - space.gettypefor(boxes.W_VoidBox), + return W_Dtype(types.RecordType(space), space.gettypefor(boxes.W_VoidBox), names=names, fields=fields, elsize=offset) @@ -556,7 +561,7 @@ if size == 1: return subdtype size *= subdtype.elsize - return W_Dtype(types.VoidType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + return W_Dtype(types.VoidType(space), space.gettypefor(boxes.W_VoidBox), shape=shape, subdtype=subdtype, elsize=size) @@ -657,7 +662,10 @@ except ValueError: raise oefmt(space.w_TypeError, "data type not understood") if char == NPY.CHARLTR: - return new_string_dtype(space, 1, NPY.CHARLTR) + return W_Dtype( + types.CharType(space), + elsize=1, + w_box_type=space.gettypefor(boxes.W_StringBox)) elif char == NPY.STRINGLTR or char == NPY.STRINGLTR2: return new_string_dtype(space, size) elif char == NPY.UNICODELTR: @@ -667,13 +675,10 @@ assert False -def new_string_dtype(space, size, char=NPY.STRINGLTR): +def new_string_dtype(space, size): return W_Dtype( 
types.StringType(space), elsize=size, - num=NPY.STRING, - kind=NPY.STRINGLTR, - char=char, w_box_type=space.gettypefor(boxes.W_StringBox), ) @@ -683,9 +688,6 @@ return W_Dtype( itemtype, elsize=size * itemtype.get_element_size(), - num=NPY.UNICODE, - kind=NPY.UNICODELTR, - char=NPY.UNICODELTR, w_box_type=space.gettypefor(boxes.W_UnicodeBox), ) @@ -694,9 +696,6 @@ return W_Dtype( types.VoidType(space), elsize=size, - num=NPY.VOID, - kind=NPY.VOIDLTR, - char=NPY.VOIDLTR, w_box_type=space.gettypefor(boxes.W_VoidBox), ) @@ -705,159 +704,93 @@ def __init__(self, space): self.w_booldtype = W_Dtype( types.Bool(space), - num=NPY.BOOL, - kind=NPY.GENBOOLLTR, - char=NPY.BOOLLTR, w_box_type=space.gettypefor(boxes.W_BoolBox), ) self.w_int8dtype = W_Dtype( types.Int8(space), - num=NPY.BYTE, - kind=NPY.SIGNEDLTR, - char=NPY.BYTELTR, w_box_type=space.gettypefor(boxes.W_Int8Box), ) self.w_uint8dtype = W_Dtype( types.UInt8(space), - num=NPY.UBYTE, - kind=NPY.UNSIGNEDLTR, - char=NPY.UBYTELTR, w_box_type=space.gettypefor(boxes.W_UInt8Box), ) self.w_int16dtype = W_Dtype( types.Int16(space), - num=NPY.SHORT, - kind=NPY.SIGNEDLTR, - char=NPY.SHORTLTR, w_box_type=space.gettypefor(boxes.W_Int16Box), ) self.w_uint16dtype = W_Dtype( types.UInt16(space), - num=NPY.USHORT, - kind=NPY.UNSIGNEDLTR, - char=NPY.USHORTLTR, w_box_type=space.gettypefor(boxes.W_UInt16Box), ) self.w_int32dtype = W_Dtype( types.Int32(space), - num=NPY.INT, - kind=NPY.SIGNEDLTR, - char=NPY.INTLTR, w_box_type=space.gettypefor(boxes.W_Int32Box), ) self.w_uint32dtype = W_Dtype( types.UInt32(space), - num=NPY.UINT, - kind=NPY.UNSIGNEDLTR, - char=NPY.UINTLTR, w_box_type=space.gettypefor(boxes.W_UInt32Box), ) self.w_longdtype = W_Dtype( types.Long(space), - num=NPY.LONG, - kind=NPY.SIGNEDLTR, - char=NPY.LONGLTR, w_box_type=space.gettypefor(boxes.W_LongBox), ) self.w_ulongdtype = W_Dtype( types.ULong(space), - num=NPY.ULONG, - kind=NPY.UNSIGNEDLTR, - char=NPY.ULONGLTR, w_box_type=space.gettypefor(boxes.W_ULongBox), ) 
self.w_int64dtype = W_Dtype( types.Int64(space), - num=NPY.LONGLONG, - kind=NPY.SIGNEDLTR, - char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(boxes.W_Int64Box), ) self.w_uint64dtype = W_Dtype( types.UInt64(space), - num=NPY.ULONGLONG, - kind=NPY.UNSIGNEDLTR, - char=NPY.ULONGLONGLTR, w_box_type=space.gettypefor(boxes.W_UInt64Box), ) self.w_float32dtype = W_Dtype( types.Float32(space), - num=NPY.FLOAT, - kind=NPY.FLOATINGLTR, - char=NPY.FLOATLTR, w_box_type=space.gettypefor(boxes.W_Float32Box), ) self.w_float64dtype = W_Dtype( types.Float64(space), - num=NPY.DOUBLE, - kind=NPY.FLOATINGLTR, - char=NPY.DOUBLELTR, w_box_type=space.gettypefor(boxes.W_Float64Box), ) self.w_floatlongdtype = W_Dtype( types.FloatLong(space), - num=NPY.LONGDOUBLE, - kind=NPY.FLOATINGLTR, - char=NPY.LONGDOUBLELTR, w_box_type=space.gettypefor(boxes.W_FloatLongBox), ) self.w_complex64dtype = W_Dtype( types.Complex64(space), - num=NPY.CFLOAT, - kind=NPY.COMPLEXLTR, - char=NPY.CFLOATLTR, w_box_type=space.gettypefor(boxes.W_Complex64Box), ) self.w_complex128dtype = W_Dtype( types.Complex128(space), - num=NPY.CDOUBLE, - kind=NPY.COMPLEXLTR, - char=NPY.CDOUBLELTR, w_box_type=space.gettypefor(boxes.W_Complex128Box), ) self.w_complexlongdtype = W_Dtype( types.ComplexLong(space), - num=NPY.CLONGDOUBLE, - kind=NPY.COMPLEXLTR, - char=NPY.CLONGDOUBLELTR, w_box_type=space.gettypefor(boxes.W_ComplexLongBox), ) self.w_stringdtype = W_Dtype( types.StringType(space), elsize=0, - num=NPY.STRING, - kind=NPY.STRINGLTR, - char=NPY.STRINGLTR, w_box_type=space.gettypefor(boxes.W_StringBox), ) self.w_unicodedtype = W_Dtype( types.UnicodeType(space), elsize=0, - num=NPY.UNICODE, - kind=NPY.UNICODELTR, - char=NPY.UNICODELTR, w_box_type=space.gettypefor(boxes.W_UnicodeBox), ) self.w_voiddtype = W_Dtype( types.VoidType(space), elsize=0, - num=NPY.VOID, - kind=NPY.VOIDLTR, - char=NPY.VOIDLTR, w_box_type=space.gettypefor(boxes.W_VoidBox), ) self.w_float16dtype = W_Dtype( types.Float16(space), - num=NPY.HALF, - 
kind=NPY.FLOATINGLTR, - char=NPY.HALFLTR, w_box_type=space.gettypefor(boxes.W_Float16Box), ) self.w_objectdtype = W_Dtype( types.ObjectType(space), - num=NPY.OBJECT, - kind=NPY.OBJECTLTR, - char=NPY.OBJECTLTR, w_box_type=space.gettypefor(boxes.W_ObjectBox), ) aliases = { diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -2267,6 +2267,9 @@ "item() for Void aray with no fields not implemented")) return space.newtuple(ret_unwrapped) +class CharType(StringType): + char = NPY.CHARLTR + class RecordType(FlexibleType): T = lltype.Char num = NPY.VOID From noreply at buildbot.pypy.org Fri Apr 24 07:13:47 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 24 Apr 2015 07:13:47 +0200 (CEST) Subject: [pypy-commit] buildbot default: add anubis64 Message-ID: <20150424051347.59AEE1C069E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r949:98c105c0aaef Date: 2015-04-24 08:14 +0300 http://bitbucket.org/pypy/buildbot/changeset/98c105c0aaef/ Log: add anubis64 diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -36,3 +36,4 @@ *-win-x86-64 slave/pypy-buildbot master/pypy-buildbot +*.swp diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -439,7 +439,7 @@ # 'category' : 'mac64', # }, {"name": WIN32, - "slavenames": ["SalsaSalsa", "allegro_win32"], + "slavenames": ["SalsaSalsa", "allegro_win32", "anubis64"], "builddir": WIN32, "factory": pypyOwnTestFactoryWin, "locks": [WinSlaveLock.access('counting')], @@ -453,7 +453,7 @@ "category": "win32", }, {"name" : JITWIN32, - "slavenames": ["SalsaSalsa", "allegro_win32"], + "slavenames": ["SalsaSalsa", "allegro_win32", "anubis64"], 'builddir' : JITWIN32, 'factory' : pypyJITTranslatedTestFactoryWin, "locks": [WinSlaveLock.access('counting')], From noreply at buildbot.pypy.org Fri Apr 24 07:52:31 2015 From: 
noreply at buildbot.pypy.org (mattip) Date: Fri, 24 Apr 2015 07:52:31 +0200 (CEST) Subject: [pypy-commit] pypy default: skip if translated, test is useful for development only Message-ID: <20150424055231.68F2D1C069E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r76915:8117b0f94004 Date: 2015-04-24 08:42 +0300 http://bitbucket.org/pypy/pypy/changeset/8117b0f94004/ Log: skip if translated, test is useful for development only diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -1,7 +1,12 @@ from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest +from pypy.conftest import option class AppTestObjectDtypes(BaseNumpyAppTest): + def setup_class(cls): + BaseNumpyAppTest.setup_class.im_func(cls) + cls.w_runappdirect = cls.space.wrap(option.runappdirect) + def test_scalar_from_object(self): from numpy import array import sys @@ -109,6 +114,8 @@ def test_array_interface(self): import numpy as np + if self.runappdirect: + skip('requires numpy.core, test with numpy test suite instead') import sys class DummyArray(object): def __init__(self, interface, base=None): From noreply at buildbot.pypy.org Fri Apr 24 10:12:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 10:12:41 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: fixes Message-ID: <20150424081241.8AA5E1C022E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1782:e69cf8f75f84 Date: 2015-04-23 09:08 +0200 http://bitbucket.org/cffi/cffi/changeset/e69cf8f75f84/ Log: fixes diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4086,13 +4086,11 @@ boffsetmax = (boffsetmax + 7) / 8; /* bits -> bytes */ boffsetmax = (boffsetmax + alignment - 1) & ~(alignment-1); if (totalsize < 0) { - totalsize = boffsetmax; - if (totalsize == 
0) - totalsize = 1; + totalsize = boffsetmax ? boffsetmax : 1; } else { - if (detect_custom_layout(ct, sflags, boffsetmax, totalsize, - "wrong total size", "", "") < 0) + if (detect_custom_layout(ct, sflags, boffsetmax ? boffsetmax : 1, + totalsize, "wrong total size", "", "") < 0) goto error; if (totalsize < boffsetmax) { PyErr_Format(PyExc_TypeError, diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -570,7 +570,9 @@ def _set_cdef_types(ffi): struct_unions = [] pending_completion = [] - for name, tp in sorted(ffi._parser._declarations.items()): + lst = ffi._parser._declarations.items() + lst = sorted(lst, key=lambda x: x[0].split(' ', 1)[1]) + for name, tp in lst: kind, basename = name.split(' ', 1) if kind == 'struct' or kind == 'union': if kind == 'struct': diff --git a/new/test_dlopen.py b/new/test_dlopen.py --- a/new/test_dlopen.py +++ b/new/test_dlopen.py @@ -13,6 +13,12 @@ ffi.cdef("union foo_s { int a, b; };") assert ffi.sizeof("union foo_s") == 4 +def test_cdef_struct_union(): + ffi = FFI() + ffi.cdef("union bar_s { int a; }; struct foo_s { int b; };") + assert ffi.sizeof("union bar_s") == 4 + assert ffi.sizeof("struct foo_s") == 4 + def test_math_sin(): py.test.skip("XXX redo!") ffi = FFI() From noreply at buildbot.pypy.org Fri Apr 24 10:12:42 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 10:12:42 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: in-progress Message-ID: <20150424081242.89AB81C022E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1783:68760daf07ab Date: 2015-04-24 09:00 +0200 http://bitbucket.org/cffi/cffi/changeset/68760daf07ab/ Log: in-progress diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -568,23 +568,48 @@ return ffi._get_cached_btype(tp) def _set_cdef_types(ffi): + from . 
import model + + all_structs = {} + all_typedefs = {} + for name, tp in ffi._parser._declarations.items(): + kind, basename = name.split(' ', 1) + if kind == 'struct' or kind == 'union' or kind == 'anonymous': + all_structs[tp.name] = tp + elif kind == 'typedef': + all_typedefs[basename] = tp + if getattr(tp, "origin", None) == "unknown_type": + all_structs[tp.name] = tp + elif isinstance(tp, model.NamedPointerType): + all_structs[tp.totype.name] = tp.totype + struct_unions = [] pending_completion = [] - lst = ffi._parser._declarations.items() - lst = sorted(lst, key=lambda x: x[0].split(' ', 1)[1]) - for name, tp in lst: - kind, basename = name.split(' ', 1) - if kind == 'struct' or kind == 'union': - if kind == 'struct': - BType = _cffi1_backend.new_struct_type(basename) - else: - BType = _cffi1_backend.new_union_type(basename) - struct_unions.append(basename) - struct_unions.append(BType) - if not tp.partial and tp.fldtypes is not None: - pending_completion.append((tp, BType)) + for name, tp in sorted(all_structs.items()): + if not isinstance(tp, model.UnionType): + BType = _cffi1_backend.new_struct_type(name) + else: + BType = _cffi1_backend.new_union_type(name) + struct_unions.append(name) + struct_unions.append(BType) + if not tp.partial and tp.fldtypes is not None: + pending_completion.append((tp, BType)) # - ffi.__set_types(struct_unions) + typenames = [] + for name, tp in sorted(all_typedefs.items()): + cname = tp._get_c_name() + if cname == name: + assert isinstance(tp, model.StructOrUnionOrEnum) + cname = '%s %s' % (tp.kind, tp.name) + try: + BType = ffi.typeof(cname) + except ffi.error: + ffi.__set_types(struct_unions, typenames) + BType = ffi.typeof(cname) + typenames.append(name) + typenames.append(BType) + # + ffi.__set_types(struct_unions, typenames) # for tp, BType in pending_completion: fldtypes = [ffi.typeof(ftp._get_c_name()) for ftp in tp.fldtypes] diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -459,7 
+459,7 @@ def unknown_ptr_type(name, structname=None): if structname is None: - structname = '*$%s' % name + structname = '$$%s' % name tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) diff --git a/new/ffi_obj.c b/new/ffi_obj.c --- a/new/ffi_obj.c +++ b/new/ffi_obj.c @@ -525,15 +525,18 @@ static PyObject *ffi__set_types(FFIObject *self, PyObject *args) { - PyObject *lst1; + PyObject *lst1, *lst2; _cffi_opcode_t *types = NULL; struct _cffi_struct_union_s *struct_unions = NULL; + struct _cffi_typename_s *typenames = NULL; - if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &lst1)) + if (!PyArg_ParseTuple(args, "O!O!", + &PyList_Type, &lst1, &PyList_Type, &lst2)) return NULL; if (self->ctx_is_static) { bad_usage: + PyMem_Free(typenames); PyMem_Free(struct_unions); PyMem_Free(types); if (!PyErr_Occurred()) @@ -543,19 +546,24 @@ cleanup_builder_c(self->types_builder); - int i, lst_length = PyList_GET_SIZE(lst1) / 2; - Py_ssize_t new_size_1 = sizeof(_cffi_opcode_t) * lst_length; - Py_ssize_t new_size_2 = sizeof(struct _cffi_struct_union_s) * lst_length; - types = PyMem_Malloc(new_size_1); - struct_unions = PyMem_Malloc(new_size_2); - if (!types || !struct_unions) { + int i; + int lst1_length = PyList_GET_SIZE(lst1) / 2; + int lst2_length = PyList_GET_SIZE(lst2) / 2; + Py_ssize_t newsize0 = sizeof(_cffi_opcode_t) * (lst1_length + lst2_length); + Py_ssize_t newsize1 = sizeof(struct _cffi_struct_union_s) * lst1_length; + Py_ssize_t newsize2 = sizeof(struct _cffi_typename_s) * lst2_length; + types = PyMem_Malloc(newsize0); + struct_unions = PyMem_Malloc(newsize1); + typenames = PyMem_Malloc(newsize2); + if (!types || !struct_unions || !typenames) { PyErr_NoMemory(); goto bad_usage; } - memset(types, 0, new_size_1); - memset(struct_unions, 0, new_size_2); + memset(types, 0, newsize0); + memset(struct_unions, 0, newsize1); + memset(typenames, 0, newsize2); - for (i = 0; i < lst_length; i++) { + for (i = 0; i < lst1_length; i++) { PyObject *x = 
PyList_GET_ITEM(lst1, i * 2); if (!PyString_Check(x)) goto bad_usage; @@ -570,18 +578,32 @@ struct_unions[i].size = (size_t)-2; struct_unions[i].alignment = -2; } - for (i = 0; i < lst_length; i++) { + for (i = 0; i < lst2_length; i++) { + PyObject *x = PyList_GET_ITEM(lst2, i * 2); + if (!PyString_Check(x)) + goto bad_usage; + typenames[i].name = PyString_AS_STRING(x); + typenames[i].type_index = lst1_length + i; + + x = PyList_GET_ITEM(lst2, i * 2 + 1); + if (!CTypeDescr_Check(x)) + goto bad_usage; + types[lst1_length + i] = x; + } + for (i = 0; i < lst1_length + lst2_length; i++) { PyObject *x = (PyObject *)types[i]; Py_INCREF(x); } - Py_INCREF(lst1); /* to keep alive the strings in '.name' */ + Py_INCREF(args); /* to keep alive the strings in '.name' */ Py_XDECREF(self->dynamic_types); - self->dynamic_types = lst1; + self->dynamic_types = args; self->types_builder->ctx.types = types; - self->types_builder->num_types_imported = lst_length; + self->types_builder->num_types_imported = lst1_length + lst2_length; self->types_builder->ctx.struct_unions = struct_unions; - self->types_builder->ctx.num_struct_unions = lst_length; + self->types_builder->ctx.num_struct_unions = lst1_length; + self->types_builder->ctx.typenames = typenames; + self->types_builder->ctx.num_typenames = lst2_length; Py_INCREF(Py_None); return Py_None; diff --git a/new/parse_c_type.c b/new/parse_c_type.c --- a/new/parse_c_type.c +++ b/new/parse_c_type.c @@ -56,7 +56,9 @@ static int is_ident_first(char x) { - return (('A' <= x && x <= 'Z') || ('a' <= x && x <= 'z') || x == '_'); + return (('A' <= x && x <= 'Z') || ('a' <= x && x <= 'z') || x == '_' || + x == '$'); /* '$' in names is supported here, for the struct + names invented by cparser */ } static int is_digit(char x) diff --git a/new/recompiler.py b/new/recompiler.py --- a/new/recompiler.py +++ b/new/recompiler.py @@ -508,8 +508,17 @@ _generate_cpy_union_collecttype = _generate_cpy_struct_collecttype def _generate_cpy_struct_decl(self, tp, 
name): - cname = tp.get_c_name('') - self._struct_decl(tp, cname, cname.replace(' ', '_')) + cname = tp._get_c_name() + if ' ' in cname: + prefix, declname = cname.split(' ', 1) + else: + prefix, declname = '', cname + while declname.startswith('$'): + prefix += 'D' + declname = declname[1:] + approxname = prefix + '_' + declname + assert '$' not in approxname + self._struct_decl(tp, cname, approxname) _generate_cpy_union_decl = _generate_cpy_struct_decl def _generate_cpy_struct_ctx(self, tp, name, prefix='s'): @@ -518,28 +527,6 @@ _generate_cpy_union_ctx = _generate_cpy_struct_ctx # ---------- - # 'anonymous' declarations. These are produced for anonymous structs - # or unions; the 'name' is obtained by a typedef. - - def _generate_cpy_anonymous_collecttype(self, tp, name): - if isinstance(tp, model.EnumType): - self._generate_cpy_enum_collecttype(tp, name) - else: - self._struct_collecttype(tp) - - def _generate_cpy_anonymous_decl(self, tp, name): - if isinstance(tp, model.EnumType): - self._generate_cpy_enum_decl(tp, name, '') - else: - self._struct_decl(tp, name, 'typedef_' + name) - - def _generate_cpy_anonymous_ctx(self, tp, name): - if isinstance(tp, model.EnumType): - self._generate_cpy_enum_ctx(tp, name, '') - else: - self._struct_ctx(tp, name, 'typedef_' + name) - - # ---------- # constants, declared with "static const ..." 
def _generate_cpy_const(self, is_int, name, tp=None, category='const', diff --git a/new/test_dlopen.py b/new/test_dlopen.py --- a/new/test_dlopen.py +++ b/new/test_dlopen.py @@ -19,6 +19,34 @@ assert ffi.sizeof("union bar_s") == 4 assert ffi.sizeof("struct foo_s") == 4 +def test_cdef_struct_typename_1(): + ffi = FFI() + ffi.cdef("typedef struct { int a; } t1; typedef struct { t1* m; } t2;") + assert ffi.sizeof("t2") == ffi.sizeof("void *") + assert ffi.sizeof("t1") == 4 + +def test_cdef_struct_typename_2(): + ffi = FFI() + ffi.cdef("typedef struct { int a; } *p1; typedef struct { p1 m; } *p2;") + p2 = ffi.new("p2") + assert ffi.sizeof(p2[0]) == ffi.sizeof("void *") + assert ffi.sizeof(p2[0].m) == ffi.sizeof("void *") + +def test_cdef_struct_anon_1(): + ffi = FFI() + ffi.cdef("typedef struct { int a; } t1; struct foo_s { t1* m; };") + assert ffi.sizeof("struct foo_s") == ffi.sizeof("void *") + +def test_cdef_struct_anon_2(): + ffi = FFI() + ffi.cdef("typedef struct { int a; } *p1; struct foo_s { p1 m; };") + assert ffi.sizeof("struct foo_s") == ffi.sizeof("void *") + +def test_cdef_struct_anon_3(): + ffi = FFI() + ffi.cdef("typedef struct { int a; } **pp; struct foo_s { pp m; };") + assert ffi.sizeof("struct foo_s") == ffi.sizeof("void *") + def test_math_sin(): py.test.skip("XXX redo!") ffi = FFI() From noreply at buildbot.pypy.org Fri Apr 24 10:12:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 10:12:43 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Change of scope for now: the initial goal is now to avoid breaking Message-ID: <20150424081243.CFCBF1C022E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1784:6d8fa9486905 Date: 2015-04-24 09:17 +0200 http://bitbucket.org/cffi/cffi/changeset/6d8fa9486905/ Log: Change of scope for now: the initial goal is now to avoid breaking ffi.dlopen() and ffi.verify(), and only add _cffi1.recompile(). 
diff --git a/new/PLAN b/_cffi1/PLAN rename from new/PLAN rename to _cffi1/PLAN diff --git a/new/_cffi_include.h b/_cffi1/_cffi_include.h rename from new/_cffi_include.h rename to _cffi1/_cffi_include.h diff --git a/new/bsdopendirtype.py b/_cffi1/bsdopendirtype.py rename from new/bsdopendirtype.py rename to _cffi1/bsdopendirtype.py diff --git a/new/bsdopendirtype_build.py b/_cffi1/bsdopendirtype_build.py rename from new/bsdopendirtype_build.py rename to _cffi1/bsdopendirtype_build.py diff --git a/new/cffi-1.0.rst b/_cffi1/cffi-1.0.rst rename from new/cffi-1.0.rst rename to _cffi1/cffi-1.0.rst diff --git a/new/cffi1 b/_cffi1/cffi1 rename from new/cffi1 rename to _cffi1/cffi1 diff --git a/new/cffi1_module.c b/_cffi1/cffi1_module.c rename from new/cffi1_module.c rename to _cffi1/cffi1_module.c diff --git a/new/cffi_opcode.py b/_cffi1/cffi_opcode.py rename from new/cffi_opcode.py rename to _cffi1/cffi_opcode.py diff --git a/new/cglob.c b/_cffi1/cglob.c rename from new/cglob.c rename to _cffi1/cglob.c diff --git a/new/ffi_obj.c b/_cffi1/ffi_obj.c rename from new/ffi_obj.c rename to _cffi1/ffi_obj.c diff --git a/new/lib_obj.c b/_cffi1/lib_obj.c rename from new/lib_obj.c rename to _cffi1/lib_obj.c diff --git a/new/manual.c b/_cffi1/manual.c rename from new/manual.c rename to _cffi1/manual.c diff --git a/new/parse_c_type.c b/_cffi1/parse_c_type.c rename from new/parse_c_type.c rename to _cffi1/parse_c_type.c diff --git a/new/parse_c_type.h b/_cffi1/parse_c_type.h rename from new/parse_c_type.h rename to _cffi1/parse_c_type.h diff --git a/new/readdir2.py b/_cffi1/readdir2.py rename from new/readdir2.py rename to _cffi1/readdir2.py diff --git a/new/readdir2_build.py b/_cffi1/readdir2_build.py rename from new/readdir2_build.py rename to _cffi1/readdir2_build.py diff --git a/new/realize_c_type.c b/_cffi1/realize_c_type.c rename from new/realize_c_type.c rename to _cffi1/realize_c_type.c diff --git a/new/recompiler.py b/_cffi1/recompiler.py rename from new/recompiler.py rename 
to _cffi1/recompiler.py diff --git a/new/setup.py b/_cffi1/setup.py rename from new/setup.py rename to _cffi1/setup.py diff --git a/new/setup_manual.py b/_cffi1/setup_manual.py rename from new/setup_manual.py rename to _cffi1/setup_manual.py diff --git a/new/test_dlopen.py b/_cffi1/test_dlopen.py rename from new/test_dlopen.py rename to _cffi1/test_dlopen.py diff --git a/new/test_ffi_obj.py b/_cffi1/test_ffi_obj.py rename from new/test_ffi_obj.py rename to _cffi1/test_ffi_obj.py diff --git a/new/test_parse_c_type.py b/_cffi1/test_parse_c_type.py rename from new/test_parse_c_type.py rename to _cffi1/test_parse_c_type.py diff --git a/new/test_realize_c_type.py b/_cffi1/test_realize_c_type.py rename from new/test_realize_c_type.py rename to _cffi1/test_realize_c_type.py diff --git a/new/test_recompiler.py b/_cffi1/test_recompiler.py rename from new/test_recompiler.py rename to _cffi1/test_recompiler.py diff --git a/new/test_verify1.py b/_cffi1/test_verify1.py rename from new/test_verify1.py rename to _cffi1/test_verify1.py diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5402,6 +5402,12 @@ return NULL; } +static PyObject *b__get_types(PyObject *self, PyObject *noarg) +{ + return PyTuple_Pack(2, (PyObject *)&CData_Type, + (PyObject *)&CTypeDescr_Type); +} + /************************************************************/ static char _testfunc0(char a, char b) @@ -5706,6 +5712,7 @@ #ifdef MS_WIN32 {"getwinerror", b_getwinerror, METH_VARARGS}, #endif + {"_get_types", b__get_types, METH_NOARGS}, {"_testfunc", b__testfunc, METH_VARARGS}, {"_testbuff", b__testbuff, METH_VARARGS}, {NULL, NULL} /* Sentinel */ @@ -5820,7 +5827,7 @@ } #endif -#include "../new/cffi1_module.c" +#include "../_cffi1/cffi1_module.c" static void *cffi_exports[] = { (void *)26, @@ -5861,7 +5868,7 @@ #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef FFIBackendModuleDef = { PyModuleDef_HEAD_INIT, - "_cffi1_backend", + "_cffi_backend", NULL, -1, 
FFIBackendMethods, @@ -5875,7 +5882,7 @@ #define INITERROR return PyMODINIT_FUNC -init_cffi1_backend(void) +init_cffi_backend(void) #endif { PyObject *m, *v; @@ -5892,7 +5899,7 @@ #if PY_MAJOR_VERSION >= 3 m = PyModule_Create(&FFIBackendModuleDef); #else - m = Py_InitModule("_cffi1_backend", FFIBackendMethods); + m = Py_InitModule("_cffi_backend", FFIBackendMethods); #endif if (m == NULL) diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -1,10 +1,13 @@ __all__ = ['FFI', 'VerificationError', 'VerificationMissing', 'CDefError', 'FFIError'] -from .api import FFI, CDefError +from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -FFIError = FFI.error # backward compatibility - __version__ = "1.0.0" __version_info__ = (1, 0, 0) + +# The verifier module file names are based on the CRC32 of a string that +# contains the following version number. It may be older than __version__ +# if nothing is clearly incompatible. +__version_verifier_modules__ = "0.8.6" diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -1,9 +1,5 @@ import sys, types from .lock import allocate_lock -import _cffi1_backend - -class DeprecatedError(Exception): - pass try: callable @@ -19,7 +15,8 @@ basestring = str -FFIError = _cffi1_backend.FFI.error +class FFIError(Exception): + pass class CDefError(Exception): def __str__(self): @@ -30,7 +27,7 @@ return '%s%s' % (line, self.args[0]) -class FFI(_cffi1_backend.FFI): +class FFI(object): r''' The main top-level class that you instantiate once, or once per module. @@ -48,16 +45,21 @@ C.printf("hello, %s!\n", ffi.new("char[]", "world")) ''' - def __init__(self): + def __init__(self, backend=None): """Create an FFI instance. The 'backend' argument is used to select a non-default backend, mostly for tests. """ from . import cparser, model - from . 
import __version__ - - backend = _cffi1_backend - assert backend.__version__ == __version__, \ - "version mismatch, %s != %s" % (backend.__version__, __version__) + if backend is None: + # You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with + # _cffi_backend.so compiled. + import _cffi_backend as backend + from . import __version__ + assert backend.__version__ == __version__, \ + "version mismatch, %s != %s" % (backend.__version__, __version__) + # (If you insist you can also try to pass the option + # 'backend=backend_ctypes.CTypesBackend()', but don't + # rely on it! It's probably not going to work well.) self._backend = backend self._lock = allocate_lock() @@ -75,9 +77,18 @@ if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # - #with self._lock: - # self.BVoidP = self._get_cached_btype(model.voidp_type) - # self.BCharA = self._get_cached_btype(model.char_array_type) + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) + self.BCharA = self._get_cached_btype(model.char_array_type) + if isinstance(backend, types.ModuleType): + # _cffi_backend: attach these constants to the class + if not hasattr(FFI, 'NULL'): + FFI.NULL = self.cast(self.BVoidP, 0) + FFI.CData, FFI.CType = backend._get_types() + else: + # ctypes backend: attach these constants to the instance + self.NULL = self.cast(self.BVoidP, 0) + self.CData, self.CType = backend._get_types() def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, @@ -97,7 +108,6 @@ if override: for cache in self._function_caches: cache.clear() - _set_cdef_types(self) def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. @@ -145,7 +155,7 @@ "pointer-to-function type" % (cdecl,)) return btype - def XXXtypeof(self, cdecl): + def typeof(self, cdecl): """Parse the C type given as a string and return the corresponding object. 
It can also be used on 'cdata' instance to get its C type. @@ -164,7 +174,7 @@ return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) - def XXXsizeof(self, cdecl): + def sizeof(self, cdecl): """Return the size in bytes of the argument. It can be a string naming a C type, or a 'cdata' instance. """ @@ -174,7 +184,7 @@ else: return self._backend.sizeof(cdecl) - def XXXalignof(self, cdecl): + def alignof(self, cdecl): """Return the natural alignment size in bytes of the C type given as a string. """ @@ -182,7 +192,7 @@ cdecl = self._typeof(cdecl) return self._backend.alignof(cdecl) - def XXXoffsetof(self, cdecl, *fields_or_indexes): + def offsetof(self, cdecl, *fields_or_indexes): """Return the offset of the named field inside the given structure or array, which must be given as a C type name. You can give several field names in case of nested structures. @@ -193,7 +203,7 @@ cdecl = self._typeof(cdecl) return self._typeoffsetof(cdecl, *fields_or_indexes)[1] - def XXXnew(self, cdecl, init=None): + def new(self, cdecl, init=None): """Allocate an instance according to the specified C type and return a pointer to it. The specified C type must be either a pointer or an array: ``new('X *')`` allocates an X and returns @@ -220,7 +230,7 @@ cdecl = self._typeof(cdecl) return self._backend.newp(cdecl, init) - def XXXcast(self, cdecl, source): + def cast(self, cdecl, source): """Similar to a C cast: returns an instance of the named C type initialized with the given 'source'. The source is casted between integers or pointers of any type. @@ -229,7 +239,7 @@ cdecl = self._typeof(cdecl) return self._backend.cast(cdecl, source) - def XXXstring(self, cdata, maxlen=-1): + def string(self, cdata, maxlen=-1): """Return a Python string (or unicode string) from the 'cdata'. If 'cdata' is a pointer or array of characters or bytes, returns the null-terminated string. 
The returned string extends until @@ -247,7 +257,7 @@ """ return self._backend.string(cdata, maxlen) - def XXXbuffer(self, cdata, size=-1): + def buffer(self, cdata, size=-1): """Return a read-write buffer object that references the raw C data pointed to by the given 'cdata'. The 'cdata' must be a pointer or an array. Can be passed to functions expecting a buffer, or directly @@ -260,7 +270,7 @@ """ return self._backend.buffer(cdata, size) - def XXXfrom_buffer(self, python_buffer): + def from_buffer(self, python_buffer): """Return a that points to the data of the given Python object, which must support the buffer interface. Note that this is not meant to be used on the built-in types str, @@ -270,7 +280,7 @@ """ return self._backend.from_buffer(self.BCharA, python_buffer) - def XXXcallback(self, cdecl, python_callable=None, error=None): + def callback(self, cdecl, python_callable=None, error=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. The callback invokes the specified 'python_callable' (which may @@ -290,7 +300,7 @@ else: return callback_decorator_wrap(python_callable) # direct mode - def XXXgetctype(self, cdecl, replace_with=''): + def getctype(self, cdecl, replace_with=''): """Return a string giving the C type 'cdecl', which may be itself a string or a object. If 'replace_with' is given, it gives extra text to append (or insert for more complicated C types), like @@ -306,7 +316,7 @@ replace_with = ' ' + replace_with return self._backend.getcname(cdecl, replace_with) - def XXXgc(self, cdata, destructor): + def gc(self, cdata, destructor): """Return a new cdata object that points to the same data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. 
@@ -320,15 +330,18 @@ return gc_weakrefs.build(cdata, destructor) def _get_cached_btype(self, type): - raise DeprecatedError + assert self._lock.acquire(False) is False + # call me with the lock! + try: + BType = self._cached_btypes[type] + except KeyError: + finishlist = [] + BType = type.get_cached_btype(self, finishlist) + for type in finishlist: + type.finish_backend_type(self, finishlist) + return BType - def verify(self, source='', **kwargs): - from recompiler import verify # XXX must be in the current dir - FFI._verify_counter += 1 - return verify(self, 'verify%d' % FFI._verify_counter, source, **kwargs) - _verify_counter = 0 - - def XXXverify(self, source='', tmpdir=None, **kwargs): + def verify(self, source='', tmpdir=None, **kwargs): """Verify that the current ffi signatures compile on this machine, and return a dynamic library object. The dynamic library can be used to call functions and access global @@ -362,10 +375,10 @@ return self._backend.get_errno() def _set_errno(self, errno): self._backend.set_errno(errno) - XXXerrno = property(_get_errno, _set_errno, None, + errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") - def XXXgetwinerror(self, code=-1): + def getwinerror(self, code=-1): return self._backend.getwinerror(code) def _pointer_to(self, ctype): @@ -373,7 +386,7 @@ with self._lock: return model.pointer_cache(self, ctype) - def XXXaddressof(self, cdata, *fields_or_indexes): + def addressof(self, cdata, *fields_or_indexes): """Return the address of a . If 'fields_or_indexes' are given, returns the address of that field or array item in the structure or array, recursively in @@ -405,7 +418,6 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. 
""" - XXX with ffi_to_include._lock: with self._lock: self._parser.include(ffi_to_include._parser) @@ -413,10 +425,10 @@ self._cdefsources.extend(ffi_to_include._cdefsources) self._cdefsources.append(']') - def XXXnew_handle(self, x): + def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) - def XXXfrom_handle(self, x): + def from_handle(self, x): return self._backend.from_handle(x) def set_unicode(self, enabled_flag): @@ -426,7 +438,6 @@ declare these types to be (pointers to) plain 8-bit characters. This is mostly for backward compatibility; you usually want True. """ - XXX if self._windows_unicode is not None: raise ValueError("set_unicode() can only be called once") enabled_flag = bool(enabled_flag) @@ -566,56 +577,3 @@ else: with ffi._lock: return ffi._get_cached_btype(tp) - -def _set_cdef_types(ffi): - from . import model - - all_structs = {} - all_typedefs = {} - for name, tp in ffi._parser._declarations.items(): - kind, basename = name.split(' ', 1) - if kind == 'struct' or kind == 'union' or kind == 'anonymous': - all_structs[tp.name] = tp - elif kind == 'typedef': - all_typedefs[basename] = tp - if getattr(tp, "origin", None) == "unknown_type": - all_structs[tp.name] = tp - elif isinstance(tp, model.NamedPointerType): - all_structs[tp.totype.name] = tp.totype - - struct_unions = [] - pending_completion = [] - for name, tp in sorted(all_structs.items()): - if not isinstance(tp, model.UnionType): - BType = _cffi1_backend.new_struct_type(name) - else: - BType = _cffi1_backend.new_union_type(name) - struct_unions.append(name) - struct_unions.append(BType) - if not tp.partial and tp.fldtypes is not None: - pending_completion.append((tp, BType)) - # - typenames = [] - for name, tp in sorted(all_typedefs.items()): - cname = tp._get_c_name() - if cname == name: - assert isinstance(tp, model.StructOrUnionOrEnum) - cname = '%s %s' % (tp.kind, tp.name) - try: - BType = ffi.typeof(cname) - except ffi.error: - ffi.__set_types(struct_unions, 
typenames) - BType = ffi.typeof(cname) - typenames.append(name) - typenames.append(BType) - # - ffi.__set_types(struct_unions, typenames) - # - for tp, BType in pending_completion: - fldtypes = [ffi.typeof(ftp._get_c_name()) for ftp in tp.fldtypes] - lst = list(zip(tp.fldnames, fldtypes, tp.fldbitsize)) - sflags = 0 - if tp.packed: - sflags = 8 # SF_PACKED - _cffi1_backend.complete_struct_or_union(BType, lst, ffi, - -1, -1, sflags) diff --git a/setup_base.py b/setup_base.py --- a/setup_base.py +++ b/setup_base.py @@ -9,7 +9,9 @@ from distutils.core import setup from distutils.extension import Extension standard = '__pypy__' not in sys.modules - setup(ext_modules=[Extension(name = '_cffi1_backend', + setup(packages=['cffi'], + requires=['pycparser'], + ext_modules=[Extension(name = '_cffi_backend', include_dirs=include_dirs, sources=sources, libraries=libraries, From noreply at buildbot.pypy.org Fri Apr 24 10:12:44 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 10:12:44 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Fix for test_verify Message-ID: <20150424081244.DEDF71C022E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1785:548efd04854c Date: 2015-04-24 09:23 +0200 http://bitbucket.org/cffi/cffi/changeset/548efd04854c/ Log: Fix for test_verify diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3802,7 +3802,7 @@ CTypeDescrObject *ct; PyObject *fields, *interned_fields, *ignored; int is_union, alignment; - Py_ssize_t boffset, i, nb_fields, boffsetmax; + Py_ssize_t boffset, i, nb_fields, boffsetmax, alignedsize; Py_ssize_t totalsize = -1; int totalalignment = -1; CFieldObject **previous; @@ -4084,12 +4084,15 @@ as 1 instead. But for ctypes support, we allow the manually- specified totalsize to be zero in this case. 
*/ boffsetmax = (boffsetmax + 7) / 8; /* bits -> bytes */ - boffsetmax = (boffsetmax + alignment - 1) & ~(alignment-1); + alignedsize = (boffsetmax + alignment - 1) & ~(alignment-1); + if (alignedsize == 0) + alignedsize = 1; + if (totalsize < 0) { - totalsize = boffsetmax ? boffsetmax : 1; + totalsize = alignedsize; } else { - if (detect_custom_layout(ct, sflags, boffsetmax ? boffsetmax : 1, + if (detect_custom_layout(ct, sflags, alignedsize, totalsize, "wrong total size", "", "") < 0) goto error; if (totalsize < boffsetmax) { From noreply at buildbot.pypy.org Fri Apr 24 10:12:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 10:12:45 +0200 (CEST) Subject: [pypy-commit] cffi default: Skip an existing test after figuring out that it is indeed likely to Message-ID: <20150424081245.EB27F1C022E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1786:a6987485dc7f Date: 2015-04-24 10:13 +0200 http://bitbucket.org/cffi/cffi/changeset/a6987485dc7f/ Log: Skip an existing test after figuring out that it is indeed likely to crash in test_vgen, depending on details of the platform. Changed it to clearly flag this as invalid already in the cdef, and why. diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -140,6 +140,16 @@ replace_with = self._base_pattern % (', '.join(reprargs),) self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) + # + if isinstance(result, StructOrUnion) and result.partial: + from .ffiplatform import VerificationError + raise VerificationError( + '%s: the %s is a struct with "...;", which is not ' + 'supported as return type (how to call it with ' + 'libffi depends on possibly-omitted fields). 
' + 'Workaround: write a wrapper function which takes ' + 'a pointer-to-struct as extra argument and writes ' + 'the result there' % (self, result)) class RawFunctionType(BaseFunctionType): diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1233,11 +1233,13 @@ py.test.skip('Segfaults on mips64el') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. XXX think about something better :-( + # ...in fact, it is no longer supported: likely crashes in vgen ffi = FFI() - ffi.cdef(""" + py.test.raises(VerificationError, ffi.cdef, """ typedef struct { ...; } myhandle_t; myhandle_t foo(void); """) + py.test.skip("XXX reimplement maybe?") lib = ffi.verify(""" typedef short myhandle_t; myhandle_t foo(void) { return 42; } @@ -1245,6 +1247,21 @@ h = lib.foo() assert ffi.sizeof(h) == ffi.sizeof("short") +def test_return_partial_struct(): + py.test.skip("not implemented") + ffi = FFI() + ffi.cdef(""" + typedef struct { int x; ...; } foo_t; + foo_t foo(void); + """) + lib = ffi.verify(""" + typedef struct { int y, x; } foo_t; + foo_t foo(void) { foo_t r = { 45, 81 }; return r; } + """) + h = lib.foo() + assert ffi.sizeof(h) == 2 * ffi.sizeof("int") + assert h.x == 81 + def test_cannot_name_struct_type(): ffi = FFI() ffi.cdef("typedef struct { int x; } *sp; void foo(sp);") From noreply at buildbot.pypy.org Fri Apr 24 10:13:48 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 10:13:48 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: hg merge default Message-ID: <20150424081348.3F5C71C022E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1787:f58471f990dd Date: 2015-04-24 10:13 +0200 http://bitbucket.org/cffi/cffi/changeset/f58471f990dd/ Log: hg merge default diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -140,6 +140,16 @@ replace_with = self._base_pattern % (', '.join(reprargs),) 
self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) + # + if isinstance(result, StructOrUnion) and result.partial: + from .ffiplatform import VerificationError + raise VerificationError( + '%s: the %s is a struct with "...;", which is not ' + 'supported as return type (how to call it with ' + 'libffi depends on possibly-omitted fields). ' + 'Workaround: write a wrapper function which takes ' + 'a pointer-to-struct as extra argument and writes ' + 'the result there' % (self, result)) class RawFunctionType(BaseFunctionType): diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -657,9 +657,9 @@ # case the 'static' is completely ignored. ffi.cdef("static const int AA, BB, CC, DD;") lib = ffi.verify("#define AA 42\n" - "#define BB (-43)\n" - "#define CC (22*2)\n" - "#define DD ((unsigned int)142)\n") + "#define BB (-43) // blah\n" + "#define CC (22*2) /* foobar */\n" + "#define DD ((unsigned int)142) /* foo\nbar */\n") assert lib.AA == 42 assert lib.BB == -43 assert lib.CC == 44 @@ -1233,11 +1233,13 @@ py.test.skip('Segfaults on mips64el') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. 
XXX think about something better :-( + # ...in fact, it is no longer supported: likely crashes in vgen ffi = FFI() - ffi.cdef(""" + py.test.raises(VerificationError, ffi.cdef, """ typedef struct { ...; } myhandle_t; myhandle_t foo(void); """) + py.test.skip("XXX reimplement maybe?") lib = ffi.verify(""" typedef short myhandle_t; myhandle_t foo(void) { return 42; } @@ -1245,6 +1247,21 @@ h = lib.foo() assert ffi.sizeof(h) == ffi.sizeof("short") +def test_return_partial_struct(): + py.test.skip("not implemented") + ffi = FFI() + ffi.cdef(""" + typedef struct { int x; ...; } foo_t; + foo_t foo(void); + """) + lib = ffi.verify(""" + typedef struct { int y, x; } foo_t; + foo_t foo(void) { foo_t r = { 45, 81 }; return r; } + """) + h = lib.foo() + assert ffi.sizeof(h) == 2 * ffi.sizeof("int") + assert h.x == 81 + def test_cannot_name_struct_type(): ffi = FFI() ffi.cdef("typedef struct { int x; } *sp; void foo(sp);") From noreply at buildbot.pypy.org Fri Apr 24 10:33:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 10:33:37 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: Remove usage of "uintptr_t" and use "unsigned long", which is more like Message-ID: <20150424083337.96A121C061E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: vmprof2 Changeset: r76916:79023f7a715e Date: 2015-04-24 10:33 +0200 http://bitbucket.org/pypy/pypy/changeset/79023f7a715e/ Log: Remove usage of "uintptr_t" and use "unsigned long", which is more like the rest of the code. Avoid calling __sync_lock_test_and_set on Windows and directly call the "proper" functions. 
diff --git a/rpython/jit/backend/llsupport/codemap.py b/rpython/jit/backend/llsupport/codemap.py --- a/rpython/jit/backend/llsupport/codemap.py +++ b/rpython/jit/backend/llsupport/codemap.py @@ -26,29 +26,19 @@ srcdir = os.path.join(os.path.dirname(__file__), 'src') eci = ExternalCompilationInfo(post_include_bits=[""" -#if defined _MSC_VER && _MSC_VER < 1600 -#ifdef _WIN32 -typedef unsigned int uintptr_t; -#else -typedef usigned long uintptr_t; -#endif -#else -#include -#endif - -RPY_EXTERN long pypy_jit_codemap_add(uintptr_t addr, +RPY_EXTERN long pypy_jit_codemap_add(unsigned long addr, unsigned int machine_code_size, long *bytecode_info, unsigned int bytecode_info_size); -RPY_EXTERN long *pypy_jit_codemap_del(uintptr_t addr, unsigned int size); -RPY_EXTERN uintptr_t pypy_jit_codemap_firstkey(void); +RPY_EXTERN long *pypy_jit_codemap_del(unsigned long addr, unsigned int size); +RPY_EXTERN unsigned long pypy_jit_codemap_firstkey(void); RPY_EXTERN void *pypy_find_codemap_at_addr(long addr, long* start_addr); RPY_EXTERN long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, long *current_pos_addr); -RPY_EXTERN long pypy_jit_depthmap_add(uintptr_t addr, unsigned int size, +RPY_EXTERN long pypy_jit_depthmap_add(unsigned long addr, unsigned int size, unsigned int stackdepth); -RPY_EXTERN void pypy_jit_depthmap_clear(uintptr_t addr, unsigned int size); +RPY_EXTERN void pypy_jit_depthmap_clear(unsigned long addr, unsigned int size); """], separate_module_sources=[ open(os.path.join(srcdir, 'skiplist.c'), 'r').read() + diff --git a/rpython/jit/backend/llsupport/src/codemap.c b/rpython/jit/backend/llsupport/src/codemap.c --- a/rpython/jit/backend/llsupport/src/codemap.c +++ b/rpython/jit/backend/llsupport/src/codemap.c @@ -9,10 +9,15 @@ void pypy_codemap_invalid_set(int value) { +#ifndef _MSC_VER if (value) __sync_lock_test_and_set(&pypy_codemap_currently_invalid, 1); else __sync_lock_release(&pypy_codemap_currently_invalid); +#else + InterlockedExchange((long 
volatile *)&pypy_codemap_currently_invalid, + (long)value); +#endif } @@ -31,7 +36,7 @@ /*** interface used from codemap.py ***/ RPY_EXTERN -long pypy_jit_codemap_add(uintptr_t addr, unsigned int machine_code_size, +long pypy_jit_codemap_add(unsigned long addr, unsigned int machine_code_size, long *bytecode_info, unsigned int bytecode_info_size) { skipnode_t *new = skiplist_malloc(sizeof(codemap_data_t)); @@ -52,9 +57,9 @@ } RPY_EXTERN -long *pypy_jit_codemap_del(uintptr_t addr, unsigned int size) +long *pypy_jit_codemap_del(unsigned long addr, unsigned int size) { - uintptr_t search_key = addr + size - 1; + unsigned long search_key = addr + size - 1; long *result; skipnode_t *node; @@ -78,7 +83,7 @@ } RPY_EXTERN -uintptr_t pypy_jit_codemap_firstkey(void) +unsigned long pypy_jit_codemap_firstkey(void) { return skiplist_firstkey(&jit_codemap_head); } @@ -90,7 +95,7 @@ { skipnode_t *codemap = skiplist_search(&jit_codemap_head, addr); codemap_data_t *data; - uintptr_t rel_addr; + unsigned long rel_addr; if (codemap == &jit_codemap_head) { if (start_addr) @@ -98,7 +103,7 @@ return NULL; } - rel_addr = (uintptr_t)addr - codemap->key; + rel_addr = (unsigned long)addr - codemap->key; data = (codemap_data_t *)codemap->data; if (rel_addr >= data->machine_code_size) { if (start_addr) @@ -153,7 +158,7 @@ /*** interface used from codemap.py ***/ RPY_EXTERN -long pypy_jit_depthmap_add(uintptr_t addr, unsigned int size, +long pypy_jit_depthmap_add(unsigned long addr, unsigned int size, unsigned int stackdepth) { skipnode_t *new = skiplist_malloc(sizeof(depthmap_data_t)); @@ -173,9 +178,9 @@ } RPY_EXTERN -void pypy_jit_depthmap_clear(uintptr_t addr, unsigned int size) +void pypy_jit_depthmap_clear(unsigned long addr, unsigned int size) { - uintptr_t search_key = addr + size - 1; + unsigned long search_key = addr + size - 1; if (size == 0) return; @@ -196,14 +201,15 @@ RPY_EXTERN long pypy_jit_stack_depth_at_loc(long loc) { - skipnode_t *depthmap = 
skiplist_search(&jit_depthmap_head, (uintptr_t)loc); + skipnode_t *depthmap = skiplist_search(&jit_depthmap_head, + (unsigned long)loc); depthmap_data_t *data; - uintptr_t rel_addr; + unsigned long rel_addr; if (depthmap == &jit_depthmap_head) return -1; - rel_addr = (uintptr_t)loc - depthmap->key; + rel_addr = (unsigned long)loc - depthmap->key; data = (depthmap_data_t *)depthmap->data; if (rel_addr >= data->block_size) return -1; diff --git a/rpython/jit/backend/llsupport/src/skiplist.c b/rpython/jit/backend/llsupport/src/skiplist.c --- a/rpython/jit/backend/llsupport/src/skiplist.c +++ b/rpython/jit/backend/llsupport/src/skiplist.c @@ -1,40 +1,19 @@ #include -#if defined _MSC_VER - #include - int __sync_lock_test_and_set(int * i, int j) - { - return _interlockedbittestandreset(i, j); - } - int __sync_lock_release(int *i) - { - return _interlockedbittestandreset(i, 0); - } - #if _MSC_VER < 1600 - #ifdef _WIN32 - typedef unsigned int uintptr_t; - #else - typedef usigned long uintptr_t; - #endif - #endif -#else -#include -#endif - #define HAS_SKIPLIST #define SKIPLIST_HEIGHT 8 typedef struct skipnode_s { - uintptr_t key; + unsigned long key; char *data; struct skipnode_s *next[SKIPLIST_HEIGHT]; /* may be smaller */ } skipnode_t; -static skipnode_t *skiplist_malloc(uintptr_t datasize) +static skipnode_t *skiplist_malloc(unsigned long datasize) { char *result; - uintptr_t basesize; - uintptr_t length = 1; + unsigned long basesize; + unsigned long length = 1; while (length < SKIPLIST_HEIGHT && (rand() & 3) == 0) length++; basesize = sizeof(skipnode_t) - @@ -46,12 +25,12 @@ return (skipnode_t *)result; } -static skipnode_t *skiplist_search(skipnode_t *head, uintptr_t searchkey) +static skipnode_t *skiplist_search(skipnode_t *head, unsigned long searchkey) { /* Returns the skipnode with key closest (but <=) searchkey. Note that if there is no item with key <= searchkey in the list, this will return the head node. 
*/ - uintptr_t level = SKIPLIST_HEIGHT - 1; + unsigned long level = SKIPLIST_HEIGHT - 1; while (1) { skipnode_t *next = head->next[level]; if (next != NULL && next->key <= searchkey) { @@ -68,13 +47,13 @@ static void skiplist_insert(skipnode_t *head, skipnode_t *new) { - uintptr_t size0 = sizeof(skipnode_t) - - SKIPLIST_HEIGHT * sizeof(skipnode_t *); - uintptr_t height_of_new = (new->data - ((char *)new + size0)) / - sizeof(skipnode_t *); + unsigned long size0 = sizeof(skipnode_t) - + SKIPLIST_HEIGHT * sizeof(skipnode_t *); + unsigned long height_of_new = (new->data - ((char *)new + size0)) / + sizeof(skipnode_t *); - uintptr_t level = SKIPLIST_HEIGHT - 1; - uintptr_t searchkey = new->key; + unsigned long level = SKIPLIST_HEIGHT - 1; + unsigned long searchkey = new->key; while (1) { skipnode_t *next = head->next[level]; if (next != NULL && next->key <= searchkey) { @@ -92,9 +71,9 @@ } } -static skipnode_t *skiplist_remove(skipnode_t *head, uintptr_t exact_key) +static skipnode_t *skiplist_remove(skipnode_t *head, unsigned long exact_key) { - uintptr_t level = SKIPLIST_HEIGHT - 1; + unsigned long level = SKIPLIST_HEIGHT - 1; while (1) { skipnode_t *next = head->next[level]; if (next != NULL && next->key <= exact_key) { @@ -115,7 +94,7 @@ } } -static uintptr_t skiplist_firstkey(skipnode_t *head) +static unsigned long skiplist_firstkey(skipnode_t *head) { if (head->next[0] == NULL) return 0; diff --git a/rpython/jit/backend/llsupport/test/test_skiplist.py b/rpython/jit/backend/llsupport/test/test_skiplist.py --- a/rpython/jit/backend/llsupport/test/test_skiplist.py +++ b/rpython/jit/backend/llsupport/test/test_skiplist.py @@ -5,16 +5,16 @@ ffi.cdef(""" typedef struct { - uintptr_t key; + unsigned long key; char *data; ...; } skipnode_t; -skipnode_t *skiplist_malloc(uintptr_t datasize); -skipnode_t *skiplist_search(skipnode_t *head, uintptr_t searchkey); +skipnode_t *skiplist_malloc(unsigned long datasize); +skipnode_t *skiplist_search(skipnode_t *head, unsigned long 
searchkey); void skiplist_insert(skipnode_t *head, skipnode_t *new); -skipnode_t *skiplist_remove(skipnode_t *head, uintptr_t exact_key); -uintptr_t skiplist_firstkey(skipnode_t *head); +skipnode_t *skiplist_remove(skipnode_t *head, unsigned long exact_key); +unsigned long skiplist_firstkey(skipnode_t *head); """) filename = os.path.join(os.path.dirname(__file__), '..', 'src', 'skiplist.c') From noreply at buildbot.pypy.org Fri Apr 24 11:06:38 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 11:06:38 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: hg merge default Message-ID: <20150424090638.0C67F1C06F5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1789:111b2f47845a Date: 2015-04-24 11:07 +0200 http://bitbucket.org/cffi/cffi/changeset/111b2f47845a/ Log: hg merge default diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -140,16 +140,6 @@ replace_with = self._base_pattern % (', '.join(reprargs),) self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) - # - if isinstance(result, StructOrUnion) and result.partial: - from .ffiplatform import VerificationError - raise VerificationError( - '%s: the %s is a struct with "...;", which is not ' - 'supported as return type (how to call it with ' - 'libffi depends on possibly-omitted fields). 
' - 'Workaround: write a wrapper function which takes ' - 'a pointer-to-struct as extra argument and writes ' - 'the result there' % (self, result)) class RawFunctionType(BaseFunctionType): diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -149,15 +149,21 @@ context = 'argument of %s' % name arglist = [type.get_c_name(' %s' % arg, context) for type, arg in zip(tp.args, argnames)] + tpresult = tp.result + if isinstance(tpresult, model.StructOrUnion): + arglist.insert(0, tpresult.get_c_name(' *r', context)) + tpresult = model.void_type arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) funcdecl = ' %s(%s)' % (wrappername, arglist) context = 'result of %s' % name - prnt(tp.result.get_c_name(funcdecl, context)) + prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') # - if not isinstance(tp.result, model.VoidType): + if isinstance(tp.result, model.StructOrUnion): + result_code = '*r = ' + elif not isinstance(tp.result, model.VoidType): result_code = 'return ' else: result_code = '' @@ -174,15 +180,22 @@ else: indirections = [] base_tp = tp - if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): + if (any(isinstance(typ, model.StructOrUnion) for typ in tp.args) + or isinstance(tp.result, model.StructOrUnion)): indirect_args = [] for i, typ in enumerate(tp.args): if isinstance(typ, model.StructOrUnion): typ = model.PointerType(typ) indirections.append((i, typ)) indirect_args.append(typ) + indirect_result = tp.result + if isinstance(indirect_result, model.StructOrUnion): + indirect_result = model.PointerType(indirect_result) + indirect_args.insert(0, indirect_result) + indirections.insert(0, ("result", indirect_result)) + indirect_result = model.void_type tp = model.FunctionPtrType(tuple(indirect_args), - tp.result, tp.ellipsis) + indirect_result, tp.ellipsis) BFunc = self.ffi._get_cached_btype(tp) wrappername = '_cffi_f_%s' % name 
newfunction = module.load_function(BFunc, wrappername) @@ -195,9 +208,16 @@ def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) - def newfunc(*args): - args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] - return oldfunc(*args) + if i == "result": + ffi = self.ffi + def newfunc(*args): + res = ffi.new(BType) + oldfunc(res, *args) + return res[0] + else: + def newfunc(*args): + args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] + return oldfunc(*args) newfunc._cffi_base_type = base_tp return newfunc diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1233,13 +1233,11 @@ py.test.skip('Segfaults on mips64el') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. XXX think about something better :-( - # ...in fact, it is no longer supported: likely crashes in vgen ffi = FFI() - py.test.raises(VerificationError, ffi.cdef, """ + ffi.cdef(""" typedef struct { ...; } myhandle_t; myhandle_t foo(void); """) - py.test.skip("XXX reimplement maybe?") lib = ffi.verify(""" typedef short myhandle_t; myhandle_t foo(void) { return 42; } @@ -1248,7 +1246,6 @@ assert ffi.sizeof(h) == ffi.sizeof("short") def test_return_partial_struct(): - py.test.skip("not implemented") ffi = FFI() ffi.cdef(""" typedef struct { int x; ...; } foo_t; @@ -1262,6 +1259,26 @@ assert ffi.sizeof(h) == 2 * ffi.sizeof("int") assert h.x == 81 +def test_take_and_return_partial_structs(): + ffi = FFI() + ffi.cdef(""" + typedef struct { int x; ...; } foo_t; + foo_t foo(foo_t, foo_t); + """) + lib = ffi.verify(""" + typedef struct { int y, x; } foo_t; + foo_t foo(foo_t a, foo_t b) { + foo_t r = { 100, a.x * 5 + b.x * 7 }; + return r; + } + """) + args = ffi.new("foo_t[3]") + args[0].x = 1000 + args[2].x = -498 + h = lib.foo(args[0], args[2]) + assert ffi.sizeof(h) == 2 * ffi.sizeof("int") + assert h.x == 1000 * 5 - 498 * 7 + def 
test_cannot_name_struct_type(): ffi = FFI() ffi.cdef("typedef struct { int x; } *sp; void foo(sp);") From noreply at buildbot.pypy.org Fri Apr 24 11:06:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 11:06:15 +0200 (CEST) Subject: [pypy-commit] cffi default: Reimplement vengine_gen returning incomplete structs. Message-ID: <20150424090615.0267F1C06F5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1788:e4b22b167187 Date: 2015-04-24 11:06 +0200 http://bitbucket.org/cffi/cffi/changeset/e4b22b167187/ Log: Reimplement vengine_gen returning incomplete structs. diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -140,16 +140,6 @@ replace_with = self._base_pattern % (', '.join(reprargs),) self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) - # - if isinstance(result, StructOrUnion) and result.partial: - from .ffiplatform import VerificationError - raise VerificationError( - '%s: the %s is a struct with "...;", which is not ' - 'supported as return type (how to call it with ' - 'libffi depends on possibly-omitted fields). 
' - 'Workaround: write a wrapper function which takes ' - 'a pointer-to-struct as extra argument and writes ' - 'the result there' % (self, result)) class RawFunctionType(BaseFunctionType): diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -149,15 +149,21 @@ context = 'argument of %s' % name arglist = [type.get_c_name(' %s' % arg, context) for type, arg in zip(tp.args, argnames)] + tpresult = tp.result + if isinstance(tpresult, model.StructOrUnion): + arglist.insert(0, tpresult.get_c_name(' *r', context)) + tpresult = model.void_type arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) funcdecl = ' %s(%s)' % (wrappername, arglist) context = 'result of %s' % name - prnt(tp.result.get_c_name(funcdecl, context)) + prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') # - if not isinstance(tp.result, model.VoidType): + if isinstance(tp.result, model.StructOrUnion): + result_code = '*r = ' + elif not isinstance(tp.result, model.VoidType): result_code = 'return ' else: result_code = '' @@ -174,15 +180,22 @@ else: indirections = [] base_tp = tp - if any(isinstance(typ, model.StructOrUnion) for typ in tp.args): + if (any(isinstance(typ, model.StructOrUnion) for typ in tp.args) + or isinstance(tp.result, model.StructOrUnion)): indirect_args = [] for i, typ in enumerate(tp.args): if isinstance(typ, model.StructOrUnion): typ = model.PointerType(typ) indirections.append((i, typ)) indirect_args.append(typ) + indirect_result = tp.result + if isinstance(indirect_result, model.StructOrUnion): + indirect_result = model.PointerType(indirect_result) + indirect_args.insert(0, indirect_result) + indirections.insert(0, ("result", indirect_result)) + indirect_result = model.void_type tp = model.FunctionPtrType(tuple(indirect_args), - tp.result, tp.ellipsis) + indirect_result, tp.ellipsis) BFunc = self.ffi._get_cached_btype(tp) wrappername = '_cffi_f_%s' % name 
newfunction = module.load_function(BFunc, wrappername) @@ -195,9 +208,16 @@ def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) - def newfunc(*args): - args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] - return oldfunc(*args) + if i == "result": + ffi = self.ffi + def newfunc(*args): + res = ffi.new(BType) + oldfunc(res, *args) + return res[0] + else: + def newfunc(*args): + args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] + return oldfunc(*args) newfunc._cffi_base_type = base_tp return newfunc diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1233,13 +1233,11 @@ py.test.skip('Segfaults on mips64el') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. XXX think about something better :-( - # ...in fact, it is no longer supported: likely crashes in vgen ffi = FFI() - py.test.raises(VerificationError, ffi.cdef, """ + ffi.cdef(""" typedef struct { ...; } myhandle_t; myhandle_t foo(void); """) - py.test.skip("XXX reimplement maybe?") lib = ffi.verify(""" typedef short myhandle_t; myhandle_t foo(void) { return 42; } @@ -1248,7 +1246,6 @@ assert ffi.sizeof(h) == ffi.sizeof("short") def test_return_partial_struct(): - py.test.skip("not implemented") ffi = FFI() ffi.cdef(""" typedef struct { int x; ...; } foo_t; @@ -1262,6 +1259,26 @@ assert ffi.sizeof(h) == 2 * ffi.sizeof("int") assert h.x == 81 +def test_take_and_return_partial_structs(): + ffi = FFI() + ffi.cdef(""" + typedef struct { int x; ...; } foo_t; + foo_t foo(foo_t, foo_t); + """) + lib = ffi.verify(""" + typedef struct { int y, x; } foo_t; + foo_t foo(foo_t a, foo_t b) { + foo_t r = { 100, a.x * 5 + b.x * 7 }; + return r; + } + """) + args = ffi.new("foo_t[3]") + args[0].x = 1000 + args[2].x = -498 + h = lib.foo(args[0], args[2]) + assert ffi.sizeof(h) == 2 * ffi.sizeof("int") + assert h.x == 1000 * 5 - 498 * 7 + def 
test_cannot_name_struct_type(): ffi = FFI() ffi.cdef("typedef struct { int x; } *sp; void foo(sp);") From noreply at buildbot.pypy.org Fri Apr 24 11:20:37 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 24 Apr 2015 11:20:37 +0200 (CEST) Subject: [pypy-commit] pypy default: Backed out changeset: d198d926afb8, more tests needed Message-ID: <20150424092037.98ED51C0726@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r76917:8cb5c941efb4 Date: 2015-04-24 12:18 +0300 http://bitbucket.org/pypy/pypy/changeset/8cb5c941efb4/ Log: Backed out changeset: d198d926afb8, more tests needed diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -328,11 +328,8 @@ return ArrayBuffer(self, readonly) def astype(self, space, dtype): - # we want to create a new array, but must respect the strides - # in self. So find a factor of the itemtype.elsize, and use this - factor = float(dtype.elsize) / self.dtype.elsize - strides = [int(factor*s) for s in self.get_strides()] - backstrides = [int(factor*s) for s in self.get_backstrides()] + strides, backstrides = calc_strides(self.get_shape(), dtype, + self.order) impl = ConcreteArray(self.get_shape(), dtype, self.order, strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2183,8 +2183,7 @@ assert b.dtype == 'bool' a = arange(6, dtype='f4').reshape(2,3) - b = a.T.astype('i4') - assert (a.T.strides == b.strides) + b = a.astype('i4') a = array('x').astype('S3').dtype assert a.itemsize == 3 From noreply at buildbot.pypy.org Fri Apr 24 11:53:42 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 11:53:42 +0200 (CEST) Subject: [pypy-commit] cffi default: Fix for 
test_vgen.test_struct_returned_by_func (and uniformize the error message) Message-ID: <20150424095342.88EC91C061E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1790:3fcfce174377 Date: 2015-04-24 11:54 +0200 http://bitbucket.org/cffi/cffi/changeset/3fcfce174377/ Log: Fix for test_vgen.test_struct_returned_by_func (and uniformize the error message) diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -190,6 +190,10 @@ indirect_args.append(typ) indirect_result = tp.result if isinstance(indirect_result, model.StructOrUnion): + if indirect_result.fldtypes is None: + raise TypeError("'%s' is used as result type, " + "but is opaque" % ( + indirect_result._get_c_name(),)) indirect_result = model.PointerType(indirect_result) indirect_args.insert(0, indirect_result) indirections.insert(0, ("result", indirect_result)) diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1680,9 +1680,8 @@ e = py.test.raises(TypeError, ffi.verify, "typedef struct { int x; } foo_t; " "foo_t myfunc(void) { foo_t x = { 42 }; return x; }") - assert str(e.value) in [ - "function myfunc: 'foo_t' is used as result type, but is opaque", - "function myfunc: result type 'foo_t' is opaque"] + assert str(e.value) == ( + "function myfunc: 'foo_t' is used as result type, but is opaque") def test_include(): ffi1 = FFI() From noreply at buildbot.pypy.org Fri Apr 24 11:53:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 11:53:47 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: hg merge default Message-ID: <20150424095347.E7AEA1C061E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1791:d04c328c3d75 Date: 2015-04-24 11:54 +0200 http://bitbucket.org/cffi/cffi/changeset/d04c328c3d75/ Log: hg merge default diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ 
-190,6 +190,10 @@ indirect_args.append(typ) indirect_result = tp.result if isinstance(indirect_result, model.StructOrUnion): + if indirect_result.fldtypes is None: + raise TypeError("'%s' is used as result type, " + "but is opaque" % ( + indirect_result._get_c_name(),)) indirect_result = model.PointerType(indirect_result) indirect_args.insert(0, indirect_result) indirections.insert(0, ("result", indirect_result)) diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1680,9 +1680,8 @@ e = py.test.raises(TypeError, ffi.verify, "typedef struct { int x; } foo_t; " "foo_t myfunc(void) { foo_t x = { 42 }; return x; }") - assert str(e.value) in [ - "function myfunc: 'foo_t' is used as result type, but is opaque", - "function myfunc: result type 'foo_t' is opaque"] + assert str(e.value) == ( + "function myfunc: 'foo_t' is used as result type, but is opaque") def test_include(): ffi1 = FFI() From noreply at buildbot.pypy.org Fri Apr 24 11:55:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 11:55:26 +0200 (CEST) Subject: [pypy-commit] cffi default: I *think* there is a chance for this to work now Message-ID: <20150424095526.E43A81C061E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1792:14c34ed8b40a Date: 2015-04-24 11:55 +0200 http://bitbucket.org/cffi/cffi/changeset/14c34ed8b40a/ Log: I *think* there is a chance for this to work now diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1226,11 +1226,11 @@ xxx def test_opaque_integer_as_function_result(): - import platform - if platform.machine().startswith('sparc'): - py.test.skip('Breaks horribly on sparc (SIGILL + corrupted stack)') - elif platform.machine() == 'mips64' and sys.maxsize > 2**32: - py.test.skip('Segfaults on mips64el') + #import platform + #if platform.machine().startswith('sparc'): + # py.test.skip('Breaks horribly on sparc 
(SIGILL + corrupted stack)') + #elif platform.machine() == 'mips64' and sys.maxsize > 2**32: + # py.test.skip('Segfaults on mips64el') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. XXX think about something better :-( ffi = FFI() From noreply at buildbot.pypy.org Fri Apr 24 12:10:03 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 12:10:03 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Fix the tests so that they mostly run again Message-ID: <20150424101003.E50141C01D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1793:59c86e07b247 Date: 2015-04-24 12:10 +0200 http://bitbucket.org/cffi/cffi/changeset/59c86e07b247/ Log: Fix the tests so that they mostly run again diff --git a/_cffi1/__init__.py b/_cffi1/__init__.py new file mode 100644 --- /dev/null +++ b/_cffi1/__init__.py @@ -0,0 +1,1 @@ +from recompiler import make_c_source, recompile diff --git a/_cffi1/_cffi_include.h b/_cffi1/_cffi_include.h --- a/_cffi1/_cffi_include.h +++ b/_cffi1/_cffi_include.h @@ -167,7 +167,7 @@ PyObject *module, *c_api_object = NULL; void *src; - module = PyImport_ImportModule("_cffi1_backend"); + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) goto failure; @@ -181,7 +181,7 @@ src = PyCapsule_GetPointer(c_api_object, "cffi"); if ((uintptr_t)(((void **)src)[0]) < _CFFI_NUM_EXPORTS) { PyErr_SetString(PyExc_ImportError, - "the _cffi1_backend module is an outdated version"); + "the _cffi_backend module is an outdated version"); goto failure; } memcpy(_cffi_exports, src, _CFFI_NUM_EXPORTS * sizeof(void *)); diff --git a/_cffi1/cffi1 b/_cffi1/cffi1 deleted file mode 120000 --- a/_cffi1/cffi1 +++ /dev/null @@ -1,1 +0,0 @@ -../cffi \ No newline at end of file diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -1,5 +1,5 @@ import os, sys -from cffi1 import ffiplatform, model +from cffi import ffiplatform, model from 
cffi_opcode import * @@ -183,7 +183,7 @@ prnt('};') prnt() # - # the init function, loading _cffi1_backend and calling a method there + # the init function, loading _cffi_backend and calling a method there prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (self.module_name,)) prnt('{') @@ -695,6 +695,6 @@ for name in dir(module.ffi): if not name.startswith('_'): attr = getattr(module.ffi, name) - if attr is not getattr(ffi, name): + if attr is not getattr(ffi, name, object()): setattr(ffi, name, attr) return module.lib diff --git a/_cffi1/test_dlopen.py b/_cffi1/test_dlopen.py --- a/_cffi1/test_dlopen.py +++ b/_cffi1/test_dlopen.py @@ -1,4 +1,6 @@ import py +py.test.skip("later") + from cffi1 import FFI import math @@ -48,7 +50,6 @@ assert ffi.sizeof("struct foo_s") == ffi.sizeof("void *") def test_math_sin(): - py.test.skip("XXX redo!") ffi = FFI() ffi.cdef("double sin(double);") m = ffi.dlopen('m') diff --git a/_cffi1/test_ffi_obj.py b/_cffi1/test_ffi_obj.py --- a/_cffi1/test_ffi_obj.py +++ b/_cffi1/test_ffi_obj.py @@ -1,5 +1,5 @@ import py -import _cffi1_backend +import _cffi_backend as _cffi1_backend def test_ffi_new(): diff --git a/_cffi1/test_parse_c_type.py b/_cffi1/test_parse_c_type.py --- a/_cffi1/test_parse_c_type.py +++ b/_cffi1/test_parse_c_type.py @@ -251,9 +251,9 @@ '->', Pointer(0)] def test_cffi_opcode_sync(): - import cffi_opcode, cffi1.model + import cffi_opcode, cffi.model for name in dir(lib): if name.startswith('_CFFI_'): assert getattr(cffi_opcode, name[6:]) == getattr(lib, name) assert sorted(cffi_opcode.PRIMITIVE_TO_INDEX.keys()) == ( - sorted(cffi1.model.PrimitiveType.ALL_PRIMITIVE_TYPES.keys())) + sorted(cffi.model.PrimitiveType.ALL_PRIMITIVE_TYPES.keys())) diff --git a/_cffi1/test_realize_c_type.py b/_cffi1/test_realize_c_type.py --- a/_cffi1/test_realize_c_type.py +++ b/_cffi1/test_realize_c_type.py @@ -2,8 +2,8 @@ def check(input, expected_output=None, expected_ffi_error=False): - import _cffi1_backend - ffi = _cffi1_backend.FFI() + import 
_cffi_backend + ffi = _cffi_backend.FFI() if not expected_ffi_error: ct = ffi.typeof(input) assert isinstance(ct, ffi.CType) diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -1,6 +1,6 @@ import py -import recompiler -from cffi1 import FFI +from cffi import FFI +from _cffi1 import recompiler def check_type_table(input, expected_output): diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -1,5 +1,6 @@ import sys, math, py -from cffi1 import FFI, VerificationError, model +from cffi import FFI, VerificationError, model +from _cffi1 import recompiler lib_m = ['m'] if sys.platform == 'win32': @@ -7,7 +8,7 @@ import distutils.ccompiler if distutils.ccompiler.get_default_compiler() == 'msvc': lib_m = ['msvcrt'] - pass # no obvious -Werror equivalent on MSVC + extra_compile_args = [] # no obvious -Werror equivalent on MSVC else: if (sys.platform == 'darwin' and [int(x) for x in os.uname()[2].split('.')] >= [11, 0, 0]): @@ -19,10 +20,13 @@ # assume a standard gcc extra_compile_args = ['-Werror', '-Wall', '-Wextra', '-Wconversion'] - class FFI(FFI): - def verify(self, *args, **kwds): - return super(FFI, self).verify( - *args, extra_compile_args=extra_compile_args, **kwds) +class FFI(FFI): + _verify_counter = 0 + def verify(self, preamble='', *args, **kwds): + FFI._verify_counter += 1 + return recompiler.verify(self, 'verify%d' % FFI._verify_counter, + preamble, *args, + extra_compile_args=extra_compile_args, **kwds) class U(object): def __add__(self, other): @@ -49,8 +53,8 @@ def test_missing_function_import_error(): # uses the original FFI that just gives a warning during compilation - import cffi1 - test_missing_function(ffi=cffi1.FFI()) + import cffi + test_missing_function(ffi=cffi.FFI()) def test_simple_case(): ffi = FFI() From noreply at buildbot.pypy.org Fri Apr 24 12:52:40 2015 From: noreply at buildbot.pypy.org 
(arigo) Date: Fri, 24 Apr 2015 12:52:40 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Fix these two demos Message-ID: <20150424105240.6B6921C03F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1794:fdb29c447984 Date: 2015-04-24 12:53 +0200 http://bitbucket.org/cffi/cffi/changeset/fdb29c447984/ Log: Fix these two demos diff --git a/_cffi1/bsdopendirtype_build.py b/_cffi1/bsdopendirtype_build.py --- a/_cffi1/bsdopendirtype_build.py +++ b/_cffi1/bsdopendirtype_build.py @@ -1,5 +1,5 @@ -from cffi1 import FFI -import recompiler +from cffi import FFI +from _cffi1 import recompile ffi = FFI() ffi.cdef(""" @@ -15,7 +15,7 @@ static const int DT_BLK, DT_CHR, DT_DIR, DT_FIFO, DT_LNK, DT_REG, DT_SOCK; """) -recompiler.recompile(ffi, "_bsdopendirtype", """ +recompile(ffi, "_bsdopendirtype", """ #include #include """) diff --git a/_cffi1/readdir2_build.py b/_cffi1/readdir2_build.py --- a/_cffi1/readdir2_build.py +++ b/_cffi1/readdir2_build.py @@ -1,5 +1,5 @@ -from cffi1 import FFI -from recompiler import recompile +from cffi import FFI +from _cffi1 import recompile ffi = FFI() ffi.cdef(""" diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -672,11 +672,7 @@ include_dirs.insert(0, '.') # XXX return ffiplatform.get_extension(source_name, module_name, **kwds) -def recompile(ffi, module_name, preamble, tmpdir=None, **kwds): - if tmpdir is None: - tmpdir = 'build' - if not os.path.isdir(tmpdir): - os.mkdir(tmpdir) +def recompile(ffi, module_name, preamble, tmpdir='.', **kwds): c_file = os.path.join(tmpdir, module_name + '.c') ext = _get_extension(module_name, c_file, kwds) make_c_source(ffi, module_name, preamble, c_file) From noreply at buildbot.pypy.org Fri Apr 24 13:22:42 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 13:22:42 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: re-add this code Message-ID: <20150424112242.05DD51C069E@cobra.cs.uni-duesseldorf.de> 
Author: Armin Rigo Branch: cffi-1.0 Changeset: r1796:eb4454178e77 Date: 2015-04-24 13:22 +0200 http://bitbucket.org/cffi/cffi/changeset/eb4454178e77/ Log: re-add this code diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -527,6 +527,28 @@ _generate_cpy_union_ctx = _generate_cpy_struct_ctx # ---------- + # 'anonymous' declarations. These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. + + def _generate_cpy_anonymous_collecttype(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_collecttype(tp, name) + else: + self._struct_collecttype(tp) + + def _generate_cpy_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_decl(tp, name, '') + else: + self._struct_decl(tp, name, 'typedef_' + name) + + def _generate_cpy_anonymous_ctx(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_ctx(tp, name, '') + else: + self._struct_ctx(tp, name, 'typedef_' + name) + + # ---------- # constants, declared with "static const ..." 
def _generate_cpy_const(self, is_int, name, tp=None, category='const', From noreply at buildbot.pypy.org Fri Apr 24 13:22:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 13:22:40 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: rename tests Message-ID: <20150424112240.DF7091C069E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1795:f5d8a9cbf03e Date: 2015-04-24 13:18 +0200 http://bitbucket.org/cffi/cffi/changeset/f5d8a9cbf03e/ Log: rename tests diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -31,7 +31,7 @@ "(FUNCTION 4)(PRIMITIVE 14)(FUNCTION_END 0)" # sin "(FUNCTION 4)(PRIMITIVE 13)(FUNCTION_END 0)") # cos -def test_use_noop_for_repeated_args(): +def test_type_table_use_noop_for_repeated_args(): check_type_table("double sin(double *, double *);", "(FUNCTION 4)(POINTER 4)(NOOP 1)(FUNCTION_END 0)" "(PRIMITIVE 14)") @@ -39,51 +39,51 @@ "(FUNCTION 3)(POINTER 3)(NOOP 1)(PRIMITIVE 14)" "(FUNCTION_END 0)") -def test_dont_use_noop_for_primitives(): +def test_type_table_dont_use_noop_for_primitives(): check_type_table("double sin(double, double);", "(FUNCTION 1)(PRIMITIVE 14)(PRIMITIVE 14)(FUNCTION_END 0)") -def test_funcptr_as_argument(): +def test_type_table_funcptr_as_argument(): check_type_table("int sin(double(float));", "(FUNCTION 6)(PRIMITIVE 13)(FUNCTION_END 0)" "(FUNCTION 7)(POINTER 0)(FUNCTION_END 0)" "(PRIMITIVE 14)(PRIMITIVE 7)") -def test_variadic_function(): +def test_type_table_variadic_function(): check_type_table("int sin(int, ...);", "(FUNCTION 1)(PRIMITIVE 7)(FUNCTION_END 1)") -def test_array(): +def test_type_table_array(): check_type_table("int a[100];", "(PRIMITIVE 7)(ARRAY 0)(None 100)") -def test_typedef(): +def test_type_table_typedef(): check_type_table("typedef int foo_t;", "(PRIMITIVE 7)") -def test_prebuilt_type(): +def test_type_table_prebuilt_type(): check_type_table("int32_t f(void);", "(FUNCTION 
2)(FUNCTION_END 0)(PRIMITIVE 21)") -def test_struct_opaque(): +def test_type_table_struct_opaque(): check_type_table("struct foo_s;", "(STRUCT_UNION 0)") -def test_struct(): +def test_type_table_struct(): check_type_table("struct foo_s { int a; long b; };", "(PRIMITIVE 7)(PRIMITIVE 9)(STRUCT_UNION 0)") -def test_union(): +def test_type_table_union(): check_type_table("union foo_u { int a; long b; };", "(PRIMITIVE 7)(PRIMITIVE 9)(STRUCT_UNION 0)") -def test_struct_used(): +def test_type_table_struct_used(): check_type_table("struct foo_s { int a; long b; }; int f(struct foo_s*);", "(FUNCTION 3)(POINTER 5)(FUNCTION_END 0)" "(PRIMITIVE 7)(PRIMITIVE 9)" "(STRUCT_UNION 0)") -def test_anonymous_struct_with_typedef(): +def test_type_table_anonymous_struct_with_typedef(): check_type_table("typedef struct { int a; long b; } foo_t;", "(STRUCT_UNION 0)(PRIMITIVE 7)(PRIMITIVE 9)") From noreply at buildbot.pypy.org Fri Apr 24 13:40:44 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 13:40:44 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: more sanitization Message-ID: <20150424114044.BA7B21C022E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1797:09a38a35f6b7 Date: 2015-04-24 13:34 +0200 http://bitbucket.org/cffi/cffi/changeset/09a38a35f6b7/ Log: more sanitization diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -702,10 +702,13 @@ return outputfilename def verify(ffi, module_name, preamble, *args, **kwds): + from _cffi1.udir import udir import imp assert module_name not in sys.modules, "module name conflict: %r" % ( module_name,) - outputfilename = recompile(ffi, module_name, preamble, *args, **kwds) + outputfilename = recompile(ffi, module_name, preamble, + tmpdir=str(udir), + *args, **kwds) module = imp.load_dynamic(module_name, outputfilename) # # hack hack hack: copy all *bound methods* from module.ffi back to the diff --git a/_cffi1/test_verify1.py 
b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -21,12 +21,18 @@ extra_compile_args = ['-Werror', '-Wall', '-Wextra', '-Wconversion'] class FFI(FFI): + _extra_compile_args = extra_compile_args _verify_counter = 0 def verify(self, preamble='', *args, **kwds): + from _cffi1.udir import udir FFI._verify_counter += 1 return recompiler.verify(self, 'verify%d' % FFI._verify_counter, preamble, *args, - extra_compile_args=extra_compile_args, **kwds) + extra_compile_args=self._extra_compile_args, + tmp=str(udir), **kwds) + +class FFI_warnings_not_error(FFI): + _extra_compile_args = [] class U(object): def __add__(self, other): @@ -53,8 +59,7 @@ def test_missing_function_import_error(): # uses the original FFI that just gives a warning during compilation - import cffi - test_missing_function(ffi=cffi.FFI()) + test_missing_function(ffi=FFI_warnings_not_error()) def test_simple_case(): ffi = FFI() @@ -1855,8 +1860,7 @@ assert repr(ffi.typeof(lib.a)) == "" def test_bug_const_char_ptr_array_2(): - from cffi import FFI # ignore warnings - ffi = FFI() + ffi = FFI_warnings_not_error() # ignore warnings ffi.cdef("""const int a[];""") lib = ffi.verify("""const int a[5];""") assert repr(ffi.typeof(lib.a)) == "" diff --git a/testing/udir.py b/_cffi1/udir.py copy from testing/udir.py copy to _cffi1/udir.py --- a/testing/udir.py +++ b/_cffi1/udir.py @@ -1,3 +1,3 @@ import py -udir = py.path.local.make_numbered_dir(prefix = 'ffi-') +udir = py.path.local.make_numbered_dir(prefix = 'cffi1-') From noreply at buildbot.pypy.org Fri Apr 24 13:40:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 13:40:45 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Manual merge of the changes to testing/test_verify.py Message-ID: <20150424114045.CD7571C022E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1798:47b21ee09737 Date: 2015-04-24 13:36 +0200 http://bitbucket.org/cffi/cffi/changeset/47b21ee09737/ Log: Manual 
merge of the changes to testing/test_verify.py diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -639,9 +639,9 @@ # case the 'static' is completely ignored. ffi.cdef("static const int AA, BB, CC, DD;") lib = ffi.verify("#define AA 42\n" - "#define BB (-43)\n" - "#define CC (22*2)\n" - "#define DD ((unsigned int)142)\n") + "#define BB (-43) // blah\n" + "#define CC (22*2) /* foobar */\n" + "#define DD ((unsigned int)142) /* foo\nbar */\n") assert lib.AA == 42 assert lib.BB == -43 assert lib.CC == 44 @@ -1208,11 +1208,11 @@ xxx def test_opaque_integer_as_function_result(): - import platform - if platform.machine().startswith('sparc'): - py.test.skip('Breaks horribly on sparc (SIGILL + corrupted stack)') - elif platform.machine() == 'mips64' and sys.maxsize > 2**32: - py.test.skip('Segfaults on mips64el') + #import platform + #if platform.machine().startswith('sparc'): + # py.test.skip('Breaks horribly on sparc (SIGILL + corrupted stack)') + #elif platform.machine() == 'mips64' and sys.maxsize > 2**32: + # py.test.skip('Segfaults on mips64el') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. 
XXX think about something better :-( ffi = FFI() @@ -1227,6 +1227,40 @@ h = lib.foo() assert ffi.sizeof(h) == ffi.sizeof("short") +def test_return_partial_struct(): + ffi = FFI() + ffi.cdef(""" + typedef struct { int x; ...; } foo_t; + foo_t foo(void); + """) + lib = ffi.verify(""" + typedef struct { int y, x; } foo_t; + foo_t foo(void) { foo_t r = { 45, 81 }; return r; } + """) + h = lib.foo() + assert ffi.sizeof(h) == 2 * ffi.sizeof("int") + assert h.x == 81 + +def test_take_and_return_partial_structs(): + ffi = FFI() + ffi.cdef(""" + typedef struct { int x; ...; } foo_t; + foo_t foo(foo_t, foo_t); + """) + lib = ffi.verify(""" + typedef struct { int y, x; } foo_t; + foo_t foo(foo_t a, foo_t b) { + foo_t r = { 100, a.x * 5 + b.x * 7 }; + return r; + } + """) + args = ffi.new("foo_t[3]") + args[0].x = 1000 + args[2].x = -498 + h = lib.foo(args[0], args[2]) + assert ffi.sizeof(h) == 2 * ffi.sizeof("int") + assert h.x == 1000 * 5 - 498 * 7 + def test_cannot_name_struct_type(): ffi = FFI() ffi.cdef("typedef struct { int x; } *sp; void foo(sp);") @@ -1628,9 +1662,8 @@ e = py.test.raises(TypeError, ffi.verify, "typedef struct { int x; } foo_t; " "foo_t myfunc(void) { foo_t x = { 42 }; return x; }") - assert str(e.value) in [ - "function myfunc: 'foo_t' is used as result type, but is opaque", - "function myfunc: result type 'foo_t' is opaque"] + assert str(e.value) == ( + "function myfunc: 'foo_t' is used as result type, but is opaque") def test_include(): ffi1 = FFI() From noreply at buildbot.pypy.org Fri Apr 24 13:40:46 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 13:40:46 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: fix tests Message-ID: <20150424114046.D766B1C022E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1799:8b8f76173bdc Date: 2015-04-24 13:41 +0200 http://bitbucket.org/cffi/cffi/changeset/8b8f76173bdc/ Log: fix tests diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- 
a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -1,6 +1,7 @@ import sys, math, py -from cffi import FFI, VerificationError, model +from cffi import FFI, VerificationError, VerificationMissing, model from _cffi1 import recompiler +import _cffi_backend lib_m = ['m'] if sys.platform == 'win32': @@ -21,8 +22,10 @@ extra_compile_args = ['-Werror', '-Wall', '-Wextra', '-Wconversion'] class FFI(FFI): + error = _cffi_backend.FFI.error _extra_compile_args = extra_compile_args _verify_counter = 0 + def verify(self, preamble='', *args, **kwds): from _cffi1.udir import udir FFI._verify_counter += 1 @@ -459,9 +462,9 @@ ...; }; """) - py.test.raises(ffi.error, ffi.sizeof, 'struct foo_s') - py.test.raises(TypeError, ffi.offsetof, 'struct foo_s', 'x') - py.test.raises(TypeError, ffi.new, 'struct foo_s *') + py.test.raises(VerificationMissing, ffi.sizeof, 'struct foo_s') + py.test.raises(VerificationMissing, ffi.offsetof, 'struct foo_s', 'x') + py.test.raises(VerificationMissing, ffi.new, 'struct foo_s *') ffi.verify(""" struct foo_s { int a, b, x, c, d, e; From noreply at buildbot.pypy.org Fri Apr 24 14:05:26 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 24 Apr 2015 14:05:26 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: try to properly disable codemaps Message-ID: <20150424120526.88F0A1C061E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76918:a4a719af0754 Date: 2015-04-24 14:05 +0200 http://bitbucket.org/pypy/pypy/changeset/a4a719af0754/ Log: try to properly disable codemaps diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -586,8 +586,8 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) - self.codemap_builder.enter_portal_frame(jd_id, unique_id, - self.mc.get_relative_pos()) + #self.codemap_builder.enter_portal_frame(jd_id, unique_id, + # self.mc.get_relative_pos()) 
frame_info = self.datablockwrapper.malloc_aligned( @@ -662,7 +662,7 @@ assert len(set(inputargs)) == len(inputargs) self.setup(original_loop_token) - self.codemap.inherit_code_from_position(faildescr.adr_jump_offset) + #self.codemap.inherit_code_from_position(faildescr.adr_jump_offset) descr_number = compute_unique_id(faildescr) if log: operations = self._inject_debugging_code(faildescr, operations, @@ -886,8 +886,8 @@ size = self.mc.get_relative_pos() res = self.mc.materialize(self.cpu, allblocks, self.cpu.gc_ll_descr.gcrootmap) - self.cpu.codemap.register_codemap( - self.codemap.get_final_bytecode(res, size)) + #self.cpu.codemap.register_codemap( + # self.codemap.get_final_bytecode(res, size)) return res def update_frame_depth(self, frame_depth): diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -50,7 +50,7 @@ def setup_once(self): self.cpuinfo.arch_version = detect_arch_version() self.cpuinfo.hf_abi = detect_hardfloat() - self.codemap.setup() + #self.codemap.setup() self.assembler.setup_once() def finish_once(self): diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -130,7 +130,8 @@ self.gcmap_for_finish[0] = r_uint(1) def setup(self, looptoken): - self.codemap_builder = CodemapBuilder() + if self.cpu.HAS_CODEMAP: + self.codemap_builder = CodemapBuilder() self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def set_debug(self, v): @@ -200,13 +201,15 @@ return fail_descr, target def enter_portal_frame(self, op): - self.codemap_builder.enter_portal_frame(op.getarg(0).getint(), - op.getarg(1).getint(), - self.mc.get_relative_pos()) + if self.cpu.HAS_CODEMAP: + self.codemap_builder.enter_portal_frame(op.getarg(0).getint(), + op.getarg(1).getint(), + self.mc.get_relative_pos()) def leave_portal_frame(self, 
op): - self.codemap_builder.leave_portal_frame(op.getarg(0).getint(), - self.mc.get_relative_pos()) + if self.cpu.HAS_CODEMAP: + self.codemap_builder.leave_portal_frame(op.getarg(0).getint(), + self.mc.get_relative_pos()) def call_assembler(self, op, guard_op, argloc, vloc, result_loc, tmploc): self._store_force_index(guard_op) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -23,6 +23,8 @@ class AbstractLLCPU(AbstractCPU): from rpython.jit.metainterp.typesystem import llhelper as ts + HAS_CODEMAP = False + def __init__(self, rtyper, stats, opts, translate_support_code=False, gcdescr=None): assert type(opts) is not bool @@ -49,7 +51,8 @@ else: self._setup_exception_handling_untranslated() self.asmmemmgr = asmmemmgr.AsmMemoryManager() - self.codemap = codemap.CodemapStorage() + if self.HAS_CODEMAP: + self.codemap = codemap.CodemapStorage() self._setup_frame_realloc(translate_support_code) ad = self.gc_ll_descr.getframedescrs(self).arraydescr self.signedarraydescr = ad @@ -80,7 +83,8 @@ pass def finish_once(self): - self.codemap.finish_once() + if self.HAS_CODEMAP: + self.codemap.finish_once() def compile_loop(self, inputargs, operations, looptoken, jd_id=0, unique_id=0, log=True, name='', logger=None): @@ -222,7 +226,8 @@ for rawstart, rawstop in blocks: self.gc_ll_descr.freeing_block(rawstart, rawstop) self.asmmemmgr.free(rawstart, rawstop) - self.codemap.free_asm_block(rawstart, rawstop) + if self.HAS_CODEMAP: + self.codemap.free_asm_block(rawstart, rawstop) def force(self, addr_of_force_token): frame = rffi.cast(jitframe.JITFRAMEPTR, addr_of_force_token) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -457,8 +457,9 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) - 
self.codemap_builder.enter_portal_frame(jd_id, unique_id, - self.mc.get_relative_pos()) + if self.cpu.HAS_CODEMAP: + self.codemap_builder.enter_portal_frame(jd_id, unique_id, + self.mc.get_relative_pos()) frame_info = self.datablockwrapper.malloc_aligned( jitframe.JITFRAMEINFO_SIZE, alignment=WORD) clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) @@ -526,8 +527,9 @@ assert len(set(inputargs)) == len(inputargs) self.setup(original_loop_token) - self.codemap_builder.inherit_code_from_position( - faildescr.adr_jump_offset) + if self.cpu.HAS_CODEMAP: + self.codemap_builder.inherit_code_from_position( + faildescr.adr_jump_offset) self.mc.force_frame_size(DEFAULT_FRAME_BYTES) descr_number = compute_unique_id(faildescr) if log: @@ -692,8 +694,9 @@ size = self.mc.get_relative_pos() res = self.mc.materialize(self.cpu, allblocks, self.cpu.gc_ll_descr.gcrootmap) - self.cpu.codemap.register_codemap( - self.codemap_builder.get_final_bytecode(res, size)) + if self.cpu.HAS_CODEMAP: + self.cpu.codemap.register_codemap( + self.codemap_builder.get_final_bytecode(res, size)) return res def patch_jump_for_descr(self, faildescr, adr_new_target): diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -67,7 +67,8 @@ @rgc.no_release_gil def setup_once(self): self.profile_agent.startup() - self.codemap.setup() + if self.HAS_CODEMAP: + self.codemap.setup() self.assembler.setup_once() @rgc.no_release_gil @@ -158,5 +159,6 @@ CALLEE_SAVE_REGISTERS = [regloc.ebx, regloc.r12, regloc.r13, regloc.r14, regloc.r15] IS_64_BIT = True + HAS_CODEMAP = True CPU = CPU386 diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -4,6 +4,7 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.rarithmetic import intmask from rpython.rtyper.lltypesystem import rffi 
+from rpython.jit.backend.x86.arch import IS_X86_64 BYTE_REG_FLAG = 0x20 NO_BASE_REGISTER = -1 @@ -489,9 +490,10 @@ assert self._frame_size >= self.WORD def check_stack_size_at_ret(self): - assert self._frame_size == self.WORD - if not we_are_translated(): - self._frame_size = None + if IS_X86_64: + assert self._frame_size == self.WORD + if not we_are_translated(): + self._frame_size = None # ------------------------------ MOV ------------------------------ diff --git a/rpython/jit/backend/x86/test/test_assembler.py b/rpython/jit/backend/x86/test/test_assembler.py --- a/rpython/jit/backend/x86/test/test_assembler.py +++ b/rpython/jit/backend/x86/test/test_assembler.py @@ -51,7 +51,8 @@ asmmemmgr_blocks = None cpu = ACTUAL_CPU(None, None) cpu.setup() - cpu.codemap.setup() + if cpu.HAS_CODEMAP: + cpu.codemap.setup() looptoken = FakeToken() asm = cpu.assembler asm.setup_once() From noreply at buildbot.pypy.org Fri Apr 24 14:33:05 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 14:33:05 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: grr Message-ID: <20150424123305.AF3E91C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1800:6f19cda57c3e Date: 2015-04-24 14:02 +0200 http://bitbucket.org/cffi/cffi/changeset/6f19cda57c3e/ Log: grr diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -2251,10 +2251,11 @@ else return convert_to_object_bitfield(data, cf); } + break; case -1: return NULL; default: - ; + break; } } return PyObject_GenericGetAttr((PyObject *)cd, attr); @@ -2284,10 +2285,11 @@ return -1; } } + break; case -1: return -1; default: - ; + break; } } return PyObject_GenericSetAttr((PyObject *)cd, attr, value); From noreply at buildbot.pypy.org Fri Apr 24 14:33:06 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 14:33:06 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Parsing enums Message-ID: 
<20150424123306.BE8621C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1801:e5783fd730ed Date: 2015-04-24 14:21 +0200 http://bitbucket.org/cffi/cffi/changeset/e5783fd730ed/ Log: Parsing enums diff --git a/_cffi1/parse_c_type.c b/_cffi1/parse_c_type.c --- a/_cffi1/parse_c_type.c +++ b/_cffi1/parse_c_type.c @@ -26,6 +26,7 @@ //TOK__COMPLEX, TOK_CONST, TOK_DOUBLE, + TOK_ENUM, TOK_FLOAT, //TOK__IMAGINARY, TOK_INT, @@ -150,6 +151,9 @@ case 'd': if (tok->size == 6 && !memcmp(p, "double", 6)) tok->kind = TOK_DOUBLE; break; + case 'e': + if (tok->size == 4 && !memcmp(p, "enum", 4)) tok->kind = TOK_ENUM; + break; case 'f': if (tok->size == 5 && !memcmp(p, "float", 5)) tok->kind = TOK_FLOAT; break; @@ -375,6 +379,7 @@ MAKE_SEARCH_FUNC(globals) MAKE_SEARCH_FUNC(struct_unions) MAKE_SEARCH_FUNC(typenames) +MAKE_SEARCH_FUNC(enums) #undef MAKE_SEARCH_FUNC @@ -509,6 +514,7 @@ case TOK_FLOAT: case TOK_STRUCT: case TOK_UNION: + case TOK_ENUM: return parse_error(tok, "invalid combination of types"); case TOK_DOUBLE: @@ -598,6 +604,19 @@ t1 = _CFFI_OP(_CFFI_OP_STRUCT_UNION, n); break; } + case TOK_ENUM: + { + next_token(tok); + if (tok->kind != TOK_IDENTIFIER) + return parse_error(tok, "enum name expected"); + + int n = search_in_enums(tok->info->ctx, tok->p, tok->size); + if (n < 0) + return parse_error(tok, "undefined enum name"); + + t1 = _CFFI_OP(_CFFI_OP_ENUM, n); + break; + } default: return parse_error(tok, "identifier expected"); } diff --git a/_cffi1/test_parse_c_type.py b/_cffi1/test_parse_c_type.py --- a/_cffi1/test_parse_c_type.py +++ b/_cffi1/test_parse_c_type.py @@ -21,6 +21,9 @@ struct_names = ["bar_s", "foo", "foo_", "foo_s", "foo_s1", "foo_s12"] assert struct_names == sorted(struct_names) +enum_names = ["ebar_s", "efoo", "efoo_", "efoo_s", "efoo_s1", "efoo_s12"] +assert enum_names == sorted(enum_names) + identifier_names = ["id", "id0", "id05", "id05b", "tail"] assert identifier_names == sorted(identifier_names) @@ -33,6 +36,13 @@ 
ctx.struct_unions = ctx_structs ctx.num_struct_unions = len(struct_names) +c_enum_names = [ffi.new("char[]", _n) for _n in enum_names] +ctx_enums = ffi.new("struct _cffi_enum_s[]", len(enum_names)) +for _i in range(len(enum_names)): + ctx_enums[_i].name = c_enum_names[_i] +ctx.enums = ctx_enums +ctx.num_enums = len(enum_names) + c_identifier_names = [ffi.new("char[]", _n) for _n in identifier_names] ctx_identifiers = ffi.new("struct _cffi_typename_s[]", len(identifier_names)) for _i in range(len(identifier_names)): @@ -93,6 +103,7 @@ Func = make_getter('FUNCTION') FuncEnd = make_getter('FUNCTION_END') Struct = make_getter('STRUCT_UNION') +Enum = make_getter('ENUM') Typename = make_getter('TYPENAME') @@ -197,6 +208,12 @@ Prim(lib._CFFI_PRIM_CHAR), OpenArray(4)] +def test_enum(): + for i in range(len(enum_names)): + assert parse("enum %s" % (enum_names[i],)) == ['->', Enum(i)] + assert parse("enum %s*" % (enum_names[i],)) == [Enum(i), + '->', Pointer(0)] + def test_error(): parse_error("short short int", "'short' after another 'short' or 'long'", 6) parse_error("long long long", "'long long long' is too long", 10) From noreply at buildbot.pypy.org Fri Apr 24 14:55:48 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 24 Apr 2015 14:55:48 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: one missing place Message-ID: <20150424125548.70A8B1C03F7@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76919:afd3d870360e Date: 2015-04-24 14:55 +0200 http://bitbucket.org/pypy/pypy/changeset/afd3d870360e/ Log: one missing place diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -27,6 +27,10 @@ i += 1 # length i += len('pypy') while i < len(s): + if s[i] == '\x03': + break + if s[i] == '\x01': + xxx assert s[i] == '\x02' i += 1 _, size = struct.unpack("ll", s[i:i + 2 * WORD]) diff --git 
a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py --- a/rpython/jit/backend/llsupport/asmmemmgr.py +++ b/rpython/jit/backend/llsupport/asmmemmgr.py @@ -319,9 +319,10 @@ assert gcrootmap is not None for pos, mark in self.gcroot_markers: gcrootmap.register_asm_addr(rawstart + pos, mark) - cpu.codemap.register_frame_depth_map(rawstart, rawstart + size, - self.frame_positions, - self.frame_assignments) + if cpu.HAS_CODEMAP: + cpu.codemap.register_frame_depth_map(rawstart, rawstart + size, + self.frame_positions, + self.frame_assignments) self.frame_positions = None self.frame_assignments = None return rawstart From noreply at buildbot.pypy.org Fri Apr 24 15:04:04 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 24 Apr 2015 15:04:04 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: move those ops to no-side-effect (why they had side effects in the first place?) Message-ID: <20150424130404.50E501C0726@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76920:329a83be5923 Date: 2015-04-24 15:04 +0200 http://bitbucket.org/pypy/pypy/changeset/329a83be5923/ Log: move those ops to no-side-effect (why they had side effects in the first place?) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -503,6 +503,10 @@ 'MARK_OPAQUE_PTR/1b', # this one has no *visible* side effect, since the virtualizable # must be forced, however we need to execute it anyway + 'DEBUG_MERGE_POINT/*', # debugging only + 'ENTER_PORTAL_FRAME/2', # debugging only + 'LEAVE_PORTAL_FRAME/1', # debugging only + 'JIT_DEBUG/*', # debugging only '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- 'INCREMENT_DEBUG_COUNTER/1', @@ -521,10 +525,6 @@ 'UNICODESETITEM/3', 'COND_CALL_GC_WB/1d', # [objptr] (for the write barrier) 'COND_CALL_GC_WB_ARRAY/2d', # [objptr, arrayindex] (write barr. 
for array) - 'DEBUG_MERGE_POINT/*', # debugging only - 'ENTER_PORTAL_FRAME/2', # debugging only - 'LEAVE_PORTAL_FRAME/1', # debugging only - 'JIT_DEBUG/*', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', From noreply at buildbot.pypy.org Fri Apr 24 15:27:08 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 24 Apr 2015 15:27:08 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: backend removes operations that have no side effects. Ignore those ops Message-ID: <20150424132708.48C631C069E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76921:b16af35250ab Date: 2015-04-24 15:27 +0200 http://bitbucket.org/pypy/pypy/changeset/b16af35250ab/ Log: backend removes operations that have no side effects. Ignore those ops explicitely in heap.py diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -293,6 +293,8 @@ opnum == rop.UNICODESETITEM or # no effect on GC struct/array opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever opnum == rop.JIT_DEBUG or # no effect whatsoever + opnum == rop.ENTER_PORTAL_FRAME or # no effect whatsoever + opnum == rop.LEAVE_PORTAL_FRAME or # no effect whatsoever opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -503,10 +503,6 @@ 'MARK_OPAQUE_PTR/1b', # this one has no *visible* side effect, since the virtualizable # must be forced, however we need to execute it anyway - 'DEBUG_MERGE_POINT/*', # debugging only - 'ENTER_PORTAL_FRAME/2', # debugging only - 
'LEAVE_PORTAL_FRAME/1', # debugging only - 'JIT_DEBUG/*', # debugging only '_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations ----- 'INCREMENT_DEBUG_COUNTER/1', @@ -525,6 +521,10 @@ 'UNICODESETITEM/3', 'COND_CALL_GC_WB/1d', # [objptr] (for the write barrier) 'COND_CALL_GC_WB_ARRAY/2d', # [objptr, arrayindex] (write barr. for array) + 'DEBUG_MERGE_POINT/*', # debugging only + 'ENTER_PORTAL_FRAME/2', # debugging only + 'LEAVE_PORTAL_FRAME/1', # debugging only + 'JIT_DEBUG/*', # debugging only 'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend 'COPYSTRCONTENT/5', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5', From noreply at buildbot.pypy.org Fri Apr 24 15:49:50 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 24 Apr 2015 15:49:50 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: fix the test Message-ID: <20150424134950.094451C01D6@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76922:1d47005edfcc Date: 2015-04-24 15:38 +0200 http://bitbucket.org/pypy/pypy/changeset/1d47005edfcc/ Log: fix the test diff --git a/rpython/jit/backend/x86/test/test_regloc.py b/rpython/jit/backend/x86/test/test_regloc.py --- a/rpython/jit/backend/x86/test/test_regloc.py +++ b/rpython/jit/backend/x86/test/test_regloc.py @@ -105,13 +105,17 @@ assert ''.join([buf[i] for i in range(length)]) == expected lltype.free(buf, flavor='raw') +class Fake32CodeBlockWrapper(codebuf.MachineCodeBlockWrapper): + def check_stack_size_at_ret(self): + pass + def test_follow_jump_instructions_32(): buf = lltype.malloc(rffi.CCHARP.TO, 80, flavor='raw') raw = rffi.cast(lltype.Signed, buf) - mc = codebuf.MachineCodeBlockWrapper(); mc.WORD = 4; mc.relocations = [] + mc = Fake32CodeBlockWrapper(); mc.WORD = 4; mc.relocations = [] mc.RET() mc.copy_to_raw_memory(raw) - mc = codebuf.MachineCodeBlockWrapper(); mc.WORD = 4; mc.relocations = [] + mc = Fake32CodeBlockWrapper(); mc.WORD = 4; mc.relocations = [] assert follow_jump(raw) 
== raw mc.JMP(imm(raw)) mc.copy_to_raw_memory(raw + 20) @@ -120,7 +124,7 @@ assert buf[22] == '\xFF' assert buf[23] == '\xFF' assert buf[24] == '\xFF' - mc = codebuf.MachineCodeBlockWrapper(); mc.WORD = 4; mc.relocations = [] + mc = Fake32CodeBlockWrapper(); mc.WORD = 4; mc.relocations = [] assert follow_jump(raw + 20) == raw mc.JMP(imm(raw)) mc.copy_to_raw_memory(raw + 40) From noreply at buildbot.pypy.org Fri Apr 24 15:49:51 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 24 Apr 2015 15:49:51 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: fix test_asmmemmgr Message-ID: <20150424134951.33CCF1C01D6@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76923:b37b7f15984b Date: 2015-04-24 15:39 +0200 http://bitbucket.org/pypy/pypy/changeset/b37b7f15984b/ Log: fix test_asmmemmgr diff --git a/rpython/jit/backend/llsupport/test/test_asmmemmgr.py b/rpython/jit/backend/llsupport/test/test_asmmemmgr.py --- a/rpython/jit/backend/llsupport/test/test_asmmemmgr.py +++ b/rpython/jit/backend/llsupport/test/test_asmmemmgr.py @@ -173,6 +173,7 @@ # gcrootmap = FakeGcRootMap() allblocks = [] + self.HAS_CODEMAP = False rawstart = mc.materialize(self, allblocks, gcrootmap) p = rffi.cast(rffi.CArrayPtr(lltype.Char), rawstart) assert p[0] == 'X' From noreply at buildbot.pypy.org Fri Apr 24 15:49:52 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 24 Apr 2015 15:49:52 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: try a different approach to building against libunwind Message-ID: <20150424134952.65D321C01D6@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76924:4ece3460b60d Date: 2015-04-24 15:49 +0200 http://bitbucket.org/pypy/pypy/changeset/4ece3460b60d/ Log: try a different approach to building against libunwind diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -27,7 
+27,7 @@ include_dirs = [SRC], includes = ['vmprof.h', 'trampoline.h'], separate_module_files = [SRC.join('trampoline.asmgcc.s')], - libraries = ['unwind'], + link_extra = ['-Wl,-Bstatic', '-lunwind', '-Wl,-Bdynamic', '-llzma'], post_include_bits=[""" void pypy_vmprof_init(void); From noreply at buildbot.pypy.org Fri Apr 24 16:12:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 16:12:29 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: for now, we don't check the cdef value given in a "#define X Y" Message-ID: <20150424141229.C3F3A1C061E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1803:ba730ccef0e0 Date: 2015-04-24 15:24 +0200 http://bitbucket.org/cffi/cffi/changeset/ba730ccef0e0/ Log: for now, we don't check the cdef value given in a "#define X Y" diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -563,9 +563,7 @@ # ---------- # constants, declared with "static const ..." 
- def _generate_cpy_const(self, is_int, name, tp=None, category='const', - check_value=None): - assert check_value is None # XXX + def _generate_cpy_const(self, is_int, name, tp=None, category='const'): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) if is_int: @@ -603,7 +601,7 @@ # ---------- # enums - + def _generate_cpy_enum_collecttype(self, tp, name): self._do_collect_type(tp) @@ -614,11 +612,9 @@ pass def _generate_cpy_macro_decl(self, tp, name): - if tp == '...': - check_value = None - else: - check_value = tp # an integer - self._generate_cpy_const(True, name, check_value=check_value) + # for now, we ignore the value (if != ',,,') given in the cdef + # and always trust the value coming from the C compiler + self._generate_cpy_const(True, name) def _generate_cpy_macro_ctx(self, tp, name): self._lsts["global"].append( diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -1,4 +1,4 @@ -import py +import sys, py from cffi import FFI from _cffi1 import recompiler @@ -160,6 +160,18 @@ assert lib.FOOBAR == -6912 py.test.raises(AttributeError, "lib.FOOBAR = 2") +def test_macro_check_value_ok(): + ffi = FFI() + ffi.cdef("#define FOOBAR 42") + lib = verify(ffi, 'test_macro_check_value_ok', "#define FOOBAR 42") + assert lib.FOOBAR == 42 + +def test_macro_check_value_fail(): + ffi = FFI() + ffi.cdef("#define FOOBAR 42") + lib = verify(ffi, 'test_macro_check_value_fail', "#define FOOBAR 43") + assert lib.FOOBAR == 43 # for now, we don't check the cdef value + def test_constant(): ffi = FFI() ffi.cdef("static const int FOOBAR;") @@ -251,6 +263,17 @@ assert ffi1.typeof("void(*)(struct foo_s*)") is not ( ffi2.typeof("void(*)(struct foo_s*)")) +def test_verify_enum(): + py.test.skip("in-progress") + ffi = FFI() + ffi.cdef("""enum e1 { B1, A1, ... 
};""") + lib = verify(ffi, 'test_verify_enum', + "enum e1 { A1, B1, C1=%d };" % sys.maxint) + ffi.typeof("enum e1") + assert lib.A1 == 0 + assert lib.B1 == 0 + assert ffi.sizeof("enum e1") == ffi.sizeof("long") + def test_dotdotdot_length_of_array_field(): ffi = FFI() ffi.cdef("struct foo_s { int a[...]; int b[...]; };") From noreply at buildbot.pypy.org Fri Apr 24 16:12:28 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 16:12:28 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: enums in the type table Message-ID: <20150424141228.B3D641C061E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1802:5acd08cb3958 Date: 2015-04-24 14:39 +0200 http://bitbucket.org/cffi/cffi/changeset/5acd08cb3958/ Log: enums in the type table diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -90,7 +90,7 @@ struct _cffi_enum_s { const char *name; - _cffi_opcode_t integer_type_op; + int size_and_sign; }; struct _cffi_typename_s { diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -42,14 +42,20 @@ self.cffi_types.append('LEN') # placeholder assert None not in self._typesdict.values() # - # collect all structs and unions + # collect all structs and unions and enums self._struct_unions = {} + self._enums = {} for tp in all_decls: if isinstance(tp, model.StructOrUnion): self._struct_unions[tp] = None + elif isinstance(tp, model.EnumType): + self._enums[tp] = None for i, tp in enumerate(sorted(self._struct_unions, key=lambda tp: tp.name)): self._struct_unions[tp] = i + for i, tp in enumerate(sorted(self._enums, + key=lambda tp: tp.name)): + self._enums[tp] = i # # emit all bytecode sequences now for tp in all_decls: @@ -89,7 +95,7 @@ step_name)) except AttributeError: raise ffiplatform.VerificationError( - "not implemented in verify(): %r" % name) + "not implemented in recompile(): %r" % name) try: method(tp, 
realname) except Exception as e: @@ -168,6 +174,12 @@ assert i < len(lst) assert lst[i].startswith(' { "%s"' % tp.name) assert len(lst) == len(self._struct_unions) + # same with enums + lst = self._lsts["enum"] + for tp, i in self._enums.items(): + assert i < len(lst) + assert lst[i].startswith(' { "%s"' % tp.name) + assert len(lst) == len(self._enums) # # the declaration of '_cffi_type_context' prnt('static const struct _cffi_type_context_s _cffi_type_context = {') @@ -590,6 +602,12 @@ ' { "%s", _cffi_const_%s, %s },' % (name, name, type_op)) # ---------- + # enums + + def _generate_cpy_enum_collecttype(self, tp, name): + self._do_collect_type(tp) + + # ---------- # macros: for now only for integers def _generate_cpy_macro_collecttype(self, tp, name): @@ -679,8 +697,12 @@ def _emit_bytecode_StructType(self, tp, index): struct_index = self._struct_unions[tp] self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index) + _emit_bytecode_UnionType = _emit_bytecode_StructType - _emit_bytecode_UnionType = _emit_bytecode_StructType + def _emit_bytecode_EnumType(self, tp, index): + enum_index = self._enums[tp] + self.cffi_types[index] = CffiOp(OP_ENUM, enum_index) + def make_c_source(ffi, module_name, preamble, target_c_file): recompiler = Recompiler(ffi, module_name) diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -87,6 +87,10 @@ check_type_table("typedef struct { int a; long b; } foo_t;", "(STRUCT_UNION 0)(PRIMITIVE 7)(PRIMITIVE 9)") +def test_type_table_enum(): + check_type_table("enum foo_e { AA, BB, ... 
};", + "(ENUM 0)") + def test_math_sin(): import math From noreply at buildbot.pypy.org Fri Apr 24 16:12:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 16:12:30 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Enums, first complete passing tests Message-ID: <20150424141230.D30491C061E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1804:fff6ae4c61b2 Date: 2015-04-24 16:00 +0200 http://bitbucket.org/cffi/cffi/changeset/fff6ae4c61b2/ Log: Enums, first complete passing tests diff --git a/_cffi1/_cffi_include.h b/_cffi1/_cffi_include.h --- a/_cffi1/_cffi_include.h +++ b/_cffi1/_cffi_include.h @@ -161,6 +161,13 @@ #define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0])) +#define _cffi_prim_int(size, sign) \ + ((size) == 1 ? ((sign) ? _CFFI_PRIM_INT8 : _CFFI_PRIM_UINT8) : \ + (size) == 2 ? ((sign) ? _CFFI_PRIM_INT16 : _CFFI_PRIM_UINT16) : \ + (size) == 4 ? ((sign) ? _CFFI_PRIM_INT32 : _CFFI_PRIM_UINT32) : \ + (size) == 8 ? ((sign) ? 
_CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \ + 0) + static int _cffi_init(void) { diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -90,7 +90,7 @@ struct _cffi_enum_s { const char *name; - int size_and_sign; + int type_prim; /* _CFFI_PRIM_xxx */ }; struct _cffi_typename_s { diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -360,6 +360,18 @@ break; } + case _CFFI_OP_ENUM: + { + const struct _cffi_enum_s *e; + + e = &builder->ctx.enums[_CFFI_GETARG(op)]; + x = all_primitives[e->type_prim]; + if (x == NULL) + x = build_primitive_type(e->type_prim); + Py_XINCREF(x); + break; + } + case _CFFI_OP_FUNCTION: { PyObject *fargs; diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -550,13 +550,13 @@ def _generate_cpy_anonymous_decl(self, tp, name): if isinstance(tp, model.EnumType): - self._generate_cpy_enum_decl(tp, name, '') + self._generate_cpy_enum_decl(tp) else: self._struct_decl(tp, name, 'typedef_' + name) def _generate_cpy_anonymous_ctx(self, tp, name): if isinstance(tp, model.EnumType): - self._generate_cpy_enum_ctx(tp, name, '') + self._enum_ctx(tp, name) else: self._struct_ctx(tp, name, 'typedef_' + name) @@ -589,15 +589,19 @@ is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() self._generate_cpy_const(is_int, name, tp) + def _generate_const_int_ctx(self, name): + self._lsts["global"].append( + ' { "%s", _cffi_const_%s, _CFFI_OP(_CFFI_OP_CONSTANT_INT, 0) },' % + (name, name)) + def _generate_cpy_constant_ctx(self, tp, name): - is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() - if not is_int: + if isinstance(tp, model.PrimitiveType) and tp.is_integer_type(): + self._generate_const_int_ctx(name) + else: type_index = self._typesdict[tp] type_op = '_CFFI_OP(_CFFI_OP_CONSTANT, %d)' % type_index - else: - type_op = 
'_CFFI_OP(_CFFI_OP_CONSTANT_INT, 0)' - self._lsts["global"].append( - ' { "%s", _cffi_const_%s, %s },' % (name, name, type_op)) + self._lsts["global"].append( + ' { "%s", _cffi_const_%s, %s },' % (name, name, type_op)) # ---------- # enums @@ -605,6 +609,29 @@ def _generate_cpy_enum_collecttype(self, tp, name): self._do_collect_type(tp) + def _generate_cpy_enum_decl(self, tp, name=None): + for enumerator in tp.enumerators: + self._generate_cpy_const(True, enumerator) + + def _enum_ctx(self, tp, cname): + for enumerator in tp.enumerators: + self._generate_const_int_ctx(enumerator) + if cname is not None: + size = "sizeof(%s)" % cname + signed = "((%s)-1) <= 0" % cname + prim = "_cffi_prim_int(%s, %s)" % (size, signed) + else: + size = xxxx + self._lsts["enum"].append( + ' { "%s", %s },' % (tp.name, prim)) + + def _generate_cpy_enum_ctx(self, tp, name): + if tp.has_c_name(): + cname = tp.get_c_name('') + else: + cname = None + self._enum_ctx(tp, cname) + # ---------- # macros: for now only for integers diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -264,16 +264,20 @@ ffi2.typeof("void(*)(struct foo_s*)")) def test_verify_enum(): - py.test.skip("in-progress") ffi = FFI() - ffi.cdef("""enum e1 { B1, A1, ... };""") + ffi.cdef("""enum e1 { B1, A1, ... }; enum e2 { B2, A2, ... 
};""") lib = verify(ffi, 'test_verify_enum', - "enum e1 { A1, B1, C1=%d };" % sys.maxint) + "enum e1 { A1, B1, C1=%d };" % sys.maxint + + "enum e2 { A2, B2, C2 };") ffi.typeof("enum e1") + ffi.typeof("enum e2") assert lib.A1 == 0 - assert lib.B1 == 0 + assert lib.B1 == 1 + assert lib.A2 == 0 + assert lib.B2 == 1 assert ffi.sizeof("enum e1") == ffi.sizeof("long") - + assert ffi.sizeof("enum e2") == ffi.sizeof("int") + def test_dotdotdot_length_of_array_field(): ffi = FFI() ffi.cdef("struct foo_s { int a[...]; int b[...]; };") @@ -341,3 +345,18 @@ "typedef struct { int a; long b; } *foo_t;") p = ffi.new("foo_t", {'b': 42}) assert p.b == 42 + +def test_verify_anonymous_enum_with_typedef(): + ffi = FFI() + ffi.cdef("typedef enum { AA, ... } e1;") + lib = verify(ffi, 'test_verify_anonymous_enum_with_typedef1', + "typedef enum { BB, CC, AA } e1;") + assert lib.AA == 2 + assert ffi.sizeof("e1") == ffi.sizeof("int") + # + ffi = FFI() + ffi.cdef("typedef enum { AA=%d } e1;" % sys.maxint) + lib = verify(ffi, 'test_verify_anonymous_enum_with_typedef2', + "typedef enum { AA=%d } e1;" % sys.maxint) + assert lib.AA == sys.maxint + assert ffi.sizeof("e1") == ffi.sizeof("long") From noreply at buildbot.pypy.org Fri Apr 24 16:31:45 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 24 Apr 2015 16:31:45 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: test for nan in complex.sign Message-ID: <20150424143145.77CE51C06F5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-fixes Changeset: r76927:43ccc6f8802a Date: 2015-04-24 17:20 +0300 http://bitbucket.org/pypy/pypy/changeset/43ccc6f8802a/ Log: test for nan in complex.sign diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -331,6 +331,12 @@ complex(float('nan'), 0)], dtype=complex)) == \ [False, True, True, False, False]).all() + def 
test_sign_for_complex_nan(self): + from numpy import array, nan, sign, isnan + C = array([nan], dtype=complex) + res = sign(C) + assert isnan(res.real) + assert res.imag == 0+0j def test_square(self): from numpy import square diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1347,15 +1347,17 @@ sign of complex number could be either the point closest to the unit circle or {-1,0,1}, for compatability with numpy we choose the latter ''' + if rfloat.isnan(v[0]) or rfloat.isnan(v[1]): + return rfloat.NAN, 0 if v[0] == 0.0: if v[1] == 0: - return 0,0 + return 0, 0 if v[1] > 0: - return 1,0 - return -1,0 + return 1, 0 + return -1, 0 if v[0] > 0: - return 1,0 - return -1,0 + return 1, 0 + return -1, 0 def fmax(self, v1, v2): if self.ge(v1, v2) or self.isnan(v2): From noreply at buildbot.pypy.org Fri Apr 24 16:31:44 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 24 Apr 2015 16:31:44 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: a branch to fix numpy test suite failures with "Arrays are not equal" message Message-ID: <20150424143144.59EC41C06F5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-fixes Changeset: r76926:6981237fb078 Date: 2015-04-24 17:03 +0300 http://bitbucket.org/pypy/pypy/changeset/6981237fb078/ Log: a branch to fix numpy test suite failures with "Arrays are not equal" message diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -323,7 +323,7 @@ def __exit__(self, typ, value, traceback): keepalive_until_here(self) - + def get_buffer(self, space, readonly): return ArrayBuffer(self, readonly) From noreply at buildbot.pypy.org Fri Apr 24 16:31:43 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 24 Apr 2015 16:31:43 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: fix d198d926afb8 Message-ID: 
<20150424143143.398801C06F5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-fixes Changeset: r76925:cfb31de4972f Date: 2015-04-24 16:27 +0300 http://bitbucket.org/pypy/pypy/changeset/cfb31de4972f/ Log: fix d198d926afb8 diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -11,7 +11,7 @@ from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, RecordChunk, calc_strides, calc_new_strides, shape_agreement, - calculate_broadcast_strides, calc_backstrides) + calculate_broadcast_strides, calc_backstrides, calc_start) from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.annlowlevel import cast_gcref_to_instance from pypy.interpreter.baseobjspace import W_Root @@ -328,8 +328,11 @@ return ArrayBuffer(self, readonly) def astype(self, space, dtype): - strides, backstrides = calc_strides(self.get_shape(), dtype, - self.order) + # we want to create a new array, but must respect the strides + # in self. 
So find a factor of the itemtype.elsize, and use this + factor = float(dtype.elsize) / self.dtype.elsize + strides = [int(factor*s) for s in self.get_strides()] + backstrides = [int(factor*s) for s in self.get_backstrides()] impl = ConcreteArray(self.get_shape(), dtype, self.order, strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) @@ -426,8 +429,9 @@ gcstruct = _create_objectstore(storage, length, dtype.elsize) else: storage = dtype.itemtype.malloc(length * dtype.elsize, zero=zero) + start = calc_start(shape, strides) ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides, - storage) + storage, start=start) self.gcstruct = gcstruct def __del__(self): diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -429,6 +429,17 @@ n_old_elems_to_use *= old_shape[oldI] return new_strides[:] +def calc_start(shape, strides): + ''' Strides can be negative for non-contiguous data. 
+ Calculate the appropriate positive starting position so + the indexing still works properly + ''' + start = 0 + for i in range(len(shape)): + if strides[i] < 0: + start -= strides[i] * (shape[i] - 1) + return start + @jit.unroll_safe def is_c_contiguous(arr): shape = arr.get_shape() diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2169,6 +2169,7 @@ def test_astype(self): from numpy import array, arange + import gc b = array(1).astype(float) assert b == 1 assert b.dtype == float @@ -2182,8 +2183,15 @@ assert (b == [False, True, True]).all() assert b.dtype == 'bool' + a = arange(11)[::-1] + b = a.astype('int32') + assert (b == a).all() + del b + gc.collect() + a = arange(6, dtype='f4').reshape(2,3) - b = a.astype('i4') + b = a.T.astype('i4') + assert (a.T.strides == b.strides) a = array('x').astype('S3').dtype assert a.itemsize == 3 diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -28,6 +28,20 @@ log2e = 1. 
/ log2 log10 = math.log(10) +''' +if not we_are_translated(): + _raw_storage_setitem_unaligned = raw_storage_setitem_unaligned + _raw_storage_getitem_unaligned = raw_storage_getitem_unaligned + def raw_storage_setitem_unaligned(storage, offset, value): + assert offset >=0 + assert offset < storage._obj.getlength() + return _raw_storage_setitem_unaligned(storage, offset, value) + + def raw_storage_getitem_unaligned(T, storage, offset): + assert offset >=0 + assert offset < storage._obj.getlength() + return _raw_storage_getitem_unaligned(T, storage, offset) +''' def simple_unary_op(func): specialize.argtype(1)(func) From noreply at buildbot.pypy.org Fri Apr 24 17:15:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 17:15:19 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: progress on enums Message-ID: <20150424151519.10DF51C04C7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1805:7231fd7eae98 Date: 2015-04-24 17:15 +0200 http://bitbucket.org/cffi/cffi/changeset/7231fd7eae98/ Log: progress on enums diff --git a/_cffi1/_cffi_include.h b/_cffi1/_cffi_include.h --- a/_cffi1/_cffi_include.h +++ b/_cffi1/_cffi_include.h @@ -162,10 +162,12 @@ #define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0])) #define _cffi_prim_int(size, sign) \ - ((size) == 1 ? ((sign) ? _CFFI_PRIM_INT8 : _CFFI_PRIM_UINT8) : \ - (size) == 2 ? ((sign) ? _CFFI_PRIM_INT16 : _CFFI_PRIM_UINT16) : \ - (size) == 4 ? ((sign) ? _CFFI_PRIM_INT32 : _CFFI_PRIM_UINT32) : \ - (size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \ + ((size) == sizeof(int) ? ((sign) ? _CFFI_PRIM_INT : _CFFI_PRIM_UINT) : \ + (size) == sizeof(long)? ((sign) ? _CFFI_PRIM_LONG : _CFFI_PRIM_ULONG) : \ + (size) == 1 ? ((sign) ? _CFFI_PRIM_INT8 : _CFFI_PRIM_UINT8) : \ + (size) == 2 ? ((sign) ? _CFFI_PRIM_INT16 : _CFFI_PRIM_UINT16) : \ + (size) == 4 ? ((sign) ? _CFFI_PRIM_INT32 : _CFFI_PRIM_UINT32) : \ + (size) == 8 ? ((sign) ? 
_CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \ 0) diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -90,7 +90,8 @@ struct _cffi_enum_s { const char *name; - int type_prim; /* _CFFI_PRIM_xxx */ + int type_index; // -> _cffi_types, on a OP_ENUM + int type_prim; // _CFFI_PRIM_xxx }; struct _cffi_typename_s { diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -11,6 +11,10 @@ static PyObject *build_primitive_type(int num); /* forward */ +#define get_primitive_type(num) \ + (all_primitives[num] != NULL ? all_primitives[num] \ + : build_primitive_type(num)) + static int init_global_types_dict(PyObject *ffi_type_dict) { int err; @@ -20,7 +24,7 @@ if (global_types_dict == NULL) return -1; - ct = build_primitive_type(_CFFI_PRIM_VOID); // 'void' + ct = get_primitive_type(_CFFI_PRIM_VOID); // 'void' if (ct == NULL) return -1; if (PyDict_SetItemString(global_types_dict, @@ -260,9 +264,7 @@ switch (_CFFI_GETOP(op)) { case _CFFI_OP_PRIMITIVE: - x = all_primitives[_CFFI_GETARG(op)]; - if (x == NULL) - x = build_primitive_type(_CFFI_GETARG(op)); + x = get_primitive_type(_CFFI_GETARG(op)); Py_XINCREF(x); break; @@ -334,7 +336,7 @@ ct->ct_extra = builder; } - /* Update the "primary" OP_STRUCT_OR_UNION slot, which + /* Update the "primary" OP_STRUCT_UNION slot, which may be the same or a different slot than the "current" one */ assert((((uintptr_t)x) & 1) == 0); assert(builder->ctx.types[s->type_index] == op2); @@ -363,12 +365,40 @@ case _CFFI_OP_ENUM: { const struct _cffi_enum_s *e; + _cffi_opcode_t op2; e = &builder->ctx.enums[_CFFI_GETARG(op)]; - x = all_primitives[e->type_prim]; - if (x == NULL) - x = build_primitive_type(e->type_prim); - Py_XINCREF(x); + op2 = builder->ctx.types[e->type_index]; + if ((((uintptr_t)op2) & 1) == 0) { + x = (PyObject *)op2; + Py_INCREF(x); + } + else { + PyObject *basetd = get_primitive_type(e->type_prim); + if 
(basetd == NULL) + return NULL; + + PyObject *args = Py_BuildValue("(s()()O)", e->name, basetd); + if (args == NULL) + return NULL; + + x = b_new_enum_type(NULL, args); + Py_DECREF(args); + if (x == NULL) + return NULL; + + /* Update the "primary" _CFFI_OP_ENUM slot, which + may be the same or a different slot than the "current" one */ + assert((((uintptr_t)x) & 1) == 0); + assert(builder->ctx.types[e->type_index] == op2); + Py_INCREF(x); + builder->ctx.types[e->type_index] = x; + + /* Done, leave without updating the "current" slot because + it may be done already above. If not, never mind, the + next call to realize_c_type() will do it. */ + return x; + } break; } diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -589,19 +589,14 @@ is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() self._generate_cpy_const(is_int, name, tp) - def _generate_const_int_ctx(self, name): - self._lsts["global"].append( - ' { "%s", _cffi_const_%s, _CFFI_OP(_CFFI_OP_CONSTANT_INT, 0) },' % - (name, name)) - def _generate_cpy_constant_ctx(self, tp, name): if isinstance(tp, model.PrimitiveType) and tp.is_integer_type(): - self._generate_const_int_ctx(name) + type_op = '_CFFI_OP(_CFFI_OP_CONSTANT_INT, 0)' else: type_index = self._typesdict[tp] type_op = '_CFFI_OP(_CFFI_OP_CONSTANT, %d)' % type_index - self._lsts["global"].append( - ' { "%s", _cffi_const_%s, %s },' % (name, name, type_op)) + self._lsts["global"].append( + ' { "%s", _cffi_const_%s, %s },' % (name, name, type_op)) # ---------- # enums @@ -614,8 +609,13 @@ self._generate_cpy_const(True, enumerator) def _enum_ctx(self, tp, cname): + type_index = self._typesdict[tp] + type_op = '_CFFI_OP(_CFFI_OP_ENUM, %d)' % type_index for enumerator in tp.enumerators: - self._generate_const_int_ctx(enumerator) + self._lsts["global"].append( + ' { "%s", _cffi_const_%s, %s },' % (enumerator, enumerator, + type_op)) + # if cname is not None: size = "sizeof(%s)" % 
cname signed = "((%s)-1) <= 0" % cname @@ -623,7 +623,7 @@ else: size = xxxx self._lsts["enum"].append( - ' { "%s", %s },' % (tp.name, prim)) + ' { "%s", %d, %s },' % (tp.name, type_index, prim)) def _generate_cpy_enum_ctx(self, tp, name): if tp.has_c_name(): From noreply at buildbot.pypy.org Fri Apr 24 17:48:42 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 17:48:42 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Add to the recompiler the value of the elements that "belong" to an Message-ID: <20150424154842.DF7EB1C01D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1806:4adfe9dbfcbf Date: 2015-04-24 17:49 +0200 http://bitbucket.org/cffi/cffi/changeset/4adfe9dbfcbf/ Log: Add to the recompiler the value of the elements that "belong" to an enum (even though they don't really belong there in the C sense) diff --git a/_cffi1/lib_obj.c b/_cffi1/lib_obj.c --- a/_cffi1/lib_obj.c +++ b/_cffi1/lib_obj.c @@ -165,23 +165,11 @@ break; case _CFFI_OP_CONSTANT_INT: + case _CFFI_OP_ENUM: { /* a constant integer whose value, in an "unsigned long long", is obtained by calling the function at g->address */ - unsigned long long value; - int neg = ((int(*)(unsigned long long*))g->address)(&value); - if (!neg) { - if (value <= (unsigned long long)LONG_MAX) - x = PyInt_FromLong((long)value); - else - x = PyLong_FromUnsignedLongLong(value); - } - else { - if ((long long)value >= (long long)LONG_MIN) - x = PyInt_FromLong((long)value); - else - x = PyLong_FromLongLong((long long)value); - } + x = realize_global_int(g); break; } diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -92,6 +92,7 @@ const char *name; int type_index; // -> _cffi_types, on a OP_ENUM int type_prim; // _CFFI_PRIM_xxx + const char *enumerators; // comma-delimited string }; struct _cffi_typename_s { diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ 
b/_cffi1/realize_c_type.c @@ -214,6 +214,25 @@ return x; } +static PyObject *realize_global_int(const struct _cffi_global_s *g) +{ + PyObject *x; + unsigned long long value; + int neg = ((int(*)(unsigned long long*))g->address)(&value); + if (!neg) { + if (value <= (unsigned long long)LONG_MAX) + x = PyInt_FromLong((long)value); + else + x = PyLong_FromUnsignedLongLong(value); + } + else { + if ((long long)value >= (long long)LONG_MIN) + x = PyInt_FromLong((long)value); + else + x = PyLong_FromLongLong((long long)value); + } + return x; +} static PyObject * _realize_c_type_or_func(builder_c_t *builder, @@ -378,7 +397,51 @@ if (basetd == NULL) return NULL; - PyObject *args = Py_BuildValue("(s()()O)", e->name, basetd); + PyObject *enumerators = NULL, *enumvalues = NULL, *tmp; + Py_ssize_t i, j, n = 0; + const char *p; + const struct _cffi_global_s *g; + int gindex; + + if (*e->enumerators != '\0') { + n++; + for (p = e->enumerators; *p != '\0'; p++) + n += (*p == ','); + } + enumerators = PyTuple_New(n); + if (enumerators == NULL) + return NULL; + + enumvalues = PyTuple_New(n); + if (enumvalues == NULL) { + Py_DECREF(enumerators); + return NULL; + } + + p = e->enumerators; + for (i = 0; i < n; i++) { + j = 0; + while (p[j] != ',' && p[j] != '\0') + j++; + tmp = PyString_FromStringAndSize(p, j); + PyTuple_SET_ITEM(enumerators, i, tmp); + + gindex = search_in_globals(&builder->ctx, p, j); + g = &builder->ctx.globals[gindex]; + assert(gindex >= 0 && g->type_op == op); + + tmp = realize_global_int(g); + PyTuple_SET_ITEM(enumvalues, i, tmp); + + p += j + 1; + } + + PyObject *args = NULL; + if (!PyErr_Occurred()) + args = Py_BuildValue("(sOOO)", e->name, enumerators, + enumvalues, basetd); + Py_DECREF(enumerators); + Py_DECREF(enumvalues); if (args == NULL) return NULL; diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -620,10 +620,12 @@ size = "sizeof(%s)" % cname signed = "((%s)-1) <= 0" % cname prim = 
"_cffi_prim_int(%s, %s)" % (size, signed) + allenums = ",".join(tp.enumerators) else: size = xxxx self._lsts["enum"].append( - ' { "%s", %d, %s },' % (tp.name, type_index, prim)) + ' { "%s", %d, %s,\n "%s" },' % (tp.name, type_index, + prim, allenums)) def _generate_cpy_enum_ctx(self, tp, name): if tp.has_c_name(): diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -688,6 +688,9 @@ # try again ffi.verify("enum ee { EE1=10, EE2, EE3=-10, EE4 };") assert ffi.string(ffi.cast('enum ee', 11)) == "EE2" + # + assert ffi.typeof("enum ee").relements == {'EE1': 10, 'EE2': 11, 'EE3': -10} + assert ffi.typeof("enum ee").elements == {10: 'EE1', 11: 'EE2', -10: 'EE3'} def test_full_enum(): ffi = FFI() diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -703,6 +703,9 @@ # try again ffi.verify("enum ee { EE1=10, EE2, EE3=-10, EE4 };") assert ffi.string(ffi.cast('enum ee', 11)) == "EE2" + # + assert ffi.typeof("enum ee").relements == {'EE1': 10, 'EE2': 11, 'EE3': -10} + assert ffi.typeof("enum ee").elements == {10: 'EE1', 11: 'EE2', -10: 'EE3'} def test_full_enum(): ffi = FFI() From noreply at buildbot.pypy.org Fri Apr 24 17:49:39 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 17:49:39 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20150424154939.72A481C01D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r597:07be4292a404 Date: 2015-04-24 17:50 +0200 http://bitbucket.org/pypy/pypy.org/changeset/07be4292a404/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $59150 of $105000 (56.3%) + $59164 of $105000 (56.3%)
    From noreply at buildbot.pypy.org Fri Apr 24 18:01:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 24 Apr 2015 18:01:24 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: anonymous enums Message-ID: <20150424160124.75F951C069E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1807:779034948ac1 Date: 2015-04-24 18:01 +0200 http://bitbucket.org/cffi/cffi/changeset/779034948ac1/ Log: anonymous enums diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -616,23 +616,20 @@ ' { "%s", _cffi_const_%s, %s },' % (enumerator, enumerator, type_op)) # - if cname is not None: + if cname is not None and '$' not in cname: size = "sizeof(%s)" % cname signed = "((%s)-1) <= 0" % cname - prim = "_cffi_prim_int(%s, %s)" % (size, signed) - allenums = ",".join(tp.enumerators) else: - size = xxxx + basetp = tp.build_baseinttype(self.ffi, []) + size = self.ffi.sizeof(basetp) + signed = int(int(self.ffi.cast(basetp, -1)) < 0) + allenums = ",".join(tp.enumerators) self._lsts["enum"].append( - ' { "%s", %d, %s,\n "%s" },' % (tp.name, type_index, - prim, allenums)) + ' { "%s", %d, _cffi_prim_int(%s, %s),\n' + ' "%s" },' % (tp.name, type_index, size, signed, allenums)) def _generate_cpy_enum_ctx(self, tp, name): - if tp.has_c_name(): - cname = tp.get_c_name('') - else: - cname = None - self._enum_ctx(tp, cname) + self._enum_ctx(tp, tp._get_c_name()) # ---------- # macros: for now only for integers diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -697,9 +697,10 @@ ffi.cdef("enum ee { EE1, EE2, EE3 };") ffi.verify("enum ee { EE1, EE2, EE3 };") py.test.raises(VerificationError, ffi.verify, "enum ee { EE1, EE2 };") - e = py.test.raises(VerificationError, ffi.verify, - "enum ee { EE1, EE3, EE2 };") - assert str(e.value) == 'enum ee: EE2 has the real value 2, not 1' + # disabled: for now, we always accept and fix 
transparently constant values + #e = py.test.raises(VerificationError, ffi.verify, + # "enum ee { EE1, EE3, EE2 };") + #assert str(e.value) == 'enum ee: EE2 has the real value 2, not 1' # extra items cannot be seen and have no bad consequence anyway lib = ffi.verify("enum ee { EE1, EE2, EE3, EE4 };") assert lib.EE3 == 2 From noreply at buildbot.pypy.org Fri Apr 24 18:07:14 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 24 Apr 2015 18:07:14 +0200 (CEST) Subject: [pypy-commit] pypy vmprof2: close to be merged branch Message-ID: <20150424160714.58D761C069E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: vmprof2 Changeset: r76928:bae7ef19ac11 Date: 2015-04-24 18:06 +0200 http://bitbucket.org/pypy/pypy/changeset/bae7ef19ac11/ Log: close to be merged branch From noreply at buildbot.pypy.org Fri Apr 24 18:07:17 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 24 Apr 2015 18:07:17 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge vmprof2 Message-ID: <20150424160717.10C7B1C069E@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r76929:97193bc80d75 Date: 2015-04-24 18:07 +0200 http://bitbucket.org/pypy/pypy/changeset/97193bc80d75/ Log: Merge vmprof2 This branch enables the module _vmprof which is basis for vmprof, a lightweight statistical profiler diff too long, truncating to 2000 out of 4350 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -420,3 +420,10 @@ the terms of the GPL license version 2 or any later version. Thus the gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed under the terms of the GPL license as well. + +License for 'pypy/module/_vmprof/src' +-------------------------------------- + +The code is based on gperftools. 
You may see a copy of the License for it at + + https://code.google.com/p/gperftools/source/browse/COPYING diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -38,6 +38,9 @@ "_csv", "cppyy", "_pypyjson" ]) +if sys.platform.startswith('linux') and sys.maxint > 2147483647: + working_modules.add('_vmprof') + translation_modules = default_modules.copy() translation_modules.update([ "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5", @@ -99,6 +102,7 @@ "_hashlib" : ["pypy.module._ssl.interp_ssl"], "_minimal_curses": ["pypy.module._minimal_curses.fficurses"], "_continuation": ["rpython.rlib.rstacklet"], + "_vmprof" : ["pypy.module._vmprof.interp_vmprof"], } def get_module_validator(modname): diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -11,7 +11,7 @@ INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, - UserDelAction) + UserDelAction, CodeUniqueIds) from pypy.interpreter.error import OperationError, new_exception_class, oefmt from pypy.interpreter.argument import Arguments from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary @@ -388,6 +388,7 @@ self.actionflag = ActionFlag() # changed by the signal module self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) + self.code_unique_ids = CodeUniqueIds() self._code_of_sys_exc_info = None # can be overridden to a subclass @@ -666,6 +667,16 @@ assert ec is not None return ec + def register_code_callback(self, callback): + cui = self.code_unique_ids + cui.code_callback = callback + + def register_code_object(self, pycode): + cui = self.code_unique_ids + if cui.code_callback is None: + return + cui.code_callback(self, pycode) + def _freeze_(self): return True diff --git 
a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -579,3 +579,11 @@ # there is no list of length n: if n is large, then the GC # will run several times while walking the list, but it will # see lower and lower memory usage, with no lower bound of n. + +class CodeUniqueIds(object): + def __init__(self): + if sys.maxint == 2147483647: + self.code_unique_id = 0 # XXX this is wrong, it won't work on 32bit + else: + self.code_unique_id = 0x7000000000000000 + self.code_callback = None diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -14,9 +14,10 @@ CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT -from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rarithmetic import intmask, r_longlong from rpython.rlib.objectmodel import compute_hash from rpython.rlib import jit +from rpython.rlib.debug import debug_start, debug_stop, debug_print class BytecodeCorruption(Exception): @@ -54,8 +55,9 @@ "CPython-style code objects." 
_immutable_ = True _immutable_fields_ = ["co_consts_w[*]", "co_names_w[*]", "co_varnames[*]", - "co_freevars[*]", "co_cellvars[*]", "_args_as_cellvars[*]"] - + "co_freevars[*]", "co_cellvars[*]", + "_args_as_cellvars[*]"] + def __init__(self, space, argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars, @@ -83,6 +85,7 @@ self.magic = magic self._signature = cpython_code_signature(self) self._initialize() + space.register_code_object(self) def _initialize(self): if self.co_cellvars: @@ -124,6 +127,15 @@ from pypy.objspace.std.mapdict import init_mapdict_cache init_mapdict_cache(self) + cui = self.space.code_unique_ids + self._unique_id = cui.code_unique_id + cui.code_unique_id += 4 # so we have two bits that we can mark stuff + # with + + def _get_full_name(self): + return "py:%s:%d:%s" % (self.co_name, self.co_firstlineno, + self.co_filename) + def _cleanup_(self): if (self.magic == cpython_magic and '__pypy__' not in sys.builtin_module_names): diff --git a/pypy/module/_vmprof/__init__.py b/pypy/module/_vmprof/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/__init__.py @@ -0,0 +1,18 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + """ + Write me :) + """ + appleveldefs = { + } + + interpleveldefs = { + 'enable': 'interp_vmprof.enable', + 'disable': 'interp_vmprof.disable', + } + + def setup_after_space_initialization(self): + # force the __extend__ hacks to occur early + from pypy.module._vmprof.interp_vmprof import VMProf + self.vmprof = VMProf() diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -0,0 +1,240 @@ +import py, os, sys +from rpython.rtyper.lltypesystem import lltype, rffi, llmemory +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rtyper.annlowlevel import 
cast_instance_to_gcref, cast_base_ptr_to_instance +from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib import jit, rposix, rgc +from rpython.rlib.rarithmetic import ovfcheck_float_to_int +from rpython.rtyper.tool import rffi_platform as platform +from rpython.rlib.rstring import StringBuilder +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import oefmt, wrap_oserror, OperationError +from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.pyframe import PyFrame +from pypy.interpreter.pycode import PyCode + +ROOT = py.path.local(__file__).join('..') +SRC = ROOT.join('src') + +# by default, we statically link vmprof.c into pypy; however, if you set +# DYNAMIC_VMPROF to True, it will be dynamically linked to the libvmprof.so +# which is expected to be inside pypy/module/_vmprof/src: this is very useful +# during development. Note that you have to manually build libvmprof by +# running make inside the src dir +DYNAMIC_VMPROF = False + +eci_kwds = dict( + include_dirs = [SRC], + includes = ['vmprof.h', 'trampoline.h'], + separate_module_files = [SRC.join('trampoline.asmgcc.s')], + link_extra = ['-Wl,-Bstatic', '-lunwind', '-Wl,-Bdynamic', '-llzma'], + + post_include_bits=[""" + void pypy_vmprof_init(void); + """], + + separate_module_sources=[""" + void pypy_vmprof_init(void) { + vmprof_set_mainloop(pypy_execute_frame_trampoline, 0, + NULL); + } + """], + ) + + +if DYNAMIC_VMPROF: + eci_kwds['libraries'] += ['vmprof'] + eci_kwds['link_extra'] = ['-Wl,-rpath,%s' % SRC, '-L%s' % SRC] +else: + eci_kwds['separate_module_files'] += [SRC.join('vmprof.c')] + +eci = ExternalCompilationInfo(**eci_kwds) + +check_eci = eci.merge(ExternalCompilationInfo(separate_module_files=[ + SRC.join('fake_pypy_api.c')])) + +platform.verify_eci(check_eci) + +pypy_execute_frame_trampoline = rffi.llexternal( + "pypy_execute_frame_trampoline", + [llmemory.GCREF, llmemory.GCREF, llmemory.GCREF, lltype.Signed], + llmemory.GCREF, + 
compilation_info=eci, + _nowrapper=True, sandboxsafe=True, + random_effects_on_gcobjs=True) + +pypy_vmprof_init = rffi.llexternal("pypy_vmprof_init", [], lltype.Void, + compilation_info=eci) +vmprof_enable = rffi.llexternal("vmprof_enable", + [rffi.INT, rffi.LONG, rffi.INT, + rffi.CCHARP, rffi.INT], + rffi.INT, compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO) +vmprof_disable = rffi.llexternal("vmprof_disable", [], rffi.INT, + compilation_info=eci, + save_err=rffi.RFFI_SAVE_ERRNO) + +vmprof_register_virtual_function = rffi.llexternal( + "vmprof_register_virtual_function", + [rffi.CCHARP, rffi.VOIDP, rffi.VOIDP], lltype.Void, + compilation_info=eci, _nowrapper=True) + +original_execute_frame = PyFrame.execute_frame.im_func +original_execute_frame.c_name = 'pypy_pyframe_execute_frame' +original_execute_frame._dont_inline_ = True + +class __extend__(PyFrame): + def execute_frame(frame, w_inputvalue=None, operr=None): + # go through the asm trampoline ONLY if we are translated but not being JITted. + # + # If we are not translated, we obviously don't want to go through the + # trampoline because there is no C function it can call. 
+ # + # If we are being JITted, we want to skip the trampoline, else the JIT + # cannot see throug it + if we_are_translated() and not jit.we_are_jitted(): + # if we are translated, call the trampoline + gc_frame = cast_instance_to_gcref(frame) + gc_inputvalue = cast_instance_to_gcref(w_inputvalue) + gc_operr = cast_instance_to_gcref(operr) + unique_id = frame.pycode._unique_id + gc_result = pypy_execute_frame_trampoline(gc_frame, gc_inputvalue, + gc_operr, unique_id) + return cast_base_ptr_to_instance(W_Root, gc_result) + else: + return original_execute_frame(frame, w_inputvalue, operr) + + + +def write_long_to_string_builder(l, b): + if sys.maxint == 2147483647: + b.append(chr(l & 0xff)) + b.append(chr((l >> 8) & 0xff)) + b.append(chr((l >> 16) & 0xff)) + b.append(chr((l >> 24) & 0xff)) + else: + b.append(chr(l & 0xff)) + b.append(chr((l >> 8) & 0xff)) + b.append(chr((l >> 16) & 0xff)) + b.append(chr((l >> 24) & 0xff)) + b.append(chr((l >> 32) & 0xff)) + b.append(chr((l >> 40) & 0xff)) + b.append(chr((l >> 48) & 0xff)) + b.append(chr((l >> 56) & 0xff)) + +def try_cast_to_pycode(gcref): + return rgc.try_cast_gcref_to_instance(PyCode, gcref) + +MAX_CODES = 1000 + +class VMProf(object): + def __init__(self): + self.is_enabled = False + self.ever_enabled = False + self.fileno = -1 + self.current_codes = [] + + def enable(self, space, fileno, period_usec): + if self.is_enabled: + raise oefmt(space.w_ValueError, "_vmprof already enabled") + self.fileno = fileno + self.is_enabled = True + self.write_header(fileno, period_usec) + if not self.ever_enabled: + if we_are_translated(): + pypy_vmprof_init() + self.ever_enabled = True + self.gather_all_code_objs(space) + space.register_code_callback(vmprof_register_code) + if we_are_translated(): + # does not work untranslated + res = vmprof_enable(fileno, period_usec, 0, + lltype.nullptr(rffi.CCHARP.TO), 0) + else: + res = 0 + if res == -1: + raise wrap_oserror(space, OSError(rposix.get_saved_errno(), + "_vmprof.enable")) + + 
def gather_all_code_objs(self, space): + all_code_objs = rgc.do_get_objects(try_cast_to_pycode) + for code in all_code_objs: + self.register_code(space, code) + + def write_header(self, fileno, period_usec): + assert period_usec > 0 + b = StringBuilder() + write_long_to_string_builder(0, b) + write_long_to_string_builder(3, b) + write_long_to_string_builder(0, b) + write_long_to_string_builder(period_usec, b) + write_long_to_string_builder(0, b) + b.append('\x04') # interp name + b.append(chr(len('pypy'))) + b.append('pypy') + os.write(fileno, b.build()) + + def register_code(self, space, code): + if self.fileno == -1: + raise OperationError(space.w_RuntimeError, + space.wrap("vmprof not running")) + self.current_codes.append(code) + if len(self.current_codes) >= MAX_CODES: + self._flush_codes(space) + + def _flush_codes(self, space): + b = StringBuilder() + for code in self.current_codes: + name = code._get_full_name() + b.append('\x02') + write_long_to_string_builder(code._unique_id, b) + write_long_to_string_builder(len(name), b) + b.append(name) + os.write(self.fileno, b.build()) + self.current_codes = [] + + def disable(self, space): + if not self.is_enabled: + raise oefmt(space.w_ValueError, "_vmprof not enabled") + self.is_enabled = False + space.register_code_callback(None) + self._flush_codes(space) + self.fileno = -1 + if we_are_translated(): + # does not work untranslated + res = vmprof_disable() + else: + res = 0 + if res == -1: + raise wrap_oserror(space, OSError(rposix.get_saved_errno(), + "_vmprof.disable")) + +def vmprof_register_code(space, code): + from pypy.module._vmprof import Module + mod_vmprof = space.getbuiltinmodule('_vmprof') + assert isinstance(mod_vmprof, Module) + mod_vmprof.vmprof.register_code(space, code) + + at unwrap_spec(fileno=int, period=float) +def enable(space, fileno, period=0.01): # default 100 Hz + from pypy.module._vmprof import Module + mod_vmprof = space.getbuiltinmodule('_vmprof') + assert isinstance(mod_vmprof, 
Module) + # + try: + period_usec = ovfcheck_float_to_int(period * 1000000.0 + 0.5) + if period_usec <= 0 or period_usec >= 1e6: + # we don't want seconds here at all + raise ValueError + except (ValueError, OverflowError): + raise OperationError(space.w_ValueError, + space.wrap("'period' too large or non positive")) + # + mod_vmprof.vmprof.enable(space, fileno, period_usec) + +def disable(space): + from pypy.module._vmprof import Module + mod_vmprof = space.getbuiltinmodule('_vmprof') + assert isinstance(mod_vmprof, Module) + mod_vmprof.vmprof.disable(space) + diff --git a/pypy/module/_vmprof/src/config.h b/pypy/module/_vmprof/src/config.h new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/src/config.h @@ -0,0 +1,2 @@ +#define HAVE_SYS_UCONTEXT_H +#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] diff --git a/pypy/module/_vmprof/src/fake_pypy_api.c b/pypy/module/_vmprof/src/fake_pypy_api.c new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/src/fake_pypy_api.c @@ -0,0 +1,21 @@ + +long pypy_jit_stack_depth_at_loc(long x) +{ + return 0; +} + +void *pypy_find_codemap_at_addr(long x) +{ + return (void *)0; +} + +long pypy_yield_codemap_at_addr(void *x, long y, long *a) +{ + return 0; +} + +void pypy_pyframe_execute_frame(void) +{ +} + +volatile int pypy_codemap_currently_invalid = 0; diff --git a/pypy/module/_vmprof/src/get_custom_offset.c b/pypy/module/_vmprof/src/get_custom_offset.c new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/src/get_custom_offset.c @@ -0,0 +1,66 @@ + +extern volatile int pypy_codemap_currently_invalid; + +void *pypy_find_codemap_at_addr(long addr, long *start_addr); +long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, + long *current_pos_addr); +long pypy_jit_stack_depth_at_loc(long loc); + + +void vmprof_set_tramp_range(void* start, void* end) +{ +} + +int custom_sanity_check() +{ + return !pypy_codemap_currently_invalid; +} + +static ptrdiff_t vmprof_unw_get_custom_offset(void* ip, void *cp) { + 
intptr_t ip_l = (intptr_t)ip; + return pypy_jit_stack_depth_at_loc(ip_l); +} + +static long vmprof_write_header_for_jit_addr(void **result, long n, + void *ip, int max_depth) +{ + void *codemap; + long current_pos = 0; + intptr_t id; + long start_addr = 0; + intptr_t addr = (intptr_t)ip; + int start, k; + void *tmp; + + codemap = pypy_find_codemap_at_addr(addr, &start_addr); + if (codemap == NULL) + // not a jit code at all + return n; + + // modify the last entry to point to start address and not the random one + // in the middle + result[n - 1] = (void*)start_addr; + result[n] = (void*)2; + n++; + start = n; + while (n < max_depth) { + id = pypy_yield_codemap_at_addr(codemap, addr, ¤t_pos); + if (id == -1) + // finish + break; + if (id == 0) + continue; // not main codemap + result[n++] = (void *)id; + } + k = 0; + while (k < (n - start) / 2) { + tmp = result[start + k]; + result[start + k] = result[n - k - 1]; + result[n - k - 1] = tmp; + k++; + } + if (n < max_depth) { + result[n++] = (void*)3; + } + return n; +} diff --git a/pypy/module/_vmprof/src/getpc.h b/pypy/module/_vmprof/src/getpc.h new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/src/getpc.h @@ -0,0 +1,187 @@ +// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- +// Copyright (c) 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// --- +// Author: Craig Silverstein +// +// This is an internal header file used by profiler.cc. It defines +// the single (inline) function GetPC. GetPC is used in a signal +// handler to figure out the instruction that was being executed when +// the signal-handler was triggered. +// +// To get this, we use the ucontext_t argument to the signal-handler +// callback, which holds the full context of what was going on when +// the signal triggered. How to get from a ucontext_t to a Program +// Counter is OS-dependent. + +#ifndef BASE_GETPC_H_ +#define BASE_GETPC_H_ + +#include "config.h" + +// On many linux systems, we may need _GNU_SOURCE to get access to +// the defined constants that define the register we want to see (eg +// REG_EIP). Note this #define must come first! +#define _GNU_SOURCE 1 +// If #define _GNU_SOURCE causes problems, this might work instead. +// It will cause problems for FreeBSD though!, because it turns off +// the needed __BSD_VISIBLE. 
+//#define _XOPEN_SOURCE 500 + +#include // for memcmp +#if defined(HAVE_SYS_UCONTEXT_H) +#include +#elif defined(HAVE_UCONTEXT_H) +#include // for ucontext_t (and also mcontext_t) +#elif defined(HAVE_CYGWIN_SIGNAL_H) +#include +typedef ucontext ucontext_t; +#endif + + +// Take the example where function Foo() calls function Bar(). For +// many architectures, Bar() is responsible for setting up and tearing +// down its own stack frame. In that case, it's possible for the +// interrupt to happen when execution is in Bar(), but the stack frame +// is not properly set up (either before it's done being set up, or +// after it's been torn down but before Bar() returns). In those +// cases, the stack trace cannot see the caller function anymore. +// +// GetPC can try to identify this situation, on architectures where it +// might occur, and unwind the current function call in that case to +// avoid false edges in the profile graph (that is, edges that appear +// to show a call skipping over a function). To do this, we hard-code +// in the asm instructions we might see when setting up or tearing +// down a stack frame. +// +// This is difficult to get right: the instructions depend on the +// processor, the compiler ABI, and even the optimization level. This +// is a best effort patch -- if we fail to detect such a situation, or +// mess up the PC, nothing happens; the returned PC is not used for +// any further processing. +struct CallUnrollInfo { + // Offset from (e)ip register where this instruction sequence + // should be matched. Interpreted as bytes. Offset 0 is the next + // instruction to execute. Be extra careful with negative offsets in + // architectures of variable instruction length (like x86) - it is + // not that easy as taking an offset to step one instruction back! + int pc_offset; + // The actual instruction bytes. Feel free to make it larger if you + // need a longer sequence. + unsigned char ins[16]; + // How many bytes to match from ins array? 
+ int ins_size; + // The offset from the stack pointer (e)sp where to look for the + // call return address. Interpreted as bytes. + int return_sp_offset; +}; + + +// The dereferences needed to get the PC from a struct ucontext were +// determined at configure time, and stored in the macro +// PC_FROM_UCONTEXT in config.h. The only thing we need to do here, +// then, is to do the magic call-unrolling for systems that support it. + +// -- Special case 1: linux x86, for which we have CallUnrollInfo +#if defined(__linux) && defined(__i386) && defined(__GNUC__) +static const CallUnrollInfo callunrollinfo[] = { + // Entry to a function: push %ebp; mov %esp,%ebp + // Top-of-stack contains the caller IP. + { 0, + {0x55, 0x89, 0xe5}, 3, + 0 + }, + // Entry to a function, second instruction: push %ebp; mov %esp,%ebp + // Top-of-stack contains the old frame, caller IP is +4. + { -1, + {0x55, 0x89, 0xe5}, 3, + 4 + }, + // Return from a function: RET. + // Top-of-stack contains the caller IP. + { 0, + {0xc3}, 1, + 0 + } +}; + +inline void* GetPC(ucontext_t *signal_ucontext) { + // See comment above struct CallUnrollInfo. Only try instruction + // flow matching if both eip and esp looks reasonable. + const int eip = signal_ucontext->uc_mcontext.gregs[REG_EIP]; + const int esp = signal_ucontext->uc_mcontext.gregs[REG_ESP]; + if ((eip & 0xffff0000) != 0 && (~eip & 0xffff0000) != 0 && + (esp & 0xffff0000) != 0) { + char* eip_char = reinterpret_cast(eip); + for (int i = 0; i < sizeof(callunrollinfo)/sizeof(*callunrollinfo); ++i) { + if (!memcmp(eip_char + callunrollinfo[i].pc_offset, + callunrollinfo[i].ins, callunrollinfo[i].ins_size)) { + // We have a match. + void **retaddr = (void**)(esp + callunrollinfo[i].return_sp_offset); + return *retaddr; + } + } + } + return (void*)eip; +} + +// Special case #2: Windows, which has to do something totally different. 
+#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__MINGW32__) +// If this is ever implemented, probably the way to do it is to have +// profiler.cc use a high-precision timer via timeSetEvent: +// http://msdn2.microsoft.com/en-us/library/ms712713.aspx +// We'd use it in mode TIME_CALLBACK_FUNCTION/TIME_PERIODIC. +// The callback function would be something like prof_handler, but +// alas the arguments are different: no ucontext_t! I don't know +// how we'd get the PC (using StackWalk64?) +// http://msdn2.microsoft.com/en-us/library/ms680650.aspx + +#include "base/logging.h" // for RAW_LOG +#ifndef HAVE_CYGWIN_SIGNAL_H +typedef int ucontext_t; +#endif + +inline void* GetPC(ucontext_t *signal_ucontext) { + RAW_LOG(ERROR, "GetPC is not yet implemented on Windows\n"); + return NULL; +} + +// Normal cases. If this doesn't compile, it's probably because +// PC_FROM_UCONTEXT is the empty string. You need to figure out +// the right value for your system, and add it to the list in +// configure.ac (or set it manually in your config.h). +#else +inline void* GetPC(ucontext_t *signal_ucontext) { + return (void*)signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h +} + +#endif + +#endif // BASE_GETPC_H_ diff --git a/pypy/module/_vmprof/src/trampoline.asmgcc.s b/pypy/module/_vmprof/src/trampoline.asmgcc.s new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/src/trampoline.asmgcc.s @@ -0,0 +1,16 @@ +// NOTE: you need to use TABs, not spaces! 
+ + .text + .p2align 4,,-1 + .globl pypy_execute_frame_trampoline + .type pypy_execute_frame_trampoline, @function +pypy_execute_frame_trampoline: + .cfi_startproc + pushq %rcx + .cfi_def_cfa_offset 16 + call pypy_pyframe_execute_frame at PLT + popq %rcx + .cfi_def_cfa_offset 8 + ret + .cfi_endproc + .size pypy_execute_frame_trampoline, .-pypy_execute_frame_trampoline diff --git a/pypy/module/_vmprof/src/trampoline.h b/pypy/module/_vmprof/src/trampoline.h new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/src/trampoline.h @@ -0,0 +1,1 @@ +void* pypy_execute_frame_trampoline(void*, void*, void*, long); diff --git a/pypy/module/_vmprof/src/vmprof.c b/pypy/module/_vmprof/src/vmprof.c new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/src/vmprof.c @@ -0,0 +1,398 @@ +/* VMPROF + * + * statistical sampling profiler specifically designed to profile programs + * which run on a Virtual Machine and/or bytecode interpreter, such as Python, + * etc. + * + * The logic to dump the C stack traces is partly stolen from the code in gperftools. + * The file "getpc.h" has been entirely copied from gperftools. + * + * Tested only on gcc, linux, x86_64. 
+ * + * Copyright (C) 2014-2015 + * Antonio Cuni - anto.cuni at gmail.com + * Maciej Fijalkowski - fijall at gmail.com + * + */ + + +#include "getpc.h" // should be first to get the _GNU_SOURCE dfn +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define UNW_LOCAL_ONLY +#include + +#include "vmprof.h" + +#define _unused(x) ((void)x) + +#define MAX_FUNC_NAME 128 +#define MAX_STACK_DEPTH 1024 +#define BUFFER_SIZE 8192 + + +static int profile_file = 0; +static char profile_write_buffer[BUFFER_SIZE]; +static int profile_buffer_position = 0; +void* vmprof_mainloop_func; +static ptrdiff_t mainloop_sp_offset; +static vmprof_get_virtual_ip_t mainloop_get_virtual_ip; +static long last_period_usec = 0; +static int atfork_hook_installed = 0; + + +/* ************************************************************* + * functions to write a profile file compatible with gperftools + * ************************************************************* + */ + +#define MARKER_STACKTRACE '\x01' +#define MARKER_VIRTUAL_IP '\x02' +#define MARKER_TRAILER '\x03' + +static void prof_word(long x) { + ((long*)(profile_write_buffer + profile_buffer_position))[0] = x; + profile_buffer_position += sizeof(long); +} + +static void prof_header(long period_usec) { + // XXX never used here? 
+ prof_word(0); + prof_word(3); + prof_word(0); + prof_word(period_usec); + prof_word(0); + write(profile_file, profile_write_buffer, profile_buffer_position); + profile_buffer_position = 0; +} + +static void prof_write_stacktrace(void** stack, int depth, int count) { + int i; + char marker = MARKER_STACKTRACE; + + profile_write_buffer[profile_buffer_position++] = MARKER_STACKTRACE; + prof_word(count); + prof_word(depth); + for(i=0; isp = bp; + bp -= sizeof(void*); + cp2->ip = ((void**)bp)[0]; + // the ret is on the top of the stack minus WORD + return 1; + } +} + + +/* ************************************************************* + * functions to dump the stack trace + * ************************************************************* + */ + +// The original code here has a comment, "stolen from pprof", +// about a "__thread int recursive". But general __thread +// variables are not really supposed to be accessed from a +// signal handler. Moreover, we are using SIGPROF, which +// should not be recursively called on the same thread. 
+//static __thread int recursive; + +int get_stack_trace(void** result, int max_depth, ucontext_t *ucontext) { + void *ip; + int n = 0; + unw_cursor_t cursor; + unw_context_t uc = *ucontext; + //if (recursive) { + // return 0; + //} + if (!custom_sanity_check()) { + return 0; + } + //++recursive; + + int ret = unw_init_local(&cursor, &uc); + assert(ret >= 0); + _unused(ret); + int first_run = 1; + + while (n < max_depth) { + if (unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *) &ip) < 0) { + break; + } + + unw_proc_info_t pip; + unw_get_proc_info(&cursor, &pip); + + /* char funcname[4096]; */ + /* unw_word_t offset; */ + /* unw_get_proc_name(&cursor, funcname, 4096, &offset); */ + /* printf("%s+%#lx <%p>\n", funcname, offset, ip); */ + + /* if n==0, it means that the signal handler interrupted us while we + were in the trampoline, so we are not executing (yet) the real main + loop function; just skip it */ + if (vmprof_mainloop_func && + (void*)pip.start_ip == (void*)vmprof_mainloop_func && + n > 0) { + // found main loop stack frame + void* sp; + unw_get_reg(&cursor, UNW_REG_SP, (unw_word_t *) &sp); + void *arg_addr = (char*)sp + mainloop_sp_offset; + void **arg_ptr = (void**)arg_addr; + // fprintf(stderr, "stacktrace mainloop: rsp %p &f2 %p offset %ld\n", + // sp, arg_addr, mainloop_sp_offset); + if (mainloop_get_virtual_ip) { + ip = mainloop_get_virtual_ip(*arg_ptr); + } else { + ip = *arg_ptr; + } + } + + result[n++] = ip; + n = vmprof_write_header_for_jit_addr(result, n, ip, max_depth); + if (vmprof_unw_step(&cursor, first_run) <= 0) { + break; + } + first_run = 0; + } + //--recursive; + return n; +} + + +static int __attribute__((noinline)) frame_forcer(int rv) { + return rv; +} + +static void sigprof_handler(int sig_nr, siginfo_t* info, void *ucontext) { + void* stack[MAX_STACK_DEPTH]; + int saved_errno = errno; + stack[0] = GetPC((ucontext_t*)ucontext); + int depth = frame_forcer(get_stack_trace(stack+1, MAX_STACK_DEPTH-1, ucontext)); + depth++; // To account 
for pc value in stack[0]; + prof_write_stacktrace(stack, depth, 1); + errno = saved_errno; +} + +/* ************************************************************* + * functions to enable/disable the profiler + * ************************************************************* + */ + +static int open_profile(int fd, long period_usec, int write_header, char *s, + int slen) { + if ((fd = dup(fd)) == -1) { + return -1; + } + profile_buffer_position = 0; + profile_file = fd; + if (write_header) + prof_header(period_usec); + if (s) + write(profile_file, s, slen); + return 0; +} + +static int close_profile(void) { + // XXX all of this can happily fail + FILE* src; + char buf[BUFSIZ]; + size_t size; + int marker = MARKER_TRAILER; + write(profile_file, &marker, 1); + + // copy /proc/PID/maps to the end of the profile file + sprintf(buf, "/proc/%d/maps", getpid()); + src = fopen(buf, "r"); + while ((size = fread(buf, 1, BUFSIZ, src))) { + write(profile_file, buf, size); + } + fclose(src); + close(profile_file); + return 0; +} + + +static int install_sigprof_handler(void) { + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sa.sa_sigaction = sigprof_handler; + sa.sa_flags = SA_RESTART | SA_SIGINFO; + if (sigemptyset(&sa.sa_mask) == -1 || + sigaction(SIGPROF, &sa, NULL) == -1) { + return -1; + } + return 0; +} + +static int remove_sigprof_handler(void) { + sighandler_t res = signal(SIGPROF, SIG_DFL); + if (res == SIG_ERR) { + return -1; + } + return 0; +}; + +static int install_sigprof_timer(long period_usec) { + static struct itimerval timer; + last_period_usec = period_usec; + timer.it_interval.tv_sec = 0; + timer.it_interval.tv_usec = period_usec; + timer.it_value = timer.it_interval; + if (setitimer(ITIMER_PROF, &timer, NULL) != 0) { + return -1; + } + return 0; +} + +static int remove_sigprof_timer(void) { + static struct itimerval timer; + last_period_usec = 0; + timer.it_interval.tv_sec = 0; + timer.it_interval.tv_usec = 0; + timer.it_value.tv_sec = 0; + 
timer.it_value.tv_usec = 0; + if (setitimer(ITIMER_PROF, &timer, NULL) != 0) { + return -1; + } + return 0; +} + +static void atfork_disable_timer(void) { + remove_sigprof_timer(); +} + +static void atfork_enable_timer(void) { + install_sigprof_timer(last_period_usec); +} + +static int install_pthread_atfork_hooks(void) { + /* this is needed to prevent the problems described there: + - http://code.google.com/p/gperftools/issues/detail?id=278 + - http://lists.debian.org/debian-glibc/2010/03/msg00161.html + + TL;DR: if the RSS of the process is large enough, the clone() syscall + will be interrupted by the SIGPROF before it can complete, then + retried, interrupted again and so on, in an endless loop. The + solution is to disable the timer around the fork, and re-enable it + only inside the parent. + */ + if (atfork_hook_installed) + return 0; + int ret = pthread_atfork(atfork_disable_timer, atfork_enable_timer, NULL); + if (ret != 0) + return -1; + atfork_hook_installed = 1; + return 0; +} + +/* ************************************************************* + * public API + * ************************************************************* + */ + +void vmprof_set_mainloop(void* func, ptrdiff_t sp_offset, + vmprof_get_virtual_ip_t get_virtual_ip) { + mainloop_sp_offset = sp_offset; + mainloop_get_virtual_ip = get_virtual_ip; + vmprof_mainloop_func = func; +} + +int vmprof_enable(int fd, long period_usec, int write_header, char *s, + int slen) +{ + assert(period_usec > 0); + if (open_profile(fd, period_usec, write_header, s, slen) == -1) { + return -1; + } + if (install_sigprof_handler() == -1) { + return -1; + } + if (install_sigprof_timer(period_usec) == -1) { + return -1; + } + if (install_pthread_atfork_hooks() == -1) { + return -1; + } + return 0; +} + +int vmprof_disable(void) { + if (remove_sigprof_timer() == -1) { + return -1; + } + if (remove_sigprof_handler() == -1) { + return -1; + } + if (close_profile() == -1) { + return -1; + } + return 0; +} + +void 
vmprof_register_virtual_function(const char* name, void* start, void* end) { + // XXX unused by pypy + // for now *end is simply ignored + char buf[1024]; + int lgt = strlen(name) + 2 * sizeof(long) + 1; + + if (lgt > 1024) { + lgt = 1024; + } + buf[0] = MARKER_VIRTUAL_IP; + ((void **)(((void*)buf) + 1))[0] = start; + ((long *)(((void*)buf) + 1 + sizeof(long)))[0] = lgt - 2 * sizeof(long) - 1; + strncpy(buf + 2 * sizeof(long) + 1, name, 1024 - 2 * sizeof(long) - 1); + write(profile_file, buf, lgt); +} diff --git a/pypy/module/_vmprof/src/vmprof.h b/pypy/module/_vmprof/src/vmprof.h new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/src/vmprof.h @@ -0,0 +1,22 @@ +#ifndef VMPROF_VMPROF_H_ +#define VMPROF_VMPROF_H_ + +#include + +typedef void* (*vmprof_get_virtual_ip_t)(void*); + +extern void* vmprof_mainloop_func; +void vmprof_set_mainloop(void* func, ptrdiff_t sp_offset, + vmprof_get_virtual_ip_t get_virtual_ip); + +void vmprof_register_virtual_function(const char* name, void* start, void* end); + + +int vmprof_enable(int fd, long period_usec, int write_header, char* vips, + int vips_len); +int vmprof_disable(void); + +// XXX: this should be part of _vmprof (the CPython extension), not vmprof (the library) +void vmprof_set_tramp_range(void* start, void* end); + +#endif diff --git a/pypy/module/_vmprof/test/__init__.py b/pypy/module/_vmprof/test/__init__.py new file mode 100644 diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -0,0 +1,72 @@ + +import tempfile +from pypy.tool.pytest.objspace import gettestobjspace + +class AppTestVMProf(object): + def setup_class(cls): + cls.space = gettestobjspace(usemodules=['_vmprof', 'struct']) + cls.tmpfile = tempfile.NamedTemporaryFile() + cls.w_tmpfileno = cls.space.wrap(cls.tmpfile.fileno()) + cls.w_tmpfilename = cls.space.wrap(cls.tmpfile.name) + cls.tmpfile2 = 
tempfile.NamedTemporaryFile() + cls.w_tmpfileno2 = cls.space.wrap(cls.tmpfile2.fileno()) + cls.w_tmpfilename2 = cls.space.wrap(cls.tmpfile2.name) + + def test_import_vmprof(self): + import struct, sys + + WORD = struct.calcsize('l') + + def count(s): + i = 0 + count = 0 + i += 5 * WORD # header + assert s[i] == '\x04' + i += 1 # marker + assert s[i] == '\x04' + i += 1 # length + i += len('pypy') + while i < len(s): + if s[i] == '\x03': + break + if s[i] == '\x01': + xxx + assert s[i] == '\x02' + i += 1 + _, size = struct.unpack("ll", s[i:i + 2 * WORD]) + count += 1 + i += 2 * WORD + size + return count + + import _vmprof + _vmprof.enable(self.tmpfileno) + _vmprof.disable() + s = open(self.tmpfilename).read() + no_of_codes = count(s) + assert no_of_codes > 10 + d = {} + + exec """def foo(): + pass + """ in d + + _vmprof.enable(self.tmpfileno2) + + exec """def foo2(): + pass + """ in d + + _vmprof.disable() + s = open(self.tmpfilename2).read() + no_of_codes2 = count(s) + assert "py:foo:" in s + assert "py:foo2:" in s + assert no_of_codes2 >= no_of_codes + 2 # some extra codes from tests + + def test_enable_ovf(self): + import _vmprof + raises(ValueError, _vmprof.enable, 999, 0) + raises(ValueError, _vmprof.enable, 999, -2.5) + raises(ValueError, _vmprof.enable, 999, 1e300) + raises(ValueError, _vmprof.enable, 999, 1e300 * 1e300) + raises(ValueError, _vmprof.enable, 999, (1e300*1e300) / (1e300*1e300)) diff --git a/pypy/module/_vmprof/test/test_direct.py b/pypy/module/_vmprof/test/test_direct.py new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/test/test_direct.py @@ -0,0 +1,67 @@ + +import cffi, py + +srcdir = py.path.local(__file__).join("..", "..", "src") + +ffi = cffi.FFI() +ffi.cdef(""" +long vmprof_write_header_for_jit_addr(void **, long, void*, int); +void *pypy_find_codemap_at_addr(long addr, long *start_addr); +long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, + long *current_pos_addr); +long buffer[]; +""") + +lib = ffi.verify(""" 
+volatile int pypy_codemap_currently_invalid = 0; + +long buffer[] = {0, 0, 0, 0, 0}; + + + +void *pypy_find_codemap_at_addr(long addr, long *start_addr) +{ + return (void*)buffer; +} + +long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, + long *current_pos_addr) +{ + long c = *current_pos_addr; + if (c >= 5) + return -1; + *current_pos_addr = c + 1; + return *((long*)codemap_raw + c); +} + + +""" + open(str(srcdir.join("get_custom_offset.c"))).read()) + +class TestDirect(object): + def test_infrastructure(self): + cont = ffi.new("long[1]", [0]) + buf = lib.pypy_find_codemap_at_addr(0, cont) + assert buf + cont[0] = 0 + next_addr = lib.pypy_yield_codemap_at_addr(buf, 0, cont) + assert cont[0] == 1 + assert not next_addr + lib.buffer[0] = 13 + cont[0] = 0 + next_addr = lib.pypy_yield_codemap_at_addr(buf, 0, cont) + assert int(ffi.cast("long", next_addr)) == 13 + + def test_write_header_for_jit_addr(self): + lib.buffer[0] = 4 + lib.buffer[1] = 8 + lib.buffer[2] = 12 + lib.buffer[3] = 16 + lib.buffer[4] = 0 + buf = ffi.new("long[10]", [0] * 10) + result = ffi.cast("void**", buf) + res = lib.vmprof_write_header_for_jit_addr(result, 0, ffi.NULL, 100) + assert res == 6 + assert buf[0] == 2 + assert buf[1] == 16 + assert buf[2] == 12 + assert buf[3] == 8 diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py --- a/pypy/module/gc/referents.py +++ b/pypy/module/gc/referents.py @@ -44,30 +44,6 @@ return OperationError(space.w_NotImplementedError, space.wrap("operation not implemented by this GC")) -# ____________________________________________________________ - -def clear_gcflag_extra(fromlist): - pending = fromlist[:] - while pending: - gcref = pending.pop() - if rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - pending.extend(rgc.get_rpy_referents(gcref)) - -def do_get_objects(): - roots = [gcref for gcref in rgc.get_rpy_roots() if gcref] - pending = roots[:] - result_w = [] - while pending: - gcref = pending.pop() - if not 
rgc.get_gcflag_extra(gcref): - rgc.toggle_gcflag_extra(gcref) - w_obj = try_cast_gcref_to_w_root(gcref) - if w_obj is not None: - result_w.append(w_obj) - pending.extend(rgc.get_rpy_referents(gcref)) - clear_gcflag_extra(roots) - return result_w # ____________________________________________________________ @@ -116,8 +92,8 @@ break # done. Clear flags carefully rgc.toggle_gcflag_extra(gcarg) - clear_gcflag_extra(roots) - clear_gcflag_extra([gcarg]) + rgc.clear_gcflag_extra(roots) + rgc.clear_gcflag_extra([gcarg]) return result_w # ____________________________________________________________ @@ -189,8 +165,7 @@ """Return a list of all app-level objects.""" if not rgc.has_gcflag_extra(): raise missing_operation(space) - result_w = do_get_objects() - rgc.assert_no_more_gcflags() + result_w = rgc.do_get_objects(try_cast_gcref_to_w_root) return space.newlist(result_w) def get_referents(space, args_w): diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -35,6 +35,9 @@ name = opcode_method_names[ord(bytecode.co_code[next_instr])] return '%s #%d %s' % (bytecode.get_repr(), next_instr, name) +def get_unique_id(next_instr, is_being_profiled, bytecode): + return bytecode._unique_id + def should_unroll_one_iteration(next_instr, is_being_profiled, bytecode): return (bytecode.co_flags & CO_GENERATOR) != 0 @@ -45,6 +48,7 @@ virtualizables = ['frame'] pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, + get_unique_id = get_unique_id, should_unroll_one_iteration = should_unroll_one_iteration, name='pypyjit') diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -134,7 +134,8 @@ def _ops_for_chunk(self, chunk, include_guard_not_invalidated): for op in chunk.operations: - if op.name != 'debug_merge_point' 
and \ + if op.name not in ('debug_merge_point', 'enter_portal_frame', + 'leave_portal_frame') and \ (op.name != 'guard_not_invalidated' or include_guard_not_invalidated): yield op diff --git a/rpython/bin/rpython-vmprof b/rpython/bin/rpython-vmprof new file mode 100755 --- /dev/null +++ b/rpython/bin/rpython-vmprof @@ -0,0 +1,28 @@ +#!/usr/bin/env pypy + +"""RPython translation usage: + +rpython target + +run with --help for more information +""" + +import sys, os +sys.path.insert(0, os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__))))) +from rpython.translator.goal.translate import main + +# no implicit targets +if len(sys.argv) == 1: + print __doc__ + sys.exit(1) + +import _vmprof, subprocess +x = subprocess.Popen('gzip > vmprof.log.gz', shell=True, stdin=subprocess.PIPE) +_vmprof.enable(x.stdin.fileno(), 0.001) +try: + main() +finally: + _vmprof.disable() + x.stdin.close() + x.wait() diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -57,6 +57,7 @@ BaseAssembler.setup_once(self) def setup(self, looptoken): + BaseAssembler.setup(self, looptoken) assert self.memcpy_addr != 0, 'setup_once() not called?' 
if we_are_translated(): self.debug = False @@ -71,7 +72,6 @@ self.mc.datablockwrapper = self.datablockwrapper self.target_tokens_currently_compiling = {} self.frame_depth_to_patch = [] - self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) def teardown(self): self.current_clt = None @@ -102,7 +102,7 @@ self.store_reg(mc, r.r0, r.fp, ofs) mc.MOV_rr(r.r0.value, r.fp.value) self.gen_func_epilog(mc) - rawstart = mc.materialize(self.cpu.asmmemmgr, []) + rawstart = mc.materialize(self.cpu, []) self.propagate_exception_path = rawstart def _store_and_reset_exception(self, mc, excvalloc=None, exctploc=None, @@ -198,7 +198,7 @@ mc.ADD_ri(r.sp.value, r.sp.value, (len(r.argument_regs) + 2) * WORD) mc.B(self.propagate_exception_path) # - rawstart = mc.materialize(self.cpu.asmmemmgr, []) + rawstart = mc.materialize(self.cpu, []) self.stack_check_slowpath = rawstart def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): @@ -255,7 +255,7 @@ # mc.POP([r.ip.value, r.pc.value]) # - rawstart = mc.materialize(self.cpu.asmmemmgr, []) + rawstart = mc.materialize(self.cpu, []) if for_frame: self.wb_slowpath[4] = rawstart else: @@ -276,7 +276,7 @@ callee_only) # return mc.POP([r.ip.value, r.pc.value]) - return mc.materialize(self.cpu.asmmemmgr, []) + return mc.materialize(self.cpu, []) def _build_malloc_slowpath(self, kind): """ While arriving on slowpath, we have a gcpattern on stack 0. 
@@ -352,7 +352,7 @@ mc.POP([r.ip.value, r.pc.value]) # - rawstart = mc.materialize(self.cpu.asmmemmgr, []) + rawstart = mc.materialize(self.cpu, []) return rawstart def _reload_frame_if_necessary(self, mc): @@ -473,7 +473,7 @@ mc.MOV_rr(r.r0.value, r.fp.value) # self.gen_func_epilog(mc) - rawstart = mc.materialize(self.cpu.asmmemmgr, []) + rawstart = mc.materialize(self.cpu, []) self.failure_recovery_code[exc + 2 * withfloats] = rawstart def generate_quick_failure(self, guardtok): @@ -575,8 +575,8 @@ self.mc.BL(self.stack_check_slowpath, c=c.HI) # call if ip > lr # cpu interface - def assemble_loop(self, logger, loopname, inputargs, operations, looptoken, - log): + def assemble_loop(self, jd_id, unique_id, logger, loopname, inputargs, + operations, looptoken, log): clt = CompiledLoopToken(self.cpu, looptoken.number) looptoken.compiled_loop_token = clt clt._debug_nbargs = len(inputargs) @@ -586,6 +586,9 @@ assert len(set(inputargs)) == len(inputargs) self.setup(looptoken) + #self.codemap_builder.enter_portal_frame(jd_id, unique_id, + # self.mc.get_relative_pos()) + frame_info = self.datablockwrapper.malloc_aligned( jitframe.JITFRAMEINFO_SIZE, alignment=WORD) @@ -659,6 +662,7 @@ assert len(set(inputargs)) == len(inputargs) self.setup(original_loop_token) + #self.codemap.inherit_code_from_position(faildescr.adr_jump_offset) descr_number = compute_unique_id(faildescr) if log: operations = self._inject_debugging_code(faildescr, operations, @@ -850,7 +854,7 @@ # restore registers self._pop_all_regs_from_jitframe(mc, [], self.cpu.supports_floats) mc.POP([r.ip.value, r.pc.value]) # return - self._frame_realloc_slowpath = mc.materialize(self.cpu.asmmemmgr, []) + self._frame_realloc_slowpath = mc.materialize(self.cpu, []) def _load_shadowstack_top(self, mc, reg, gcrootmap): rst = gcrootmap.get_root_stack_top_addr() @@ -879,8 +883,12 @@ self.datablockwrapper.done() # finish using cpu.asmmemmgr self.datablockwrapper = None allblocks = self.get_asmmemmgr_blocks(looptoken) - 
return self.mc.materialize(self.cpu.asmmemmgr, allblocks, + size = self.mc.get_relative_pos() + res = self.mc.materialize(self.cpu, allblocks, self.cpu.gc_ll_descr.gcrootmap) + #self.cpu.codemap.register_codemap( + # self.codemap.get_final_bytecode(res, size)) + return res def update_frame_depth(self, frame_depth): baseofs = self.cpu.get_baseofs_of_frame_field() diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -373,6 +373,12 @@ return gcmap # ------------------------------------------------------------ + def perform_enter_portal_frame(self, op): + self.assembler.enter_portal_frame(op) + + def perform_leave_portal_frame(self, op): + self.assembler.leave_portal_frame(op) + def perform_extra(self, op, args, fcond): return self.assembler.regalloc_emit_extra(op, args, fcond, self) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -50,16 +50,12 @@ def setup_once(self): self.cpuinfo.arch_version = detect_arch_version() self.cpuinfo.hf_abi = detect_hardfloat() + #self.codemap.setup() self.assembler.setup_once() def finish_once(self): self.assembler.finish_once() - def compile_loop(self, inputargs, operations, looptoken, - log=True, name='', logger=None): - return self.assembler.assemble_loop(logger, name, inputargs, operations, - looptoken, log=log) - def compile_bridge(self, faildescr, inputargs, operations, original_loop_token, log=True, logger=None): clt = original_loop_token.compiled_loop_token diff --git a/rpython/jit/backend/arm/test/support.py b/rpython/jit/backend/arm/test/support.py --- a/rpython/jit/backend/arm/test/support.py +++ b/rpython/jit/backend/arm/test/support.py @@ -24,7 +24,7 @@ def run_asm(asm): BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) - addr = asm.mc.materialize(asm.cpu.asmmemmgr, [], None) + addr = 
asm.mc.materialize(asm.cpu, [], None) assert addr % 8 == 0 func = rffi.cast(lltype.Ptr(BOOTSTRAP_TP), addr) asm.mc._dump_trace(addr, 'test.asm') diff --git a/rpython/jit/backend/arm/test/test_calling_convention.py b/rpython/jit/backend/arm/test/test_calling_convention.py --- a/rpython/jit/backend/arm/test/test_calling_convention.py +++ b/rpython/jit/backend/arm/test/test_calling_convention.py @@ -29,7 +29,7 @@ mc = InstrBuilder() mc.MOV_rr(r.r0.value, r.sp.value) mc.MOV_rr(r.pc.value, r.lr.value) - return mc.materialize(self.cpu.asmmemmgr, []) + return mc.materialize(self.cpu, []) def get_alignment_requirements(self): return 8 diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -245,8 +245,8 @@ self.stats = stats or MiniStats() self.vinfo_for_tests = kwds.get('vinfo_for_tests', None) - def compile_loop(self, inputargs, operations, looptoken, log=True, - name='', logger=None): + def compile_loop(self, inputargs, operations, looptoken, jd_id=0, + unique_id=0, log=True, name='', logger=None): clt = model.CompiledLoopToken(self, looptoken.number) looptoken.compiled_loop_token = clt lltrace = LLTrace(inputargs, operations) @@ -1025,6 +1025,12 @@ else: stats.add_merge_point_location(args[1:]) + def execute_enter_portal_frame(self, descr, *args): + pass + + def execute_leave_portal_frame(self, descr, *args): + pass + def execute_new_with_vtable(self, _, vtable): descr = heaptracker.vtable2descr(self.cpu, vtable) return self.cpu.bh_new_with_vtable(vtable, descr) diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py --- a/rpython/jit/backend/llsupport/asmmemmgr.py +++ b/rpython/jit/backend/llsupport/asmmemmgr.py @@ -4,7 +4,7 @@ from rpython.rlib import rmmap from rpython.rlib.debug import debug_start, debug_print, debug_stop from rpython.rlib.debug import have_debug_prints -from 
rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rtyper.lltypesystem import lltype, rffi class AsmMemoryManager(object): @@ -212,6 +212,9 @@ gcroot_markers = None + frame_positions = None + frame_assignments = None + def __init__(self, translated=None): if translated is None: translated = we_are_translated() @@ -303,11 +306,11 @@ # debug_stop(logname) - def materialize(self, asmmemmgr, allblocks, gcrootmap=None): + def materialize(self, cpu, allblocks, gcrootmap=None): size = self.get_relative_pos() align = self.ALIGN_MATERIALIZE size += align - 1 - malloced = asmmemmgr.malloc(size, size) + malloced = cpu.asmmemmgr.malloc(size, size) allblocks.append(malloced) rawstart = malloced[0] rawstart = (rawstart + align - 1) & (-align) @@ -316,6 +319,12 @@ assert gcrootmap is not None for pos, mark in self.gcroot_markers: gcrootmap.register_asm_addr(rawstart + pos, mark) + if cpu.HAS_CODEMAP: + cpu.codemap.register_frame_depth_map(rawstart, rawstart + size, + self.frame_positions, + self.frame_assignments) + self.frame_positions = None + self.frame_assignments = None return rawstart def _become_a_plain_block_builder(self): diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -1,6 +1,7 @@ from rpython.jit.backend.llsupport import jitframe from rpython.jit.backend.llsupport.memcpy import memcpy_fn, memset_fn from rpython.jit.backend.llsupport.symbolic import WORD +from rpython.jit.backend.llsupport.codemap import CodemapBuilder from rpython.jit.metainterp.history import (INT, REF, FLOAT, JitCellToken, ConstInt, BoxInt, AbstractFailDescr) from rpython.jit.metainterp.resoperation import ResOperation, rop @@ -128,6 +129,11 @@ track_allocation=False) self.gcmap_for_finish[0] = r_uint(1) + def setup(self, looptoken): + if self.cpu.HAS_CODEMAP: + self.codemap_builder = CodemapBuilder() + self._finish_gcmap = 
lltype.nullptr(jitframe.GCMAP) + def set_debug(self, v): r = self._debug self._debug = v @@ -194,6 +200,17 @@ guardtok.faildescr.rd_locs = positions return fail_descr, target + def enter_portal_frame(self, op): + if self.cpu.HAS_CODEMAP: + self.codemap_builder.enter_portal_frame(op.getarg(0).getint(), + op.getarg(1).getint(), + self.mc.get_relative_pos()) + + def leave_portal_frame(self, op): + if self.cpu.HAS_CODEMAP: + self.codemap_builder.leave_portal_frame(op.getarg(0).getint(), + self.mc.get_relative_pos()) + def call_assembler(self, op, guard_op, argloc, vloc, result_loc, tmploc): self._store_force_index(guard_op) descr = op.getdescr() @@ -276,6 +293,9 @@ # YYY very minor leak -- we need the counters to stay alive # forever, just because we want to report them at the end # of the process + + # XXX the numbers here are ALMOST unique, but not quite, use a counter + # or something struct = lltype.malloc(DEBUG_COUNTER, flavor='raw', track_allocation=False) struct.i = 0 diff --git a/rpython/jit/backend/llsupport/codemap.py b/rpython/jit/backend/llsupport/codemap.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/codemap.py @@ -0,0 +1,180 @@ + +""" Bytecode for storage in asmmemmgr.jit_codemap. 
Format is as follows: + + list of tuples of shape (addr, machine code size, bytecode info) + where bytecode info is a string made up of: + 8 bytes unique_id, 4 bytes start_addr (relative), 4 bytes size (relative), + 2 bytes how many items to skip to go to the next on similar level + [so far represented by a list of integers for simplicity] + +""" + +import os +from rpython.rlib import rgc +from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.entrypoint import jit_entrypoint +from rpython.rlib.rbisect import bisect_right, bisect_right_addr +from rpython.rlib.rbisect import bisect_left, bisect_left_addr +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.translator import cdir + + +INT_LIST_PTR = rffi.CArrayPtr(lltype.Signed) + + +srcdir = os.path.join(os.path.dirname(__file__), 'src') + +eci = ExternalCompilationInfo(post_include_bits=[""" +RPY_EXTERN long pypy_jit_codemap_add(unsigned long addr, + unsigned int machine_code_size, + long *bytecode_info, + unsigned int bytecode_info_size); +RPY_EXTERN long *pypy_jit_codemap_del(unsigned long addr, unsigned int size); +RPY_EXTERN unsigned long pypy_jit_codemap_firstkey(void); +RPY_EXTERN void *pypy_find_codemap_at_addr(long addr, long* start_addr); +RPY_EXTERN long pypy_yield_codemap_at_addr(void *codemap_raw, long addr, + long *current_pos_addr); + +RPY_EXTERN long pypy_jit_depthmap_add(unsigned long addr, unsigned int size, + unsigned int stackdepth); +RPY_EXTERN void pypy_jit_depthmap_clear(unsigned long addr, unsigned int size); + +"""], separate_module_sources=[ + open(os.path.join(srcdir, 'skiplist.c'), 'r').read() + + open(os.path.join(srcdir, 'codemap.c'), 'r').read() +], include_dirs=[cdir]) + +def llexternal(name, args, res): + return rffi.llexternal(name, args, res, compilation_info=eci, + releasegil=False) + +pypy_jit_codemap_add = llexternal('pypy_jit_codemap_add', + [lltype.Signed, 
lltype.Signed, + INT_LIST_PTR, lltype.Signed], + lltype.Signed) +pypy_jit_codemap_del = llexternal('pypy_jit_codemap_del', + [lltype.Signed, lltype.Signed], INT_LIST_PTR) +pypy_jit_codemap_firstkey = llexternal('pypy_jit_codemap_firstkey', + [], lltype.Signed) + +pypy_jit_depthmap_add = llexternal('pypy_jit_depthmap_add', + [lltype.Signed, lltype.Signed, + lltype.Signed], lltype.Signed) +pypy_jit_depthmap_clear = llexternal('pypy_jit_depthmap_clear', + [lltype.Signed, lltype.Signed], + lltype.Void) + +stack_depth_at_loc = llexternal('pypy_jit_stack_depth_at_loc', + [lltype.Signed], lltype.Signed) +find_codemap_at_addr = llexternal('pypy_find_codemap_at_addr', + [lltype.Signed, rffi.CArrayPtr(lltype.Signed)], lltype.Signed) +yield_bytecode_at_addr = llexternal('pypy_yield_codemap_at_addr', + [lltype.Signed, lltype.Signed, + rffi.CArrayPtr(lltype.Signed)], + lltype.Signed) + + +class CodemapStorage(object): + """ An immortal wrapper around underlaying jit codemap data + """ + def setup(self): + if not we_are_translated(): + # in case someone failed to call free(), in tests only anyway + self.free() + + def free(self): + while True: + key = pypy_jit_codemap_firstkey() + if not key: + break + items = pypy_jit_codemap_del(key, 1) + lltype.free(items, flavor='raw', track_allocation=False) + + def free_asm_block(self, start, stop): + items = pypy_jit_codemap_del(start, stop - start) + if items: + lltype.free(items, flavor='raw', track_allocation=False) + pypy_jit_depthmap_clear(start, stop - start) + + def register_frame_depth_map(self, rawstart, rawstop, frame_positions, + frame_assignments): + if not frame_positions: + return + assert len(frame_positions) == len(frame_assignments) + for i in range(len(frame_positions)-1, -1, -1): + pos = rawstart + frame_positions[i] + length = rawstop - pos + if length > 0: + #print "ADD:", pos, length, frame_assignments[i] + pypy_jit_depthmap_add(pos, length, frame_assignments[i]) + rawstop = pos + + def register_codemap(self, (start, 
size, l)): + items = lltype.malloc(INT_LIST_PTR.TO, len(l), flavor='raw', + track_allocation=False) + for i in range(len(l)): + items[i] = l[i] + if pypy_jit_codemap_add(start, size, items, len(l)) < 0: + lltype.free(items, flavor='raw', track_allocation=False) + + def finish_once(self): + self.free() + +def unpack_traceback(addr): + codemap_raw = find_codemap_at_addr(addr, + lltype.nullptr(rffi.CArray(lltype.Signed))) + if not codemap_raw: + return [] # no codemap for that position + storage = lltype.malloc(rffi.CArray(lltype.Signed), 1, flavor='raw') + storage[0] = 0 + res = [] + while True: + item = yield_bytecode_at_addr(codemap_raw, addr, storage) + if item == -1: + break + res.append(item) + lltype.free(storage, flavor='raw') + return res + + +class CodemapBuilder(object): + def __init__(self): + self.l = [] + self.patch_position = [] + + def enter_portal_frame(self, jd_id, unique_id, relpos): + self.l.append(unique_id) + self.l.append(relpos) + self.patch_position.append(len(self.l)) + self.l.append(0) # marker + self.l.append(0) # second marker + + def leave_portal_frame(self, jd_id, relpos): + to_patch = self.patch_position.pop() + self.l[to_patch] = relpos + self.l[to_patch + 1] = len(self.l) + + def inherit_code_from_position(self, pos): + lst = unpack_traceback(pos) + for item in lst: + self.l.append(item) + self.l.append(0) + self.patch_position.append(len(self.l)) + self.l.append(0) # marker + self.l.append(0) # second marker + + def get_final_bytecode(self, addr, size): + while self.patch_position: + pos = self.patch_position.pop() + self.l[pos] = size + self.l[pos + 1] = len(self.l) + # at the end there should be no zeros, except unique_id which can + # be zero + for i in range(len(self.l) / 4): + item = self.l[i * 4] # unique_id + item = self.l[i * 4 + 2] # end in asm + assert item > 0 + item = self.l[i * 4 + 3] # end in l + assert item > 0 + return (addr, size, self.l) # XXX compact self.l diff --git a/rpython/jit/backend/llsupport/llmodel.py 
b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -16,13 +16,15 @@ FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr, FLAG_POINTER, FLAG_FLOAT) from rpython.jit.backend.llsupport.memcpy import memset_fn -from rpython.jit.backend.llsupport.asmmemmgr import AsmMemoryManager +from rpython.jit.backend.llsupport import asmmemmgr, codemap from rpython.rlib.unroll import unrolling_iterable class AbstractLLCPU(AbstractCPU): from rpython.jit.metainterp.typesystem import llhelper as ts + HAS_CODEMAP = False + def __init__(self, rtyper, stats, opts, translate_support_code=False, gcdescr=None): assert type(opts) is not bool @@ -48,7 +50,9 @@ self._setup_exception_handling_translated() else: self._setup_exception_handling_untranslated() - self.asmmemmgr = AsmMemoryManager() + self.asmmemmgr = asmmemmgr.AsmMemoryManager() + if self.HAS_CODEMAP: + self.codemap = codemap.CodemapStorage() self._setup_frame_realloc(translate_support_code) ad = self.gc_ll_descr.getframedescrs(self).arraydescr self.signedarraydescr = ad @@ -78,6 +82,16 @@ def setup(self): pass + def finish_once(self): + if self.HAS_CODEMAP: + self.codemap.finish_once() + + def compile_loop(self, inputargs, operations, looptoken, jd_id=0, + unique_id=0, log=True, name='', logger=None): + return self.assembler.assemble_loop(jd_id, unique_id, logger, name, + inputargs, operations, + looptoken, log=log) + def _setup_frame_realloc(self, translate_support_code): FUNC_TP = lltype.Ptr(lltype.FuncType([llmemory.GCREF, lltype.Signed], llmemory.GCREF)) @@ -212,6 +226,8 @@ for rawstart, rawstop in blocks: self.gc_ll_descr.freeing_block(rawstart, rawstop) self.asmmemmgr.free(rawstart, rawstop) + if self.HAS_CODEMAP: + self.codemap.free_asm_block(rawstart, rawstop) def force(self, addr_of_force_token): frame = rffi.cast(jitframe.JITFRAMEPTR, addr_of_force_token) diff --git a/rpython/jit/backend/llsupport/rewrite.py 
b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -17,8 +17,6 @@ class GcRewriterAssembler(object): """ This class performs the following rewrites on the list of operations: - - Remove the DEBUG_MERGE_POINTs. - - Turn all NEW_xxx to either a CALL_MALLOC_GC, or a CALL_MALLOC_NURSERY followed by SETFIELDs in order to initialize their GC fields. The two advantages of CALL_MALLOC_NURSERY is that it inlines the common @@ -75,6 +73,8 @@ From noreply at buildbot.pypy.org Sat Apr 25 04:33:43 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 25 Apr 2015 04:33:43 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: create casting_table and use it in can_cast() Message-ID: <20150425023343.5952F1C0D78@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76930:2c0b8658d9ed Date: 2015-04-25 03:33 +0100 http://bitbucket.org/pypy/pypy/changeset/2c0b8658d9ed/ Log: create casting_table and use it in can_cast() diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -94,7 +94,7 @@ return self.itemtype.box_complex(real, imag) def can_cast_to(self, other): - return True + return self.itemtype.can_cast_to(other.itemtype) def coerce(self, space, w_item): return self.itemtype.coerce(space, self, w_item) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -129,6 +129,13 @@ else: return alloc_raw_storage(size, track_allocation=False, zero=False) + @classmethod + def basesize(cls): + return rffi.sizeof(cls.T) + + def can_cast_to(self, other): + return casting_table[self.num][other.num] + class Primitive(object): _mixin_ = True @@ -412,6 +419,7 @@ class Integer(Primitive): _mixin_ = True + signed = True def _base_coerce(self, space, w_item): if 
w_item is None: @@ -568,6 +576,7 @@ char = NPY.UBYTELTR BoxType = boxes.W_UInt8Box format_code = "B" + signed = False class Int16(BaseType, Integer): T = rffi.SHORT @@ -584,6 +593,7 @@ char = NPY.USHORTLTR BoxType = boxes.W_UInt16Box format_code = "H" + signed = False class Int32(BaseType, Integer): T = rffi.INT @@ -600,6 +610,7 @@ char = NPY.UINTLTR BoxType = boxes.W_UInt32Box format_code = "I" + signed = False def _int64_coerce(self, space, w_item): try: @@ -645,6 +656,7 @@ char = NPY.ULONGLONGLTR BoxType = boxes.W_UInt64Box format_code = "Q" + signed = False _coerce = func_with_new_name(_uint64_coerce, '_coerce') @@ -676,6 +688,7 @@ char = NPY.ULONGLTR BoxType = boxes.W_ULongBox format_code = "L" + signed = False _coerce = func_with_new_name(_ulong_coerce, '_coerce') @@ -2392,8 +2405,11 @@ del tp all_float_types = [] +float_types = [] all_int_types = [] +int_types = [] all_complex_types = [] +complex_types = [] def _setup(): # compute alignment @@ -2402,9 +2418,57 @@ tp.alignment = widen(clibffi.cast_type_to_ffitype(tp.T).c_alignment) if issubclass(tp, Float): all_float_types.append((tp, 'float')) + float_types.append(tp) if issubclass(tp, Integer): all_int_types.append((tp, 'int')) + int_types.append(tp) if issubclass(tp, ComplexFloating): all_complex_types.append((tp, 'complex')) + complex_types.append(tp) _setup() del _setup + +casting_table = [[False] * NPY.NTYPES for _ in range(NPY.NTYPES)] +number_types = int_types + float_types + complex_types +all_types = number_types + [ObjectType, StringType, UnicodeType, VoidType] + +def enable_cast(type1, type2): + casting_table[type1.num][type2.num] = True + +for tp in all_types: + enable_cast(tp, tp) + if tp.num != NPY.DATETIME: + enable_cast(Bool, tp) + enable_cast(tp, ObjectType) + enable_cast(tp, VoidType) +enable_cast(StringType, UnicodeType) +#enable_cast(Bool, TimeDelta) + +for tp in number_types: + enable_cast(tp, StringType) + enable_cast(tp, UnicodeType) + +for tp1 in int_types: + for tp2 in int_types: + 
if tp1.signed: + if tp2.signed and tp1.basesize() <= tp2.basesize(): + enable_cast(tp1, tp2) + else: + if tp2.signed and tp1.basesize() < tp2.basesize(): + enable_cast(tp1, tp2) + elif not tp2.signed and tp1.basesize() <= tp2.basesize(): + enable_cast(tp1, tp2) +for tp1 in int_types: + for tp2 in float_types + complex_types: + size1 = tp1.basesize() + size2 = tp2.basesize() + if (size1 < 8 and size2 > size1) or (size1 >= 8 and size2 >= size1): + enable_cast(tp1, tp2) +for tp1 in float_types: + for tp2 in float_types + complex_types: + if tp1.basesize() <= tp2.basesize(): + enable_cast(tp1, tp2) +for tp1 in complex_types: + for tp2 in complex_types: + if tp1.basesize() <= tp2.basesize(): + enable_cast(tp1, tp2) From noreply at buildbot.pypy.org Sat Apr 25 05:41:08 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 25 Apr 2015 05:41:08 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: implement casting=no and casting=equiv Message-ID: <20150425034108.1D28E1C022E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76931:48070170c83b Date: 2015-04-25 04:00 +0100 http://bitbucket.org/pypy/pypy/changeset/48070170c83b/ Log: implement casting=no and casting=equiv diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -304,7 +304,15 @@ def can_cast(space, w_from, w_totype, casting='safe'): target = as_dtype(space, w_totype) origin = as_dtype(space, w_from) # XXX - return space.wrap(origin.can_cast_to(target)) + return space.wrap(can_cast_type(space, origin, target, casting)) + +def can_cast_type(space, origin, target, casting): + if casting == 'no': + return origin.eq(space, target) + elif casting == 'equiv': + return origin.num == target.num and origin.elsize == target.elsize + else: + return origin.can_cast_to(target) def as_dtype(space, w_arg): From noreply at buildbot.pypy.org Sat Apr 25 05:41:09 2015 From: noreply at 
buildbot.pypy.org (rlamy) Date: Sat, 25 Apr 2015 05:41:09 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: implement casting=unsafe and casting=same_kind Message-ID: <20150425034109.5F0E01C022E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76932:c34ce46667f0 Date: 2015-04-25 04:41 +0100 http://bitbucket.org/pypy/pypy/changeset/c34ce46667f0/ Log: implement casting=unsafe and casting=same_kind diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -5,9 +5,11 @@ constants as NPY from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy.converters import clipmode_converter -from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \ - shape_agreement_multiple +from pypy.module.micronumpy.strides import ( + Chunk, Chunks, shape_agreement, shape_agreement_multiple) from .boxes import W_GenericBox +from .types import ( + Bool, ULong, Long, Float64, Complex64, UnicodeType, VoidType, ObjectType) def where(space, w_arr, w_x=None, w_y=None): @@ -306,11 +308,25 @@ origin = as_dtype(space, w_from) # XXX return space.wrap(can_cast_type(space, origin, target, casting)) +kind_ordering = { + Bool.kind: 0, ULong.kind: 1, Long.kind: 2, + Float64.kind: 4, Complex64.kind: 5, + NPY.STRINGLTR: 6, NPY.STRINGLTR2: 6, + UnicodeType.kind: 7, VoidType.kind: 8, ObjectType.kind: 9} + def can_cast_type(space, origin, target, casting): if casting == 'no': return origin.eq(space, target) elif casting == 'equiv': return origin.num == target.num and origin.elsize == target.elsize + elif casting == 'unsafe': + return True + elif casting == 'same_kind': + if origin.can_cast_to(target): + return True + if origin.kind in kind_ordering and target.kind in kind_ordering: + return kind_ordering[origin.kind] <= kind_ordering[target.kind] + return False else: return origin.can_cast_to(target) From noreply at 
buildbot.pypy.org Sat Apr 25 10:56:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 10:56:36 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: ffi.callback() Message-ID: <20150425085636.346CA1C1207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1808:6b749854ff50 Date: 2015-04-25 10:57 +0200 http://bitbucket.org/cffi/cffi/changeset/6b749854ff50/ Log: ffi.callback() diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -94,6 +94,7 @@ #define ACCEPT_CTYPE 2 #define ACCEPT_CDATA 4 #define ACCEPT_ALL (ACCEPT_STRING | ACCEPT_CTYPE | ACCEPT_CDATA) +#define CONSIDER_FN_AS_FNPTR 8 static CTypeDescrObject *_ffi_type(FFIObject *ffi, PyObject *arg, int accept) @@ -120,8 +121,14 @@ input_text, spaces); return NULL; } - CTypeDescrObject *ct = realize_c_type(ffi->types_builder, - ffi->info.output, index); + CTypeDescrObject *ct; + if (accept & CONSIDER_FN_AS_FNPTR) { + ct = realize_c_type_fn_as_fnptr(ffi->types_builder, + ffi->info.output, index); + } + else { + ct = realize_c_type(ffi->types_builder, ffi->info.output, index); + } if (ct == NULL) return NULL; @@ -505,6 +512,55 @@ } #endif +PyDoc_STRVAR(ffi_callback_doc, +"Return a callback object or a decorator making such a callback object.\n" +"'cdecl' must name a C function pointer type. The callback invokes the\n" +"specified 'python_callable' (which may be provided either directly or\n" +"via a decorator). 
Important: the callback object must be manually\n" +"kept alive for as long as the callback may be invoked from the C code."); + +static PyObject *_ffi_callback_decorator(PyObject *outer_args, PyObject *fn) +{ + PyObject *res, *old; + + old = PyTuple_GET_ITEM(outer_args, 1); + PyTuple_SET_ITEM(outer_args, 1, fn); + res = b_callback(NULL, outer_args); + PyTuple_SET_ITEM(outer_args, 1, old); + return res; +} + +static PyObject *ffi_callback(FFIObject *self, PyObject *args, PyObject *kwds) +{ + PyObject *cdecl, *python_callable = Py_None, *error = Py_None; + PyObject *res; + static char *keywords[] = {"cdecl", "python_callable", "error", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO", keywords, + &cdecl, &python_callable, &error)) + return NULL; + + cdecl = (PyObject *)_ffi_type(self, cdecl, ACCEPT_STRING | ACCEPT_CDATA | + CONSIDER_FN_AS_FNPTR); + if (cdecl == NULL) + return NULL; + + args = Py_BuildValue("(OOO)", cdecl, python_callable, error); + if (args == NULL) + return NULL; + + if (python_callable != Py_None) { + res = b_callback(NULL, args); + } + else { + static PyMethodDef md = {"callback_decorator", + (PyCFunction)_ffi_callback_decorator, METH_O}; + res = PyCFunction_New(&md, args); + } + Py_DECREF(args); + return res; +} + PyDoc_STRVAR(ffi_errno_doc, "the value of 'errno' from/to the C calls"); static PyObject *ffi_get_errno(PyObject *self, void *closure) @@ -610,27 +666,27 @@ } static PyMethodDef ffi_methods[] = { - {"__set_types", (PyCFunction)ffi__set_types,METH_VARARGS}, + {"__set_types",(PyCFunction)ffi__set_types, METH_VARARGS}, #if 0 - {"addressof", (PyCFunction)ffi_addressof, METH_VARARGS}, + {"addressof", (PyCFunction)ffi_addressof, METH_VARARGS}, #endif - {"alignof", (PyCFunction)ffi_alignof, METH_O, ffi_alignof_doc}, - {"cast", (PyCFunction)ffi_cast, METH_VARARGS, ffi_cast_doc}, + {"alignof", (PyCFunction)ffi_alignof, METH_O, ffi_alignof_doc}, + {"callback", (PyCFunction)ffi_callback, METH_VARARGS | + 
METH_KEYWORDS,ffi_callback_doc}, + {"cast", (PyCFunction)ffi_cast, METH_VARARGS, ffi_cast_doc}, #if 0 - {"close_library", ffi_close_library, METH_VARARGS | METH_STATIC}, - {"from_handle", (PyCFunction)ffi_from_handle,METH_O}, - {"gc", (PyCFunction)ffi_gc, METH_VARARGS}, - {"getctype", (PyCFunction)ffi_getctype, METH_VARARGS}, - {"load_library", (PyCFunction)ffi_load_library,METH_VARARGS|METH_KEYWORDS}, + {"from_handle",(PyCFunction)ffi_from_handle,METH_O}, + {"gc", (PyCFunction)ffi_gc, METH_VARARGS}, + {"getctype", (PyCFunction)ffi_getctype, METH_VARARGS}, #endif - {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS,ffi_offsetof_doc}, - {"new", (PyCFunction)ffi_new, METH_VARARGS, ffi_new_doc}, + {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS, ffi_offsetof_doc}, + {"new", (PyCFunction)ffi_new, METH_VARARGS, ffi_new_doc}, #if 0 - {"new_handle", (PyCFunction)ffi_new_handle,METH_O}, + {"new_handle", (PyCFunction)ffi_new_handle, METH_O}, #endif - {"sizeof", (PyCFunction)ffi_sizeof, METH_O, ffi_sizeof_doc}, - {"string", (PyCFunction)ffi_string, METH_VARARGS, ffi_string_doc}, - {"typeof", (PyCFunction)ffi_typeof, METH_O, ffi_typeof_doc}, + {"sizeof", (PyCFunction)ffi_sizeof, METH_O, ffi_sizeof_doc}, + {"string", (PyCFunction)ffi_string, METH_VARARGS, ffi_string_doc}, + {"typeof", (PyCFunction)ffi_typeof, METH_O, ffi_typeof_doc}, {NULL} }; diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -266,6 +266,27 @@ } } +/* Same as realize_c_type(), but if it's a function type, return the + corresponding function pointer ctype instead of complaining. 
+*/ +static CTypeDescrObject * +realize_c_type_fn_as_fnptr(builder_c_t *builder, + _cffi_opcode_t opcodes[], int index) +{ + PyObject *x = _realize_c_type_or_func(builder, opcodes, index); + if (x == NULL || CTypeDescr_Check(x)) { + return (CTypeDescrObject *)x; + } + else { + PyObject *y; + assert(PyTuple_Check(x)); + y = PyTuple_GET_ITEM(x, 0); + Py_INCREF(y); + Py_DECREF(x); + return (CTypeDescrObject *)y; + } +} + static PyObject * _realize_c_type_or_func(builder_c_t *builder, _cffi_opcode_t opcodes[], int index) diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -762,4 +762,7 @@ attr = getattr(module.ffi, name) if attr is not getattr(ffi, name, object()): setattr(ffi, name, attr) + def typeof_disabled(*args, **kwds): + raise NotImplementedError + ffi._typeof = typeof_disabled return module.lib diff --git a/_cffi1/test_ffi_obj.py b/_cffi1/test_ffi_obj.py --- a/_cffi1/test_ffi_obj.py +++ b/_cffi1/test_ffi_obj.py @@ -82,3 +82,17 @@ assert ffi.sizeof("int[41]") == 41 * 4 assert ffi.sizeof(ffi.new("int[41]")) == 41 * 4 assert ffi.sizeof(ffi.new("int[]", 41)) == 41 * 4 + +def test_ffi_callback(): + ffi = _cffi1_backend.FFI() + assert ffi.callback("int(int)", lambda x: x + 42)(10) == 52 + assert ffi.callback("int(*)(int)", lambda x: x + 42)(10) == 52 + assert ffi.callback("int(int)", lambda x: x + "", -66)(10) == -66 + assert ffi.callback("int(int)", lambda x: x + "", error=-66)(10) == -66 + +def test_ffi_callback_decorator(): + ffi = _cffi1_backend.FFI() + assert ffi.callback("int(*)(int)")(lambda x: x + 42)(10) == 52 + deco = ffi.callback("int(int)", error=-66) + assert deco(lambda x: x + "")(10) == -66 + assert deco(lambda x: x + 42)(10) == 52 From noreply at buildbot.pypy.org Sat Apr 25 11:04:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 11:04:18 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: remove a test that doesn't make sense Message-ID: 
<20150425090418.BA5BF1C1207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1809:6055d7106150 Date: 2015-04-25 11:04 +0200 http://bitbucket.org/cffi/cffi/changeset/6055d7106150/ Log: remove a test that doesn't make sense diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -898,15 +898,6 @@ lib.cb = my_callback assert lib.foo(4) == 887 -def test_ctypes_backend_forces_generic_engine(): - from cffi.backend_ctypes import CTypesBackend - ffi = FFI(backend=CTypesBackend()) - ffi.cdef("int func(int a);") - lib = ffi.verify("int func(int a) { return a * 42; }") - assert not hasattr(lib, '_cffi_python_module') - assert hasattr(lib, '_cffi_generic_module') - assert lib.func(100) == 4200 - def test_call_with_struct_ptr(): ffi = FFI() ffi.cdef("typedef struct { int x; ...; } foo_t; int foo(foo_t *);") From noreply at buildbot.pypy.org Sat Apr 25 11:04:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 11:04:19 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: ellipsis Message-ID: <20150425090419.C09991C1207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1810:c818c7444718 Date: 2015-04-25 11:04 +0200 http://bitbucket.org/cffi/cffi/changeset/c818c7444718/ Log: ellipsis diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -489,7 +489,7 @@ case _CFFI_OP_FUNCTION: { PyObject *fargs; - int i, base_index, num_args; + int i, base_index, num_args, ellipsis; y = (PyObject *)realize_c_type(builder, opcodes, _CFFI_GETARG(op)); if (y == NULL) @@ -501,6 +501,8 @@ _CFFI_OP_FUNCTION_END) num_args++; + ellipsis = _CFFI_GETARG(opcodes[base_index + num_args]) & 1; + fargs = PyTuple_New(num_args); if (fargs == NULL) { Py_DECREF(y); @@ -517,7 +519,8 @@ PyTuple_SET_ITEM(fargs, i, z); } - z = new_function_type(fargs, (CTypeDescrObject *)y, 0, FFI_DEFAULT_ABI); + z = 
new_function_type(fargs, (CTypeDescrObject *)y, ellipsis, + FFI_DEFAULT_ABI); z = get_unique_type(builder, z); Py_DECREF(fargs); Py_DECREF(y); diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -312,6 +312,8 @@ def _generate_cpy_function_collecttype(self, tp, name): self._do_collect_type(tp.as_raw_function()) + if tp.ellipsis: + self._do_collect_type(tp) def _generate_cpy_function_decl(self, tp, name): assert isinstance(tp, model.FunctionPtrType) @@ -319,7 +321,7 @@ # cannot support vararg functions better than this: check for its # exact type (including the fixed arguments), and build it as a # constant function pointer (no CPython wrapper) - self._generate_cpy_const(False, name, tp) + self._generate_cpy_constant_decl(tp, name) return prnt = self._prnt numargs = len(tp.args) @@ -388,7 +390,8 @@ def _generate_cpy_function_ctx(self, tp, name): if tp.ellipsis: - XXX + self._generate_cpy_constant_ctx(tp, name) + return type_index = self._typesdict[tp.as_raw_function()] numargs = len(tp.args) if numargs == 0: From noreply at buildbot.pypy.org Sat Apr 25 11:15:48 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 11:15:48 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: fix test Message-ID: <20150425091548.855401C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1811:d3fe2c3910db Date: 2015-04-25 11:07 +0200 http://bitbucket.org/cffi/cffi/changeset/d3fe2c3910db/ Log: fix test diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -1027,7 +1027,7 @@ ffi = FFI() ffi.cdef("struct foo_s { long a; ...; };\n" "int (*foo)(struct foo_s);") - e = py.test.raises(TypeError, ffi.verify, """ + lib = ffi.verify(""" struct foo_s { double b; long a; @@ -1037,6 +1037,7 @@ } int (*foo)(struct foo_s s) = &foo1; """) + e = py.test.raises(TypeError, "lib.foo") # lazily msg ='cannot pass as an argument a struct 
that was completed with verify()' assert msg in str(e.value) From noreply at buildbot.pypy.org Sat Apr 25 11:15:49 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 11:15:49 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: fix Message-ID: <20150425091549.7D4B71C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1812:7a25149295e0 Date: 2015-04-25 11:16 +0200 http://bitbucket.org/cffi/cffi/changeset/7a25149295e0/ Log: fix diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -51,7 +51,7 @@ def test_type_table_variadic_function(): check_type_table("int sin(int, ...);", - "(FUNCTION 1)(PRIMITIVE 7)(FUNCTION_END 1)") + "(FUNCTION 1)(PRIMITIVE 7)(FUNCTION_END 1)(POINTER 0)") def test_type_table_array(): check_type_table("int a[100];", From noreply at buildbot.pypy.org Sat Apr 25 11:15:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 11:15:50 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: A clearer way to complain Message-ID: <20150425091550.7CBB71C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1813:bdb89e5e392e Date: 2015-04-25 11:16 +0200 http://bitbucket.org/cffi/cffi/changeset/bdb89e5e392e/ Log: A clearer way to complain diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -142,6 +142,7 @@ # # call generate_cpy_xxx_decl(), for every xxx found from # ffi._parser._declarations. This generates all the functions. + self._seen_constants = set() self._generate("decl") # # the declaration of '_cffi_globals' and '_cffi_typenames' @@ -567,6 +568,11 @@ # constants, declared with "static const ..." 
def _generate_cpy_const(self, is_int, name, tp=None, category='const'): + if (category, name) in self._seen_constants: + raise ffiplatform.VerificationError( + "duplicate declaration of %s '%s'" % (category, name)) + self._seen_constants.add((category, name)) + # prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) if is_int: diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -1,5 +1,5 @@ import sys, py -from cffi import FFI +from cffi import FFI, VerificationError from _cffi1 import recompiler @@ -277,7 +277,13 @@ assert lib.B2 == 1 assert ffi.sizeof("enum e1") == ffi.sizeof("long") assert ffi.sizeof("enum e2") == ffi.sizeof("int") - + +def test_duplicate_enum(): + ffi = FFI() + ffi.cdef("enum e1 { A1, ... }; enum e2 { A1, ... };") + py.test.raises(VerificationError, verify, ffi, 'test_duplicate_enum', + "enum e1 { A1 }; enum e2 { B1 };") + def test_dotdotdot_length_of_array_field(): ffi = FFI() ffi.cdef("struct foo_s { int a[...]; int b[...]; };") From noreply at buildbot.pypy.org Sat Apr 25 11:42:00 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 11:42:00 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: fix: partially drop checking for internal consistency, which was bogus Message-ID: <20150425094200.9F7311C022E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1814:fd28da258879 Date: 2015-04-25 11:38 +0200 http://bitbucket.org/cffi/cffi/changeset/fd28da258879/ Log: fix: partially drop checking for internal consistency, which was bogus diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -448,8 +448,9 @@ PyTuple_SET_ITEM(enumerators, i, tmp); gindex = search_in_globals(&builder->ctx, p, j); + assert(gindex >= 0); g = &builder->ctx.globals[gindex]; - assert(gindex >= 0 && g->type_op == op); + assert(g->type_op == _CFFI_OP(_CFFI_OP_ENUM, -1)); tmp = 
realize_global_int(g); PyTuple_SET_ITEM(enumvalues, i, tmp); diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -619,7 +619,7 @@ def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] - type_op = '_CFFI_OP(_CFFI_OP_ENUM, %d)' % type_index + type_op = '_CFFI_OP(_CFFI_OP_ENUM, -1)' for enumerator in tp.enumerators: self._lsts["global"].append( ' { "%s", _cffi_const_%s, %s },' % (enumerator, enumerator, From noreply at buildbot.pypy.org Sat Apr 25 11:42:01 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 11:42:01 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: more test fixes Message-ID: <20150425094201.BF37F1C022E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1815:c2023e4a4a6d Date: 2015-04-25 11:42 +0200 http://bitbucket.org/cffi/cffi/changeset/c2023e4a4a6d/ Log: more test fixes diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -1139,10 +1139,12 @@ assert lib.BB == 1 def test_typedef_broken_complete_enum(): + # xxx this is broken in old cffis, but works with recompiler.py ffi = FFI() ffi.cdef("typedef enum { AA, BB } enum1_t;") - py.test.raises(VerificationError, ffi.verify, - "typedef enum { AA, CC, BB } enum1_t;") + lib = ffi.verify("typedef enum { AA, CC, BB } enum1_t;") + assert lib.AA == 0 + assert lib.BB == 2 def test_typedef_incomplete_enum(): ffi = FFI() @@ -1723,18 +1725,17 @@ def test_enum_bug118(): maxulong = 256 ** FFI().sizeof("unsigned long") - 1 - for c1, c2, c2c in [(0xffffffff, -1, ''), - (maxulong, -1, ''), - (-1, 0xffffffff, 'U'), - (-1, maxulong, 'UL')]: + for c2, c2c in [(-1, ''), + (-1, ''), + (0xffffffff, 'U'), + (maxulong, 'UL'), + (-maxulong / 3, 'L')]: if c2c and sys.platform == 'win32': continue # enums may always be signed with MSVC ffi = FFI() - ffi.cdef("enum foo_e { AA=%s };" % c1) - e = py.test.raises(VerificationError, ffi.verify, - 
"enum foo_e { AA=%s%s };" % (c2, c2c)) - assert str(e.value) == ('enum foo_e: AA has the real value %d, not %d' - % (c2, c1)) + ffi.cdef("enum foo_e { AA };") + lib = ffi.verify("enum foo_e { AA=%s%s };" % (c2, c2c)) + assert lib.AA == c2 def test_string_to_voidp_arg(): ffi = FFI() From noreply at buildbot.pypy.org Sat Apr 25 12:05:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 12:05:23 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: fixes for anonymous nested structs Message-ID: <20150425100523.B98621C1207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1816:d92f0d7cfb50 Date: 2015-04-25 12:06 +0200 http://bitbucket.org/cffi/cffi/changeset/d92f0d7cfb50/ Log: fixes for anonymous nested structs diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -78,7 +78,7 @@ self._do_collect_type(tp.as_raw_function()) elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None: - for name1, tp1 in zip(tp.fldnames, tp.fldtypes): + for name1, tp1, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): @@ -448,14 +448,15 @@ def _struct_ctx(self, tp, cname, approxname): type_index = self._typesdict[tp] flags = [] - if tp.partial: + if tp.partial or tp.has_anonymous_struct_fields(): flags.append('CT_CUSTOM_FIELD_POS') if isinstance(tp, model.UnionType): flags.append('CT_UNION') flags = ('|'.join(flags)) or '0' if tp.fldtypes is not None: c_field = [approxname] - for fldname, fldtype, fbitsize in tp.enumfields(): + enumfields = list(tp.enumfields()) + for fldname, fldtype, fbitsize in enumfields: fldtype = self._field_type(tp, fldname, fldtype) spaces = " " * len(fldname) # cname is None for _add_missing_struct_unions() only @@ -483,13 +484,13 @@ if cname is None: # unknown name, for _add_missing_struct_unions size_align = (' (size_t)-2, -2, /* unnamed */\n' + ' _cffi_FIELDS_FOR_%s, %d },' % 
(approxname, - len(tp.fldtypes),)) + len(enumfields),)) else: size_align = ('\n' + ' sizeof(%s),\n' % (cname,) + ' offsetof(struct _cffi_align_%s, y),\n'% (approxname,) + ' _cffi_FIELDS_FOR_%s, %d },' % (approxname, - len(tp.fldtypes),)) + len(enumfields),)) else: size_align = ' (size_t)-1, -1, -1, 0 /* opaque */ },' self._lsts["struct_union"].append( diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -1282,6 +1282,8 @@ ffi.cdef(""" struct foo_s { struct { int a; char b; }; union { char c, d; }; }; """) + assert ffi.offsetof("struct foo_s", "c") == 2 * ffi.sizeof("int") + assert ffi.sizeof("struct foo_s") == 3 * ffi.sizeof("int") ffi.verify(""" struct foo_s { struct { int a; char b; }; union { char c, d; }; }; """) @@ -1305,9 +1307,10 @@ py.test.raises(VerificationError, ffi.verify, """ struct foo_s { struct { int a; short b; }; union { char c, d; }; }; """) - py.test.raises(VerificationError, ffi.verify, """ - struct foo_s { struct { int a; char e, b; }; union { char c, d; }; }; - """) + # works fine now + #py.test.raises(VerificationError, ffi.verify, """ + # struct foo_s { struct { int a; char e, b; }; union { char c, d; }; }; + #""") def test_nested_anonymous_struct_inexact_1(): ffi = FFI() diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -271,6 +271,12 @@ self.fldbitsize = fldbitsize self.build_c_name_with_marker() + def has_anonymous_struct_fields(self): + for name, type in zip(self.fldnames, self.fldtypes): + if name == '' and isinstance(type, StructOrUnion): + return True + return False + def enumfields(self): for name, type, bitsize in zip(self.fldnames, self.fldtypes, self.fldbitsize): From noreply at buildbot.pypy.org Sat Apr 25 12:06:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 12:06:08 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: oups Message-ID: <20150425100608.E845F1C1207@cobra.cs.uni-duesseldorf.de> Author: 
Armin Rigo Branch: cffi-1.0 Changeset: r1817:0da180ccac93 Date: 2015-04-25 12:06 +0200 http://bitbucket.org/cffi/cffi/changeset/0da180ccac93/ Log: oups diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -272,6 +272,8 @@ self.build_c_name_with_marker() def has_anonymous_struct_fields(self): + if self.fldtypes is None: + return False for name, type in zip(self.fldnames, self.fldtypes): if name == '' and isinstance(type, StructOrUnion): return True From noreply at buildbot.pypy.org Sat Apr 25 12:06:38 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 12:06:38 +0200 (CEST) Subject: [pypy-commit] cffi default: Add a passing test Message-ID: <20150425100638.880341C1207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1818:b5bfa871d282 Date: 2015-04-25 12:07 +0200 http://bitbucket.org/cffi/cffi/changeset/b5bfa871d282/ Log: Add a passing test diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1388,6 +1388,17 @@ assert p.c == 14 assert p.d == 14 + def test_nested_field_offset_align(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + struct foo_s { + struct { int a; char b; }; + union { char c; }; + }; + """) + assert ffi.offsetof("struct foo_s", "c") == 2 * SIZE_OF_INT + assert ffi.sizeof("struct foo_s") == 3 * SIZE_OF_INT + def test_nested_anonymous_union(self): ffi = FFI(backend=self.Backend()) ffi.cdef(""" diff --git a/testing/test_ctypes.py b/testing/test_ctypes.py --- a/testing/test_ctypes.py +++ b/testing/test_ctypes.py @@ -28,6 +28,9 @@ def test_nested_anonymous_struct(self): py.test.skip("ctypes backend: not supported: nested anonymous struct") + def test_nested_field_offset_align(self): + py.test.skip("ctypes backend: not supported: nested anonymous struct") + def test_nested_anonymous_union(self): py.test.skip("ctypes backend: not supported: nested anonymous union") From noreply at buildbot.pypy.org Sat 
Apr 25 12:12:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 12:12:24 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: fix/skip Message-ID: <20150425101225.0134D1C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1819:4744cc882ca2 Date: 2015-04-25 12:13 +0200 http://bitbucket.org/cffi/cffi/changeset/4744cc882ca2/ Log: fix/skip diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -760,9 +760,8 @@ import imp assert module_name not in sys.modules, "module name conflict: %r" % ( module_name,) - outputfilename = recompile(ffi, module_name, preamble, - tmpdir=str(udir), - *args, **kwds) + kwds.setdefault('tmpdir', str(udir)) + outputfilename = recompile(ffi, module_name, preamble, *args, **kwds) module = imp.load_dynamic(module_name, outputfilename) # # hack hack hack: copy all *bound methods* from module.ffi back to the diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -27,12 +27,11 @@ _verify_counter = 0 def verify(self, preamble='', *args, **kwds): - from _cffi1.udir import udir FFI._verify_counter += 1 return recompiler.verify(self, 'verify%d' % FFI._verify_counter, preamble, *args, extra_compile_args=self._extra_compile_args, - tmp=str(udir), **kwds) + **kwds) class FFI_warnings_not_error(FFI): _extra_compile_args = [] @@ -1390,6 +1389,7 @@ assert lib.foo(100) == 142 def test_relative_to(): + py.test.skip("not available") import tempfile, os from testing.udir import udir tmpdir = tempfile.mkdtemp(dir=str(udir)) From noreply at buildbot.pypy.org Sat Apr 25 12:24:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 12:24:57 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Remove the dependency on the header files in _cffi1 Message-ID: <20150425102457.354F61C12E0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1820:a721107bac60 
Date: 2015-04-25 12:25 +0200 http://bitbucket.org/cffi/cffi/changeset/a721107bac60/ Log: Remove the dependency on the header files in _cffi1 diff --git a/_cffi1/parse_c_type.c b/_cffi1/parse_c_type.c --- a/_cffi1/parse_c_type.c +++ b/_cffi1/parse_c_type.c @@ -2,6 +2,8 @@ #include #include #include + +#define _CFFI_INTERNAL #include "parse_c_type.h" diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -69,17 +69,18 @@ struct _cffi_struct_union_s { const char *name; int type_index; // -> _cffi_types, on a OP_STRUCT_UNION - int flags; // CT_UNION? CT_IS_OPAQUE? + int flags; // CT_UNION? CT_CUSTOM_FIELD_POS? size_t size; int alignment; int first_field_index; // -> _cffi_fields array int num_fields; }; +#ifdef _CFFI_INTERNAL #define CT_UNION 128 -#define CT_IS_OPAQUE 4096 #define CT_CUSTOM_FIELD_POS 32768 /* ^^^ if not CUSTOM_FIELD_POS, complain if fields are not in the "standard layout" and/or if some are missing */ +#endif struct _cffi_field_s { const char *name; @@ -122,8 +123,10 @@ const char *error_message; }; +#ifdef _CFFI_INTERNAL int parse_c_type(struct _cffi_parse_info_s *info, const char *input); int search_in_globals(const struct _cffi_type_context_s *ctx, const char *search, size_t search_len); int search_in_struct_unions(const struct _cffi_type_context_s *ctx, const char *search, size_t search_len); +#endif diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -111,12 +111,21 @@ # a KeyError here is a bug. please report it! 
:-) return self._typesdict[type] + def _rel_readlines(self, filename): + g = open(os.path.join(os.path.dirname(__file__), filename), 'r') + lines = g.readlines() + g.close() + return lines + def write_source_to_f(self, f, preamble): self._f = f prnt = self._prnt # - # first the '#include' - prnt('#include "_cffi_include.h"') + # first the '#include' (actually done by inlining the file's content) + lines = self._rel_readlines('_cffi_include.h') + i = lines.index('#include "parse_c_type.h"\n') + lines[i:i+1] = self._rel_readlines('parse_c_type.h') + prnt(''.join(lines)) # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') @@ -447,12 +456,11 @@ def _struct_ctx(self, tp, cname, approxname): type_index = self._typesdict[tp] - flags = [] + flags = 0 if tp.partial or tp.has_anonymous_struct_fields(): - flags.append('CT_CUSTOM_FIELD_POS') + flags |= 32768 # CT_CUSTOM_FIELD_POS if isinstance(tp, model.UnionType): - flags.append('CT_UNION') - flags = ('|'.join(flags)) or '0' + flags |= 128 # CT_UNION if tp.fldtypes is not None: c_field = [approxname] enumfields = list(tp.enumfields()) @@ -494,7 +502,7 @@ else: size_align = ' (size_t)-1, -1, -1, 0 /* opaque */ },' self._lsts["struct_union"].append( - ' { "%s", %d, %s,' % (tp.name, type_index, flags) + size_align) + ' { "%s", %d, 0x%x,' % (tp.name, type_index, flags) + size_align) self._seen_struct_unions.add(tp) def _add_missing_struct_unions(self): @@ -744,8 +752,6 @@ def _get_extension(module_name, c_file, kwds): source_name = ffiplatform.maybe_relative_path(c_file) - include_dirs = kwds.setdefault('include_dirs', []) - include_dirs.insert(0, '.') # XXX return ffiplatform.get_extension(source_name, module_name, **kwds) def recompile(ffi, module_name, preamble, tmpdir='.', **kwds): diff --git a/_cffi1/test_parse_c_type.py b/_cffi1/test_parse_c_type.py --- a/_cffi1/test_parse_c_type.py +++ b/_cffi1/test_parse_c_type.py @@ -5,9 +5,11 @@ r_macro = 
re.compile(r"#define \w+[(][^\n]*|#include [^\n]*") r_define = re.compile(r"(#define \w+) [^\n]*") +r_ifdefs = re.compile(r"(#ifdef |#endif)[^\n]*") header = open('parse_c_type.h').read() header = r_macro.sub(r"", header) header = r_define.sub(r"\1 ...", header) +header = r_ifdefs.sub(r"", header) ffi = cffi.FFI() ffi.cdef(header) From noreply at buildbot.pypy.org Sat Apr 25 12:35:44 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 12:35:44 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: fix Message-ID: <20150425103544.E19EC1C12E0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1821:17a39c154ed7 Date: 2015-04-25 12:36 +0200 http://bitbucket.org/cffi/cffi/changeset/17a39c154ed7/ Log: fix diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -532,23 +532,19 @@ self._struct_collecttype(tp) _generate_cpy_union_collecttype = _generate_cpy_struct_collecttype + def _struct_names(self, tp): + cname = tp.get_c_name('') + if ' ' in cname: + return cname, cname.replace(' ', '_') + else: + return cname, '_' + cname + def _generate_cpy_struct_decl(self, tp, name): - cname = tp._get_c_name() - if ' ' in cname: - prefix, declname = cname.split(' ', 1) - else: - prefix, declname = '', cname - while declname.startswith('$'): - prefix += 'D' - declname = declname[1:] - approxname = prefix + '_' + declname - assert '$' not in approxname - self._struct_decl(tp, cname, approxname) + self._struct_decl(tp, *self._struct_names(tp)) _generate_cpy_union_decl = _generate_cpy_struct_decl - def _generate_cpy_struct_ctx(self, tp, name, prefix='s'): - cname = tp.get_c_name('') - self._struct_ctx(tp, cname, cname.replace(' ', '_')) + def _generate_cpy_struct_ctx(self, tp, name): + self._struct_ctx(tp, *self._struct_names(tp)) _generate_cpy_union_ctx = _generate_cpy_struct_ctx # ---------- From noreply at buildbot.pypy.org Sat Apr 25 12:47:31 2015 From: noreply at buildbot.pypy.org 
(arigo) Date: Sat, 25 Apr 2015 12:47:31 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: ffi.getctype() Message-ID: <20150425104731.400371C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1822:30d86d0edda2 Date: 2015-04-25 12:48 +0200 http://bitbucket.org/cffi/cffi/changeset/30d86d0edda2/ Log: ffi.getctype() diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -414,8 +414,36 @@ return NULL; } } +#endif -static PyObject *ffi_getctype(ZefFFIObject *self, PyObject *args) +static PyObject *_combine_type_name_l(CTypeDescrObject *ct, + size_t extra_text_len) +{ + size_t base_name_len; + PyObject *result; + char *p; + + base_name_len = strlen(ct->ct_name); + result = PyString_FromStringAndSize(NULL, base_name_len + extra_text_len); + if (result == NULL) + return NULL; + + p = PyString_AS_STRING(result); + memcpy(p, ct->ct_name, ct->ct_name_position); + p += ct->ct_name_position; + p += extra_text_len; + memcpy(p, ct->ct_name + ct->ct_name_position, + base_name_len - ct->ct_name_position); + return result; +} + +PyDoc_STRVAR(ffi_getctype_doc, +"Return a string giving the C type 'cdecl', which may be itself a\n" +"string or a object. 
If 'replace_with' is given, it gives\n" +"extra text to append (or insert for more complicated C types), like a\n" +"variable name, or '*' to get actually the C type 'pointer-to-cdecl'."); + +static PyObject *ffi_getctype(FFIObject *self, PyObject *args) { PyObject *cdecl, *res; char *p, *replace_with = ""; @@ -437,11 +465,11 @@ replace_with_len--; add_paren = (replace_with[0] == '*' && - ((ct->ct_flags & (CT_ARRAY | CT_FUNCTION)) != 0)); + ((ct->ct_flags & CT_ARRAY) != 0)); add_space = (!add_paren && replace_with_len > 0 && replace_with[0] != '[' && replace_with[0] != '('); - res = combine_type_name_l(ct, replace_with_len + add_space + 2 * add_paren); + res = _combine_type_name_l(ct, replace_with_len + add_space + 2*add_paren); if (res == NULL) return NULL; @@ -456,6 +484,7 @@ return res; } +#if 0 static PyObject *ffi_new_handle(ZefFFIObject *self, PyObject *arg) { CTypeDescrObject *ct = ZefNULL->c_type; // @@ -677,8 +706,8 @@ #if 0 {"from_handle",(PyCFunction)ffi_from_handle,METH_O}, {"gc", (PyCFunction)ffi_gc, METH_VARARGS}, - {"getctype", (PyCFunction)ffi_getctype, METH_VARARGS}, #endif + {"getctype", (PyCFunction)ffi_getctype, METH_VARARGS, ffi_getctype_doc}, {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS, ffi_offsetof_doc}, {"new", (PyCFunction)ffi_new, METH_VARARGS, ffi_new_doc}, #if 0 diff --git a/_cffi1/test_ffi_obj.py b/_cffi1/test_ffi_obj.py --- a/_cffi1/test_ffi_obj.py +++ b/_cffi1/test_ffi_obj.py @@ -96,3 +96,8 @@ deco = ffi.callback("int(int)", error=-66) assert deco(lambda x: x + "")(10) == -66 assert deco(lambda x: x + 42)(10) == 52 + +def test_ffi_getctype(): + ffi = _cffi1_backend.FFI() + assert ffi.getctype("int*") == "int *" + assert ffi.getctype("int[5]", "a") == "int a[5]" From noreply at buildbot.pypy.org Sat Apr 25 12:48:34 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 12:48:34 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: more tests Message-ID: <20150425104834.797421C00F7@cobra.cs.uni-duesseldorf.de> 
Author: Armin Rigo Branch: cffi-1.0 Changeset: r1823:c9d8791139c0 Date: 2015-04-25 12:49 +0200 http://bitbucket.org/cffi/cffi/changeset/c9d8791139c0/ Log: more tests diff --git a/_cffi1/test_ffi_obj.py b/_cffi1/test_ffi_obj.py --- a/_cffi1/test_ffi_obj.py +++ b/_cffi1/test_ffi_obj.py @@ -99,5 +99,18 @@ def test_ffi_getctype(): ffi = _cffi1_backend.FFI() + assert ffi.getctype("int") == "int" + assert ffi.getctype("int", 'x') == "int x" assert ffi.getctype("int*") == "int *" - assert ffi.getctype("int[5]", "a") == "int a[5]" + assert ffi.getctype("int*", '') == "int *" + assert ffi.getctype("int*", 'x') == "int * x" + assert ffi.getctype("int", '*') == "int *" + assert ffi.getctype("int", ' * x ') == "int * x" + assert ffi.getctype(ffi.typeof("int*"), '*') == "int * *" + assert ffi.getctype("int", '[5]') == "int[5]" + assert ffi.getctype("int[5]", '[6]') == "int[6][5]" + assert ffi.getctype("int[5]", '(*)') == "int(*)[5]" + # special-case for convenience: automatically put '()' around '*' + assert ffi.getctype("int[5]", '*') == "int(*)[5]" + assert ffi.getctype("int[5]", '*foo') == "int(*foo)[5]" + assert ffi.getctype("int[5]", ' ** foo ') == "int(** foo)[5]" From noreply at buildbot.pypy.org Sat Apr 25 14:42:53 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 14:42:53 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: test and fix Message-ID: <20150425124253.428D01C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1824:38f60cc08b65 Date: 2015-04-25 13:02 +0200 http://bitbucket.org/cffi/cffi/changeset/38f60cc08b65/ Log: test and fix diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -569,7 +569,7 @@ &cdecl, &python_callable, &error)) return NULL; - cdecl = (PyObject *)_ffi_type(self, cdecl, ACCEPT_STRING | ACCEPT_CDATA | + cdecl = (PyObject *)_ffi_type(self, cdecl, ACCEPT_STRING | ACCEPT_CTYPE | CONSIDER_FN_AS_FNPTR); if (cdecl == NULL) return NULL; diff --git 
a/_cffi1/test_ffi_obj.py b/_cffi1/test_ffi_obj.py --- a/_cffi1/test_ffi_obj.py +++ b/_cffi1/test_ffi_obj.py @@ -92,7 +92,7 @@ def test_ffi_callback_decorator(): ffi = _cffi1_backend.FFI() - assert ffi.callback("int(*)(int)")(lambda x: x + 42)(10) == 52 + assert ffi.callback(ffi.typeof("int(*)(int)"))(lambda x: x + 42)(10) == 52 deco = ffi.callback("int(int)", error=-66) assert deco(lambda x: x + "")(10) == -66 assert deco(lambda x: x + 42)(10) == 52 From noreply at buildbot.pypy.org Sat Apr 25 14:42:54 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 14:42:54 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: ffi.addressof() Message-ID: <20150425124254.56DFB1C00F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1825:d073836b9835 Date: 2015-04-25 14:43 +0200 http://bitbucket.org/cffi/cffi/changeset/d073836b9835/ Log: ffi.addressof() diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -336,85 +336,65 @@ return PyInt_FromSsize_t(offset); } -#if 0 -static PyObject *ffi_addressof(ZefFFIObject *self, PyObject *args) +PyDoc_STRVAR(ffi_addressof_doc, +"With a single arg, return the address of a .\n" +"If 'fields_or_indexes' are given, returns the address of that field or\n" +"array item in the structure or array, recursively in case of nested\n" +"structures."); + +static PyObject *ffi_addressof(FFIObject *self, PyObject *args) { - PyObject *obj; - char *fieldname = NULL; + PyObject *arg, *z, *result; + CTypeDescrObject *ct; + Py_ssize_t i, offset = 0; + int accepted_flags; - if (!PyArg_ParseTuple(args, "O|z:addressof", &obj, &fieldname)) + if (PyTuple_Size(args) < 1) { + PyErr_SetString(PyExc_TypeError, + "addressof() expects at least 1 argument"); + return NULL; + } + + arg = PyTuple_GET_ITEM(args, 0); + ct = _ffi_type(self, arg, ACCEPT_CDATA); + if (ct == NULL) return NULL; - if (CData_Check(obj)) { - CDataObject *cd = (CDataObject *)obj; - CTypeDescrObject *ct; - Py_ssize_t 
offset; - - ct = cd->c_type; - if (fieldname != NULL && ct->ct_flags & CT_POINTER) - ct = ct->ct_itemdescr; - - if (!(ct->ct_flags & (CT_STRUCT|CT_UNION))) { - PyErr_Format(PyExc_TypeError, - "expected a struct or union cdata, got '%s'", - ct->ct_name); + if (PyTuple_GET_SIZE(args) == 1) { + accepted_flags = CT_STRUCT | CT_UNION | CT_ARRAY; + if ((ct->ct_flags & accepted_flags) == 0) { + PyErr_SetString(PyExc_TypeError, + "expected a cdata struct/union/array object"); return NULL; } - - if (fieldname == NULL) { - offset = 0; - } - else { - CFieldObject *cf = _ffi_field(ct, fieldname); - if (cf == NULL) - return NULL; - offset = cf->cf_offset; - ct = cf->cf_type; - } - ct = fetch_pointer_type(self->types_dict, ct); - if (ct == NULL) - return NULL; - return new_simple_cdata(cd->c_data + offset, ct); } - else if (ZefLib_Check(obj)) { - PyObject *attr, *name; - char *reason; - - if (fieldname == NULL) { - PyErr_SetString(PyExc_TypeError, "addressof(Lib, fieldname) " - "cannot be used with only one argument"); + else { + accepted_flags = CT_STRUCT | CT_UNION | CT_ARRAY | CT_POINTER; + if ((ct->ct_flags & accepted_flags) == 0) { + PyErr_SetString(PyExc_TypeError, + "expected a cdata struct/union/array/pointer object"); return NULL; } - name = PyString_FromString(fieldname); - if (name == NULL) - return NULL; - attr = lib_findattr((ZefLibObject *)obj, name, ZefError); - Py_DECREF(name); - if (attr == NULL) - return NULL; + for (i = 1; i < PyTuple_GET_SIZE(args); i++) { + Py_ssize_t ofs1; + ct = direct_typeoffsetof(ct, PyTuple_GET_ITEM(args, i), + i > 1, &ofs1); + if (ct == NULL) + return NULL; + offset += ofs1; + } + } - if (ZefGlobSupport_Check(attr)) { - return addressof_global_var((ZefGlobSupportObject *)attr); - } + z = new_pointer_type(ct); + z = get_unique_type(self->types_builder, z); + if (z == NULL) + return NULL; - if (PyCFunction_Check(attr)) - reason = "declare that function as a function pointer instead"; - else - reason = "numeric constants don't have 
addresses"; - - PyErr_Format(PyExc_TypeError, - "cannot take the address of '%s' (%s)", - fieldname, reason); - return NULL; - } - else { - PyErr_SetString(PyExc_TypeError, "addressof() first argument must be " - "a cdata struct or union, a pointer to one, or a Lib " - "object"); - return NULL; - } + result = new_simple_cdata(((CDataObject *)arg)->c_data + offset, + (CTypeDescrObject *)z); + Py_DECREF(z); + return result; } -#endif static PyObject *_combine_type_name_l(CTypeDescrObject *ct, size_t extra_text_len) @@ -695,28 +675,26 @@ } static PyMethodDef ffi_methods[] = { - {"__set_types",(PyCFunction)ffi__set_types, METH_VARARGS}, + {"__set_types",(PyCFunction)ffi__set_types, METH_VARARGS}, + {"addressof", (PyCFunction)ffi_addressof, METH_VARARGS, ffi_addressof_doc}, + {"alignof", (PyCFunction)ffi_alignof, METH_O, ffi_alignof_doc}, + {"callback", (PyCFunction)ffi_callback, METH_VARARGS | + METH_KEYWORDS,ffi_callback_doc}, + {"cast", (PyCFunction)ffi_cast, METH_VARARGS, ffi_cast_doc}, #if 0 - {"addressof", (PyCFunction)ffi_addressof, METH_VARARGS}, + {"from_handle",(PyCFunction)ffi_from_handle,METH_O}, + {"gc", (PyCFunction)ffi_gc, METH_VARARGS}, #endif - {"alignof", (PyCFunction)ffi_alignof, METH_O, ffi_alignof_doc}, - {"callback", (PyCFunction)ffi_callback, METH_VARARGS | - METH_KEYWORDS,ffi_callback_doc}, - {"cast", (PyCFunction)ffi_cast, METH_VARARGS, ffi_cast_doc}, + {"getctype", (PyCFunction)ffi_getctype, METH_VARARGS, ffi_getctype_doc}, + {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS, ffi_offsetof_doc}, + {"new", (PyCFunction)ffi_new, METH_VARARGS, ffi_new_doc}, #if 0 - {"from_handle",(PyCFunction)ffi_from_handle,METH_O}, - {"gc", (PyCFunction)ffi_gc, METH_VARARGS}, + {"new_handle", (PyCFunction)ffi_new_handle, METH_O}, #endif - {"getctype", (PyCFunction)ffi_getctype, METH_VARARGS, ffi_getctype_doc}, - {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS, ffi_offsetof_doc}, - {"new", (PyCFunction)ffi_new, METH_VARARGS, ffi_new_doc}, -#if 0 - 
{"new_handle", (PyCFunction)ffi_new_handle, METH_O}, -#endif - {"sizeof", (PyCFunction)ffi_sizeof, METH_O, ffi_sizeof_doc}, - {"string", (PyCFunction)ffi_string, METH_VARARGS, ffi_string_doc}, - {"typeof", (PyCFunction)ffi_typeof, METH_O, ffi_typeof_doc}, - {NULL} + {"sizeof", (PyCFunction)ffi_sizeof, METH_O, ffi_sizeof_doc}, + {"string", (PyCFunction)ffi_string, METH_VARARGS, ffi_string_doc}, + {"typeof", (PyCFunction)ffi_typeof, METH_O, ffi_typeof_doc}, + {NULL} }; static PyGetSetDef ffi_getsets[] = { diff --git a/_cffi1/test_ffi_obj.py b/_cffi1/test_ffi_obj.py --- a/_cffi1/test_ffi_obj.py +++ b/_cffi1/test_ffi_obj.py @@ -114,3 +114,10 @@ assert ffi.getctype("int[5]", '*') == "int(*)[5]" assert ffi.getctype("int[5]", '*foo') == "int(*foo)[5]" assert ffi.getctype("int[5]", ' ** foo ') == "int(** foo)[5]" + +def test_addressof(): + ffi = _cffi1_backend.FFI() + a = ffi.new("int[10]") + b = ffi.addressof(a, 5) + b[2] = -123 + assert a[7] == -123 diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -235,6 +235,12 @@ # assert ffi.offsetof("struct foo_s", "a") == 0 assert ffi.offsetof("struct foo_s", "b") == 4 + # + py.test.raises(TypeError, ffi.addressof, p) + assert ffi.addressof(p[0]) == p + assert ffi.typeof(ffi.addressof(p[0])) is ffi.typeof("struct foo_s *") + assert ffi.typeof(ffi.addressof(p, "b")) is ffi.typeof("int *") + assert ffi.addressof(p, "b")[0] == p.b def test_verify_exact_field_offset(): ffi = FFI() From noreply at buildbot.pypy.org Sat Apr 25 15:05:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 15:05:51 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: ffi.new_handle(), ffi.from_handle() Message-ID: <20150425130551.C40D81C03F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1826:e76db3405d4b Date: 2015-04-25 15:04 +0200 http://bitbucket.org/cffi/cffi/changeset/e76db3405d4b/ Log: ffi.new_handle(), ffi.from_handle() 
diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -464,17 +464,21 @@ return res; } -#if 0 -static PyObject *ffi_new_handle(ZefFFIObject *self, PyObject *arg) +PyDoc_STRVAR(ffi_new_handle_doc, +"Return a non-NULL cdata of type 'void *' that contains an opaque\n" +"reference to the argument, which can be any Python object. To cast it\n" +"back to the original object, use from_handle(). You must keep alive\n" +"the cdata object returned by new_handle()!"); + +static PyObject *ffi_new_handle(FFIObject *self, PyObject *arg) { - CTypeDescrObject *ct = ZefNULL->c_type; // CDataObject *cd; cd = (CDataObject *)PyObject_GC_New(CDataObject, &CDataOwningGC_Type); if (cd == NULL) return NULL; - Py_INCREF(ct); - cd->c_type = ct; + Py_INCREF(g_ct_voidp); // + cd->c_type = g_ct_voidp; Py_INCREF(arg); cd->c_data = ((char *)arg) - 42; cd->c_weakreflist = NULL; @@ -482,6 +486,12 @@ return (PyObject *)cd; } +PyDoc_STRVAR(ffi_from_handle_doc, +"Cast a 'void *' back to a Python object. Must be used *only* on the\n" +"pointers returned by new_handle(), and *only* as long as the exact\n" +"cdata object returned by new_handle() is still alive (somewhere else\n" +"in the program). 
Failure to follow these rules will crash."); + static PyObject *ffi_from_handle(PyObject *self, PyObject *arg) { CTypeDescrObject *ct; @@ -509,6 +519,7 @@ return x; } +#if 0 static PyObject *ffi_gc(ZefFFIObject *self, PyObject *args) { CDataObject *cd; @@ -681,16 +692,14 @@ {"callback", (PyCFunction)ffi_callback, METH_VARARGS | METH_KEYWORDS,ffi_callback_doc}, {"cast", (PyCFunction)ffi_cast, METH_VARARGS, ffi_cast_doc}, + {"from_handle",(PyCFunction)ffi_from_handle,METH_O, ffi_from_handle_doc}, #if 0 - {"from_handle",(PyCFunction)ffi_from_handle,METH_O}, {"gc", (PyCFunction)ffi_gc, METH_VARARGS}, #endif {"getctype", (PyCFunction)ffi_getctype, METH_VARARGS, ffi_getctype_doc}, {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS, ffi_offsetof_doc}, {"new", (PyCFunction)ffi_new, METH_VARARGS, ffi_new_doc}, -#if 0 - {"new_handle", (PyCFunction)ffi_new_handle, METH_O}, -#endif + {"new_handle", (PyCFunction)ffi_new_handle, METH_O, ffi_new_handle_doc}, {"sizeof", (PyCFunction)ffi_sizeof, METH_O, ffi_sizeof_doc}, {"string", (PyCFunction)ffi_string, METH_VARARGS, ffi_string_doc}, {"typeof", (PyCFunction)ffi_typeof, METH_O, ffi_typeof_doc}, diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -8,6 +8,7 @@ static PyObject *all_primitives[_CFFI__NUM_PRIM]; static PyObject *global_types_dict; +static CTypeDescrObject *g_ct_voidp; static PyObject *build_primitive_type(int num); /* forward */ @@ -39,9 +40,9 @@ Py_DECREF(ct2); return -1; } + g_ct_voidp = (CTypeDescrObject *)ct2; pnull = new_simple_cdata(NULL, (CTypeDescrObject *)ct2); - Py_DECREF(ct2); if (pnull == NULL) return -1; err = PyDict_SetItemString(ffi_type_dict, "NULL", pnull); diff --git a/_cffi1/test_ffi_obj.py b/_cffi1/test_ffi_obj.py --- a/_cffi1/test_ffi_obj.py +++ b/_cffi1/test_ffi_obj.py @@ -121,3 +121,12 @@ b = ffi.addressof(a, 5) b[2] = -123 assert a[7] == -123 + +def test_handle(): + ffi = _cffi1_backend.FFI() + x = [2, 4, 6] + xp = 
ffi.new_handle(x) + assert ffi.typeof(xp) == ffi.typeof("void *") + assert ffi.from_handle(xp) is x + yp = ffi.new_handle([6, 4, 2]) + assert ffi.from_handle(yp) == [6, 4, 2] From noreply at buildbot.pypy.org Sat Apr 25 15:05:52 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 25 Apr 2015 15:05:52 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: skip some tests Message-ID: <20150425130552.DA18B1C03F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1827:d1015f99712f Date: 2015-04-25 15:06 +0200 http://bitbucket.org/cffi/cffi/changeset/d1015f99712f/ Log: skip some tests diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -1540,6 +1540,7 @@ py.test.raises(TypeError, lib.sum_coord, res2) def test_callback_in_thread(): + py.test.skip("adapt or remove") if sys.platform == 'win32': py.test.skip("pthread only") import os, subprocess, imp @@ -1550,6 +1551,7 @@ assert result == 0 def test_keepalive_lib(): + py.test.skip("adapt or remove") ffi = FFI() ffi.cdef("int foobar(void);") lib = ffi.verify("int foobar(void) { return 42; }") @@ -1563,6 +1565,7 @@ assert func() == 42 def test_keepalive_ffi(): + py.test.skip("adapt or remove") ffi = FFI() ffi.cdef("int foobar(void);") lib = ffi.verify("int foobar(void) { return 42; }") From noreply at buildbot.pypy.org Sat Apr 25 19:58:40 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 25 Apr 2015 19:58:40 +0200 (CEST) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <20150425175840.186841C0228@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r76933:98e1cf6b4854 Date: 2015-04-25 20:58 +0300 http://bitbucket.org/pypy/pypy/changeset/98e1cf6b4854/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -67,3 +67,7 @@ .. 
branch: object-dtype2 Extend numpy dtypes to allow using objects with associated garbage collection hook + +.. branch: vmprof2 +Add backend support for vmprof - a lightweight statistical profiler - +to linux64, see client at https://vmprof.readthedocs.org From noreply at buildbot.pypy.org Sat Apr 25 21:02:43 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 25 Apr 2015 21:02:43 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: Add .elsize-dependent checks to dtype.can_cast_to() Message-ID: <20150425190243.A0E371C03F7@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76934:6c7224a53fc9 Date: 2015-04-25 20:02 +0100 http://bitbucket.org/pypy/pypy/changeset/6c7224a53fc9/ Log: Add .elsize-dependent checks to dtype.can_cast_to() diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -40,6 +40,8 @@ return out +_REQ_STRLEN = [0, 3, 5, 10, 10, 20, 20, 20, 20] # data for can_cast_to() + class W_Dtype(W_Root): _immutable_fields_ = [ "itemtype?", "w_box_type", "byteorder?", "names?", "fields?", @@ -94,7 +96,37 @@ return self.itemtype.box_complex(real, imag) def can_cast_to(self, other): - return self.itemtype.can_cast_to(other.itemtype) + result = self.itemtype.can_cast_to(other.itemtype) + if result: + if self.num == NPY.STRING: + if other.num == NPY.STRING: + return self.elsize <= other.elsize + elif other.num == NPY.UNICODE: + return self.elsize * 4 <= other.elsize + elif self.num == NPY.UNICODE and other.num == NPY.UNICODE: + return self.elsize <= other.elsize + elif other.num in (NPY.STRING, NPY.UNICODE): + if other.num == NPY.STRING: + char_size = 1 + else: # NPY.UNICODE + char_size = 4 + if other.elsize == 0: + return True + if self.is_bool(): + return other.elsize >= 5 * char_size + elif self.is_unsigned(): + if self.elsize > 8 or self.elsize < 0: + return False + else: + return (other.elsize >= + 
_REQ_STRLEN[self.elsize] * char_size) + elif self.is_signed(): + if self.elsize > 8 or self.elsize < 0: + return False + else: + return (other.elsize >= + (_REQ_STRLEN[self.elsize] + 1) * char_size) + return result def coerce(self, space, w_item): return self.itemtype.coerce(space, self, w_item) From noreply at buildbot.pypy.org Sat Apr 25 21:17:14 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 25 Apr 2015 21:17:14 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: Handle None in np.can_cast() Message-ID: <20150425191715.006941C00F7@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76935:ac2f88bfc375 Date: 2015-04-25 20:17 +0100 http://bitbucket.org/pypy/pypy/changeset/ac2f88bfc375/ Log: Handle None in np.can_cast() diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -304,8 +304,12 @@ @unwrap_spec(casting=str) def can_cast(space, w_from, w_totype, casting='safe'): - target = as_dtype(space, w_totype) - origin = as_dtype(space, w_from) # XXX + try: + target = as_dtype(space, w_totype, allow_None=False) + origin = as_dtype(space, w_from, allow_None=False) # XXX + except TypeError: + raise oefmt(space.w_TypeError, + "did not understand one of the types; 'None' not accepted") return space.wrap(can_cast_type(space, origin, target, casting)) kind_ordering = { @@ -331,8 +335,10 @@ return origin.can_cast_to(target) -def as_dtype(space, w_arg): +def as_dtype(space, w_arg, allow_None=True): # roughly equivalent to CNumPy's PyArray_DescrConverter2 + if not allow_None and space.is_none(w_arg): + raise TypeError("Cannot create dtype from None here") if isinstance(w_arg, W_NDimArray): return w_arg.get_dtype() elif isinstance(w_arg, W_GenericBox) or ( From noreply at buildbot.pypy.org Sat Apr 25 22:21:14 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 25 Apr 2015 22:21:14 +0200 (CEST) Subject: [pypy-commit] pypy 
default: fix so tests pass, even though vmprof is not supported in win32 Message-ID: <20150425202114.09F871C022E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r76936:c0a4171c4d0f Date: 2015-04-25 23:21 +0300 http://bitbucket.org/pypy/pypy/changeset/c0a4171c4d0f/ Log: fix so tests pass, even though vmprof is not supported in win32 diff --git a/rpython/jit/backend/llsupport/codemap.py b/rpython/jit/backend/llsupport/codemap.py --- a/rpython/jit/backend/llsupport/codemap.py +++ b/rpython/jit/backend/llsupport/codemap.py @@ -25,6 +25,10 @@ srcdir = os.path.join(os.path.dirname(__file__), 'src') +libraries = [] +if os.name == 'nt': + libraries.append('Kernel32') + eci = ExternalCompilationInfo(post_include_bits=[""" RPY_EXTERN long pypy_jit_codemap_add(unsigned long addr, unsigned int machine_code_size, @@ -43,7 +47,7 @@ """], separate_module_sources=[ open(os.path.join(srcdir, 'skiplist.c'), 'r').read() + open(os.path.join(srcdir, 'codemap.c'), 'r').read() -], include_dirs=[cdir]) +], include_dirs=[cdir], libraries=libraries) def llexternal(name, args, res): return rffi.llexternal(name, args, res, compilation_info=eci, diff --git a/rpython/jit/backend/llsupport/src/codemap.c b/rpython/jit/backend/llsupport/src/codemap.c --- a/rpython/jit/backend/llsupport/src/codemap.c +++ b/rpython/jit/backend/llsupport/src/codemap.c @@ -15,7 +15,7 @@ else __sync_lock_release(&pypy_codemap_currently_invalid); #else - InterlockedExchange((long volatile *)&pypy_codemap_currently_invalid, + _InterlockedExchange((long volatile *)&pypy_codemap_currently_invalid, (long)value); #endif } From noreply at buildbot.pypy.org Sun Apr 26 04:03:31 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 26 Apr 2015 04:03:31 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: Add stub implementation for can_cast(, dtype) Message-ID: <20150426020331.D4D101C1207@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76937:6aaed87e0b78 Date: 2015-04-26 03:03 +0100 
http://bitbucket.org/pypy/pypy/changeset/6aaed87e0b78/ Log: Add stub implementation for can_cast(, dtype) diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -306,7 +306,18 @@ def can_cast(space, w_from, w_totype, casting='safe'): try: target = as_dtype(space, w_totype, allow_None=False) - origin = as_dtype(space, w_from, allow_None=False) # XXX + except TypeError: + raise oefmt(space.w_TypeError, + "did not understand one of the types; 'None' not accepted") + if isinstance(w_from, W_NDimArray): + return space.wrap(can_cast_array(space, w_from, target, casting)) + elif is_scalar_w(space, w_from): + w_scalar = as_scalar(space, w_from) + w_arr = W_NDimArray.from_scalar(space, w_scalar) + return space.wrap(can_cast_array(space, w_arr, target, casting)) + + try: + origin = as_dtype(space, w_from, allow_None=False) except TypeError: raise oefmt(space.w_TypeError, "did not understand one of the types; 'None' not accepted") @@ -334,6 +345,30 @@ else: return origin.can_cast_to(target) +def can_cast_array(space, w_from, target, casting): + origin = w_from.get_dtype() + if w_from.is_scalar(): + return can_cast_scalar( + space, origin, w_from.get_scalar_value(), target, casting) + else: + return can_cast_type(space, origin, target, casting) + +def can_cast_scalar(space, from_type, value, target, casting): + if from_type == target or casting == 'unsafe': + return True + if not from_type.is_number() or casting in ('no', 'equiv'): + return can_cast_type(space, from_type, target, casting) + if not from_type.is_native(): + value = value.descr_byteswap(space) + return can_cast_type(space, from_type, target, casting) # XXX: stub impl + +def is_scalar_w(space, w_arg): + return (isinstance(w_arg, W_GenericBox) or + space.isinstance_w(w_arg, space.w_int) or + space.isinstance_w(w_arg, space.w_float) or + space.isinstance_w(w_arg, space.w_complex) or + 
space.isinstance_w(w_arg, space.w_long) or + space.isinstance_w(w_arg, space.w_bool)) def as_dtype(space, w_arg, allow_None=True): # roughly equivalent to CNumPy's PyArray_DescrConverter2 @@ -341,13 +376,12 @@ raise TypeError("Cannot create dtype from None here") if isinstance(w_arg, W_NDimArray): return w_arg.get_dtype() - elif isinstance(w_arg, W_GenericBox) or ( - space.isinstance_w(w_arg, space.w_int) or - space.isinstance_w(w_arg, space.w_float) or - space.isinstance_w(w_arg, space.w_complex) or - space.isinstance_w(w_arg, space.w_long) or - space.isinstance_w(w_arg, space.w_bool)): + elif is_scalar_w(space, w_arg): return ufuncs.find_dtype_for_scalar(space, w_arg) else: return space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_arg)) + +def as_scalar(space, w_obj): + dtype = ufuncs.find_dtype_for_scalar(space, w_obj) + return dtype.coerce(space, w_obj) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -150,6 +150,9 @@ def is_complex(self): return self.kind == NPY.COMPLEXLTR + def is_number(self): + return self.is_int() or self.is_float() or self.is_complex() + def is_str(self): return self.num == NPY.STRING diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -4047,3 +4047,9 @@ raises(TypeError, np.can_cast, 'i4', None) raises(TypeError, np.can_cast, None, 'i4') + + def test_can_cast_scalar(self): + import numpy as np + assert np.can_cast(127, np.int8) + assert not np.can_cast(128, np.int8) + assert np.can_cast(128, np.int16) From noreply at buildbot.pypy.org Sun Apr 26 09:34:05 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 09:34:05 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: tweaks to tests Message-ID: 
<20150426073405.6D5791C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1828:9f5ea786ded2 Date: 2015-04-26 09:23 +0200 http://bitbucket.org/cffi/cffi/changeset/9f5ea786ded2/ Log: tweaks to tests diff --git a/_cffi1/support.py b/_cffi1/support.py new file mode 100644 --- /dev/null +++ b/_cffi1/support.py @@ -0,0 +1,6 @@ + +class U(object): + def __add__(self, other): + return eval('u'+repr(other).replace(r'\\u', r'\u') + .replace(r'\\U', r'\U')) +u = U() diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -190,7 +190,6 @@ ffi = FFI() ffi.cdef("static double *const FOOBAR;") lib = verify(ffi, 'test_constant_ptr', "#define FOOBAR NULL") - py.test.skip("XXX in-progress:") assert lib.FOOBAR == ffi.NULL assert ffi.typeof(lib.FOOBAR) == ffi.typeof("double *") diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -1,6 +1,7 @@ import sys, math, py from cffi import FFI, VerificationError, VerificationMissing, model -from _cffi1 import recompiler +from . 
import recompiler +from .support import * import _cffi_backend lib_m = ['m'] @@ -36,12 +37,6 @@ class FFI_warnings_not_error(FFI): _extra_compile_args = [] -class U(object): - def __add__(self, other): - return eval('u'+repr(other).replace(r'\\u', r'\u') - .replace(r'\\U', r'\U')) -u = U() - def test_missing_function(ffi=None): # uses the FFI hacked above with '-Werror' @@ -1540,7 +1535,7 @@ py.test.raises(TypeError, lib.sum_coord, res2) def test_callback_in_thread(): - py.test.skip("adapt or remove") + py.test.xfail("adapt or remove") if sys.platform == 'win32': py.test.skip("pthread only") import os, subprocess, imp @@ -1551,7 +1546,7 @@ assert result == 0 def test_keepalive_lib(): - py.test.skip("adapt or remove") + py.test.xfail("adapt or remove") ffi = FFI() ffi.cdef("int foobar(void);") lib = ffi.verify("int foobar(void) { return 42; }") @@ -1565,7 +1560,7 @@ assert func() == 42 def test_keepalive_ffi(): - py.test.skip("adapt or remove") + py.test.xfail("adapt or remove") ffi = FFI() ffi.cdef("int foobar(void);") lib = ffi.verify("int foobar(void) { return 42; }") From noreply at buildbot.pypy.org Sun Apr 26 09:34:06 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 09:34:06 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: test and fix Message-ID: <20150426073406.86CB71C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1829:60463383e5bc Date: 2015-04-26 09:34 +0200 http://bitbucket.org/cffi/cffi/changeset/60463383e5bc/ Log: test and fix diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -460,9 +460,13 @@ } PyObject *args = NULL; - if (!PyErr_Occurred()) - args = Py_BuildValue("(sOOO)", e->name, enumerators, + if (!PyErr_Occurred()) { + char *name = alloca(6 + strlen(e->name)); + strcpy(name, "enum "); + strcat(name, e->name); + args = Py_BuildValue("(sOOO)", name, enumerators, enumvalues, basetd); + } Py_DECREF(enumerators); 
Py_DECREF(enumvalues); if (args == NULL) diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -282,6 +282,7 @@ assert lib.B2 == 1 assert ffi.sizeof("enum e1") == ffi.sizeof("long") assert ffi.sizeof("enum e2") == ffi.sizeof("int") + assert repr(ffi.cast("enum e1", 0)) == "" def test_duplicate_enum(): ffi = FFI() @@ -348,6 +349,7 @@ "typedef struct { long b; int hidden, a; } foo_t;") p = ffi.new("foo_t *", {'b': 42}) assert p.b == 42 + assert repr(p).startswith("" # ffi = FFI() ffi.cdef("typedef enum { AA=%d } e1;" % sys.maxint) From noreply at buildbot.pypy.org Sun Apr 26 10:13:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 10:13:37 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: give the expected name to $struct and $enums Message-ID: <20150426081337.B718A1C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1830:c92ee3ee4642 Date: 2015-04-26 09:58 +0200 http://bitbucket.org/cffi/cffi/changeset/c92ee3ee4642/ Log: give the expected name to $struct and $enums diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -288,6 +288,38 @@ } } +static void _realize_name(char *target, const char *prefix, const char *srcname) +{ + /* "xyz" => "struct xyz" + "$xyz" => "xyz" + */ + if (srcname[0] == '$' && srcname[1] != '$') { + strcpy(target, &srcname[1]); + } + else { + strcpy(target, prefix); + strcat(target, srcname); + } +} + +static void _unrealize_name(char *target, const char *srcname) +{ + /* reverse of _realize_name() */ + if (strncmp(srcname, "struct ", 7) == 0) { + strcpy(target, &srcname[7]); + } + else if (strncmp(srcname, "union ", 6) == 0) { + strcpy(target, &srcname[6]); + } + else if (strncmp(srcname, "enum ", 5) == 0) { + strcpy(target, &srcname[5]); + } + else { + strcpy(target, "$"); + strcat(target, srcname); + } +} + static PyObject * 
_realize_c_type_or_func(builder_c_t *builder, _cffi_opcode_t opcodes[], int index) @@ -354,17 +386,11 @@ Py_INCREF(x); } else { - int flags; + int flags = (s->flags & CT_UNION) ? CT_UNION : CT_STRUCT; char *name = alloca(8 + strlen(s->name)); - if (s->flags & CT_UNION) { - strcpy(name, "union "); - flags = CT_UNION; - } - else { - strcpy(name, "struct "); - flags = CT_STRUCT; - } - strcat(name, s->name); + _realize_name(name, + (s->flags & CT_UNION) ? "union " : "struct ", + s->name); x = new_struct_or_union_type(name, flags); CTypeDescrObject *ct = NULL; @@ -462,8 +488,7 @@ PyObject *args = NULL; if (!PyErr_Occurred()) { char *name = alloca(6 + strlen(e->name)); - strcpy(name, "enum "); - strcat(name, e->name); + _realize_name(name, "enum ", e->name); args = Py_BuildValue("(sOOO)", name, enumerators, enumvalues, basetd); } @@ -577,11 +602,8 @@ builder_c_t *builder = ct->ct_extra; assert(builder != NULL); - char *p = ct->ct_name; - if (memcmp(p, "struct ", 7) == 0) - p += 7; - else if (memcmp(p, "union ", 6) == 0) - p += 6; + char *p = alloca(2 + strlen(ct->ct_name)); + _unrealize_name(p, ct->ct_name); int n = search_in_struct_unions(&builder->ctx, p, strlen(p)); if (n < 0) diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -349,7 +349,7 @@ "typedef struct { long b; int hidden, a; } foo_t;") p = ffi.new("foo_t *", {'b': 42}) assert p.b == 42 - assert repr(p).startswith("" + assert repr(ffi.cast("e1", 2)) == "" # ffi = FFI() ffi.cdef("typedef enum { AA=%d } e1;" % sys.maxint) From noreply at buildbot.pypy.org Sun Apr 26 10:13:38 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 10:13:38 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: in-progress: import backend_tests Message-ID: <20150426081338.EDE6C1C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1831:48276f27dd98 Date: 2015-04-26 10:14 +0200 
http://bitbucket.org/cffi/cffi/changeset/48276f27dd98/ Log: in-progress: import backend_tests diff --git a/testing/backend_tests.py b/_cffi1/test_new_ffi_1.py copy from testing/backend_tests.py copy to _cffi1/test_new_ffi_1.py --- a/testing/backend_tests.py +++ b/_cffi1/test_new_ffi_1.py @@ -1,8 +1,11 @@ import py -import platform +import platform, imp import sys, ctypes -from cffi import FFI, CDefError, FFIError -from testing.support import * +import cffi +from cffi import CDefError, FFIError +from .udir import udir +from .recompiler import recompile +from .support import * SIZE_OF_INT = ctypes.sizeof(ctypes.c_int) SIZE_OF_LONG = ctypes.sizeof(ctypes.c_long) @@ -11,10 +14,75 @@ SIZE_OF_WCHAR = ctypes.sizeof(ctypes.c_wchar) -class BackendTests: +def setup_module(): + global ffi + ffi1 = cffi.FFI() + DEFS = r""" + struct repr { short a, b, c; }; + struct simple { int a; short b, c; }; + struct array { int a[2]; char b[3]; }; + struct recursive { int value; struct recursive *next; }; + union simple_u { int a; short b, c; }; + union init_u { char a; int b; }; + struct four_s { int a; short b, c, d; }; + union four_u { int a; short b, c, d; }; + struct string { const char *name; }; + struct ustring { const wchar_t *name; }; + struct voidp { void *p; int *q; short *r; }; + struct ab { int a, b; }; + struct abc { int a, b, c; }; + + enum foq { A0, B0, CC0, D0 }; + enum bar { A1, B1=-2, CC1, D1, E1 }; + enum baz { A2=0x1000, B2=0x2000 }; + enum foo2 { A3, B3, C3, D3 }; + struct bar_with_e { enum foo2 e; }; + enum noncont { A4, B4=42, C4 }; + enum etypes {A5='!', B5='\'', C5=0x10, D5=010, E5=- 0x10, F5=-010}; + typedef enum { Value0 = 0 } e_t, *pe_t; + enum e_noninj { AA3=0, BB3=0, CC3=0, DD3=0 }; + enum e_prev { AA4, BB4=2, CC4=4, DD4=BB4, EE4, FF4=CC4, GG4=FF4 }; + + struct nesting { struct abc d, e; }; + struct array2 { int a, b; int c[99]; }; + struct align { char a; short b; char c; }; + struct bitfield { int a:10, b:20, c:3; }; + typedef enum { AA2, BB2, CC2 } 
foo_e_t; + typedef struct { foo_e_t f:2; } bfenum_t; + typedef struct { int a; } anon_foo_t; + typedef struct { char b, c; } anon_bar_t; + typedef struct named_foo_s { int a; } named_foo_t, *named_foo_p; + typedef struct { int a; } unnamed_foo_t, *unnamed_foo_p; + struct nonpacked { char a; int b; }; + struct array0 { int len; short data[0]; }; + + struct nested_anon { + struct { int a, b; }; + union { int c, d; }; + }; + union nested_anon_u { + struct { int a, b; }; + union { int c, d; }; + }; + struct abc50 { int a, b; int c[50]; }; + """ + DEFS_PACKED = """ + struct is_packed { char a; int b; } /*here*/; + """ + CCODE = DEFS + DEFS_PACKED.replace('/*here*/', '__attribute__((packed))') + + ffi1.cdef(DEFS) + ffi1.cdef(DEFS_PACKED, packed=True) + + outputfilename = recompile(ffi1, "test_old_ffi1", CCODE, + tmpdir=str(udir)) + module = imp.load_dynamic("test_old_ffi1", outputfilename) + ffi = module.ffi + + +class TestOldFFI1: def test_integer_ranges(self): - ffi = FFI(backend=self.Backend()) for (c_type, size) in [('char', 1), ('short', 2), ('short int', 2), @@ -34,7 +102,6 @@ self._test_int_type(ffi, c_decl, size, unsigned) def test_fixedsize_int(self): - ffi = FFI(backend=self.Backend()) for size in [1, 2, 4, 8]: self._test_int_type(ffi, 'int%d_t' % (8*size), size, False) self._test_int_type(ffi, 'uint%d_t' % (8*size), size, True) @@ -79,12 +146,10 @@ assert ffi.new(c_decl_ptr, long(max))[0] == max def test_new_unsupported_type(self): - ffi = FFI(backend=self.Backend()) e = py.test.raises(TypeError, ffi.new, "int") assert str(e.value) == "expected a pointer or array ctype, got 'int'" def test_new_single_integer(self): - ffi = FFI(backend=self.Backend()) p = ffi.new("int *") # similar to ffi.new("int[1]") assert p[0] == 0 p[0] = -123 @@ -94,14 +159,12 @@ assert repr(p) == "" % SIZE_OF_INT def test_new_array_no_arg(self): - ffi = FFI(backend=self.Backend()) p = ffi.new("int[10]") # the object was zero-initialized: for i in range(10): assert p[i] == 0 def 
test_array_indexing(self): - ffi = FFI(backend=self.Backend()) p = ffi.new("int[10]") p[0] = 42 p[9] = 43 @@ -113,7 +176,6 @@ py.test.raises(IndexError, "p[-1] = 44") def test_new_array_args(self): - ffi = FFI(backend=self.Backend()) # this tries to be closer to C: where we say "int x[5] = {10, 20, ..}" # then here we must enclose the items in a list p = ffi.new("int[5]", [10, 20, 30, 40, 50]) @@ -132,7 +194,6 @@ assert repr(p) == "" % (4*SIZE_OF_INT) def test_new_array_varsize(self): - ffi = FFI(backend=self.Backend()) p = ffi.new("int[]", 10) # a single integer is the length assert p[9] == 0 py.test.raises(IndexError, "p[10]") @@ -151,7 +212,6 @@ assert repr(p) == "" def test_pointer_init(self): - ffi = FFI(backend=self.Backend()) n = ffi.new("int *", 24) a = ffi.new("int *[10]", [ffi.NULL, ffi.NULL, n, n, ffi.NULL]) for i in range(10): @@ -160,14 +220,12 @@ assert a[2] == a[3] == n def test_cannot_cast(self): - ffi = FFI(backend=self.Backend()) a = ffi.new("short int[10]") e = py.test.raises(TypeError, ffi.new, "long int **", a) msg = str(e.value) assert "'short[10]'" in msg and "'long *'" in msg def test_new_pointer_to_array(self): - ffi = FFI(backend=self.Backend()) a = ffi.new("int[4]", [100, 102, 104, 106]) p = ffi.new("int **", a) assert p[0] == ffi.cast("int *", a) @@ -180,7 +238,6 @@ # keepalive: a def test_pointer_direct(self): - ffi = FFI(backend=self.Backend()) p = ffi.cast("int*", 0) assert p is not None assert bool(p) is False @@ -196,9 +253,7 @@ assert p[1] == 456 def test_repr(self): - typerepr = self.TypeRepr - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { short a, b, c; };") + typerepr = "" p = ffi.cast("short unsigned int", 0) assert repr(p) == "" assert repr(ffi.typeof(p)) == typerepr % "unsigned short" @@ -222,10 +277,10 @@ assert repr(p) == "" % ( 6*SIZE_OF_PTR) assert repr(ffi.typeof(p)) == typerepr % "int *[2][3]" - p = ffi.new("struct foo *") - assert repr(p) == "" % ( + p = ffi.new("struct repr *") + assert repr(p) == "" % ( 
3*SIZE_OF_SHORT) - assert repr(ffi.typeof(p)) == typerepr % "struct foo *" + assert repr(ffi.typeof(p)) == typerepr % "struct repr *" # q = ffi.cast("short", -123) assert repr(q) == "" @@ -238,17 +293,16 @@ q = ffi.cast("int*", p) assert repr(q).startswith("" % ( + assert repr(s) == "" % ( SIZE_OF_INT + 2 * SIZE_OF_SHORT) # - py.test.raises(ValueError, ffi.new, "struct foo*", [1, 2, 3, 4]) + py.test.raises(ValueError, ffi.new, "struct simple*", [1, 2, 3, 4]) def test_constructor_struct_from_dict(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { int a; short b, c; };") - s = ffi.new("struct foo*", {'b': 123, 'c': 456}) + s = ffi.new("struct simple*", {'b': 123, 'c': 456}) assert s.a == 0 assert s.b == 123 assert s.c == 456 - py.test.raises(KeyError, ffi.new, "struct foo*", {'d': 456}) + py.test.raises(KeyError, ffi.new, "struct simple*", {'d': 456}) def test_struct_pointer(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { int a; short b, c; };") - s = ffi.new("struct foo*") + s = ffi.new("struct simple*") assert s[0].a == s[0].b == s[0].c == 0 s[0].b = -23 assert s[0].b == s.b == -23 @@ -444,18 +485,16 @@ py.test.raises(IndexError, "s[1]") def test_struct_opaque(self): - ffi = FFI(backend=self.Backend()) - py.test.raises(TypeError, ffi.new, "struct baz*") - p = ffi.new("struct baz **") # this works - assert p[0] == ffi.NULL + py.test.raises(ffi.error, ffi.new, "struct baz*") + # should 'ffi.new("struct baz **") work? it used to, but it was + # not particularly useful... 
+ py.test.raises(ffi.error, ffi.new, "struct baz**") def test_pointer_to_struct(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { int a; short b, c; };") - s = ffi.new("struct foo *") + s = ffi.new("struct simple *") s.a = -42 assert s[0].a == -42 - p = ffi.new("struct foo **", s) + p = ffi.new("struct simple **", s) assert p[0].a == -42 assert p[0][0].a == -42 p[0].a = -43 @@ -472,9 +511,7 @@ assert p[0][0].a == -46 def test_constructor_struct_of_array(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { int a[2]; char b[3]; };") - s = ffi.new("struct foo *", [[10, 11], [b'a', b'b', b'c']]) + s = ffi.new("struct array *", [[10, 11], [b'a', b'b', b'c']]) assert s.a[1] == 11 assert s.b[2] == b'c' s.b[1] = b'X' @@ -483,10 +520,8 @@ assert s.b[2] == b'c' def test_recursive_struct(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { int value; struct foo *next; };") - s = ffi.new("struct foo*") - t = ffi.new("struct foo*") + s = ffi.new("struct recursive*") + t = ffi.new("struct recursive*") s.value = 123 s.next = t t.value = 456 @@ -494,60 +529,51 @@ assert s.next.value == 456 def test_union_simple(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("union foo { int a; short b, c; };") - u = ffi.new("union foo*") + u = ffi.new("union simple_u*") assert u.a == u.b == u.c == 0 u.b = -23 assert u.b == -23 assert u.a != 0 py.test.raises(OverflowError, "u.b = 32768") # - u = ffi.new("union foo*", [-2]) + u = ffi.new("union simple_u*", [-2]) assert u.a == -2 py.test.raises((AttributeError, TypeError), "del u.a") - assert repr(u) == "" % SIZE_OF_INT + assert repr(u) == "" % ( + SIZE_OF_INT,) def test_union_opaque(self): - ffi = FFI(backend=self.Backend()) - py.test.raises(TypeError, ffi.new, "union baz *") - u = ffi.new("union baz **") # this works - assert u[0] == ffi.NULL + py.test.raises(ffi.error, ffi.new, "union baz*") + # should 'ffi.new("union baz **") work? it used to, but it was + # not particularly useful... 
+ py.test.raises(ffi.error, ffi.new, "union baz**") def test_union_initializer(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("union foo { char a; int b; };") - py.test.raises(TypeError, ffi.new, "union foo*", b'A') - py.test.raises(TypeError, ffi.new, "union foo*", 5) - py.test.raises(ValueError, ffi.new, "union foo*", [b'A', 5]) - u = ffi.new("union foo*", [b'A']) + py.test.raises(TypeError, ffi.new, "union init_u*", b'A') + py.test.raises(TypeError, ffi.new, "union init_u*", 5) + py.test.raises(ValueError, ffi.new, "union init_u*", [b'A', 5]) + u = ffi.new("union init_u*", [b'A']) assert u.a == b'A' - py.test.raises(TypeError, ffi.new, "union foo*", [1005]) - u = ffi.new("union foo*", {'b': 12345}) + py.test.raises(TypeError, ffi.new, "union init_u*", [1005]) + u = ffi.new("union init_u*", {'b': 12345}) assert u.b == 12345 - u = ffi.new("union foo*", []) + u = ffi.new("union init_u*", []) assert u.a == b'\x00' assert u.b == 0 def test_sizeof_type(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef(""" - struct foo { int a; short b, c, d; }; - union foo { int a; short b, c, d; }; - """) for c_type, expected_size in [ ('char', 1), ('unsigned int', 4), ('char *', SIZE_OF_PTR), ('int[5]', 20), - ('struct foo', 12), - ('union foo', 4), + ('struct four_s', 12), + ('union four_u', 4), ]: size = ffi.sizeof(c_type) assert size == expected_size, (size, expected_size, ctype) def test_sizeof_cdata(self): - ffi = FFI(backend=self.Backend()) assert ffi.sizeof(ffi.new("short*")) == SIZE_OF_PTR assert ffi.sizeof(ffi.cast("short", 123)) == SIZE_OF_SHORT # @@ -556,7 +582,6 @@ assert ffi.sizeof(a) == 5 * SIZE_OF_INT def test_string_from_char_pointer(self): - ffi = FFI(backend=self.Backend()) x = ffi.new("char*", b"x") assert str(x) == repr(x) assert ffi.string(x) == b"x" @@ -564,7 +589,6 @@ py.test.raises(TypeError, ffi.new, "char*", unicode("foo")) def test_unicode_from_wchar_pointer(self): - ffi = FFI(backend=self.Backend()) self.check_wchar_t(ffi) x = 
ffi.new("wchar_t*", u+"x") assert unicode(x) == unicode(repr(x)) @@ -572,7 +596,6 @@ assert ffi.string(ffi.new("wchar_t*", u+"\x00")) == u+"" def test_string_from_char_array(self): - ffi = FFI(backend=self.Backend()) p = ffi.new("char[]", b"hello.") p[5] = b'!' assert ffi.string(p) == b"hello!" @@ -589,7 +612,6 @@ assert ffi.string(p) == b'hello' def test_string_from_wchar_array(self): - ffi = FFI(backend=self.Backend()) self.check_wchar_t(ffi) assert ffi.string(ffi.cast("wchar_t", "x")) == u+"x" assert ffi.string(ffi.cast("wchar_t", u+"x")) == u+"x" @@ -616,11 +638,9 @@ assert ffi.string(p, 2) == u+'he' def test_fetch_const_char_p_field(self): - # 'const' is ignored so far - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { const char *name; };") + # 'const' is ignored so far, in the declaration of 'struct string' t = ffi.new("const char[]", b"testing") - s = ffi.new("struct foo*", [t]) + s = ffi.new("struct string*", [t]) assert type(s.name) not in (bytes, str, unicode) assert ffi.string(s.name) == b"testing" py.test.raises(TypeError, "s.name = None") @@ -629,18 +649,15 @@ def test_fetch_const_wchar_p_field(self): # 'const' is ignored so far - ffi = FFI(backend=self.Backend()) self.check_wchar_t(ffi) - ffi.cdef("struct foo { const wchar_t *name; };") t = ffi.new("const wchar_t[]", u+"testing") - s = ffi.new("struct foo*", [t]) + s = ffi.new("struct ustring*", [t]) assert type(s.name) not in (bytes, str, unicode) assert ffi.string(s.name) == u+"testing" s.name = ffi.NULL assert s.name == ffi.NULL def test_voidp(self): - ffi = FFI(backend=self.Backend()) py.test.raises(TypeError, ffi.new, "void*") p = ffi.new("void **") assert p[0] == ffi.NULL @@ -650,8 +667,7 @@ py.test.raises(TypeError, "vp[0]") py.test.raises(TypeError, ffi.new, "short **", a) # - ffi.cdef("struct foo { void *p; int *q; short *r; };") - s = ffi.new("struct foo *") + s = ffi.new("struct voidp *") s.p = a # works s.q = a # works py.test.raises(TypeError, "s.r = a") # fails @@ -661,7 
+677,6 @@ py.test.raises(TypeError, "s.r = b") # fails def test_functionptr_simple(self): - ffi = FFI(backend=self.Backend()) py.test.raises(TypeError, ffi.callback, "int(*)(int)", 0) def cb(n): return n + 1 @@ -686,12 +701,10 @@ assert res == 46 def test_functionptr_advanced(self): - ffi = FFI(backend=self.Backend()) t = ffi.typeof("int(*(*)(int))(int)") - assert repr(t) == self.TypeRepr % "int(*(*)(int))(int)" + assert repr(t) == "" % "int(*(*)(int))(int)" def test_functionptr_voidptr_return(self): - ffi = FFI(backend=self.Backend()) def cb(): return ffi.NULL p = ffi.callback("void*(*)()", cb) @@ -707,7 +720,6 @@ assert res == void_ptr def test_functionptr_intptr_return(self): - ffi = FFI(backend=self.Backend()) def cb(): return ffi.NULL p = ffi.callback("int*(*)()", cb) @@ -729,7 +741,6 @@ assert res == int_array_ptr def test_functionptr_void_return(self): - ffi = FFI(backend=self.Backend()) def foo(): pass foo_cb = ffi.callback("void foo()", foo) @@ -737,7 +748,6 @@ assert result is None def test_char_cast(self): - ffi = FFI(backend=self.Backend()) p = ffi.cast("int", b'\x01') assert ffi.typeof(p) is ffi.typeof("int") assert int(p) == 1 @@ -749,7 +759,6 @@ assert int(p) == 0x81 def test_wchar_cast(self): - ffi = FFI(backend=self.Backend()) self.check_wchar_t(ffi) p = ffi.cast("int", ffi.cast("wchar_t", u+'\u1234')) assert int(p) == 0x1234 @@ -764,7 +773,6 @@ assert int(p) == 0x1234 def test_cast_array_to_charp(self): - ffi = FFI(backend=self.Backend()) a = ffi.new("short int[]", [0x1234, 0x5678]) p = ffi.cast("char*", a) data = b''.join([p[i] for i in range(4)]) @@ -774,7 +782,6 @@ assert data == b'\x12\x34\x56\x78' def test_cast_between_pointers(self): - ffi = FFI(backend=self.Backend()) a = ffi.new("short int[]", [0x1234, 0x5678]) p = ffi.cast("short*", a) p2 = ffi.cast("int*", p) @@ -786,7 +793,6 @@ assert data == b'\x12\x34\x56\x78' def test_cast_pointer_and_int(self): - ffi = FFI(backend=self.Backend()) a = ffi.new("short int[]", [0x1234, 0x5678]) l1 = 
ffi.cast("intptr_t", a) p = ffi.cast("short*", a) @@ -798,7 +804,6 @@ assert int(ffi.cast("intptr_t", ffi.NULL)) == 0 def test_cast_functionptr_and_int(self): - ffi = FFI(backend=self.Backend()) def cb(n): return n + 1 a = ffi.callback("int(*)(int)", cb) @@ -810,7 +815,6 @@ assert hash(a) == hash(b) def test_callback_crash(self): - ffi = FFI(backend=self.Backend()) def cb(n): raise Exception a = ffi.callback("int(*)(int)", cb, error=42) @@ -818,19 +822,15 @@ assert res == 42 def test_structptr_argument(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo_s { int a, b; };") def cb(p): return p[0].a * 1000 + p[0].b * 100 + p[1].a * 10 + p[1].b - a = ffi.callback("int(*)(struct foo_s[])", cb) + a = ffi.callback("int(*)(struct ab[])", cb) res = a([[5, 6], {'a': 7, 'b': 8}]) assert res == 5678 res = a([[5], {'b': 8}]) assert res == 5008 def test_array_argument_as_list(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo_s { int a, b; };") seen = [] def cb(argv): seen.append(ffi.string(argv[0])) @@ -840,7 +840,6 @@ assert seen == [b"foobar", b"baz"] def test_cast_float(self): - ffi = FFI(backend=self.Backend()) a = ffi.cast("float", 12) assert float(a) == 12.0 a = ffi.cast("float", 12.5) @@ -864,31 +863,30 @@ assert ffi.string(a) == b"B" def test_enum(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("enum foo { A0, B0, CC0, D0 };") - assert ffi.string(ffi.cast("enum foo", 0)) == "A0" - assert ffi.string(ffi.cast("enum foo", 2)) == "CC0" - assert ffi.string(ffi.cast("enum foo", 3)) == "D0" - assert ffi.string(ffi.cast("enum foo", 4)) == "4" - ffi.cdef("enum bar { A1, B1=-2, CC1, D1, E1 };") + # enum foq { A0, B0, CC0, D0 }; + assert ffi.string(ffi.cast("enum foq", 0)) == "A0" + assert ffi.string(ffi.cast("enum foq", 2)) == "CC0" + assert ffi.string(ffi.cast("enum foq", 3)) == "D0" + assert ffi.string(ffi.cast("enum foq", 4)) == "4" + # enum bar { A1, B1=-2, CC1, D1, E1 }; assert ffi.string(ffi.cast("enum bar", 0)) == "A1" assert 
ffi.string(ffi.cast("enum bar", -2)) == "B1" assert ffi.string(ffi.cast("enum bar", -1)) == "CC1" assert ffi.string(ffi.cast("enum bar", 1)) == "E1" assert ffi.cast("enum bar", -2) != ffi.cast("enum bar", -2) - assert ffi.cast("enum foo", 0) != ffi.cast("enum bar", 0) + assert ffi.cast("enum foq", 0) != ffi.cast("enum bar", 0) assert ffi.cast("enum bar", 0) != ffi.cast("int", 0) assert repr(ffi.cast("enum bar", -1)) == "" - assert repr(ffi.cast("enum foo", -1)) == ( # enums are unsigned, if - "") # they contain no neg value - ffi.cdef("enum baz { A2=0x1000, B2=0x2000 };") + assert repr(ffi.cast("enum foq", -1)) == ( # enums are unsigned, if + "") # they contain no neg value + # enum baz { A2=0x1000, B2=0x2000 }; assert ffi.string(ffi.cast("enum baz", 0x1000)) == "A2" assert ffi.string(ffi.cast("enum baz", 0x2000)) == "B2" def test_enum_in_struct(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("enum foo { A, B, C, D }; struct bar { enum foo e; };") - s = ffi.new("struct bar *") + # enum foo2 { A3, B3, C3, D3 }; + # struct bar_with_e { enum foo2 e; }; + s = ffi.new("struct bar_with_e *") s.e = 0 assert s.e == 0 s.e = 3 @@ -897,39 +895,35 @@ s[0].e = 2 assert s.e == 2 assert s[0].e == 2 - s.e = ffi.cast("enum foo", -1) + s.e = ffi.cast("enum foo2", -1) assert s.e == 4294967295 assert s[0].e == 4294967295 s.e = s.e - py.test.raises(TypeError, "s.e = 'B'") + py.test.raises(TypeError, "s.e = 'B3'") py.test.raises(TypeError, "s.e = '2'") py.test.raises(TypeError, "s.e = '#2'") py.test.raises(TypeError, "s.e = '#7'") def test_enum_non_contiguous(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("enum foo { A, B=42, C };") - assert ffi.string(ffi.cast("enum foo", 0)) == "A" - assert ffi.string(ffi.cast("enum foo", 42)) == "B" - assert ffi.string(ffi.cast("enum foo", 43)) == "C" - invalid_value = ffi.cast("enum foo", 2) + # enum noncont { A4, B4=42, C4 }; + assert ffi.string(ffi.cast("enum noncont", 0)) == "A4" + assert ffi.string(ffi.cast("enum noncont", 42)) == 
"B4" + assert ffi.string(ffi.cast("enum noncont", 43)) == "C4" + invalid_value = ffi.cast("enum noncont", 2) assert int(invalid_value) == 2 assert ffi.string(invalid_value) == "2" def test_enum_char_hex_oct(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef(r"enum foo{A='!', B='\'', C=0x10, D=010, E=- 0x10, F=-010};") - assert ffi.string(ffi.cast("enum foo", ord('!'))) == "A" - assert ffi.string(ffi.cast("enum foo", ord("'"))) == "B" - assert ffi.string(ffi.cast("enum foo", 16)) == "C" - assert ffi.string(ffi.cast("enum foo", 8)) == "D" - assert ffi.string(ffi.cast("enum foo", -16)) == "E" - assert ffi.string(ffi.cast("enum foo", -8)) == "F" + # enum etypes {A5='!', B5='\'', C5=0x10, D5=010, E5=- 0x10, F5=-010}; + assert ffi.string(ffi.cast("enum etypes", ord('!'))) == "A5" + assert ffi.string(ffi.cast("enum etypes", ord("'"))) == "B5" + assert ffi.string(ffi.cast("enum etypes", 16)) == "C5" + assert ffi.string(ffi.cast("enum etypes", 8)) == "D5" + assert ffi.string(ffi.cast("enum etypes", -16)) == "E5" + assert ffi.string(ffi.cast("enum etypes", -8)) == "F5" def test_array_of_struct(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { int a, b; };") - s = ffi.new("struct foo[1]") + s = ffi.new("struct ab[1]") py.test.raises(AttributeError, 's.b') py.test.raises(AttributeError, 's.b = 412') s[0].b = 412 @@ -937,12 +931,10 @@ py.test.raises(IndexError, 's[1]') def test_pointer_to_array(self): - ffi = FFI(backend=self.Backend()) p = ffi.new("int(**)[5]") assert repr(p) == "" % SIZE_OF_PTR def test_iterate_array(self): - ffi = FFI(backend=self.Backend()) a = ffi.new("char[]", b"hello") assert list(a) == [b"h", b"e", b"l", b"l", b"o", b"\0"] assert list(iter(a)) == [b"h", b"e", b"l", b"l", b"o", b"\0"] @@ -953,43 +945,37 @@ py.test.raises(TypeError, list, ffi.new("int *")) def test_offsetof(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { int a, b, c; };") - assert ffi.offsetof("struct foo", "a") == 0 - assert ffi.offsetof("struct 
foo", "b") == 4 - assert ffi.offsetof("struct foo", "c") == 8 + # struct abc { int a, b, c; }; + assert ffi.offsetof("struct abc", "a") == 0 + assert ffi.offsetof("struct abc", "b") == 4 + assert ffi.offsetof("struct abc", "c") == 8 def test_offsetof_nested(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { int a, b, c; };" - "struct bar { struct foo d, e; };") - assert ffi.offsetof("struct bar", "e") == 12 - py.test.raises(KeyError, ffi.offsetof, "struct bar", "e.a") - assert ffi.offsetof("struct bar", "e", "a") == 12 - assert ffi.offsetof("struct bar", "e", "b") == 16 - assert ffi.offsetof("struct bar", "e", "c") == 20 + # struct nesting { struct abc d, e; }; + assert ffi.offsetof("struct nesting", "e") == 12 + py.test.raises(KeyError, ffi.offsetof, "struct nesting", "e.a") + assert ffi.offsetof("struct nesting", "e", "a") == 12 + assert ffi.offsetof("struct nesting", "e", "b") == 16 + assert ffi.offsetof("struct nesting", "e", "c") == 20 def test_offsetof_array(self): - ffi = FFI(backend=self.Backend()) assert ffi.offsetof("int[]", 51) == 51 * ffi.sizeof("int") assert ffi.offsetof("int *", 51) == 51 * ffi.sizeof("int") - ffi.cdef("struct bar { int a, b; int c[99]; };") - assert ffi.offsetof("struct bar", "c") == 2 * ffi.sizeof("int") - assert ffi.offsetof("struct bar", "c", 0) == 2 * ffi.sizeof("int") - assert ffi.offsetof("struct bar", "c", 51) == 53 * ffi.sizeof("int") + # struct array2 { int a, b; int c[99]; }; + assert ffi.offsetof("struct array2", "c") == 2 * ffi.sizeof("int") + assert ffi.offsetof("struct array2", "c", 0) == 2 * ffi.sizeof("int") + assert ffi.offsetof("struct array2", "c", 51) == 53 * ffi.sizeof("int") def test_alignof(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { char a; short b; char c; };") + # struct align { char a; short b; char c; }; assert ffi.alignof("int") == 4 assert ffi.alignof("double") in (4, 8) - assert ffi.alignof("struct foo") == 2 + assert ffi.alignof("struct align") == 2 def 
test_bitfield(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("struct foo { int a:10, b:20, c:3; };") - assert ffi.sizeof("struct foo") == 8 - s = ffi.new("struct foo *") + # struct bitfield { int a:10, b:20, c:3; }; + assert ffi.sizeof("struct bitfield") == 8 + s = ffi.new("struct bitfield *") s.a = 511 py.test.raises(OverflowError, "s.a = 512") py.test.raises(OverflowError, "s[0].a = 512") @@ -1006,35 +992,31 @@ assert s.c == -4 def test_bitfield_enum(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef(""" - typedef enum { AA, BB, CC } foo_e; - typedef struct { foo_e f:2; } foo_s; - """) - s = ffi.new("foo_s *") + # typedef enum { AA1, BB1, CC1 } foo_e_t; + # typedef struct { foo_e_t f:2; } bfenum_t; + s = ffi.new("bfenum_t *") s.f = 2 assert s.f == 2 def test_anonymous_struct(self): - ffi = FFI(backend=self.Backend()) - ffi.cdef("typedef struct { int a; } foo_t;") - ffi.cdef("typedef struct { char b, c; } bar_t;") - f = ffi.new("foo_t *", [12345]) - b = ffi.new("bar_t *", [b"B", b"C"]) + # typedef struct { int a; } anon_foo_t; + # typedef struct { char b, c; } anon_bar_t; + f = ffi.new("anon_foo_t *", [12345]) + b = ffi.new("anon_bar_t *", [b"B", b"C"]) assert f.a == 12345 assert b.b == b"B" assert b.c == b"C" - assert repr(b).startswith(" Author: Armin Rigo Branch: cffi-1.0 Changeset: r1832:fd16324ee375 Date: 2015-04-26 10:46 +0200 http://bitbucket.org/cffi/cffi/changeset/fd16324ee375/ Log: Array lengths given directly by constants diff --git a/_cffi1/parse_c_type.c b/_cffi1/parse_c_type.c --- a/_cffi1/parse_c_type.c +++ b/_cffi1/parse_c_type.c @@ -203,6 +203,8 @@ return index; } +#define MAX_SSIZE_T (((size_t)-1) >> 1) + static int parse_complete(token_t *tok); static int parse_sequel(token_t *tok, int outer) @@ -329,17 +331,44 @@ next_token(tok); if (tok->kind != TOK_CLOSE_BRACKET) { size_t length; + int gindex; - if (tok->kind != TOK_INTEGER) + switch (tok->kind) { + + case TOK_INTEGER: + errno = 0; + if (sizeof(length) > sizeof(unsigned long)) + 
length = strtoull(tok->p, NULL, 10); + else + length = strtoul(tok->p, NULL, 10); + if (errno == ERANGE || length > MAX_SSIZE_T) + return parse_error(tok, "number too large"); + break; + + case TOK_IDENTIFIER: + gindex = search_in_globals(tok->info->ctx, tok->p, tok->size); + if (gindex >= 0) { + const struct _cffi_global_s *g; + g = &tok->info->ctx->globals[gindex]; + if (_CFFI_GETOP(g->type_op) == _CFFI_OP_CONSTANT_INT || + _CFFI_GETOP(g->type_op) == _CFFI_OP_ENUM) { + unsigned long long value; + int neg = ((int(*)(unsigned long long*))g->address) + (&value); + if (!neg && value > MAX_SSIZE_T) + return parse_error(tok, + "integer constant too large"); + if (!neg || value == 0) { + length = (size_t)value; + break; + } + } + } + /* fall-through to the default case */ + default: return parse_error(tok, "expected a positive integer constant"); + } - errno = 0; - if (sizeof(length) > sizeof(unsigned long)) - length = strtoull(tok->p, NULL, 10); - else - length = strtoul(tok->p, NULL, 10); - if (errno == ERANGE || ((ssize_t)length) < 0) - return parse_error(tok, "number too large"); next_token(tok); write_ds(tok, _CFFI_OP(_CFFI_OP_ARRAY, 0)); diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -219,6 +219,8 @@ { PyObject *x; unsigned long long value; + /* note: we cast g->address to this function type; we do the same + in parse_c_type:parse_sequel() too */ int neg = ((int(*)(unsigned long long*))g->address)(&value); if (!neg) { if (value <= (unsigned long long)LONG_MAX) diff --git a/_cffi1/test_parse_c_type.py b/_cffi1/test_parse_c_type.py --- a/_cffi1/test_parse_c_type.py +++ b/_cffi1/test_parse_c_type.py @@ -1,7 +1,6 @@ -import re -import os -import py +import sys, re, os, py import cffi +from . 
import cffi_opcode r_macro = re.compile(r"#define \w+[(][^\n]*|#include [^\n]*") r_define = re.compile(r"(#define \w+) [^\n]*") @@ -29,6 +28,9 @@ identifier_names = ["id", "id0", "id05", "id05b", "tail"] assert identifier_names == sorted(identifier_names) +global_names = ["FIVE", "NEG", "ZERO"] +assert global_names == sorted(global_names) + ctx = ffi.new("struct _cffi_type_context_s *") c_struct_names = [ffi.new("char[]", _n) for _n in struct_names] ctx_structs = ffi.new("struct _cffi_struct_union_s[]", len(struct_names)) @@ -53,6 +55,32 @@ ctx.typenames = ctx_identifiers ctx.num_typenames = len(identifier_names) + at ffi.callback("int(unsigned long long *)") +def fetch_constant_five(p): + p[0] = 5 + return 0 + at ffi.callback("int(unsigned long long *)") +def fetch_constant_zero(p): + p[0] = 0 + return 1 + at ffi.callback("int(unsigned long long *)") +def fetch_constant_neg(p): + p[0] = 123321 + return 1 + +ctx_globals = ffi.new("struct _cffi_global_s[]", len(global_names)) +c_glob_names = [ffi.new("char[]", _n) for _n in global_names] +for _i, _fn in enumerate([fetch_constant_five, + fetch_constant_neg, + fetch_constant_zero]): + ctx_globals[_i].name = c_glob_names[_i] + ctx_globals[_i].address = _fn + ctx_globals[_i].type_op = ffi.cast("_cffi_opcode_t", + cffi_opcode.OP_CONSTANT_INT if _i != 1 + else cffi_opcode.OP_ENUM) +ctx.globals = ctx_globals +ctx.num_globals = len(global_names) + def parse(input): out = ffi.new("_cffi_opcode_t[]", 100) @@ -243,6 +271,12 @@ parse_error("int a(*)", "identifier expected", 6) parse_error("int[123456789012345678901234567890]", "number too large", 4) +def test_number_too_large(): + num_max = sys.maxsize + assert parse("char[%d]" % num_max) == [Prim(lib._CFFI_PRIM_CHAR), + '->', Array(0), num_max] + parse_error("char[%d]" % (num_max + 1), "number too large", 5) + def test_complexity_limit(): parse_error("int" + "[]" * 2500, "internal type complexity limit reached", 202) @@ -270,9 +304,15 @@ '->', Pointer(0)] def 
test_cffi_opcode_sync(): - import cffi_opcode, cffi.model + import cffi.model for name in dir(lib): if name.startswith('_CFFI_'): assert getattr(cffi_opcode, name[6:]) == getattr(lib, name) assert sorted(cffi_opcode.PRIMITIVE_TO_INDEX.keys()) == ( sorted(cffi.model.PrimitiveType.ALL_PRIMITIVE_TYPES.keys())) + +def test_array_length_from_constant(): + parse_error("int[UNKNOWN]", "expected a positive integer constant", 4) + assert parse("int[FIVE]") == [Prim(lib._CFFI_PRIM_INT), '->', Array(0), 5] + assert parse("int[ZERO]") == [Prim(lib._CFFI_PRIM_INT), '->', Array(0), 0] + parse_error("int[NEG]", "expected a positive integer constant", 4) From noreply at buildbot.pypy.org Sun Apr 26 10:59:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 10:59:40 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: fix test Message-ID: <20150426085940.94FA81C0845@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1833:0a04e4942846 Date: 2015-04-26 10:48 +0200 http://bitbucket.org/cffi/cffi/changeset/0a04e4942846/ Log: fix test diff --git a/_cffi1/test_new_ffi_1.py b/_cffi1/test_new_ffi_1.py --- a/_cffi1/test_new_ffi_1.py +++ b/_cffi1/test_new_ffi_1.py @@ -2,7 +2,6 @@ import platform, imp import sys, ctypes import cffi -from cffi import CDefError, FFIError from .udir import udir from .recompiler import recompile from .support import * @@ -1230,8 +1229,8 @@ def test_array_of_func_ptr(self): f = ffi.cast("int(*)(int)", 42) assert f != ffi.NULL - py.test.raises(CDefError, ffi.cast, "int(int)", 42) - py.test.raises(CDefError, ffi.new, "int([5])(int)") + py.test.raises(ffi.error, ffi.cast, "int(int)", 42) + py.test.raises(ffi.error, ffi.new, "int([5])(int)") a = ffi.new("int(*[5])(int)", [f]) assert ffi.getctype(ffi.typeof(a)) == "int(*[5])(int)" assert len(a) == 5 From noreply at buildbot.pypy.org Sun Apr 26 10:59:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 10:59:41 +0200 (CEST) Subject: [pypy-commit] cffi 
cffi-1.0: Add the extra tests from test_ffi_backend.py Message-ID: <20150426085941.A7AE61C0845@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1834:40b66d3965df Date: 2015-04-26 10:51 +0200 http://bitbucket.org/cffi/cffi/changeset/40b66d3965df/ Log: Add the extra tests from test_ffi_backend.py diff --git a/_cffi1/test_new_ffi_1.py b/_cffi1/test_new_ffi_1.py --- a/_cffi1/test_new_ffi_1.py +++ b/_cffi1/test_new_ffi_1.py @@ -64,6 +64,7 @@ union { int c, d; }; }; struct abc50 { int a, b; int c[50]; }; + struct ints_and_bitfield { int a,b,c,d,e; int x:1; }; """ DEFS_PACKED = """ struct is_packed { char a; int b; } /*here*/; @@ -1581,3 +1582,24 @@ assert s[0].a == b'X' assert s[1].b == -4892220 assert s[1].a == b'Y' + + def test_not_supported_bitfield_in_result(self): + # struct ints_and_bitfield { int a,b,c,d,e; int x:1; }; + e = py.test.raises(NotImplementedError, ffi.callback, + "struct ints_and_bitfield foo(void)", lambda: 42) + assert str(e.value) == ("struct ints_and_bitfield(*)(): " + "callback with unsupported argument or return type or with '...'") + + def test_inspecttype(self): + assert ffi.typeof("long").kind == "primitive" + assert ffi.typeof("long(*)(long, long**, ...)").cname == ( + "long(*)(long, long * *, ...)") + assert ffi.typeof("long(*)(long, long**, ...)").ellipsis is True + + def test_new_handle(self): + o = [2, 3, 4] + p = ffi.new_handle(o) + assert ffi.typeof(p) == ffi.typeof("void *") + assert ffi.from_handle(p) is o + assert ffi.from_handle(ffi.cast("char *", p)) is o + py.test.raises(RuntimeError, ffi.from_handle, ffi.NULL) From noreply at buildbot.pypy.org Sun Apr 26 10:59:42 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 10:59:42 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Rename the types, which are not available in the cffi module Message-ID: <20150426085942.B44B41C0845@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1835:5e6d6dfcf4e7 Date: 2015-04-26 
10:55 +0200 http://bitbucket.org/cffi/cffi/changeset/5e6d6dfcf4e7/ Log: Rename the types, which are not available in the cffi module diff --git a/_cffi1/cglob.c b/_cffi1/cglob.c --- a/_cffi1/cglob.c +++ b/_cffi1/cglob.c @@ -15,7 +15,7 @@ static PyTypeObject GlobSupport_Type = { PyVarObject_HEAD_INIT(NULL, 0) - "cffi.GlobSupport", + "FFIGlobSupport", sizeof(GlobSupportObject), 0, (destructor)glob_support_dealloc, /* tp_dealloc */ diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -713,7 +713,7 @@ static PyTypeObject FFI_Type = { PyVarObject_HEAD_INIT(NULL, 0) - "cffi.FFI", + "FFI", sizeof(FFIObject), 0, (destructor)ffi_dealloc, /* tp_dealloc */ diff --git a/_cffi1/lib_obj.c b/_cffi1/lib_obj.c --- a/_cffi1/lib_obj.c +++ b/_cffi1/lib_obj.c @@ -69,7 +69,7 @@ static PyObject *lib_repr(LibObject *lib) { - return PyText_FromFormat("", + return PyText_FromFormat("", PyText_AS_UTF8(lib->l_libname)); } @@ -284,7 +284,7 @@ static PyTypeObject Lib_Type = { PyVarObject_HEAD_INIT(NULL, 0) - "cffi.Lib", + "Lib", sizeof(LibObject), 0, (destructor)lib_dealloc, /* tp_dealloc */ From noreply at buildbot.pypy.org Sun Apr 26 10:59:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 10:59:43 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: List the remaining methods that are definitely missing Message-ID: <20150426085943.B1D651C0845@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1836:f59f9e25a388 Date: 2015-04-26 11:00 +0200 http://bitbucket.org/cffi/cffi/changeset/f59f9e25a388/ Log: List the remaining methods that are definitely missing diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -689,14 +689,23 @@ {"__set_types",(PyCFunction)ffi__set_types, METH_VARARGS}, {"addressof", (PyCFunction)ffi_addressof, METH_VARARGS, ffi_addressof_doc}, {"alignof", (PyCFunction)ffi_alignof, METH_O, ffi_alignof_doc}, +#if 0 + {"buffer", 
(PyCFunction)ffi_buffer, METH_VARARGS, ffi_buffer_doc}, +#endif {"callback", (PyCFunction)ffi_callback, METH_VARARGS | METH_KEYWORDS,ffi_callback_doc}, {"cast", (PyCFunction)ffi_cast, METH_VARARGS, ffi_cast_doc}, +#if 0 + {"from_buffer",(PyCFunction)ffi_from_buffer,METH_O, ffi_from_buffer_doc}, +#endif {"from_handle",(PyCFunction)ffi_from_handle,METH_O, ffi_from_handle_doc}, #if 0 {"gc", (PyCFunction)ffi_gc, METH_VARARGS}, #endif {"getctype", (PyCFunction)ffi_getctype, METH_VARARGS, ffi_getctype_doc}, +#if 0 + {"getwinerror",(PyCFunction)ffi_getwinerror,METH_VARARGS, ffi_getwinerror_doc}, +#endif {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS, ffi_offsetof_doc}, {"new", (PyCFunction)ffi_new, METH_VARARGS, ffi_new_doc}, {"new_handle", (PyCFunction)ffi_new_handle, METH_O, ffi_new_handle_doc}, From noreply at buildbot.pypy.org Sun Apr 26 11:23:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 11:23:26 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: ffi.getwinerror() (untested so far) Message-ID: <20150426092326.F02451C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1837:bc882378fefd Date: 2015-04-26 11:05 +0200 http://bitbucket.org/cffi/cffi/changeset/bc882378fefd/ Log: ffi.getwinerror() (untested so far) diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -581,6 +581,15 @@ return res; } +#ifdef MS_WIN32 +PyDoc_STRVAR(ffi_getwinerror_doc, +"Return either the GetLastError() or the error number given by the\n" +"optional 'code' argument, as a tuple '(code, message)'."); + +#define ffi_getwinerror b_getwinerror /* ffi_getwinerror() => b_getwinerror() + from misc_win32.h */ +#endif + PyDoc_STRVAR(ffi_errno_doc, "the value of 'errno' from/to the C calls"); static PyObject *ffi_get_errno(PyObject *self, void *closure) @@ -703,7 +712,7 @@ {"gc", (PyCFunction)ffi_gc, METH_VARARGS}, #endif {"getctype", (PyCFunction)ffi_getctype, METH_VARARGS, ffi_getctype_doc}, -#if 
0 +#ifdef MS_WIN32 {"getwinerror",(PyCFunction)ffi_getwinerror,METH_VARARGS, ffi_getwinerror_doc}, #endif {"offsetof", (PyCFunction)ffi_offsetof, METH_VARARGS, ffi_offsetof_doc}, From noreply at buildbot.pypy.org Sun Apr 26 11:23:28 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 11:23:28 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: ffi.buffer() Message-ID: <20150426092328.0FCC41C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1838:c1a8a13f781d Date: 2015-04-26 11:09 +0200 http://bitbucket.org/cffi/cffi/changeset/c1a8a13f781d/ Log: ffi.buffer() diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -302,6 +302,20 @@ #define ffi_string b_string /* ffi_string() => b_string() from _cffi_backend.c */ +PyDoc_STRVAR(ffi_buffer_doc, +"Return a read-write buffer object that references the raw C data\n" +"pointed to by the given 'cdata'. The 'cdata' must be a pointer or an\n" +"array. Can be passed to functions expecting a buffer, or directly\n" +"manipulated with:\n" +"\n" +" buf[:] get a copy of it in a regular string, or\n" +" buf[idx] as a single character\n" +" buf[:] = ...\n" +" buf[idx] = ... change the content"); + +#define ffi_buffer b_buffer /* ffi_buffer() => b_buffer() + from _cffi_backend.c */ + PyDoc_STRVAR(ffi_offsetof_doc, "Return the offset of the named field inside the given structure or\n" "array, which must be given as a C type name. 
You can give several\n" @@ -698,9 +712,7 @@ {"__set_types",(PyCFunction)ffi__set_types, METH_VARARGS}, {"addressof", (PyCFunction)ffi_addressof, METH_VARARGS, ffi_addressof_doc}, {"alignof", (PyCFunction)ffi_alignof, METH_O, ffi_alignof_doc}, -#if 0 {"buffer", (PyCFunction)ffi_buffer, METH_VARARGS, ffi_buffer_doc}, -#endif {"callback", (PyCFunction)ffi_callback, METH_VARARGS | METH_KEYWORDS,ffi_callback_doc}, {"cast", (PyCFunction)ffi_cast, METH_VARARGS, ffi_cast_doc}, From noreply at buildbot.pypy.org Sun Apr 26 11:23:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 11:23:29 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: more tests from test_ffi_backend Message-ID: <20150426092329.1E7141C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1839:e80b46e5fa79 Date: 2015-04-26 11:11 +0200 http://bitbucket.org/cffi/cffi/changeset/e80b46e5fa79/ Log: more tests from test_ffi_backend diff --git a/_cffi1/test_new_ffi_1.py b/_cffi1/test_new_ffi_1.py --- a/_cffi1/test_new_ffi_1.py +++ b/_cffi1/test_new_ffi_1.py @@ -54,6 +54,7 @@ typedef struct { int a; } unnamed_foo_t, *unnamed_foo_p; struct nonpacked { char a; int b; }; struct array0 { int len; short data[0]; }; + struct array_no_length { int x; int a[]; }; struct nested_anon { struct { int a, b; }; @@ -1603,3 +1604,20 @@ assert ffi.from_handle(p) is o assert ffi.from_handle(ffi.cast("char *", p)) is o py.test.raises(RuntimeError, ffi.from_handle, ffi.NULL) + + def test_struct_array_no_length(self): + # struct array_no_length { int x; int a[]; }; + p = ffi.new("struct array_no_length *", [100, [200, 300, 400]]) + assert p.x == 100 + assert ffi.typeof(p.a) is ffi.typeof("int *") # no length available + assert p.a[0] == 200 + assert p.a[1] == 300 + assert p.a[2] == 400 + + def test_from_buffer(self): + import array + a = array.array('H', [10000, 20000, 30000]) + c = ffi.from_buffer(a) + assert ffi.typeof(c) is ffi.typeof("char[]") + ffi.cast("unsigned short *", c)[1] 
+= 500 + assert list(a) == [10000, 20500, 30000] From noreply at buildbot.pypy.org Sun Apr 26 11:23:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 11:23:30 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: ffi.from_buffer() Message-ID: <20150426092330.2D2D11C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1840:923f1d3495c9 Date: 2015-04-26 11:23 +0200 http://bitbucket.org/cffi/cffi/changeset/923f1d3495c9/ Log: ffi.from_buffer() diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -533,6 +533,19 @@ return x; } +PyDoc_STRVAR(ffi_from_buffer_doc, +"Return a that points to the data of the given Python\n" +"object, which must support the buffer interface. Note that this is\n" +"not meant to be used on the built-in types str, unicode, or bytearray\n" +"(you can build 'char[]' arrays explicitly) but only on objects\n" +"containing large quantities of raw data in some other format, like\n" +"'array.array' or numpy arrays."); + +static PyObject *ffi_from_buffer(PyObject *self, PyObject *arg) +{ + return direct_from_buffer(g_ct_chararray, arg); +} + #if 0 static PyObject *ffi_gc(ZefFFIObject *self, PyObject *args) { @@ -716,9 +729,7 @@ {"callback", (PyCFunction)ffi_callback, METH_VARARGS | METH_KEYWORDS,ffi_callback_doc}, {"cast", (PyCFunction)ffi_cast, METH_VARARGS, ffi_cast_doc}, -#if 0 {"from_buffer",(PyCFunction)ffi_from_buffer,METH_O, ffi_from_buffer_doc}, -#endif {"from_handle",(PyCFunction)ffi_from_handle,METH_O, ffi_from_handle_doc}, #if 0 {"gc", (PyCFunction)ffi_gc, METH_VARARGS}, diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -8,7 +8,7 @@ static PyObject *all_primitives[_CFFI__NUM_PRIM]; static PyObject *global_types_dict; -static CTypeDescrObject *g_ct_voidp; +static CTypeDescrObject *g_ct_voidp, *g_ct_chararray; static PyObject *build_primitive_type(int num); /* forward */ @@ 
-16,33 +16,49 @@ (all_primitives[num] != NULL ? all_primitives[num] \ : build_primitive_type(num)) +static int _add_to_global_types_dict(PyObject *ct) +{ + if (ct == NULL) + return -1; + return PyDict_SetItemString(global_types_dict, + ((CTypeDescrObject *)ct)->ct_name, ct); +} + static int init_global_types_dict(PyObject *ffi_type_dict) { int err; - PyObject *ct, *ct2, *pnull; + PyObject *ct_void, *ct_char, *ct2, *pnull; + /* XXX some leaks in case these functions fail, but well, + MemoryErrors during importing an extension module are kind + of bad anyway */ global_types_dict = PyDict_New(); if (global_types_dict == NULL) return -1; - ct = get_primitive_type(_CFFI_PRIM_VOID); // 'void' - if (ct == NULL) + ct_void = get_primitive_type(_CFFI_PRIM_VOID); // 'void' + if (_add_to_global_types_dict(ct_void) < 0) return -1; - if (PyDict_SetItemString(global_types_dict, - ((CTypeDescrObject *)ct)->ct_name, ct) < 0) { + + ct2 = new_pointer_type((CTypeDescrObject *)ct_void); // 'void *' + if (_add_to_global_types_dict(ct2) < 0) return -1; - } - ct2 = new_pointer_type((CTypeDescrObject *)ct); // 'void *' - if (ct2 == NULL) - return -1; - if (PyDict_SetItemString(global_types_dict, - ((CTypeDescrObject *)ct2)->ct_name, ct2) < 0) { - Py_DECREF(ct2); - return -1; - } g_ct_voidp = (CTypeDescrObject *)ct2; - pnull = new_simple_cdata(NULL, (CTypeDescrObject *)ct2); + ct_char = get_primitive_type(_CFFI_PRIM_CHAR); // 'char' + if (_add_to_global_types_dict(ct_char) < 0) + return -1; + + ct2 = new_pointer_type((CTypeDescrObject *)ct_char); // 'char *' + if (_add_to_global_types_dict(ct2) < 0) + return -1; + + ct2 = new_array_type((CTypeDescrObject *)ct2, -1); // 'char[]' + if (_add_to_global_types_dict(ct2) < 0) + return -1; + g_ct_chararray = (CTypeDescrObject *)ct2; + + pnull = new_simple_cdata(NULL, g_ct_voidp); if (pnull == NULL) return -1; err = PyDict_SetItemString(ffi_type_dict, "NULL", pnull); diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ 
b/c/_cffi_backend.c @@ -5359,21 +5359,11 @@ return 0; } -static PyObject *b_from_buffer(PyObject *self, PyObject *args) -{ - CTypeDescrObject *ct; +static PyObject *direct_from_buffer(CTypeDescrObject *ct, PyObject *x) +{ CDataObject *cd; - PyObject *x; Py_buffer *view; - if (!PyArg_ParseTuple(args, "O!O", &CTypeDescr_Type, &ct, &x)) - return NULL; - - if (!(ct->ct_flags & CT_IS_UNSIZED_CHAR_A)) { - PyErr_Format(PyExc_TypeError, "needs 'char[]', got '%s'", ct->ct_name); - return NULL; - } - if (invalid_input_buffer_type(x)) { PyErr_SetString(PyExc_TypeError, "from_buffer() cannot return the address of the " @@ -5407,6 +5397,21 @@ return NULL; } +static PyObject *b_from_buffer(PyObject *self, PyObject *args) +{ + CTypeDescrObject *ct; + PyObject *x; + + if (!PyArg_ParseTuple(args, "O!O", &CTypeDescr_Type, &ct, &x)) + return NULL; + + if (!(ct->ct_flags & CT_IS_UNSIZED_CHAR_A)) { + PyErr_Format(PyExc_TypeError, "needs 'char[]', got '%s'", ct->ct_name); + return NULL; + } + return direct_from_buffer(ct, x); +} + static PyObject *b__get_types(PyObject *self, PyObject *noarg) { return PyTuple_Pack(2, (PyObject *)&CData_Type, From noreply at buildbot.pypy.org Sun Apr 26 11:29:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 11:29:08 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: ffi.gc(), copied straight from zeffir Message-ID: <20150426092908.B33B61C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1841:e0ca28cfacbd Date: 2015-04-26 11:29 +0200 http://bitbucket.org/cffi/cffi/changeset/e0ca28cfacbd/ Log: ffi.gc(), copied straight from zeffir diff --git a/_cffi1/cffi1_module.c b/_cffi1/cffi1_module.c --- a/_cffi1/cffi1_module.c +++ b/_cffi1/cffi1_module.c @@ -10,6 +10,7 @@ #include "ffi_obj.c" #include "cglob.c" +#include "cgc.c" #include "lib_obj.c" diff --git a/_cffi1/cgc.c b/_cffi1/cgc.c new file mode 100644 --- /dev/null +++ b/_cffi1/cgc.c @@ -0,0 +1,80 @@ + +/* translated to C from cffi/gc_weakref.py */ + + 
+static PyObject *const_name_pop; + +static PyObject *gc_wref_remove(PyObject *ffi_wref_data, PyObject *arg) +{ + PyObject *destructor, *cdata, *x; + PyObject *res = PyObject_CallMethodObjArgs(ffi_wref_data, + const_name_pop, arg, NULL); + if (res == NULL) + return NULL; + + assert(PyTuple_Check(res)); + destructor = PyTuple_GET_ITEM(res, 0); + cdata = PyTuple_GET_ITEM(res, 1); + x = PyObject_CallFunctionObjArgs(destructor, cdata, NULL); + Py_DECREF(res); + if (x == NULL) + return NULL; + Py_DECREF(x); + + Py_INCREF(Py_None); + return Py_None; +} + +static PyMethodDef remove_callback = { + "gc_wref_remove", (PyCFunction)gc_wref_remove, METH_O +}; + +static PyObject *gc_weakrefs_build(FFIObject *ffi, CDataObject *cd, + PyObject *destructor) +{ + PyObject *new_cdata, *ref = NULL, *tup = NULL; + + if (ffi->gc_wrefs == NULL) { + /* initialize */ + PyObject *data; + + if (const_name_pop == NULL) { + const_name_pop = PyString_InternFromString("pop"); + if (const_name_pop == NULL) + return NULL; + } + data = PyDict_New(); + if (data == NULL) + return NULL; + ffi->gc_wrefs = PyCFunction_New(&remove_callback, data); + Py_DECREF(data); + if (ffi->gc_wrefs == NULL) + return NULL; + } + + new_cdata = do_cast(cd->c_type, (PyObject *)cd); + if (new_cdata == NULL) + goto error; + + ref = PyWeakref_NewRef(new_cdata, ffi->gc_wrefs); + if (ref == NULL) + goto error; + + tup = PyTuple_Pack(2, destructor, cd); + if (tup == NULL) + goto error; + + /* the 'self' of the function 'gc_wrefs' is actually the data dict */ + if (PyDict_SetItem(PyCFunction_GET_SELF(ffi->gc_wrefs), ref, tup) < 0) + goto error; + + Py_DECREF(tup); + Py_DECREF(ref); + return new_cdata; + + error: + Py_XDECREF(new_cdata); + Py_XDECREF(ref); + Py_XDECREF(tup); + return NULL; +} diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -546,8 +546,15 @@ return direct_from_buffer(g_ct_chararray, arg); } -#if 0 -static PyObject *ffi_gc(ZefFFIObject *self, PyObject *args) 
+PyDoc_STRVAR(ffi_gc_doc, +"Return a new cdata object that points to the same data.\n" +"Later, when this new cdata object is garbage-collected,\n" +"'destructor(old_cdata_object)' will be called."); + +static PyObject *gc_weakrefs_build(FFIObject *ffi, CDataObject *cd, + PyObject *destructor); /* forward */ + +static PyObject *ffi_gc(FFIObject *self, PyObject *args) { CDataObject *cd; PyObject *destructor; @@ -557,7 +564,6 @@ return gc_weakrefs_build(self, cd, destructor); } -#endif PyDoc_STRVAR(ffi_callback_doc, "Return a callback object or a decorator making such a callback object.\n" @@ -731,9 +737,7 @@ {"cast", (PyCFunction)ffi_cast, METH_VARARGS, ffi_cast_doc}, {"from_buffer",(PyCFunction)ffi_from_buffer,METH_O, ffi_from_buffer_doc}, {"from_handle",(PyCFunction)ffi_from_handle,METH_O, ffi_from_handle_doc}, -#if 0 - {"gc", (PyCFunction)ffi_gc, METH_VARARGS}, -#endif + {"gc", (PyCFunction)ffi_gc, METH_VARARGS, ffi_gc_doc}, {"getctype", (PyCFunction)ffi_getctype, METH_VARARGS, ffi_getctype_doc}, #ifdef MS_WIN32 {"getwinerror",(PyCFunction)ffi_getwinerror,METH_VARARGS, ffi_getwinerror_doc}, From noreply at buildbot.pypy.org Sun Apr 26 11:37:07 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 11:37:07 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Move and fix the test Message-ID: <20150426093707.3077C1C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1842:3919dd27269f Date: 2015-04-26 11:37 +0200 http://bitbucket.org/cffi/cffi/changeset/3919dd27269f/ Log: Move and fix the test diff --git a/_cffi1/test_new_ffi_1.py b/_cffi1/test_new_ffi_1.py --- a/_cffi1/test_new_ffi_1.py +++ b/_cffi1/test_new_ffi_1.py @@ -1278,26 +1278,6 @@ sz = ffi.sizeof("long") assert cb((1 << (sz*8-1)) - 1, -10) == 42 - def test_unique_types(self): - xxxx - ffi1 = FFI(backend=self.Backend()) - ffi2 = FFI(backend=self.Backend()) - assert ffi1.typeof("char") is ffi2.typeof("char ") - assert ffi1.typeof("long") is ffi2.typeof("signed 
long int") - assert ffi1.typeof("double *") is ffi2.typeof("double*") - assert ffi1.typeof("int ***") is ffi2.typeof(" int * * *") - assert ffi1.typeof("int[]") is ffi2.typeof("signed int[]") - assert ffi1.typeof("signed int*[17]") is ffi2.typeof("int *[17]") - assert ffi1.typeof("void") is ffi2.typeof("void") - assert ffi1.typeof("int(*)(int,int)") is ffi2.typeof("int(*)(int,int)") - # - # these depend on user-defined data, so should not be shared - assert ffi1.typeof("struct foo") is not ffi2.typeof("struct foo") - assert ffi1.typeof("union foo *") is not ffi2.typeof("union foo*") - assert ffi1.typeof("enum foo") is not ffi2.typeof("enum foo") - # sanity check: twice 'ffi1' - assert ffi1.typeof("struct foo*") is ffi1.typeof("struct foo *") - def test_anonymous_enum(self): # typedef enum { Value0 = 0 } e_t, *pe_t; assert ffi.getctype("e_t*") == 'e_t *' diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -374,3 +374,29 @@ "typedef enum { AA=%d } e1;" % sys.maxint) assert lib.AA == sys.maxint assert ffi.sizeof("e1") == ffi.sizeof("long") + +def test_unique_types(): + CDEF = "struct foo_s; union foo_u; enum foo_e { AA };" + ffi1 = FFI(); ffi1.cdef(CDEF); verify(ffi1, "test_unique_types_1", CDEF) + ffi2 = FFI(); ffi2.cdef(CDEF); verify(ffi2, "test_unique_types_2", CDEF) + # + assert ffi1.typeof("char") is ffi2.typeof("char ") + assert ffi1.typeof("long") is ffi2.typeof("signed long int") + assert ffi1.typeof("double *") is ffi2.typeof("double*") + assert ffi1.typeof("int ***") is ffi2.typeof(" int * * *") + assert ffi1.typeof("int[]") is ffi2.typeof("signed int[]") + assert ffi1.typeof("signed int*[17]") is ffi2.typeof("int *[17]") + assert ffi1.typeof("void") is ffi2.typeof("void") + assert ffi1.typeof("int(*)(int,int)") is ffi2.typeof("int(*)(int,int)") + # + # these depend on user-defined data, so should not be shared + for name in ["struct foo_s", + "union foo_u *", + "enum foo_e", + 
"struct foo_s *(*)()", + "void(*)(struct foo_s *)", + "struct foo_s *(*[5])[8]", + ]: + assert ffi1.typeof(name) is not ffi2.typeof(name) + # sanity check: twice 'ffi1' + assert ffi1.typeof("struct foo_s*") is ffi1.typeof("struct foo_s *") From noreply at buildbot.pypy.org Sun Apr 26 11:51:46 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 11:51:46 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: small refactoring ending up with support for SF_PACKED structures Message-ID: <20150426095146.4685F1C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1843:fd13bd62a49a Date: 2015-04-26 11:52 +0200 http://bitbucket.org/cffi/cffi/changeset/fd13bd62a49a/ Log: small refactoring ending up with support for SF_PACKED structures diff --git a/_cffi1/cffi_opcode.py b/_cffi1/cffi_opcode.py --- a/_cffi1/cffi_opcode.py +++ b/_cffi1/cffi_opcode.py @@ -97,6 +97,10 @@ 'ssize_t': PRIM_SSIZE, } +F_UNION = 0x01 +F_CHECK_FIELDS = 0x02 +F_PACKED = 0x04 + CLASS_NAME = {} for _name, _value in globals().items(): if _name.startswith('OP_') and isinstance(_value, int): diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -692,7 +692,8 @@ if (!CTypeDescr_Check(x)) goto bad_usage; types[i] = x; - struct_unions[i].flags = ((CTypeDescrObject *)x)->ct_flags & CT_UNION; + struct_unions[i].flags = ((CTypeDescrObject *)x)->ct_flags & CT_UNION ? 
+ _CFFI_F_UNION : 0; struct_unions[i].size = (size_t)-2; struct_unions[i].alignment = -2; } diff --git a/_cffi1/parse_c_type.c b/_cffi1/parse_c_type.c --- a/_cffi1/parse_c_type.c +++ b/_cffi1/parse_c_type.c @@ -628,7 +628,7 @@ int n = search_in_struct_unions(tok->info->ctx, tok->p, tok->size); if (n < 0) return parse_error(tok, "undefined struct/union name"); - if (((tok->info->ctx->struct_unions[n].flags & CT_UNION) != 0) + if (((tok->info->ctx->struct_unions[n].flags & _CFFI_F_UNION) != 0) ^ (kind == TOK_UNION)) return parse_error(tok, "wrong kind of tag: struct vs union"); diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -69,18 +69,16 @@ struct _cffi_struct_union_s { const char *name; int type_index; // -> _cffi_types, on a OP_STRUCT_UNION - int flags; // CT_UNION? CT_CUSTOM_FIELD_POS? + int flags; // _CFFI_F_* flags below size_t size; int alignment; int first_field_index; // -> _cffi_fields array int num_fields; }; -#ifdef _CFFI_INTERNAL -#define CT_UNION 128 -#define CT_CUSTOM_FIELD_POS 32768 -/* ^^^ if not CUSTOM_FIELD_POS, complain if fields are not in the - "standard layout" and/or if some are missing */ -#endif +#define _CFFI_F_UNION 0x01 // is a union, not a struct +#define _CFFI_F_CHECK_FIELDS 0x02 // complain if fields are not in the + // "standard layout" or if some are missing +#define _CFFI_F_PACKED 0x04 // for CHECK_FIELDS, assume a packed struct struct _cffi_field_s { const char *name; diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -404,10 +404,10 @@ Py_INCREF(x); } else { - int flags = (s->flags & CT_UNION) ? CT_UNION : CT_STRUCT; + int flags = (s->flags & _CFFI_F_UNION) ? CT_UNION : CT_STRUCT; char *name = alloca(8 + strlen(s->name)); _realize_name(name, - (s->flags & CT_UNION) ? "union " : "struct ", + (s->flags & _CFFI_F_UNION) ? 
"union " : "struct ", s->name); x = new_struct_or_union_type(name, flags); @@ -683,7 +683,11 @@ PyList_SET_ITEM(fields, i, f); } - int sflags = (s->flags & CT_CUSTOM_FIELD_POS) ? 0 : SF_STD_FIELD_POS; + int sflags = 0; + if (s->flags & _CFFI_F_CHECK_FIELDS) + sflags |= SF_STD_FIELD_POS; + if (s->flags & _CFFI_F_PACKED) + sflags |= SF_PACKED; PyObject *args = Py_BuildValue("(OOOnni)", ct, fields, Py_None, @@ -696,7 +700,6 @@ ct->ct_extra = NULL; ct->ct_flags |= CT_IS_OPAQUE; - ct->ct_flags &= ~CT_CUSTOM_FIELD_POS; PyObject *res = b_complete_struct_or_union(NULL, args); ct->ct_flags &= ~CT_IS_OPAQUE; Py_DECREF(args); diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -456,11 +456,16 @@ def _struct_ctx(self, tp, cname, approxname): type_index = self._typesdict[tp] - flags = 0 + flags = [] + if isinstance(tp, model.UnionType): + flags.append("_CFFI_F_UNION") if tp.partial or tp.has_anonymous_struct_fields(): - flags |= 32768 # CT_CUSTOM_FIELD_POS - if isinstance(tp, model.UnionType): - flags |= 128 # CT_UNION + pass # the field layout is obtained silently from the C compiler + else: + flags.append("_CFFI_F_CHECK_FIELDS") + if tp.packed: + flags.append("_CFFI_F_PACKED") + flags = '|'.join(flags) or '0' if tp.fldtypes is not None: c_field = [approxname] enumfields = list(tp.enumfields()) @@ -502,7 +507,7 @@ else: size_align = ' (size_t)-1, -1, -1, 0 /* opaque */ },' self._lsts["struct_union"].append( - ' { "%s", %d, 0x%x,' % (tp.name, type_index, flags) + size_align) + ' { "%s", %d, %s,' % (tp.name, type_index, flags) + size_align) self._seen_struct_unions.add(tp) def _add_missing_struct_unions(self): diff --git a/_cffi1/test_new_ffi_1.py b/_cffi1/test_new_ffi_1.py --- a/_cffi1/test_new_ffi_1.py +++ b/_cffi1/test_new_ffi_1.py @@ -75,9 +75,9 @@ ffi1.cdef(DEFS) ffi1.cdef(DEFS_PACKED, packed=True) - outputfilename = recompile(ffi1, "test_old_ffi1", CCODE, + outputfilename = recompile(ffi1, "test_new_ffi_1", 
CCODE, tmpdir=str(udir)) - module = imp.load_dynamic("test_old_ffi1", outputfilename) + module = imp.load_dynamic("test_new_ffi_1", outputfilename) ffi = module.ffi diff --git a/_cffi1/test_parse_c_type.py b/_cffi1/test_parse_c_type.py --- a/_cffi1/test_parse_c_type.py +++ b/_cffi1/test_parse_c_type.py @@ -36,7 +36,7 @@ ctx_structs = ffi.new("struct _cffi_struct_union_s[]", len(struct_names)) for _i in range(len(struct_names)): ctx_structs[_i].name = c_struct_names[_i] -ctx_structs[3].flags = lib.CT_UNION +ctx_structs[3].flags = lib._CFFI_F_UNION ctx.struct_unions = ctx_structs ctx.num_struct_unions = len(struct_names) From noreply at buildbot.pypy.org Sun Apr 26 11:56:12 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 11:56:12 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: test skips Message-ID: <20150426095612.7C7C31C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1844:71537992434b Date: 2015-04-26 11:56 +0200 http://bitbucket.org/cffi/cffi/changeset/71537992434b/ Log: test skips diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -2079,6 +2079,7 @@ assert ffi.getwinerror()[0] == n def test_verify_dlopen_flags(): + py.test.xfail("dlopen flags") # Careful with RTLD_GLOBAL. If by chance the FFI is not deleted # promptly, like on PyPy, then other tests may see the same # exported symbols as well. 
So we must not export a simple name @@ -2157,13 +2158,6 @@ assert ord(outbuf[n]) == 0 assert ord(outbuf[0]) < 128 # should be a letter, or '\' -def test_use_local_dir(): - ffi = FFI() - lib = ffi.verify("", modulename="test_use_local_dir") - this_dir = os.path.dirname(__file__) - pycache_files = os.listdir(os.path.join(this_dir, '__pycache__')) - assert any('test_use_local_dir' in s for s in pycache_files) - def test_define_known_value(): ffi = FFI() ffi.cdef("#define FOO 0x123") @@ -2173,5 +2167,5 @@ def test_define_wrong_value(): ffi = FFI() ffi.cdef("#define FOO 123") - e = py.test.raises(VerificationError, ffi.verify, "#define FOO 124") - assert str(e.value).endswith("FOO has the real value 124, not 123") + lib = ffi.verify("#define FOO 124") # used to complain + assert lib.FOO == 124 From noreply at buildbot.pypy.org Sun Apr 26 13:49:09 2015 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 26 Apr 2015 13:49:09 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Change some whitespace to make a new revision to test something on the new build slave. Message-ID: <20150426114909.44C651C116B@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3.3 Changeset: r76938:975e8a2af096 Date: 2015-04-26 13:48 +0200 http://bitbucket.org/pypy/pypy/changeset/975e8a2af096/ Log: Change some whitespace to make a new revision to test something on the new build slave. 
diff --git a/rpython/translator/c/src/signals.c b/rpython/translator/c/src/signals.c --- a/rpython/translator/c/src/signals.c +++ b/rpython/translator/c/src/signals.c @@ -95,12 +95,12 @@ #endif if (wakeup_with_nul_byte) { - res = write(wakeup_fd, "\0", 1); + res = write(wakeup_fd, "\0", 1); } else { unsigned char byte = (unsigned char)signum; - res = write(wakeup_fd, &byte, 1); + res = write(wakeup_fd, &byte, 1); } - + /* the return value is ignored here */ } } From noreply at buildbot.pypy.org Sun Apr 26 14:11:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 14:11:50 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Hex/octal numbers in array lengths Message-ID: <20150426121150.4461A1C03F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1845:4d94d2c57548 Date: 2015-04-26 14:11 +0200 http://bitbucket.org/cffi/cffi/changeset/4d94d2c57548/ Log: Hex/octal numbers in array lengths diff --git a/_cffi1/parse_c_type.c b/_cffi1/parse_c_type.c --- a/_cffi1/parse_c_type.c +++ b/_cffi1/parse_c_type.c @@ -69,6 +69,13 @@ return ('0' <= x && x <= '9'); } +static int is_hex_digit(char x) +{ + return (('0' <= x && x <= '9') || + ('A' <= x && x <= 'F') || + ('a' <= x && x <= 'f')); +} + static int is_ident_next(char x) { return (is_ident_first(x) || is_digit(x)); @@ -113,7 +120,9 @@ tok->kind = TOK_INTEGER; tok->p = p; tok->size = 1; - while (is_digit(p[tok->size])) + if (p[1] == 'x' || p[1] == 'X') + tok->size = 2; + while (is_hex_digit(p[tok->size])) tok->size++; return; } @@ -332,15 +341,18 @@ if (tok->kind != TOK_CLOSE_BRACKET) { size_t length; int gindex; + char *endptr; switch (tok->kind) { case TOK_INTEGER: errno = 0; if (sizeof(length) > sizeof(unsigned long)) - length = strtoull(tok->p, NULL, 10); + length = strtoull(tok->p, &endptr, 0); else - length = strtoul(tok->p, NULL, 10); + length = strtoul(tok->p, &endptr, 0); + if (endptr != tok->p + tok->size) + return parse_error(tok, "invalid number"); if (errno == ERANGE 
|| length > MAX_SSIZE_T) return parse_error(tok, "number too large"); break; diff --git a/_cffi1/test_parse_c_type.py b/_cffi1/test_parse_c_type.py --- a/_cffi1/test_parse_c_type.py +++ b/_cffi1/test_parse_c_type.py @@ -316,3 +316,21 @@ assert parse("int[FIVE]") == [Prim(lib._CFFI_PRIM_INT), '->', Array(0), 5] assert parse("int[ZERO]") == [Prim(lib._CFFI_PRIM_INT), '->', Array(0), 0] parse_error("int[NEG]", "expected a positive integer constant", 4) + +def test_various_constant_exprs(): + def array(n): + return [Prim(lib._CFFI_PRIM_CHAR), '->', Array(0), n] + assert parse("char[21]") == array(21) + assert parse("char[0x10]") == array(16) + assert parse("char[0X21]") == array(33) + assert parse("char[0Xb]") == array(11) + assert parse("char[0x1C]") == array(0x1C) + assert parse("char[0xc6]") == array(0xC6) + assert parse("char[010]") == array(8) + assert parse("char[021]") == array(17) + parse_error("char[08]", "invalid number", 5) + parse_error("char[1C]", "invalid number", 5) + parse_error("char[0C]", "invalid number", 5) + # not supported (really obscure): + # "char[+5]" + # "char['A']" From noreply at buildbot.pypy.org Sun Apr 26 15:19:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 15:19:30 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: a leak Message-ID: <20150426131930.EA2DA1C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1846:d708ab8e01c6 Date: 2015-04-26 15:20 +0200 http://bitbucket.org/cffi/cffi/changeset/d708ab8e01c6/ Log: a leak diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -657,6 +657,7 @@ break; default: + Py_DECREF(fields); PyErr_Format(PyExc_NotImplementedError, "field op=%d", (int)_CFFI_GETOP(op)); return -1; @@ -671,8 +672,10 @@ else if (detect_custom_layout(ct, SF_STD_FIELD_POS, ctf->ct_size, fld->field_size, "wrong size for field '", - fld->name, "'") < 0) + fld->name, "'") < 0) { + Py_DECREF(fields); 
return -1; + } f = Py_BuildValue("(sOin)", fld->name, ctf, fbitsize, (Py_ssize_t)fld->field_offset); From noreply at buildbot.pypy.org Sun Apr 26 15:59:39 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 15:59:39 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: xfail the last two failures Message-ID: <20150426135939.957631C116B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1848:798a6d0fee0d Date: 2015-04-26 16:00 +0200 http://bitbucket.org/cffi/cffi/changeset/798a6d0fee0d/ Log: xfail the last two failures diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -1645,6 +1645,8 @@ assert repr(lib.fooarray).startswith(" Author: Armin Rigo Branch: cffi-1.0 Changeset: r1847:1e0789c75797 Date: 2015-04-26 15:56 +0200 http://bitbucket.org/cffi/cffi/changeset/1e0789c75797/ Log: FILE in the recompiler diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -409,6 +409,9 @@ _realize_name(name, (s->flags & _CFFI_F_UNION) ? 
"union " : "struct ", s->name); + if (strcmp(name, "struct _IO_FILE") == 0) + flags |= CT_IS_FILE; + x = new_struct_or_union_type(name, flags); CTypeDescrObject *ct = NULL; diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -308,10 +308,13 @@ def _generate_cpy_typedef_decl(self, tp, name): pass - def _generate_cpy_typedef_ctx(self, tp, name): + def _typedef_ctx(self, tp, name): type_index = self._typesdict[tp] self._lsts["typename"].append( ' { "%s", %d },' % (name, type_index)) + + def _generate_cpy_typedef_ctx(self, tp, name): + self._typedef_ctx(tp, name) if getattr(tp, "origin", None) == "unknown_type": self._struct_ctx(tp, tp.name, approxname=None) elif isinstance(tp, model.NamedPointerType): @@ -521,8 +524,15 @@ raise NotImplementedError("internal inconsistency: %r is " "partial but was not seen at " "this point" % (tp,)) - assert tp.name.startswith('$') and tp.name[1:].isdigit() - self._struct_ctx(tp, None, tp.name[1:]) + if tp.name.startswith('$') and tp.name[1:].isdigit(): + approxname = tp.name[1:] + elif tp.name == '_IO_FILE' and tp.forcename == 'FILE': + approxname = 'FILE' + self._typedef_ctx(tp, 'FILE') + else: + raise NotImplementedError("internal inconsistency: %r" % + (tp,)) + self._struct_ctx(tp, None, approxname) def _fix_final_field_list(self, lst): count = 0 From noreply at buildbot.pypy.org Sun Apr 26 16:14:44 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 16:14:44 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Don't emit _CFFI_F_CHECK_FIELDS on opaque structs (it is ignored anyway but confusing) Message-ID: <20150426141444.D1D401C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1849:8bb6d2089f42 Date: 2015-04-26 16:15 +0200 http://bitbucket.org/cffi/cffi/changeset/8bb6d2089f42/ Log: Don't emit _CFFI_F_CHECK_FIELDS on opaque structs (it is ignored anyway but confusing) diff --git a/_cffi1/recompiler.py 
b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -462,7 +462,9 @@ flags = [] if isinstance(tp, model.UnionType): flags.append("_CFFI_F_UNION") - if tp.partial or tp.has_anonymous_struct_fields(): + if tp.fldtypes is None: + pass # opaque + elif tp.partial or tp.has_anonymous_struct_fields(): pass # the field layout is obtained silently from the C compiler else: flags.append("_CFFI_F_CHECK_FIELDS") From noreply at buildbot.pypy.org Sun Apr 26 16:17:22 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 16:17:22 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: complain clearly if an ffi.include() was used Message-ID: <20150426141722.AAD031C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1850:87eaba8629ad Date: 2015-04-26 16:17 +0200 http://bitbucket.org/cffi/cffi/changeset/87eaba8629ad/ Log: complain clearly if an ffi.include() was used diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -8,6 +8,9 @@ def __init__(self, ffi, module_name): self.ffi = ffi self.module_name = module_name + # + if ']' in self.ffi._cdefsources: + raise NotImplementedError("ffi.include()") def collect_type_table(self): self._typesdict = {} From noreply at buildbot.pypy.org Sun Apr 26 16:46:54 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 16:46:54 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Move the demos from _cffi1/ back in the demo/ directory. Add the Message-ID: <20150426144654.1CB2F1C03F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1851:3a8606fab574 Date: 2015-04-26 16:47 +0200 http://bitbucket.org/cffi/cffi/changeset/3a8606fab574/ Log: Move the demos from _cffi1/ back in the demo/ directory. Add the cffi-1.0 version of the _curses demo too. 
diff --git a/_cffi1/bsdopendirtype.py b/_cffi1/bsdopendirtype.py deleted file mode 100644 --- a/_cffi1/bsdopendirtype.py +++ /dev/null @@ -1,48 +0,0 @@ -from _bsdopendirtype import ffi, lib - - -def _posix_error(): - raise OSError(ffi.errno, os.strerror(ffi.errno)) - -_dtype_to_smode = { - lib.DT_BLK: 0o060000, - lib.DT_CHR: 0o020000, - lib.DT_DIR: 0o040000, - lib.DT_FIFO: 0o010000, - lib.DT_LNK: 0o120000, - lib.DT_REG: 0o100000, - lib.DT_SOCK: 0o140000, -} - -def opendir(dir): - if len(dir) == 0: - dir = '.' - dirname = dir - if not dirname.endswith('/'): - dirname += '/' - dirp = lib.opendir(dir) - if dirp == ffi.NULL: - raise _posix_error() - try: - while True: - ffi.errno = 0 - dirent = lib.readdir(dirp) - if dirent == ffi.NULL: - if ffi.errno != 0: - raise _posix_error() - return - name = ffi.string(dirent.d_name) - if name == '.' or name == '..': - continue - name = dirname + name - try: - smode = _dtype_to_smode[dirent.d_type] - except KeyError: - smode = os.lstat(name).st_mode - yield name, smode - finally: - lib.closedir(dirp) - -if __name__ == '__main__': - for name, smode in opendir('/tmp'): - print hex(smode), name diff --git a/_cffi1/readdir2.py b/_cffi1/readdir2.py deleted file mode 100644 --- a/_cffi1/readdir2.py +++ /dev/null @@ -1,33 +0,0 @@ -# A Linux-only demo, using verify() instead of hard-coding the exact layouts -# -import sys -from _readdir2 import ffi, lib - -if not sys.platform.startswith('linux'): - raise Exception("Linux-only demo") - - -def walk(basefd, path): - print '{', path - dirfd = lib.openat(basefd, path, 0) - if dirfd < 0: - # error in openat() - return - dir = lib.fdopendir(dirfd) - dirent = ffi.new("struct dirent *") - result = ffi.new("struct dirent **") - while True: - if lib.readdir_r(dir, dirent, result): - # error in readdir_r() - break - if result[0] == ffi.NULL: - break - name = ffi.string(dirent.d_name) - print '%3d %s' % (dirent.d_type, name) - if dirent.d_type == lib.DT_DIR and name != '.' 
and name != '..': - walk(dirfd, name) - lib.closedir(dir) - print '}' - - -walk(-1, "/tmp") diff --git a/demo/_curses.py b/demo/_curses.py --- a/demo/_curses.py +++ b/demo/_curses.py @@ -1,265 +1,1075 @@ -"""Very partial replacement of the standard extension module '_curses'. -Just contains the minimal amount of stuff to make one of my curses -programs run. XXX should also check for and report errors. -""" -from cffi import FFI +"""Reimplementation of the standard extension module '_curses' using cffi.""" -ffi = FFI() +import sys +from functools import wraps +from _curses_cffi import ffi, lib -ffi.cdef(""" -typedef ... WINDOW; -typedef unsigned char bool; -typedef unsigned long chtype; -static const int ERR, OK; -WINDOW *initscr(void); -int endwin(void); -bool isendwin(void); - -const char *keyname(int c); -static const int KEY_MIN, KEY_MAX; - -int setupterm(char *term, int fildes, int *errret); - -int tigetflag(char *); -int tigetnum(char *); -char *tigetstr(char *); -char *tparm (const char *, ...); - -int cbreak(void); -int nocbreak(void); -int echo(void); -int noecho(void); -int keypad(WINDOW *win, bool bf); -int notimeout(WINDOW *win, bool bf); -void wtimeout(WINDOW *win, int delay); - -int def_prog_mode(void); -int def_shell_mode(void); -int reset_prog_mode(void); -int reset_shell_mode(void); -int resetty(void); -int savetty(void); -void getsyx(int y, int x); -void setsyx(int y, int x); -//int ripoffline(int line, int (*init)(WINDOW *, int)); -int curs_set(int visibility); -int napms(int ms); - -int start_color(void); -int init_pair(short pair, short f, short b); -int init_color(short color, short r, short g, short b); -bool has_colors(void); -bool can_change_color(void); -int color_content(short color, short *r, short *g, short *b); -int pair_content(short pair, short *f, short *b); - -int use_default_colors(void); - -static const int COLOR_BLACK; -static const int COLOR_RED; -static const int COLOR_GREEN; -static const int COLOR_YELLOW; -static const int 
COLOR_BLUE; -static const int COLOR_MAGENTA; -static const int COLOR_CYAN; -static const int COLOR_WHITE; - -static const int A_ATTRIBUTES; -static const int A_NORMAL; -static const int A_STANDOUT; -static const int A_UNDERLINE; -static const int A_REVERSE; -static const int A_BLINK; -static const int A_DIM; -static const int A_BOLD; -static const int A_ALTCHARSET; -static const int A_INVIS; -static const int A_PROTECT; -static const int A_CHARTEXT; -static const int A_COLOR; - -int COLORS, COLOR_PAIRS; - -void _m_getyx(WINDOW *win, int yx[2]); -void _m_getparyx(WINDOW *win, int yx[2]); -void _m_getbegyx(WINDOW *win, int yx[2]); -void _m_getmaxyx(WINDOW *win, int yx[2]); - -int wclear(WINDOW *win); -int wclrtoeol(WINDOW *win); -int wmove(WINDOW *win, int y, int x); -int waddstr(WINDOW *win, const char *str); -int mvwaddstr(WINDOW *win, int y, int x, const char *str); -void wbkgdset(WINDOW *win, chtype ch); -int wrefresh(WINDOW *win); -int wgetch(WINDOW *win); - -int getattrs(WINDOW *win); -int wattrset(WINDOW *win, int attrs); -""") - - -lib = ffi.verify(""" -#include -#include - -void _m_getyx(WINDOW *win, int yx[2]) { - getyx(win, yx[0], yx[1]); -} -void _m_getparyx(WINDOW *win, int yx[2]) { - getparyx(win, yx[0], yx[1]); -} -void _m_getbegyx(WINDOW *win, int yx[2]) { - getbegyx(win, yx[0], yx[1]); -} -void _m_getmaxyx(WINDOW *win, int yx[2]) { - getmaxyx(win, yx[0], yx[1]); -} -""", - libraries=['ncurses']) +def _copy_to_globals(name): + globals()[name] = getattr(lib, name) def _setup(): - globals().update(lib.__dict__) - for key in range(KEY_MIN, KEY_MAX): - key_n = keyname(key) - if key_n == ffi.NULL or ffi.string(key_n) == "UNKNOWN KEY": - continue - key_n = ffi.string(key_n).replace('(', '').replace(')', '') - globals()[key_n] = key + for name in ['ERR', 'OK', 'KEY_MIN', 'KEY_MAX', + 'A_ATTRIBUTES', 'A_NORMAL', 'A_STANDOUT', 'A_UNDERLINE', + 'A_REVERSE', 'A_BLINK', 'A_DIM', 'A_BOLD', 'A_ALTCHARSET', + 'A_PROTECT', 'A_CHARTEXT', 'A_COLOR', + 'COLOR_BLACK', 
'COLOR_RED', 'COLOR_GREEN', 'COLOR_YELLOW', + 'COLOR_BLUE', 'COLOR_MAGENTA', 'COLOR_CYAN', 'COLOR_WHITE', + ]: + _copy_to_globals(name) + + if not lib._m_NetBSD: + _copy_to_globals('A_INVIS') + + for name in ['A_HORIZONTAL', 'A_LEFT', 'A_LOW', 'A_RIGHT', 'A_TOP', + 'A_VERTICAL', + ]: + if hasattr(lib, name): + _copy_to_globals(name) + + if lib._m_NCURSES_MOUSE_VERSION: + for name in ["BUTTON1_PRESSED", "BUTTON1_RELEASED", "BUTTON1_CLICKED", + "BUTTON1_DOUBLE_CLICKED", "BUTTON1_TRIPLE_CLICKED", + "BUTTON2_PRESSED", "BUTTON2_RELEASED", "BUTTON2_CLICKED", + "BUTTON2_DOUBLE_CLICKED", "BUTTON2_TRIPLE_CLICKED", + "BUTTON3_PRESSED", "BUTTON3_RELEASED", "BUTTON3_CLICKED", + "BUTTON3_DOUBLE_CLICKED", "BUTTON3_TRIPLE_CLICKED", + "BUTTON4_PRESSED", "BUTTON4_RELEASED", "BUTTON4_CLICKED", + "BUTTON4_DOUBLE_CLICKED", "BUTTON4_TRIPLE_CLICKED", + "BUTTON_SHIFT", "BUTTON_CTRL", "BUTTON_ALT", + "ALL_MOUSE_EVENTS", "REPORT_MOUSE_POSITION", + ]: + _copy_to_globals(name) + + if not lib._m_NetBSD: + for key in range(lib.KEY_MIN, lib.KEY_MAX): + key_n = lib.keyname(key) + if key_n == ffi.NULL: + continue + key_n = ffi.string(key_n) + if key_n == b"UNKNOWN KEY": + continue + if not isinstance(key_n, str): # python 3 + key_n = key_n.decode() + key_n = key_n.replace('(', '').replace(')', '') + globals()[key_n] = key _setup() +# Do we want this? 
+# version = "2.2" +# __version__ = "2.2" + + # ____________________________________________________________ + +_initialised_setupterm = False +_initialised = False +_initialised_color = False + + +def _ensure_initialised_setupterm(): + if not _initialised_setupterm: + raise error("must call (at least) setupterm() first") + + +def _ensure_initialised(): + if not _initialised: + raise error("must call initscr() first") + + +def _ensure_initialised_color(): + if not _initialised and _initialised_color: + raise error("must call start_color() first") + + +def _check_ERR(code, fname): + if code != lib.ERR: + return None + elif fname is None: + raise error("curses function returned ERR") + else: + raise error("%s() returned ERR" % (fname,)) + + +def _check_NULL(rval): + if rval == ffi.NULL: + raise error("curses function returned NULL") + return rval + + +def _call_lib(method_name, *args): + return getattr(lib, method_name)(*args) + + +def _call_lib_check_ERR(method_name, *args): + return _check_ERR(_call_lib(method_name, *args), method_name) + + +def _mk_no_return(method_name): + def _execute(): + _ensure_initialised() + return _call_lib_check_ERR(method_name) + _execute.__name__ = method_name + return _execute + + +def _mk_flag_func(method_name): + # This is in the CPython implementation, but not documented anywhere. + # We have to support it, though, even if it make me sad. 
+ def _execute(flag=True): + _ensure_initialised() + if flag: + return _call_lib_check_ERR(method_name) + else: + return _call_lib_check_ERR('no' + method_name) + _execute.__name__ = method_name + return _execute + + +def _mk_return_val(method_name): + def _execute(): + return _call_lib(method_name) + _execute.__name__ = method_name + return _execute + + +def _mk_w_getyx(method_name): + def _execute(self): + y = _call_lib(method_name + 'y', self._win) + x = _call_lib(method_name + 'x', self._win) + return (y, x) + _execute.__name__ = method_name + return _execute + + +def _mk_w_no_return(method_name): + def _execute(self, *args): + return _call_lib_check_ERR(method_name, self._win, *args) + _execute.__name__ = method_name + return _execute + + +def _mk_w_return_val(method_name): + def _execute(self, *args): + return _call_lib(method_name, self._win, *args) + _execute.__name__ = method_name + return _execute + + +def _chtype(ch): + return int(ffi.cast("chtype", ch)) + +def _texttype(text): + if isinstance(text, str): + return text + elif isinstance(text, unicode): + return str(text) # default encoding + else: + raise TypeError("str or unicode expected, got a '%s' object" + % (type(text).__name__,)) + + +def _extract_yx(args): + if len(args) >= 2: + return (args[0], args[1], args[2:]) + return (None, None, args) + + +def _process_args(funcname, args, count, optcount, frontopt=0): + outargs = [] + if frontopt: + if len(args) > count + optcount: + # We have the front optional args here. + outargs.extend(args[:frontopt]) + args = args[frontopt:] + else: + # No front optional args, so make them None. 
+ outargs.extend([None] * frontopt) + if (len(args) < count) or (len(args) > count + optcount): + raise error("%s requires %s to %s arguments" % ( + funcname, count, count + optcount + frontopt)) + outargs.extend(args) + return outargs + + +def _argspec(count, optcount=0, frontopt=0): + def _argspec_deco(func): + @wraps(func) + def _wrapped(self, *args): + outargs = _process_args( + func.__name__, args, count, optcount, frontopt) + return func(self, *outargs) + return _wrapped + return _argspec_deco + + +# ____________________________________________________________ + + class error(Exception): pass + class Window(object): - def __init__(self): - self._window = lib.initscr() + def __init__(self, window): + self._win = window - def getyx(self): - yx = ffi.new("int[2]") - lib._m_getyx(self._window, yx) - return tuple(yx) + def __del__(self): + if self._win != lib.stdscr: + lib.delwin(self._win) - def getparyx(self): - yx = ffi.new("int[2]") - lib._m_getparyx(self._window, yx) - return tuple(yx) + untouchwin = _mk_w_no_return("untouchwin") + touchwin = _mk_w_no_return("touchwin") + redrawwin = _mk_w_no_return("redrawwin") + insertln = _mk_w_no_return("winsertln") + erase = _mk_w_no_return("werase") + deleteln = _mk_w_no_return("wdeleteln") - def getbegyx(self): - yx = ffi.new("int[2]") - lib._m_getbegyx(self._window, yx) - return tuple(yx) + is_wintouched = _mk_w_return_val("is_wintouched") - def getmaxyx(self): - yx = ffi.new("int[2]") - lib._m_getmaxyx(self._window, yx) - return tuple(yx) + syncdown = _mk_w_return_val("wsyncdown") + syncup = _mk_w_return_val("wsyncup") + standend = _mk_w_return_val("wstandend") + standout = _mk_w_return_val("wstandout") + cursyncup = _mk_w_return_val("wcursyncup") + clrtoeol = _mk_w_return_val("wclrtoeol") + clrtobot = _mk_w_return_val("wclrtobot") + clear = _mk_w_return_val("wclear") - def addstr(self, *args): - y = None - attr = None - if len(args) == 1: - text, = args + idcok = _mk_w_no_return("idcok") + immedok = 
_mk_w_no_return("immedok") + timeout = _mk_w_no_return("wtimeout") + + getyx = _mk_w_getyx("getcur") + getbegyx = _mk_w_getyx("getbeg") + getmaxyx = _mk_w_getyx("getmax") + getparyx = _mk_w_getyx("getpar") + + clearok = _mk_w_no_return("clearok") + idlok = _mk_w_no_return("idlok") + leaveok = _mk_w_no_return("leaveok") + notimeout = _mk_w_no_return("notimeout") + scrollok = _mk_w_no_return("scrollok") + insdelln = _mk_w_no_return("winsdelln") + syncok = _mk_w_no_return("syncok") + + mvwin = _mk_w_no_return("mvwin") + mvderwin = _mk_w_no_return("mvderwin") + move = _mk_w_no_return("wmove") + + if not lib._m_STRICT_SYSV_CURSES: + resize = _mk_w_no_return("wresize") + + if lib._m_NetBSD: + keypad = _mk_w_return_val("keypad") + nodelay = _mk_w_return_val("nodelay") + else: + keypad = _mk_w_no_return("keypad") + nodelay = _mk_w_no_return("nodelay") + + @_argspec(1, 1, 2) + def addch(self, y, x, ch, attr=None): + if attr is None: + attr = lib.A_NORMAL + ch = _chtype(ch) + + if y is not None: + code = lib.mvwaddch(self._win, y, x, ch | attr) + else: + code = lib.waddch(self._win, ch | attr) + return _check_ERR(code, "addch") + + @_argspec(1, 1, 2) + def addstr(self, y, x, text, attr=None): + text = _texttype(text) + if attr is not None: + attr_old = lib.getattrs(self._win) + lib.wattrset(self._win, attr) + if y is not None: + code = lib.mvwaddstr(self._win, y, x, text) + else: + code = lib.waddstr(self._win, text) + if attr is not None: + lib.wattrset(self._win, attr_old) + return _check_ERR(code, "addstr") + + @_argspec(2, 1, 2) + def addnstr(self, y, x, text, n, attr=None): + text = _texttype(text) + if attr is not None: + attr_old = lib.getattrs(self._win) + lib.wattrset(self._win, attr) + if y is not None: + code = lib.mvwaddnstr(self._win, y, x, text, n) + else: + code = lib.waddnstr(self._win, text, n) + if attr is not None: + lib.wattrset(self._win, attr_old) + return _check_ERR(code, "addnstr") + + def bkgd(self, ch, attr=None): + if attr is None: + attr = 
lib.A_NORMAL + return _check_ERR(lib.wbkgd(self._win, _chtype(ch) | attr), "bkgd") + + attroff = _mk_w_no_return("wattroff") + attron = _mk_w_no_return("wattron") + attrset = _mk_w_no_return("wattrset") + + def bkgdset(self, ch, attr=None): + if attr is None: + attr = lib.A_NORMAL + lib.wbkgdset(self._win, _chtype(ch) | attr) + return None + + def border(self, ls=0, rs=0, ts=0, bs=0, tl=0, tr=0, bl=0, br=0): + lib.wborder(self._win, + _chtype(ls), _chtype(rs), _chtype(ts), _chtype(bs), + _chtype(tl), _chtype(tr), _chtype(bl), _chtype(br)) + return None + + def box(self, vertint=0, horint=0): + lib.box(self._win, vertint, horint) + return None + + @_argspec(1, 1, 2) + def chgat(self, y, x, num, attr=None): + # These optional args are in a weird order. + if attr is None: + attr = num + num = -1 + + color = ((attr >> 8) & 0xff) + attr = attr - (color << 8) + + if y is not None: + code = lib.mvwchgat(self._win, y, x, num, attr, color, ffi.NULL) + lib.touchline(self._win, y, 1) + else: + yy, _ = self.getyx() + code = lib.wchgat(self._win, num, attr, color, ffi.NULL) + lib.touchline(self._win, yy, 1) + return _check_ERR(code, "chgat") + + def delch(self, *args): + if len(args) == 0: + code = lib.wdelch(self._win) elif len(args) == 2: - text, attr = args - elif len(args) == 3: - y, x, text = args + code = lib.mvwdelch(self._win, *args) + else: + raise error("delch requires 0 or 2 arguments") + return _check_ERR(code, "[mv]wdelch") + + def derwin(self, *args): + nlines = 0 + ncols = 0 + if len(args) == 2: + begin_y, begin_x = args elif len(args) == 4: - y, x, text, attr = args + nlines, ncols, begin_y, begin_x = args else: - raise TypeError("addstr requires 1 to 4 arguments") + raise error("derwin requires 2 or 4 arguments") + + win = lib.derwin(self._win, nlines, ncols, begin_y, begin_x) + return Window(_check_NULL(win)) + + def echochar(self, ch, attr=None): + if attr is None: + attr = lib.A_NORMAL + ch = _chtype(ch) + + if lib._m_ispad(self._win): + code = 
lib.pechochar(self._win, ch | attr) + else: + code = lib.wechochar(self._win, ch | attr) + return _check_ERR(code, "echochar") + + if lib._m_NCURSES_MOUSE_VERSION: + enclose = _mk_w_return_val("wenclose") + + getbkgd = _mk_w_return_val("getbkgd") + + def getch(self, *args): + if len(args) == 0: + val = lib.wgetch(self._win) + elif len(args) == 2: + val = lib.mvwgetch(self._win, *args) + else: + raise error("getch requires 0 or 2 arguments") + return val + + def getkey(self, *args): + if len(args) == 0: + val = lib.wgetch(self._win) + elif len(args) == 2: + val = lib.mvwgetch(self._win, *args) + else: + raise error("getkey requires 0 or 2 arguments") + + if val == lib.ERR: + raise error("no input") + elif val <= 255: + return chr(val) + else: + # XXX: The following line is different if `__NetBSD__` is defined. + val = lib.keyname(val) + if val == ffi.NULL: + return "" + return ffi.string(val) + + @_argspec(0, 1, 2) + def getstr(self, y, x, n=1023): + n = min(n, 1023) + buf = ffi.new("char[1024]") # /* This should be big enough.. 
I hope */ + + if y is None: + val = lib.wgetnstr(self._win, buf, n) + else: + val = lib.mvwgetnstr(self._win, y, x, buf, n) + + if val == lib.ERR: + return "" + return ffi.string(buf) + + @_argspec(2, 1, 2) + def hline(self, y, x, ch, n, attr=None): + ch = _chtype(ch) + if attr is None: + attr = lib.A_NORMAL + if y is not None: + _check_ERR(lib.wmove(self._win, y, x), "wmove") + return _check_ERR(lib.whline(self._win, ch | attr, n), "hline") + + @_argspec(1, 1, 2) + def insch(self, y, x, ch, attr=None): + ch = _chtype(ch) + if attr is None: + attr = lib.A_NORMAL + if y is not None: + code = lib.mvwinsch(self._win, y, x, ch | attr) + else: + code = lib.winsch(self._win, ch | attr) + return _check_ERR(code, "insch") + + def inch(self, *args): + if len(args) == 0: + return lib.winch(self._win) + elif len(args) == 2: + return lib.mvwinch(self._win, *args) + else: + raise error("inch requires 0 or 2 arguments") + + @_argspec(0, 1, 2) + def instr(self, y, x, n=1023): + n = min(n, 1023) + buf = ffi.new("char[1024]") # /* This should be big enough.. 
I hope */ + if y is None: + code = lib.winnstr(self._win, buf, n) + else: + code = lib.mvwinnstr(self._win, y, x, buf, n) + + if code == lib.ERR: + return "" + return ffi.string(buf) + + @_argspec(1, 1, 2) + def insstr(self, y, x, text, attr=None): + text = _texttype(text) if attr is not None: - attr_old = getattrs(self._window) - wattrset(self._window, attr) + attr_old = lib.getattrs(self._win) + lib.wattrset(self._win, attr) if y is not None: - mvwaddstr(self._window, y, x, text) + code = lib.mvwinsstr(self._win, y, x, text) else: - waddstr(self._window, text) + code = lib.winsstr(self._win, text) if attr is not None: - wattrset(self._window, attr_old) + lib.wattrset(self._win, attr_old) + return _check_ERR(code, "insstr") - def bkgdset(self, bkgd, attr=A_NORMAL): - if isinstance(bkgd, str): - bkgd = ord(bkgd) - wbkgdset(self._window, bkgd | attr) + @_argspec(2, 1, 2) + def insnstr(self, y, x, text, n, attr=None): + text = _texttype(text) + if attr is not None: + attr_old = lib.getattrs(self._win) + lib.wattrset(self._win, attr) + if y is not None: + code = lib.mvwinsnstr(self._win, y, x, text, n) + else: + code = lib.winsnstr(self._win, text, n) + if attr is not None: + lib.wattrset(self._win, attr_old) + return _check_ERR(code, "insnstr") + def is_linetouched(self, line): + code = lib.is_linetouched(self._win, line) + if code == lib.ERR: + raise error("is_linetouched: line number outside of boundaries") + if code == lib.FALSE: + return False + return True - def _make_method(cname): - method = getattr(lib, cname) - def _execute(self, *args): - return method(self._window, *args) - return _execute + def noutrefresh(self, *args): + if lib._m_ispad(self._win): + if len(args) != 6: + raise error( + "noutrefresh() called for a pad requires 6 arguments") + return _check_ERR(lib.pnoutrefresh(self._win, *args), + "pnoutrefresh") + else: + # XXX: Better args check here? We need zero args. 
+ return _check_ERR(lib.wnoutrefresh(self._win, *args), + "wnoutrefresh") - keypad = _make_method('keypad') - clear = _make_method('wclear') - clrtoeol = _make_method('wclrtoeol') - move = _make_method('wmove') - refresh = _make_method('wrefresh') - getch = _make_method('wgetch') - notimeout = _make_method('notimeout') - timeout = _make_method('wtimeout') + nooutrefresh = noutrefresh # "to be removed in 2.3", but in 2.7, 3.x. - del _make_method + def _copywin(self, dstwin, overlay, + sminr, sminc, dminr, dminc, dmaxr, dmaxc): + return _check_ERR(lib.copywin(self._win, dstwin._win, + sminr, sminc, dminr, dminc, dmaxr, dmaxc, + overlay), "copywin") + def overlay(self, dstwin, *args): + if len(args) == 6: + return self._copywin(dstwin, True, *args) + elif len(args) == 0: + return _check_ERR(lib.overlay(self._win, dstwin._win), "overlay") + else: + raise error("overlay requires one or seven arguments") -initscr = Window + def overwrite(self, dstwin, *args): + if len(args) == 6: + return self._copywin(dstwin, False, *args) + elif len(args) == 0: + return _check_ERR(lib.overwrite(self._win, dstwin._win), + "overwrite") + else: + raise error("overwrite requires one or seven arguments") -_setupterm_called = False + def putwin(self, filep): + # filestar = ffi.new("FILE *", filep) + return _check_ERR(lib.putwin(self._win, filep), "putwin") + def redrawln(self, beg, num): + return _check_ERR(lib.wredrawln(self._win, beg, num), "redrawln") -def _ensure_setupterm_called(): - if not _setupterm_called: - raise error("must call (at least) setupterm() first") + def refresh(self, *args): + if lib._m_ispad(self._win): + if len(args) != 6: + raise error( + "noutrefresh() called for a pad requires 6 arguments") + return _check_ERR(lib.prefresh(self._win, *args), "prefresh") + else: + # XXX: Better args check here? We need zero args. 
+ return _check_ERR(lib.wrefresh(self._win, *args), "wrefresh") + + def setscrreg(self, y, x): + return _check_ERR(lib.wsetscrreg(self._win, y, x), "wsetscrreg") + + def subwin(self, *args): + nlines = 0 + ncols = 0 + if len(args) == 2: + begin_y, begin_x = args + elif len(args) == 4: + nlines, ncols, begin_y, begin_x = args + else: + raise error("subwin requires 2 or 4 arguments") + + if lib._m_ispad(self._win): + win = lib.subpad(self._win, nlines, ncols, begin_y, begin_x) + else: + win = lib.subwin(self._win, nlines, ncols, begin_y, begin_x) + return Window(_check_NULL(win)) + + def scroll(self, nlines=None): + if nlines is None: + return _check_ERR(lib.scroll(self._win), "scroll") + else: + return _check_ERR(lib.wscrl(self._win, nlines), "scroll") + + def touchline(self, st, cnt, val=None): + if val is None: + return _check_ERR(lib.touchline(self._win, st, cnt), "touchline") + else: + return _check_ERR(lib.wtouchln(self._win, st, cnt, val), + "touchline") + + @_argspec(2, 1, 2) + def vline(self, y, x, ch, n, attr=None): + ch = _chtype(ch) + if attr is None: + attr = lib.A_NORMAL + if y is not None: + _check_ERR(lib.wmove(self._win, y, x), "wmove") + return _check_ERR(lib.wvline(self._win, ch | attr, n), "vline") + + +beep = _mk_no_return("beep") +def_prog_mode = _mk_no_return("def_prog_mode") +def_shell_mode = _mk_no_return("def_shell_mode") +doupdate = _mk_no_return("doupdate") +endwin = _mk_no_return("endwin") +flash = _mk_no_return("flash") +nocbreak = _mk_no_return("nocbreak") +noecho = _mk_no_return("noecho") +nonl = _mk_no_return("nonl") +noraw = _mk_no_return("noraw") +reset_prog_mode = _mk_no_return("reset_prog_mode") +reset_shell_mode = _mk_no_return("reset_shell_mode") +resetty = _mk_no_return("resetty") +savetty = _mk_no_return("savetty") + +cbreak = _mk_flag_func("cbreak") +echo = _mk_flag_func("echo") +nl = _mk_flag_func("nl") +raw = _mk_flag_func("raw") + +baudrate = _mk_return_val("baudrate") +termattrs = _mk_return_val("termattrs") + +termname = 
_mk_return_val("termname") +longname = _mk_return_val("longname") + +can_change_color = _mk_return_val("can_change_color") +has_colors = _mk_return_val("has_colors") +has_ic = _mk_return_val("has_ic") +has_il = _mk_return_val("has_il") +isendwin = _mk_return_val("isendwin") +flushinp = _mk_return_val("flushinp") +noqiflush = _mk_return_val("noqiflush") + + +def filter(): + lib.filter() + return None + + +def color_content(color): + _ensure_initialised_color() + r, g, b = ffi.new("short *"), ffi.new("short *"), ffi.new("short *") + if lib.color_content(color, r, g, b) == lib.ERR: + raise error("Argument 1 was out of range. Check value of COLORS.") + return (r[0], g[0], b[0]) + + +def color_pair(n): + _ensure_initialised_color() + return (n << 8) + + +def curs_set(vis): + _ensure_initialised() + val = lib.curs_set(vis) + _check_ERR(val, "curs_set") + return val + + +def delay_output(ms): + _ensure_initialised() + return _check_ERR(lib.delay_output(ms), "delay_output") + + +def erasechar(): + _ensure_initialised() + return lib.erasechar() + + +def getsyx(): + _ensure_initialised() + yx = ffi.new("int[2]") + lib._m_getsyx(yx) + return (yx[0], yx[1]) + + +if lib._m_NCURSES_MOUSE_VERSION: + + def getmouse(): + _ensure_initialised() + mevent = ffi.new("MEVENT *") + _check_ERR(lib.getmouse(mevent), "getmouse") + return (mevent.id, mevent.x, mevent.y, mevent.z, mevent.bstate) + + def ungetmouse(id, x, y, z, bstate): + _ensure_initialised() + mevent = ffi.new("MEVENT *") + mevent.id, mevent.x, mevent.y, mevent.z, mevent.bstate = ( + id, x, y, z, bstate) + return _check_ERR(lib.ungetmouse(mevent), "ungetmouse") + + +def getwin(filep): + return Window(_check_NULL(lib.getwin(filep))) + + +def halfdelay(tenths): + _ensure_initialised() + return _check_ERR(lib.halfdelay(tenths), "halfdelay") + + +if not lib._m_STRICT_SYSV_CURSES: + def has_key(ch): + _ensure_initialised() + return lib.has_key(ch) + + +def init_color(color, r, g, b): + _ensure_initialised_color() + return 
_check_ERR(lib.init_color(color, r, g, b), "init_color") + + +def init_pair(pair, f, b): + _ensure_initialised_color() + return _check_ERR(lib.init_pair(pair, f, b), "init_pair") + + +def _mk_acs(name, ichar): + if len(ichar) == 1: + globals()[name] = lib.acs_map[ord(ichar)] + else: + globals()[name] = globals()[ichar] + + +def _map_acs(): + _mk_acs("ACS_ULCORNER", 'l') + _mk_acs("ACS_LLCORNER", 'm') + _mk_acs("ACS_URCORNER", 'k') + _mk_acs("ACS_LRCORNER", 'j') + _mk_acs("ACS_LTEE", 't') + _mk_acs("ACS_RTEE", 'u') + _mk_acs("ACS_BTEE", 'v') + _mk_acs("ACS_TTEE", 'w') + _mk_acs("ACS_HLINE", 'q') + _mk_acs("ACS_VLINE", 'x') + _mk_acs("ACS_PLUS", 'n') + _mk_acs("ACS_S1", 'o') + _mk_acs("ACS_S9", 's') + _mk_acs("ACS_DIAMOND", '`') + _mk_acs("ACS_CKBOARD", 'a') + _mk_acs("ACS_DEGREE", 'f') + _mk_acs("ACS_PLMINUS", 'g') + _mk_acs("ACS_BULLET", '~') + _mk_acs("ACS_LARROW", ',') + _mk_acs("ACS_RARROW", '+') + _mk_acs("ACS_DARROW", '.') + _mk_acs("ACS_UARROW", '-') + _mk_acs("ACS_BOARD", 'h') + _mk_acs("ACS_LANTERN", 'i') + _mk_acs("ACS_BLOCK", '0') + _mk_acs("ACS_S3", 'p') + _mk_acs("ACS_S7", 'r') + _mk_acs("ACS_LEQUAL", 'y') + _mk_acs("ACS_GEQUAL", 'z') + _mk_acs("ACS_PI", '{') + _mk_acs("ACS_NEQUAL", '|') + _mk_acs("ACS_STERLING", '}') + _mk_acs("ACS_BSSB", "ACS_ULCORNER") + _mk_acs("ACS_SSBB", "ACS_LLCORNER") + _mk_acs("ACS_BBSS", "ACS_URCORNER") + _mk_acs("ACS_SBBS", "ACS_LRCORNER") + _mk_acs("ACS_SBSS", "ACS_RTEE") + _mk_acs("ACS_SSSB", "ACS_LTEE") + _mk_acs("ACS_SSBS", "ACS_BTEE") + _mk_acs("ACS_BSSS", "ACS_TTEE") + _mk_acs("ACS_BSBS", "ACS_HLINE") + _mk_acs("ACS_SBSB", "ACS_VLINE") + _mk_acs("ACS_SSSS", "ACS_PLUS") + + +def initscr(): + if _initialised: + lib.wrefresh(lib.stdscr) + return Window(lib.stdscr) + + win = _check_NULL(lib.initscr()) + globals()['_initialised_setupterm'] = True + globals()['_initialised'] = True + + _map_acs() + + globals()["LINES"] = lib.LINES + globals()["COLS"] = lib.COLS + + return Window(win) def setupterm(term=None, fd=-1): + if fd 
== -1: + # XXX: Check for missing stdout here? + fd = sys.stdout.fileno() + + if _initialised_setupterm: + return None + if term is None: term = ffi.NULL - if fd < 0: - import sys - fd = sys.stdout.fileno() err = ffi.new("int *") - if lib.setupterm(term, fd, err) == ERR: - if err[0] == 0: - s = "setupterm: could not find terminal" - elif err[0] == 1: - s = "setupterm: could not find terminfo database" + if lib.setupterm(term, fd, err) == lib.ERR: + err = err[0] + if err == 0: + raise error("setupterm: could not find terminal") + elif err == -1: + raise error("setupterm: could not find terminfo database") else: - s = "setupterm: unknown error %d" % err[0] - raise error(s) - global _setupterm_called - _setupterm_called = True + raise error("setupterm: unknown error") + + globals()["_initialised_setupterm"] = True + return None + + +def intrflush(ch): + _ensure_initialised() + return _check_ERR(lib.intrflush(ffi.NULL, ch), "intrflush") + + +# XXX: #ifdef HAVE_CURSES_IS_TERM_RESIZED +def is_term_resized(lines, columns): + _ensure_initialised() + return lib.is_term_resized(lines, columns) + + +if not lib._m_NetBSD: + def keyname(ch): + _ensure_initialised() + if ch < 0: + raise error("invalid key number") + knp = lib.keyname(ch) + if knp == ffi.NULL: + return "" + return ffi.string(knp) + + +def killchar(): + return lib.killchar() + + +def meta(ch): + return _check_ERR(lib.meta(lib.stdscr, ch), "meta") + + +if lib._m_NCURSES_MOUSE_VERSION: + + def mouseinterval(interval): + _ensure_initialised() + return _check_ERR(lib.mouseinterval(interval), "mouseinterval") + + def mousemask(newmask): + _ensure_initialised() + oldmask = ffi.new("mmask_t *") + availmask = lib.mousemask(newmask, oldmask) + return (availmask, oldmask) + + +def napms(ms): + _ensure_initialised() + return lib.napms(ms) + + +def newpad(nlines, ncols): + _ensure_initialised() + return Window(_check_NULL(lib.newpad(nlines, ncols))) + + +def newwin(nlines, ncols, begin_y=None, begin_x=None): + 
_ensure_initialised() + if begin_x is None: + if begin_y is not None: + raise error("newwin requires 2 or 4 arguments") + begin_y = begin_x = 0 + + return Window(_check_NULL(lib.newwin(nlines, ncols, begin_y, begin_x))) + + +def pair_content(pair): + _ensure_initialised_color() + f = ffi.new("short *") + b = ffi.new("short *") + if lib.pair_content(pair, f, b) == lib.ERR: + raise error("Argument 1 was out of range. (1..COLOR_PAIRS-1)") + return (f, b) + + +def pair_number(pairvalue): + _ensure_initialised_color() + return (pairvalue & lib.A_COLOR) >> 8 + + +def putp(text): + text = _texttype(text) + return _check_ERR(lib.putp(text), "putp") + + +def qiflush(flag=True): + _ensure_initialised() + if flag: + lib.qiflush() + else: + lib.noqiflush() + return None + + +# XXX: Do something about the following? +# /* Internal helper used for updating curses.LINES, curses.COLS, _curses.LINES +# * and _curses.COLS */ +# #if defined(HAVE_CURSES_RESIZETERM) || defined(HAVE_CURSES_RESIZE_TERM) +# static int +# update_lines_cols(void) +# { +# PyObject *o; +# PyObject *m = PyImport_ImportModuleNoBlock("curses"); + +# if (!m) +# return 0; + +# o = PyInt_FromLong(LINES); +# if (!o) { +# Py_DECREF(m); +# return 0; +# } +# if (PyObject_SetAttrString(m, "LINES", o)) { +# Py_DECREF(m); +# Py_DECREF(o); +# return 0; +# } +# if (PyDict_SetItemString(ModDict, "LINES", o)) { +# Py_DECREF(m); +# Py_DECREF(o); +# return 0; +# } +# Py_DECREF(o); +# o = PyInt_FromLong(COLS); +# if (!o) { +# Py_DECREF(m); +# return 0; +# } +# if (PyObject_SetAttrString(m, "COLS", o)) { +# Py_DECREF(m); +# Py_DECREF(o); +# return 0; +# } +# if (PyDict_SetItemString(ModDict, "COLS", o)) { +# Py_DECREF(m); +# Py_DECREF(o); +# return 0; +# } +# Py_DECREF(o); +# Py_DECREF(m); +# return 1; +# } +# #endif + +# #ifdef HAVE_CURSES_RESIZETERM +# static PyObject * +# PyCurses_ResizeTerm(PyObject *self, PyObject *args) +# { +# int lines; +# int columns; +# PyObject *result; + +# PyCursesInitialised; + +# if 
(!PyArg_ParseTuple(args,"ii:resizeterm", &lines, &columns)) +# return NULL; + +# result = PyCursesCheckERR(resizeterm(lines, columns), "resizeterm"); +# if (!result) +# return NULL; +# if (!update_lines_cols()) +# return NULL; +# return result; +# } + +# #endif + +# #ifdef HAVE_CURSES_RESIZE_TERM +# static PyObject * +# PyCurses_Resize_Term(PyObject *self, PyObject *args) +# { +# int lines; +# int columns; + +# PyObject *result; + +# PyCursesInitialised; + +# if (!PyArg_ParseTuple(args,"ii:resize_term", &lines, &columns)) +# return NULL; + +# result = PyCursesCheckERR(resize_term(lines, columns), "resize_term"); +# if (!result) +# return NULL; +# if (!update_lines_cols()) +# return NULL; +# return result; +# } +# #endif /* HAVE_CURSES_RESIZE_TERM */ + + +def setsyx(y, x): + _ensure_initialised() + lib.setsyx(y, x) + return None + + +def start_color(): + _check_ERR(lib.start_color(), "start_color") + globals()["COLORS"] = lib.COLORS + globals()["COLOR_PAIRS"] = lib.COLOR_PAIRS + globals()["_initialised_color"] = True + return None def tigetflag(capname): - _ensure_setupterm_called() + _ensure_initialised_setupterm() return lib.tigetflag(capname) def tigetnum(capname): - _ensure_setupterm_called() + _ensure_initialised_setupterm() return lib.tigetnum(capname) def tigetstr(capname): - _ensure_setupterm_called() - out = lib.tigetstr(capname) - if out == ffi.NULL: + _ensure_initialised_setupterm() + val = lib.tigetstr(capname) + if int(ffi.cast("intptr_t", val)) in (0, -1): return None - return ffi.string(out) + return ffi.string(val) -def tparm(name, *args): - _ensure_setupterm_called() - cargs = [ffi.cast("long", arg) for arg in args] - return ffi.string(lib.tparm(name, *cargs)) +def tparm(fmt, i1=0, i2=0, i3=0, i4=0, i5=0, i6=0, i7=0, i8=0, i9=0): + args = [ffi.cast("int", i) for i in (i1, i2, i3, i4, i5, i6, i7, i8, i9)] + result = lib.tparm(fmt, *args) + if result == ffi.NULL: + raise error("tparm() returned NULL") + return ffi.string(result) -def color_pair(n): - 
return n << 8 +def typeahead(fd): + _ensure_initialised() + return _check_ERR(lib.typeahead(fd), "typeahead") + + +def unctrl(ch): + _ensure_initialised() + return lib.unctrl(_chtype(ch)) + + +def ungetch(ch): + _ensure_initialised() + return _check_ERR(lib.ungetch(_chtype(ch)), "ungetch") + + +def use_env(flag): + lib.use_env(flag) + return None + + +if not lib._m_STRICT_SYSV_CURSES: + + def use_default_colors(): + _ensure_initialised_color() + return _check_ERR(lib.use_default_colors(), "use_default_colors") diff --git a/demo/_curses_build.py b/demo/_curses_build.py new file mode 100644 --- /dev/null +++ b/demo/_curses_build.py @@ -0,0 +1,325 @@ +import sys +if sys.platform == 'win32': + #This module does not exist in windows + raise ImportError('No module named _curses') + +from cffi import FFI +from _cffi1 import recompile + +ffi = FFI() + +ffi.cdef(""" +typedef ... WINDOW; +typedef ... SCREEN; +typedef unsigned long mmask_t; +typedef unsigned char bool; +typedef unsigned long chtype; +typedef chtype attr_t; + +typedef struct +{ + short id; /* ID to distinguish multiple devices */ + int x, y, z; /* event coordinates (character-cell) */ + mmask_t bstate; /* button state bits */ +} +MEVENT; + +static const int ERR, OK; +static const int TRUE, FALSE; +static const int KEY_MIN, KEY_MAX; + +static const int COLOR_BLACK; +static const int COLOR_RED; +static const int COLOR_GREEN; +static const int COLOR_YELLOW; +static const int COLOR_BLUE; +static const int COLOR_MAGENTA; +static const int COLOR_CYAN; +static const int COLOR_WHITE; + +static const chtype A_ATTRIBUTES; +static const chtype A_NORMAL; +static const chtype A_STANDOUT; +static const chtype A_UNDERLINE; +static const chtype A_REVERSE; +static const chtype A_BLINK; +static const chtype A_DIM; +static const chtype A_BOLD; +static const chtype A_ALTCHARSET; +static const chtype A_INVIS; +static const chtype A_PROTECT; +static const chtype A_CHARTEXT; +static const chtype A_COLOR; + +static const int 
BUTTON1_RELEASED; +static const int BUTTON1_PRESSED; +static const int BUTTON1_CLICKED; +static const int BUTTON1_DOUBLE_CLICKED; +static const int BUTTON1_TRIPLE_CLICKED; +static const int BUTTON2_RELEASED; +static const int BUTTON2_PRESSED; +static const int BUTTON2_CLICKED; +static const int BUTTON2_DOUBLE_CLICKED; +static const int BUTTON2_TRIPLE_CLICKED; +static const int BUTTON3_RELEASED; +static const int BUTTON3_PRESSED; +static const int BUTTON3_CLICKED; +static const int BUTTON3_DOUBLE_CLICKED; +static const int BUTTON3_TRIPLE_CLICKED; +static const int BUTTON4_RELEASED; +static const int BUTTON4_PRESSED; +static const int BUTTON4_CLICKED; +static const int BUTTON4_DOUBLE_CLICKED; +static const int BUTTON4_TRIPLE_CLICKED; +static const int BUTTON_SHIFT; +static const int BUTTON_CTRL; +static const int BUTTON_ALT; +static const int ALL_MOUSE_EVENTS; +static const int REPORT_MOUSE_POSITION; + +int setupterm(char *, int, int *); + +WINDOW *stdscr; +int COLORS; +int COLOR_PAIRS; +int COLS; +int LINES; + +int baudrate(void); +int beep(void); +int box(WINDOW *, chtype, chtype); +bool can_change_color(void); +int cbreak(void); +int clearok(WINDOW *, bool); +int color_content(short, short*, short*, short*); +int copywin(const WINDOW*, WINDOW*, int, int, int, int, int, int, int); +int curs_set(int); +int def_prog_mode(void); +int def_shell_mode(void); +int delay_output(int); +int delwin(WINDOW *); +WINDOW * derwin(WINDOW *, int, int, int, int); +int doupdate(void); +int echo(void); +int endwin(void); +char erasechar(void); +void filter(void); +int flash(void); +int flushinp(void); +chtype getbkgd(WINDOW *); +WINDOW * getwin(FILE *); +int halfdelay(int); +bool has_colors(void); +bool has_ic(void); +bool has_il(void); +void idcok(WINDOW *, bool); +int idlok(WINDOW *, bool); +void immedok(WINDOW *, bool); +WINDOW * initscr(void); +int init_color(short, short, short, short); +int init_pair(short, short, short); +int intrflush(WINDOW *, bool); +bool isendwin(void); 
+bool is_linetouched(WINDOW *, int); +bool is_wintouched(WINDOW *); +const char * keyname(int); +int keypad(WINDOW *, bool); +char killchar(void); +int leaveok(WINDOW *, bool); +char * longname(void); +int meta(WINDOW *, bool); +int mvderwin(WINDOW *, int, int); +int mvwaddch(WINDOW *, int, int, const chtype); +int mvwaddnstr(WINDOW *, int, int, const char *, int); +int mvwaddstr(WINDOW *, int, int, const char *); +int mvwchgat(WINDOW *, int, int, int, attr_t, short, const void *); +int mvwdelch(WINDOW *, int, int); +int mvwgetch(WINDOW *, int, int); +int mvwgetnstr(WINDOW *, int, int, char *, int); +int mvwin(WINDOW *, int, int); +chtype mvwinch(WINDOW *, int, int); +int mvwinnstr(WINDOW *, int, int, char *, int); +int mvwinsch(WINDOW *, int, int, chtype); +int mvwinsnstr(WINDOW *, int, int, const char *, int); +int mvwinsstr(WINDOW *, int, int, const char *); +int napms(int); +WINDOW * newpad(int, int); +WINDOW * newwin(int, int, int, int); +int nl(void); +int nocbreak(void); +int nodelay(WINDOW *, bool); +int noecho(void); +int nonl(void); +void noqiflush(void); +int noraw(void); +int notimeout(WINDOW *, bool); +int overlay(const WINDOW*, WINDOW *); +int overwrite(const WINDOW*, WINDOW *); +int pair_content(short, short*, short*); +int pechochar(WINDOW *, const chtype); +int pnoutrefresh(WINDOW*, int, int, int, int, int, int); +int prefresh(WINDOW *, int, int, int, int, int, int); +int putwin(WINDOW *, FILE *); +void qiflush(void); +int raw(void); +int redrawwin(WINDOW *); +int resetty(void); +int reset_prog_mode(void); +int reset_shell_mode(void); +int savetty(void); +int scroll(WINDOW *); +int scrollok(WINDOW *, bool); +int start_color(void); +WINDOW * subpad(WINDOW *, int, int, int, int); +WINDOW * subwin(WINDOW *, int, int, int, int); +int syncok(WINDOW *, bool); +chtype termattrs(void); +char * termname(void); +int touchline(WINDOW *, int, int); +int touchwin(WINDOW *); +int typeahead(int); +int ungetch(int); +int untouchwin(WINDOW *); +void use_env(bool); 
+int waddch(WINDOW *, const chtype); +int waddnstr(WINDOW *, const char *, int); +int waddstr(WINDOW *, const char *); +int wattron(WINDOW *, int); +int wattroff(WINDOW *, int); +int wattrset(WINDOW *, int); +int wbkgd(WINDOW *, chtype); +void wbkgdset(WINDOW *, chtype); +int wborder(WINDOW *, chtype, chtype, chtype, chtype, + chtype, chtype, chtype, chtype); +int wchgat(WINDOW *, int, attr_t, short, const void *); +int wclear(WINDOW *); +int wclrtobot(WINDOW *); +int wclrtoeol(WINDOW *); +void wcursyncup(WINDOW *); +int wdelch(WINDOW *); +int wdeleteln(WINDOW *); +int wechochar(WINDOW *, const chtype); +int werase(WINDOW *); +int wgetch(WINDOW *); +int wgetnstr(WINDOW *, char *, int); +int whline(WINDOW *, chtype, int); +chtype winch(WINDOW *); +int winnstr(WINDOW *, char *, int); +int winsch(WINDOW *, chtype); +int winsdelln(WINDOW *, int); +int winsertln(WINDOW *); +int winsnstr(WINDOW *, const char *, int); +int winsstr(WINDOW *, const char *); +int wmove(WINDOW *, int, int); +int wresize(WINDOW *, int, int); +int wnoutrefresh(WINDOW *); +int wredrawln(WINDOW *, int, int); +int wrefresh(WINDOW *); +int wscrl(WINDOW *, int); +int wsetscrreg(WINDOW *, int, int); +int wstandout(WINDOW *); +int wstandend(WINDOW *); +void wsyncdown(WINDOW *); +void wsyncup(WINDOW *); +void wtimeout(WINDOW *, int); +int wtouchln(WINDOW *, int, int, int); +int wvline(WINDOW *, chtype, int); +int tigetflag(char *); +int tigetnum(char *); +char * tigetstr(char *); +int putp(const char *); +char * tparm(const char *, ...); +int getattrs(const WINDOW *); +int getcurx(const WINDOW *); +int getcury(const WINDOW *); +int getbegx(const WINDOW *); +int getbegy(const WINDOW *); +int getmaxx(const WINDOW *); +int getmaxy(const WINDOW *); +int getparx(const WINDOW *); +int getpary(const WINDOW *); + +int getmouse(MEVENT *); +int ungetmouse(MEVENT *); +mmask_t mousemask(mmask_t, mmask_t *); +bool wenclose(const WINDOW *, int, int); +int mouseinterval(int); + +void setsyx(int y, int x); +const char 
*unctrl(chtype); +int use_default_colors(void); + +int has_key(int); +bool is_term_resized(int, int); + +#define _m_STRICT_SYSV_CURSES ... +#define _m_NCURSES_MOUSE_VERSION ... +#define _m_NetBSD ... +int _m_ispad(WINDOW *); + +chtype acs_map[]; + +// For _curses_panel: + +typedef ... PANEL; + +WINDOW *panel_window(const PANEL *); +void update_panels(void); +int hide_panel(PANEL *); +int show_panel(PANEL *); +int del_panel(PANEL *); +int top_panel(PANEL *); +int bottom_panel(PANEL *); +PANEL *new_panel(WINDOW *); +PANEL *panel_above(const PANEL *); +PANEL *panel_below(const PANEL *); +int set_panel_userptr(PANEL *, void *); +const void *panel_userptr(const PANEL *); +int move_panel(PANEL *, int, int); +int replace_panel(PANEL *,WINDOW *); +int panel_hidden(const PANEL *); + +void _m_getsyx(int *yx); +""") + + +recompile(ffi, "_curses_cffi", """ +#ifdef __APPLE__ +/* the following define is necessary for OS X 10.6+; without it, the + Apple-supplied ncurses.h sets NCURSES_OPAQUE to 1, and then Python + can't get at the WINDOW flags field. 
*/ +#define NCURSES_OPAQUE 0 +#endif + +#include +#include +#include + +#if defined STRICT_SYSV_CURSES +#define _m_STRICT_SYSV_CURSES TRUE +#else +#define _m_STRICT_SYSV_CURSES FALSE +#endif + +#if defined NCURSES_MOUSE_VERSION +#define _m_NCURSES_MOUSE_VERSION TRUE +#else +#define _m_NCURSES_MOUSE_VERSION FALSE +#endif + +#if defined __NetBSD__ +#define _m_NetBSD TRUE +#else +#define _m_NetBSD FALSE +#endif + +int _m_ispad(WINDOW *win) { + // may not have _flags (and possibly _ISPAD), + // but for now let's assume that always has it + return (win->_flags & _ISPAD); +} + +void _m_getsyx(int *yx) { + getsyx(yx[0], yx[1]); +} +""", libraries=['ncurses', 'panel']) diff --git a/_cffi1/bsdopendirtype_build.py b/demo/bsdopendirtype_build.py rename from _cffi1/bsdopendirtype_build.py rename to demo/bsdopendirtype_build.py diff --git a/demo/readdir2.py b/demo/readdir2.py --- a/demo/readdir2.py +++ b/demo/readdir2.py @@ -1,65 +1,32 @@ # A Linux-only demo, using verify() instead of hard-coding the exact layouts # import sys -from cffi import FFI +from _readdir2 import ffi, lib if not sys.platform.startswith('linux'): raise Exception("Linux-only demo") -ffi = FFI() -ffi.cdef(""" - - typedef ... 
DIR; - - struct dirent { - unsigned char d_type; /* type of file; not supported - by all file system types */ - char d_name[...]; /* filename */ - ...; - }; - - int readdir_r(DIR *dirp, struct dirent *entry, struct dirent **result); - int openat(int dirfd, const char *pathname, int flags); - DIR *fdopendir(int fd); - int closedir(DIR *dirp); - - static const int DT_DIR; - -""") -ffi.C = ffi.verify(""" -#ifndef _ATFILE_SOURCE -# define _ATFILE_SOURCE -#endif -#ifndef _BSD_SOURCE -# define _BSD_SOURCE -#endif -#include -#include -#include -""") - - def walk(basefd, path): print '{', path - dirfd = ffi.C.openat(basefd, path, 0) + dirfd = lib.openat(basefd, path, 0) if dirfd < 0: # error in openat() return - dir = ffi.C.fdopendir(dirfd) + dir = lib.fdopendir(dirfd) dirent = ffi.new("struct dirent *") result = ffi.new("struct dirent **") while True: - if ffi.C.readdir_r(dir, dirent, result): + if lib.readdir_r(dir, dirent, result): # error in readdir_r() break if result[0] == ffi.NULL: break name = ffi.string(dirent.d_name) print '%3d %s' % (dirent.d_type, name) - if dirent.d_type == ffi.C.DT_DIR and name != '.' and name != '..': + if dirent.d_type == lib.DT_DIR and name != '.' 
and name != '..': walk(dirfd, name) - ffi.C.closedir(dir) + lib.closedir(dir) print '}' diff --git a/_cffi1/readdir2_build.py b/demo/readdir2_build.py rename from _cffi1/readdir2_build.py rename to demo/readdir2_build.py From noreply at buildbot.pypy.org Sun Apr 26 18:10:39 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 18:10:39 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Support dotted names in 'module_name' Message-ID: <20150426161039.300731C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1852:288228787b93 Date: 2015-04-26 18:11 +0200 http://bitbucket.org/cffi/cffi/changeset/288228787b93/ Log: Support dotted names in 'module_name' diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -209,8 +209,9 @@ prnt() # # the init function, loading _cffi_backend and calling a method there + base_module_name = self.module_name.split('.')[-1] prnt('PyMODINIT_FUNC') - prnt('init%s(void)' % (self.module_name,)) + prnt('init%s(void)' % (base_module_name,)) prnt('{') prnt(' if (_cffi_init() < 0)') prnt(' return;') diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -1,6 +1,7 @@ -import sys, py +import sys, os, py from cffi import FFI, VerificationError from _cffi1 import recompiler +from _cffi1.udir import udir def check_type_table(input, expected_output): @@ -400,3 +401,22 @@ assert ffi1.typeof(name) is not ffi2.typeof(name) # sanity check: twice 'ffi1' assert ffi1.typeof("struct foo_s*") is ffi1.typeof("struct foo_s *") + +def test_module_name_in_package(): + ffi = FFI() + ffi.cdef("int foo(int);") + recompiler.recompile(ffi, "test_module_name_in_package.mymod", + "int foo(int x) { return x + 32; }", + tmpdir=str(udir)) + old_sys_path = sys.path[:] + try: + package_dir = udir.join('test_module_name_in_package') + assert os.path.isdir(str(package_dir)) + assert 
len(os.listdir(str(package_dir))) > 0 + package_dir.join('__init__.py').write('') + # + sys.path.insert(0, str(udir)) + import test_module_name_in_package.mymod + assert test_module_name_in_package.mymod.lib.foo(10) == 42 + finally: + sys.path[:] = old_sys_path From noreply at buildbot.pypy.org Sun Apr 26 18:36:54 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 18:36:54 +0200 (CEST) Subject: [pypy-commit] cffi default: Obscure workaround for https://bugs.python.org/issue23246 on Python 2.7.9 Message-ID: <20150426163654.82DFE1C116F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1853:ee5aa6781207 Date: 2015-04-26 18:37 +0200 http://bitbucket.org/cffi/cffi/changeset/ee5aa6781207/ Log: Obscure workaround for https://bugs.python.org/issue23246 on Python 2.7.9 diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -1,4 +1,4 @@ -import os +import sys, os class VerificationError(Exception): @@ -14,6 +14,15 @@ LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', 'extra_objects', 'depends'] +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7.9) + if sys.platform == "win32": + try: + import setuptools # for side-effects, patches distutils + except ImportError: + pass + def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] @@ -37,6 +46,7 @@ def _build(tmpdir, ext): # XXX compact but horrible :-( + _hack_at_distutils() from distutils.core import Distribution import distutils.errors # From noreply at buildbot.pypy.org Sun Apr 26 18:39:59 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 18:39:59 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: hg merge default Message-ID: <20150426163959.665951C0359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1854:b20650f90e1c 
Date: 2015-04-26 18:40 +0200 http://bitbucket.org/cffi/cffi/changeset/b20650f90e1c/ Log: hg merge default diff --git a/_cffi1/test_new_ffi_1.py b/_cffi1/test_new_ffi_1.py --- a/_cffi1/test_new_ffi_1.py +++ b/_cffi1/test_new_ffi_1.py @@ -60,6 +60,10 @@ struct { int a, b; }; union { int c, d; }; }; + struct nested_field_ofs_s { + struct { int a; char b; }; + union { char c; }; + }; union nested_anon_u { struct { int a, b; }; union { int c, d; }; @@ -81,7 +85,7 @@ ffi = module.ffi -class TestOldFFI1: +class TestNewFFI1: def test_integer_ranges(self): for (c_type, size) in [('char', 1), @@ -1328,6 +1332,14 @@ assert p.c == 14 assert p.d == 14 + def test_nested_field_offset_align(self): + # struct nested_field_ofs_s { + # struct { int a; char b; }; + # union { char c; }; + # }; + assert ffi.offsetof("struct nested_field_ofs_s", "c") == 2 * SIZE_OF_INT + assert ffi.sizeof("struct nested_field_ofs_s") == 3 * SIZE_OF_INT + def test_nested_anonymous_union(self): # union nested_anon_u { # struct { int a, b; }; diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -1,4 +1,4 @@ -import os +import sys, os class VerificationError(Exception): @@ -14,6 +14,15 @@ LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', 'extra_objects', 'depends'] +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7.9) + if sys.platform == "win32": + try: + import setuptools # for side-effects, patches distutils + except ImportError: + pass + def get_extension(srcfilename, modname, sources=(), **kwds): from distutils.core import Extension allsources = [srcfilename] @@ -37,6 +46,7 @@ def _build(tmpdir, ext): # XXX compact but horrible :-( + _hack_at_distutils() from distutils.core import Distribution import distutils.errors # diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ 
-1388,6 +1388,17 @@ assert p.c == 14 assert p.d == 14 + def test_nested_field_offset_align(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + struct foo_s { + struct { int a; char b; }; + union { char c; }; + }; + """) + assert ffi.offsetof("struct foo_s", "c") == 2 * SIZE_OF_INT + assert ffi.sizeof("struct foo_s") == 3 * SIZE_OF_INT + def test_nested_anonymous_union(self): ffi = FFI(backend=self.Backend()) ffi.cdef(""" diff --git a/testing/test_ctypes.py b/testing/test_ctypes.py --- a/testing/test_ctypes.py +++ b/testing/test_ctypes.py @@ -28,6 +28,9 @@ def test_nested_anonymous_struct(self): py.test.skip("ctypes backend: not supported: nested anonymous struct") + def test_nested_field_offset_align(self): + py.test.skip("ctypes backend: not supported: nested anonymous struct") + def test_nested_anonymous_union(self): py.test.skip("ctypes backend: not supported: nested anonymous union") diff --git a/testing/test_verify.py b/testing/test_verify.py --- a/testing/test_verify.py +++ b/testing/test_verify.py @@ -1229,11 +1229,11 @@ xxx def test_opaque_integer_as_function_result(): - import platform - if platform.machine().startswith('sparc'): - py.test.skip('Breaks horribly on sparc (SIGILL + corrupted stack)') - elif platform.machine() == 'mips64' and sys.maxsize > 2**32: - py.test.skip('Segfaults on mips64el') + #import platform + #if platform.machine().startswith('sparc'): + # py.test.skip('Breaks horribly on sparc (SIGILL + corrupted stack)') + #elif platform.machine() == 'mips64' and sys.maxsize > 2**32: + # py.test.skip('Segfaults on mips64el') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. 
XXX think about something better :-( ffi = FFI() From noreply at buildbot.pypy.org Sun Apr 26 19:29:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 19:29:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Try to fix ARM Message-ID: <20150426172951.32A0E1C0A5B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76939:61817d7f51b1 Date: 2015-04-26 19:29 +0200 http://bitbucket.org/pypy/pypy/changeset/61817d7f51b1/ Log: Try to fix ARM diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -459,6 +459,8 @@ return fcond emit_op_jit_debug = emit_op_debug_merge_point emit_op_keepalive = emit_op_debug_merge_point + emit_op_enter_portal_frame = emit_op_debug_merge_point + emit_op_leave_portal_frame = emit_op_debug_merge_point def emit_op_cond_call_gc_wb(self, op, arglocs, regalloc, fcond): self._write_barrier_fastpath(self.mc, op.getdescr(), arglocs, fcond) diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1155,6 +1155,8 @@ prepare_op_debug_merge_point = void prepare_op_jit_debug = void prepare_op_keepalive = void + prepare_op_enter_portal_frame = void + prepare_op_leave_portal_frame = void def prepare_op_cond_call_gc_wb(self, op, fcond): assert op.result is None From noreply at buildbot.pypy.org Sun Apr 26 19:47:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 19:47:24 +0200 (CEST) Subject: [pypy-commit] pypy default: If cffi is not installed, just skip this test Message-ID: <20150426174724.56F5E1C0845@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76940:9c621f634f52 Date: 2015-04-26 19:47 +0200 http://bitbucket.org/pypy/pypy/changeset/9c621f634f52/ Log: If cffi is not installed, just skip this test diff --git 
a/rpython/jit/backend/llsupport/test/test_skiplist.py b/rpython/jit/backend/llsupport/test/test_skiplist.py --- a/rpython/jit/backend/llsupport/test/test_skiplist.py +++ b/rpython/jit/backend/llsupport/test/test_skiplist.py @@ -1,5 +1,9 @@ import random, os -import cffi +try: + import cffi +except ImportError: + import py + py.test.skip("cffi not installed") ffi = cffi.FFI() From noreply at buildbot.pypy.org Sun Apr 26 21:22:56 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 26 Apr 2015 21:22:56 +0200 (CEST) Subject: [pypy-commit] pypy default: (ab)use link_files since it appears _after_ the object files and gcc's linker is picky about order Message-ID: <20150426192256.B71981C03F7@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r76941:8bbba5f3aeb8 Date: 2015-04-26 22:23 +0300 http://bitbucket.org/pypy/pypy/changeset/8bbba5f3aeb8/ Log: (ab)use link_files since it appears _after_ the object files and gcc's linker is picky about order diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -27,7 +27,7 @@ include_dirs = [SRC], includes = ['vmprof.h', 'trampoline.h'], separate_module_files = [SRC.join('trampoline.asmgcc.s')], - link_extra = ['-Wl,-Bstatic', '-lunwind', '-Wl,-Bdynamic', '-llzma'], + link_files = ['-Wl,-Bstatic', '-lunwind', '-Wl,-Bdynamic', '-llzma'], post_include_bits=[""" void pypy_vmprof_init(void); From noreply at buildbot.pypy.org Sun Apr 26 23:26:02 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 26 Apr 2015 23:26:02 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: test, redo complex floordiv to avoid overflow if possible Message-ID: <20150426212602.521721C0A5B@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-fixes Changeset: r76942:9ea844c184eb Date: 2015-04-26 23:33 +0300 http://bitbucket.org/pypy/pypy/changeset/9ea844c184eb/ Log: test, redo complex floordiv to avoid 
overflow if possible diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -486,12 +486,16 @@ assert c[i] == max(a[i], b[i]) - def test_abs_overflow(self): - from numpy import array, absolute, isinf + def test_complex_overflow(self): + from numpy import array, absolute, isinf, complex128, floor_divide a = array(complex(1.5e308,1.5e308)) # Prints a RuntimeWarning, but does not raise b = absolute(a) assert isinf(b) + c = array([1.e+110, 1.e-110], dtype=complex128) + d = floor_divide(c**2, c) + assert (d == [1.e+110, 0]).all() + def test_basic(self): @@ -598,7 +602,7 @@ # but numpy.raises a TypeError if '__pypy__' in sys.builtin_module_names: exct, excm = TypeError, 'readonly attribute' - else: + else : exct, excm = AttributeError, 'is not writable' exc = raises(exct, 'c2.real = 10.') assert excm in exc.value[0] From noreply at buildbot.pypy.org Sun Apr 26 23:34:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 23:34:29 +0200 (CEST) Subject: [pypy-commit] pypy default: The "-llzma" seems to have been checked in by accident. Removing it seems to have no bad effect, and liblzma is not Message-ID: <20150426213429.832A51C116B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76943:b43cd584adec Date: 2015-04-26 23:02 +0200 http://bitbucket.org/pypy/pypy/changeset/b43cd584adec/ Log: The "-llzma" seems to have been checked in by accident. Removing it seems to have no bad effect, and liblzma is not necessarily installed (e.g. 
not documented in the list of deps) diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -27,7 +27,7 @@ include_dirs = [SRC], includes = ['vmprof.h', 'trampoline.h'], separate_module_files = [SRC.join('trampoline.asmgcc.s')], - link_files = ['-Wl,-Bstatic', '-lunwind', '-Wl,-Bdynamic', '-llzma'], + link_files = ['-Wl,-Bstatic', '-lunwind', '-Wl,-Bdynamic'], post_include_bits=[""" void pypy_vmprof_init(void); From noreply at buildbot.pypy.org Sun Apr 26 23:59:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 26 Apr 2015 23:59:26 +0200 (CEST) Subject: [pypy-commit] pypy default: disable _vmprof Message-ID: <20150426215926.E5C621C116F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76944:d32226e955cb Date: 2015-04-26 23:59 +0200 http://bitbucket.org/pypy/pypy/changeset/d32226e955cb/ Log: disable _vmprof diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,6 +39,7 @@ ]) if sys.platform.startswith('linux') and sys.maxint > 2147483647: + if 0: # XXX disabled until we fix the absurd .so mess working_modules.add('_vmprof') translation_modules = default_modules.copy() From noreply at buildbot.pypy.org Mon Apr 27 06:44:46 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 27 Apr 2015 06:44:46 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: Implement enough of can_cast(, dtype) to pass the test Message-ID: <20150427044446.AF1291C0EC0@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76945:d45abfb9685d Date: 2015-04-27 05:44 +0100 http://bitbucket.org/pypy/pypy/changeset/d45abfb9685d/ Log: Implement enough of can_cast(, dtype) to pass the test diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ 
b/pypy/module/micronumpy/arrayops.py @@ -10,6 +10,7 @@ from .boxes import W_GenericBox from .types import ( Bool, ULong, Long, Float64, Complex64, UnicodeType, VoidType, ObjectType) +from .descriptor import get_dtype_cache def where(space, w_arr, w_x=None, w_y=None): @@ -360,7 +361,11 @@ return can_cast_type(space, from_type, target, casting) if not from_type.is_native(): value = value.descr_byteswap(space) - return can_cast_type(space, from_type, target, casting) # XXX: stub impl + dtypenum, altnum = value.min_dtype() + if target.is_unsigned(): + dtypenum = altnum + dtype = get_dtype_cache(space).dtypes_by_num[dtypenum] + return can_cast_type(space, dtype, target, casting) # XXX: stub impl def is_scalar_w(space, w_arg): return (isinstance(w_arg, W_GenericBox) or diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -874,4 +874,3 @@ __new__ = interp2app(W_ObjectBox.descr__new__.im_func), __getattr__ = interp2app(W_ObjectBox.descr__getattr__), ) - diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1,5 +1,6 @@ import functools import math +from rpython.rlib.unroll import unrolling_iterable from pypy.interpreter.error import OperationError, oefmt from pypy.objspace.std.floatobject import float2string from pypy.objspace.std.complexobject import str_format @@ -2472,3 +2473,56 @@ for tp2 in complex_types: if tp1.basesize() <= tp2.basesize(): enable_cast(tp1, tp2) + +_int_types = [(Int8, UInt8), (Int16, UInt16), (Int32, UInt32), + (Int64, UInt64), (Long, ULong)] +for Int_t, UInt_t in _int_types: + Int_t.Unsigned = UInt_t + UInt_t.Signed = Int_t + size = rffi.sizeof(Int_t.T) + Int_t.min_value = rffi.cast(Int_t.T, -1) << (8*size - 1) + Int_t.max_value = ~Int_t.min_value + UInt_t.max_value = ~rffi.cast(UInt_t.T, 0) + + +signed_types = [Int8, Int16, Int32, Int64, 
Long] + +for Int_t in signed_types: + UInt_t = Int_t.Unsigned + smaller_types = [tp for tp in signed_types + if rffi.sizeof(tp.T) < rffi.sizeof(Int_t.T)] + smaller_types = unrolling_iterable( + [(tp, tp.Unsigned) for tp in smaller_types]) + def min_dtype(self): + for Small, USmall in smaller_types: + signed_max = rffi.cast(UInt_t.T, Small.max_value) + unsigned_max = rffi.cast(UInt_t.T, USmall.max_value) + if self.value <= unsigned_max: + if self.value <= signed_max: + return Small.num, USmall.num + else: + return USmall.num, USmall.num + if self.value <= rffi.cast(UInt_t.T, Int_t.max_value): + return Int_t.num, UInt_t.num + else: + return UInt_t.num, UInt_t.num + UInt_t.BoxType.min_dtype = min_dtype + + def min_dtype(self): + if self.value >= 0: + for Small, USmall in smaller_types: + signed_max = rffi.cast(UInt_t.T, Small.max_value) + unsigned_max = rffi.cast(UInt_t.T, USmall.max_value) + if self.value <= unsigned_max: + if self.value <= signed_max: + return Small.num, USmall.num + else: + return USmall.num, USmall.num + return Int_t.num, UInt_t.num + else: + for Small, USmall in smaller_types: + signed_min = rffi.cast(UInt_t.T, Small.min_value) + if self.value >= signed_max: + return Small.num, Small.num + return Int_t.num, Int_t.num + Int_t.BoxType.min_dtype = min_dtype From noreply at buildbot.pypy.org Mon Apr 27 10:11:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 10:11:55 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Try to avoid confusion Message-ID: <20150427081155.951211C04A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r598:88189d3b1d8f Date: 2015-04-27 10:12 +0200 http://bitbucket.org/pypy/pypy.org/changeset/88189d3b1d8f/ Log: Try to avoid confusion diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -145,6 +145,7 @@
     cd pypy/sandbox
     pypy_interact.py path/to/pypy-sandbox
    +# don't confuse it with pypy/goal/pyinteractive.py!
     

    You get a fully sandboxed interpreter, in its own filesystem hierarchy (try os.listdir('/')). For example, you would run an untrusted diff --git a/source/features.txt b/source/features.txt --- a/source/features.txt +++ b/source/features.txt @@ -113,6 +113,7 @@ cd pypy/sandbox pypy_interact.py path/to/pypy-sandbox + # don't confuse it with pypy/goal/pyinteractive.py! You get a fully sandboxed interpreter, in its own filesystem hierarchy (try ``os.listdir('/')``). For example, you would run an untrusted From noreply at buildbot.pypy.org Mon Apr 27 10:22:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 10:22:15 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Expand the warning. Message-ID: <20150427082215.E7FBB1C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r599:270cd2221470 Date: 2015-04-27 10:22 +0200 http://bitbucket.org/pypy/pypy.org/changeset/270cd2221470/ Log: Expand the warning. diff --git a/features.html b/features.html --- a/features.html +++ b/features.html @@ -136,8 +136,21 @@ try to limit language features considered “unsafe”. Instead we replace all calls to external libraries (C or platform) with a stub that communicates with an external process handling the policy.

    -

    Please be aware that it is a prototype only. It needs work to become -more complete, and you are welcome to help.

    + +++ + + + + +
    Please be aware that it is a prototype only. It needs work to become +more complete, and you are welcome to help. In particular, almost none +of the extension modules work (not even time ), and pypy_interact +is merely a demo. Also, a more complete system would include a way +to do the same as pypy_interact from other languages than Python, +to embed a sandboxed interpreter inside programs written in other +languages.

    To run the sandboxed process, you need to get the full sources and build pypy-sandbox from it (see Building from source). These instructions give you a pypy-c that you should rename to diff --git a/source/features.txt b/source/features.txt --- a/source/features.txt +++ b/source/features.txt @@ -103,8 +103,15 @@ calls to external libraries (C or platform) with a stub that communicates with an external process handling the policy. -Please be aware that it is a **prototype** only. It needs work to become -more complete, and you are welcome to help. ++-----------------------------------------------------------------------------+ +| **Please be aware that it is a prototype only.** *It needs work to become | +| more complete, and you are welcome to help. In particular, almost none | +| of the extension modules work (not even* ``time`` *), and* ``pypy_interact``| +| *is merely a demo. Also, a more complete system would include a way | +| to do the same as* ``pypy_interact`` *from other languages than Python, | +| to embed a sandboxed interpreter inside programs written in other | +| languages.* | ++-----------------------------------------------------------------------------+ To run the sandboxed process, you need to get the full sources and build ``pypy-sandbox`` from it (see `Building from source`_). These From noreply at buildbot.pypy.org Mon Apr 27 16:18:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 16:18:18 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Move the guarantee of uniqueness of types to the core _cffi_backend.c. Message-ID: <20150427141818.2CB2F1C1427@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1855:04e073297436 Date: 2015-04-27 15:33 +0200 http://bitbucket.org/cffi/cffi/changeset/04e073297436/ Log: Move the guarantee of uniqueness of types to the core _cffi_backend.c. Gets rid of USES_LOCAL which is not really compatible with ffi.include(). 
diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -400,7 +400,6 @@ } z = new_pointer_type(ct); - z = get_unique_type(self->types_builder, z); if (z == NULL) return NULL; diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -7,7 +7,6 @@ static PyObject *all_primitives[_CFFI__NUM_PRIM]; -static PyObject *global_types_dict; static CTypeDescrObject *g_ct_voidp, *g_ct_chararray; static PyObject *build_primitive_type(int num); /* forward */ @@ -16,14 +15,6 @@ (all_primitives[num] != NULL ? all_primitives[num] \ : build_primitive_type(num)) -static int _add_to_global_types_dict(PyObject *ct) -{ - if (ct == NULL) - return -1; - return PyDict_SetItemString(global_types_dict, - ((CTypeDescrObject *)ct)->ct_name, ct); -} - static int init_global_types_dict(PyObject *ffi_type_dict) { int err; @@ -32,29 +23,25 @@ MemoryErrors during importing an extension module are kind of bad anyway */ - global_types_dict = PyDict_New(); - if (global_types_dict == NULL) - return -1; - ct_void = get_primitive_type(_CFFI_PRIM_VOID); // 'void' - if (_add_to_global_types_dict(ct_void) < 0) + if (ct_void == NULL) return -1; ct2 = new_pointer_type((CTypeDescrObject *)ct_void); // 'void *' - if (_add_to_global_types_dict(ct2) < 0) + if (ct2 == NULL) return -1; g_ct_voidp = (CTypeDescrObject *)ct2; ct_char = get_primitive_type(_CFFI_PRIM_CHAR); // 'char' - if (_add_to_global_types_dict(ct_char) < 0) + if (ct_char == NULL) return -1; ct2 = new_pointer_type((CTypeDescrObject *)ct_char); // 'char *' - if (_add_to_global_types_dict(ct2) < 0) + if (ct2 == NULL) return -1; ct2 = new_array_type((CTypeDescrObject *)ct2, -1); // 'char[]' - if (_add_to_global_types_dict(ct2) < 0) + if (ct2 == NULL) return -1; g_ct_chararray = (CTypeDescrObject *)ct2; @@ -117,66 +104,6 @@ return builder; } -static PyObject *get_unique_type(builder_c_t *builder, PyObject *x) -{ - /* Replace the 
CTypeDescrObject 'x' with a standardized one. - This either just returns x, or x is decrefed and a new reference - to the standard equivalent is returned. - - In this function, 'x' always contains a reference that must be - decrefed, and 'y' never does. - */ - CTypeDescrObject *ct = (CTypeDescrObject *)x; - if (ct == NULL) - return NULL; - - /* XXX maybe change the type of ct_name to be a real 'PyObject *'? */ - PyObject *name = PyString_FromString(ct->ct_name); - if (name == NULL) - goto no_memory; - - PyObject *y = PyDict_GetItem(builder->types_dict, name); - if (y != NULL) { - /* Already found the same ct_name in the dict. Return the old one. */ - Py_INCREF(y); - Py_DECREF(x); - x = y; - goto done; - } - - if (!(ct->ct_flags & CT_USES_LOCAL)) { - /* The type is not "local", i.e. does not make use of any struct, - union or enum. This means it should be shared across independent - ffi instances. Look it up and possibly add it to the global - types dict. - */ - y = PyDict_GetItem(global_types_dict, name); - if (y != NULL) { - Py_INCREF(y); - Py_DECREF(x); - x = y; - } - else { - /* Not found in the global dictionary. Put it there. */ - if (PyDict_SetItem(global_types_dict, name, x) < 0) - goto no_memory; - } - } - - /* Set x in the local dict. 
*/ - if (PyDict_SetItem(builder->types_dict, name, x) < 0) - goto no_memory; - - done: - Py_DECREF(name); - return x; - - no_memory: - Py_XDECREF(name); - Py_DECREF(x); - return NULL; -} - static PyObject *build_primitive_type(int num) { /* XXX too many translations between here and new_primitive_type() */ @@ -365,7 +292,6 @@ return NULL; if (CTypeDescr_Check(y)) { x = new_pointer_type((CTypeDescrObject *)y); - x = get_unique_type(builder, x); } else { assert(PyTuple_Check(y)); /* from _CFFI_OP_FUNCTION */ @@ -383,12 +309,10 @@ if (y == NULL) return NULL; z = new_pointer_type((CTypeDescrObject *)y); - z = get_unique_type(builder, z); Py_DECREF(y); if (z == NULL) return NULL; x = new_array_type((CTypeDescrObject *)z, length); - x = get_unique_type(builder, x); Py_DECREF(z); break; @@ -573,7 +497,6 @@ z = new_function_type(fargs, (CTypeDescrObject *)y, ellipsis, FFI_DEFAULT_ABI); - z = get_unique_type(builder, z); Py_DECREF(fargs); Py_DECREF(y); if (z == NULL) diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -132,7 +132,6 @@ #define CT_WITH_VAR_ARRAY 1048576 #define CT_IS_UNSIZED_CHAR_A 2097152 #define CT_LAZY_FIELD_LIST 4194304 -#define CT_USES_LOCAL 8388608 #define CT_PRIMITIVE_ANY (CT_PRIMITIVE_SIGNED | \ CT_PRIMITIVE_UNSIGNED | \ CT_PRIMITIVE_CHAR | \ @@ -3381,6 +3380,57 @@ /************************************************************/ +static PyObject *unique_cache; + +static PyObject *get_unique_type(CTypeDescrObject *x, + const void *unique_key[], long keylength) +{ + /* Replace the CTypeDescrObject 'x' with a standardized one. + This either just returns x, or x is decrefed and a new reference + to the already-existing equivalent is returned. + + In this function, 'x' always contains a reference that must be + either decrefed or returned. + + Keys: + void ["void"] + primitive [&static_struct] + pointer [ctype] + array [ctype, length] + funcptr [ctresult, ellipsis+abi, num_args, ctargs...] 
+ */ + long i; + PyObject *key, *y; + const void **pkey; + int err; + + key = PyString_FromStringAndSize(NULL, keylength * sizeof(void *)); + if (key == NULL) + goto error; + + pkey = (const void **)PyString_AS_STRING(key); + for (i = 0; i < keylength; i++) + pkey[i] = unique_key[i]; + + y = PyDict_GetItem(unique_cache, key); + if (y != NULL) { + Py_DECREF(key); + Py_INCREF(y); + Py_DECREF(x); + return y; + } + err = PyDict_SetItem(unique_cache, key, (PyObject *)x); + Py_DECREF(key); + if (err < 0) + goto error; + + return (PyObject *)x; + + error: + Py_DECREF(x); + return NULL; +} + static PyObject *new_primitive_type(const char *name) { #define ENUM_PRIMITIVE_TYPES \ @@ -3461,6 +3511,7 @@ { NULL } }; const struct descr_s *ptypes; + const void *unique_key[1]; int name_size; ffi_type *ffitype; @@ -3526,7 +3577,8 @@ td->ct_flags |= CT_PRIMITIVE_FITS_LONG; } td->ct_name_position = strlen(td->ct_name); - return (PyObject *)td; + unique_key[0] = ptypes; + return get_unique_type(td, unique_key, 1); bad_ffi_type: PyErr_Format(PyExc_NotImplementedError, @@ -3548,6 +3600,7 @@ { CTypeDescrObject *td; const char *extra; + const void *unique_key[1]; if (ctitem->ct_flags & CT_ARRAY) extra = "(*)"; /* obscure case: see test_array_add */ @@ -3559,7 +3612,7 @@ td->ct_size = sizeof(void *); td->ct_length = -1; - td->ct_flags = CT_POINTER | (ctitem->ct_flags & CT_USES_LOCAL); + td->ct_flags = CT_POINTER; if (ctitem->ct_flags & (CT_STRUCT|CT_UNION)) td->ct_flags |= CT_IS_PTR_TO_OWNED; if (ctitem->ct_flags & CT_VOID) @@ -3568,7 +3621,8 @@ ((ctitem->ct_flags & CT_PRIMITIVE_CHAR) && ctitem->ct_size == sizeof(char))) td->ct_flags |= CT_CAST_ANYTHING; /* 'void *' or 'char *' only */ - return (PyObject *)td; + unique_key[0] = ctitem; + return get_unique_type(td, unique_key, 1); } static PyObject *b_new_pointer_type(PyObject *self, PyObject *args) @@ -3611,6 +3665,7 @@ char extra_text[32]; Py_ssize_t arraysize; int flags = CT_ARRAY; + const void *unique_key[2]; if (!(ctptr->ct_flags & 
CT_POINTER)) { PyErr_SetString(PyExc_TypeError, "first arg must be a pointer ctype"); @@ -3648,13 +3703,16 @@ td->ct_stuff = (PyObject *)ctptr; td->ct_size = arraysize; td->ct_length = length; - td->ct_flags = flags | (ctptr->ct_flags & CT_USES_LOCAL); - return (PyObject *)td; + td->ct_flags = flags; + unique_key[0] = ctptr; + unique_key[1] = (void *)length; + return get_unique_type(td, unique_key, 2); } static PyObject *new_void_type(void) { int name_size = strlen("void") + 1; + const void *unique_key[1]; CTypeDescrObject *td = ctypedescr_new(name_size); if (td == NULL) return NULL; @@ -3663,7 +3721,8 @@ td->ct_size = -1; td->ct_flags = CT_VOID | CT_IS_OPAQUE; td->ct_name_position = strlen("void"); - return (PyObject *)td; + unique_key[0] = "void"; + return get_unique_type(td, unique_key, 1); } static PyObject *b_new_void_type(PyObject *self, PyObject *args) @@ -3680,7 +3739,7 @@ td->ct_size = -1; td->ct_length = -1; - td->ct_flags = flag | CT_IS_OPAQUE | CT_USES_LOCAL; + td->ct_flags = flag | CT_IS_OPAQUE; td->ct_extra = NULL; memcpy(td->ct_name, name, namelen + 1); td->ct_name_position = namelen; @@ -4406,8 +4465,6 @@ int ellipsis) { CTypeDescrObject *fct; - Py_ssize_t i, nargs; - int all_flags; fb->nb_bytes = 0; fb->bufferp = NULL; @@ -4429,16 +4486,9 @@ goto error; assert(fb->bufferp == fct->ct_name + fb->nb_bytes); - all_flags = fresult->ct_flags; - nargs = PyTuple_GET_SIZE(fargs); - for (i = 0; i < nargs; i++) { - CTypeDescrObject *farg = (CTypeDescrObject *)PyTuple_GET_ITEM(fargs, i); - all_flags |= farg->ct_flags; - } - fct->ct_extra = NULL; fct->ct_size = sizeof(void(*)(void)); - fct->ct_flags = CT_FUNCTIONPTR | (all_flags & CT_USES_LOCAL); + fct->ct_flags = CT_FUNCTIONPTR; return fct; error: @@ -4496,6 +4546,7 @@ CTypeDescrObject *fct; struct funcbuilder_s funcbuilder; Py_ssize_t i; + const void **unique_key; if ((fresult->ct_size < 0 && !(fresult->ct_flags & CT_VOID)) || (fresult->ct_flags & CT_ARRAY)) { @@ -4551,7 +4602,15 @@ Py_INCREF(o); 
PyTuple_SET_ITEM(fct->ct_stuff, 2 + i, o); } - return (PyObject *)fct; + + /* [ctresult, ellipsis+abi, num_args, ctargs...] */ + unique_key = alloca((3 + funcbuilder.nargs) * sizeof(void *)); + unique_key[0] = fresult; + unique_key[1] = (const void *)(Py_ssize_t)((fabi << 1) | !!ellipsis); + unique_key[2] = (const void *)(Py_ssize_t)(funcbuilder.nargs); + for (i=0; ict_stuff, 2 + i); + return get_unique_type(fct, unique_key, 3 + funcbuilder.nargs); error: Py_DECREF(fct); @@ -4890,7 +4949,7 @@ td->ct_size = basetd->ct_size; td->ct_length = basetd->ct_length; /* alignment */ td->ct_extra = basetd->ct_extra; /* ffi type */ - td->ct_flags = basetd->ct_flags | CT_IS_ENUM | CT_USES_LOCAL; + td->ct_flags = basetd->ct_flags | CT_IS_ENUM; td->ct_name_position = name_size - 1; return (PyObject *)td; @@ -5914,6 +5973,11 @@ if (m == NULL) INITERROR; + + unique_cache = PyDict_New(); + if (unique_cache == NULL) + INITERROR; + if (PyType_Ready(&dl_type) < 0) INITERROR; if (PyType_Ready(&CTypeDescr_Type) < 0) From noreply at buildbot.pypy.org Mon Apr 27 18:27:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 18:27:40 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Check sizeof(global variables) when possible Message-ID: <20150427162740.29AEE1C0EF2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1856:7d84833187ae Date: 2015-04-27 17:26 +0200 http://bitbucket.org/cffi/cffi/changeset/7d84833187ae/ Log: Check sizeof(global variables) when possible diff --git a/_cffi1/lib_obj.c b/_cffi1/lib_obj.c --- a/_cffi1/lib_obj.c +++ b/_cffi1/lib_obj.c @@ -141,8 +141,7 @@ PyErr_Format(PyExc_AttributeError, "lib '%.200s' has no function," " global variable or constant named '%.200s'", - PyText_AS_UTF8(lib->l_libname), - PyText_Check(name) ? 
PyText_AS_UTF8(name) : "?"); + PyText_AS_UTF8(lib->l_libname), s); return NULL; } @@ -198,12 +197,23 @@ _CFFI_GETARG(g->type_op)); if (ct == NULL) return NULL; - x = make_global_var(ct, g->address); + if (g->size != ct->ct_size && + g->size != (size_t)-1 && ct->ct_size != -1) { + PyErr_Format(FFIError, + "global variable '%.200s' should be %zd bytes " + "according to the cdef, but is actually %zd", + s, ct->ct_size, g->size); + x = NULL; + } + else { + x = make_global_var(ct, g->address); + } Py_DECREF(ct); break; default: - PyErr_SetString(PyExc_NotImplementedError, "in lib_build_attr"); + PyErr_Format(PyExc_NotImplementedError, "in lib_build_attr: op=%d", + (int)_CFFI_GETOP(g->type_op)); return NULL; } diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -63,6 +63,7 @@ struct _cffi_global_s { const char *name; void *address; + size_t size; // -1 if unknown _cffi_opcode_t type_op; }; diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -418,7 +418,8 @@ else: meth_kind = 'V' # 'METH_VARARGS' self._lsts["global"].append( - ' { "%s", _cffi_f_%s, _CFFI_OP(_CFFI_OP_CPYTHON_BLTN_%s, %d) },' + ' { "%s", _cffi_f_%s, (size_t)-1, ' + '_CFFI_OP(_CFFI_OP_CPYTHON_BLTN_%s, %d) },' % (name, name, meth_kind, type_index)) # ---------- @@ -631,7 +632,8 @@ type_index = self._typesdict[tp] type_op = '_CFFI_OP(_CFFI_OP_CONSTANT, %d)' % type_index self._lsts["global"].append( - ' { "%s", _cffi_const_%s, %s },' % (name, name, type_op)) + ' { "%s", _cffi_const_%s, (size_t)-1, %s },' % + (name, name, type_op)) # ---------- # enums @@ -648,8 +650,8 @@ type_op = '_CFFI_OP(_CFFI_OP_ENUM, -1)' for enumerator in tp.enumerators: self._lsts["global"].append( - ' { "%s", _cffi_const_%s, %s },' % (enumerator, enumerator, - type_op)) + ' { "%s", _cffi_const_%s, (size_t)-1, %s },' % + (enumerator, enumerator, type_op)) # if cname is not None and '$' not in cname: size = 
"sizeof(%s)" % cname @@ -679,7 +681,8 @@ def _generate_cpy_macro_ctx(self, tp, name): self._lsts["global"].append( - ' { "%s", _cffi_const_%s, _CFFI_OP(_CFFI_OP_CONSTANT_INT, 0) },' % + ' { "%s", _cffi_const_%s, (size_t)-1,' + ' _CFFI_OP(_CFFI_OP_CONSTANT_INT, 0) },' % (name, name)) # ---------- @@ -700,9 +703,13 @@ def _generate_cpy_variable_ctx(self, tp, name): tp = self._global_type(tp, name) type_index = self._typesdict[tp] + if tp.sizeof_enabled(): + size = "sizeof(%s)" % (name,) + else: + size = "(size_t)-1" self._lsts["global"].append( - ' { "%s", &%s, _CFFI_OP(_CFFI_OP_GLOBAL_VAR, %d)},' - % (name, name, type_index)) + ' { "%s", &%s, %s, _CFFI_OP(_CFFI_OP_GLOBAL_VAR, %d)},' + % (name, name, size, type_index)) # ---------- # emitting the opcodes for individual types diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -420,3 +420,23 @@ assert test_module_name_in_package.mymod.lib.foo(10) == 42 finally: sys.path[:] = old_sys_path + +def test_bad_size_of_global_1(): + ffi = FFI() + ffi.cdef("short glob;") + lib = verify(ffi, "test_bad_size_of_global_1", "long glob;") + py.test.raises(ffi.error, "lib.glob") + +def test_bad_size_of_global_2(): + ffi = FFI() + ffi.cdef("int glob[10];") + lib = verify(ffi, "test_bad_size_of_global_2", "int glob[9];") + e = py.test.raises(ffi.error, "lib.glob") + assert str(e.value) == ("global variable 'glob' should be 40 bytes " + "according to the cdef, but is actually 36") + +def test_unspecified_size_of_global(): + ffi = FFI() + ffi.cdef("int glob[];") + lib = verify(ffi, "test_unspecified_size_of_global", "int glob[10];") + lib.glob # does not crash diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -31,6 +31,9 @@ def has_c_name(self): return '$' not in self._get_c_name() + + def sizeof_enabled(self): + return False def get_cached_btype(self, ffi, finishlist, can_delay=False): try: @@ -121,6 +124,9 @@ def 
is_float_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' + def sizeof_enabled(self): + return True + def build_backend_type(self, ffi, finishlist): return global_cache(self, ffi, 'new_primitive_type', self.name) @@ -161,6 +167,9 @@ class FunctionPtrType(BaseFunctionType): _base_pattern = '(*&)(%s)' + def sizeof_enabled(self): + return True + def build_backend_type(self, ffi, finishlist): result = self.result.get_cached_btype(ffi, finishlist) args = [] @@ -186,6 +195,9 @@ extra = self._base_pattern self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) + def sizeof_enabled(self): + return True + def build_backend_type(self, ffi, finishlist): BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True) return global_cache(self, ffi, 'new_pointer_type', BItem) @@ -226,6 +238,9 @@ self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) + def sizeof_enabled(self): + return self.item.sizeof_enabled() and self.length is not None + def resolve_length(self, newlength): return ArrayType(self.item, newlength) @@ -379,6 +394,9 @@ from . 
import ffiplatform raise ffiplatform.VerificationMissing(self._get_c_name()) + def sizeof_enabled(self): + return self.fldtypes is not None + def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) @@ -407,6 +425,9 @@ self.baseinttype = baseinttype self.build_c_name_with_marker() + def sizeof_enabled(self): + return True # not strictly true, but external enums are obscure + def force_the_name(self, forcename): StructOrUnionOrEnum.force_the_name(self, forcename) if self.forcename is None: From noreply at buildbot.pypy.org Mon Apr 27 18:34:05 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 18:34:05 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Move the "size" field to the end of the structure and use 0 to mean "unknown" Message-ID: <20150427163405.99B951C116B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1857:107bf9987254 Date: 2015-04-27 18:34 +0200 http://bitbucket.org/cffi/cffi/changeset/107bf9987254/ Log: Move the "size" field to the end of the structure and use 0 to mean "unknown" diff --git a/_cffi1/lib_obj.c b/_cffi1/lib_obj.c --- a/_cffi1/lib_obj.c +++ b/_cffi1/lib_obj.c @@ -197,8 +197,7 @@ _CFFI_GETARG(g->type_op)); if (ct == NULL) return NULL; - if (g->size != ct->ct_size && - g->size != (size_t)-1 && ct->ct_size != -1) { + if (g->size != ct->ct_size && g->size != 0 && ct->ct_size > 0) { PyErr_Format(FFIError, "global variable '%.200s' should be %zd bytes " "according to the cdef, but is actually %zd", diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -63,8 +63,8 @@ struct _cffi_global_s { const char *name; void *address; - size_t size; // -1 if unknown _cffi_opcode_t type_op; + size_t size; // 0 if unknown }; struct _cffi_struct_union_s { diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -418,8 +418,7 @@ else: meth_kind = 'V' # 
'METH_VARARGS' self._lsts["global"].append( - ' { "%s", _cffi_f_%s, (size_t)-1, ' - '_CFFI_OP(_CFFI_OP_CPYTHON_BLTN_%s, %d) },' + ' { "%s", _cffi_f_%s, _CFFI_OP(_CFFI_OP_CPYTHON_BLTN_%s, %d), 0 },' % (name, name, meth_kind, type_index)) # ---------- @@ -632,8 +631,7 @@ type_index = self._typesdict[tp] type_op = '_CFFI_OP(_CFFI_OP_CONSTANT, %d)' % type_index self._lsts["global"].append( - ' { "%s", _cffi_const_%s, (size_t)-1, %s },' % - (name, name, type_op)) + ' { "%s", _cffi_const_%s, %s, 0 },' % (name, name, type_op)) # ---------- # enums @@ -650,7 +648,7 @@ type_op = '_CFFI_OP(_CFFI_OP_ENUM, -1)' for enumerator in tp.enumerators: self._lsts["global"].append( - ' { "%s", _cffi_const_%s, (size_t)-1, %s },' % + ' { "%s", _cffi_const_%s, %s, 0 },' % (enumerator, enumerator, type_op)) # if cname is not None and '$' not in cname: @@ -681,9 +679,8 @@ def _generate_cpy_macro_ctx(self, tp, name): self._lsts["global"].append( - ' { "%s", _cffi_const_%s, (size_t)-1,' - ' _CFFI_OP(_CFFI_OP_CONSTANT_INT, 0) },' % - (name, name)) + ' { "%s", _cffi_const_%s,' + ' _CFFI_OP(_CFFI_OP_CONSTANT_INT, 0), 0 },' % (name, name)) # ---------- # global variables @@ -706,10 +703,10 @@ if tp.sizeof_enabled(): size = "sizeof(%s)" % (name,) else: - size = "(size_t)-1" + size = "0" self._lsts["global"].append( - ' { "%s", &%s, %s, _CFFI_OP(_CFFI_OP_GLOBAL_VAR, %d)},' - % (name, name, size, type_index)) + ' { "%s", &%s, _CFFI_OP(_CFFI_OP_GLOBAL_VAR, %d), %s },' + % (name, name, type_index, size)) # ---------- # emitting the opcodes for individual types From noreply at buildbot.pypy.org Mon Apr 27 19:04:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 19:04:40 +0200 (CEST) Subject: [pypy-commit] cffi default: Test and fix: the new types like "int_least8_t" were actually not Message-ID: <20150427170440.5F0881C115A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1858:a7b42ee0db8e Date: 2015-04-27 19:04 +0200 
http://bitbucket.org/cffi/cffi/changeset/a7b42ee0db8e/ Log: Test and fix: the new types like "int_least8_t" were actually not available because model.py didn't list them diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -102,8 +102,26 @@ 'uint32_t': 'i', 'int64_t': 'i', 'uint64_t': 'i', + 'int_least8_t': 'i', + 'uint_least8_t': 'i', + 'int_least16_t': 'i', + 'uint_least16_t': 'i', + 'int_least32_t': 'i', + 'uint_least32_t': 'i', + 'int_least64_t': 'i', + 'uint_least64_t': 'i', + 'int_fast8_t': 'i', + 'uint_fast8_t': 'i', + 'int_fast16_t': 'i', + 'uint_fast16_t': 'i', + 'int_fast32_t': 'i', + 'uint_fast32_t': 'i', + 'int_fast64_t': 'i', + 'uint_fast64_t': 'i', 'intptr_t': 'i', 'uintptr_t': 'i', + 'intmax_t': 'i', + 'uintmax_t': 'i', 'ptrdiff_t': 'i', 'size_t': 'i', 'ssize_t': 'i', diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1703,5 +1703,3 @@ assert lib.DOT_HEX == 0x100 assert lib.DOT_HEX2 == 0x10 assert lib.DOT_UL == 1000 - - diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -222,3 +222,57 @@ assert ffi.typeof(c) is ffi.typeof("char[]") ffi.cast("unsigned short *", c)[1] += 500 assert list(a) == [10000, 20500, 30000] + + def test_all_primitives(self): + ffi = FFI() + for name in [ + "char", + "short", + "int", + "long", + "long long", + "signed char", + "unsigned char", + "unsigned short", + "unsigned int", + "unsigned long", + "unsigned long long", + "float", + "double", + "long double", + "wchar_t", + "_Bool", + "int8_t", + "uint8_t", + "int16_t", + "uint16_t", + "int32_t", + "uint32_t", + "int64_t", + "uint64_t", + "int_least8_t", + "uint_least8_t", + "int_least16_t", + "uint_least16_t", + "int_least32_t", + "uint_least32_t", + "int_least64_t", + "uint_least64_t", + "int_fast8_t", + "uint_fast8_t", + "int_fast16_t", + "uint_fast16_t", + "int_fast32_t", 
+ "uint_fast32_t", + "int_fast64_t", + "uint_fast64_t", + "intptr_t", + "uintptr_t", + "intmax_t", + "uintmax_t", + "ptrdiff_t", + "size_t", + "ssize_t", + ]: + x = ffi.sizeof(name) + assert 1 <= x <= 16 From noreply at buildbot.pypy.org Mon Apr 27 19:09:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 19:09:47 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: hg merge default Message-ID: <20150427170947.E4A541C04A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1859:283af058e34b Date: 2015-04-27 19:10 +0200 http://bitbucket.org/cffi/cffi/changeset/283af058e34b/ Log: hg merge default diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -105,8 +105,26 @@ 'uint32_t': 'i', 'int64_t': 'i', 'uint64_t': 'i', + 'int_least8_t': 'i', + 'uint_least8_t': 'i', + 'int_least16_t': 'i', + 'uint_least16_t': 'i', + 'int_least32_t': 'i', + 'uint_least32_t': 'i', + 'int_least64_t': 'i', + 'uint_least64_t': 'i', + 'int_fast8_t': 'i', + 'uint_fast8_t': 'i', + 'int_fast16_t': 'i', + 'uint_fast16_t': 'i', + 'int_fast32_t': 'i', + 'uint_fast32_t': 'i', + 'int_fast64_t': 'i', + 'uint_fast64_t': 'i', 'intptr_t': 'i', 'uintptr_t': 'i', + 'intmax_t': 'i', + 'uintmax_t': 'i', 'ptrdiff_t': 'i', 'size_t': 'i', 'ssize_t': 'i', diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1703,5 +1703,3 @@ assert lib.DOT_HEX == 0x100 assert lib.DOT_HEX2 == 0x10 assert lib.DOT_UL == 1000 - - diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -222,3 +222,57 @@ assert ffi.typeof(c) is ffi.typeof("char[]") ffi.cast("unsigned short *", c)[1] += 500 assert list(a) == [10000, 20500, 30000] + + def test_all_primitives(self): + ffi = FFI() + for name in [ + "char", + "short", + "int", + "long", + "long long", + "signed char", + "unsigned char", + "unsigned 
short", + "unsigned int", + "unsigned long", + "unsigned long long", + "float", + "double", + "long double", + "wchar_t", + "_Bool", + "int8_t", + "uint8_t", + "int16_t", + "uint16_t", + "int32_t", + "uint32_t", + "int64_t", + "uint64_t", + "int_least8_t", + "uint_least8_t", + "int_least16_t", + "uint_least16_t", + "int_least32_t", + "uint_least32_t", + "int_least64_t", + "uint_least64_t", + "int_fast8_t", + "uint_fast8_t", + "int_fast16_t", + "uint_fast16_t", + "int_fast32_t", + "uint_fast32_t", + "int_fast64_t", + "uint_fast64_t", + "intptr_t", + "uintptr_t", + "intmax_t", + "uintmax_t", + "ptrdiff_t", + "size_t", + "ssize_t", + ]: + x = ffi.sizeof(name) + assert 1 <= x <= 16 From noreply at buildbot.pypy.org Mon Apr 27 19:13:07 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 19:13:07 +0200 (CEST) Subject: [pypy-commit] cffi default: Mention the bug Message-ID: <20150427171307.1706E1C04A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1860:ccbd4be0624d Date: 2015-04-27 19:13 +0200 http://bitbucket.org/cffi/cffi/changeset/ccbd4be0624d/ Log: Mention the bug diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -437,9 +437,9 @@ types ``TBYTE TCHAR LPCTSTR PCTSTR LPTSTR PTSTR PTBYTE PTCHAR`` are no longer automatically defined; see ``ffi.set_unicode()`` below. -* *New in version 0.9:* the other standard integer types from stdint.h, +* *New in version 0.9.3:* the other standard integer types from stdint.h, as long as they map to integers of 1, 2, 4 or 8 bytes. Larger integers - are not supported. + are not supported. (Actually added in version 0.9 but this was buggy.) .. 
_`common Windows types`: http://msdn.microsoft.com/en-us/library/windows/desktop/aa383751%28v=vs.85%29.aspx From noreply at buildbot.pypy.org Mon Apr 27 21:31:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 21:31:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a @signature here (seen by translating Message-ID: <20150427193136.A0E821C114E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r76946:76b0dfb05efb Date: 2015-04-27 21:31 +0200 http://bitbucket.org/pypy/pypy/changeset/76b0dfb05efb/ Log: Add a @signature here (seen by translating https://github.com/cgswords/laurens/blob/master/laurens/stg.py at baa4326) diff --git a/rpython/rtyper/lltypesystem/rordereddict.py b/rpython/rtyper/lltypesystem/rordereddict.py --- a/rpython/rtyper/lltypesystem/rordereddict.py +++ b/rpython/rtyper/lltypesystem/rordereddict.py @@ -3,7 +3,8 @@ from rpython.flowspace.model import Constant from rpython.rtyper.rdict import AbstractDictRepr, AbstractDictIteratorRepr from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rlib import objectmodel, jit, rgc +from rpython.rlib import objectmodel, jit, rgc, types +from rpython.rlib.signature import signature from rpython.rlib.objectmodel import specialize, likely from rpython.rlib.debug import ll_assert from rpython.rlib.rarithmetic import r_uint, intmask @@ -515,9 +516,11 @@ dummy = ENTRIES.dummy_obj.ll_dummy_value entries[i].value = dummy + at signature(types.any(), types.int(), returns=types.any()) def ll_hash_from_cache(entries, i): return entries[i].f_hash + at signature(types.any(), types.int(), returns=types.any()) def ll_hash_recomputed(entries, i): ENTRIES = lltype.typeOf(entries).TO return ENTRIES.fasthashfn(entries[i].key) From noreply at buildbot.pypy.org Mon Apr 27 23:48:54 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 23:48:54 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Move this as a static global Message-ID: 
<20150427214854.50B7E1C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1863:2f227a0cb551 Date: 2015-04-27 21:56 +0200 http://bitbucket.org/cffi/cffi/changeset/2f227a0cb551/ Log: Move this as a static global diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -24,12 +24,13 @@ struct _cffi_parse_info_s info; int ctx_is_static; builder_c_t *types_builder; - _cffi_opcode_t internal_output[FFI_COMPLEXITY_OUTPUT]; }; static FFIObject *ffi_internal_new(PyTypeObject *ffitype, const struct _cffi_type_context_s *static_ctx) { + static _cffi_opcode_t internal_output[FFI_COMPLEXITY_OUTPUT]; + FFIObject *ffi; if (static_ctx != NULL) { ffi = (FFIObject *)PyObject_GC_New(FFIObject, ffitype); @@ -49,7 +50,7 @@ } ffi->gc_wrefs = NULL; ffi->info.ctx = &ffi->types_builder->ctx; - ffi->info.output = ffi->internal_output; + ffi->info.output = internal_output; ffi->info.output_size = FFI_COMPLEXITY_OUTPUT; ffi->ctx_is_static = (static_ctx != NULL); #if 0 From noreply at buildbot.pypy.org Mon Apr 27 23:48:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 23:48:55 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Basic idea: we ffi.include(baseffi), and baseffi must also be turned into Message-ID: <20150427214855.518251C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1864:bebb7929ea1f Date: 2015-04-27 21:58 +0200 http://bitbucket.org/cffi/cffi/changeset/bebb7929ea1f/ Log: Basic idea: we ffi.include(baseffi), and baseffi must also be turned into C code when ffi is. Then the name of the C module from baseffi ends up written in the global data for ffi. 
diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -113,6 +113,7 @@ int num_struct_unions; int num_enums; int num_typenames; + const char * const *includes; }; struct _cffi_parse_info_s { diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -192,6 +192,19 @@ assert lst[i].startswith(' { "%s"' % tp.name) assert len(lst) == len(self._enums) # + # the declaration of '_cffi_includes' + if self.ffi._included_ffis: + prnt('static const char * const _cffi_includes[] = {') + for ffi_to_include in self.ffi._included_ffis: + if not hasattr(ffi_to_include, '_recompiler_module_name'): + raise ffiplatform.VerificationError( + "this ffi includes %r, but the latter has not been " + "turned into a C module" % (ffi_to_include,)) + prnt(' "%s",' % (ffi_to_include._recompiler_module_name,)) + prnt(' NULL') + prnt('};') + prnt() + # # the declaration of '_cffi_type_context' prnt('static const struct _cffi_type_context_s _cffi_type_context = {') prnt(' _cffi_types,') @@ -203,6 +216,10 @@ for step_name in ALL_STEPS: if step_name != "field": prnt(' %d, /* num_%ss */' % (nums[step_name], step_name)) + if self.ffi._included_ffis: + prnt(' _cffi_includes,') + else: + prnt(' NULL, /* no includes */') prnt('};') prnt() # @@ -216,6 +233,7 @@ prnt(' _cffi_init_module("%s", &_cffi_type_context);' % ( self.module_name,)) prnt('}') + self.ffi._recompiler_module_name = self.module_name # ---------- diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -456,7 +456,9 @@ lib.glob # does not crash def test_include_1(): - ffi1 = FFI(); ffi1.cdef("typedef double foo_t;") + ffi1 = FFI() + ffi1.cdef("typedef double foo_t;") + verify(ffi1, "test_include_1_parent", "typedef double foo_t;") ffi = FFI() ffi.include(ffi1) ffi.cdef("foo_t ff1(foo_t);") @@ -464,7 +466,9 @@ assert lib.ff1(0) == 42.5 def 
test_include_2(): - ffi1 = FFI(); ffi1.cdef("struct foo_s { int x, y; };") + ffi1 = FFI() + ffi1.cdef("struct foo_s { int x, y; };") + verify(ffi1, "test_include_2_parent", "struct foo_s { int x, y; };") ffi = FFI() ffi.include(ffi1) ffi.cdef("struct foo_s *ff2(struct foo_s *);") diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -70,6 +70,7 @@ self._function_caches = [] self._libraries = [] self._cdefsources = [] + self._included_ffis = [] self._windows_unicode = None if hasattr(backend, 'set_ffi'): backend.set_ffi(self) @@ -424,6 +425,7 @@ self._cdefsources.append('[') self._cdefsources.extend(ffi_to_include._cdefsources) self._cdefsources.append(']') + self._included_ffis.append(ffi_to_include) def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) From noreply at buildbot.pypy.org Mon Apr 27 23:48:53 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 23:48:53 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Comment out ffi.__set_types(), never used for now Message-ID: <20150427214853.579EA1C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1862:987e9d3d69c4 Date: 2015-04-27 18:53 +0200 http://bitbucket.org/cffi/cffi/changeset/987e9d3d69c4/ Log: Comment out ffi.__set_types(), never used for now diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -24,7 +24,6 @@ struct _cffi_parse_info_s info; int ctx_is_static; builder_c_t *types_builder; - PyObject *dynamic_types; _cffi_opcode_t internal_output[FFI_COMPLEXITY_OUTPUT]; }; @@ -53,7 +52,9 @@ ffi->info.output = ffi->internal_output; ffi->info.output_size = FFI_COMPLEXITY_OUTPUT; ffi->ctx_is_static = (static_ctx != NULL); +#if 0 ffi->dynamic_types = NULL; +#endif return ffi; } @@ -61,7 +62,9 @@ { PyObject_GC_UnTrack(ffi); Py_XDECREF(ffi->gc_wrefs); +#if 0 Py_XDECREF(ffi->dynamic_types); +#endif if (!ffi->ctx_is_static) free_builder_c(ffi->types_builder); @@ -640,6 +643,7 @@ 
return 0; } +#if 0 static PyObject *ffi__set_types(FFIObject *self, PyObject *args) { PyObject *lst1, *lst2; @@ -726,9 +730,9 @@ Py_INCREF(Py_None); return Py_None; } +#endif static PyMethodDef ffi_methods[] = { - {"__set_types",(PyCFunction)ffi__set_types, METH_VARARGS}, {"addressof", (PyCFunction)ffi_addressof, METH_VARARGS, ffi_addressof_doc}, {"alignof", (PyCFunction)ffi_alignof, METH_O, ffi_alignof_doc}, {"buffer", (PyCFunction)ffi_buffer, METH_VARARGS, ffi_buffer_doc}, diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -2,7 +2,6 @@ typedef struct { struct _cffi_type_context_s ctx; /* inlined substructure */ PyObject *types_dict; - int num_types_imported; } builder_c_t; @@ -56,12 +55,14 @@ static void cleanup_builder_c(builder_c_t *builder) { int i; +#if 0 for (i = builder->num_types_imported; (--i) >= 0; ) { _cffi_opcode_t x = builder->ctx.types[i]; if ((((uintptr_t)x) & 1) == 0) { Py_XDECREF((PyObject *)x); } } +#endif const void *mem[] = {builder->ctx.types, builder->ctx.globals, @@ -100,7 +101,9 @@ memset(&builder->ctx, 0, sizeof(builder->ctx)); builder->types_dict = ldict; +#if 0 builder->num_types_imported = 0; +#endif return builder; } From noreply at buildbot.pypy.org Mon Apr 27 23:48:52 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 23:48:52 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: start on ffi.include Message-ID: <20150427214852.4151B1C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1861:fa462bbfa883 Date: 2015-04-27 18:27 +0200 http://bitbucket.org/cffi/cffi/changeset/fa462bbfa883/ Log: start on ffi.include diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -80,6 +80,7 @@ #define _CFFI_F_CHECK_FIELDS 0x02 // complain if fields are not in the // "standard layout" or if some are missing #define _CFFI_F_PACKED 0x04 // for CHECK_FIELDS, assume a 
packed struct +#define _CFFI_F_EXTERNAL 0x08 // in some other ffi.include() struct _cffi_field_s { const char *name; diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -8,9 +8,6 @@ def __init__(self, ffi, module_name): self.ffi = ffi self.module_name = module_name - # - if ']' in self.ffi._cdefsources: - raise NotImplementedError("ffi.include()") def collect_type_table(self): self._typesdict = {} @@ -80,7 +77,8 @@ if isinstance(tp, model.FunctionPtrType): self._do_collect_type(tp.as_raw_function()) elif isinstance(tp, model.StructOrUnion): - if tp.fldtypes is not None: + if tp.fldtypes is not None and ( + tp not in self.ffi._parser._included_declarations): for name1, tp1, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: @@ -464,19 +462,24 @@ def _struct_ctx(self, tp, cname, approxname): type_index = self._typesdict[tp] + reason_for_not_expanding = None flags = [] if isinstance(tp, model.UnionType): flags.append("_CFFI_F_UNION") - if tp.fldtypes is None: - pass # opaque - elif tp.partial or tp.has_anonymous_struct_fields(): - pass # the field layout is obtained silently from the C compiler + if tp not in self.ffi._parser._included_declarations: + if tp.fldtypes is None: + reason_for_not_expanding = "opaque" + elif tp.partial or tp.has_anonymous_struct_fields(): + pass # field layout obtained silently from the C compiler + else: + flags.append("_CFFI_F_CHECK_FIELDS") + if tp.packed: + flags.append("_CFFI_F_PACKED") else: - flags.append("_CFFI_F_CHECK_FIELDS") - if tp.packed: - flags.append("_CFFI_F_PACKED") + flags.append("_CFFI_F_EXTERNAL") + reason_for_not_expanding = "external" flags = '|'.join(flags) or '0' - if tp.fldtypes is not None: + if reason_for_not_expanding is None: c_field = [approxname] enumfields = list(tp.enumfields()) for fldname, fldtype, fbitsize in enumfields: @@ -515,7 +518,8 @@ ' _cffi_FIELDS_FOR_%s, %d },' % (approxname, len(enumfields),)) else: - 
size_align = ' (size_t)-1, -1, -1, 0 /* opaque */ },' + size_align = ' (size_t)-1, -1, -1, 0 /* %s */ },' % ( + reason_for_not_expanding,) self._lsts["struct_union"].append( ' { "%s", %d, %s,' % (tp.name, type_index, flags) + size_align) self._seen_struct_unions.add(tp) diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -4,8 +4,12 @@ from _cffi1.udir import udir -def check_type_table(input, expected_output): +def check_type_table(input, expected_output, included=None): ffi = FFI() + if included: + ffi1 = FFI() + ffi1.cdef(included) + ffi.include(ffi1) ffi.cdef(input) recomp = recompiler.Recompiler(ffi, 'testmod') recomp.collect_type_table() @@ -92,6 +96,16 @@ check_type_table("enum foo_e { AA, BB, ... };", "(ENUM 0)") +def test_type_table_include_1(): + check_type_table("foo_t sin(foo_t);", + "(FUNCTION 1)(PRIMITIVE 14)(FUNCTION_END 0)", + included="typedef double foo_t;") + +def test_type_table_include_2(): + check_type_table("struct foo_s *sin(struct foo_s *);", + "(FUNCTION 1)(POINTER 3)(FUNCTION_END 0)(STRUCT_UNION 0)", + included="struct foo_s { int x, y; };") + def test_math_sin(): import math @@ -440,3 +454,25 @@ ffi.cdef("int glob[];") lib = verify(ffi, "test_unspecified_size_of_global", "int glob[10];") lib.glob # does not crash + +def test_include_1(): + ffi1 = FFI(); ffi1.cdef("typedef double foo_t;") + ffi = FFI() + ffi.include(ffi1) + ffi.cdef("foo_t ff1(foo_t);") + lib = verify(ffi, "test_include_1", "double ff1(double x) { return 42.5; }") + assert lib.ff1(0) == 42.5 + +def test_include_2(): + ffi1 = FFI(); ffi1.cdef("struct foo_s { int x, y; };") + ffi = FFI() + ffi.include(ffi1) + ffi.cdef("struct foo_s *ff2(struct foo_s *);") + lib = verify(ffi, "test_include_2", + "struct foo_s { int x, y; };\n" + "struct foo_s *ff2(struct foo_s *p) { p->y++; return p; }") + p = ffi.new("struct foo_s *") + p.y = 41 + q = lib.ff2(p) + assert q == p + assert p.y == 42 diff --git 
a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -95,6 +95,7 @@ def __init__(self): self._declarations = {} + self._included_declarations = set() self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() self._override = False @@ -278,7 +279,7 @@ raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) return self._get_type(exprnode.type) - def _declare(self, name, obj): + def _declare(self, name, obj, included=False): if name in self._declarations: if self._declarations[name] is obj: return @@ -288,6 +289,8 @@ "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() self._declarations[name] = obj + if included: + self._included_declarations.add(obj) def _get_type_pointer(self, type, const=False): if isinstance(type, model.RawFunctionType): @@ -601,7 +604,9 @@ def include(self, other): for name, tp in other._declarations.items(): kind = name.split(' ', 1)[0] - if kind in ('typedef', 'struct', 'union', 'enum'): + if kind in ('struct', 'union', 'enum'): + self._declare(name, tp, included=True) + elif kind == 'typedef': self._declare(name, tp) for k, v in other._int_constants.items(): self._add_constants(k, v) From noreply at buildbot.pypy.org Mon Apr 27 23:48:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 23:48:56 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: look in ffi.included modules for global functions/variables/consts Message-ID: <20150427214856.4ED291C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1865:42497db90079 Date: 2015-04-27 22:56 +0200 http://bitbucket.org/cffi/cffi/changeset/42497db90079/ Log: look in ffi.included modules for global functions/variables/consts diff --git a/_cffi1/cffi1_module.c b/_cffi1/cffi1_module.c --- a/_cffi1/cffi1_module.c +++ b/_cffi1/cffi1_module.c @@ -45,6 +45,59 @@ return 0; } +static int make_included_tuples(const char *const *ctx_includes, + PyObject 
**included_ffis, + PyObject **included_libs) +{ + Py_ssize_t num = 0; + const char *const *p_include; + + if (ctx_includes == NULL) + return 0; + + for (p_include = ctx_includes; *p_include; p_include++) { + num++; + } + *included_ffis = PyTuple_New(num); + *included_libs = PyTuple_New(num); + if (*included_ffis == NULL || *included_libs == NULL) + goto error; + + num = 0; + for (p_include = ctx_includes; *p_include; p_include++) { + PyObject *included_ffi, *included_lib; + PyObject *m = PyImport_ImportModule(*p_include); + if (m == NULL) + goto error; + + included_ffi = PyObject_GetAttrString(m, "ffi"); + PyTuple_SET_ITEM(*included_ffis, num, included_ffi); + + included_lib = (included_ffi == NULL) ? NULL : + PyObject_GetAttrString(m, "lib"); + PyTuple_SET_ITEM(*included_libs, num, included_lib); + + Py_DECREF(m); + if (included_lib == NULL) + goto error; + + if (!FFIObject_Check(included_ffi) || + !LibObject_Check(included_lib)) { + PyErr_Format(PyExc_TypeError, + "expected FFI/Lib objects in %.200s.ffi/lib", + *p_include); + goto error; + } + num++; + } + return 0; + + error: + Py_XDECREF(*included_ffis); *included_ffis = NULL; + Py_XDECREF(*included_libs); *included_libs = NULL; + return -1; +} + static int _cffi_init_module(char *module_name, const struct _cffi_type_context_s *ctx) { @@ -61,5 +114,9 @@ if (lib == NULL || PyModule_AddObject(m, "lib", (PyObject *)lib) < 0) return -1; + if (make_included_tuples(ctx->includes, &ffi->included_ffis, + &lib->l_includes) < 0) + return -1; + return 0; } diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -18,12 +18,15 @@ #define FFI_COMPLEXITY_OUTPUT 1200 /* xxx should grow as needed */ +#define FFIObject_Check(op) PyObject_TypeCheck(op, &FFI_Type) + struct FFIObject_s { PyObject_HEAD PyObject *gc_wrefs; struct _cffi_parse_info_s info; int ctx_is_static; builder_c_t *types_builder; + PyObject *included_ffis; }; static FFIObject *ffi_internal_new(PyTypeObject *ffitype, @@ 
-53,6 +56,7 @@ ffi->info.output = internal_output; ffi->info.output_size = FFI_COMPLEXITY_OUTPUT; ffi->ctx_is_static = (static_ctx != NULL); + ffi->included_ffis = NULL; #if 0 ffi->dynamic_types = NULL; #endif @@ -70,6 +74,7 @@ if (!ffi->ctx_is_static) free_builder_c(ffi->types_builder); + Py_XDECREF(ffi->included_ffis); Py_TYPE(ffi)->tp_free((PyObject *)ffi); } @@ -77,6 +82,7 @@ { Py_VISIT(ffi->types_builder->types_dict); Py_VISIT(ffi->gc_wrefs); + Py_VISIT(ffi->included_ffis); return 0; } diff --git a/_cffi1/lib_obj.c b/_cffi1/lib_obj.c --- a/_cffi1/lib_obj.c +++ b/_cffi1/lib_obj.c @@ -23,6 +23,7 @@ builder_c_t *l_types_builder; /* same as the one on the ffi object */ PyObject *l_dict; /* content, built lazily */ PyObject *l_libname; /* some string that gives the name of the lib */ + PyObject *l_includes; /* tuple of LibObjects included here */ }; #define LibObject_Check(ob) ((Py_TYPE(ob) == &Lib_Type)) @@ -64,6 +65,7 @@ { Py_DECREF(lib->l_dict); Py_DECREF(lib->l_libname); + Py_XDECREF(lib->l_includes); PyObject_Del(lib); } @@ -128,9 +130,11 @@ return NULL; } -static PyObject *lib_build_and_cache_attr(LibObject *lib, PyObject *name) +static PyObject *lib_build_and_cache_attr(LibObject *lib, PyObject *name, + int recursion) { /* does not return a new reference! 
*/ + PyObject *x; char *s = PyText_AsUTF8(name); if (s == NULL) @@ -138,15 +142,45 @@ int index = search_in_globals(&lib->l_types_builder->ctx, s, strlen(s)); if (index < 0) { + + if (lib->l_includes != NULL) { + + if (recursion > 100) { + PyErr_SetString(PyExc_RuntimeError, + "recursion overflow in ffi.include() delegations"); + return NULL; + } + + Py_ssize_t i; + for (i = 0; i < PyTuple_GET_SIZE(lib->l_includes); i++) { + LibObject *lib1; + lib1 = (LibObject *)PyTuple_GET_ITEM(lib->l_includes, i); + x = PyDict_GetItem(lib1->l_dict, name); + if (x != NULL) { + Py_INCREF(x); + goto found; + } + x = lib_build_and_cache_attr(lib1, name, recursion + 1); + if (x != NULL) { + Py_INCREF(x); + goto found; + } + if (PyErr_Occurred()) + return NULL; + } + } + + if (recursion > 0) + return NULL; /* no error set, continue looking elsewhere */ + PyErr_Format(PyExc_AttributeError, - "lib '%.200s' has no function," + "cffi lib '%.200s' has no function," " global variable or constant named '%.200s'", PyText_AS_UTF8(lib->l_libname), s); return NULL; } const struct _cffi_global_s *g = &lib->l_types_builder->ctx.globals[index]; - PyObject *x; CTypeDescrObject *ct; switch (_CFFI_GETOP(g->type_op)) { @@ -217,6 +251,7 @@ return NULL; } + found: if (x != NULL) { int err = PyDict_SetItem(lib->l_dict, name, x); Py_DECREF(x); @@ -230,7 +265,7 @@ { PyObject *x = PyDict_GetItem(lib->l_dict, name); if (x == NULL) { - x = lib_build_and_cache_attr(lib, name); + x = lib_build_and_cache_attr(lib, name, 0); if (x == NULL) return NULL; } @@ -246,7 +281,7 @@ { PyObject *x = PyDict_GetItem(lib->l_dict, name); if (x == NULL) { - x = lib_build_and_cache_attr(lib, name); + x = lib_build_and_cache_attr(lib, name, 0); if (x == NULL) return -1; } @@ -351,5 +386,6 @@ lib->l_types_builder = types_builder; lib->l_dict = dict; lib->l_libname = libname; + lib->l_includes = NULL; return lib; } diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ 
-113,7 +113,7 @@ int num_struct_unions; int num_enums; int num_typenames; - const char * const *includes; + const char *const *includes; }; struct _cffi_parse_info_s { diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -465,6 +465,17 @@ lib = verify(ffi, "test_include_1", "double ff1(double x) { return 42.5; }") assert lib.ff1(0) == 42.5 +def test_include_1b(): + ffi1 = FFI() + ffi1.cdef("int foo1(int);") + verify(ffi1, "test_include_1b_parent", "int foo1(int x) { return x + 10; }") + ffi = FFI() + ffi.include(ffi1) + ffi.cdef("int foo2(int);") + lib = verify(ffi, "test_include_1b", "int foo2(int x) { return x - 5; }") + assert lib.foo2(42) == 37 + assert lib.foo1(42) == 52 + def test_include_2(): ffi1 = FFI() ffi1.cdef("struct foo_s { int x, y; };") From noreply at buildbot.pypy.org Mon Apr 27 23:48:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 23:48:57 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Resolve the F_EXTERNAL struct names by following the ffi.include chain Message-ID: <20150427214857.50AF81C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1866:8218d7a07822 Date: 2015-04-27 23:48 +0200 http://bitbucket.org/cffi/cffi/changeset/8218d7a07822/ Log: Resolve the F_EXTERNAL struct names by following the ffi.include chain diff --git a/_cffi1/cffi1_module.c b/_cffi1/cffi1_module.c --- a/_cffi1/cffi1_module.c +++ b/_cffi1/cffi1_module.c @@ -114,7 +114,7 @@ if (lib == NULL || PyModule_AddObject(m, "lib", (PyObject *)lib) < 0) return -1; - if (make_included_tuples(ctx->includes, &ffi->included_ffis, + if (make_included_tuples(ctx->includes, &ffi->types_builder->included_ffis, &lib->l_includes) < 0) return -1; diff --git a/_cffi1/cffi_opcode.py b/_cffi1/cffi_opcode.py --- a/_cffi1/cffi_opcode.py +++ b/_cffi1/cffi_opcode.py @@ -100,6 +100,7 @@ F_UNION = 0x01 F_CHECK_FIELDS = 0x02 F_PACKED = 0x04 +F_EXTERNAL = 0x08 
CLASS_NAME = {} for _name, _value in globals().items(): diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -26,7 +26,6 @@ struct _cffi_parse_info_s info; int ctx_is_static; builder_c_t *types_builder; - PyObject *included_ffis; }; static FFIObject *ffi_internal_new(PyTypeObject *ffitype, @@ -56,7 +55,6 @@ ffi->info.output = internal_output; ffi->info.output_size = FFI_COMPLEXITY_OUTPUT; ffi->ctx_is_static = (static_ctx != NULL); - ffi->included_ffis = NULL; #if 0 ffi->dynamic_types = NULL; #endif @@ -74,15 +72,14 @@ if (!ffi->ctx_is_static) free_builder_c(ffi->types_builder); - Py_XDECREF(ffi->included_ffis); Py_TYPE(ffi)->tp_free((PyObject *)ffi); } static int ffi_traverse(FFIObject *ffi, visitproc visit, void *arg) { Py_VISIT(ffi->types_builder->types_dict); + Py_VISIT(ffi->types_builder->included_ffis); Py_VISIT(ffi->gc_wrefs); - Py_VISIT(ffi->included_ffis); return 0; } @@ -809,3 +806,44 @@ ffiobj_new, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; + + +static PyObject * +_fetch_external_struct_or_union(const struct _cffi_struct_union_s *s, + PyObject *included_ffis, int recursion) +{ + if (included_ffis == NULL) + return NULL; + + if (recursion > 100) { + PyErr_SetString(PyExc_RuntimeError, + "recursion overflow in ffi.include() delegations"); + return NULL; + } + + Py_ssize_t i; + for (i = 0; i < PyTuple_GET_SIZE(included_ffis); i++) { + FFIObject *ffi1; + const struct _cffi_struct_union_s *s1; + int sindex; + PyObject *x; + + ffi1 = (FFIObject *)PyTuple_GET_ITEM(included_ffis, i); + sindex = search_in_struct_unions(&ffi1->types_builder->ctx, s->name, + strlen(s->name)); + if (sindex < 0) /* not found at all */ + continue; + s1 = &ffi1->types_builder->ctx.struct_unions[sindex]; + if ((s1->flags & (_CFFI_F_EXTERNAL | _CFFI_F_UNION)) + == (s->flags & _CFFI_F_UNION)) { + /* s1 is not external, and the same kind (struct or union) as s */ + return _realize_c_struct_or_union(ffi1->types_builder, sindex); + } + /* not 
found, look more recursively */ + x = _fetch_external_struct_or_union( + s, ffi1->types_builder->included_ffis, recursion + 1); + if (x != NULL || PyErr_Occurred()) + return x; /* either found, or got an error */ + } + return NULL; /* not found at all, leave without an error */ +} diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -2,6 +2,7 @@ typedef struct { struct _cffi_type_context_s ctx; /* inlined substructure */ PyObject *types_dict; + PyObject *included_ffis; } builder_c_t; @@ -74,6 +75,9 @@ if (mem[i] != NULL) PyMem_Free((void *)mem[i]); } + + Py_XDECREF(builder->included_ffis); + builder->included_ffis = NULL; } static void free_builder_c(builder_c_t *builder) @@ -101,6 +105,7 @@ memset(&builder->ctx, 0, sizeof(builder->ctx)); builder->types_dict = ldict; + builder->included_ffis = NULL; #if 0 builder->num_types_imported = 0; #endif @@ -268,6 +273,79 @@ } } +static PyObject * /* forward */ +_fetch_external_struct_or_union(const struct _cffi_struct_union_s *s, + PyObject *included_ffis, int recursion); + +static PyObject * +_realize_c_struct_or_union(builder_c_t *builder, int sindex) +{ + PyObject *x; + _cffi_opcode_t op2; + const struct _cffi_struct_union_s *s; + + s = &builder->ctx.struct_unions[sindex]; + op2 = builder->ctx.types[s->type_index]; + if ((((uintptr_t)op2) & 1) == 0) { + x = (PyObject *)op2; /* found already in the "primary" slot */ + Py_INCREF(x); + } + else { + CTypeDescrObject *ct = NULL; + + if (!(s->flags & _CFFI_F_EXTERNAL)) { + int flags = (s->flags & _CFFI_F_UNION) ? CT_UNION : CT_STRUCT; + char *name = alloca(8 + strlen(s->name)); + _realize_name(name, + (s->flags & _CFFI_F_UNION) ? 
"union " : "struct ", + s->name); + if (strcmp(name, "struct _IO_FILE") == 0) + flags |= CT_IS_FILE; + + x = new_struct_or_union_type(name, flags); + if (x == NULL) + return NULL; + + if (s->first_field_index >= 0) { + ct = (CTypeDescrObject *)x; + ct->ct_size = (Py_ssize_t)s->size; + ct->ct_length = s->alignment; + ct->ct_flags &= ~CT_IS_OPAQUE; + ct->ct_flags |= CT_LAZY_FIELD_LIST; + ct->ct_extra = builder; + } + } + else { + x = _fetch_external_struct_or_union(s, builder->included_ffis, 0); + if (x == NULL) { + if (!PyErr_Occurred()) + PyErr_Format(FFIError, "'%s %.200s' should come from " + "ffi.include() but was not found", + (s->flags & _CFFI_F_UNION) ? "union" + : "struct", s->name); + return NULL; + } + } + + /* Update the "primary" OP_STRUCT_UNION slot */ + assert((((uintptr_t)x) & 1) == 0); + assert(builder->ctx.types[s->type_index] == op2); + Py_INCREF(x); + builder->ctx.types[s->type_index] = x; + + if (ct != NULL && s->size == (size_t)-2) { + /* oops, this struct is unnamed and we couldn't generate + a C expression to get its size. We have to rely on + complete_struct_or_union() to compute it now. */ + if (do_realize_lazy_struct(ct) < 0) { + builder->ctx.types[s->type_index] = op2; + return NULL; + } + } + } + return x; +} + static PyObject * _realize_c_type_or_func(builder_c_t *builder, _cffi_opcode_t opcodes[], int index) @@ -320,62 +398,8 @@ break; case _CFFI_OP_STRUCT_UNION: - { - const struct _cffi_struct_union_s *s; - _cffi_opcode_t op2; - - s = &builder->ctx.struct_unions[_CFFI_GETARG(op)]; - op2 = builder->ctx.types[s->type_index]; - if ((((uintptr_t)op2) & 1) == 0) { - x = (PyObject *)op2; - Py_INCREF(x); - } - else { - int flags = (s->flags & _CFFI_F_UNION) ? CT_UNION : CT_STRUCT; - char *name = alloca(8 + strlen(s->name)); - _realize_name(name, - (s->flags & _CFFI_F_UNION) ? 
"union " : "struct ", - s->name); - if (strcmp(name, "struct _IO_FILE") == 0) - flags |= CT_IS_FILE; - - x = new_struct_or_union_type(name, flags); - - CTypeDescrObject *ct = NULL; - if (s->first_field_index >= 0) { - ct = (CTypeDescrObject *)x; - ct->ct_size = (Py_ssize_t)s->size; - ct->ct_length = s->alignment; - ct->ct_flags &= ~CT_IS_OPAQUE; - ct->ct_flags |= CT_LAZY_FIELD_LIST; - ct->ct_extra = builder; - } - - /* Update the "primary" OP_STRUCT_UNION slot, which - may be the same or a different slot than the "current" one */ - assert((((uintptr_t)x) & 1) == 0); - assert(builder->ctx.types[s->type_index] == op2); - Py_INCREF(x); - builder->ctx.types[s->type_index] = x; - - if (s->size == (size_t)-2) { - /* oops, this struct is unnamed and we couldn't generate - a C expression to get its size. We have to rely on - complete_struct_or_union() to compute it now. */ - assert(ct != NULL); - if (do_realize_lazy_struct(ct) < 0) { - builder->ctx.types[s->type_index] = op2; - return NULL; - } - } - - /* Done, leave without updating the "current" slot because - it may be done already above. If not, never mind, the - next call to realize_c_type() will do it. 
*/ - return x; - } + x = _realize_c_struct_or_union(builder, _CFFI_GETARG(op)); break; - } case _CFFI_OP_ENUM: { @@ -530,7 +554,7 @@ return NULL; } - if (x != NULL && opcodes == builder->ctx.types) { + if (x != NULL && opcodes == builder->ctx.types && opcodes[index] != x) { assert((((uintptr_t)x) & 1) == 0); assert((((uintptr_t)opcodes[index]) & 1) == 1); Py_INCREF(x); diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -484,7 +484,7 @@ ffi.include(ffi1) ffi.cdef("struct foo_s *ff2(struct foo_s *);") lib = verify(ffi, "test_include_2", - "struct foo_s { int x, y; };\n" + "struct foo_s { int x, y; }; //usually from a #include\n" "struct foo_s *ff2(struct foo_s *p) { p->y++; return p; }") p = ffi.new("struct foo_s *") p.y = 41 From noreply at buildbot.pypy.org Mon Apr 27 23:48:58 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 27 Apr 2015 23:48:58 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: merge heads Message-ID: <20150427214858.4FF6E1C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1867:292cb69ac76c Date: 2015-04-27 23:49 +0200 http://bitbucket.org/cffi/cffi/changeset/292cb69ac76c/ Log: merge heads diff --git a/_cffi1/lib_obj.c b/_cffi1/lib_obj.c --- a/_cffi1/lib_obj.c +++ b/_cffi1/lib_obj.c @@ -231,8 +231,7 @@ _CFFI_GETARG(g->type_op)); if (ct == NULL) return NULL; - if (g->size != ct->ct_size && - g->size != (size_t)-1 && ct->ct_size != -1) { + if (g->size != ct->ct_size && g->size != 0 && ct->ct_size > 0) { PyErr_Format(FFIError, "global variable '%.200s' should be %zd bytes " "according to the cdef, but is actually %zd", diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -63,8 +63,8 @@ struct _cffi_global_s { const char *name; void *address; - size_t size; // -1 if unknown _cffi_opcode_t type_op; + size_t size; // 0 if unknown }; struct _cffi_struct_union_s { diff 
--git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -434,8 +434,7 @@ else: meth_kind = 'V' # 'METH_VARARGS' self._lsts["global"].append( - ' { "%s", _cffi_f_%s, (size_t)-1, ' - '_CFFI_OP(_CFFI_OP_CPYTHON_BLTN_%s, %d) },' + ' { "%s", _cffi_f_%s, _CFFI_OP(_CFFI_OP_CPYTHON_BLTN_%s, %d), 0 },' % (name, name, meth_kind, type_index)) # ---------- @@ -654,8 +653,7 @@ type_index = self._typesdict[tp] type_op = '_CFFI_OP(_CFFI_OP_CONSTANT, %d)' % type_index self._lsts["global"].append( - ' { "%s", _cffi_const_%s, (size_t)-1, %s },' % - (name, name, type_op)) + ' { "%s", _cffi_const_%s, %s, 0 },' % (name, name, type_op)) # ---------- # enums @@ -672,7 +670,7 @@ type_op = '_CFFI_OP(_CFFI_OP_ENUM, -1)' for enumerator in tp.enumerators: self._lsts["global"].append( - ' { "%s", _cffi_const_%s, (size_t)-1, %s },' % + ' { "%s", _cffi_const_%s, %s, 0 },' % (enumerator, enumerator, type_op)) # if cname is not None and '$' not in cname: @@ -703,9 +701,8 @@ def _generate_cpy_macro_ctx(self, tp, name): self._lsts["global"].append( - ' { "%s", _cffi_const_%s, (size_t)-1,' - ' _CFFI_OP(_CFFI_OP_CONSTANT_INT, 0) },' % - (name, name)) + ' { "%s", _cffi_const_%s,' + ' _CFFI_OP(_CFFI_OP_CONSTANT_INT, 0), 0 },' % (name, name)) # ---------- # global variables @@ -728,10 +725,10 @@ if tp.sizeof_enabled(): size = "sizeof(%s)" % (name,) else: - size = "(size_t)-1" + size = "0" self._lsts["global"].append( - ' { "%s", &%s, %s, _CFFI_OP(_CFFI_OP_GLOBAL_VAR, %d)},' - % (name, name, size, type_index)) + ' { "%s", &%s, _CFFI_OP(_CFFI_OP_GLOBAL_VAR, %d), %s },' + % (name, name, type_index, size)) # ---------- # emitting the opcodes for individual types diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -105,8 +105,26 @@ 'uint32_t': 'i', 'int64_t': 'i', 'uint64_t': 'i', + 'int_least8_t': 'i', + 'uint_least8_t': 'i', + 'int_least16_t': 'i', + 'uint_least16_t': 'i', + 'int_least32_t': 'i', + 'uint_least32_t': 'i', 
+ 'int_least64_t': 'i', + 'uint_least64_t': 'i', + 'int_fast8_t': 'i', + 'uint_fast8_t': 'i', + 'int_fast16_t': 'i', + 'uint_fast16_t': 'i', + 'int_fast32_t': 'i', + 'uint_fast32_t': 'i', + 'int_fast64_t': 'i', + 'uint_fast64_t': 'i', 'intptr_t': 'i', 'uintptr_t': 'i', + 'intmax_t': 'i', + 'uintmax_t': 'i', 'ptrdiff_t': 'i', 'size_t': 'i', 'ssize_t': 'i', diff --git a/testing/backend_tests.py b/testing/backend_tests.py --- a/testing/backend_tests.py +++ b/testing/backend_tests.py @@ -1703,5 +1703,3 @@ assert lib.DOT_HEX == 0x100 assert lib.DOT_HEX2 == 0x10 assert lib.DOT_UL == 1000 - - diff --git a/testing/test_ffi_backend.py b/testing/test_ffi_backend.py --- a/testing/test_ffi_backend.py +++ b/testing/test_ffi_backend.py @@ -222,3 +222,57 @@ assert ffi.typeof(c) is ffi.typeof("char[]") ffi.cast("unsigned short *", c)[1] += 500 assert list(a) == [10000, 20500, 30000] + + def test_all_primitives(self): + ffi = FFI() + for name in [ + "char", + "short", + "int", + "long", + "long long", + "signed char", + "unsigned char", + "unsigned short", + "unsigned int", + "unsigned long", + "unsigned long long", + "float", + "double", + "long double", + "wchar_t", + "_Bool", + "int8_t", + "uint8_t", + "int16_t", + "uint16_t", + "int32_t", + "uint32_t", + "int64_t", + "uint64_t", + "int_least8_t", + "uint_least8_t", + "int_least16_t", + "uint_least16_t", + "int_least32_t", + "uint_least32_t", + "int_least64_t", + "uint_least64_t", + "int_fast8_t", + "uint_fast8_t", + "int_fast16_t", + "uint_fast16_t", + "int_fast32_t", + "uint_fast32_t", + "int_fast64_t", + "uint_fast64_t", + "intptr_t", + "uintptr_t", + "intmax_t", + "uintmax_t", + "ptrdiff_t", + "size_t", + "ssize_t", + ]: + x = ffi.sizeof(name) + assert 1 <= x <= 16 From noreply at buildbot.pypy.org Tue Apr 28 02:15:05 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 28 Apr 2015 02:15:05 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Add the 0.9.3 integer types Message-ID: 
<20150428001505.3FCBB1C114E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1868:2fa0c70b8db6 Date: 2015-04-28 02:15 +0200 http://bitbucket.org/cffi/cffi/changeset/2fa0c70b8db6/ Log: Add the 0.9.3 integer types diff --git a/_cffi1/cffi_opcode.py b/_cffi1/cffi_opcode.py --- a/_cffi1/cffi_opcode.py +++ b/_cffi1/cffi_opcode.py @@ -62,8 +62,26 @@ PRIM_PTRDIFF = 27 PRIM_SIZE = 28 PRIM_SSIZE = 29 +PRIM_INT_LEAST8 = 30 +PRIM_UINT_LEAST8 = 31 +PRIM_INT_LEAST16 = 32 +PRIM_UINT_LEAST16 = 33 +PRIM_INT_LEAST32 = 34 +PRIM_UINT_LEAST32 = 35 +PRIM_INT_LEAST64 = 36 +PRIM_UINT_LEAST64 = 37 +PRIM_INT_FAST8 = 38 +PRIM_UINT_FAST8 = 39 +PRIM_INT_FAST16 = 40 +PRIM_UINT_FAST16 = 41 +PRIM_INT_FAST32 = 42 +PRIM_UINT_FAST32 = 43 +PRIM_INT_FAST64 = 44 +PRIM_UINT_FAST64 = 45 +PRIM_INTMAX = 46 +PRIM_UINTMAX = 47 -_NUM_PRIM = 30 +_NUM_PRIM = 48 PRIMITIVE_TO_INDEX = { 'char': PRIM_CHAR, @@ -95,6 +113,24 @@ 'ptrdiff_t': PRIM_PTRDIFF, 'size_t': PRIM_SIZE, 'ssize_t': PRIM_SSIZE, + 'int_least8_t': PRIM_INT_LEAST8, + 'uint_least8_t': PRIM_UINT_LEAST8, + 'int_least16_t': PRIM_INT_LEAST16, + 'uint_least16_t': PRIM_UINT_LEAST16, + 'int_least32_t': PRIM_INT_LEAST32, + 'uint_least32_t': PRIM_UINT_LEAST32, + 'int_least64_t': PRIM_INT_LEAST64, + 'uint_least64_t': PRIM_UINT_LEAST64, + 'int_fast8_t': PRIM_INT_FAST8, + 'uint_fast8_t': PRIM_UINT_FAST8, + 'int_fast16_t': PRIM_INT_FAST16, + 'uint_fast16_t': PRIM_UINT_FAST16, + 'int_fast32_t': PRIM_INT_FAST32, + 'uint_fast32_t': PRIM_UINT_FAST32, + 'int_fast64_t': PRIM_INT_FAST64, + 'uint_fast64_t': PRIM_UINT_FAST64, + 'intmax_t': PRIM_INTMAX, + 'uintmax_t': PRIM_UINTMAX, } F_UNION = 0x01 diff --git a/_cffi1/parse_c_type.c b/_cffi1/parse_c_type.c --- a/_cffi1/parse_c_type.c +++ b/_cffi1/parse_c_type.c @@ -459,14 +459,36 @@ if (size == 7 && !memcmp(p, "uint8", 5)) return _CFFI_PRIM_UINT8; break; + case 'a': + if (size == 8 && !memcmp(p, "intmax", 6)) return _CFFI_PRIM_INTMAX; + break; + case 'e': if (size == 7 && !memcmp(p, "ssize", 5)) 
return _CFFI_PRIM_SSIZE; break; + case 'f': + if (size == 11 && !memcmp(p, "int_fast8", 9)) return _CFFI_PRIM_INT_FAST8; + if (size == 12 && !memcmp(p, "int_fast16", 10)) return _CFFI_PRIM_INT_FAST16; + if (size == 12 && !memcmp(p, "int_fast32", 10)) return _CFFI_PRIM_INT_FAST32; + if (size == 12 && !memcmp(p, "int_fast64", 10)) return _CFFI_PRIM_INT_FAST64; + break; + case 'i': if (size == 9 && !memcmp(p, "ptrdiff", 7)) return _CFFI_PRIM_PTRDIFF; break; + case 'l': + if (size == 12 && !memcmp(p, "int_least8", 10)) return _CFFI_PRIM_INT_LEAST8; + if (size == 13 && !memcmp(p, "int_least16", 11)) return _CFFI_PRIM_INT_LEAST16; + if (size == 13 && !memcmp(p, "int_least32", 11)) return _CFFI_PRIM_INT_LEAST32; + if (size == 13 && !memcmp(p, "int_least64", 11)) return _CFFI_PRIM_INT_LEAST64; + break; + + case 'm': + if (size == 9 && !memcmp(p, "uintmax", 7)) return _CFFI_PRIM_UINTMAX; + break; + case 'p': if (size == 9 && !memcmp(p, "uintptr", 7)) return _CFFI_PRIM_UINTPTR; break; @@ -482,6 +504,34 @@ case '_': if (size == 6 && !memcmp(p, "size", 4)) return _CFFI_PRIM_SIZE; if (size == 6 && !memcmp(p, "int8", 4)) return _CFFI_PRIM_INT8; + if (size >= 12) { + switch (p[10]) { + case '1': + if (size == 14 && !memcmp(p, "uint_least16", 12)) return _CFFI_PRIM_UINT_LEAST16; + break; + case '2': + if (size == 13 && !memcmp(p, "uint_fast32", 11)) return _CFFI_PRIM_UINT_FAST32; + break; + case '3': + if (size == 14 && !memcmp(p, "uint_least32", 12)) return _CFFI_PRIM_UINT_LEAST32; + break; + case '4': + if (size == 13 && !memcmp(p, "uint_fast64", 11)) return _CFFI_PRIM_UINT_FAST64; + break; + case '6': + if (size == 14 && !memcmp(p, "uint_least64", 12)) return _CFFI_PRIM_UINT_LEAST64; + if (size == 13 && !memcmp(p, "uint_fast16", 11)) return _CFFI_PRIM_UINT_FAST16; + break; + case '8': + if (size == 13 && !memcmp(p, "uint_least8", 11)) return _CFFI_PRIM_UINT_LEAST8; + break; + case '_': + if (size == 12 && !memcmp(p, "uint_fast8", 10)) return _CFFI_PRIM_UINT_FAST8; + break; + 
default: + break; + } + } break; default: diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -56,8 +56,26 @@ #define _CFFI_PRIM_PTRDIFF 27 #define _CFFI_PRIM_SIZE 28 #define _CFFI_PRIM_SSIZE 29 +#define _CFFI_PRIM_INT_LEAST8 30 +#define _CFFI_PRIM_UINT_LEAST8 31 +#define _CFFI_PRIM_INT_LEAST16 32 +#define _CFFI_PRIM_UINT_LEAST16 33 +#define _CFFI_PRIM_INT_LEAST32 34 +#define _CFFI_PRIM_UINT_LEAST32 35 +#define _CFFI_PRIM_INT_LEAST64 36 +#define _CFFI_PRIM_UINT_LEAST64 37 +#define _CFFI_PRIM_INT_FAST8 38 +#define _CFFI_PRIM_UINT_FAST8 39 +#define _CFFI_PRIM_INT_FAST16 40 +#define _CFFI_PRIM_UINT_FAST16 41 +#define _CFFI_PRIM_INT_FAST32 42 +#define _CFFI_PRIM_UINT_FAST32 43 +#define _CFFI_PRIM_INT_FAST64 44 +#define _CFFI_PRIM_UINT_FAST64 45 +#define _CFFI_PRIM_INTMAX 46 +#define _CFFI_PRIM_UINTMAX 47 -#define _CFFI__NUM_PRIM 30 +#define _CFFI__NUM_PRIM 48 struct _cffi_global_s { diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -146,6 +146,24 @@ "ptrdiff_t", "size_t", "ssize_t", + "int_least8_t", + "uint_least8_t", + "int_least16_t", + "uint_least16_t", + "int_least32_t", + "uint_least32_t", + "int_least64_t", + "uint_least64_t", + "int_fast8_t", + "uint_fast8_t", + "int_fast16_t", + "uint_fast16_t", + "int_fast32_t", + "uint_fast32_t", + "int_fast64_t", + "uint_fast64_t", + "intmax_t", + "uintmax_t", }; PyObject *x; diff --git a/_cffi1/test_new_ffi_1.py b/_cffi1/test_new_ffi_1.py --- a/_cffi1/test_new_ffi_1.py +++ b/_cffi1/test_new_ffi_1.py @@ -1613,3 +1613,58 @@ assert ffi.typeof(c) is ffi.typeof("char[]") ffi.cast("unsigned short *", c)[1] += 500 assert list(a) == [10000, 20500, 30000] + + def test_all_primitives(self): + from .cffi_opcode import PRIMITIVE_TO_INDEX + assert set(PRIMITIVE_TO_INDEX) == set([ + "char", + "short", + "int", + "long", + "long long", + "signed char", + "unsigned char", + "unsigned short", + 
"unsigned int", + "unsigned long", + "unsigned long long", + "float", + "double", + "long double", + "wchar_t", + "_Bool", + "int8_t", + "uint8_t", + "int16_t", + "uint16_t", + "int32_t", + "uint32_t", + "int64_t", + "uint64_t", + "int_least8_t", + "uint_least8_t", + "int_least16_t", + "uint_least16_t", + "int_least32_t", + "uint_least32_t", + "int_least64_t", + "uint_least64_t", + "int_fast8_t", + "uint_fast8_t", + "int_fast16_t", + "uint_fast16_t", + "int_fast32_t", + "uint_fast32_t", + "int_fast64_t", + "uint_fast64_t", + "intptr_t", + "uintptr_t", + "intmax_t", + "uintmax_t", + "ptrdiff_t", + "size_t", + "ssize_t", + ]) + for name in PRIMITIVE_TO_INDEX: + x = ffi.sizeof(name) + assert 1 <= x <= 16 From noreply at buildbot.pypy.org Tue Apr 28 11:30:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 28 Apr 2015 11:30:47 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Check again the value of #define constants if they are not defined with Message-ID: <20150428093047.498581C1439@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1869:1b74d35a49ed Date: 2015-04-28 11:31 +0200 http://bitbucket.org/cffi/cffi/changeset/1b74d35a49ed/ Log: Check again the value of #define constants if they are not defined with '...'. diff --git a/_cffi1/_cffi_include.h b/_cffi1/_cffi_include.h --- a/_cffi1/_cffi_include.h +++ b/_cffi1/_cffi_include.h @@ -170,6 +170,10 @@ (size) == 8 ? ((sign) ? 
_CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \ 0) +#define _cffi_check_int(got, got_nonpos, expected) \ + ((got_nonpos) == (expected <= 0) && \ + (got) == (unsigned long long)expected) + static int _cffi_init(void) { diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -186,24 +186,37 @@ static PyObject *realize_global_int(const struct _cffi_global_s *g) { - PyObject *x; unsigned long long value; /* note: we cast g->address to this function type; we do the same in parse_c_type:parse_sequel() too */ int neg = ((int(*)(unsigned long long*))g->address)(&value); - if (!neg) { + + switch (neg) { + + case 0: if (value <= (unsigned long long)LONG_MAX) - x = PyInt_FromLong((long)value); + return PyInt_FromLong((long)value); else - x = PyLong_FromUnsignedLongLong(value); + return PyLong_FromUnsignedLongLong(value); + + case 1: + if ((long long)value >= (long long)LONG_MIN) + return PyInt_FromLong((long)value); + else + return PyLong_FromLongLong((long long)value); + + default: + break; } - else { - if ((long long)value >= (long long)LONG_MIN) - x = PyInt_FromLong((long)value); - else - x = PyLong_FromLongLong((long long)value); - } - return x; + + char got[64]; + if (neg == 2) + sprintf(got, "%llu (0x%llx)", value, value); + else + sprintf(got, "%lld", (long long)value); + PyErr_Format(FFIError, "the C compiler says '%.200s' is equal to %s, " + "but the cdef disagrees", g->name, got); + return NULL; } static PyObject * @@ -462,6 +475,8 @@ while (p[j] != ',' && p[j] != '\0') j++; tmp = PyString_FromStringAndSize(p, j); + if (tmp == NULL) + break; PyTuple_SET_ITEM(enumerators, i, tmp); gindex = search_in_globals(&builder->ctx, p, j); @@ -470,6 +485,8 @@ assert(g->type_op == _CFFI_OP(_CFFI_OP_ENUM, -1)); tmp = realize_global_int(g); + if (tmp == NULL) + break; PyTuple_SET_ITEM(enumvalues, i, tmp); p += j + 1; diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ 
b/_cffi1/recompiler.py @@ -615,7 +615,8 @@ # ---------- # constants, declared with "static const ..." - def _generate_cpy_const(self, is_int, name, tp=None, category='const'): + def _generate_cpy_const(self, is_int, name, tp=None, category='const', + check_value=None): if (category, name) in self._seen_constants: raise ffiplatform.VerificationError( "duplicate declaration of %s '%s'" % (category, name)) @@ -626,11 +627,16 @@ if is_int: prnt('static int %s(unsigned long long *o)' % funcname) prnt('{') + prnt(' int n = (%s) <= 0;' % (name,)) prnt(' *o = (unsigned long long)((%s) << 0);' ' /* check that we get an integer */' % (name,)) - prnt(' return (%s) <= 0;' % (name,)) + if check_value is not None: + prnt(' if (!_cffi_check_int(*o, n, %s))' % (check_value,)) + prnt(' n |= 2;') + prnt(' return n;') prnt('}') else: + assert check_value is None prnt('static void %s(char *o)' % funcname) prnt('{') prnt(' *(%s)o = %s;' % (tp.get_c_name('*'), name)) @@ -695,9 +701,11 @@ pass def _generate_cpy_macro_decl(self, tp, name): - # for now, we ignore the value (if != ',,,') given in the cdef - # and always trust the value coming from the C compiler - self._generate_cpy_const(True, name) + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) def _generate_cpy_macro_ctx(self, tp, name): self._lsts["global"].append( diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -175,17 +175,40 @@ assert lib.FOOBAR == -6912 py.test.raises(AttributeError, "lib.FOOBAR = 2") -def test_macro_check_value_ok(): +def test_macro_check_value(): + # the value '-0x80000000' in C sources does not have a clear meaning + # to me; it appears to have a different effect than '-2147483648'... 
+ vals = ['42', '-42', '0x80000000', '-2147483648', + '0', '9223372036854775809ULL', + '-9223372036854775807LL'] ffi = FFI() - ffi.cdef("#define FOOBAR 42") - lib = verify(ffi, 'test_macro_check_value_ok', "#define FOOBAR 42") - assert lib.FOOBAR == 42 + cdef_lines = ['#define FOO_%d_%d %s' % (i, j, vals[i]) + for i in range(len(vals)) + for j in range(len(vals))] + ffi.cdef('\n'.join(cdef_lines)) -def test_macro_check_value_fail(): - ffi = FFI() - ffi.cdef("#define FOOBAR 42") - lib = verify(ffi, 'test_macro_check_value_fail', "#define FOOBAR 43") - assert lib.FOOBAR == 43 # for now, we don't check the cdef value + verify_lines = ['#define FOO_%d_%d %s' % (i, j, vals[j]) # [j], not [i] + for i in range(len(vals)) + for j in range(len(vals))] + lib = verify(ffi, 'test_macro_check_value_ok', + '\n'.join(verify_lines)) + # + for j in range(len(vals)): + c_got = int(vals[j].replace('U', '').replace('L', ''), 0) + c_compiler_msg = str(c_got) + if c_got > 0: + c_compiler_msg += ' (0x%x)' % (c_got,) + # + for i in range(len(vals)): + attrname = 'FOO_%d_%d' % (i, j) + if i == j: + x = getattr(lib, attrname) + assert x == c_got + else: + e = py.test.raises(ffi.error, getattr, lib, attrname) + assert str(e.value) == ( + "the C compiler says '%s' is equal to " + "%s, but the cdef disagrees" % (attrname, c_compiler_msg)) def test_constant(): ffi = FFI() diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -2171,4 +2171,6 @@ ffi = FFI() ffi.cdef("#define FOO 123") lib = ffi.verify("#define FOO 124") # used to complain - assert lib.FOO == 124 + e = py.test.raises(ffi.error, "lib.FOO") + assert str(e.value) == ("the C compiler says 'FOO' is equal to 124 (0x7c)," + " but the cdef disagrees") diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -23,7 +23,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None 
-_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -218,6 +218,9 @@ def _process_macros(self, macros): for key, value in macros.items(): value = value.strip() + neg = value.startswith('-') + if neg: + value = value[1:].strip() match = _r_int_literal.search(value) if match is not None: int_str = match.group(0).lower().rstrip("ul") @@ -229,6 +232,7 @@ int_str = "0o" + int_str[1:] pyvalue = int(int_str, 0) + if neg: pyvalue = -pyvalue self._add_constants(key, pyvalue) self._declare('macro ' + key, pyvalue) elif value == '...': From noreply at buildbot.pypy.org Tue Apr 28 11:44:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 28 Apr 2015 11:44:41 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Fix the test, still xfail Message-ID: <20150428094441.8A8571C083B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1870:b8665b33150c Date: 2015-04-28 11:45 +0200 http://bitbucket.org/cffi/cffi/changeset/b8665b33150c/ Log: Fix the test, still xfail diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -766,7 +766,7 @@ static PyTypeObject FFI_Type = { PyVarObject_HEAD_INIT(NULL, 0) - "FFI", + "CompiledFFI", sizeof(FFIObject), 0, (destructor)ffi_dealloc, /* tp_dealloc */ diff --git a/_cffi1/lib_obj.c b/_cffi1/lib_obj.c --- a/_cffi1/lib_obj.c +++ b/_cffi1/lib_obj.c @@ -328,7 +328,7 @@ static PyTypeObject Lib_Type = { PyVarObject_HEAD_INIT(NULL, 0) - "Lib", + "CompiledLib", sizeof(LibObject), 0, (destructor)lib_dealloc, /* tp_dealloc */ diff --git a/_cffi1/test_new_ffi_1.py b/_cffi1/test_new_ffi_1.py --- a/_cffi1/test_new_ffi_1.py +++ b/_cffi1/test_new_ffi_1.py @@ -14,7 +14,7 @@ def setup_module(): - global ffi + global ffi, ffi1 ffi1 = cffi.FFI() DEFS = r""" struct repr { short a, b, c; }; @@ -1500,64 +1500,32 @@ assert foo2.a == 20 assert foo2.b == 30 - def 
test_missing_include(self): + def test_include_struct_union_enum_typedef(self): py.test.xfail("ffi.include") - backend = self.Backend() - ffi1 = FFI(backend=backend) - ffi2 = FFI(backend=backend) - ffi1.cdef("typedef signed char schar_t;") - py.test.raises(CDefError, ffi2.cast, "schar_t", 142) - - def test_include_typedef(self): - py.test.xfail("ffi.include") - backend = self.Backend() - ffi1 = FFI(backend=backend) - ffi2 = FFI(backend=backend) - ffi1.cdef("typedef signed char schar_t;") + ffi2 = cffi.FFI() ffi2.include(ffi1) - p = ffi2.cast("schar_t", 142) - assert int(p) == 142 - 256 - - def test_include_struct(self): - py.test.xfail("ffi.include") - backend = self.Backend() - ffi1 = FFI(backend=backend) - ffi2 = FFI(backend=backend) - ffi1.cdef("struct foo { int x; };") - ffi2.include(ffi1) - p = ffi2.new("struct foo *", [142]) - assert p.x == 142 - - def test_include_union(self): - py.test.xfail("ffi.include") - backend = self.Backend() - ffi1 = FFI(backend=backend) - ffi2 = FFI(backend=backend) - ffi1.cdef("union foo { int x; };") - ffi2.include(ffi1) - p = ffi2.new("union foo *", [142]) - assert p.x == 142 - - def test_include_enum(self): - py.test.xfail("ffi.include") - backend = self.Backend() - ffi1 = FFI(backend=backend) - ffi2 = FFI(backend=backend) - ffi1.cdef("enum foo { FA, FB, FC };") - ffi2.include(ffi1) - p = ffi2.cast("enum foo", 1) - assert ffi2.string(p) == "FB" - assert ffi2.sizeof("char[FC]") == 2 - - def test_include_typedef_2(self): - py.test.xfail("ffi.include") - backend = self.Backend() - ffi1 = FFI(backend=backend) - ffi2 = FFI(backend=backend) - ffi1.cdef("typedef struct { int x; } *foo_p;") - ffi2.include(ffi1) - p = ffi2.new("foo_p", [142]) - assert p.x == 142 + outputfilename = recompile(ffi2, + "test_include_struct_union_enum_typedef", + "", tmpdir=str(udir)) + module = imp.load_dynamic("test_include_struct_union_enum_typedef", + outputfilename) + ffi2 = module.ffi + # + p = ffi2.new("struct nonpacked *", ['A', -43141]) + assert p.a 
== 'A' + assert p.b == -43141 + # + p = ffi.new("union simple_u", [-52525]) + assert p.a == -52525 + # + p = ffi.cast("enum foq", 2) + assert ffi.string(p) == "CC0" + assert ffi2.sizeof("char[CC0]") == 2 + # + p = ffi.new("anon_foo_t *", [-52526]) + assert p.a == -52526 + p = ffi.new("named_foo_p", [-52527]) + assert p.a == -52527 def test_struct_packed(self): # struct nonpacked { char a; int b; }; diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -419,6 +419,10 @@ variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. """ + if not isinstance(ffi_to_include, FFI): + raise TypeError("ffi.include() expects an argument that is also of" + " type cffi.FFI, not %r" % ( + type(ffi_to_include).__name__,)) with ffi_to_include._lock: with self._lock: self._parser.include(ffi_to_include._parser) From noreply at buildbot.pypy.org Tue Apr 28 12:12:03 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 28 Apr 2015 12:12:03 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Fix the test, now it passes Message-ID: <20150428101203.94E6A1C04A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1872:c5891bfec7b0 Date: 2015-04-28 12:12 +0200 http://bitbucket.org/cffi/cffi/changeset/c5891bfec7b0/ Log: Fix the test, now it passes diff --git a/_cffi1/test_new_ffi_1.py b/_cffi1/test_new_ffi_1.py --- a/_cffi1/test_new_ffi_1.py +++ b/_cffi1/test_new_ffi_1.py @@ -14,7 +14,7 @@ def setup_module(): - global ffi, ffi1 + global ffi, construction_params ffi1 = cffi.FFI() DEFS = r""" struct repr { short a, b, c; }; @@ -83,6 +83,7 @@ tmpdir=str(udir)) module = imp.load_dynamic("test_new_ffi_1", outputfilename) ffi = module.ffi + construction_params = (ffi1, CCODE) class TestNewFFI1: @@ -1501,12 +1502,13 @@ assert foo2.b == 30 def test_include_struct_union_enum_typedef(self): - py.test.xfail("ffi.include") + #py.test.xfail("ffi.include") + ffi1, CCODE = construction_params ffi2 = 
cffi.FFI() ffi2.include(ffi1) outputfilename = recompile(ffi2, "test_include_struct_union_enum_typedef", - "", tmpdir=str(udir)) + CCODE, tmpdir=str(udir)) module = imp.load_dynamic("test_include_struct_union_enum_typedef", outputfilename) ffi2 = module.ffi @@ -1515,7 +1517,7 @@ assert p.a == 'A' assert p.b == -43141 # - p = ffi.new("union simple_u", [-52525]) + p = ffi.new("union simple_u *", [-52525]) assert p.a == -52525 # p = ffi.cast("enum foq", 2) From noreply at buildbot.pypy.org Tue Apr 28 12:12:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 28 Apr 2015 12:12:02 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: tests and one fix Message-ID: <20150428101202.7DDBC1C04A4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1871:f6e9b11edb1f Date: 2015-04-28 12:08 +0200 http://bitbucket.org/cffi/cffi/changeset/f6e9b11edb1f/ Log: tests and one fix diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -514,3 +514,65 @@ q = lib.ff2(p) assert q == p assert p.y == 42 + +def test_include_3(): + ffi1 = FFI() + ffi1.cdef("typedef short sshort_t;") + verify(ffi1, "test_include_3_parent", "typedef short sshort_t;") + ffi = FFI() + ffi.include(ffi1) + ffi.cdef("sshort_t ff3(sshort_t);") + lib = verify(ffi, "test_include_3", + "typedef short sshort_t; //usually from a #include\n" + "sshort_t ff3(sshort_t x) { return x + 42; }") + assert lib.ff3(10) == 52 + assert ffi.typeof(ffi.cast("sshort_t", 42)) is ffi.typeof("short") + +def test_include_4(): + ffi1 = FFI() + ffi1.cdef("typedef struct { int x; } mystruct_t;") + verify(ffi1, "test_include_4_parent", + "typedef struct { int x; } mystruct_t;") + ffi = FFI() + ffi.include(ffi1) + ffi.cdef("mystruct_t *ff4(mystruct_t *);") + lib = verify(ffi, "test_include_4", + "typedef struct {int x; } mystruct_t; //usually from a #include\n" + "mystruct_t *ff4(mystruct_t *p) { p->x += 42; return p; }") + p = 
ffi.new("mystruct_t *", [10]) + q = lib.ff4(p) + assert q == p + assert p.x == 52 + +def test_include_5(): + py.test.xfail("also fails in 0.9.3") + ffi1 = FFI() + ffi1.cdef("typedef struct { int x; } *mystruct_p;") + verify(ffi1, "test_include_5_parent", + "typedef struct { int x; } *mystruct_p;") + ffi = FFI() + ffi.include(ffi1) + ffi.cdef("mystruct_p ff5(mystruct_p);") + lib = verify(ffi, "test_include_5", + "typedef struct {int x; } *mystruct_p; //usually from a #include\n" + "mystruct_p ff5(mystruct_p p) { p->x += 42; return p; }") + p = ffi.new("mystruct_p", [10]) + q = lib.ff5(p) + assert q == p + assert p.x == 52 + +def test_include_6(): + ffi1 = FFI() + ffi1.cdef("typedef ... mystruct_t;") + verify(ffi1, "test_include_6_parent", + "typedef struct _mystruct_s mystruct_t;") + ffi = FFI() + ffi.include(ffi1) + ffi.cdef("mystruct_t *ff6(void);") + lib = verify(ffi, "test_include_6", + "typedef struct _mystruct_s mystruct_t; //usually from a #include\n" + "struct _mystruct_s { int x; };\n" + "static mystruct_t result_struct = { 42 };\n" + "mystruct_t *ff6(void) { return &result_struct; }") + p = lib.ff6() + assert ffi.cast("int *", p)[0] == 42 diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -608,7 +608,7 @@ def include(self, other): for name, tp in other._declarations.items(): kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum'): + if kind in ('struct', 'union', 'enum', 'anonymous'): self._declare(name, tp, included=True) elif kind == 'typedef': self._declare(name, tp) From noreply at buildbot.pypy.org Tue Apr 28 14:27:39 2015 From: noreply at buildbot.pypy.org (Rob Young) Date: Tue, 28 Apr 2015 14:27:39 +0200 (CEST) Subject: [pypy-commit] cffi default: Skip pkg-config if OSError EACCES is raised Message-ID: <20150428122739.C24C91C14BF@cobra.cs.uni-duesseldorf.de> Author: Rob Young Branch: Changeset: r1873:e1d2dcf322f8 Date: 2015-04-28 12:57 +0100 
http://bitbucket.org/cffi/cffi/changeset/e1d2dcf322f8/ Log: Skip pkg-config if OSError EACCES is raised If one of the directories in the PATH is not executable by the current user then EACCES will be raised. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -19,7 +19,7 @@ p = subprocess.Popen([pkg_config, option, 'libffi'], stdout=subprocess.PIPE) except OSError as e: - if e.errno != errno.ENOENT: + if e.errno not in [errno.ENOENT, errno.EACCES]: raise else: t = p.stdout.read().decode().strip() From noreply at buildbot.pypy.org Tue Apr 28 23:58:27 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 28 Apr 2015 23:58:27 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: PyPy for mediocre programmers: the abstract Message-ID: <20150428215827.23A591C04A4@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: extradoc Changeset: r5538:4397b357ffb7 Date: 2015-04-28 22:59 +0100 http://bitbucket.org/pypy/extradoc/changeset/4397b357ffb7/ Log: PyPy for mediocre programmers: the abstract diff --git a/talk/ep2015/pypy-abstract.rst b/talk/ep2015/pypy-abstract.rst new file mode 100644 --- /dev/null +++ b/talk/ep2015/pypy-abstract.rst @@ -0,0 +1,22 @@ +============================= +PyPy for mediocre programmers +============================= + +This is a talk for mediocre Python programmers by a mediocre programmer. PyPy +is an alternative implementation of Python. It is notorious for being fast, but +also for using clever algorithms pertaining to advanced concepts such as type +inference, garbage collection, just-in-time compilation, etc. So, can we, +mediocre programmers, realistically use PyPy? + +Yes, absolutely. In fact, PyPy developers did all that hard work so that we +wouldn't have to. As we'll see, it runs most Python code exactly like CPython +does, save that it magically makes it faster. 
+ +Porting existing applications is always more involved than running a simple +script, so we'll also examine likely difficulties such as code relying on +CPython implementation details, and dependencies on C extensions, and explore +simple principles to let PyPy run your code even faster. + +Finally, we'll have a glimpse of the future by looking at what's brewing in +the PyPy lair, such as software transactional memory, new speed optimisations, +better support for Python 3 and NumPy, ... From noreply at buildbot.pypy.org Wed Apr 29 12:49:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 12:49:15 +0200 (CEST) Subject: [pypy-commit] cffi default: Add a note about variadic functions Message-ID: <20150429104915.EFEE81C0EC0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1874:32ec3c43f186 Date: 2015-04-29 12:49 +0200 http://bitbucket.org/cffi/cffi/changeset/32ec3c43f186/ Log: Add a note about variadic functions diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1078,6 +1078,16 @@ C.printf("hello, %f\n", ffi.cast("double", 42)) C.printf("hello, %s\n", ffi.new("char[]", "world")) +Note that if you are using ``dlopen()``, the function declaration in the +``cdef()`` must match the original one in C exactly, as usual --- in +particular, if this function is variadic in C, then its ``cdef()`` +declaration must also be variadic. You cannot declare it in the +``cdef()`` with fixed arguments instead, even if you plan to only call +it with these argument types. The reason is that some architectures +have a different calling convention depending on whether the function +signature is fixed or not. (On x86-64, the difference can sometimes be +seen in PyPy's JIT-generated code if some arguments are ``double``.) 
+ Callbacks --------- From noreply at buildbot.pypy.org Wed Apr 29 13:23:28 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 13:23:28 +0200 (CEST) Subject: [pypy-commit] cffi default: Document what we do with "int foo(); ". Message-ID: <20150429112328.2A0CF1C03F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1875:98b0b26c05e2 Date: 2015-04-29 13:24 +0200 http://bitbucket.org/cffi/cffi/changeset/98b0b26c05e2/ Log: Document what we do with "int foo();". diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1088,6 +1088,13 @@ signature is fixed or not. (On x86-64, the difference can sometimes be seen in PyPy's JIT-generated code if some arguments are ``double``.) +Note that the function signature ``int foo();`` is interpreted by CFFI +as equivalent to ``int foo(void);``. This differs from the C standard, +in which ``int foo();`` is really like ``int foo(...);`` and can be +called with any arguments. (This feature of C is a pre-C89 relic: the +arguments cannot be accessed at all in the body of ``foo()`` without +relying on compiler-specific extensions.) + Callbacks --------- From noreply at buildbot.pypy.org Wed Apr 29 13:41:59 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 13:41:59 +0200 (CEST) Subject: [pypy-commit] stmgc default: It is always better to use "void" in the argument list when there is no Message-ID: <20150429114159.1AFAA1C083B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1758:12a01c2778c0 Date: 2015-04-29 13:42 +0200 http://bitbucket.org/pypy/stmgc/changeset/12a01c2778c0/ Log: It is always better to use "void" in the argument list when there is no argument. 
diff --git a/c8/stm/core.c b/c8/stm/core.c --- a/c8/stm/core.c +++ b/c8/stm/core.c @@ -308,7 +308,7 @@ static void reset_modified_from_backup_copies(int segment_num); /* forward */ -static bool _stm_validate() +static bool _stm_validate(void) { /* returns true if we reached a valid state, or false if we need to abort now */ diff --git a/c8/stm/core.h b/c8/stm/core.h --- a/c8/stm/core.h +++ b/c8/stm/core.h @@ -297,7 +297,7 @@ static void synchronize_objects_flush(void); static void _signal_handler(int sig, siginfo_t *siginfo, void *context); -static bool _stm_validate(); +static bool _stm_validate(void); static inline bool was_read_remote(char *base, object_t *obj) { @@ -329,7 +329,7 @@ spinlock_release(get_priv_segment(segnum)->privatization_lock); } -static inline bool all_privatization_locks_acquired() +static inline bool all_privatization_locks_acquired(void) { #ifndef NDEBUG long l; @@ -343,7 +343,7 @@ #endif } -static inline void acquire_all_privatization_locks() +static inline void acquire_all_privatization_locks(void) { /* XXX: don't do for the sharing seg0 */ long l; @@ -352,7 +352,7 @@ } } -static inline void release_all_privatization_locks() +static inline void release_all_privatization_locks(void) { long l; for (l = NB_SEGMENTS-1; l >= 0; l--) { diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c --- a/c8/stm/gcpage.c +++ b/c8/stm/gcpage.c @@ -681,7 +681,7 @@ _stm_smallmalloc_sweep(); } -static void clean_up_commit_log_entries() +static void clean_up_commit_log_entries(void) { struct stm_commit_log_entry_s *cl, *next; From noreply at buildbot.pypy.org Wed Apr 29 13:44:46 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 13:44:46 +0200 (CEST) Subject: [pypy-commit] stmgc default: More of the same Message-ID: <20150429114446.081CC1C083B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1759:64ba29e21134 Date: 2015-04-29 13:45 +0200 http://bitbucket.org/pypy/stmgc/changeset/64ba29e21134/ Log: More of the same diff --git 
a/c8/stm/misc.c b/c8/stm/misc.c --- a/c8/stm/misc.c +++ b/c8/stm/misc.c @@ -44,7 +44,7 @@ return obj->stm_flags & _STM_GCFLAG_CARDS_SET; } -long _stm_count_cl_entries() +long _stm_count_cl_entries(void) { struct stm_commit_log_entry_s *cl = &commit_log_root; @@ -115,7 +115,7 @@ return cards[get_index_to_card_index(idx)].rm; } -uint8_t _stm_get_transaction_read_version() +uint8_t _stm_get_transaction_read_version(void) { return STM_SEGMENT->transaction_read_version; } @@ -124,7 +124,7 @@ static struct stm_commit_log_entry_s *_last_cl_entry; static long _last_cl_entry_index; -void _stm_start_enum_last_cl_entry() +void _stm_start_enum_last_cl_entry(void) { _last_cl_entry = &commit_log_root; struct stm_commit_log_entry_s *cl = &commit_log_root; @@ -135,7 +135,7 @@ _last_cl_entry_index = 0; } -object_t *_stm_next_last_cl_entry() +object_t *_stm_next_last_cl_entry(void) { if (_last_cl_entry == &commit_log_root) return NULL; @@ -150,7 +150,7 @@ } -void _stm_smallmalloc_sweep_test() +void _stm_smallmalloc_sweep_test(void) { acquire_all_privatization_locks(); _stm_smallmalloc_sweep(); diff --git a/c8/stmgc.h b/c8/stmgc.h --- a/c8/stmgc.h +++ b/c8/stmgc.h @@ -94,13 +94,13 @@ object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); -void _stm_collectable_safe_point(); +void _stm_collectable_safe_point(void); object_t *_stm_allocate_old(ssize_t size_rounded_up); char *_stm_real_address(object_t *o); #ifdef STM_TESTS #include -uint8_t _stm_get_transaction_read_version(); +uint8_t _stm_get_transaction_read_version(void); uint8_t _stm_get_card_value(object_t *obj, long idx); bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); @@ -137,9 +137,9 @@ long _stm_count_objects_pointing_to_nursery(void); object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); -object_t *_stm_next_last_cl_entry(); -void _stm_start_enum_last_cl_entry(); -long 
_stm_count_cl_entries(); +object_t *_stm_next_last_cl_entry(void); +void _stm_start_enum_last_cl_entry(void); +long _stm_count_cl_entries(void); long _stm_count_old_objects_with_cards_set(void); object_t *_stm_enum_old_objects_with_cards_set(long index); uint64_t _stm_total_allocated(void); From noreply at buildbot.pypy.org Wed Apr 29 19:29:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 19:29:37 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: first attempt at writing setuptools hooks Message-ID: <20150429172937.8D8F51C0823@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1876:35dd6cec69f3 Date: 2015-04-29 19:30 +0200 http://bitbucket.org/cffi/cffi/changeset/35dd6cec69f3/ Log: first attempt at writing setuptools hooks diff --git a/_cffi1/setuptools_ext.py b/_cffi1/setuptools_ext.py new file mode 100644 --- /dev/null +++ b/_cffi1/setuptools_ext.py @@ -0,0 +1,80 @@ + +def error(msg): + from distutils.errors import DistutilsSetupError + raise DistutilsSetupError(msg) + + +def add_cffi_module(dist, mod_spec): + import os + from cffi.api import FFI + from _cffi1 import recompiler + from distutils.core import Extension + from distutils.command.build_ext import build_ext + from distutils.dir_util import mkpath + from distutils import log + + if not isinstance(mod_spec, str): + error("argument to 'cffi_modules=...' 
must be a str or a list of str," + " not %r" % (type(mod_spec).__name__,)) + try: + build_mod_name, ffi_var_name = mod_spec.split(':') + except ValueError: + error("%r must be of the form 'build_mod_name:ffi_variable'" % + (mod_spec,)) + mod = __import__(build_mod_name, None, None, [ffi_var_name]) + try: + ffi = getattr(mod, ffi_var_name) + except AttributeError: + error("%r: object %r not found in module" % (mod_spec, + ffi_var_name)) + if not isinstance(ffi, FFI): + error("%r is not an FFI instance (got %r)" % (mod_spec, + type(ffi).__name__)) + if not hasattr(ffi, '_assigned_source'): + error("%r: the set_source() method was not called" % (mod_spec,)) + module_name = ffi._recompiler_module_name + source, kwds = ffi._assigned_source + + allsources = ['$PLACEHOLDER'] + allsources.extend(kwds.get('sources', [])) + ext = Extension(name=module_name, sources=allsources, **kwds) + + def make_mod(tmpdir): + mkpath(tmpdir) + file_name = module_name + '.c' + log.info("generating cffi module %r" % file_name) + c_file = os.path.join(tmpdir, file_name) + c_tmp = '%s.%s' % (c_file, os.getpid()) + recompiler.make_c_source(ffi, module_name, source, c_tmp) + try: + with open(c_file, 'r') as f1: + with open(c_tmp, 'r') as f2: + if f1.read() != f2.read(): + raise IOError + except IOError: + os.rename(c_tmp, c_file) + else: + log.info("already up-to-date") + os.unlink(c_tmp) + return c_file + + if dist.ext_modules is None: + dist.ext_modules = [] + dist.ext_modules.append(ext) + + base_class = dist.cmdclass.get('build_ext', build_ext) + class build_ext_make_mod(base_class): + def run(self): + if ext.sources[0] == '$PLACEHOLDER': + ext.sources[0] = make_mod(self.build_temp) + base_class.run(self) + dist.cmdclass['build_ext'] = build_ext_make_mod + + +def cffi_modules(dist, attr, value): + assert attr == 'cffi_modules' + if isinstance(value, str): + value = [value] + + for cffi_module in value: + add_cffi_module(dist, cffi_module) diff --git a/cffi/api.py b/cffi/api.py --- 
a/cffi/api.py +++ b/cffi/api.py @@ -475,6 +475,22 @@ ('_UNICODE', '1')] kwds['define_macros'] = defmacros + def set_source(self, module_name, source, **kwds): + if hasattr(self, '_assigned_source'): + raise ValueError("set_source() cannot be called several times " + "per ffi object") + self._recompiler_module_name = module_name + self._assigned_source = (source, kwds) + + def compile(self, tmpdir='.'): + from _cffi1 import recompile + # + if not hasattr(self, '_assigned_source'): + raise ValueError("set_source() must be called before compile()") + source, kwds = self._assigned_source + return recompile(self, self._recompiler_module_name, + source, tmpdir=tmpdir, **kwds) + def _load_backend_lib(backend, name, flags): if name is None: diff --git a/demo/bsdopendirtype_build.py b/demo/bsdopendirtype_build.py --- a/demo/bsdopendirtype_build.py +++ b/demo/bsdopendirtype_build.py @@ -1,5 +1,4 @@ from cffi import FFI -from _cffi1 import recompile ffi = FFI() ffi.cdef(""" @@ -15,7 +14,10 @@ static const int DT_BLK, DT_CHR, DT_DIR, DT_FIFO, DT_LNK, DT_REG, DT_SOCK; """) -recompile(ffi, "_bsdopendirtype", """ +ffi.set_source("_bsdopendirtype", """ #include #include """) + +if __name__ == '__main__': + ffi.compile() diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -142,8 +142,9 @@ `Mailing list `_ """, - version='0.9.2', - packages=['cffi'], + version='1.0.0', + packages=['cffi', '_cffi1'], + package_data={'_cffi1': ['_cffi_include.h', 'parse_c_type.h']}, zip_safe=False, url='http://cffi.readthedocs.org', @@ -157,6 +158,13 @@ install_requires=[ 'pycparser', ], + + entry_points = { + "distutils.setup_keywords": [ + "cffi_modules = _cffi1.setuptools_ext:cffi_modules", + ], + }, + classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2', From noreply at buildbot.pypy.org Wed Apr 29 19:41:14 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 19:41:14 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Add the 
setup.py corresponding to the demo Message-ID: <20150429174114.DDC7C1C03F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1877:d0455ef2bbb2 Date: 2015-04-29 19:41 +0200 http://bitbucket.org/cffi/cffi/changeset/d0455ef2bbb2/ Log: Add the setup.py corresponding to the demo diff --git a/demo/bsdopendirtype_setup.py b/demo/bsdopendirtype_setup.py --- a/demo/bsdopendirtype_setup.py +++ b/demo/bsdopendirtype_setup.py @@ -1,22 +1,13 @@ -from cffi import FFI +from setuptools import setup -ffi = FFI() -ffi.csource("_bsdopendirtype", """ - #include - #include -""") -ffi.cdef(""" - typedef ... DIR; - struct dirent { - unsigned char d_type; /* type of file */ - char d_name[]; /* filename */ - ...; - }; - DIR *opendir(const char *name); - int closedir(DIR *dirp); - struct dirent *readdir(DIR *dirp); - static const int DT_BLK, DT_CHR, DT_DIR, DT_FIFO, DT_LNK, DT_REG, DT_SOCK; -""") - -if __name__ == '__main__': - ffi.recompile() +setup( + name="example", + version="0.1", + py_modules=["bsdopendirtype"], + setup_requires=["cffi>=1.0"], + cffi_modules=[ + "bsdopendirtype_build:ffi", + ], + install_requires=["cffi>=1.0"], # should maybe be "cffi-backend" only? 
+ zip_safe=False, +) From noreply at buildbot.pypy.org Wed Apr 29 20:02:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 20:02:37 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Change the other two demos to the ffi.set_source() style too Message-ID: <20150429180237.C4D961C0823@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1878:68b878e3ccbb Date: 2015-04-29 20:03 +0200 http://bitbucket.org/cffi/cffi/changeset/68b878e3ccbb/ Log: Change the other two demos to the ffi.set_source() style too diff --git a/demo/_curses_build.py b/demo/_curses_build.py --- a/demo/_curses_build.py +++ b/demo/_curses_build.py @@ -4,7 +4,6 @@ raise ImportError('No module named _curses') from cffi import FFI -from _cffi1 import recompile ffi = FFI() @@ -283,7 +282,7 @@ """) -recompile(ffi, "_curses_cffi", """ +ffi.set_source("_curses_cffi", """ #ifdef __APPLE__ /* the following define is necessary for OS X 10.6+; without it, the Apple-supplied ncurses.h sets NCURSES_OPAQUE to 1, and then Python @@ -323,3 +322,6 @@ getsyx(yx[0], yx[1]); } """, libraries=['ncurses', 'panel']) + +if __name__ == '__main__': + ffi.compile() diff --git a/demo/_curses_setup.py b/demo/_curses_setup.py new file mode 100644 --- /dev/null +++ b/demo/_curses_setup.py @@ -0,0 +1,13 @@ +from setuptools import setup + +setup( + name="_curses", + version="0.1", + py_modules=["_curses"], + setup_requires=["cffi>=1.0"], + cffi_modules=[ + "_curses_build:ffi", + ], + install_requires=["cffi>=1.0"], # should maybe be "cffi-backend" only? 
+ zip_safe=False, +) diff --git a/demo/readdir2_build.py b/demo/readdir2_build.py --- a/demo/readdir2_build.py +++ b/demo/readdir2_build.py @@ -1,5 +1,4 @@ from cffi import FFI -from _cffi1 import recompile ffi = FFI() ffi.cdef(""" @@ -21,7 +20,7 @@ static const int DT_DIR; """) -recompile(ffi, "_readdir2", """ +ffi.set_source("_readdir2", """ #ifndef _ATFILE_SOURCE # define _ATFILE_SOURCE #endif @@ -32,3 +31,6 @@ #include #include """) + +if __name__ == '__main__': + ffi.compile() diff --git a/demo/readdir2_setup.py b/demo/readdir2_setup.py new file mode 100644 --- /dev/null +++ b/demo/readdir2_setup.py @@ -0,0 +1,13 @@ +from setuptools import setup + +setup( + name="readdir2", + version="0.1", + py_modules=["readdir2"], + setup_requires=["cffi>=1.0"], + cffi_modules=[ + "readdir2_build:ffi", + ], + install_requires=["cffi>=1.0"], # should maybe be "cffi-backend" only? + zip_safe=False, +) From noreply at buildbot.pypy.org Wed Apr 29 20:13:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 20:13:51 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: A demo using ffi.include() --- which doesn't work right now Message-ID: <20150429181351.7A8B41C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1879:28be0615f2f3 Date: 2015-04-29 20:14 +0200 http://bitbucket.org/cffi/cffi/changeset/28be0615f2f3/ Log: A demo using ffi.include() --- which doesn't work right now diff --git a/demo/recopendirtype.py b/demo/recopendirtype.py new file mode 100644 --- /dev/null +++ b/demo/recopendirtype.py @@ -0,0 +1,50 @@ +from _recopendirtype import ffi, lib + + +def _posix_error(): + raise OSError(ffi.errno, os.strerror(ffi.errno)) + +_dtype_to_smode = { + lib.DT_BLK: 0o060000, + lib.DT_CHR: 0o020000, + lib.DT_DIR: 0o040000, + lib.DT_FIFO: 0o010000, + lib.DT_LNK: 0o120000, + lib.DT_REG: 0o100000, + lib.DT_SOCK: 0o140000, +} + +def opendir(dir): + if len(dir) == 0: + dir = '.' 
+ dirname = dir + if not dirname.endswith('/'): + dirname += '/' + dirp = lib.opendir(dir) + if dirp == ffi.NULL: + raise _posix_error() + dirent = ffi.new("struct dirent *") + result = ffi.new("struct dirent **") + try: + while True: + ffi.errno = 0 + err = lib.readdir_r(dirp, dirent, result) + if err: # really got an error + raise OSError(err, os.strerror(err)) + if result[0] == ffi.NULL: + return # + name = ffi.string(dirent.d_name) + if name == '.' or name == '..': + continue + name = dirname + name + try: + smode = _dtype_to_smode[dirent.d_type] + except KeyError: + smode = os.lstat(name).st_mode + yield name, smode + finally: + lib.closedir(dirp) + +if __name__ == '__main__': + for name, smode in opendir('/tmp'): + print hex(smode), name diff --git a/demo/recopendirtype_build.py b/demo/recopendirtype_build.py new file mode 100644 --- /dev/null +++ b/demo/recopendirtype_build.py @@ -0,0 +1,19 @@ +from cffi import FFI +import bsdopendirtype_build + +ffi = FFI() + +# ========== This is a demo of ffi.include() ========== +ffi.include(bsdopendirtype_build.ffi) + +ffi.cdef(""" + int readdir_r(DIR *dirp, struct dirent *entry, struct dirent **result); +""") + +ffi.set_source("_recopendirtype", """ + #include + #include +""") + +if __name__ == '__main__': + ffi.compile() From noreply at buildbot.pypy.org Wed Apr 29 20:22:46 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 20:22:46 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Test and fix Message-ID: <20150429182246.0F3131C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1880:e5f48577bca9 Date: 2015-04-29 20:23 +0200 http://bitbucket.org/cffi/cffi/changeset/e5f48577bca9/ Log: Test and fix diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -568,11 +568,31 @@ "typedef struct _mystruct_s mystruct_t;") ffi = FFI() ffi.include(ffi1) - ffi.cdef("mystruct_t *ff6(void);") + 
ffi.cdef("mystruct_t *ff6(void); int ff6b(mystruct_t *);") lib = verify(ffi, "test_include_6", "typedef struct _mystruct_s mystruct_t; //usually from a #include\n" "struct _mystruct_s { int x; };\n" "static mystruct_t result_struct = { 42 };\n" - "mystruct_t *ff6(void) { return &result_struct; }") + "mystruct_t *ff6(void) { return &result_struct; }\n" + "int ff6b(mystruct_t *p) { return p->x; }") p = lib.ff6() assert ffi.cast("int *", p)[0] == 42 + assert lib.ff6b(p) == 42 + +def test_include_7(): + ffi1 = FFI() + ffi1.cdef("typedef ... mystruct_t;\n" + "int ff7b(mystruct_t *);") + verify(ffi1, "test_include_7_parent", + "typedef struct { int x; } mystruct_t;\n" + "int ff7b(mystruct_t *p) { return p->x; }") + ffi = FFI() + ffi.include(ffi1) + ffi.cdef("mystruct_t *ff7(void);") + lib = verify(ffi, "test_include_7", + "typedef struct { int x; } mystruct_t; //usually from a #include\n" + "static mystruct_t result_struct = { 42 };" + "mystruct_t *ff7(void) { return &result_struct; }") + p = lib.ff7() + assert ffi.cast("int *", p)[0] == 42 + assert lib.ff7b(p) == 42 diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -611,6 +611,6 @@ if kind in ('struct', 'union', 'enum', 'anonymous'): self._declare(name, tp, included=True) elif kind == 'typedef': - self._declare(name, tp) + self._declare(name, tp, included=True) for k, v in other._int_constants.items(): self._add_constants(k, v) From noreply at buildbot.pypy.org Wed Apr 29 21:28:27 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 29 Apr 2015 21:28:27 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: add implementation of 9ea844c184eb Message-ID: <20150429192827.531241C068C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-fixes Changeset: r76947:43445ac2ff6b Date: 2015-04-27 20:27 +0300 http://bitbucket.org/pypy/pypy/changeset/43445ac2ff6b/ Log: add implementation of 9ea844c184eb diff --git a/pypy/module/micronumpy/types.py 
b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1305,12 +1305,30 @@ @complex_binary_op def floordiv(self, v1, v2): - try: - ab = v1[0]*v2[0] + v1[1]*v2[1] - bb = v2[0]*v2[0] + v2[1]*v2[1] - return math.floor(ab/bb), 0. - except ZeroDivisionError: - return rfloat.NAN, 0. + (r1, i1), (r2, i2) = v1, v2 + if r2 < 0: + abs_r2 = -r2 + else: + abs_r2 = r2 + if i2 < 0: + abs_i2 = -i2 + else: + abs_i2 = i2 + if abs_r2 >= abs_i2: + if abs_r2 == 0.0: + return rfloat.NAN, 0. + else: + ratio = i2 / r2 + denom = r2 + i2 * ratio + rr = (r1 + i1 * ratio) / denom + elif rfloat.isnan(r2): + rr = rfloat.NAN + else: + ratio = r2 / i2 + denom = r2 * ratio + i2 + assert i2 != 0.0 + rr = (r1 * ratio + i1) / denom + return math.floor(rr), 0. #complex mod does not exist in numpy #@simple_binary_op From noreply at buildbot.pypy.org Wed Apr 29 21:28:28 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 29 Apr 2015 21:28:28 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: special case sign(nan) Message-ID: <20150429192828.90AD91C068C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-fixes Changeset: r76948:15dee07ee651 Date: 2015-04-28 00:18 +0300 http://bitbucket.org/pypy/pypy/changeset/15dee07ee651/ Log: special case sign(nan) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -722,6 +722,8 @@ def sign(self, v): if v == 0.0: return 0.0 + if rfloat.isnan(v): + return rfloat.NAN return rfloat.copysign(1.0, v) @raw_unary_op From noreply at buildbot.pypy.org Wed Apr 29 21:28:29 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 29 Apr 2015 21:28:29 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: make view actually a view rather than a copy if dtype is same-length Message-ID: <20150429192829.D59681C068C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-fixes Changeset: 
r76949:8e4a51fc2b92 Date: 2015-04-28 22:53 +0300 http://bitbucket.org/pypy/pypy/changeset/8e4a51fc2b92/ Log: make view actually a view rather than a copy if dtype is same-length diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -90,9 +90,13 @@ new_shape, self, orig_array) return None - def get_view(self, space, orig_array, dtype, new_shape): - strides, backstrides = calc_strides(new_shape, dtype, + def get_view(self, space, orig_array, dtype, new_shape, reuse_strides=False): + if not reuse_strides: + strides, backstrides = calc_strides(new_shape, dtype, self.order) + else: + strides = self.get_strides() + backstrides = self.get_backstrides() return SliceArray(self.start, strides, backstrides, new_shape, self, orig_array, dtype=dtype) @@ -328,11 +332,7 @@ return ArrayBuffer(self, readonly) def astype(self, space, dtype): - # we want to create a new array, but must respect the strides - # in self. 
So find a factor of the itemtype.elsize, and use this - factor = float(dtype.elsize) / self.dtype.elsize - strides = [int(factor*s) for s in self.get_strides()] - backstrides = [int(factor*s) for s in self.get_backstrides()] + strides, backstrides = calc_strides(self.get_shape(), dtype, self.order) impl = ConcreteArray(self.get_shape(), dtype, self.order, strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -850,7 +850,15 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) # Strides, shape does not change - v = impl.astype(space, dtype) + if dtype.is_object() != impl.dtype.is_object(): + raise oefmt(space.w_ValueError, 'expect trouble in ndarray.view,' + ' target dtype %r but self.dtype %r',dtype, impl.dtype) + + base = impl.base() + if base is None: + base = self + v = impl.get_view(space, base, dtype, self.get_shape(), + reuse_strides=True) return wrap_impl(space, w_type, self, v) strides = impl.get_strides() if dims == 1 or strides[0] =0 - assert offset < storage._obj.getlength() + try: + assert offset < storage._obj.getlength() + except AttributeError: + pass return _raw_storage_setitem_unaligned(storage, offset, value) def raw_storage_getitem_unaligned(T, storage, offset): assert offset >=0 - assert offset < storage._obj.getlength() + try: + assert offset < storage._obj.getlength() + except AttributeError: + pass return _raw_storage_getitem_unaligned(T, storage, offset) ''' - def simple_unary_op(func): specialize.argtype(1)(func) @functools.wraps(func) From noreply at buildbot.pypy.org Wed Apr 29 21:28:31 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 29 Apr 2015 21:28:31 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: typos, add failing view test Message-ID: <20150429192831.0EEC91C068C@cobra.cs.uni-duesseldorf.de> 
Author: mattip Branch: numpy-fixes Changeset: r76950:f169593652bc Date: 2015-04-29 08:12 +0300 http://bitbucket.org/pypy/pypy/changeset/f169593652bc/ Log: typos, add failing view test diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -852,7 +852,7 @@ # Strides, shape does not change if dtype.is_object() != impl.dtype.is_object(): raise oefmt(space.w_ValueError, 'expect trouble in ndarray.view,' - ' target dtype %r but self.dtype %r',dtype, impl.dtype) + ' one of target dtype or dtype is object dtype') base = impl.base() if base is None: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1820,6 +1820,10 @@ assert v.strides == s.strides assert v.base is s.base assert (v == 2).all() + y = empty([6,6], 'uint32') + s = y.swapaxes(0, 1) + v = s.view(y.__class__) + assert v.strides == (4, 24) def test_tolist_scalar(self): from numpy import dtype diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -405,7 +405,7 @@ def test_setstate_no_version(self): # Some subclasses of ndarray, like MaskedArray, do not use - # version in __setstare__ + # version in __setstate__ from numpy import ndarray, array from pickle import loads, dumps import sys, new From noreply at buildbot.pypy.org Wed Apr 29 21:28:32 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 29 Apr 2015 21:28:32 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: simplify and fix logic of strides, shapes for view() Message-ID: <20150429192832.30F231C068C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-fixes Changeset: r76951:ad108273e79b Date: 2015-04-29 22:28 +0300 
http://bitbucket.org/pypy/pypy/changeset/ad108273e79b/ Log: simplify and fix logic of strides, shapes for view() diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -90,13 +90,10 @@ new_shape, self, orig_array) return None - def get_view(self, space, orig_array, dtype, new_shape, reuse_strides=False): - if not reuse_strides: + def get_view(self, space, orig_array, dtype, new_shape, strides=None, backstrides=None): + if not strides: strides, backstrides = calc_strides(new_shape, dtype, self.order) - else: - strides = self.get_strides() - backstrides = self.get_backstrides() return SliceArray(self.start, strides, backstrides, new_shape, self, orig_array, dtype=dtype) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -844,36 +844,35 @@ if old_itemsize != new_itemsize: raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) + strides = None + backstrides = None + base = self else: - if not is_c_contiguous(impl) and not is_f_contiguous(impl): - if old_itemsize != new_itemsize: + base = impl.base() + if base is None: + base = self + strides = impl.get_strides()[:] + backstrides = impl.get_backstrides()[:] + if old_itemsize != new_itemsize: + if not is_c_contiguous(impl) and not is_f_contiguous(impl): raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) - # Strides, shape does not change - if dtype.is_object() != impl.dtype.is_object(): - raise oefmt(space.w_ValueError, 'expect trouble in ndarray.view,' - ' one of target dtype or dtype is object dtype') - - base = impl.base() - if base is None: - base = self - v = impl.get_view(space, base, dtype, self.get_shape(), - reuse_strides=True) - return wrap_impl(space, w_type, self, v) - strides = impl.get_strides() - if 
dims == 1 or strides[0] Author: Armin Rigo Branch: cffi-1.0 Changeset: r1881:a41b31913b62 Date: 2015-04-29 22:22 +0200 http://bitbucket.org/cffi/cffi/changeset/a41b31913b62/ Log: Python 3 compat diff --git a/_cffi1/__init__.py b/_cffi1/__init__.py --- a/_cffi1/__init__.py +++ b/_cffi1/__init__.py @@ -1,1 +1,1 @@ -from recompiler import make_c_source, recompile +from .recompiler import make_c_source, recompile diff --git a/_cffi1/_cffi_include.h b/_cffi1/_cffi_include.h --- a/_cffi1/_cffi_include.h +++ b/_cffi1/_cffi_include.h @@ -148,7 +148,8 @@ #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) #define _cffi_init_module \ - ((int(*)(char *, const struct _cffi_type_context_s *))_cffi_exports[25]) + ((PyObject *(*)(char *, const struct _cffi_type_context_s *)) \ + _cffi_exports[25]) #define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; diff --git a/_cffi1/cffi1_module.c b/_cffi1/cffi1_module.c --- a/_cffi1/cffi1_module.c +++ b/_cffi1/cffi1_module.c @@ -98,25 +98,46 @@ return -1; } -static int _cffi_init_module(char *module_name, - const struct _cffi_type_context_s *ctx) +static PyObject *_cffi_init_module(char *module_name, + const struct _cffi_type_context_s *ctx) { - PyObject *m = Py_InitModule(module_name, NULL); + PyObject *m; + +#if PY_MAJOR_VERSION >= 3 + /* note: the module_def leaks, but anyway the C extension module cannot + be unloaded */ + struct PyModuleDef *module_def; + module_def = PyObject_Malloc(sizeof(struct PyModuleDef)); + if (module_def == NULL) + return PyErr_NoMemory(); + + struct PyModuleDef local_module_def = { + PyModuleDef_HEAD_INIT, + module_name, + NULL, + -1, + NULL, NULL, NULL, NULL, NULL + }; + *module_def = local_module_def; + m = PyModule_Create(module_def); +#else + m = Py_InitModule(module_name, NULL); +#endif if (m == NULL) - return -1; + return NULL; FFIObject *ffi = ffi_internal_new(&FFI_Type, ctx); Py_XINCREF(ffi); /* make the ffi object really 
immortal */ if (ffi == NULL || PyModule_AddObject(m, "ffi", (PyObject *)ffi) < 0) - return -1; + return NULL; LibObject *lib = lib_internal_new(ffi->types_builder, module_name); if (lib == NULL || PyModule_AddObject(m, "lib", (PyObject *)lib) < 0) - return -1; + return NULL; if (make_included_tuples(ctx->includes, &ffi->types_builder->included_ffis, &lib->l_includes) < 0) - return -1; + return NULL; - return 0; + return m; } diff --git a/_cffi1/cffi_opcode.py b/_cffi1/cffi_opcode.py --- a/_cffi1/cffi_opcode.py +++ b/_cffi1/cffi_opcode.py @@ -139,6 +139,6 @@ F_EXTERNAL = 0x08 CLASS_NAME = {} -for _name, _value in globals().items(): +for _name, _value in list(globals().items()): if _name.startswith('OP_') and isinstance(_value, int): CLASS_NAME[_value] = _name[3:] diff --git a/_cffi1/cgc.c b/_cffi1/cgc.c --- a/_cffi1/cgc.c +++ b/_cffi1/cgc.c @@ -39,7 +39,7 @@ PyObject *data; if (const_name_pop == NULL) { - const_name_pop = PyString_InternFromString("pop"); + const_name_pop = PyText_InternFromString("pop"); if (const_name_pop == NULL) return NULL; } diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -424,11 +424,11 @@ char *p; base_name_len = strlen(ct->ct_name); - result = PyString_FromStringAndSize(NULL, base_name_len + extra_text_len); + result = PyBytes_FromStringAndSize(NULL, base_name_len + extra_text_len); if (result == NULL) return NULL; - p = PyString_AS_STRING(result); + p = PyBytes_AS_STRING(result); memcpy(p, ct->ct_name, ct->ct_name_position); p += ct->ct_name_position; p += extra_text_len; @@ -473,7 +473,7 @@ if (res == NULL) return NULL; - p = PyString_AS_STRING(res) + ct->ct_name_position; + p = PyBytes_AS_STRING(res) + ct->ct_name_position; if (add_paren) *p++ = '('; if (add_space) @@ -481,6 +481,16 @@ memcpy(p, replace_with, replace_with_len); if (add_paren) p[replace_with_len] = ')'; + +#if PY_MAJOR_VERSION >= 3 + /* bytes -> unicode string */ + PyObject *u = 
PyUnicode_DecodeLatin1(PyBytes_AS_STRING(res), + PyBytes_GET_SIZE(res), + NULL); + Py_DECREF(res); + res = u; +#endif + return res; } diff --git a/_cffi1/lib_obj.c b/_cffi1/lib_obj.c --- a/_cffi1/lib_obj.c +++ b/_cffi1/lib_obj.c @@ -311,7 +311,7 @@ int i; for (i = 0; i < total; i++) { - PyObject *s = PyString_FromString(g[i].name); + PyObject *s = PyText_FromString(g[i].name); if (s == NULL) { Py_DECREF(lst); return NULL; @@ -370,7 +370,7 @@ LibObject *lib; PyObject *libname, *dict; - libname = PyString_FromString(module_name); + libname = PyText_FromString(module_name); dict = PyDict_New(); if (libname == NULL || dict == NULL) { Py_XDECREF(dict); diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -474,7 +474,7 @@ j = 0; while (p[j] != ',' && p[j] != '\0') j++; - tmp = PyString_FromStringAndSize(p, j); + tmp = PyText_FromStringAndSize(p, j); if (tmp == NULL) break; PyTuple_SET_ITEM(enumerators, i, tmp); diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -1,6 +1,6 @@ import os, sys from cffi import ffiplatform, model -from cffi_opcode import * +from .cffi_opcode import * class Recompiler: @@ -225,6 +225,16 @@ # # the init function, loading _cffi_backend and calling a method there base_module_name = self.module_name.split('.')[-1] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt('PyMODINIT_FUNC') + prnt('PyInit_%s(void)' % (base_module_name,)) + prnt('{') + prnt(' if (_cffi_init() < 0)') + prnt(' return NULL;') + prnt(' return _cffi_init_module("%s", &_cffi_type_context);' % ( + self.module_name,)) + prnt('}') + prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') @@ -233,6 +243,7 @@ prnt(' _cffi_init_module("%s", &_cffi_type_context);' % ( self.module_name,)) prnt('}') + prnt('#endif') self.ffi._recompiler_module_name = self.module_name # ---------- diff --git a/_cffi1/support.py b/_cffi1/support.py 
--- a/_cffi1/support.py +++ b/_cffi1/support.py @@ -1,6 +1,19 @@ +import sys -class U(object): - def __add__(self, other): - return eval('u'+repr(other).replace(r'\\u', r'\u') - .replace(r'\\U', r'\U')) -u = U() +if sys.version_info < (3,): + __all__ = ['u'] + + class U(object): + def __add__(self, other): + return eval('u'+repr(other).replace(r'\\u', r'\u') + .replace(r'\\U', r'\U')) + u = U() + assert u+'a\x00b' == eval(r"u'a\x00b'") + assert u+'a\u1234b' == eval(r"u'a\u1234b'") + assert u+'a\U00012345b' == eval(r"u'a\U00012345b'") + +else: + __all__ = ['u', 'unicode', 'long'] + u = "" + unicode = str + long = int diff --git a/_cffi1/test_ffi_obj.py b/_cffi1/test_ffi_obj.py --- a/_cffi1/test_ffi_obj.py +++ b/_cffi1/test_ffi_obj.py @@ -57,8 +57,8 @@ def test_ffi_string(): ffi = _cffi1_backend.FFI() - p = ffi.new("char[]", "foobar\x00baz") - assert ffi.string(p) == "foobar" + p = ffi.new("char[]", b"foobar\x00baz") + assert ffi.string(p) == b"foobar" def test_ffi_errno(): # xxx not really checking errno, just checking that we can read/write it diff --git a/_cffi1/test_new_ffi_1.py b/_cffi1/test_new_ffi_1.py --- a/_cffi1/test_new_ffi_1.py +++ b/_cffi1/test_new_ffi_1.py @@ -1513,8 +1513,8 @@ outputfilename) ffi2 = module.ffi # - p = ffi2.new("struct nonpacked *", ['A', -43141]) - assert p.a == 'A' + p = ffi2.new("struct nonpacked *", [b'A', -43141]) + assert p.a == b'A' assert p.b == -43141 # p = ffi.new("union simple_u *", [-52525]) diff --git a/_cffi1/test_parse_c_type.py b/_cffi1/test_parse_c_type.py --- a/_cffi1/test_parse_c_type.py +++ b/_cffi1/test_parse_c_type.py @@ -32,7 +32,7 @@ assert global_names == sorted(global_names) ctx = ffi.new("struct _cffi_type_context_s *") -c_struct_names = [ffi.new("char[]", _n) for _n in struct_names] +c_struct_names = [ffi.new("char[]", _n.encode('ascii')) for _n in struct_names] ctx_structs = ffi.new("struct _cffi_struct_union_s[]", len(struct_names)) for _i in range(len(struct_names)): ctx_structs[_i].name = 
c_struct_names[_i] @@ -40,14 +40,15 @@ ctx.struct_unions = ctx_structs ctx.num_struct_unions = len(struct_names) -c_enum_names = [ffi.new("char[]", _n) for _n in enum_names] +c_enum_names = [ffi.new("char[]", _n.encode('ascii')) for _n in enum_names] ctx_enums = ffi.new("struct _cffi_enum_s[]", len(enum_names)) for _i in range(len(enum_names)): ctx_enums[_i].name = c_enum_names[_i] ctx.enums = ctx_enums ctx.num_enums = len(enum_names) -c_identifier_names = [ffi.new("char[]", _n) for _n in identifier_names] +c_identifier_names = [ffi.new("char[]", _n.encode('ascii')) + for _n in identifier_names] ctx_identifiers = ffi.new("struct _cffi_typename_s[]", len(identifier_names)) for _i in range(len(identifier_names)): ctx_identifiers[_i].name = c_identifier_names[_i] @@ -69,7 +70,7 @@ return 1 ctx_globals = ffi.new("struct _cffi_global_s[]", len(global_names)) -c_glob_names = [ffi.new("char[]", _n) for _n in global_names] +c_glob_names = [ffi.new("char[]", _n.encode('ascii')) for _n in global_names] for _i, _fn in enumerate([fetch_constant_five, fetch_constant_neg, fetch_constant_zero]): @@ -90,9 +91,9 @@ info.output_size = len(out) for j in range(len(out)): out[j] = ffi.cast("void *", -424242) - res = lib.parse_c_type(info, input) + res = lib.parse_c_type(info, input.encode('ascii')) if res < 0: - raise ParseError(ffi.string(info.error_message), + raise ParseError(ffi.string(info.error_message).decode('ascii'), info.error_location) assert 0 <= res < len(out) result = [] diff --git a/_cffi1/test_realize_c_type.py b/_cffi1/test_realize_c_type.py --- a/_cffi1/test_realize_c_type.py +++ b/_cffi1/test_realize_c_type.py @@ -43,6 +43,6 @@ check("int(*)(long[5])", "int(*)(long *)") def test_all_primitives(): - import cffi_opcode + from . 
import cffi_opcode for name in cffi_opcode.PRIMITIVE_TO_INDEX: check(name, name) diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -310,7 +310,7 @@ ffi = FFI() ffi.cdef("""enum e1 { B1, A1, ... }; enum e2 { B2, A2, ... };""") lib = verify(ffi, 'test_verify_enum', - "enum e1 { A1, B1, C1=%d };" % sys.maxint + + "enum e1 { A1, B1, C1=%d };" % sys.maxsize + "enum e2 { A2, B2, C2 };") ffi.typeof("enum e1") ffi.typeof("enum e2") @@ -407,10 +407,10 @@ assert repr(ffi.cast("e1", 2)) == "" # ffi = FFI() - ffi.cdef("typedef enum { AA=%d } e1;" % sys.maxint) + ffi.cdef("typedef enum { AA=%d } e1;" % sys.maxsize) lib = verify(ffi, 'test_verify_anonymous_enum_with_typedef2', - "typedef enum { AA=%d } e1;" % sys.maxint) - assert lib.AA == sys.maxint + "typedef enum { AA=%d } e1;" % sys.maxsize) + assert lib.AA == sys.maxsize assert ffi.sizeof("e1") == ffi.sizeof("long") def test_unique_types(): diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -72,6 +72,7 @@ # define PyText_FromString PyUnicode_FromString # define PyText_FromStringAndSize PyUnicode_FromStringAndSize # define PyText_InternInPlace PyUnicode_InternInPlace +# define PyText_InternFromString PyUnicode_InternFromString # define PyIntOrLong_Check PyLong_Check #else # define STR_OR_BYTES "str" @@ -85,6 +86,7 @@ # define PyText_FromString PyString_FromString # define PyText_FromStringAndSize PyString_FromStringAndSize # define PyText_InternInPlace PyString_InternInPlace +# define PyText_InternFromString PyString_InternFromString # define PyIntOrLong_Check(op) (PyInt_Check(op) || PyLong_Check(op)) #endif @@ -92,6 +94,7 @@ # define PyInt_FromLong PyLong_FromLong # define PyInt_FromSsize_t PyLong_FromSsize_t # define PyInt_AsSsize_t PyLong_AsSsize_t +# define PyInt_AsLong PyLong_AsLong #endif #if PY_MAJOR_VERSION >= 3 @@ -3404,11 +3407,11 @@ const void **pkey; int err; - key = 
PyString_FromStringAndSize(NULL, keylength * sizeof(void *)); + key = PyBytes_FromStringAndSize(NULL, keylength * sizeof(void *)); if (key == NULL) goto error; - pkey = (const void **)PyString_AS_STRING(key); + pkey = (const void **)PyBytes_AS_STRING(key); for (i = 0; i < keylength; i++) pkey[i] = unique_key[i]; From noreply at buildbot.pypy.org Wed Apr 29 22:31:34 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 22:31:34 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Fun. Remove this case when testing on 32-bits. Message-ID: <20150429203134.E1F5E1C1334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1882:0ce3a90f5bb0 Date: 2015-04-29 22:32 +0200 http://bitbucket.org/cffi/cffi/changeset/0ce3a90f5bb0/ Log: Fun. Remove this case when testing on 32-bits. diff --git a/_cffi1/test_recompiler.py b/_cffi1/test_recompiler.py --- a/_cffi1/test_recompiler.py +++ b/_cffi1/test_recompiler.py @@ -178,9 +178,13 @@ def test_macro_check_value(): # the value '-0x80000000' in C sources does not have a clear meaning # to me; it appears to have a different effect than '-2147483648'... + # Moreover, on 32-bits, -2147483648 is actually equal to + # -2147483648U, which in turn is equal to 2147483648U and so positive. 
vals = ['42', '-42', '0x80000000', '-2147483648', '0', '9223372036854775809ULL', '-9223372036854775807LL'] + if sys.maxsize <= 2**32: + vals.remove('-2147483648') ffi = FFI() cdef_lines = ['#define FOO_%d_%d %s' % (i, j, vals[i]) for i in range(len(vals)) From noreply at buildbot.pypy.org Wed Apr 29 22:37:32 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 22:37:32 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Python 3 compat Message-ID: <20150429203732.AA7C81C1334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1883:dd6bfefe3e78 Date: 2015-04-29 22:35 +0200 http://bitbucket.org/cffi/cffi/changeset/dd6bfefe3e78/ Log: Python 3 compat diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -1733,7 +1733,7 @@ (-1, ''), (0xffffffff, 'U'), (maxulong, 'UL'), - (-maxulong / 3, 'L')]: + (-int(maxulong / 3), 'L')]: if c2c and sys.platform == 'win32': continue # enums may always be signed with MSVC ffi = FFI() From noreply at buildbot.pypy.org Wed Apr 29 22:37:33 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 29 Apr 2015 22:37:33 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Python 3 compatible code in these demos Message-ID: <20150429203733.C99071C1334@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1884:e6cfa95230d2 Date: 2015-04-29 22:38 +0200 http://bitbucket.org/cffi/cffi/changeset/e6cfa95230d2/ Log: Python 3 compatible code in these demos diff --git a/demo/bsdopendirtype.py b/demo/bsdopendirtype.py --- a/demo/bsdopendirtype.py +++ b/demo/bsdopendirtype.py @@ -16,10 +16,10 @@ def opendir(dir): if len(dir) == 0: - dir = '.' + dir = b'.' 
dirname = dir - if not dirname.endswith('/'): - dirname += '/' + if not dirname.endswith(b'/'): + dirname += b'/' dirp = lib.opendir(dir) if dirp == ffi.NULL: raise _posix_error() @@ -32,7 +32,7 @@ raise _posix_error() return name = ffi.string(dirent.d_name) - if name == '.' or name == '..': + if name == b'.' or name == b'..': continue name = dirname + name try: @@ -44,5 +44,5 @@ lib.closedir(dirp) if __name__ == '__main__': - for name, smode in opendir('/tmp'): - print hex(smode), name + for name, smode in opendir(b'/tmp'): + print(hex(smode), name) diff --git a/demo/recopendirtype.py b/demo/recopendirtype.py --- a/demo/recopendirtype.py +++ b/demo/recopendirtype.py @@ -16,10 +16,10 @@ def opendir(dir): if len(dir) == 0: - dir = '.' + dir = b'.' dirname = dir - if not dirname.endswith('/'): - dirname += '/' + if not dirname.endswith(b'/'): + dirname += b'/' dirp = lib.opendir(dir) if dirp == ffi.NULL: raise _posix_error() @@ -34,7 +34,7 @@ if result[0] == ffi.NULL: return # name = ffi.string(dirent.d_name) - if name == '.' or name == '..': + if name == b'.' 
or name == b'..': continue name = dirname + name try: @@ -46,5 +46,5 @@ lib.closedir(dirp) if __name__ == '__main__': - for name, smode in opendir('/tmp'): - print hex(smode), name + for name, smode in opendir(b'/tmp'): + print(hex(smode), name) From noreply at buildbot.pypy.org Thu Apr 30 00:46:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 00:46:36 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Windows-friendlification Message-ID: <20150429224636.E3CBB1C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1885:68e7f03a93d1 Date: 2015-04-30 00:47 +0200 http://bitbucket.org/cffi/cffi/changeset/68e7f03a93d1/ Log: Windows-friendlification diff --git a/_cffi1/cffi1_module.c b/_cffi1/cffi1_module.c --- a/_cffi1/cffi1_module.c +++ b/_cffi1/cffi1_module.c @@ -102,22 +102,22 @@ const struct _cffi_type_context_s *ctx) { PyObject *m; + FFIObject *ffi; + LibObject *lib; #if PY_MAJOR_VERSION >= 3 /* note: the module_def leaks, but anyway the C extension module cannot be unloaded */ - struct PyModuleDef *module_def; - module_def = PyObject_Malloc(sizeof(struct PyModuleDef)); - if (module_def == NULL) - return PyErr_NoMemory(); - - struct PyModuleDef local_module_def = { + struct PyModuleDef *module_def, local_module_def = { PyModuleDef_HEAD_INIT, module_name, NULL, -1, NULL, NULL, NULL, NULL, NULL }; + module_def = PyMem_Malloc(sizeof(struct PyModuleDef)); + if (module_def == NULL) + return PyErr_NoMemory(); *module_def = local_module_def; m = PyModule_Create(module_def); #else @@ -126,12 +126,12 @@ if (m == NULL) return NULL; - FFIObject *ffi = ffi_internal_new(&FFI_Type, ctx); + ffi = ffi_internal_new(&FFI_Type, ctx); Py_XINCREF(ffi); /* make the ffi object really immortal */ if (ffi == NULL || PyModule_AddObject(m, "ffi", (PyObject *)ffi) < 0) return NULL; - LibObject *lib = lib_internal_new(ffi->types_builder, module_name); + lib = lib_internal_new(ffi->types_builder, module_name); if (lib == NULL || 
PyModule_AddObject(m, "lib", (PyObject *)lib) < 0) return NULL; diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -110,6 +110,9 @@ Does not return a new reference! */ if ((accept & ACCEPT_STRING) && PyText_Check(arg)) { + int index, err; + char *input_text; + CTypeDescrObject *ct; PyObject *types_dict = ffi->types_builder->types_dict; PyObject *x = PyDict_GetItem(types_dict, arg); if (x != NULL) { @@ -117,8 +120,8 @@ return (CTypeDescrObject *)x; } - char *input_text = PyText_AS_UTF8(arg); - int index = parse_c_type(&ffi->info, input_text); + input_text = PyText_AS_UTF8(arg); + index = parse_c_type(&ffi->info, input_text); if (index < 0) { size_t num_spaces = ffi->info.error_location; char spaces[num_spaces + 1]; @@ -128,7 +131,6 @@ input_text, spaces); return NULL; } - CTypeDescrObject *ct; if (accept & CONSIDER_FN_AS_FNPTR) { ct = realize_c_type_fn_as_fnptr(ffi->types_builder, ffi->info.output, index); @@ -147,7 +149,7 @@ sure that in any case the next _ffi_type() with the same 'arg' will succeed early, in PyDict_GetItem() above. 
*/ - int err = PyDict_SetItem(types_dict, arg, (PyObject *)ct); + err = PyDict_SetItem(types_dict, arg, (PyObject *)ct); Py_DECREF(ct); /* we know it was written in types_dict (unless we got out of memory), so there is at least this reference left */ if (err < 0) @@ -822,6 +824,8 @@ _fetch_external_struct_or_union(const struct _cffi_struct_union_s *s, PyObject *included_ffis, int recursion) { + Py_ssize_t i; + if (included_ffis == NULL) return NULL; @@ -831,7 +835,6 @@ return NULL; } - Py_ssize_t i; for (i = 0; i < PyTuple_GET_SIZE(included_ffis); i++) { FFIObject *ffi1; const struct _cffi_struct_union_s *s1; diff --git a/_cffi1/lib_obj.c b/_cffi1/lib_obj.c --- a/_cffi1/lib_obj.c +++ b/_cffi1/lib_obj.c @@ -31,6 +31,9 @@ static PyObject *_cpyextfunc_type_index(PyObject *x) { struct CPyExtFunc_s *exf; + LibObject *lib; + PyObject *tuple, *result; + assert(PyErr_Occurred()); if (!PyCFunction_Check(x)) @@ -44,9 +47,7 @@ PyErr_Clear(); - LibObject *lib = (LibObject *)PyCFunction_GET_SELF(x); - PyObject *tuple, *result; - + lib = (LibObject *)PyCFunction_GET_SELF(x); tuple = _realize_c_type_or_func(lib->l_types_builder, lib->l_types_builder->ctx.types, exf->type_index); @@ -84,7 +85,8 @@ by calling _cffi_type(). */ CTypeDescrObject *ct; - int type_index = _CFFI_GETARG(g->type_op); + struct CPyExtFunc_s *xfunc; + int i, type_index = _CFFI_GETARG(g->type_op); _cffi_opcode_t *opcodes = lib->l_types_builder->ctx.types; assert(_CFFI_GETOP(opcodes[type_index]) == _CFFI_OP_FUNCTION); @@ -96,7 +98,7 @@ Py_DECREF(ct); /* argument types: */ - int i = type_index + 1; + i = type_index + 1; while (_CFFI_GETOP(opcodes[i]) != _CFFI_OP_FUNCTION_END) { ct = realize_c_type(lib->l_types_builder, opcodes, i); if (ct == NULL) @@ -110,10 +112,11 @@ There is one per real C function in a CFFI C extension module. CPython never unloads its C extension modules anyway. 
*/ - struct CPyExtFunc_s *xfunc = calloc(1, sizeof(struct CPyExtFunc_s)); + xfunc = PyMem_Malloc(sizeof(struct CPyExtFunc_s)); if (xfunc == NULL) goto no_memory; + memset((char *)xfunc, 0, sizeof(struct CPyExtFunc_s)); xfunc->md.ml_meth = (PyCFunction)g->address; xfunc->md.ml_flags = flags; xfunc->md.ml_name = g->name; @@ -135,15 +138,18 @@ { /* does not return a new reference! */ PyObject *x; - + int index; + const struct _cffi_global_s *g; + CTypeDescrObject *ct; char *s = PyText_AsUTF8(name); if (s == NULL) return NULL; - int index = search_in_globals(&lib->l_types_builder->ctx, s, strlen(s)); + index = search_in_globals(&lib->l_types_builder->ctx, s, strlen(s)); if (index < 0) { if (lib->l_includes != NULL) { + Py_ssize_t i; if (recursion > 100) { PyErr_SetString(PyExc_RuntimeError, @@ -151,7 +157,6 @@ return NULL; } - Py_ssize_t i; for (i = 0; i < PyTuple_GET_SIZE(lib->l_includes); i++) { LibObject *lib1; lib1 = (LibObject *)PyTuple_GET_ITEM(lib->l_includes, i); @@ -180,8 +185,7 @@ return NULL; } - const struct _cffi_global_s *g = &lib->l_types_builder->ctx.globals[index]; - CTypeDescrObject *ct; + g = &lib->l_types_builder->ctx.globals[index]; switch (_CFFI_GETOP(g->type_op)) { @@ -303,13 +307,11 @@ static PyObject *lib_dir(LibObject *lib, PyObject *noarg) { const struct _cffi_global_s *g = lib->l_types_builder->ctx.globals; - int total = lib->l_types_builder->ctx.num_globals; - + int i, total = lib->l_types_builder->ctx.num_globals; PyObject *lst = PyList_New(total); if (lst == NULL) return NULL; - int i; for (i = 0; i < total; i++) { PyObject *s = PyText_FromString(g[i].name); if (s == NULL) { diff --git a/_cffi1/parse_c_type.c b/_cffi1/parse_c_type.c --- a/_cffi1/parse_c_type.c +++ b/_cffi1/parse_c_type.c @@ -225,6 +225,8 @@ type). The 'outer' argument is the index of the opcode outside this "sequel". 
*/ + int check_for_grouping; + _cffi_opcode_t result, *p_current; header: switch (tok->kind) { @@ -244,14 +246,14 @@ break; } - int check_for_grouping = 1; + check_for_grouping = 1; if (tok->kind == TOK_IDENTIFIER) { next_token(tok); /* skip a potential variable name */ check_for_grouping = 0; } - _cffi_opcode_t result = 0; - _cffi_opcode_t *p_current = &result; + result = 0; + p_current = &result; while (tok->kind == TOK_OPEN_PAREN) { next_token(tok); @@ -298,13 +300,15 @@ if (tok->kind != TOK_CLOSE_PAREN) { while (1) { + int arg; + _cffi_opcode_t oarg; + if (tok->kind == TOK_DOTDOTDOT) { has_ellipsis = 1; next_token(tok); break; } - int arg = parse_complete(tok); - _cffi_opcode_t oarg; + arg = parse_complete(tok); switch (_CFFI_GETOP(tok->output[arg])) { case _CFFI_OP_ARRAY: case _CFFI_OP_OPEN_ARRAY: @@ -543,6 +547,10 @@ static int parse_complete(token_t *tok) { + unsigned int t0; + _cffi_opcode_t t1; + int modifiers_length, modifiers_sign; + qualifiers: switch (tok->kind) { case TOK_CONST: @@ -557,10 +565,8 @@ ; } - unsigned int t0; - _cffi_opcode_t t1; - int modifiers_length = 0; - int modifiers_sign = 0; + modifiers_length = 0; + modifiers_sign = 0; modifiers: switch (tok->kind) { @@ -682,12 +688,12 @@ case TOK_STRUCT: case TOK_UNION: { - int kind = tok->kind; + int n, kind = tok->kind; next_token(tok); if (tok->kind != TOK_IDENTIFIER) return parse_error(tok, "struct or union name expected"); - int n = search_in_struct_unions(tok->info->ctx, tok->p, tok->size); + n = search_in_struct_unions(tok->info->ctx, tok->p, tok->size); if (n < 0) return parse_error(tok, "undefined struct/union name"); if (((tok->info->ctx->struct_unions[n].flags & _CFFI_F_UNION) != 0) @@ -699,11 +705,12 @@ } case TOK_ENUM: { + int n; next_token(tok); if (tok->kind != TOK_IDENTIFIER) return parse_error(tok, "enum name expected"); - int n = search_in_enums(tok->info->ctx, tok->p, tok->size); + n = search_in_enums(tok->info->ctx, tok->p, tok->size); if (n < 0) return parse_error(tok, 
"undefined enum name"); diff --git a/_cffi1/realize_c_type.c b/_cffi1/realize_c_type.c --- a/_cffi1/realize_c_type.c +++ b/_cffi1/realize_c_type.c @@ -89,11 +89,12 @@ static builder_c_t *new_builder_c(const struct _cffi_type_context_s *ctx) { + builder_c_t *builder; PyObject *ldict = PyDict_New(); if (ldict == NULL) return NULL; - builder_c_t *builder = PyMem_Malloc(sizeof(builder_c_t)); + builder = PyMem_Malloc(sizeof(builder_c_t)); if (builder == NULL) { Py_DECREF(ldict); PyErr_NoMemory(); @@ -186,6 +187,7 @@ static PyObject *realize_global_int(const struct _cffi_global_s *g) { + char got[64]; unsigned long long value; /* note: we cast g->address to this function type; we do the same in parse_c_type:parse_sequel() too */ @@ -208,8 +210,6 @@ default: break; } - - char got[64]; if (neg == 2) sprintf(got, "%llu (0x%llx)", value, value); else @@ -236,11 +236,12 @@ return (CTypeDescrObject *)x; } else { + char *text1, *text2; PyObject *y; assert(PyTuple_Check(x)); y = PyTuple_GET_ITEM(x, 0); - char *text1 = ((CTypeDescrObject *)y)->ct_name; - char *text2 = text1 + ((CTypeDescrObject *)y)->ct_name_position + 1; + text1 = ((CTypeDescrObject *)y)->ct_name; + text2 = text1 + ((CTypeDescrObject *)y)->ct_name_position + 1; assert(text2[-3] == '('); text2[-3] = '\0'; PyErr_Format(FFIError, "the type '%s%s' is a function type, not a " @@ -444,15 +445,15 @@ Py_INCREF(x); } else { - PyObject *basetd = get_primitive_type(e->type_prim); - if (basetd == NULL) - return NULL; - PyObject *enumerators = NULL, *enumvalues = NULL, *tmp; Py_ssize_t i, j, n = 0; const char *p; const struct _cffi_global_s *g; int gindex; + PyObject *args; + PyObject *basetd = get_primitive_type(e->type_prim); + if (basetd == NULL) + return NULL; if (*e->enumerators != '\0') { n++; @@ -492,7 +493,7 @@ p += j + 1; } - PyObject *args = NULL; + args = NULL; if (!PyErr_Occurred()) { char *name = alloca(6 + strlen(e->name)); _realize_name(name, "enum ", e->name); @@ -603,30 +604,35 @@ assert(ct->ct_flags & 
(CT_STRUCT | CT_UNION)); if (ct->ct_flags & CT_LAZY_FIELD_LIST) { + builder_c_t *builder; + char *p; + int n, i, sflags; + const struct _cffi_struct_union_s *s; + const struct _cffi_field_s *fld; + PyObject *fields, *args, *res; + assert(!(ct->ct_flags & CT_IS_OPAQUE)); - builder_c_t *builder = ct->ct_extra; + builder = ct->ct_extra; assert(builder != NULL); - char *p = alloca(2 + strlen(ct->ct_name)); + p = alloca(2 + strlen(ct->ct_name)); _unrealize_name(p, ct->ct_name); - int n = search_in_struct_unions(&builder->ctx, p, strlen(p)); + n = search_in_struct_unions(&builder->ctx, p, strlen(p)); if (n < 0) Py_FatalError("lost a struct/union!"); - const struct _cffi_struct_union_s *s = &builder->ctx.struct_unions[n]; - const struct _cffi_field_s *fld = - &builder->ctx.fields[s->first_field_index]; + s = &builder->ctx.struct_unions[n]; + fld = &builder->ctx.fields[s->first_field_index]; /* XXX painfully build all the Python objects that are the args to b_complete_struct_or_union() */ - PyObject *fields = PyList_New(s->num_fields); + fields = PyList_New(s->num_fields); if (fields == NULL) return -1; - int i; for (i = 0; i < s->num_fields; i++, fld++) { _cffi_opcode_t op = fld->field_type_op; int fbitsize = -1; @@ -674,24 +680,23 @@ PyList_SET_ITEM(fields, i, f); } - int sflags = 0; + sflags = 0; if (s->flags & _CFFI_F_CHECK_FIELDS) sflags |= SF_STD_FIELD_POS; if (s->flags & _CFFI_F_PACKED) sflags |= SF_PACKED; - PyObject *args = Py_BuildValue("(OOOnni)", ct, fields, - Py_None, - (Py_ssize_t)s->size, - (Py_ssize_t)s->alignment, - sflags); + args = Py_BuildValue("(OOOnni)", ct, fields, Py_None, + (Py_ssize_t)s->size, + (Py_ssize_t)s->alignment, + sflags); Py_DECREF(fields); if (args == NULL) return -1; ct->ct_extra = NULL; ct->ct_flags |= CT_IS_OPAQUE; - PyObject *res = b_complete_struct_or_union(NULL, args); + res = b_complete_struct_or_union(NULL, args); ct->ct_flags &= ~CT_IS_OPAQUE; Py_DECREF(args); From noreply at buildbot.pypy.org Thu Apr 30 01:02:14 2015 From: 
noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 01:02:14 +0200 (CEST) Subject: [pypy-commit] cffi default: More Windows-specific hack Message-ID: <20150429230214.85EAD1C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1886:e77aca5b0519 Date: 2015-04-30 01:00 +0200 http://bitbucket.org/cffi/cffi/changeset/e77aca5b0519/ Log: More Windows-specific hack diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -24,6 +24,7 @@ pass def get_extension(srcfilename, modname, sources=(), **kwds): + _hack_at_distutils() # *before* the following import from distutils.core import Extension allsources = [srcfilename] allsources.extend(sources) From noreply at buildbot.pypy.org Thu Apr 30 01:02:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 01:02:15 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: more Windows fixes Message-ID: <20150429230215.859C71C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1887:596f69d5291c Date: 2015-04-30 01:02 +0200 http://bitbucket.org/cffi/cffi/changeset/596f69d5291c/ Log: more Windows fixes diff --git a/_cffi1/ffi_obj.c b/_cffi1/ffi_obj.c --- a/_cffi1/ffi_obj.c +++ b/_cffi1/ffi_obj.c @@ -124,7 +124,7 @@ index = parse_c_type(&ffi->info, input_text); if (index < 0) { size_t num_spaces = ffi->info.error_location; - char spaces[num_spaces + 1]; + char *spaces = alloca(num_spaces + 1); memset(spaces, ' ', num_spaces); spaces[num_spaces] = '\0'; PyErr_Format(FFIError, "%s\n%s\n%s^", ffi->info.error_message, @@ -447,16 +447,16 @@ static PyObject *ffi_getctype(FFIObject *self, PyObject *args) { - PyObject *cdecl, *res; + PyObject *c_decl, *res; char *p, *replace_with = ""; int add_paren, add_space; CTypeDescrObject *ct; size_t replace_with_len; - if (!PyArg_ParseTuple(args, "O|s:getctype", &cdecl, &replace_with)) + if (!PyArg_ParseTuple(args, "O|s:getctype", &c_decl, &replace_with)) return NULL; - ct = 
_ffi_type(self, cdecl, ACCEPT_STRING|ACCEPT_CTYPE); + ct = _ffi_type(self, c_decl, ACCEPT_STRING|ACCEPT_CTYPE); if (ct == NULL) return NULL; @@ -603,20 +603,20 @@ static PyObject *ffi_callback(FFIObject *self, PyObject *args, PyObject *kwds) { - PyObject *cdecl, *python_callable = Py_None, *error = Py_None; + PyObject *c_decl, *python_callable = Py_None, *error = Py_None; PyObject *res; static char *keywords[] = {"cdecl", "python_callable", "error", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO", keywords, - &cdecl, &python_callable, &error)) + &c_decl, &python_callable, &error)) return NULL; - cdecl = (PyObject *)_ffi_type(self, cdecl, ACCEPT_STRING | ACCEPT_CTYPE | - CONSIDER_FN_AS_FNPTR); - if (cdecl == NULL) + c_decl = (PyObject *)_ffi_type(self, c_decl, ACCEPT_STRING | ACCEPT_CTYPE | + CONSIDER_FN_AS_FNPTR); + if (c_decl == NULL) return NULL; - args = Py_BuildValue("(OOO)", cdecl, python_callable, error); + args = Py_BuildValue("(OOO)", c_decl, python_callable, error); if (args == NULL) return NULL; diff --git a/_cffi1/parse_c_type.c b/_cffi1/parse_c_type.c --- a/_cffi1/parse_c_type.c +++ b/_cffi1/parse_c_type.c @@ -351,9 +351,11 @@ case TOK_INTEGER: errno = 0; +#ifndef MS_WIN32 if (sizeof(length) > sizeof(unsigned long)) length = strtoull(tok->p, &endptr, 0); else +#endif length = strtoul(tok->p, &endptr, 0); if (endptr != tok->p + tok->size) return parse_error(tok, "invalid number"); diff --git a/_cffi1/parse_c_type.h b/_cffi1/parse_c_type.h --- a/_cffi1/parse_c_type.h +++ b/_cffi1/parse_c_type.h @@ -1,5 +1,3 @@ -#include - typedef void *_cffi_opcode_t; @@ -137,7 +135,7 @@ struct _cffi_parse_info_s { const struct _cffi_type_context_s *ctx; _cffi_opcode_t *output; - int output_size; + unsigned int output_size; size_t error_location; const char *error_message; }; diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -24,6 +24,7 @@ pass def get_extension(srcfilename, modname, sources=(), 
**kwds): + _hack_at_distutils() # *before* the following import from distutils.core import Extension allsources = [srcfilename] allsources.extend(sources) From noreply at buildbot.pypy.org Thu Apr 30 01:20:11 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 01:20:11 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: more Windows fixes Message-ID: <20150429232011.C642E1C03F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1888:f6ac55047b65 Date: 2015-04-30 01:20 +0200 http://bitbucket.org/cffi/cffi/changeset/f6ac55047b65/ Log: more Windows fixes diff --git a/_cffi1/_cffi_include.h b/_cffi1/_cffi_include.h --- a/_cffi1/_cffi_include.h +++ b/_cffi1/_cffi_include.h @@ -175,6 +175,12 @@ ((got_nonpos) == (expected <= 0) && \ (got) == (unsigned long long)expected) +#ifdef __GNUC__ +# define _CFFI_UNUSED_FN __attribute__((unused)) +#else +# define _CFFI_UNUSED_FN /* nothing */ +#endif + static int _cffi_init(void) { diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -467,7 +467,7 @@ return prnt = self._prnt checkfuncname = '_cffi_checkfld_%s' % (approxname,) - prnt('__attribute__((unused))') + prnt('_CFFI_UNUSED_FN') prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') diff --git a/_cffi1/test_new_ffi_1.py b/_cffi1/test_new_ffi_1.py --- a/_cffi1/test_new_ffi_1.py +++ b/_cffi1/test_new_ffi_1.py @@ -74,7 +74,13 @@ DEFS_PACKED = """ struct is_packed { char a; int b; } /*here*/; """ - CCODE = DEFS + DEFS_PACKED.replace('/*here*/', '__attribute__((packed))') + if sys.platform == "win32": + DEFS = DEFS.replace('data[0]', 'data[1]') # not supported + CCODE = (DEFS + "\n#pragma pack(push,1)\n" + DEFS_PACKED + + "\n#pragma pack(pop)\n") + else: + CCODE = (DEFS + + DEFS_PACKED.replace('/*here*/', '__attribute__((packed))')) ffi1.cdef(DEFS) ffi1.cdef(DEFS_PACKED, packed=True) @@ -884,7 +890,8 @@ 
assert ffi.cast("enum bar", 0) != ffi.cast("int", 0) assert repr(ffi.cast("enum bar", -1)) == "" assert repr(ffi.cast("enum foq", -1)) == ( # enums are unsigned, if - "") # they contain no neg value + "") or ( # they contain no neg value + sys.platform == "win32") # (but not on msvc) # enum baz { A2=0x1000, B2=0x2000 }; assert ffi.string(ffi.cast("enum baz", 0x1000)) == "A2" assert ffi.string(ffi.cast("enum baz", 0x2000)) == "B2" @@ -902,8 +909,8 @@ assert s.e == 2 assert s[0].e == 2 s.e = ffi.cast("enum foo2", -1) - assert s.e == 4294967295 - assert s[0].e == 4294967295 + assert s.e in (4294967295, -1) # two choices + assert s[0].e in (4294967295, -1) s.e = s.e py.test.raises(TypeError, "s.e = 'B3'") py.test.raises(TypeError, "s.e = '2'") @@ -1000,6 +1007,8 @@ def test_bitfield_enum(self): # typedef enum { AA1, BB1, CC1 } foo_e_t; # typedef struct { foo_e_t f:2; } bfenum_t; + if sys.platform == "win32": + py.test.skip("enums are not unsigned") s = ffi.new("bfenum_t *") s.f = 2 assert s.f == 2 @@ -1198,6 +1207,8 @@ assert repr(p.a).startswith(" Author: Armin Rigo Branch: cffi-1.0 Changeset: r1889:1d37117fd28c Date: 2015-04-30 01:22 +0200 http://bitbucket.org/cffi/cffi/changeset/1d37117fd28c/ Log: hg merge default diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -437,9 +437,9 @@ types ``TBYTE TCHAR LPCTSTR PCTSTR LPTSTR PTSTR PTBYTE PTCHAR`` are no longer automatically defined; see ``ffi.set_unicode()`` below. -* *New in version 0.9:* the other standard integer types from stdint.h, +* *New in version 0.9.3:* the other standard integer types from stdint.h, as long as they map to integers of 1, 2, 4 or 8 bytes. Larger integers - are not supported. + are not supported. (Actually added in version 0.9 but this was buggy.) .. 
_`common Windows types`: http://msdn.microsoft.com/en-us/library/windows/desktop/aa383751%28v=vs.85%29.aspx @@ -1078,6 +1078,23 @@ C.printf("hello, %f\n", ffi.cast("double", 42)) C.printf("hello, %s\n", ffi.new("char[]", "world")) +Note that if you are using ``dlopen()``, the function declaration in the +``cdef()`` must match the original one in C exactly, as usual --- in +particular, if this function is variadic in C, then its ``cdef()`` +declaration must also be variadic. You cannot declare it in the +``cdef()`` with fixed arguments instead, even if you plan to only call +it with these argument types. The reason is that some architectures +have a different calling convention depending on whether the function +signature is fixed or not. (On x86-64, the difference can sometimes be +seen in PyPy's JIT-generated code if some arguments are ``double``.) + +Note that the function signature ``int foo();`` is interpreted by CFFI +as equivalent to ``int foo(void);``. This differs from the C standard, +in which ``int foo();`` is really like ``int foo(...);`` and can be +called with any arguments. (This feature of C is a pre-C89 relic: the +arguments cannot be accessed at all in the body of ``foo()`` without +relying on compiler-specific extensions.) + Callbacks --------- diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -19,7 +19,7 @@ p = subprocess.Popen([pkg_config, option, 'libffi'], stdout=subprocess.PIPE) except OSError as e: - if e.errno != errno.ENOENT: + if e.errno not in [errno.ENOENT, errno.EACCES]: raise else: t = p.stdout.read().decode().strip() From noreply at buildbot.pypy.org Thu Apr 30 09:30:25 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 09:30:25 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: setuptools_ext: generate the C module in memory. 
Avoids the os.rename() Message-ID: <20150430073025.B449C1C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1890:bae04f21862d Date: 2015-04-30 09:19 +0200 http://bitbucket.org/cffi/cffi/changeset/bae04f21862d/ Log: setuptools_ext: generate the C module in memory. Avoids the os.rename() which, I think, fails to overwrite on Windows. diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -1,4 +1,4 @@ -import os, sys +import os, sys, io from cffi import ffiplatform, model from .cffi_opcode import * @@ -806,11 +806,26 @@ self.cffi_types[index] = CffiOp(OP_ENUM, enum_index) -def make_c_source(ffi, module_name, preamble, target_c_file): +if sys.version_info >= (3,): + NativeIO = io.StringIO +else: + class NativeIO(io.BytesIO): + def write(self, s): + if isinstance(s, unicode): + s = s.encode('ascii') + super(NativeIO, self).write(s) + +def make_c_source(ffi, module_name, preamble, target_c_file=NativeIO): recompiler = Recompiler(ffi, module_name) recompiler.collect_type_table() - with open(target_c_file, 'w') as f: + if target_c_file is NativeIO: + f = NativeIO() recompiler.write_source_to_f(f, preamble) + return f.getvalue() + else: + with open(target_c_file, 'w') as f: + recompiler.write_source_to_f(f, preamble) + return None def _get_extension(module_name, c_file, kwds): source_name = ffiplatform.maybe_relative_path(c_file) diff --git a/_cffi1/setuptools_ext.py b/_cffi1/setuptools_ext.py --- a/_cffi1/setuptools_ext.py +++ b/_cffi1/setuptools_ext.py @@ -40,22 +40,20 @@ ext = Extension(name=module_name, sources=allsources, **kwds) def make_mod(tmpdir): - mkpath(tmpdir) file_name = module_name + '.c' log.info("generating cffi module %r" % file_name) + output = recompiler.make_c_source(ffi, module_name, source) + mkpath(tmpdir) c_file = os.path.join(tmpdir, file_name) - c_tmp = '%s.%s' % (c_file, os.getpid()) - recompiler.make_c_source(ffi, module_name, source, c_tmp) try: with 
open(c_file, 'r') as f1: - with open(c_tmp, 'r') as f2: - if f1.read() != f2.read(): - raise IOError + if f1.read() != output: + raise IOError except IOError: - os.rename(c_tmp, c_file) + with open(c_file, 'w') as f1: + f1.write(output) else: log.info("already up-to-date") - os.unlink(c_tmp) return c_file if dist.ext_modules is None: From noreply at buildbot.pypy.org Thu Apr 30 09:30:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 09:30:26 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Try to support unicode_literals Message-ID: <20150430073026.B96351C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1891:4480292812da Date: 2015-04-30 09:30 +0200 http://bitbucket.org/cffi/cffi/changeset/4480292812da/ Log: Try to support unicode_literals diff --git a/_cffi1/recompiler.py b/_cffi1/recompiler.py --- a/_cffi1/recompiler.py +++ b/_cffi1/recompiler.py @@ -832,6 +832,8 @@ return ffiplatform.get_extension(source_name, module_name, **kwds) def recompile(ffi, module_name, preamble, tmpdir='.', **kwds): + if not isinstance(module_name, str): + module_name = module_name.encode('ascii') c_file = os.path.join(tmpdir, module_name + '.c') ext = _get_extension(module_name, c_file, kwds) make_c_source(ffi, module_name, preamble, c_file) diff --git a/_cffi1/test_unicode_literals.py b/_cffi1/test_unicode_literals.py new file mode 100644 --- /dev/null +++ b/_cffi1/test_unicode_literals.py @@ -0,0 +1,86 @@ +# +# ---------------------------------------------- +# WARNING, ALL LITERALS IN THIS FILE ARE UNICODE +# ---------------------------------------------- +# +from __future__ import unicode_literals +# +# +# +import sys, math +from cffi import FFI +from _cffi1 import recompiler + +lib_m = "m" +if sys.platform == 'win32': + #there is a small chance this fails on Mingw via environ $CC + import distutils.ccompiler + if distutils.ccompiler.get_default_compiler() == 'msvc': + lib_m = 'msvcrt' + + +def test_cast(): + ffi = FFI() + 
assert int(ffi.cast("int", 3.14)) == 3 # unicode literal + +def test_new(): + ffi = FFI() + assert ffi.new("int[]", [3, 4, 5])[2] == 5 # unicode literal + +def test_typeof(): + ffi = FFI() + tp = ffi.typeof("int[51]") # unicode literal + assert tp.length == 51 + +def test_sizeof(): + ffi = FFI() + assert ffi.sizeof("int[51]") == 51 * 4 # unicode literal + +def test_alignof(): + ffi = FFI() + assert ffi.alignof("int[51]") == 4 # unicode literal + +def test_getctype(): + ffi = FFI() + assert ffi.getctype("int**") == "int * *" # unicode literal + assert type(ffi.getctype("int**")) is str + +def test_cdef(): + ffi = FFI() + ffi.cdef("typedef int foo_t[50];") # unicode literal + +def test_offsetof(): + ffi = FFI() + ffi.cdef("typedef struct { int x, y; } foo_t;") + assert ffi.offsetof("foo_t", "y") == 4 # unicode literal + +def test_enum(): + ffi = FFI() + ffi.cdef("enum foo_e { AA, BB, CC };") # unicode literal + x = ffi.cast("enum foo_e", 1) + assert int(ffi.cast("int", x)) == 1 + +def test_dlopen(): + ffi = FFI() + ffi.cdef("double sin(double x);") + m = ffi.dlopen(lib_m) # unicode literal + x = m.sin(1.23) + assert x == math.sin(1.23) + +def test_verify(): + ffi = FFI() + ffi.cdef("double test_verify_1(double x);") # unicode literal + lib = ffi.verify("double test_verify_1(double x) { return x * 42.0; }") + assert lib.test_verify_1(-1.5) == -63.0 + +def test_callback(): + ffi = FFI() + cb = ffi.callback("int(int)", # unicode literal + lambda x: x + 42) + assert cb(5) == 47 + +def test_math_sin_unicode(): + ffi = FFI() + ffi.cdef("float sin(double); double cos(double);") + lib = recompiler.verify(ffi, 'test_math_sin_unicode', '#include ') + assert lib.cos(1.43) == math.cos(1.43) From noreply at buildbot.pypy.org Thu Apr 30 09:30:59 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 09:30:59 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: this test works now Message-ID: <20150430073059.AF92D1C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo 
Branch: cffi-1.0 Changeset: r1892:7395fc0d48c2 Date: 2015-04-30 09:31 +0200 http://bitbucket.org/cffi/cffi/changeset/7395fc0d48c2/ Log: this test works now diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -1670,7 +1670,6 @@ "function myfunc: 'foo_t' is used as result type, but is opaque") def test_include(): - py.test.xfail("test_include") ffi1 = FFI() ffi1.cdef("typedef struct { int x; ...; } foo_t;") ffi1.verify("typedef struct { int y, x; } foo_t;") From noreply at buildbot.pypy.org Thu Apr 30 09:38:52 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 09:38:52 +0200 (CEST) Subject: [pypy-commit] cffi default: Acquire the import lock explicitly around the sys.setdlopenflags() Message-ID: <20150430073852.751DA1C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1893:5df67a682cac Date: 2015-04-30 09:39 +0200 http://bitbucket.org/cffi/cffi/changeset/5df67a682cac/ Log: Acquire the import lock explicitly around the sys.setdlopenflags() diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -141,19 +141,23 @@ def load_library(self, flags=None): # XXX review all usages of 'self' here! 
# import it as a new extension module - if hasattr(sys, "getdlopenflags"): - previous_flags = sys.getdlopenflags() + imp.acquire_lock() try: - if hasattr(sys, "setdlopenflags") and flags is not None: - sys.setdlopenflags(flags) - module = imp.load_dynamic(self.verifier.get_module_name(), - self.verifier.modulefilename) - except ImportError as e: - error = "importing %r: %s" % (self.verifier.modulefilename, e) - raise ffiplatform.VerificationError(error) + if hasattr(sys, "getdlopenflags"): + previous_flags = sys.getdlopenflags() + try: + if hasattr(sys, "setdlopenflags") and flags is not None: + sys.setdlopenflags(flags) + module = imp.load_dynamic(self.verifier.get_module_name(), + self.verifier.modulefilename) + except ImportError as e: + error = "importing %r: %s" % (self.verifier.modulefilename, e) + raise ffiplatform.VerificationError(error) + finally: + if hasattr(sys, "setdlopenflags"): + sys.setdlopenflags(previous_flags) finally: - if hasattr(sys, "setdlopenflags"): - sys.setdlopenflags(previous_flags) + imp.release_lock() # # call loading_cpy_struct() to get the struct layout inferred by # the C compiler From noreply at buildbot.pypy.org Thu Apr 30 09:44:06 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 09:44:06 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: hg merge default Message-ID: <20150430074406.F08301C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1894:d4d01f78d16e Date: 2015-04-30 09:42 +0200 http://bitbucket.org/cffi/cffi/changeset/d4d01f78d16e/ Log: hg merge default diff --git a/cffi/vengine_cpy.py b/cffi/vengine_cpy.py --- a/cffi/vengine_cpy.py +++ b/cffi/vengine_cpy.py @@ -141,19 +141,23 @@ def load_library(self, flags=None): # XXX review all usages of 'self' here! 
# import it as a new extension module - if hasattr(sys, "getdlopenflags"): - previous_flags = sys.getdlopenflags() + imp.acquire_lock() try: - if hasattr(sys, "setdlopenflags") and flags is not None: - sys.setdlopenflags(flags) - module = imp.load_dynamic(self.verifier.get_module_name(), - self.verifier.modulefilename) - except ImportError as e: - error = "importing %r: %s" % (self.verifier.modulefilename, e) - raise ffiplatform.VerificationError(error) + if hasattr(sys, "getdlopenflags"): + previous_flags = sys.getdlopenflags() + try: + if hasattr(sys, "setdlopenflags") and flags is not None: + sys.setdlopenflags(flags) + module = imp.load_dynamic(self.verifier.get_module_name(), + self.verifier.modulefilename) + except ImportError as e: + error = "importing %r: %s" % (self.verifier.modulefilename, e) + raise ffiplatform.VerificationError(error) + finally: + if hasattr(sys, "setdlopenflags"): + sys.setdlopenflags(previous_flags) finally: - if hasattr(sys, "setdlopenflags"): - sys.setdlopenflags(previous_flags) + imp.release_lock() # # call loading_cpy_struct() to get the struct layout inferred by # the C compiler From noreply at buildbot.pypy.org Thu Apr 30 09:44:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 09:44:08 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: Forces setdlopenflags() explicitly Message-ID: <20150430074408.1246F1C0207@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1895:705f395febf0 Date: 2015-04-30 09:44 +0200 http://bitbucket.org/cffi/cffi/changeset/705f395febf0/ Log: Forces setdlopenflags() explicitly diff --git a/_cffi1/test_verify1.py b/_cffi1/test_verify1.py --- a/_cffi1/test_verify1.py +++ b/_cffi1/test_verify1.py @@ -2081,22 +2081,27 @@ assert ffi.getwinerror()[0] == n def test_verify_dlopen_flags(): - py.test.xfail("dlopen flags") + if not hasattr(sys, 'setdlopenflags'): + py.test.skip("requires sys.setdlopenflags()") # Careful with RTLD_GLOBAL. 
If by chance the FFI is not deleted # promptly, like on PyPy, then other tests may see the same # exported symbols as well. So we must not export a simple name # like 'foo'! - ffi1 = FFI() - ffi1.cdef("int foo_verify_dlopen_flags;") + old = sys.getdlopenflags() + try: + ffi1 = FFI() + ffi1.cdef("int foo_verify_dlopen_flags;") - lib1 = ffi1.verify("int foo_verify_dlopen_flags;", - flags=ffi1.RTLD_GLOBAL | ffi1.RTLD_LAZY) - lib2 = get_second_lib() + sys.setdlopenflags(ffi1.RTLD_GLOBAL | ffi1.RTLD_LAZY) + lib1 = ffi1.verify("int foo_verify_dlopen_flags;") + lib2 = get_second_lib() - lib1.foo_verify_dlopen_flags = 42 - assert lib2.foo_verify_dlopen_flags == 42 - lib2.foo_verify_dlopen_flags += 1 - assert lib1.foo_verify_dlopen_flags == 43 + lib1.foo_verify_dlopen_flags = 42 + assert lib2.foo_verify_dlopen_flags == 42 + lib2.foo_verify_dlopen_flags += 1 + assert lib1.foo_verify_dlopen_flags == 43 + finally: + sys.setdlopenflags(old) def get_second_lib(): # Hack, using modulename makes the test fail From noreply at buildbot.pypy.org Thu Apr 30 11:19:06 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 11:19:06 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: tweaks tweaks Message-ID: <20150430091906.1A00B1C1481@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1897:f5beaf1559c2 Date: 2015-04-30 11:19 +0200 http://bitbucket.org/cffi/cffi/changeset/f5beaf1559c2/ Log: tweaks tweaks diff --git a/MANIFEST.in b/MANIFEST.in --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,5 +3,5 @@ recursive-include testing *.py recursive-include doc *.py *.rst Makefile *.bat recursive-include demo py.cleanup *.py -recursive-include _cffi1 *.py *.h +recursive-include _cffi1 *.py *.c *.h include LICENSE setup_base.py diff --git a/demo/_curses_setup.py b/demo/_curses_setup.py --- a/demo/_curses_setup.py +++ b/demo/_curses_setup.py @@ -4,10 +4,10 @@ name="_curses", version="0.1", py_modules=["_curses"], - setup_requires=["cffi>=1.0"], + 
setup_requires=["cffi>=1.0.dev0"], cffi_modules=[ "_curses_build:ffi", ], - install_requires=["cffi>=1.0"], # should maybe be "cffi-backend" only? + install_requires=["cffi>=1.0.dev0"], # should maybe be "cffi-backend" only? zip_safe=False, ) diff --git a/demo/bsdopendirtype_setup.py b/demo/bsdopendirtype_setup.py --- a/demo/bsdopendirtype_setup.py +++ b/demo/bsdopendirtype_setup.py @@ -4,10 +4,10 @@ name="example", version="0.1", py_modules=["bsdopendirtype"], - setup_requires=["cffi>=1.0"], + setup_requires=["cffi>=1.0.dev0"], cffi_modules=[ "bsdopendirtype_build:ffi", ], - install_requires=["cffi>=1.0"], # should maybe be "cffi-backend" only? + install_requires=["cffi>=1.0.dev0"], # should maybe be "cffi-backend" only? zip_safe=False, ) diff --git a/demo/readdir2_setup.py b/demo/readdir2_setup.py --- a/demo/readdir2_setup.py +++ b/demo/readdir2_setup.py @@ -4,10 +4,10 @@ name="readdir2", version="0.1", py_modules=["readdir2"], - setup_requires=["cffi>=1.0"], + setup_requires=["cffi>=1.0.dev0"], cffi_modules=[ "readdir2_build:ffi", ], - install_requires=["cffi>=1.0"], # should maybe be "cffi-backend" only? + install_requires=["cffi>=1.0.dev0"], # should maybe be "cffi-backend" only? 
zip_safe=False, ) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -142,7 +142,7 @@ `Mailing list `_ """, - version='1.0.dev0', + version='1.0.dev2', packages=['cffi', '_cffi1'], package_data={'_cffi1': ['_cffi_include.h', 'parse_c_type.h']}, zip_safe=False, From noreply at buildbot.pypy.org Thu Apr 30 11:19:04 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 30 Apr 2015 11:19:04 +0200 (CEST) Subject: [pypy-commit] cffi cffi-1.0: updates Message-ID: <20150430091904.E59501C1481@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-1.0 Changeset: r1896:abe9ab7b9b11 Date: 2015-04-30 11:12 +0200 http://bitbucket.org/cffi/cffi/changeset/abe9ab7b9b11/ Log: updates diff --git a/MANIFEST.in b/MANIFEST.in --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,4 +3,5 @@ recursive-include testing *.py recursive-include doc *.py *.rst Makefile *.bat recursive-include demo py.cleanup *.py +recursive-include _cffi1 *.py *.h include LICENSE setup_base.py diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -142,7 +142,7 @@ `Mailing list `_ """, - version='1.0.0', + version='1.0.dev0', packages=['cffi', '_cffi1'], package_data={'_cffi1': ['_cffi_include.h', 'parse_c_type.h']}, zip_safe=False, From noreply at buildbot.pypy.org Thu Apr 30 13:53:10 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 30 Apr 2015 13:53:10 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: fix translation Message-ID: <20150430115311.013521C0207@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76952:af8b59de3288 Date: 2015-04-30 12:52 +0100 http://bitbucket.org/pypy/pypy/changeset/af8b59de3288/ Log: fix translation diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -2494,35 +2494,37 @@ smaller_types = unrolling_iterable( [(tp, tp.Unsigned) for tp in smaller_types]) def min_dtype(self): + value = rffi.cast(UInt64.T, self.value) for 
Small, USmall in smaller_types: - signed_max = rffi.cast(UInt_t.T, Small.max_value) - unsigned_max = rffi.cast(UInt_t.T, USmall.max_value) - if self.value <= unsigned_max: - if self.value <= signed_max: + signed_max = rffi.cast(UInt64.T, Small.max_value) + unsigned_max = rffi.cast(UInt64.T, USmall.max_value) + if value <= unsigned_max: + if value <= signed_max: return Small.num, USmall.num else: return USmall.num, USmall.num - if self.value <= rffi.cast(UInt_t.T, Int_t.max_value): + if value <= rffi.cast(UInt64.T, Int_t.max_value): return Int_t.num, UInt_t.num else: return UInt_t.num, UInt_t.num UInt_t.BoxType.min_dtype = min_dtype def min_dtype(self): - if self.value >= 0: + value = rffi.cast(Int64.T, self.value) + if value >= 0: for Small, USmall in smaller_types: - signed_max = rffi.cast(UInt_t.T, Small.max_value) - unsigned_max = rffi.cast(UInt_t.T, USmall.max_value) - if self.value <= unsigned_max: - if self.value <= signed_max: + signed_max = rffi.cast(Int64.T, Small.max_value) + unsigned_max = rffi.cast(Int64.T, USmall.max_value) + if value <= unsigned_max: + if value <= signed_max: return Small.num, USmall.num else: return USmall.num, USmall.num return Int_t.num, UInt_t.num else: for Small, USmall in smaller_types: - signed_min = rffi.cast(UInt_t.T, Small.min_value) - if self.value >= signed_max: + signed_min = rffi.cast(Int64.T, Small.min_value) + if value >= signed_min: return Small.num, Small.num return Int_t.num, Int_t.num Int_t.BoxType.min_dtype = min_dtype From noreply at buildbot.pypy.org Thu Apr 30 18:50:03 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 30 Apr 2015 18:50:03 +0200 (CEST) Subject: [pypy-commit] pypy can_cast: Implement min_dtype() on float types Message-ID: <20150430165003.A350C1C0EC0@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: can_cast Changeset: r76953:244204b77d90 Date: 2015-04-30 17:38 +0100 http://bitbucket.org/pypy/pypy/changeset/244204b77d90/ Log: Implement min_dtype() on float types diff --git 
a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -4053,3 +4053,8 @@ assert np.can_cast(127, np.int8) assert not np.can_cast(128, np.int8) assert np.can_cast(128, np.int16) + + assert np.can_cast(np.float32('inf'), np.float32) + assert np.can_cast(float('inf'), np.float32) # XXX: False in CNumPy?! + assert np.can_cast(3.3e38, np.float32) + assert not np.can_cast(3.4e38, np.float32) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1026,6 +1026,7 @@ kind = NPY.FLOATINGLTR char = NPY.HALFLTR BoxType = boxes.W_Float16Box + max_value = 65000. @specialize.argtype(1) def box(self, value): @@ -1070,6 +1071,7 @@ char = NPY.FLOATLTR BoxType = boxes.W_Float32Box format_code = "f" + max_value = 3.4e38 class Float64(BaseType, Float): T = rffi.DOUBLE @@ -1078,6 +1080,7 @@ char = NPY.DOUBLELTR BoxType = boxes.W_Float64Box format_code = "d" + max_value = 1.7e308 class ComplexFloating(object): _mixin_ = True @@ -2487,8 +2490,7 @@ signed_types = [Int8, Int16, Int32, Int64, Long] -for Int_t in signed_types: - UInt_t = Int_t.Unsigned +def make_integer_min_dtype(Int_t, UInt_t): smaller_types = [tp for tp in signed_types if rffi.sizeof(tp.T) < rffi.sizeof(Int_t.T)] smaller_types = unrolling_iterable( @@ -2528,3 +2530,32 @@ return Small.num, Small.num return Int_t.num, Int_t.num Int_t.BoxType.min_dtype = min_dtype + +for Int_t in signed_types: + UInt_t = Int_t.Unsigned + make_integer_min_dtype(Int_t, UInt_t) + + +smaller_float_types = { + Float16: [], Float32: [Float16], Float64: [Float16, Float32], + FloatLong: [Float16, Float32, Float64]} + +def make_float_min_dtype(Float_t): + smaller_types = unrolling_iterable(smaller_float_types[Float_t]) + smallest_type = Float16 + def min_dtype(self): + value = float(self.value) + if not 
rfloat.isfinite(value): + tp = smallest_type + else: + for SmallFloat in smaller_types: + if -SmallFloat.max_value < value < SmallFloat.max_value: + tp = SmallFloat + break + else: + tp = Float_t + return tp.num, tp.num + Float_t.BoxType.min_dtype = min_dtype + +for Float_t in float_types: + make_float_min_dtype(Float_t) From noreply at buildbot.pypy.org Thu Apr 30 19:13:47 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 30 Apr 2015 19:13:47 +0200 (CEST) Subject: [pypy-commit] pypy default: document fields on pyframe Message-ID: <20150430171347.CCCF41C0207@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r76954:f726f9ee2155 Date: 2015-04-30 19:13 +0200 http://bitbucket.org/pypy/pypy/changeset/f726f9ee2155/ Log: document fields on pyframe diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -49,14 +49,29 @@ last_instr = -1 last_exception = None f_backref = jit.vref_None + # For tracing w_f_trace = None - # For tracing instr_lb = 0 instr_ub = 0 instr_prev_plus_one = 0 + # end of tracing + is_being_profiled = False escaped = False # see mark_as_escaped() + w_globals = None + w_locals = None # dict containing locals, if forced or necessary + pycode = None # code object executed by that frame + locals_stack_w = None # the list of all locals and valuestack + valuestackdepth = -1 # number of items on valuestack + lastblock = None + builtin = None # builtin cache, only if honor__builtins__ is True, + # default to False + f_lineno = -1 # current lineno + cells = None # cells + + # there is also self.space which is removed by the annotator + def __init__(self, space, code, w_globals, outer_func): if not we_are_translated(): assert type(self) == space.FrameClass, ( @@ -65,11 +80,9 @@ assert isinstance(code, pycode.PyCode) self.space = space self.w_globals = w_globals - self.w_locals = None self.pycode = code self.locals_stack_w = [None] * 
(code.co_nlocals + code.co_stacksize) self.valuestackdepth = code.co_nlocals - self.lastblock = None make_sure_not_resized(self.locals_stack_w) check_nonneg(self.valuestackdepth) # From noreply at buildbot.pypy.org Thu Apr 30 19:20:13 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 30 Apr 2015 19:20:13 +0200 (CEST) Subject: [pypy-commit] pypy default: document two missing fields Message-ID: <20150430172013.93E1A1C0207@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r76955:b4e74fa7ca14 Date: 2015-04-30 19:20 +0200 http://bitbucket.org/pypy/pypy/changeset/b4e74fa7ca14/ Log: document two missing fields diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -65,13 +65,19 @@ locals_stack_w = None # the list of all locals and valuestack valuestackdepth = -1 # number of items on valuestack lastblock = None - builtin = None # builtin cache, only if honor__builtins__ is True, # default to False f_lineno = -1 # current lineno cells = None # cells + # other fields: + + # builtin - builtin cache, only if honor__builtins__ is True, + # there is also self.space which is removed by the annotator + # additionally JIT uses vable_token field that is representing + # frame current virtualizable state as seen by the JIT + def __init__(self, space, code, w_globals, outer_func): if not we_are_translated(): assert type(self) == space.FrameClass, ( From noreply at buildbot.pypy.org Thu Apr 30 22:12:06 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 30 Apr 2015 22:12:06 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: preserve stride order in astype Message-ID: <20150430201206.6F37F1C0D78@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-fixes Changeset: r76957:e7d09eb72b19 Date: 2015-04-30 04:17 +0300 http://bitbucket.org/pypy/pypy/changeset/e7d09eb72b19/ Log: preserve stride order in astype diff --git a/pypy/module/micronumpy/concrete.py 
b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -329,7 +329,11 @@ return ArrayBuffer(self, readonly) def astype(self, space, dtype): - strides, backstrides = calc_strides(self.get_shape(), dtype, self.order) + s_elsize = self.dtype.elsize + t_elsize = dtype.elsize + strides = [s*t_elsize/s_elsize for s in self.get_strides()] + backstrides = calc_backstrides(strides, self.get_shape()) + #strides, backstrides = calc_strides(self.get_shape(), dtype, self.order) impl = ConcreteArray(self.get_shape(), dtype, self.order, strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) From noreply at buildbot.pypy.org Thu Apr 30 22:12:05 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 30 Apr 2015 22:12:05 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: test, fix array() Message-ID: <20150430201205.473581C0D78@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-fixes Changeset: r76956:80fe68674c7a Date: 2015-04-30 04:03 +0300 http://bitbucket.org/pypy/pypy/changeset/80fe68674c7a/ Log: test, fix array() diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -124,12 +124,13 @@ copy = True if copy: shape = w_object.get_shape() - elems_w = [None] * w_object.get_size() - elsize = w_object.get_dtype().elsize - # TODO - use w_object.implementation without copying to a list - # unfortunately that causes a union error in translation - for i in range(w_object.get_size()): - elems_w[i] = w_object.implementation.getitem(i * elsize) + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + if support.product(shape) == 1: + w_arr.set_scalar_value(dtype.coerce(space, + w_object.implementation.getitem(0))) + else: + loop.setslice(space, shape, w_arr.implementation, w_object.implementation) + return w_arr else: imp = w_object.implementation with imp as storage: diff --git 
a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -237,7 +237,7 @@ assert np.WRAP is 1 assert np.RAISE is 2 - def test_ndarray(self): + def test_creation(self): from numpy import ndarray, array, dtype, flatiter assert type(ndarray) is type @@ -269,6 +269,12 @@ assert a.flags['C'] assert not a.flags['F'] + x = array([[0, 2], [1, 1], [2, 0]]) + y = array(x.T, dtype=float) + assert (y == x.T).all() + y = array(x.T, copy=False) + assert (y == x.T).all() + def test_ndmin(self): from numpy import array From noreply at buildbot.pypy.org Thu Apr 30 22:12:07 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 30 Apr 2015 22:12:07 +0200 (CEST) Subject: [pypy-commit] pypy numpy-fixes: test, fix creation of record array with ndarray data Message-ID: <20150430201207.90E781C0D78@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: numpy-fixes Changeset: r76958:d7e26a6bffd3 Date: 2015-04-30 22:09 +0300 http://bitbucket.org/pypy/pypy/changeset/d7e26a6bffd3/ Log: test, fix creation of record array with ndarray data diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -209,7 +209,7 @@ while not target_iter.done(target_state): setslice_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) val = source_iter.getitem(source_state) - if dtype.is_str_or_unicode(): + if dtype.is_str_or_unicode() or dtype.is_record(): val = dtype.coerce(space, val) else: val = val.convert_to(space, dtype) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3838,6 +3838,14 @@ ([4, 5, 6], [5.5, 6.5, 7.5, 8.5, 9.5])], dtype=d) assert len(list(a[0])) == 2 + + mdtype = dtype([('a', bool), ('b', bool), ('c', 
bool)]) + a = array([0, 0, 0, 1, 1]) + # this creates a value of (x, x, x) in b for each x in a + b = array(a, dtype=mdtype) + assert b.shape == a.shape + c = array([(x, x, x) for x in [0, 0, 0, 1, 1]], dtype=mdtype) + assert (b == c).all() def test_3d_record(self): from numpy import dtype, array